metadata
dict | text
stringlengths 0
40.6M
| id
stringlengths 14
255
|
|---|---|---|
{
"filename": "README.md",
"repo_name": "astro-datalab/notebooks-latest",
"repo_path": "notebooks-latest_extracted/notebooks-latest-master/06_EPO/e-TeenAstronomyCafe_Spanish/05_Gravitational_Lensing/README.md",
"type": "Markdown"
}
|
**05 Gravitational Lensing**
For information about the program, please visit: http://www.teenastronomycafe.org/
If you want to test this notebook you can:
[Open in Colab](https://colab.research.google.com/github/astro-datalab/notebooks-latest/blob/master/06_EPO/e-TeenAstronomyCafe/05_Gravitational_Lensing/Gravitational_Lensing.ipynb)
GET IN TOUCH: If you have suggestions to make the notebooks better, or if you encounter problems, please get in touch with the Astro Data Lab team at datalab@noirlab.edu
|
astro-datalabREPO_NAMEnotebooks-latestPATH_START.@notebooks-latest_extracted@notebooks-latest-master@06_EPO@e-TeenAstronomyCafe_Spanish@05_Gravitational_Lensing@README.md@.PATH_END.py
|
{
"filename": "update_manager.py",
"repo_name": "sibirrer/lenstronomy",
"repo_path": "lenstronomy_extracted/lenstronomy-main/lenstronomy/Workflow/update_manager.py",
"type": "Python"
}
|
import copy
import numpy as np
from lenstronomy.Sampling.parameters import Param
__all__ = ["UpdateManager"]
class UpdateManager(object):
    """This class manages the parameter constraints as they may evolve through the steps
    of the modeling.

    This includes: keeping certain parameters fixed during one modelling step,
    updating bounds and sigmas, and tracking the current (temporary) parameter state.
    """

    def __init__(
        self, kwargs_model, kwargs_constraints, kwargs_likelihood, kwargs_params
    ):
        """
        :param kwargs_model: keyword arguments to describe all model components used in
            class_creator.create_class_instances()
        :param kwargs_constraints: keyword arguments of the Param() class to handle parameter constraints during the
            sampling (except upper and lower limits and sampling input mean and width)
        :param kwargs_likelihood: keyword arguments of the Likelihood() class to handle parameters and settings of the
            likelihood
        :param kwargs_params: setting of the sampling bounds and initial guess mean and spread.
            The argument is organized as:
            'lens_model': [kwargs_init, kwargs_sigma, kwargs_fixed, kwargs_lower, kwargs_upper]
            'source_model': [kwargs_init, kwargs_sigma, kwargs_fixed, kwargs_lower, kwargs_upper]
            'lens_light_model': [kwargs_init, kwargs_sigma, kwargs_fixed, kwargs_lower, kwargs_upper]
            'point_source_model': [kwargs_init, kwargs_sigma, kwargs_fixed, kwargs_lower, kwargs_upper]
            'extinction_model': [kwargs_init, kwargs_sigma, kwargs_fixed, kwargs_lower, kwargs_upper]
            'special': [kwargs_init, kwargs_sigma, kwargs_fixed, kwargs_lower, kwargs_upper]
        """
        self.kwargs_model = kwargs_model
        self.kwargs_constraints = kwargs_constraints
        self.kwargs_likelihood = kwargs_likelihood

        def _component_params(model_list_key, params_key):
            # Return the (init, sigma, fixed, lower, upper) settings of one model
            # component, or five empty lists when the component is not configured.
            # Note: an empty model list ([]) is "configured" and still requires
            # the corresponding entry in kwargs_params (same as the original logic).
            if kwargs_model.get(model_list_key, None) is not None:
                return kwargs_params[params_key]
            return [], [], [], [], []

        (
            self._lens_init,
            self._lens_sigma,
            self._lens_fixed,
            self._lens_lower,
            self._lens_upper,
        ) = _component_params("lens_model_list", "lens_model")
        (
            self._source_init,
            self._source_sigma,
            self._source_fixed,
            self._source_lower,
            self._source_upper,
        ) = _component_params("source_light_model_list", "source_model")
        (
            self._lens_light_init,
            self._lens_light_sigma,
            self._lens_light_fixed,
            self._lens_light_lower,
            self._lens_light_upper,
        ) = _component_params("lens_light_model_list", "lens_light_model")
        (
            self._ps_init,
            self._ps_sigma,
            self._ps_fixed,
            self._ps_lower,
            self._ps_upper,
        ) = _component_params("point_source_model_list", "point_source_model")
        (
            self._extinction_init,
            self._extinction_sigma,
            self._extinction_fixed,
            self._extinction_lower,
            self._extinction_upper,
        ) = _component_params("optical_depth_model_list", "extinction_model")
        (
            self._tracer_source_init,
            self._tracer_source_sigma,
            self._tracer_source_fixed,
            self._tracer_source_lower,
            self._tracer_source_upper,
        ) = _component_params("tracer_source_model_list", "tracer_source_model")
        # 'special' parameters are dictionaries (not lists of dictionaries)
        if "special" in kwargs_params:
            (
                self._special_init,
                self._special_sigma,
                self._special_fixed,
                self._special_lower,
                self._special_upper,
            ) = kwargs_params["special"]
        else:
            (
                self._special_init,
                self._special_sigma,
                self._special_fixed,
                self._special_lower,
                self._special_upper,
            ) = ({}, {}, {}, {}, {})
        self._kwargs_temp = self.init_kwargs
        # TODO: check compatibility with number of point sources provided as well as other parameter labeling
        # TODO: give raise statement if sigma value is =0

    @property
    def init_kwargs(self):
        """
        :return: keyword arguments for all model components of the initial mean model proposition in the sampling
        """
        return {
            "kwargs_lens": self._lens_init,
            "kwargs_source": self._source_init,
            "kwargs_lens_light": self._lens_light_init,
            "kwargs_ps": self._ps_init,
            "kwargs_special": self._special_init,
            "kwargs_extinction": self._extinction_init,
            "kwargs_tracer_source": self._tracer_source_init,
        }

    @property
    def _init_kwargs(self):
        """Keyword arguments for all model components of the initial mean model
        proposition in the sampling.

        :return: tuple of keyword argument lists (same ordering as fixed_kwargs)
        """
        return (
            self._lens_init,
            self._source_init,
            self._lens_light_init,
            self._ps_init,
            self._special_init,
            self._extinction_init,
            self._tracer_source_init,
        )

    @property
    def sigma_kwargs(self):
        """
        :return: keyword arguments for all model components of the initial 1-sigma width proposition in the sampling
        """
        return {
            "kwargs_lens": self._lens_sigma,
            "kwargs_source": self._source_sigma,
            "kwargs_lens_light": self._lens_light_sigma,
            "kwargs_ps": self._ps_sigma,
            "kwargs_special": self._special_sigma,
            "kwargs_extinction": self._extinction_sigma,
            "kwargs_tracer_source": self._tracer_source_sigma,
        }

    @property
    def _lower_kwargs(self):
        # lower sampling bounds, ordered consistently with _init_kwargs
        return (
            self._lens_lower,
            self._source_lower,
            self._lens_light_lower,
            self._ps_lower,
            self._special_lower,
            self._extinction_lower,
            self._tracer_source_lower,
        )

    @property
    def _upper_kwargs(self):
        # upper sampling bounds, ordered consistently with _init_kwargs
        return (
            self._lens_upper,
            self._source_upper,
            self._lens_light_upper,
            self._ps_upper,
            self._special_upper,
            self._extinction_upper,
            self._tracer_source_upper,
        )

    @property
    def fixed_kwargs(self):
        # parameters held fixed during sampling, ordered consistently with _init_kwargs
        return (
            self._lens_fixed,
            self._source_fixed,
            self._lens_light_fixed,
            self._ps_fixed,
            self._special_fixed,
            self._extinction_fixed,
            self._tracer_source_fixed,
        )

    @property
    def fixed_kwargs_updated_list(self):
        """
        :return: list of fixed arguments including all settings (as resolved by the
            Param class, which may fix additional parameters through constraints)
        """
        return self.param_class.fixed_kwargs_list

    def check_initial_state(self):
        """Checks if initial state is within the bounds in all parameters returns a
        warning for specific arguments being out of the bounds.

        #TODO: checks whether parameters match the model definitions

        :return:
        """
        _model_class_list = [
            "kwargs_lens",
            "kwargs_source",
            "kwargs_lens_light",
            "kwargs_ps",
            "kwargs_special",
            "kwargs_extinction",
            "kwargs_tracer_source",
        ]
        init_kwargs_list = self._init_kwargs
        lower_kwargs_list = self._lower_kwargs
        upper_kwargs_list = self._upper_kwargs
        fixed_kwargs_list = self.fixed_kwargs_updated_list
        for i in range(len(init_kwargs_list)):
            init_kwargs, lower_kwargs, upper_kwargs, fixed_kwargs = (
                init_kwargs_list[i],
                lower_kwargs_list[i],
                upper_kwargs_list[i],
                fixed_kwargs_list[i],
            )
            if type(init_kwargs) is list:
                # list of per-model dictionaries: check each model separately
                for k in range(len(init_kwargs)):
                    _compare_bounds(
                        init_kwargs[k],
                        lower_kwargs[k],
                        upper_kwargs[k],
                        fixed_kwargs[k],
                        model_index=k,
                        model_class=_model_class_list[i],
                    )
            else:
                # 'special' parameters are a single dictionary
                _compare_bounds(
                    init_kwargs,
                    lower_kwargs,
                    upper_kwargs,
                    fixed_kwargs,
                    model_index=None,
                    model_class=_model_class_list[i],
                )

    def set_init_state(self):
        """Set the current state of the parameters to the initial one.

        :return:
        """
        self._kwargs_temp = self.init_kwargs

    @property
    def parameter_state(self):
        """
        :return: parameter state saved in this class
        """
        return self._kwargs_temp

    def best_fit(self, bijective=False):
        """Best fit (max likelihood) position for all the model parameters.

        :param bijective: boolean, if True, returns the parameters in the argument of
            the sampling that might deviate from the convention of the ImSim module. For
            example, if parameterized in the image position, the parameters remain in
            the image plane rather than being mapped to the source plane.
        :return: kwargs_result with all the keyword arguments of the best fit for the
            model components
        """
        (
            lens_temp,
            source_temp,
            lens_light_temp,
            ps_temp,
            special_temp,
            extinction_temp,
            tracer_source_temp,
        ) = (
            self._kwargs_temp["kwargs_lens"],
            self._kwargs_temp["kwargs_source"],
            self._kwargs_temp["kwargs_lens_light"],
            self._kwargs_temp["kwargs_ps"],
            self._kwargs_temp["kwargs_special"],
            self._kwargs_temp["kwargs_extinction"],
            self._kwargs_temp["kwargs_tracer_source"],
        )
        if bijective is False:
            # map sampling-convention arguments back to the ImSim convention
            lens_temp = self.param_class.update_lens_scaling(
                special_temp, lens_temp, inverse=False
            )
            source_temp = self.param_class.image2source_plane(
                source_temp, lens_temp, special_temp
            )
        return {
            "kwargs_lens": lens_temp,
            "kwargs_source": source_temp,
            "kwargs_lens_light": lens_light_temp,
            "kwargs_ps": ps_temp,
            "kwargs_special": special_temp,
            "kwargs_extinction": extinction_temp,
            "kwargs_tracer_source": tracer_source_temp,
        }

    def update_param_state(
        self,
        kwargs_lens=None,
        kwargs_source=None,
        kwargs_lens_light=None,
        kwargs_ps=None,
        kwargs_special=None,
        kwargs_extinction=None,
        kwargs_tracer_source=None,
    ):
        """Updates the temporary state of the parameters being saved. ATTENTION: Any
        previous knowledge gets lost if you call this function.

        :param kwargs_lens: lens model keyword argument list
        :param kwargs_source: source light model keyword argument list
        :param kwargs_lens_light: lens light model keyword argument list
        :param kwargs_ps: point source model keyword argument list
        :param kwargs_special: special parameter keyword arguments
        :param kwargs_extinction: extinction model keyword argument list
        :param kwargs_tracer_source: tracer source model keyword argument list
        :return:
        """
        self._kwargs_temp = {
            "kwargs_lens": kwargs_lens,
            "kwargs_source": kwargs_source,
            "kwargs_lens_light": kwargs_lens_light,
            "kwargs_ps": kwargs_ps,
            "kwargs_special": kwargs_special,
            "kwargs_extinction": kwargs_extinction,
            "kwargs_tracer_source": kwargs_tracer_source,
        }
        # special parameters may alter the model configuration itself
        self.update_kwargs_model(kwargs_special)

    def update_param_value(self, lens=None, source=None, lens_light=None, ps=None):
        """Set a model parameter to a specific value.

        :param lens: [[i_model, ['param1', 'param2',...], [...]]
        :param source: [[i_model, ['param1', 'param2',...], [...]]
        :param lens_light: [[i_model, ['param1', 'param2',...], [...]]
        :param ps: [[i_model, ['param1', 'param2',...], [...]]
        :return: 0, the value of the param is overwritten
        """
        if lens is None:
            lens = []
        if source is None:
            source = []
        if lens_light is None:
            lens_light = []
        if ps is None:
            ps = []
        for items, kwargs_key in zip(
            [lens, source, lens_light, ps],
            ["kwargs_lens", "kwargs_source", "kwargs_lens_light", "kwargs_ps"],
        ):
            for item in items:
                index = item[0]
                keys = item[1]
                values = item[2]
                for key, value in zip(keys, values):
                    self._kwargs_temp[kwargs_key][index][key] = value

    @property
    def param_class(self):
        """Creating instance of lenstronomy Param() class. It uses the keyword arguments
        in self.kwargs_constraints as __init__() arguments, as well as
        self.kwargs_model, and the set of kwargs_fixed___, kwargs_lower___,
        kwargs_upper___ arguments for lens, lens_light, source, point source, extinction
        and special parameters.

        :return: instance of the Param class with the recent options and bounds
        """
        (
            kwargs_fixed_lens,
            kwargs_fixed_source,
            kwargs_fixed_lens_light,
            kwargs_fixed_ps,
            kwargs_fixed_special,
            kwargs_fixed_extinction,
            kwargs_fixed_tracer_source,
        ) = self.fixed_kwargs
        (
            kwargs_lower_lens,
            kwargs_lower_source,
            kwargs_lower_lens_light,
            kwargs_lower_ps,
            kwargs_lower_special,
            kwargs_lower_extinction,
            kwargs_lower_tracer_source,
        ) = self._lower_kwargs
        (
            kwargs_upper_lens,
            kwargs_upper_source,
            kwargs_upper_lens_light,
            kwargs_upper_ps,
            kwargs_upper_special,
            kwargs_upper_extinction,
            kwargs_upper_tracer_source,
        ) = self._upper_kwargs
        kwargs_model = self.kwargs_model
        kwargs_constraints = self.kwargs_constraints
        lens_temp = self._kwargs_temp["kwargs_lens"]
        param_class = Param(
            kwargs_model,
            kwargs_fixed_lens,
            kwargs_fixed_source,
            kwargs_fixed_lens_light,
            kwargs_fixed_ps,
            kwargs_fixed_special,
            kwargs_fixed_extinction,
            kwargs_fixed_tracer_source,
            kwargs_lower_lens,
            kwargs_lower_source,
            kwargs_lower_lens_light,
            kwargs_lower_ps,
            kwargs_lower_special,
            kwargs_lower_extinction,
            kwargs_lower_tracer_source,
            kwargs_upper_lens,
            kwargs_upper_source,
            kwargs_upper_lens_light,
            kwargs_upper_ps,
            kwargs_upper_special,
            kwargs_upper_extinction,
            kwargs_upper_tracer_source,
            kwargs_lens_init=lens_temp,
            **kwargs_constraints
        )
        return param_class

    def update_kwargs_model(self, kwargs_special):
        """Update the kwargs_model with the new kwargs_special.

        :param kwargs_special: special parameter keyword arguments
        :return: (possibly updated) kwargs_model
        """
        kwargs_model, update_bool = self.param_class.update_kwargs_model(kwargs_special)
        if update_bool:
            self.kwargs_model = kwargs_model
        return kwargs_model

    def update_options(
        self, kwargs_model=None, kwargs_constraints=None, kwargs_likelihood=None
    ):
        """
        updates the options by overwriting the kwargs with the new ones being added/changed
        WARNING: some updates may not be valid depending on the model options. Use carefully!

        :param kwargs_model: keyword arguments to describe all model components used in
            class_creator.create_class_instances() that are updated from previous arguments
        :param kwargs_constraints: keyword arguments updating self.kwargs_constraints
        :param kwargs_likelihood: keyword arguments updating self.kwargs_likelihood
        :return: kwargs_model, kwargs_constraints, kwargs_likelihood (the updated dictionaries)
        """
        if kwargs_model is None:
            kwargs_model = {}
        if kwargs_constraints is None:
            kwargs_constraints = {}
        if kwargs_likelihood is None:
            kwargs_likelihood = {}
        # BUGFIX: dict.update() mutates in place and returns None, so the previous
        # implementation returned a (None, None, None) triple; return the updated
        # dictionaries themselves instead.
        self.kwargs_model.update(kwargs_model)
        self.kwargs_constraints.update(kwargs_constraints)
        self.kwargs_likelihood.update(kwargs_likelihood)
        return (
            self.kwargs_model,
            self.kwargs_constraints,
            self.kwargs_likelihood,
        )

    def update_limits(
        self,
        change_source_lower_limit=None,
        change_source_upper_limit=None,
        change_lens_lower_limit=None,
        change_lens_upper_limit=None,
    ):
        """Updates the limits (lower and upper) of the update manager instance.

        :param change_source_lower_limit: [[i_model, ['param_name1', 'param_name2', ...], [value1, value2, ...]]]
        :param change_lens_lower_limit: [[i_model, ['param_name1', 'param_name2', ...], [value1, value2, ...]]]
        :param change_source_upper_limit: [[i_model, ['param_name1', 'param_name2', ...], [value1, value2, ...]]]
        :param change_lens_upper_limit: [[i_model, ['param_name1', 'param_name2', ...], [value1, value2, ...]]]
        :return: updates internal state of lower and upper limits accessible from outside
        """
        if change_source_lower_limit is not None:
            self._source_lower = self._update_kwargs_list(
                change_source_lower_limit, self._source_lower
            )
        if change_source_upper_limit is not None:
            self._source_upper = self._update_kwargs_list(
                change_source_upper_limit, self._source_upper
            )
        if change_lens_lower_limit is not None:
            self._lens_lower = self._update_kwargs_list(
                change_lens_lower_limit, self._lens_lower
            )
        if change_lens_upper_limit is not None:
            self._lens_upper = self._update_kwargs_list(
                change_lens_upper_limit, self._lens_upper
            )

    def update_sigmas(
        self,
        change_sigma_lens=None,
        change_sigma_source=None,
        change_sigma_lens_light=None,
    ):
        """Updates individual estimated uncertainty levels for the initialization of
        search and sampling algorithms.

        :param change_sigma_lens: [[i_model, ['param_name1', 'param_name2', ...], [value1, value2, ...]]]
        :param change_sigma_source: [[i_model, ['param_name1', 'param_name2', ...], [value1, value2, ...]]]
        :param change_sigma_lens_light: [[i_model, ['param_name1', 'param_name2', ...], [value1, value2, ...]]]
        :return: updated internal state of the spread to initialize samplers
        """
        if change_sigma_lens is not None:
            self._lens_sigma = self._update_kwargs_list(
                change_sigma_lens, self._lens_sigma
            )
        if change_sigma_source is not None:
            self._source_sigma = self._update_kwargs_list(
                change_sigma_source, self._source_sigma
            )
        if change_sigma_lens_light is not None:
            self._lens_light_sigma = self._update_kwargs_list(
                change_sigma_lens_light, self._lens_light_sigma
            )

    @staticmethod
    def _update_kwargs_list(change_list, kwargs_list_previous):
        """
        :param change_list: input format of def update_limits [[i_model, ['param_name1', 'param_name2', ...],
            [value1, value2, ...]]]
        :param kwargs_list_previous: keyword argument list
        :return: updated limits (deep copy; the previous list is left untouched)
        """
        kwargs_limit_updated = copy.deepcopy(kwargs_list_previous)
        for i in range(len(change_list)):
            i_model = change_list[i][0]
            change_names = change_list[i][1]
            values = change_list[i][2]
            for j, param_name in enumerate(change_names):
                kwargs_limit_updated[i_model][param_name] = values[j]
        return kwargs_limit_updated

    def update_fixed(
        self,
        lens_add_fixed=None,
        source_add_fixed=None,
        lens_light_add_fixed=None,
        ps_add_fixed=None,
        special_add_fixed=None,
        tracer_source_add_fixed=None,
        lens_remove_fixed=None,
        source_remove_fixed=None,
        lens_light_remove_fixed=None,
        ps_remove_fixed=None,
        special_remove_fixed=None,
        tracer_source_remove_fixed=None,
    ):
        """Adds or removes the values of the keyword arguments that are stated in the
        _add_fixed to the existing fixed arguments. convention for input arguments are:
        [[i_model, ['param_name1', 'param_name2', ...], [value1, value2, ...
        (optional)], [], ...]

        :param lens_add_fixed: added fixed parameter in lens model
        :param source_add_fixed: added fixed parameter in source model
        :param lens_light_add_fixed: added fixed parameter in lens light model
        :param ps_add_fixed: added fixed parameter in point source model
        :param special_add_fixed: added fixed parameter in special model
        :param tracer_source_add_fixed: added fixed parameter in tracer source model
        :param lens_remove_fixed: remove fixed parameter in lens model
        :param source_remove_fixed: remove fixed parameter in source model
        :param lens_light_remove_fixed: remove fixed parameter in lens light model
        :param ps_remove_fixed: remove fixed parameter in point source model
        :param special_remove_fixed: remove fixed parameter in special model
        :param tracer_source_remove_fixed: remove fixed parameter in tracer source model
        :return: updated kwargs fixed
        """
        lens_fixed = self._add_fixed(
            self._kwargs_temp["kwargs_lens"], self._lens_fixed, lens_add_fixed
        )
        lens_fixed = self._remove_fixed(lens_fixed, lens_remove_fixed)
        source_fixed = self._add_fixed(
            self._kwargs_temp["kwargs_source"], self._source_fixed, source_add_fixed
        )
        source_fixed = self._remove_fixed(source_fixed, source_remove_fixed)
        lens_light_fixed = self._add_fixed(
            self._kwargs_temp["kwargs_lens_light"],
            self._lens_light_fixed,
            lens_light_add_fixed,
        )
        lens_light_fixed = self._remove_fixed(lens_light_fixed, lens_light_remove_fixed)
        ps_fixed = self._add_fixed(
            self._kwargs_temp["kwargs_ps"], self._ps_fixed, ps_add_fixed
        )
        ps_fixed = self._remove_fixed(ps_fixed, ps_remove_fixed)
        # special parameters are single dictionaries (not lists), handled here directly
        special_fixed = copy.deepcopy(self._special_fixed)
        special_temp = self._kwargs_temp["kwargs_special"]
        tracer_source_fixed = self._add_fixed(
            self._kwargs_temp["kwargs_tracer_source"],
            self._tracer_source_fixed,
            tracer_source_add_fixed,
        )
        tracer_source_fixed = self._remove_fixed(
            tracer_source_fixed, tracer_source_remove_fixed
        )
        if special_add_fixed is None:
            special_add_fixed = []
        for param_name in special_add_fixed:
            # keep an already fixed value rather than overwriting it
            if param_name not in special_fixed:
                special_fixed[param_name] = special_temp[param_name]
        if special_remove_fixed is None:
            special_remove_fixed = []
        for param_name in special_remove_fixed:
            if param_name in special_fixed:
                del special_fixed[param_name]
        (
            self._lens_fixed,
            self._source_fixed,
            self._lens_light_fixed,
            self._ps_fixed,
            self._special_fixed,
            self._tracer_source_fixed,
        ) = (
            lens_fixed,
            source_fixed,
            lens_light_fixed,
            ps_fixed,
            special_fixed,
            tracer_source_fixed,
        )

    @staticmethod
    def _add_fixed(kwargs_model, kwargs_fixed, add_fixed):
        """
        :param kwargs_model: model parameters
        :param kwargs_fixed: parameters that are held fixed (even before)
        :param add_fixed: additional fixed parameters
            [[i_model, ['param_name1', 'param_name2', ...], [value1, value2, ... (optional)], [], ...]
        :return: updated kwargs_fixed

        NOTE(review): kwargs_fixed is mutated in place (the deepcopy is commented
        out upstream) -- confirm callers rely on this before changing it.
        """
        if add_fixed is None:
            add_fixed = []
        # fixed_kwargs = copy.deepcopy(kwargs_fixed)
        for i in range(len(add_fixed)):
            i_model = add_fixed[i][0]
            fix_names = add_fixed[i][1]
            if len(add_fixed[i]) > 2:
                values = add_fixed[i][2]
            else:
                # no explicit values provided: fix at the current model values
                values = [None] * len(fix_names)
            for j, param_name in enumerate(fix_names):
                if values[j] is None:
                    kwargs_fixed[i_model][param_name] = kwargs_model[i_model][
                        param_name
                    ]  # add fixed list
                else:
                    kwargs_fixed[i_model][param_name] = values[j]
        return kwargs_fixed

    @staticmethod
    def _remove_fixed(kwargs_fixed, remove_fixed):
        """
        :param kwargs_fixed: fixed parameters (before)
        :param remove_fixed: list of parameters to be removed from the fixed list and initialized by the value of
            kwargs_model [[i_model, ['param_name1', 'param_name2', ...]], [], ...]
        :return: updated kwargs fixed parameters
        """
        if remove_fixed is None:
            remove_fixed = []
        for i in range(len(remove_fixed)):
            i_model = remove_fixed[i][0]
            fix_names = remove_fixed[i][1]
            for param_name in fix_names:
                if (
                    param_name in kwargs_fixed[i_model]
                ):  # if the parameter already is in the fixed list, do not change it
                    del kwargs_fixed[i_model][param_name]
        return kwargs_fixed

    def fix_image_parameters(self, image_index=0):
        """Fixes all parameters that are only assigned to a specific image. This allows
        to sample only parameters that constraint by the fitting of a sub-set of the
        images.

        :param image_index: index
        :return: None
        """
        pass
def _compare_bounds(
init_kwargs,
lower_kwargs,
upper_kwargs,
fixed_kwargs,
model_index=None,
model_class=None,
):
"""Raises Warnings when parameters are out of bounds.
:param init_kwargs: dictionary of initial parameters
:param lower_kwargs: lower bound dictionary
:param upper_kwargs: upper bound dictionary
:param model_index: integer of the index of the model in a certain model class
:param model_class: string of model class
:return: None
"""
for key in init_kwargs:
# make sure linear parameters are not required when linear solver is on
if key not in fixed_kwargs: # key not in ['amp'] or
if key not in lower_kwargs:
raise ValueError(
"Variable %s missing in lower bounds of %s'th model of %s"
% (key, model_index, model_class)
)
else:
if np.all(lower_kwargs[key] > init_kwargs[key]):
Warning(
"Variable %s of %s'th model of %s with initial guess %s is below the lower bound %s"
% (
key,
model_index,
model_class,
init_kwargs[key],
lower_kwargs[key],
)
)
if key not in upper_kwargs:
raise ValueError(
"Variable %s missing in upper bounds of %s'th model of %s"
% (key, model_index, model_class)
)
else:
if np.all(upper_kwargs[key] < init_kwargs[key]):
Warning(
"Variable %s of %s'th model of %s with initial guess %s is above the upper bound %s"
% (
key,
model_index,
model_class,
init_kwargs[key],
upper_kwargs[key],
)
)
|
sibirrerREPO_NAMElenstronomyPATH_START.@lenstronomy_extracted@lenstronomy-main@lenstronomy@Workflow@update_manager.py@.PATH_END.py
|
{
"filename": "t01.py",
"repo_name": "spedas/pyspedas",
"repo_path": "pyspedas_extracted/pyspedas-master/pyspedas/geopack/t01.py",
"type": "Python"
}
|
import logging
import numpy as np
from pytplot import get_data, store_data
from geopack import geopack, t01
def tt01(pos_var_gsm, parmod=None, suffix=''):
    """
    tplot wrapper for the functional interface to Sheng Tian's implementation of the
    Tsyganenko 2001 and IGRF model: https://github.com/tsssss/geopack

    Input
    ------
        pos_var_gsm: str
            tplot variable containing the position data (km) in GSM coordinates

    Parameters
    -----------
        parmod: str
            A tplot variable containing a 10-element model parameter array (vs. time).
            The timestamps should match the input position variable.
            Only the first 6 elements are used::

                (1) solar wind pressure pdyn (nanopascals),
                (2) dst (nanotesla)
                (3) byimf (nanotesla)
                (4) bzimf (nanotesla)
                (5) g1-index
                (6) g2-index (see Tsyganenko [2001] for an exact definition of these two indices)

        suffix: str
            Suffix to append to the tplot output variable

    Returns
    --------
        str
            Name of the tplot variable containing the model data
    """
    pos_data = get_data(pos_var_gsm)

    if pos_data is None:
        logging.error('Variable not found: ' + pos_var_gsm)
        return

    if parmod is None:
        logging.error('parmod keyword required.')
        return

    par = get_data(parmod)
    if par is None:
        # BUGFIX: previously a missing parmod variable left `par` as None and the
        # function crashed later inside the model loop with an opaque TypeError
        logging.error('Variable not found: ' + parmod)
        return
    par = par.y

    b0gsm = np.zeros((len(pos_data.times), 3))
    dbgsm = np.zeros((len(pos_data.times), 3))

    # convert km to Earth radii
    pos_re = pos_data.y/6371.2

    for idx, time in enumerate(pos_data.times):
        # recalc() updates the IGRF coefficients for this time and returns the dipole tilt
        tilt = geopack.recalc(time)

        # IGRF B in GSM
        b0gsm[idx, 0], b0gsm[idx, 1], b0gsm[idx, 2] = geopack.igrf_gsm(pos_re[idx, 0], pos_re[idx, 1], pos_re[idx, 2])

        # T01 dB in GSM (comment previously said T96)
        dbgsm[idx, 0], dbgsm[idx, 1], dbgsm[idx, 2] = t01.t01(par[idx, :], tilt, pos_re[idx, 0], pos_re[idx, 1], pos_re[idx, 2])

    # total field = internal (IGRF) + external (T01) contributions
    bgsm = b0gsm + dbgsm

    saved = store_data(pos_var_gsm + '_bt01' + suffix, data={'x': pos_data.times, 'y': bgsm})

    if saved:
        return pos_var_gsm + '_bt01' + suffix
|
spedasREPO_NAMEpyspedasPATH_START.@pyspedas_extracted@pyspedas-master@pyspedas@geopack@t01.py@.PATH_END.py
|
{
"filename": "_x.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/layout/ternary/domain/_x.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class XValidator(_plotly_utils.basevalidators.InfoArrayValidator):
    """Validator for the two-element ``x`` domain of ``layout.ternary.domain``."""

    def __init__(self, plotly_name="x", parent_name="layout.ternary.domain", **kwargs):
        # Each item constrains one endpoint of the horizontal domain to [0, 1].
        endpoint = {"valType": "number", "min": 0, "max": 1, "editType": "plot"}
        kwargs.setdefault("edit_type", "plot")
        kwargs.setdefault("items", [dict(endpoint), dict(endpoint)])
        kwargs.setdefault("role", "info")
        super(XValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@layout@ternary@domain@_x.py@.PATH_END.py
|
{
"filename": "sigma_dA.py",
"repo_name": "HETDEX/elixer",
"repo_path": "elixer_extracted/elixer-main/elixer/Bayes/sigma_dA.py",
"type": "Python"
}
|
from datapath import *
import numpy as np
def sigma_dA(fContam, fIncomp, cntLAErecovered, scale, bin, version, baserun):
    """Estimate the uncertainty sigma_dA on the angular-diameter distance.

    :param fContam: contamination fraction of the LAE sample
    :param fIncomp: incompleteness fraction of the LAE sample (ignored for version 0)
    :param cntLAErecovered: number of recovered LAEs (used by version 0 only)
    :param scale: survey scale factor (used by version 0 only)
    :param bin: redshift bin: '1.9-2.5'/0, '2.5-3.5'/1, plus '1.9-3.5'/2 for version 4
    :param version: calibration version; 'old' is an alias for 0, otherwise 0-4
    :param baserun: base-run identifier used to infer the survey area (version 4 only)
    :return: sigma_dA estimate (float)
    :raises ValueError: if bin/version do not select a known calibration
    """
    if version == 'old':
        # BUGFIX: legacy alias; comparing the string 'old' against integers
        # (0 <= version <= 2) raises TypeError in Python 3, so map it first
        version = 0
    if 0 <= version <= 2:
        if version == 0:
            # empirical error model: contamination term + Poisson-like count term
            if bin == '1.9-2.5' or bin == 0:
                sigma2_dA = (fContam / 0.025) ** 2 + (270000 * scale) / cntLAErecovered
            elif bin == '2.5-3.5' or bin == 1:
                sigma2_dA = (fContam / 0.05) ** 2 + (360000 * scale) / cntLAErecovered
            else:
                raise ValueError("unknown bin %r for version %r" % (bin, version))
        elif 1 <= version <= 2:
            purity = 1. - fContam
            comp = 1. - fIncomp
            x = [purity, comp]
            if version == 1:
                if bin == '1.9-2.5' or bin == 0:
                    a, b, c, d, e, f, g = 0.681689624681, -2.16250317052, 0.740562744622, -0.668443232827, 2.08179253546, -5.13850338411, -1.6775132763
                elif bin == '2.5-3.5' or bin == 1:
                    a, b, c, d, e, f, g = 0.951538667763, -1.93045963513, -4.5814059474, -1.84896416802, 8.03559001831, -3.46946410056, -1.73147001525
                else:
                    raise ValueError("unknown bin %r for version %r" % (bin, version))
            elif version == 2:
                if bin == '1.9-2.5' or bin == 0:
                    a, b, c, d, e, f, g = 0.604236809478, -1.19216195965, 0.674273808159, -0.744444591131, 2.20603037196, -3.43169450808, -1.62080260745
                elif bin == '2.5-3.5' or bin == 1:
                    a, b, c, d, e, f, g = 0.918252305828, -0.559265810365, -28.9396959949, -1.70351167331, 32.4206021953, -0.725856758197, -1.68961729049
                else:
                    raise ValueError("unknown bin %r for version %r" % (bin, version))
            else:
                # non-integer versions such as 1.5 previously fell through and
                # crashed with an UnboundLocalError; fail loudly instead
                raise ValueError("unknown version %r" % (version,))
            sigma2_dA = a * x[0] ** b + c * x[1] ** d + e * x[0] ** f * x[1] ** g
        else:
            raise ValueError("unknown version %r" % (version,))
        return np.sqrt(sigma2_dA)
    elif version >= 3:
        x = [fContam, fIncomp]
        if version == 3:
            # hard-coded fit coefficients for the 7-parameter error model
            if bin == '1.9-2.5' or bin == 0:
                floor = 1.86481
                a, b, c, d, e, f, g = 3.32245291712, 1.08726762021, 1.82220092906, 1.1720357335, 12.4199946023, 0.845825026382, 2.37163507386
            elif bin == '2.5-3.5' or bin == 1:
                floor = 2.13722
                a, b, c, d, e, f, g = 14.749980163, 1.40431000771, 2.80882389269, 1.30706702444, 21.4442583753, 1.57984728605, 0.930866040527
            else:
                raise ValueError("unknown bin %r for version %r" % (bin, version))
        elif version == 4:
            # coefficients are read from a file keyed by survey area and bin
            if baserun == '150525_34.38':
                survey_area = int(41253. / 2 ** 2)
            elif baserun == '150525' or baserun[:7] == '150824_':
                # NOTE(review): for baserun '150525', baserun[7:] is '' and
                # float('') raises ValueError -- probably only '150824_*' runs
                # are expected to reach the float(); confirm against callers
                survey_area = 300 * float(baserun[7:])
            else:
                survey_area = 300.
            if bin == '1.9-2.5' or bin == 0:
                opt = 'bin1'
            elif bin == '2.5-3.5' or bin == 1:
                opt = 'bin2'
            elif bin == '1.9-3.5' or bin == 2:
                opt = 'all'
            else:
                raise ValueError("unknown bin %r for version %r" % (bin, version))
            floor, a, b, c, d, e, f, g = 0, 0, 0, 0, 0, 0, 0, 0
            filename = sudepath + '%.1f' % (survey_area) + '_coefficients_' + opt + '.dat'
            # context manager closes the file even if a parse error occurs
            # (the original left the handle open on any exception)
            with open(filename, 'r') as data:
                for line in data:
                    thisline = line.split()
                    if not thisline:
                        continue  # skip blank lines (previously an IndexError)
                    if thisline[0] == 'floor':
                        floor = float(thisline[2])
                    if ' '.join(thisline[:3]) == 'With 7 params,':
                        a = float(thisline[3])
                        b = float(thisline[4])
                        c = float(thisline[5])
                        d = float(thisline[6])
                        e = float(thisline[7])
                        f = float(thisline[8])
                        g = float(thisline[9])
        else:
            raise ValueError("unknown version %r" % (version,))
        return floor + a * x[0] ** b + c * x[1] ** d + e * x[0] ** f * x[1] ** g
    else:
        # negative versions previously returned None silently
        raise ValueError("unknown version %r" % (version,))
|
HETDEXREPO_NAMEelixerPATH_START.@elixer_extracted@elixer-main@elixer@Bayes@sigma_dA.py@.PATH_END.py
|
{
"filename": "_visible.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/layout/xaxis/rangeselector/button/_visible.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class VisibleValidator(_plotly_utils.basevalidators.BooleanValidator):
    """Validator for the ``visible`` flag of ``layout.xaxis.rangeselector.button``."""

    def __init__(
        self,
        plotly_name="visible",
        parent_name="layout.xaxis.rangeselector.button",
        **kwargs
    ):
        # Split the option handling from the super() call for readability;
        # defaults apply only when the caller did not override them.
        options = dict(kwargs)
        edit_type = options.pop("edit_type", "plot")
        role = options.pop("role", "info")
        super(VisibleValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            role=role,
            **options
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@layout@xaxis@rangeselector@button@_visible.py@.PATH_END.py
|
{
"filename": "_lineposition.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/layout/scene/xaxis/tickfont/_lineposition.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class LinepositionValidator(_plotly_utils.basevalidators.FlaglistValidator):
    """Validator for the ``lineposition`` flaglist of ``layout.scene.xaxis.tickfont``."""

    def __init__(
        self,
        plotly_name="lineposition",
        parent_name="layout.scene.xaxis.tickfont",
        **kwargs,
    ):
        # Defaults are applied only when the caller has not supplied overrides.
        kwargs.setdefault("edit_type", "plot")
        kwargs.setdefault("extras", ["none"])
        kwargs.setdefault("flags", ["under", "over", "through"])
        super().__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@layout@scene@xaxis@tickfont@_lineposition.py@.PATH_END.py
|
{
"filename": "tools.py",
"repo_name": "statsmodels/statsmodels",
"repo_path": "statsmodels_extracted/statsmodels-main/statsmodels/tsa/arima/tools.py",
"type": "Python"
}
|
"""
SARIMAX tools.
Author: Chad Fulton
License: BSD-3
"""
import numpy as np
def standardize_lag_order(order, title=None):
    """
    Standardize lag order input.

    Parameters
    ----------
    order : int or array_like
        Maximum lag order (if integer) or iterable of specific lag orders.
    title : str, optional
        Description of the order (e.g. "autoregressive") to use in error
        messages.

    Returns
    -------
    order : int or list of int
        Maximum lag order if consecutive lag orders were specified, otherwise
        a list of integer lag orders.

    Notes
    -----
    It is ambiguous if order=[1] is meant to be a boolean list or
    a list of lag orders to include, but this is irrelevant because either
    interpretation gives the same result.

    Order=[0] would be ambiguous, except that 0 is not a valid lag
    order to include, so there is no harm in interpreting as a boolean
    list, in which case it is the same as order=0, which seems like
    reasonable behavior.

    Examples
    --------
    >>> standardize_lag_order(3)
    3
    >>> standardize_lag_order(np.arange(1, 4))
    3
    >>> standardize_lag_order([1, 3])
    [1, 3]
    """
    arr = np.array(order)
    title = 'order' if title is None else '%s order' % title

    # Reject anything that is not integer-valued, then work with ints only.
    if not np.all(arr == arr.astype(int)):
        raise ValueError('Invalid %s. Non-integer order (%s) given.'
                         % (title, arr))
    arr = arr.astype(int)

    # Lag orders are non-negative by definition.
    if np.any(arr < 0):
        raise ValueError('Terms in the %s cannot be negative.' % title)

    # Squeeze out an irrelevant trailing dimension; otherwise require <= 1-d.
    if arr.ndim == 2 and arr.shape[1] == 1:
        arr = arr[:, 0]
    elif arr.ndim > 1:
        raise ValueError('Invalid %s. Must be an integer or'
                         ' 1-dimensional array-like object (e.g. list,'
                         ' ndarray, etc.). Got %s.' % (title, arr))

    if arr.ndim == 0:
        # Option 1: scalar maximum lag order (include all lags up to it).
        result = arr.item()
    elif len(arr) == 0:
        result = 0
    else:
        # Distinguish a boolean indicator list from a list of lag orders:
        # a 0 element or repeated 1s can only make sense as booleans.
        looks_boolean = (0 in arr) or (np.sum(arr == 1) > 1)
        if looks_boolean:
            if np.any(arr > 1):
                raise ValueError('Invalid %s. Appears to be a boolean list'
                                 ' (since it contains a 0 element and/or'
                                 ' multiple elements) but also contains'
                                 ' elements greater than 1 like a list of'
                                 ' lag orders.' % title)
            # Option 2: boolean list -> positions of the 1s (1-indexed).
            lags = np.where(arr == 1)[0] + 1
        else:
            # (Default) Option 3: explicit list of lag orders to include.
            lags = np.sort(arr)

        if len(lags) == 0:
            result = 0
        elif np.all(lags == np.arange(1, len(lags) + 1)):
            # Consecutive lags starting at one collapse to the maximum lag.
            result = lags[-1]
        else:
            result = lags.tolist()

    # A list result is sorted, so equal neighbours indicate duplicates.
    if isinstance(result, list) and np.any(np.diff(result) == 0):
        raise ValueError('Invalid %s. Cannot have duplicate elements.' % title)

    return result
def validate_basic(params, length, allow_infnan=False, title=None):
    """
    Validate parameter vector for basic correctness.

    Parameters
    ----------
    params : array_like
        Array of parameters to validate.
    length : int
        Expected length of the parameter vector.
    allow_infnan : bool, optional
        Whether or not to allow `params` to contain -np.inf, np.inf, and
        np.nan. Default is False.
    title : str, optional
        Description of the parameters (e.g. "autoregressive") to use in error
        messages.

    Returns
    -------
    params : ndarray
        Array of validated parameters.

    Notes
    -----
    Basic check that the parameters are numeric and that they are the right
    shape. Optionally checks for NaN / infinite values.
    """
    title = '' if title is None else ' for %s' % title

    # Coerce to a floating (or, if needed, complex) array; going through an
    # object array first lets us detect complex scalars before casting.
    try:
        as_objects = np.array(params, dtype=object)
        any_complex = any(isinstance(p, complex) for p in as_objects.ravel())
        validated = np.array(params, dtype=complex if any_complex else float)
    except TypeError:
        raise ValueError('Parameters vector%s includes invalid values.'
                         % title)

    # Optionally forbid non-finite entries.
    if not allow_infnan and (np.any(np.isnan(validated)) or
                             np.any(np.isinf(validated))):
        raise ValueError('Parameters vector%s includes NaN or Inf values.'
                         % title)

    validated = np.atleast_1d(np.squeeze(validated))

    # Enforce the expected 1-dimensional shape.
    if validated.shape != (length,):
        plural = '' if length == 1 else 's'
        raise ValueError('Specification%s implies %d parameter%s, but'
                         ' values with shape %s were provided.'
                         % (title, length, plural, validated.shape))

    return validated
|
statsmodelsREPO_NAMEstatsmodelsPATH_START.@statsmodels_extracted@statsmodels-main@statsmodels@tsa@arima@tools.py@.PATH_END.py
|
{
"filename": "analyze.py",
"repo_name": "jmd-dk/concept",
"repo_path": "concept_extracted/concept-master/test/lpt/analyze.py",
"type": "Python"
}
|
# This file has to be run in pure Python mode!

# Imports from the CO𝘕CEPT code
from commons import *
plt = get_matplotlib().pyplot

# Absolute path and name of this test
this_dir = os.path.dirname(os.path.realpath(__file__))
this_test = os.path.basename(os.path.dirname(this_dir))

# Begin analysis
masterprint(f'Analysing {this_test} data ...')

# Load power spectrum data, averaging the shifted and unshifted
# realisations for each (LPT order, de-aliasing) combination.
power = {}
for lpt in [2, 3]:
    for dealias in [False, True]:
        power[lpt, dealias] = 0
        for shift in [False, True]:
            k, _power = np.loadtxt(
                glob(
                    f'{this_dir}/output/{lpt}LPT'
                    + '_dealias'*dealias
                    + '_shift'*shift
                    + '/powerspec*'
                )[0],
                usecols=[0, 2],
                unpack=True,
            )
            power[lpt, dealias] += _power
        power[lpt, dealias] /= 2

# Compute power spectrum ratios (3LPT relative to 2LPT) and locate
# the peak of each ratio.
peak_relratios_expected = {False: 3.00/100, True: 2.39/100}
indices_peak = {}
ratios = {}
ratios_peak = {}
for dealias in [False, True]:
    ratios[dealias] = power[3, dealias]/power[2, dealias]
    indices_peak[dealias] = ratios[dealias].argmax()
    ratios_peak[dealias] = ratios[dealias][indices_peak[dealias]]

# Plot the relative ratios together with the expected peak levels
fig, ax = plt.subplots()
for ls, dealias in zip(['-', '--'], [False, True]):
    ax.semilogx(k, (ratios[dealias] - 1)*100, ls, label=f'{dealias = }')
    x = exp(log(k[0]) + (0.85)*(log(k[-1]) - log(k[0])))
    y = peak_relratios_expected[dealias]*100
    ax.semilogx([x, k[-1]], [y]*2, f'k{ls}', lw=1)
    ax.text(x, y, r'expected $\rightarrow$', ha='right', va='center')
ax.set_xlim(k[0], k[-1])
ax.set_xlabel(rf'$k\, [\mathrm{{{unit_length}}}^{{-1}}]$')
ax.set_ylabel(r'$P_{\mathrm{3LPT}}/P_{\mathrm{2LPT}} - 1\, [\%]$')
ax.legend()
fig_file = f'{this_dir}/result.png'
fig.savefig(fig_file, dpi=150)

# Check that each ratio peaks at the Nyquist frequency and that the
# peak value matches the expectation to within the given tolerance.
abs_tol = 0.001
rel_tol = 0
for dealias in [False, True]:
    dealiased = ('dealiased' if dealias else 'aliased')
    if indices_peak[dealias] != len(k) - 1:
        # Bugfix: a stray trailing "f" in the first message fragment used
        # to produce "... ratio fbetween 3LPT ...".
        abort(
            f'The largest value of the {dealiased} power spectrum ratio '
            f'between 3LPT and 2LPT does not occur at the highest '
            f'k available (the Nyquist frequency). See {fig_file}.'
        )
    if not isclose(
        ratios[dealias][-1] - 1,
        peak_relratios_expected[dealias],
        abs_tol=abs_tol,
        rel_tol=rel_tol,
    ):
        abort(
            f'The {dealiased} 3LPT to 2LPT power spectrum ratio at the '
            f'Nyquist frequency does not match the expected value. '
            f'See {fig_file}.'
        )

# Done analysing
masterprint('done')
|
jmd-dkREPO_NAMEconceptPATH_START.@concept_extracted@concept-master@test@lpt@analyze.py@.PATH_END.py
|
{
"filename": "README.md",
"repo_name": "JLBLine/CHIPS_wrappers",
"repo_path": "CHIPS_wrappers_extracted/CHIPS_wrappers-main/README.md",
"type": "Markdown"
}
|
# CHIPS_wrappers
Scripts to run CHIPS power spectrum estimator, and plot the outputs. This repo is under development, and does not have unit/integration tests as of yet.
## Installation
Installation is currently very basic — you need to run:
```bash
git clone https://github.com/JLBLine/CHIPS_wrappers.git
cd CHIPS_wrappers
pip install .
```
The installation process may be streamlined in a future release.
## Usage
You'll need a working `CHIPS` installation, and to set a few environment variables to use `run_CHIPS.py`. The plotting code should work without env variables. *Fill in later*
### Docker
```bash
docker run --rm -it -w /app --entrypoint python d3vnull0/chips_wrappers:latest scripts/plotchips_all.py --help
```
### singularity
```bash
singularity exec --cleanenv -B $PWD --home $PWD docker://d3vnull0/chips_wrappers:latest python /app/scripts/plotchips_all.py --help
```
|
JLBLineREPO_NAMECHIPS_wrappersPATH_START.@CHIPS_wrappers_extracted@CHIPS_wrappers-main@README.md@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/layout/map/layer/fill/__init__.py",
"type": "Python"
}
|
import sys
from typing import TYPE_CHECKING

if sys.version_info >= (3, 7) and not TYPE_CHECKING:
    # Modern interpreters get lazy submodule loading via __getattr__.
    from _plotly_utils.importers import relative_import

    __all__, __getattr__, __dir__ = relative_import(
        __name__, [], ["._outlinecolor.OutlinecolorValidator"]
    )
else:
    # Older Python (or static type checkers) need the eager import.
    from ._outlinecolor import OutlinecolorValidator
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@layout@map@layer@fill@__init__.py@.PATH_END.py
|
{
"filename": "_line.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/table/header/_line.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class LineValidator(_plotly_utils.basevalidators.CompoundValidator):
    """Compound validator for the ``table.header.line`` property.

    Delegates construction to the generated ``Line`` data class; the
    ``data_docs`` literal below is surfaced verbatim in user-facing docs.
    """

    def __init__(self, plotly_name="line", parent_name="table.header", **kwargs):
        super(LineValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            # Name of the generated data class this validator instantiates.
            data_class_str=kwargs.pop("data_class_str", "Line"),
            data_docs=kwargs.pop(
                "data_docs",
                """
            color
            colorsrc
                Sets the source reference on Chart Studio Cloud
                for color .
            width
            widthsrc
                Sets the source reference on Chart Studio Cloud
                for width .
""",
            ),
            **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@table@header@_line.py@.PATH_END.py
|
{
"filename": "solvers.py",
"repo_name": "astro-informatics/DarkMappy",
"repo_path": "DarkMappy_extracted/DarkMappy-main/darkmappy/solvers.py",
"type": "Python"
}
|
import numpy as np
import optimusprimal as opt
import darkmappy.logs as lg
class PrimalDual:
    """
    Class which handles all primal dual optimisation paradigms.

    Thin wrapper around the ``optimusprimal`` forward-backward primal-dual
    (FBPD) solvers, assembling the data-fidelity, sparsity and
    reality/positivity terms for constrained and unconstrained
    l1-regularised reconstruction with Gaussian noise.
    """

    def __init__(
        self,
        data,
        phi,
        psi,
        options=None,
    ):
        """Construct primal dual general class.

        Args:
            data: Data vector the solvers optimise against.
            phi (): Measurement operator (weights for poisson noise)
            psi (): Redundant dictionary (wavelets etc.)
            options (dict, optional): Solver settings. Recognised keys are
                shown in the default below; ``constrained`` selects the
                constrained vs unconstrained problem, and optional keys
                ``nu``, ``mu`` and ``viewer`` override the operator-norm
                estimates and progress callback.

        Raises:
            ValueError: Data vector contains NaN values (raised by solvers).
        """
        # Bugfix: the default options used to be a shared mutable default
        # argument; build a fresh dict per instance instead.
        if options is None:
            options = {
                "tol": 1e-5,
                "iter": 5000,
                "update_iter": 50,
                "record_iters": False,
                "positivity": False,
                "real": False,
                "nu": 0,
                "constrained": True,
            }
        # Progress/diagnostic callback; default is a no-op.
        if "viewer" not in options:
            self.viewer = lambda *args: None
        else:
            self.viewer = options["viewer"]
        self.options = options
        self.data = data
        self.phi = phi
        # Operator-norm bounds (step sizes); estimated via the power method
        # unless supplied explicitly through options.
        if "nu" not in self.options:
            self.nu = opt.linear_operators.power_method(
                phi, np.ones(phi.shape, dtype=complex)
            )[0]
        else:
            self.nu = self.options["nu"]
        if "mu" not in self.options:
            # NOTE(review): seeded with phi.shape rather than a psi-domain
            # vector — presumably both operators act on the image space;
            # confirm against optimusprimal's power_method contract.
            self.mu = opt.linear_operators.power_method(
                psi, np.ones(phi.shape, dtype=complex)
            )[0]
        else:
            self.mu = self.options["mu"]
        self.psi = psi
        # Optional reality/positivity projection; positivity takes
        # precedence over real when both are requested.
        self.f = None
        if self.options["real"]:
            self.f = opt.prox_operators.real_prox()
        if self.options["positivity"]:
            self.f = opt.prox_operators.positive_prox()
        self.constrained = self.options["constrained"]
        self.solver = (
            self.l1_constrained_gaussian
            if self.constrained
            else self.l1_unconstrained_gaussian
        )
        # Joint MAP (maximum a posteriori) estimation hyper-parameters.
        self.jm_max_iter = 5
        self.kappa = 1
        self.eta = 1
        self.jm_tol = 1e-3

    def l1_constrained_gaussian(self, warm_start, sigma, beta=1e-2):
        """Solve constrained l1 regularisation problem with Gaussian noise.

        Can be instantiated from warm_start.

        Args:
            warm_start (): Initial solution of optimisation.
            sigma (): Noise-level present in optimisation.
            beta (): Scaling for l1-norm threshold

        Returns:
            Solution and diagnostics returned by the FBPD solver.

        Raises:
            ValueError: Datavector size is 0 (empty set).
            ValueError: Datavector contains NaN values.
        """
        size = len(np.ravel(self.data))
        if size == 0:
            raise ValueError("Data vector is the empty set!")
        if np.any(np.isnan(self.data)):
            raise ValueError("Data vector contains NaN values!")
        # l2-ball radius: sigma-scaled mean + ~2 std of a chi^2 with
        # `size` degrees of freedom.
        epsilon = np.sqrt(size + 2 * np.sqrt(2 * size)) * sigma
        p = opt.prox_operators.l2_ball(epsilon, self.data, self.phi)
        p.beta = self.nu
        # l1 threshold scaled by the peak dictionary coefficient of the
        # dirty (back-projected) reconstruction.
        h = opt.prox_operators.l1_norm(
            np.max(np.abs(self.psi.dir_op(self.phi.adj_op(self.data))))
            * beta
            * self.psi.weights,
            self.psi,
        )
        h.beta = self.mu
        return opt.primal_dual.FBPD(
            warm_start, self.options, None, self.f, h, p, viewer=self.viewer
        )

    def l1_unconstrained_gaussian(self, warm_start, sigma, beta):
        """Solve unconstrained l1 regularisation problem with Gaussian noise.

        Can be instantiated from warm_start.

        Args:
            warm_start (): Initial solution of optimisation.
            sigma (): Noise-level present in optimisation.
            beta (): Regularisation parameter; beta <= 0 disables the
                sparsity prior entirely.

        Returns:
            Solution and diagnostics returned by the FBPD solver.

        Raises:
            ValueError: Datavector size is 0 (empty set).
            ValueError: Datavector contains NaN values.
        """
        if len(np.ravel(self.data)) == 0:
            raise ValueError("Data vector is the empty set!")
        if np.any(np.isnan(self.data)):
            raise ValueError("Data vector contains NaN values!")
        g = opt.grad_operators.l2_norm(sigma, self.data, self.phi)
        g.beta = self.nu / sigma ** 2
        h = (
            None
            if (beta <= 0)
            else opt.prox_operators.l1_norm(beta * self.psi.weights, self.psi)
        )
        # Bugfix: previously `h.beta` was set unconditionally, raising
        # AttributeError whenever beta <= 0 produced h = None.
        if h is not None:
            h.beta = self.mu
        return opt.primal_dual.FBPD(
            warm_start, self.options, g, self.f, h, viewer=self.viewer
        )

    def l1_unconstrained_gaussian_jm(self, warm_start, sigma, beta):
        """Solve unconstrained l1 problem, jointly estimating regularisation.

        Alternates FBPD solves with a MAP update of the regularisation
        strength `beta`. Can be instantiated from warm_start.

        Args:
            warm_start (): Initial solution of optimisation.
            sigma (): Noise-level present in optimisation.
            beta (): Initial regularisation parameter (must be positive).

        Returns:
            Solution and diagnostics returned by the final FBPD solve.

        Raises:
            ValueError: Datavector size is 0 (empty set).
            ValueError: Datavector contains NaN values.
            ValueError: beta is not positive.
        """
        if len(np.ravel(self.data)) == 0:
            raise ValueError("Data vector is the empty set!")
        if np.any(np.isnan(self.data)):
            raise ValueError("Data vector contains NaN values!")
        if beta <= 0:
            # The joint update divides by beta and evaluates the l1 term,
            # so a disabled prior cannot be iterated on (previously this
            # crashed with AttributeError on `h.beta`).
            raise ValueError("Joint MAP estimation requires beta > 0.")
        g = opt.grad_operators.l2_norm(sigma, self.data, self.phi)
        g.beta = self.nu / sigma ** 2
        h = opt.prox_operators.l1_norm(beta * self.psi.weights, self.psi)
        h.beta = self.mu
        # Zero-initialised dual/auxiliary variables for the warm-started solver.
        y = h.dir_op(warm_start) * 0.0
        z = warm_start * 0
        w = warm_start * 0
        sol, diagnostics = opt.primal_dual.FBPD_warm_start(
            warm_start,
            y,
            z,
            w,
            self.options,
            g=g,
            f=self.f,
            h=h,
            p=None,
            r=None,
            viewer=self.viewer,
        )
        for it in range(1, self.jm_max_iter):
            beta_old = beta
            # MAP update of beta under a gamma hyper-prior (eta, kappa).
            beta = (self.eta + len(y.flatten())) / (
                self.kappa + h.fun(h.dir_op(sol)) / beta
            )
            lg.info_log(
                "[JM] %d out of %d iterations, tol = %f",
                it,
                self.jm_max_iter,
                np.linalg.norm(beta - beta_old) / np.linalg.norm(beta_old),
            )
            lg.info_log("[JM] regularisation is %f", beta)
            if (
                np.linalg.norm(beta - beta_old) < self.jm_tol * np.linalg.norm(beta_old)
                and it > 10
            ):
                lg.info_log("[JM] converged in %d iterations", it)
                break
            # Rebuild the l1 prox with the updated regularisation strength
            # (the MAP update above always yields beta > 0).
            h = opt.prox_operators.l1_norm(beta * self.psi.weights, self.psi)
            h.beta = self.mu
            y = diagnostics["y"]
            z = diagnostics["z"]
            w = diagnostics["w"]
            sol, diagnostics = opt.primal_dual.FBPD_warm_start(
                sol,
                y,
                z,
                w,
                self.options,
                g=g,
                f=self.f,
                h=h,
                p=None,
                r=None,
                viewer=self.viewer,
            )
        return sol, diagnostics
|
astro-informaticsREPO_NAMEDarkMappyPATH_START.@DarkMappy_extracted@DarkMappy-main@darkmappy@solvers.py@.PATH_END.py
|
{
"filename": "tf_numpy_mlp.py",
"repo_name": "tensorflow/tensorflow",
"repo_path": "tensorflow_extracted/tensorflow-master/tensorflow/python/ops/numpy_ops/integration_test/benchmarks/tf_numpy_mlp.py",
"type": "Python"
}
|
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Builds the MLP network."""
import tensorflow.compat.v2 as tf
np = tf.experimental.numpy
NUM_CLASSES = 3
INPUT_SIZE = 10
HIDDEN_UNITS = 10


class MLP:
    """MLP model.

    T = Relu(Add(MatMul(A, B), C))
    R = Relu(Add(MatMul(T, D), E))
    """

    def __init__(self, num_classes=NUM_CLASSES, input_size=INPUT_SIZE,
                 hidden_units=HIDDEN_UNITS):
        # Initialisation order (w1, w2, b1, b2) matches the original so
        # RNG-seeded runs reproduce identical parameter values.
        def _uniform(shape):
            return np.random.uniform(size=shape).astype(np.float32)

        self.w1 = _uniform([input_size, hidden_units])
        self.w2 = _uniform([hidden_units, num_classes])
        self.b1 = _uniform([1, hidden_units])
        self.b2 = _uniform([1, num_classes])

    def inference(self, inputs):
        """Forward pass over ``inputs`` of shape (batch, input_size)."""
        return self._forward(inputs, self.w1, self.w2, self.b1, self.b2)

    def _forward(self, x, w1, w2, b1, b2):
        # Two dense layers, each followed by a ReLU.
        hidden = np.maximum(np.matmul(x, w1) + b1, 0.)
        return np.maximum(np.matmul(hidden, w2) + b2, 0.)
|
tensorflowREPO_NAMEtensorflowPATH_START.@tensorflow_extracted@tensorflow-master@tensorflow@python@ops@numpy_ops@integration_test@benchmarks@tf_numpy_mlp.py@.PATH_END.py
|
{
"filename": "eismap.py",
"repo_name": "USNavalResearchLaboratory/eispac",
"repo_path": "eispac_extracted/eispac-main/eispac/core/eismap.py",
"type": "Python"
}
|
"""
`~sunpy.map.Map` subclass for the EUV Imaging Spectrometer (EIS) on Hinode
"""
import sys
import pathlib
import numpy as np
import astropy.units as u
from astropy.io import fits
from astropy.nddata import StdDevUncertainty
from astropy.visualization import ImageNormalize, AsinhStretch, LinearStretch
import sunpy.map
from sunpy.map.mapbase import SpatialPair
from sunpy.time import parse_time
__all__ = ['EISMap']
class EISMap(sunpy.map.GenericMap):
    """
    EIS fit parameter map.

    The EUV Imaging Spectrometer (EIS) is part of the Hinode mission and was sponsored
    by the Japan Aerospace Exploration Agency (JAXA), the United Kingdom Space
    Agency (UKSA), and National Aeronautics and Space Administration (NASA) with
    contributions from ESA and Norway. Hinode was launched on September 22, 2006
    at 21:36 UTC from the Uchinoura Space Center in Japan and continues to
    operate. EIS observes two wavelength ranges in the extreme ultraviolet,
    171—212 Å and 245—291 Å with a spectral resolution of about 22 mÅ and a plate
    scale of 100 per pixel.

    This data structure is designed to hold the fit parameters derived from multi-gaussian
    spectral fits to level 1, wavelength-resolved EIS rasters. These maps can contain the
    intensity, doppler velocity, or line width.

    Notes
    -----
    Measurement errors are stored in a binary table. To load them correctly,
    you must pass the .fits file directly to `eispac.EISMap` instead of using
    sunpy.map.Map

    References
    ----------
    * `Instrument Paper: Culhane, J. L., Harra, L. K., James, A. M., et al.
       2007, Sol. Phys., 243, 19`_
    """

    def __init__(self, data, header=None, **kwargs):
        """Create an EISMap.

        If ``data`` is a path to a FITS file, the primary HDU supplies the
        data and header, HDU 1 (if present) supplies measurement errors, and
        HDU 2 (if present) supplies per-raster-step timing; user-supplied
        ``step_date_obs`` / ``step_exptime`` kwargs override the file values.
        """
        step_date_obs = None
        step_exptime = None
        # Check for a fits file containing a binary table with the errs
        # NB: errs are in a table with y-axis number of rows, each with x-axis
        #     number of values
        if isinstance(data, (str, pathlib.Path)):
            if pathlib.Path(data).suffix.lower().startswith('.fit'):
                with fits.open(data, mode='readonly') as hdul:
                    data = hdul[0].data
                    header = hdul[0].header
                    if len(hdul) >= 2:
                        data_errors = StdDevUncertainty(hdul[1].data['errors'])
                        kwargs['uncertainty'] = data_errors
                    if len(hdul) >= 3:
                        step_date_obs = parse_time(hdul[2].data['step_date_obs'])
                        step_exptime = hdul[2].data['step_exptime']
        # Get user-input step_date_obs and step_exptime
        # NB: this will overwrite any values from an input fits file
        user_step_date_obs = kwargs.pop('step_date_obs', None)
        user_step_exptime = kwargs.pop('step_exptime', None)
        if user_step_date_obs is not None and header is not None:
            # Only accept timestamps when one is given per raster step.
            if len(user_step_date_obs) == header['naxis1']:
                step_date_obs = parse_time(user_step_date_obs)
            else:
                print(f'WARNING incorrect number of "step_date_obs" values!'
                      +f' This EIS observation has {header["naxis1"]} steps.',
                      file=sys.stderr)
        if user_step_exptime is not None and header is not None:
            if len(user_step_exptime) == header['naxis1']:
                step_exptime = user_step_exptime
            else:
                print(f'WARNING: incorrect number of "step_exptime" values!'
                      +f' This EIS observation has {header["naxis1"]} steps.',
                      file=sys.stderr)
        # Initalize the map
        super().__init__(data, header, **kwargs)
        self._step_date_obs = step_date_obs
        self._step_exptime = step_exptime
        # Setup plot settings and get default data masks
        # This includes adjusting colormap and normalization depending on whether
        # the map contains intensity, velocity, or line width data
        default_mask = None
        self.plot_settings['aspect'] = self.meta['cdelt2'] / self.meta['cdelt1']
        if self.meta['measrmnt'].lower().startswith('int'):
            self.plot_settings['cmap'] = 'Blues_r'
            self.plot_settings['norm'] = ImageNormalize(stretch=AsinhStretch())
            default_mask = self.data == 0
        elif self.meta['measrmnt'].lower().startswith('vel'):
            self.plot_settings['cmap'] = 'RdBu_r'
            # Autoscale color range to 3*std (rounded to nearest multiple of 5)
            vlim = 5*round(3*self.data.std()/5)
            self.plot_settings['norm'] = ImageNormalize(vmin=-vlim, vmax=vlim)
            if self.uncertainty is not None:
                # Note: velocities of 0 may be valid UNLESS the errors are NaN
                default_mask = (self.data == 0) & np.isnan(self.uncertainty.array)
        elif self.meta['measrmnt'].lower().startswith('wid'):
            self.plot_settings['cmap'] = 'viridis'
            default_mask = self.data == 0
        # Set the default mask (ignored if the user input their own mask)
        if self.mask is None:
            self.mask = default_mask

    @property
    def spatial_units(self):
        """Pair of spatial axis units, defaulting to arcsec when unset."""
        units = self.meta.get('cunit1', 'arcsec'), self.meta.get('cunit2', 'arcsec')
        return SpatialPair(u.Unit(units[0]), u.Unit(units[1]))

    @property
    def processing_level(self):
        """Data processing level; fit-parameter maps default to level 2."""
        return self.meta.get('lvl_num', 2)

    @property
    def waveunit(self):
        """Unit of the wavelength values (default: angstrom)."""
        return u.Unit(self.meta.get('waveunit', 'angstrom'))

    @property
    def wavelength(self):
        """Line wavelength parsed from the trailing token of ``line_id``."""
        line_id = self.meta.get('line_id')
        if line_id is not None:
            wave = float(line_id.split()[-1])
            return u.Quantity(wave, self.waveunit)

    @property
    def measurement(self):
        """Which fit parameter this map holds (intensity/velocity/width)."""
        return self.meta.get('measrmnt', '')

    @property
    def observatory(self):
        """Name of the host observatory (always Hinode)."""
        return 'Hinode'

    @property
    def nickname(self):
        """Human-readable label combining observatory, instrument and line."""
        line_id = self.meta.get('line_id', '')
        return f'{self.observatory} {self.instrument} {line_id}'

    @property
    def date_start(self):
        # Try default key DATE-BEG. This is to future proof against
        # switching to DATE-BEG when constructing the L1 headers
        # NOTE: the DATE_OBS key is the beginning of the observation
        # so we can use this in case DATE_BEG is missing
        date_beg = self._get_date('date_beg') or super().date_start
        date_beg = date_beg or self._date_obs
        return date_beg

    @property
    def date_end(self):
        # Try default key DATE-END. This is to future proof against
        # switching to DATE-END when constructing the L1 headers
        return self._get_date('date_end') or super().date_end

    @property
    def date_average(self):
        """Midpoint of the raster, preferring the DATE_AVG header key."""
        return self._get_date('date_avg') or super().date_average

    @property
    def reference_date(self):
        """
        The reference date for the coordinate system

        According to Section 2.4 of
        `EIS Software Note 9 <https://solarb.mssl.ucl.ac.uk/SolarB/eis_docs/eis_notes/09_POINTING/eis_swnote_09.pdf>`_,
        the pointing keywords are defined at the start of the raster. As such, this property returns the
        time at the beginning of the raster.

        .. note:: This property is overridden because `sunpy.map.GenericMap` sets
                  this to be the ``.date_average`` which in this case is the midpoint
                  of the raster.
        """
        return self.date_start

    @property
    def duration(self):
        """Total duration of the observation in units of [min]
        """
        total_time = (np.datetime64(self.meta['date_end'])
                      - np.datetime64(self.meta['date_obs']))
        total_time = total_time / np.timedelta64(60, 's') # convert to [min]
        return total_time * u.Unit('min')

    @property
    def step_date_obs(self):
        """date_obs timestamp of each step along the x-axis
        """
        if self._step_date_obs is not None:
            return self._step_date_obs
        else:
            print(f'WARNING: exact "step_date_obs" values are unknown!'
                  +f' Estimating based on the observation start and end times.',
                  file=sys.stderr)
            # Assume a uniform cadence across all raster steps.
            total_time = self.duration.to('s').value
            est_cad = total_time / self.meta['naxis1']
            est_date_obs = (np.datetime64(self.meta['date_obs'])
                            + np.arange(self.meta['naxis1'])
                            * np.timedelta64(int(est_cad*1000), 'ms'))
            if self.meta['nraster'] == 1:
                # Sit-and-stare timestamps inc left to right
                return parse_time(est_date_obs)
            else:
                # Normal raster timestamps inc from right to left (scan dir)
                return parse_time(np.flip(est_date_obs))

    @property
    def step_exptime(self):
        """Exposure time of each step along the x-axis
        """
        if self._step_exptime is not None:
            return self._step_exptime
        else:
            print(f'WARNING: exact "step_exptime" values are unknown!'
                  +f' Estimating based on the observation start and end times.'
                  +f' Actual exposure times will be shorter due to on-board'
                  +f' processing and the specific observation plan.',
                  file=sys.stderr)
            # Upper-bound estimate: total duration split evenly over steps.
            total_time = self.duration.to('s').value
            est_avg_exptime = total_time / self.meta['naxis1']
            return np.zeros(self.meta['naxis1']) + est_avg_exptime

    @classmethod
    def is_datasource_for(cls, data, header, **kwargs):
        """
        Determines if header corresponds to an EIS image. Used to register
        EISMap with the sunpy.map.Map factory.
        """
        return str(header.get('instrume', '')).startswith('EIS')
USNavalResearchLaboratoryREPO_NAMEeispacPATH_START.@eispac_extracted@eispac-main@eispac@core@eismap.py@.PATH_END.py
|
{
"filename": "test_misc.py",
"repo_name": "radiocosmology/caput",
"repo_path": "caput_extracted/caput-master/tests/test_misc.py",
"type": "Python"
}
|
"""Test the miscellaneous tools."""
import unittest
import tempfile
import os
import pytest
import shutil
from caput import misc
class TestLock(unittest.TestCase):
    """Exercise ``misc.lock_file`` in success, failure and preserve modes."""

    def setUp(self):
        # Fresh scratch directory per test.
        self.dir = tempfile.mkdtemp()

    def tearDown(self):
        shutil.rmtree(self.dir)

    def test_lock_new(self):
        """Test the normal behaviour"""
        base = "newfile.dat"
        final_path = os.path.join(self.dir, base)
        lock_path = os.path.join(self.dir, "." + base + ".lock")

        with misc.lock_file(final_path) as tmp_path:
            # The lock file must exist while the context is open
            self.assertTrue(os.path.exists(lock_path))

            # Write a stub file at the temporary location
            with open(tmp_path, "w+") as fh:
                fh.write("hello")

            # The content lives only at the temporary path until exit
            self.assertTrue(os.path.exists(tmp_path))
            self.assertFalse(os.path.exists(final_path))

        # On exit the file is moved into place and the lock removed
        self.assertTrue(os.path.exists(final_path))
        self.assertFalse(os.path.exists(lock_path))

    def test_lock_exception(self):
        """Check what happens in an exception"""
        base = "newfile2.dat"
        final_path = os.path.join(self.dir, base)
        lock_path = os.path.join(self.dir, "." + base + ".lock")

        with pytest.raises(RuntimeError):
            with misc.lock_file(final_path) as tmp_path:
                with open(tmp_path, "w+") as fh:
                    fh.write("hello")
                raise RuntimeError("Test error")

        # Neither the final file nor its lock may survive the failure
        self.assertFalse(os.path.exists(final_path))
        self.assertFalse(os.path.exists(lock_path))

    def test_lock_exception_preserve(self):
        """Check what happens in an exception when asked to preserve the temp file"""
        base = "newfile3.dat"
        final_path = os.path.join(self.dir, base)
        lock_path = os.path.join(self.dir, "." + base + ".lock")
        tmp_expected = os.path.join(self.dir, "." + base)

        with pytest.raises(RuntimeError):
            with misc.lock_file(final_path, preserve=True) as tmp_path:
                with open(tmp_path, "w+") as fh:
                    fh.write("hello")
                raise RuntimeError("Test error")

        # The temporary file is kept; the final file and lock are not
        self.assertTrue(os.path.exists(tmp_expected))
        self.assertFalse(os.path.exists(final_path))
        self.assertFalse(os.path.exists(lock_path))


if __name__ == "__main__":
    unittest.main()
|
radiocosmologyREPO_NAMEcaputPATH_START.@caput_extracted@caput-master@tests@test_misc.py@.PATH_END.py
|
{
"filename": "dtau_mmwl.py",
"repo_name": "HajimeKawahara/exojax",
"repo_path": "exojax_extracted/exojax-master/src/exojax/spec/dtau_mmwl.py",
"type": "Python"
}
|
"""compute dtau (opacity difference in atmospheric layers) using mean molecular weight
"""
import jax.numpy as jnp
from exojax.spec.hitrancia import interp_logacia_matrix
from exojax.spec.hminus import log_hminus_continuum
from exojax.atm.idealgas import number_density
from exojax.utils.constants import logkB, logm_ucgs
from exojax.utils.constants import opfac
import warnings
# Deprecation-style notice emitted once at import time: this module is
# slated for removal in a future release.
warnings.warn("dtau_mmwl might be removed in future.", FutureWarning)
def dtauCIA_mmwl(nus, Tarr, Parr, dParr, vmr1, vmr2, mmw, g, nucia, tcia, logac):
    """dtau of the CIA continuum.

    (for the case where mmw is given for each atmospheric layer)

    Args:
        nus: wavenumber matrix (cm-1)
        Tarr: temperature array (K)
        Parr: temperature array (bar)
        dParr: delta temperature array (bar)
        vmr1: volume mixing ratio (VMR) for molecules 1 [N_layer]
        vmr2: volume mixing ratio (VMR) for molecules 2 [N_layer]
        mmw: mean molecular weight of atmosphere [N_layer]
        g: gravity (cm2/s)
        nucia: wavenumber array for CIA
        tcia: temperature array for CIA
        logac: log10(absorption coefficient of CIA)

    Returns:
        optical depth matrix [N_layer, N_nus]
    """
    # Per-layer number densities of the two collision partners, in log10.
    narr = number_density(Parr, Tarr)
    log_n1 = jnp.log10(vmr1 * narr)
    log_n2 = jnp.log10(vmr2 * narr)
    log_g = jnp.log10(g)
    ddParr = dParr / Parr
    # Assemble the layer opacity in log space, then exponentiate; the
    # summation order mirrors the original expression exactly.
    log_cross = interp_logacia_matrix(Tarr, nus, nucia, tcia, logac)
    exponent = (
        log_cross
        + log_n1[:, None]
        + log_n2[:, None]
        + logkB
        - log_g
        - logm_ucgs
    )
    return 10 ** exponent * Tarr[:, None] / mmw[:, None] * ddParr[:, None]
def dtauM_mmwl(dParr, xsm, MR, mass, g):
    """dtau of the molecular cross section.

    (for the case where mmw is given for each atmospheric layer)

    Note:
        opfac=bar_cgs/(m_u (g)). m_u: atomic mass unit. It can be obtained by fac=1.e3/m_u, where m_u = scipy.constants.m_u.

    Args:
        dParr: delta pressure profile (bar) [N_layer]
        xsm: cross section matrix (cm2) [N_layer, N_nus]
        MR: volume mixing ratio (VMR) or mass mixing ratio (MMR) [N_layer]
        mass: mean molecular weight for VMR or molecular mass for MMR [N_layer]
        g: gravity (cm/s2)

    Returns:
        optical depth matrix [N_layer, N_nus]
    """
    # Accumulate the product left-to-right, mirroring the original
    # single-expression form so floating-point results are unchanged.
    dtau = opfac * xsm
    dtau = dtau * dParr[:, None]
    dtau = dtau * MR[:, None]
    return dtau / (mass[:, None] * g)
def dtauHminus_mmwl(nus, Tarr, Parr, dParr, vmre, vmrh, mmw, g):
    """dtau of the H- continuum.

    (for the case where mmw is given for each atmospheric layer)

    Args:
        nus: wavenumber matrix (cm-1)
        Tarr: temperature array (K)
        Parr: temperature array (bar)
        dParr: delta temperature array (bar)
        vmre: volume mixing ratio (VMR) for e- [N_layer]
        vmrh: volume mixing ratio (VMR) for H atoms [N_layer]
        mmw: mean molecular weight of atmosphere [N_layer]
        g: gravity (cm2/s)

    Returns:
        optical depth matrix [N_layer, N_nus]
    """
    # Per-layer number densities of electrons and neutral hydrogen.
    narr = number_density(Parr, Tarr)
    n_electron = vmre * narr
    n_hydrogen = vmrh * narr
    log_g = jnp.log10(g)
    ddParr = dParr / Parr
    # Log absorption coefficient of the H- continuum, then convert to
    # optical depth per layer (operation order matches the original).
    logabc = log_hminus_continuum(nus, Tarr, n_electron, n_hydrogen)
    exponent = logabc + logkB - log_g - logm_ucgs
    return 10 ** exponent * Tarr[:, None] / mmw[:, None] * ddParr[:, None]
|
HajimeKawaharaREPO_NAMEexojaxPATH_START.@exojax_extracted@exojax-master@src@exojax@spec@dtau_mmwl.py@.PATH_END.py
|
{
"filename": "_plot.py",
"repo_name": "scikit-learn/scikit-learn",
"repo_path": "scikit-learn_extracted/scikit-learn-main/sklearn/model_selection/_plot.py",
"type": "Python"
}
|
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import numpy as np
from ..utils._optional_dependencies import check_matplotlib_support
from ..utils._plotting import _interval_max_min_ratio, _validate_score_name
from ._validation import learning_curve, validation_curve
class _BaseCurveDisplay:
    """Shared plotting machinery for curve displays.

    Subclasses are expected to define ``train_scores``, ``test_scores`` and
    ``score_name`` attributes and to call :meth:`_plot_curve` with the values
    to place on the x-axis.
    """
    def _plot_curve(
        self,
        x_data,
        *,
        ax=None,
        negate_score=False,
        score_name=None,
        score_type="test",
        std_display_style="fill_between",
        line_kw=None,
        fill_between_kw=None,
        errorbar_kw=None,
    ):
        # Raise a helpful ImportError early if matplotlib is unavailable.
        check_matplotlib_support(f"{self.__class__.__name__}.plot")
        import matplotlib.pyplot as plt
        if ax is None:
            _, ax = plt.subplots()
        # Scores from `neg_*` scorers are negative; flipping the sign here lets
        # the plot show the (positive) error instead.
        if negate_score:
            train_scores, test_scores = -self.train_scores, -self.test_scores
        else:
            train_scores, test_scores = self.train_scores, self.test_scores
        if std_display_style not in ("errorbar", "fill_between", None):
            raise ValueError(
                f"Unknown std_display_style: {std_display_style}. Should be one of"
                " 'errorbar', 'fill_between', or None."
            )
        if score_type not in ("test", "train", "both"):
            raise ValueError(
                f"Unknown score_type: {score_type}. Should be one of 'test', "
                "'train', or 'both'."
            )
        # Map legend label -> (n_ticks, n_cv_folds) score matrix to plot.
        if score_type == "train":
            scores = {"Train": train_scores}
        elif score_type == "test":
            scores = {"Test": test_scores}
        else:  # score_type == "both"
            scores = {"Train": train_scores, "Test": test_scores}
        if std_display_style in ("fill_between", None):
            # plot the mean score
            if line_kw is None:
                line_kw = {}
            self.lines_ = []
            for line_label, score in scores.items():
                self.lines_.append(
                    *ax.plot(
                        x_data,
                        score.mean(axis=1),
                        label=line_label,
                        **line_kw,
                    )
                )
            self.errorbar_ = None
            self.fill_between_ = None  # overwritten below by fill_between
        if std_display_style == "errorbar":
            if errorbar_kw is None:
                errorbar_kw = {}
            self.errorbar_ = []
            for line_label, score in scores.items():
                self.errorbar_.append(
                    ax.errorbar(
                        x_data,
                        score.mean(axis=1),
                        score.std(axis=1),
                        label=line_label,
                        **errorbar_kw,
                    )
                )
            # Only the errorbar containers are kept in the errorbar style.
            self.lines_, self.fill_between_ = None, None
        elif std_display_style == "fill_between":
            if fill_between_kw is None:
                fill_between_kw = {}
            # Semi-transparent bands by default; caller kwargs take precedence.
            default_fill_between_kw = {"alpha": 0.5}
            fill_between_kw = {**default_fill_between_kw, **fill_between_kw}
            self.fill_between_ = []
            for line_label, score in scores.items():
                self.fill_between_.append(
                    ax.fill_between(
                        x_data,
                        score.mean(axis=1) - score.std(axis=1),
                        score.mean(axis=1) + score.std(axis=1),
                        **fill_between_kw,
                    )
                )
        # Explicit score_name overrides the one stored on the display.
        score_name = self.score_name if score_name is None else score_name
        ax.legend()
        # We found that a ratio, smaller or bigger than 5, between the largest and
        # smallest gap of the x values is a good indicator to choose between linear
        # and log scale.
        if _interval_max_min_ratio(x_data) > 5:
            # symlog supports zero/negative x values; log does not.
            xscale = "symlog" if x_data.min() <= 0 else "log"
        else:
            xscale = "linear"
        ax.set_xscale(xscale)
        ax.set_ylabel(f"{score_name}")
        self.ax_ = ax
        self.figure_ = ax.figure
class LearningCurveDisplay(_BaseCurveDisplay):
    """Learning Curve visualization.
    It is recommended to use
    :meth:`~sklearn.model_selection.LearningCurveDisplay.from_estimator` to
    create a :class:`~sklearn.model_selection.LearningCurveDisplay` instance.
    All parameters are stored as attributes.
    Read more in the :ref:`User Guide <visualizations>` for general information
    about the visualization API and
    :ref:`detailed documentation <learning_curve>` regarding the learning
    curve visualization.
    .. versionadded:: 1.2
    Parameters
    ----------
    train_sizes : ndarray of shape (n_unique_ticks,)
        Numbers of training examples that has been used to generate the
        learning curve.
    train_scores : ndarray of shape (n_ticks, n_cv_folds)
        Scores on training sets.
    test_scores : ndarray of shape (n_ticks, n_cv_folds)
        Scores on test set.
    score_name : str, default=None
        The name of the score used in `learning_curve`. It will override the name
        inferred from the `scoring` parameter. If `score` is `None`, we use `"Score"` if
        `negate_score` is `False` and `"Negative score"` otherwise. If `scoring` is a
        string or a callable, we infer the name. We replace `_` by spaces and capitalize
        the first letter. We remove `neg_` and replace it by `"Negative"` if
        `negate_score` is `False` or just remove it otherwise.
    Attributes
    ----------
    ax_ : matplotlib Axes
        Axes with the learning curve.
    figure_ : matplotlib Figure
        Figure containing the learning curve.
    errorbar_ : list of matplotlib Artist or None
        When the `std_display_style` is `"errorbar"`, this is a list of
        `matplotlib.container.ErrorbarContainer` objects. If another style is
        used, `errorbar_` is `None`.
    lines_ : list of matplotlib Artist or None
        When the `std_display_style` is `"fill_between"`, this is a list of
        `matplotlib.lines.Line2D` objects corresponding to the mean train and
        test scores. If another style is used, `line_` is `None`.
    fill_between_ : list of matplotlib Artist or None
        When the `std_display_style` is `"fill_between"`, this is a list of
        `matplotlib.collections.PolyCollection` objects. If another style is
        used, `fill_between_` is `None`.
    See Also
    --------
    sklearn.model_selection.learning_curve : Compute the learning curve.
    Examples
    --------
    >>> import matplotlib.pyplot as plt
    >>> from sklearn.datasets import load_iris
    >>> from sklearn.model_selection import LearningCurveDisplay, learning_curve
    >>> from sklearn.tree import DecisionTreeClassifier
    >>> X, y = load_iris(return_X_y=True)
    >>> tree = DecisionTreeClassifier(random_state=0)
    >>> train_sizes, train_scores, test_scores = learning_curve(
    ...     tree, X, y)
    >>> display = LearningCurveDisplay(train_sizes=train_sizes,
    ...     train_scores=train_scores, test_scores=test_scores, score_name="Score")
    >>> display.plot()
    <...>
    >>> plt.show()
    """
    def __init__(self, *, train_sizes, train_scores, test_scores, score_name=None):
        # Store everything as-is; validation happens at plot time.
        self.train_sizes = train_sizes
        self.train_scores = train_scores
        self.test_scores = test_scores
        self.score_name = score_name
    def plot(
        self,
        ax=None,
        *,
        negate_score=False,
        score_name=None,
        score_type="both",
        std_display_style="fill_between",
        line_kw=None,
        fill_between_kw=None,
        errorbar_kw=None,
    ):
        """Plot visualization.
        Parameters
        ----------
        ax : matplotlib Axes, default=None
            Axes object to plot on. If `None`, a new figure and axes is
            created.
        negate_score : bool, default=False
            Whether or not to negate the scores obtained through
            :func:`~sklearn.model_selection.learning_curve`. This is
            particularly useful when using the error denoted by `neg_*` in
            `scikit-learn`.
        score_name : str, default=None
            The name of the score used to decorate the y-axis of the plot. It will
            override the name inferred from the `scoring` parameter. If `score` is
            `None`, we use `"Score"` if `negate_score` is `False` and `"Negative score"`
            otherwise. If `scoring` is a string or a callable, we infer the name. We
            replace `_` by spaces and capitalize the first letter. We remove `neg_` and
            replace it by `"Negative"` if `negate_score` is
            `False` or just remove it otherwise.
        score_type : {"test", "train", "both"}, default="both"
            The type of score to plot. Can be one of `"test"`, `"train"`, or
            `"both"`.
        std_display_style : {"errorbar", "fill_between"} or None, default="fill_between"
            The style used to display the score standard deviation around the
            mean score. If None, no standard deviation representation is
            displayed.
        line_kw : dict, default=None
            Additional keyword arguments passed to the `plt.plot` used to draw
            the mean score.
        fill_between_kw : dict, default=None
            Additional keyword arguments passed to the `plt.fill_between` used
            to draw the score standard deviation.
        errorbar_kw : dict, default=None
            Additional keyword arguments passed to the `plt.errorbar` used to
            draw mean score and standard deviation score.
        Returns
        -------
        display : :class:`~sklearn.model_selection.LearningCurveDisplay`
            Object that stores computed values.
        """
        # Delegate all drawing to the shared base-class routine, with the
        # training-set sizes on the x-axis.
        self._plot_curve(
            self.train_sizes,
            ax=ax,
            negate_score=negate_score,
            score_name=score_name,
            score_type=score_type,
            std_display_style=std_display_style,
            line_kw=line_kw,
            fill_between_kw=fill_between_kw,
            errorbar_kw=errorbar_kw,
        )
        # The x-axis label is specific to the learning curve.
        self.ax_.set_xlabel("Number of samples in the training set")
        return self
    @classmethod
    def from_estimator(
        cls,
        estimator,
        X,
        y,
        *,
        groups=None,
        train_sizes=np.linspace(0.1, 1.0, 5),
        cv=None,
        scoring=None,
        exploit_incremental_learning=False,
        n_jobs=None,
        pre_dispatch="all",
        verbose=0,
        shuffle=False,
        random_state=None,
        error_score=np.nan,
        fit_params=None,
        ax=None,
        negate_score=False,
        score_name=None,
        score_type="both",
        std_display_style="fill_between",
        line_kw=None,
        fill_between_kw=None,
        errorbar_kw=None,
    ):
        """Create a learning curve display from an estimator.
        Read more in the :ref:`User Guide <visualizations>` for general
        information about the visualization API and :ref:`detailed
        documentation <learning_curve>` regarding the learning curve
        visualization.
        Parameters
        ----------
        estimator : object type that implements the "fit" and "predict" methods
            An object of that type which is cloned for each validation.
        X : array-like of shape (n_samples, n_features)
            Training data, where `n_samples` is the number of samples and
            `n_features` is the number of features.
        y : array-like of shape (n_samples,) or (n_samples, n_outputs) or None
            Target relative to X for classification or regression;
            None for unsupervised learning.
        groups : array-like of shape (n_samples,), default=None
            Group labels for the samples used while splitting the dataset into
            train/test set. Only used in conjunction with a "Group" :term:`cv`
            instance (e.g., :class:`GroupKFold`).
        train_sizes : array-like of shape (n_ticks,), \
                default=np.linspace(0.1, 1.0, 5)
            Relative or absolute numbers of training examples that will be used
            to generate the learning curve. If the dtype is float, it is
            regarded as a fraction of the maximum size of the training set
            (that is determined by the selected validation method), i.e. it has
            to be within (0, 1]. Otherwise it is interpreted as absolute sizes
            of the training sets. Note that for classification the number of
            samples usually have to be big enough to contain at least one
            sample from each class.
        cv : int, cross-validation generator or an iterable, default=None
            Determines the cross-validation splitting strategy.
            Possible inputs for cv are:
            - None, to use the default 5-fold cross validation,
            - int, to specify the number of folds in a `(Stratified)KFold`,
            - :term:`CV splitter`,
            - An iterable yielding (train, test) splits as arrays of indices.
            For int/None inputs, if the estimator is a classifier and `y` is
            either binary or multiclass,
            :class:`~sklearn.model_selection.StratifiedKFold` is used. In all
            other cases, :class:`~sklearn.model_selection.KFold` is used. These
            splitters are instantiated with `shuffle=False` so the splits will
            be the same across calls.
            Refer :ref:`User Guide <cross_validation>` for the various
            cross-validation strategies that can be used here.
        scoring : str or callable, default=None
            A string (see :ref:`scoring_parameter`) or
            a scorer callable object / function with signature
            `scorer(estimator, X, y)` (see :ref:`scoring_callable`).
        exploit_incremental_learning : bool, default=False
            If the estimator supports incremental learning, this will be
            used to speed up fitting for different training set sizes.
        n_jobs : int, default=None
            Number of jobs to run in parallel. Training the estimator and
            computing the score are parallelized over the different training
            and test sets. `None` means 1 unless in a
            :obj:`joblib.parallel_backend` context. `-1` means using all
            processors. See :term:`Glossary <n_jobs>` for more details.
        pre_dispatch : int or str, default='all'
            Number of predispatched jobs for parallel execution (default is
            all). The option can reduce the allocated memory. The str can
            be an expression like '2*n_jobs'.
        verbose : int, default=0
            Controls the verbosity: the higher, the more messages.
        shuffle : bool, default=False
            Whether to shuffle training data before taking prefixes of it
            based on`train_sizes`.
        random_state : int, RandomState instance or None, default=None
            Used when `shuffle` is True. Pass an int for reproducible
            output across multiple function calls.
            See :term:`Glossary <random_state>`.
        error_score : 'raise' or numeric, default=np.nan
            Value to assign to the score if an error occurs in estimator
            fitting. If set to 'raise', the error is raised. If a numeric value
            is given, FitFailedWarning is raised.
        fit_params : dict, default=None
            Parameters to pass to the fit method of the estimator.
        ax : matplotlib Axes, default=None
            Axes object to plot on. If `None`, a new figure and axes is
            created.
        negate_score : bool, default=False
            Whether or not to negate the scores obtained through
            :func:`~sklearn.model_selection.learning_curve`. This is
            particularly useful when using the error denoted by `neg_*` in
            `scikit-learn`.
        score_name : str, default=None
            The name of the score used to decorate the y-axis of the plot. It will
            override the name inferred from the `scoring` parameter. If `score` is
            `None`, we use `"Score"` if `negate_score` is `False` and `"Negative score"`
            otherwise. If `scoring` is a string or a callable, we infer the name. We
            replace `_` by spaces and capitalize the first letter. We remove `neg_` and
            replace it by `"Negative"` if `negate_score` is
            `False` or just remove it otherwise.
        score_type : {"test", "train", "both"}, default="both"
            The type of score to plot. Can be one of `"test"`, `"train"`, or
            `"both"`.
        std_display_style : {"errorbar", "fill_between"} or None, default="fill_between"
            The style used to display the score standard deviation around the
            mean score. If `None`, no representation of the standard deviation
            is displayed.
        line_kw : dict, default=None
            Additional keyword arguments passed to the `plt.plot` used to draw
            the mean score.
        fill_between_kw : dict, default=None
            Additional keyword arguments passed to the `plt.fill_between` used
            to draw the score standard deviation.
        errorbar_kw : dict, default=None
            Additional keyword arguments passed to the `plt.errorbar` used to
            draw mean score and standard deviation score.
        Returns
        -------
        display : :class:`~sklearn.model_selection.LearningCurveDisplay`
            Object that stores computed values.
        Examples
        --------
        >>> import matplotlib.pyplot as plt
        >>> from sklearn.datasets import load_iris
        >>> from sklearn.model_selection import LearningCurveDisplay
        >>> from sklearn.tree import DecisionTreeClassifier
        >>> X, y = load_iris(return_X_y=True)
        >>> tree = DecisionTreeClassifier(random_state=0)
        >>> LearningCurveDisplay.from_estimator(tree, X, y)
        <...>
        >>> plt.show()
        """
        # Fail early with a clear message if matplotlib is not installed.
        check_matplotlib_support(f"{cls.__name__}.from_estimator")
        # Resolve the y-axis label once, before any computation.
        score_name = _validate_score_name(score_name, scoring, negate_score)
        train_sizes, train_scores, test_scores = learning_curve(
            estimator,
            X,
            y,
            groups=groups,
            train_sizes=train_sizes,
            cv=cv,
            scoring=scoring,
            exploit_incremental_learning=exploit_incremental_learning,
            n_jobs=n_jobs,
            pre_dispatch=pre_dispatch,
            verbose=verbose,
            shuffle=shuffle,
            random_state=random_state,
            error_score=error_score,
            return_times=False,
            fit_params=fit_params,
        )
        viz = cls(
            train_sizes=train_sizes,
            train_scores=train_scores,
            test_scores=test_scores,
            score_name=score_name,
        )
        # score_name is not forwarded: plot() falls back to viz.score_name.
        return viz.plot(
            ax=ax,
            negate_score=negate_score,
            score_type=score_type,
            std_display_style=std_display_style,
            line_kw=line_kw,
            fill_between_kw=fill_between_kw,
            errorbar_kw=errorbar_kw,
        )
class ValidationCurveDisplay(_BaseCurveDisplay):
    """Validation Curve visualization.
    It is recommended to use
    :meth:`~sklearn.model_selection.ValidationCurveDisplay.from_estimator` to
    create a :class:`~sklearn.model_selection.ValidationCurveDisplay` instance.
    All parameters are stored as attributes.
    Read more in the :ref:`User Guide <visualizations>` for general information
    about the visualization API and :ref:`detailed documentation
    <validation_curve>` regarding the validation curve visualization.
    .. versionadded:: 1.3
    Parameters
    ----------
    param_name : str
        Name of the parameter that has been varied.
    param_range : array-like of shape (n_ticks,)
        The values of the parameter that have been evaluated.
    train_scores : ndarray of shape (n_ticks, n_cv_folds)
        Scores on training sets.
    test_scores : ndarray of shape (n_ticks, n_cv_folds)
        Scores on test set.
    score_name : str, default=None
        The name of the score used in `validation_curve`. It will override the name
        inferred from the `scoring` parameter. If `score` is `None`, we use `"Score"` if
        `negate_score` is `False` and `"Negative score"` otherwise. If `scoring` is a
        string or a callable, we infer the name. We replace `_` by spaces and capitalize
        the first letter. We remove `neg_` and replace it by `"Negative"` if
        `negate_score` is `False` or just remove it otherwise.
    Attributes
    ----------
    ax_ : matplotlib Axes
        Axes with the validation curve.
    figure_ : matplotlib Figure
        Figure containing the validation curve.
    errorbar_ : list of matplotlib Artist or None
        When the `std_display_style` is `"errorbar"`, this is a list of
        `matplotlib.container.ErrorbarContainer` objects. If another style is
        used, `errorbar_` is `None`.
    lines_ : list of matplotlib Artist or None
        When the `std_display_style` is `"fill_between"`, this is a list of
        `matplotlib.lines.Line2D` objects corresponding to the mean train and
        test scores. If another style is used, `line_` is `None`.
    fill_between_ : list of matplotlib Artist or None
        When the `std_display_style` is `"fill_between"`, this is a list of
        `matplotlib.collections.PolyCollection` objects. If another style is
        used, `fill_between_` is `None`.
    See Also
    --------
    sklearn.model_selection.validation_curve : Compute the validation curve.
    Examples
    --------
    >>> import numpy as np
    >>> import matplotlib.pyplot as plt
    >>> from sklearn.datasets import make_classification
    >>> from sklearn.model_selection import ValidationCurveDisplay, validation_curve
    >>> from sklearn.linear_model import LogisticRegression
    >>> X, y = make_classification(n_samples=1_000, random_state=0)
    >>> logistic_regression = LogisticRegression()
    >>> param_name, param_range = "C", np.logspace(-8, 3, 10)
    >>> train_scores, test_scores = validation_curve(
    ...     logistic_regression, X, y, param_name=param_name, param_range=param_range
    ... )
    >>> display = ValidationCurveDisplay(
    ...     param_name=param_name, param_range=param_range,
    ...     train_scores=train_scores, test_scores=test_scores, score_name="Score"
    ... )
    >>> display.plot()
    <...>
    >>> plt.show()
    """
    def __init__(
        self, *, param_name, param_range, train_scores, test_scores, score_name=None
    ):
        # Store everything as-is; validation happens at plot time.
        self.param_name = param_name
        self.param_range = param_range
        self.train_scores = train_scores
        self.test_scores = test_scores
        self.score_name = score_name
    def plot(
        self,
        ax=None,
        *,
        negate_score=False,
        score_name=None,
        score_type="both",
        std_display_style="fill_between",
        line_kw=None,
        fill_between_kw=None,
        errorbar_kw=None,
    ):
        """Plot visualization.
        Parameters
        ----------
        ax : matplotlib Axes, default=None
            Axes object to plot on. If `None`, a new figure and axes is
            created.
        negate_score : bool, default=False
            Whether or not to negate the scores obtained through
            :func:`~sklearn.model_selection.validation_curve`. This is
            particularly useful when using the error denoted by `neg_*` in
            `scikit-learn`.
        score_name : str, default=None
            The name of the score used to decorate the y-axis of the plot. It will
            override the name inferred from the `scoring` parameter. If `score` is
            `None`, we use `"Score"` if `negate_score` is `False` and `"Negative score"`
            otherwise. If `scoring` is a string or a callable, we infer the name. We
            replace `_` by spaces and capitalize the first letter. We remove `neg_` and
            replace it by `"Negative"` if `negate_score` is
            `False` or just remove it otherwise.
        score_type : {"test", "train", "both"}, default="both"
            The type of score to plot. Can be one of `"test"`, `"train"`, or
            `"both"`.
        std_display_style : {"errorbar", "fill_between"} or None, default="fill_between"
            The style used to display the score standard deviation around the
            mean score. If None, no standard deviation representation is
            displayed.
        line_kw : dict, default=None
            Additional keyword arguments passed to the `plt.plot` used to draw
            the mean score.
        fill_between_kw : dict, default=None
            Additional keyword arguments passed to the `plt.fill_between` used
            to draw the score standard deviation.
        errorbar_kw : dict, default=None
            Additional keyword arguments passed to the `plt.errorbar` used to
            draw mean score and standard deviation score.
        Returns
        -------
        display : :class:`~sklearn.model_selection.ValidationCurveDisplay`
            Object that stores computed values.
        """
        # Delegate all drawing to the shared base-class routine, with the
        # evaluated parameter values on the x-axis.
        self._plot_curve(
            self.param_range,
            ax=ax,
            negate_score=negate_score,
            score_name=score_name,
            score_type=score_type,
            std_display_style=std_display_style,
            line_kw=line_kw,
            fill_between_kw=fill_between_kw,
            errorbar_kw=errorbar_kw,
        )
        # The x-axis label is specific to the validation curve.
        self.ax_.set_xlabel(f"{self.param_name}")
        return self
    @classmethod
    def from_estimator(
        cls,
        estimator,
        X,
        y,
        *,
        param_name,
        param_range,
        groups=None,
        cv=None,
        scoring=None,
        n_jobs=None,
        pre_dispatch="all",
        verbose=0,
        error_score=np.nan,
        fit_params=None,
        ax=None,
        negate_score=False,
        score_name=None,
        score_type="both",
        std_display_style="fill_between",
        line_kw=None,
        fill_between_kw=None,
        errorbar_kw=None,
    ):
        """Create a validation curve display from an estimator.
        Read more in the :ref:`User Guide <visualizations>` for general
        information about the visualization API and :ref:`detailed
        documentation <validation_curve>` regarding the validation curve
        visualization.
        Parameters
        ----------
        estimator : object type that implements the "fit" and "predict" methods
            An object of that type which is cloned for each validation.
        X : array-like of shape (n_samples, n_features)
            Training data, where `n_samples` is the number of samples and
            `n_features` is the number of features.
        y : array-like of shape (n_samples,) or (n_samples, n_outputs) or None
            Target relative to X for classification or regression;
            None for unsupervised learning.
        param_name : str
            Name of the parameter that will be varied.
        param_range : array-like of shape (n_values,)
            The values of the parameter that will be evaluated.
        groups : array-like of shape (n_samples,), default=None
            Group labels for the samples used while splitting the dataset into
            train/test set. Only used in conjunction with a "Group" :term:`cv`
            instance (e.g., :class:`GroupKFold`).
        cv : int, cross-validation generator or an iterable, default=None
            Determines the cross-validation splitting strategy.
            Possible inputs for cv are:
            - None, to use the default 5-fold cross validation,
            - int, to specify the number of folds in a `(Stratified)KFold`,
            - :term:`CV splitter`,
            - An iterable yielding (train, test) splits as arrays of indices.
            For int/None inputs, if the estimator is a classifier and `y` is
            either binary or multiclass,
            :class:`~sklearn.model_selection.StratifiedKFold` is used. In all
            other cases, :class:`~sklearn.model_selection.KFold` is used. These
            splitters are instantiated with `shuffle=False` so the splits will
            be the same across calls.
            Refer :ref:`User Guide <cross_validation>` for the various
            cross-validation strategies that can be used here.
        scoring : str or callable, default=None
            A string (see :ref:`scoring_parameter`) or
            a scorer callable object / function with signature
            `scorer(estimator, X, y)` (see :ref:`scoring_callable`).
        n_jobs : int, default=None
            Number of jobs to run in parallel. Training the estimator and
            computing the score are parallelized over the different training
            and test sets. `None` means 1 unless in a
            :obj:`joblib.parallel_backend` context. `-1` means using all
            processors. See :term:`Glossary <n_jobs>` for more details.
        pre_dispatch : int or str, default='all'
            Number of predispatched jobs for parallel execution (default is
            all). The option can reduce the allocated memory. The str can
            be an expression like '2*n_jobs'.
        verbose : int, default=0
            Controls the verbosity: the higher, the more messages.
        error_score : 'raise' or numeric, default=np.nan
            Value to assign to the score if an error occurs in estimator
            fitting. If set to 'raise', the error is raised. If a numeric value
            is given, FitFailedWarning is raised.
        fit_params : dict, default=None
            Parameters to pass to the fit method of the estimator.
        ax : matplotlib Axes, default=None
            Axes object to plot on. If `None`, a new figure and axes is
            created.
        negate_score : bool, default=False
            Whether or not to negate the scores obtained through
            :func:`~sklearn.model_selection.validation_curve`. This is
            particularly useful when using the error denoted by `neg_*` in
            `scikit-learn`.
        score_name : str, default=None
            The name of the score used to decorate the y-axis of the plot. It will
            override the name inferred from the `scoring` parameter. If `score` is
            `None`, we use `"Score"` if `negate_score` is `False` and `"Negative score"`
            otherwise. If `scoring` is a string or a callable, we infer the name. We
            replace `_` by spaces and capitalize the first letter. We remove `neg_` and
            replace it by `"Negative"` if `negate_score` is
            `False` or just remove it otherwise.
        score_type : {"test", "train", "both"}, default="both"
            The type of score to plot. Can be one of `"test"`, `"train"`, or
            `"both"`.
        std_display_style : {"errorbar", "fill_between"} or None, default="fill_between"
            The style used to display the score standard deviation around the
            mean score. If `None`, no representation of the standard deviation
            is displayed.
        line_kw : dict, default=None
            Additional keyword arguments passed to the `plt.plot` used to draw
            the mean score.
        fill_between_kw : dict, default=None
            Additional keyword arguments passed to the `plt.fill_between` used
            to draw the score standard deviation.
        errorbar_kw : dict, default=None
            Additional keyword arguments passed to the `plt.errorbar` used to
            draw mean score and standard deviation score.
        Returns
        -------
        display : :class:`~sklearn.model_selection.ValidationCurveDisplay`
            Object that stores computed values.
        Examples
        --------
        >>> import numpy as np
        >>> import matplotlib.pyplot as plt
        >>> from sklearn.datasets import make_classification
        >>> from sklearn.model_selection import ValidationCurveDisplay
        >>> from sklearn.linear_model import LogisticRegression
        >>> X, y = make_classification(n_samples=1_000, random_state=0)
        >>> logistic_regression = LogisticRegression()
        >>> param_name, param_range = "C", np.logspace(-8, 3, 10)
        >>> ValidationCurveDisplay.from_estimator(
        ...     logistic_regression, X, y, param_name=param_name,
        ...     param_range=param_range,
        ... )
        <...>
        >>> plt.show()
        """
        # Fail early with a clear message if matplotlib is not installed.
        check_matplotlib_support(f"{cls.__name__}.from_estimator")
        # Resolve the y-axis label once, before any computation.
        score_name = _validate_score_name(score_name, scoring, negate_score)
        train_scores, test_scores = validation_curve(
            estimator,
            X,
            y,
            param_name=param_name,
            param_range=param_range,
            groups=groups,
            cv=cv,
            scoring=scoring,
            n_jobs=n_jobs,
            pre_dispatch=pre_dispatch,
            verbose=verbose,
            error_score=error_score,
            fit_params=fit_params,
        )
        viz = cls(
            param_name=param_name,
            # Converted to an array so _plot_curve can call .min() on it.
            param_range=np.asarray(param_range),
            train_scores=train_scores,
            test_scores=test_scores,
            score_name=score_name,
        )
        # score_name is not forwarded: plot() falls back to viz.score_name.
        return viz.plot(
            ax=ax,
            negate_score=negate_score,
            score_type=score_type,
            std_display_style=std_display_style,
            line_kw=line_kw,
            fill_between_kw=fill_between_kw,
            errorbar_kw=errorbar_kw,
        )
|
scikit-learnREPO_NAMEscikit-learnPATH_START.@scikit-learn_extracted@scikit-learn-main@sklearn@model_selection@_plot.py@.PATH_END.py
|
{
"filename": "cal_on_karl_results.py",
"repo_name": "HETDEX/hetdex_api",
"repo_path": "hetdex_api_extracted/hetdex_api-master/hetdex_tools/cal_on_karl_results.py",
"type": "Python"
}
|
"""
Derive a scaling between the values in
from the ShotSensitivity API and the completeness
simulations Karl Gebhardt ran.
Daniel Farrow (MPE) 2021, 2022
"""
from numpy import (loadtxt, savetxt, transpose, interp, sqrt, exp,
mean, linspace, zeros, array, polyfit, polyval,
abs, std, unique, histogram)
from numpy.random import uniform
import matplotlib as mpl
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
from matplotlib.colors import TABLEAU_COLORS
from astropy.table import Table, vstack
from astropy.stats import biweight_location
from hetdex_api.extinction import get_2pt1_extinction_fix
from hetdex_api.flux_limits.shot_sensitivity import ShotSensitivity
from hetdex_api.flux_limits.flim_models import (SimulationInterpolator, read_karl_file,
return_flux_limit_model)
# Global matplotlib styling for the figures produced by this script.
FS=16.0  # base font size (points) used when labelling axes
mpl.rcParams['font.family'] = 'serif'
mpl.rcParams['mathtext.fontset'] = 'dejavuserif'
mpl.rcParams["xtick.labelsize"] = 15.0
mpl.rcParams["ytick.labelsize"] = 15.0
mpl.rcParams["xtick.direction"] = "in"
mpl.rcParams["ytick.direction"] = "in"
# NOTE(review): selects the Tk GUI backend at import time, after pyplot has
# already been imported above — this will fail on headless machines with no
# display; confirm the switch is intended and takes effect here.
mpl.use('tkagg')
def biweight_one_sigma_from_table(table, wave_bins):
    """
    Compute the biweight location (robust mean) of the
    noise in wavelength bins of an astropy table.

    Parameters
    ----------
    table : astropy.table:Table
        a table containing
        `flux_noise_1sigma` and
        `wave`
    wave_bins : array
        edges of wavelength bins

    Returns
    -------
    bin_centres, locations : array
        the centres of the wavelength bins and the
        biweight location of the noise in each bin
    """
    bin_centres = 0.5*(wave_bins[1:] + wave_bins[:-1])
    locations = []
    for lo, hi in zip(wave_bins[:-1], wave_bins[1:]):
        in_bin = (table["wave"] > lo) & (table["wave"] <= hi)
        noise = table["flux_noise_1sigma"][in_bin]
        # Reject clearly unphysical values before the robust average.
        good = noise[(noise < 100.0) & (noise > 0)]
        locations.append(biweight_location(good, ignore_nan=True))
    return bin_centres, array(locations)
def measure_sigma_to_f50(fout):
    """
    Measure the conversion between the
    noise measured in apertures, to
    the flux at 50% completeness from
    the simulation.

    Parameters
    ----------
    fout : str
        filename to output the
        scaling to

    Returns
    -------
    table : astropy.table:Table
        one row per (datevobs, S/N cut) pair with a
        ``wave_XXXX`` column of sigma-to-f50 conversion
        factors for each wavelength bin (also written to
        ``fout``)
    """
    # read in the shots to consider
    with open("datevobs_list_cal", 'r') as fp:
        rshots = fp.readlines()
    shots = [x.strip() for x in rshots]
    # correction for the HDR 2.1 extinction bug (function of wavelength)
    fix = get_2pt1_extinction_fix()
    all_tables = []
    # wavelength bin edges matching the simulation outputs (Angstrom)
    wave_bins = array([3550., 3821., 4093., 4364., 4635., 4907., 5178., 5450.])
    # S/N thresholds for which simulation results exist
    snlist = [4.8, 5.0, 5.5, 6.0, 6.5, 7.0]
    for shot in shots:
        # For each shot measure the average flux noise 1 sigma
        # This should be a table of API-extracted noise
        # values for the cal shots
        table = Table.read("{:s}_full_input_0.fits".format(shot))
        src_waves = table["wave"]
        # above atmosphere units
        src_noise = table["flux_noise_1sigma"]
        # Robust per-bin noise averages (the previous fine-binned call with
        # 200 bins was dead code and has been removed).
        wlav_binned, sigma_av_binned = biweight_one_sigma_from_table(table, wave_bins)
        t = Table()
        t["sn"] = snlist
        t["datevobs"] = shot
        scales = zeros((len(snlist), len(wlav_binned)))
        # Now loop over deriving a scaling
        for i, sn in enumerate(snlist):
            waves, f50, compl_curves_base, fluxes_base =\
                read_karl_file("karl_f50_stuff/headfix{:s}.sn{:2.1f}.fcor".format(shot, sn))
            # Correct for the extinction bug in HDR 2.1, so
            # that we're working in above the atmosphere units
            f50 = f50/fix(waves)
            # First guess: ratio of simulated f50 (cgs) to measured noise.
            scales[i, :] = f50*1e-17/sigma_av_binned
            # Correct for the scatter within the shot
            for j in range(scales.shape[1]):
                sel = (src_waves > wave_bins[j]) & (src_waves <= wave_bins[j + 1])
                scales[i, j] = derive_corrections_to_conversion(src_noise[sel], src_waves[sel],
                                                                f50[j]*1e-17, scales[i, j], sn,
                                                                fluxes_base/fix(waves[j]),
                                                                compl_curves_base[j])
        # One output column per wavelength bin, labelled by bin centre.
        for i, wl in enumerate(wlav_binned):
            t["wave_{:4.0f}".format(wl)] = scales[:, i]
        all_tables.append(t)
    table = vstack(all_tables)
    table.write(fout)
    return table
def derive_corrections_to_conversion(flux_noise_1sigma, waves,
                                     f50, guess, sncut,
                                     fluxes_base, compl_curves_base,
                                     maxiter=20, frac=0.02):
    """
    Derive corrections to the 1 sigma to f50 conversion
    iteratively by evaluating the completeness model
    on a set of sampled positions. The goal is to
    match the results of source simulations.

    Parameters
    ----------
    flux_noise_1sigma : array
        the noise in the sampled positions
    waves : array
        the wavelength of the sampled
        positions
    f50 : float
        the target 50% completeness
        as measured from the source simulations
    guess : float
        initial guess for scaling factor
    sncut : float
        the S/N threshold
    fluxes_base, compl_curves_base : array
        the fluxes and completeness curves
        from the source simulation
    maxiter : int
        maximum iterations of evaluating the
        completeness model before giving up
    frac : float
        target fractional accuracy on the
        conversion to f50 (also used as the
        initial step size of the iteration)

    Returns
    -------
    guess : float
        the new value for the conversion between
        the source simulation 50% completeness flux
        and the noise in apertures from the API
    """
    # Remember the requested tolerance: ``frac`` doubles as the step size
    # below and is halved whenever the iteration overshoots. (Previously
    # the convergence test hard-coded 0.02, silently ignoring ``frac``,
    # and the final check compared against the mutated step size.)
    tol = frac
    # datevshot just needed for the completeness interpolator
    s = ShotSensitivity("20200519v013")
    flux_bins = linspace(5e-18, 1e-15, 60)
    fbcens = 0.5*(flux_bins[1:] + flux_bins[:-1])
    # Uniformly sampled trial fluxes, one per sampled position.
    fluxes = uniform(5e-18, 1e-15, size=len(flux_noise_1sigma))
    hist_in = histogram(fluxes, bins=flux_bins)[0]
    difflast = 1e99
    guesslast = 0.
    # Guard against maxiter <= 0 leaving ``diff`` unbound below.
    diff = 1e99
    for i in range(maxiter):
        compl = s.return_completeness(fluxes, None, None, waves, sncut,
                                      f50s = guess*flux_noise_1sigma)
        # Completeness-weighted over unweighted histogram gives the
        # recovered fraction per flux bin.
        hist_out = histogram(fluxes, bins=flux_bins,
                             weights=compl)[0]
        ratios = 1.0*hist_out/hist_in
        # Flux at which the normalised completeness crosses 50%.
        # NOTE(review): interp assumes ratios/max(ratios) is increasing
        # in flux — confirm this holds for all completeness curves.
        f50_new = interp(0.5, ratios/max(ratios),
                         fbcens)
        diff = (f50_new - f50)/f50_new
        print("Iteration {:d} ".format(i + 1), f50, f50_new, f50/f50_new, diff)
        if abs(diff) < tol:
            break
        # If we're getting worse decrease step size
        # and go back
        if abs(diff) > abs(difflast):
            frac = frac/2.0
            guess = guesslast
            diff = difflast
        else:
            difflast = diff
            guesslast = guess
        if diff > 0.0:
            guess = guess - frac*guess
        else:
            guess = guess + frac*guess
    if not abs(diff) < tol:
        print("Couldn't meet spec here: ", min(waves), max(waves), diff)
    return guess
def plot_sigma_to_f50_scaling(table):
    """
    Plot the scaling between the 1 sigma
    value in apertures and the 50%
    completeness flux. Add the current
    model fits as a dashed line.

    Parameters
    ----------
    table : astropy.table:Table
        the table of simulation
        measured scaling values

    Returns
    -------
    sns : array
        unique S/N cuts found in the table
    waves : list
        wavelength bin centres (from the last S/N group iterated)
    scalecens, scalecens_err : list
        per-S/N mean ratio and scatter at the central wavelength bin
    wavetrends : list
        per-S/N wavelength trends, normalised to the central bin
    mscales, stds : list
        mean ratios and scatters for the *last* S/N group only
    """
    fig = plt.figure(figsize=(9.5,8))
    sns = unique(table["sn"])
    scalecens = []
    scalecens_err = []
    wavetrends = []
    cols = []

    # Current best-fit model; used below to over-plot dashed reference curves
    f50_from_noise, sinterp, interp_sigmas = \
        return_flux_limit_model("v3")

    for color, sn in zip(TABLEAU_COLORS, sns):
        # Select rows for this S/N cut (float-safe equality test)
        there = table[abs(table["sn"] - sn) < 1e-8]
        waves = []
        mscales = []
        stds = []
        cols.append(color)
        for colname in there.colnames:
            if "wave" in colname:
                # Column names encode the wavelength, e.g. "wave_4500"
                waves.append(float(colname.replace("wave_", "")))
                mscales.append(mean(there[colname]))
                stds.append(std(there[colname]))

        # Index 4 is taken as the central wavelength bin -- assumes at
        # least five wavelength columns in the table (TODO confirm)
        scalecens.append(mscales[4])
        scalecens_err.append(stds[4])
        wavetrends.append(mscales/mscales[4])

        #plt.axhline(sn, linestyle=":", color=color)
        # Model prediction for unit noise, i.e. the f50/sigma ratio itself
        model_vals = f50_from_noise(0*array(waves) + 1.0, waves, sn)
        plt.plot(waves, model_vals, "--", color=color)
        plt.errorbar(waves, mscales, yerr=stds, marker="o",
                     label = "S/N $>$" + "{:2.1f}".format(sn),
                     color=color, linestyle="None")

    # Parts adapted from the official matplotlib tutorial
    # Build a discrete colorbar mapping colours to S/N cuts
    cmap = mpl.colors.ListedColormap(cols)
    bounds = range(len(sns) + 1)
    cens = 0.5*(array(bounds[1:]) + array(bounds[:-1]))
    norm = mpl.colors.BoundaryNorm(bounds, cmap.N)
    #cax = plt.gca().inset_axes([5600, 4.2, 80.0, 3.4],
    #                           transform=plt.gca().transData)
    # Inset axes placed in data coordinates for the colorbar
    cax = plt.gca().inset_axes([5600, 3.98, 80.0, 2.92],
                               transform=plt.gca().transData)
    cb = fig.colorbar(mpl.cm.ScalarMappable(cmap=cmap, norm=norm),
                      cax=cax, ticks=cens,
                      boundaries=bounds)
    cb.ax.set_yticklabels(sns)
    cb.set_label("S/N cut", fontsize=FS)
    plt.xlabel("Wavelength (A)", fontsize=FS)
    plt.ylabel(r"Simulation-derived $f_{\rm 50}/\sigma_{\rm aper}$ ratio", fontsize=FS)
    plt.tight_layout()

    return sns, waves, scalecens, scalecens_err, wavetrends, mscales, stds
def fit_wavetrend(waves, wavetrend):
    """
    Fit the wavelength trends of the
    1 sigma -> f50 conversion.

    Parameters
    ----------
    waves : array
        the wavelengths
    wavetrend : array
        2D array of the
        simulation derived f50/1sigma
        ratios for the different
        calibration shots

    Return
    ------
    vals : array
        the best fitting polynomial
    """
    plt.figure(figsize=(9,8))

    # Plot each shot's trend. The original body referenced the
    # module-level ``wavetrends`` global and ignored this argument,
    # which raised a NameError when called outside of __main__.
    for trend in wavetrend:
        plt.plot(waves, trend)

    # Fit a quartic to the shot-averaged trend
    mean_wavetrend = mean(wavetrend, axis=0)
    vals = polyfit(waves, mean_wavetrend, 4)

    plt.plot(waves, polyval(vals, waves), "k:",
             linewidth=8, label="Best-fit")
    plt.legend(frameon=False,
               prop={"size" : FS})
    plt.ylabel("Wavelength dependence",
               fontsize=FS)
    plt.xlabel("Wavelength (A)",
               fontsize=FS)
    plt.tight_layout()

    return vals
def fit_sntrend(sns, scalecens, scalecens_err):
    """
    Fit the conversion between 1-sigma and
    50% completeness for different S/N
    thresholds.

    Parameters
    ----------
    sns : array
        the S/N theshholds
    scalecens, scalecens_err :
        the ration f50/1 sigma
        for different S/N
        cuts and its error

    Return
    ------
    vals : array
        the best fitting polynomial
    """
    plt.figure(figsize=(9,8))

    # Cubic fit of the central-wavelength ratio versus S/N cut
    coeffs = polyfit(sns, scalecens, 3)

    # Measured points with error bars, plus the fitted curve
    plt.errorbar(sns, scalecens, linestyle="none",
                 yerr=scalecens_err,
                 marker="o", markersize=8)
    fitted = polyval(coeffs, sns)
    plt.plot(sns, fitted, "k:",
             linewidth=4, label="Best-fit")

    plt.legend(frameon=False,
               prop={"size" : FS})
    plt.ylabel(r"$f_{50}/\sigma_{\rm aper}$",
               fontsize=FS)
    plt.xlabel("S/N",
               fontsize=FS)
    plt.tight_layout()

    return coeffs
if __name__ == "__main__":

    # Set to True to rerun the (slow) source-simulation measurement;
    # otherwise read the cached scaling table from disk.
    remeasure = False
    fscale = "scaling.fits"

    if remeasure:
        table = measure_sigma_to_f50(fscale)
    else:
        table = Table.read(fscale)

    # Plot the measured f50/sigma ratios and collect the per-S/N summaries
    sns, waves, scalecens, scalecens_err, wavetrends, allscales, allstd = \
        plot_sigma_to_f50_scaling(table)

    # Fit the wavelength and S/N dependence of the conversion
    wave_vals = fit_wavetrend(waves, wavetrends)
    sn_vals = fit_sntrend(sns, scalecens, scalecens_err)

    print("Wavelength poly: ", wave_vals)
    print("S/N poly:", sn_vals)

    plt.show()
|
HETDEXREPO_NAMEhetdex_apiPATH_START.@hetdex_api_extracted@hetdex_api-master@hetdex_tools@cal_on_karl_results.py@.PATH_END.py
|
{
"filename": "_extendsunburstcolors.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/layout/_extendsunburstcolors.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class ExtendsunburstcolorsValidator(_plotly_utils.basevalidators.BooleanValidator):
    """Boolean validator for the ``layout.extendsunburstcolors`` property."""

    def __init__(
        self, plotly_name="extendsunburstcolors", parent_name="layout", **kwargs
    ):
        # Default edit_type is "calc" unless the caller overrides it
        edit_type = kwargs.pop("edit_type", "calc")
        super(ExtendsunburstcolorsValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@layout@_extendsunburstcolors.py@.PATH_END.py
|
{
"filename": "_tracerefminus.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/scatter/error_x/_tracerefminus.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class TracerefminusValidator(_plotly_utils.basevalidators.IntegerValidator):
    """Integer validator for ``scatter.error_x.tracerefminus`` (non-negative)."""

    def __init__(
        self, plotly_name="tracerefminus", parent_name="scatter.error_x", **kwargs
    ):
        # Defaults: "style" edit type and a lower bound of zero
        edit_type = kwargs.pop("edit_type", "style")
        minimum = kwargs.pop("min", 0)
        super(TracerefminusValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            min=minimum,
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@scatter@error_x@_tracerefminus.py@.PATH_END.py
|
{
"filename": "pool.py",
"repo_name": "dstndstn/tractor",
"repo_path": "tractor_extracted/tractor-main/.circleci/circleci-build-ubuntu18.04/pool.py",
"type": "Python"
}
|
#
# Module providing the `Pool` class for managing a process pool
#
# multiprocessing/pool.py
#
# Copyright (c) 2006-2008, R Oudkerk
# Licensed to PSF under a Contributor Agreement.
#
__all__ = ['Pool', 'ThreadPool']
#
# Imports
#
import threading
import queue
import itertools
import collections
import os
import time
import traceback
# If threading is available then ThreadPool should be provided. Therefore
# we avoid top-level imports which are liable to fail on some systems.
from . import util
from . import get_context, TimeoutError
#
# Constants representing the state of a pool
#

RUN = 0        # pool accepts new tasks
CLOSE = 1      # no new tasks accepted; outstanding work drains
TERMINATE = 2  # pool is being aborted

#
# Miscellaneous
#

job_counter = itertools.count()  # source of unique job ids for result objects
def mapstar(args):
    """Eagerly evaluate ``map`` over a packed ``(func, *iterables)`` tuple."""
    fn, *iterables = args
    return list(map(fn, *iterables))
def starmapstar(args):
    """Eagerly evaluate a starmap over a packed ``(func, arg_tuples)`` pair."""
    fn, argument_tuples = args[0], args[1]
    return [fn(*tup) for tup in argument_tuples]
#
# Hack to embed stringification of remote traceback in local traceback
#
class RemoteTraceback(Exception):
    """Carries the textual traceback of an exception raised in a worker."""

    def __init__(self, tb):
        # Deliberately no super().__init__() call: BaseException.__new__
        # has already stored args, matching the stdlib implementation.
        self.tb = tb

    def __str__(self):
        return self.tb
class ExceptionWithTraceback:
    """Pairs an exception with its formatted traceback so that both
    survive pickling back to the parent process."""

    def __init__(self, exc, tb):
        formatted = ''.join(traceback.format_exception(type(exc), exc, tb))
        self.exc = exc
        # Triple-quoted framing so the remote traceback reads cleanly
        self.tb = '\n"""\n%s"""' % formatted

    def __reduce__(self):
        # On unpickling, rebuild_exc reattaches the traceback as __cause__
        return rebuild_exc, (self.exc, self.tb)
def rebuild_exc(exc, tb):
    """Unpickle helper: reattach the remote traceback text as ``__cause__``."""
    exc.__cause__ = RemoteTraceback(tb)
    return exc
#
# Code run by worker processes
#
class MaybeEncodingError(Exception):
    """Wraps possible unpickleable errors, so they can be
    safely sent through the socket."""

    def __init__(self, exc, value):
        # Store reprs, not the objects, since the originals may not pickle
        self.exc = repr(exc)
        self.value = repr(value)
        super().__init__(self.exc, self.value)

    def __str__(self):
        return "Error sending result: '%s'. Reason: '%s'" % (self.value,
                                                             self.exc)

    def __repr__(self):
        return "<%s: %s>" % (type(self).__name__, self)
def worker(inqueue, outqueue, initializer=None, initargs=(), maxtasks=None,
           wrap_exception=False):
    """Main loop executed in each pool process: pull tasks from ``inqueue``,
    run them, and push ``(job, i, (success, value))`` results to ``outqueue``.
    Exits after ``maxtasks`` tasks (if given) or on EOF/sentinel."""
    assert maxtasks is None or (type(maxtasks) == int and maxtasks > 0)
    put = outqueue.put
    get = inqueue.get
    if hasattr(inqueue, '_writer'):
        # Close the pipe ends this process does not use
        inqueue._writer.close()
        outqueue._reader.close()

    if initializer is not None:
        initializer(*initargs)

    completed = 0
    while maxtasks is None or (maxtasks and completed < maxtasks):
        try:
            task = get()
        except (EOFError, OSError):
            util.debug('worker got EOFError or OSError -- exiting')
            break

        if task is None:
            # None is the shutdown sentinel sent by the task handler
            util.debug('worker got sentinel -- exiting')
            break

        job, i, func, args, kwds = task
        try:
            result = (True, func(*args, **kwds))
        except Exception as e:
            # Wrap so the remote traceback text survives pickling; skip the
            # reraise helper, whose exception is already the payload
            if wrap_exception and func is not _helper_reraises_exception:
                e = ExceptionWithTraceback(e, e.__traceback__)
            result = (False, e)
        try:
            put((job, i, result))
        except Exception as e:
            # The result itself failed to pickle; report that instead
            wrapped = MaybeEncodingError(e, result[1])
            util.debug("Possible encoding error while sending result: %s" % (
                wrapped))
            put((job, i, (False, wrapped)))

        # Drop references promptly so large objects can be collected
        task = job = result = func = args = kwds = None
        completed += 1
    util.debug('worker exiting after %d tasks' % completed)
def _helper_reraises_exception(ex):
'Pickle-able helper function for use by _guarded_task_generation.'
raise ex
#
# Class representing a process pool
#
class Pool(object):
    '''
    Class which supports an async version of applying functions to arguments.
    '''
    # When True, worker exceptions are wrapped so remote tracebacks survive
    _wrap_exception = True

    def Process(self, *args, **kwds):
        # Worker factory; ThreadPool overrides this to create threads instead
        return self._ctx.Process(*args, **kwds)

    def __init__(self, processes=None, initializer=None, initargs=(),
                 maxtasksperchild=None, context=None):
        self._ctx = context or get_context()
        self._setup_queues()
        self._taskqueue = queue.Queue()
        # Maps job id -> ApplyResult/MapResult/IMapIterator awaiting results
        self._cache = {}
        self._state = RUN
        self._maxtasksperchild = maxtasksperchild
        self._initializer = initializer
        self._initargs = initargs

        if processes is None:
            processes = os.cpu_count() or 1
        if processes < 1:
            raise ValueError("Number of processes must be at least 1")

        if initializer is not None and not callable(initializer):
            raise TypeError('initializer must be a callable')

        self._processes = processes
        self._pool = []
        self._repopulate_pool()

        # Thread that replaces workers which exit (e.g. via maxtasksperchild)
        self._worker_handler = threading.Thread(
            target=Pool._handle_workers,
            args=(self, )
            )
        self._worker_handler.daemon = True
        self._worker_handler._state = RUN
        self._worker_handler.start()

        # Thread that feeds tasks from _taskqueue to the worker input queue
        self._task_handler = threading.Thread(
            target=Pool._handle_tasks,
            args=(self._taskqueue, self._quick_put, self._outqueue,
                  self._pool, self._cache)
            )
        self._task_handler.daemon = True
        self._task_handler._state = RUN
        self._task_handler.start()

        # Thread that routes worker results back into self._cache entries
        self._result_handler = threading.Thread(
            target=Pool._handle_results,
            args=(self._outqueue, self._quick_get, self._cache)
            )
        self._result_handler.daemon = True
        self._result_handler._state = RUN
        self._result_handler.start()

        # Finalizer that shuts everything down at GC/interpreter exit
        self._terminate = util.Finalize(
            self, self._terminate_pool,
            args=(self._taskqueue, self._inqueue, self._outqueue, self._pool,
                  self._worker_handler, self._task_handler,
                  self._result_handler, self._cache),
            exitpriority=15
            )

    def _join_exited_workers(self):
        """Cleanup after any worker processes which have exited due to reaching
        their specified lifetime.  Returns True if any workers were cleaned up.
        """
        cleaned = False
        for i in reversed(range(len(self._pool))):
            worker = self._pool[i]
            if worker.exitcode is not None:
                # worker exited
                util.debug('cleaning up worker %d' % i)
                worker.join()
                cleaned = True
                del self._pool[i]
        return cleaned

    def _repopulate_pool(self):
        """Bring the number of pool processes up to the specified number,
        for use after reaping workers which have exited.
        """
        for i in range(self._processes - len(self._pool)):
            w = self.Process(target=worker,
                             args=(self._inqueue, self._outqueue,
                                   self._initializer,
                                   self._initargs, self._maxtasksperchild,
                                   self._wrap_exception)
                            )
            self._pool.append(w)
            w.name = w.name.replace('Process', 'PoolWorker')
            w.daemon = True
            w.start()
            util.debug('added worker')

    def _maintain_pool(self):
        """Clean up any exited workers and start replacements for them.
        """
        if self._join_exited_workers():
            self._repopulate_pool()

    def _setup_queues(self):
        # SimpleQueues for IPC; _quick_put/_quick_get bypass queue locking
        self._inqueue = self._ctx.SimpleQueue()
        self._outqueue = self._ctx.SimpleQueue()
        self._quick_put = self._inqueue._writer.send
        self._quick_get = self._outqueue._reader.recv

    def apply(self, func, args=(), kwds={}):
        '''
        Equivalent of `func(*args, **kwds)`.
        '''
        assert self._state == RUN
        return self.apply_async(func, args, kwds).get()

    def map(self, func, iterable, chunksize=None):
        '''
        Apply `func` to each element in `iterable`, collecting the results
        in a list that is returned.
        '''
        return self._map_async(func, iterable, mapstar, chunksize).get()

    def starmap(self, func, iterable, chunksize=None):
        '''
        Like `map()` method but the elements of the `iterable` are expected to
        be iterables as well and will be unpacked as arguments. Hence
        `func` and (a, b) becomes func(a, b).
        '''
        return self._map_async(func, iterable, starmapstar, chunksize).get()

    def starmap_async(self, func, iterable, chunksize=None, callback=None,
                      error_callback=None):
        '''
        Asynchronous version of `starmap()` method.
        '''
        return self._map_async(func, iterable, starmapstar, chunksize,
                               callback, error_callback)

    def _guarded_task_generation(self, result_job, func, iterable):
        '''Provides a generator of tasks for imap and imap_unordered with
        appropriate handling for iterables which throw exceptions during
        iteration.'''
        try:
            i = -1
            for i, x in enumerate(iterable):
                yield (result_job, i, func, (x,), {})
        except Exception as e:
            # Forward the iteration failure as a task that re-raises it
            yield (result_job, i+1, _helper_reraises_exception, (e,), {})

    def imap(self, func, iterable, chunksize=1):
        '''
        Equivalent of `map()` -- can be MUCH slower than `Pool.map()`.
        '''
        if self._state != RUN:
            raise ValueError("Pool not running")
        if chunksize == 1:
            result = IMapIterator(self._cache)
            self._taskqueue.put(
                (
                    self._guarded_task_generation(result._job, func, iterable),
                    result._set_length
                ))
            return result
        else:
            assert chunksize > 1
            task_batches = Pool._get_tasks(func, iterable, chunksize)
            result = IMapIterator(self._cache)
            self._taskqueue.put(
                (
                    self._guarded_task_generation(result._job,
                                                  mapstar,
                                                  task_batches),
                    result._set_length
                ))
            # Flatten the per-chunk lists back into a stream of items
            return (item for chunk in result for item in chunk)

    def imap_unordered(self, func, iterable, chunksize=1):
        '''
        Like `imap()` method but ordering of results is arbitrary.
        '''
        if self._state != RUN:
            raise ValueError("Pool not running")
        if chunksize == 1:
            result = IMapUnorderedIterator(self._cache)
            self._taskqueue.put(
                (
                    self._guarded_task_generation(result._job, func, iterable),
                    result._set_length
                ))
            return result
        else:
            assert chunksize > 1
            task_batches = Pool._get_tasks(func, iterable, chunksize)
            result = IMapUnorderedIterator(self._cache)
            self._taskqueue.put(
                (
                    self._guarded_task_generation(result._job,
                                                  mapstar,
                                                  task_batches),
                    result._set_length
                ))
            # Flatten the per-chunk lists back into a stream of items
            return (item for chunk in result for item in chunk)

    def apply_async(self, func, args=(), kwds={}, callback=None,
                    error_callback=None):
        '''
        Asynchronous version of `apply()` method.
        '''
        if self._state != RUN:
            raise ValueError("Pool not running")
        result = ApplyResult(self._cache, callback, error_callback)
        self._taskqueue.put(([(result._job, 0, func, args, kwds)], None))
        return result

    def map_async(self, func, iterable, chunksize=None, callback=None,
                  error_callback=None):
        '''
        Asynchronous version of `map()` method.
        '''
        return self._map_async(func, iterable, mapstar, chunksize, callback,
                               error_callback)

    def _map_async(self, func, iterable, mapper, chunksize=None, callback=None,
                   error_callback=None):
        '''
        Helper function to implement map, starmap and their async counterparts.
        '''
        if self._state != RUN:
            raise ValueError("Pool not running")
        if not hasattr(iterable, '__len__'):
            iterable = list(iterable)

        if chunksize is None:
            # Heuristic: about 4 chunks per worker
            chunksize, extra = divmod(len(iterable), len(self._pool) * 4)
            if extra:
                chunksize += 1
        if len(iterable) == 0:
            chunksize = 0

        task_batches = Pool._get_tasks(func, iterable, chunksize)
        result = MapResult(self._cache, chunksize, len(iterable), callback,
                           error_callback=error_callback)
        self._taskqueue.put(
            (
                self._guarded_task_generation(result._job,
                                              mapper,
                                              task_batches),
                None
            )
        )
        return result

    @staticmethod
    def _handle_workers(pool):
        thread = threading.current_thread()

        # Keep maintaining workers until the cache gets drained, unless the pool
        # is terminated.
        while thread._state == RUN or (pool._cache and thread._state != TERMINATE):
            pool._maintain_pool()
            time.sleep(0.1)
        # send sentinel to stop workers
        pool._taskqueue.put(None)
        util.debug('worker handler exiting')

    @staticmethod
    def _handle_tasks(taskqueue, put, outqueue, pool, cache):
        thread = threading.current_thread()

        for taskseq, set_length in iter(taskqueue.get, None):
            task = None
            try:
                # iterating taskseq cannot fail
                for task in taskseq:
                    if thread._state:
                        util.debug('task handler found thread._state != RUN')
                        break
                    try:
                        put(task)
                    except Exception as e:
                        # Task failed to pickle; report it on its result object
                        job, idx = task[:2]
                        try:
                            cache[job]._set(idx, (False, e))
                        except KeyError:
                            pass
                else:
                    # for-else: all tasks were submitted without break
                    if set_length:
                        util.debug('doing set_length()')
                        idx = task[1] if task else -1
                        set_length(idx + 1)
                    continue
                break
            finally:
                task = taskseq = job = None
        else:
            util.debug('task handler got sentinel')

        try:
            # tell result handler to finish when cache is empty
            util.debug('task handler sending sentinel to result handler')
            outqueue.put(None)

            # tell workers there is no more work
            util.debug('task handler sending sentinel to workers')
            for p in pool:
                put(None)
        except OSError:
            util.debug('task handler got OSError when sending sentinels')

        util.debug('task handler exiting')

    @staticmethod
    def _handle_results(outqueue, get, cache):
        thread = threading.current_thread()

        while 1:
            try:
                task = get()
            except (OSError, EOFError):
                util.debug('result handler got EOFError/OSError -- exiting')
                return

            if thread._state:
                assert thread._state == TERMINATE
                util.debug('result handler found thread._state=TERMINATE')
                break

            if task is None:
                util.debug('result handler got sentinel')
                break

            job, i, obj = task
            try:
                cache[job]._set(i, obj)
            except KeyError:
                pass
            task = job = obj = None

        # Drain remaining results after the sentinel, unless terminating
        while cache and thread._state != TERMINATE:
            try:
                task = get()
            except (OSError, EOFError):
                util.debug('result handler got EOFError/OSError -- exiting')
                return

            if task is None:
                util.debug('result handler ignoring extra sentinel')
                continue
            job, i, obj = task
            try:
                cache[job]._set(i, obj)
            except KeyError:
                pass
            task = job = obj = None

        if hasattr(outqueue, '_reader'):
            util.debug('ensuring that outqueue is not full')
            # If we don't make room available in outqueue then
            # attempts to add the sentinel (None) to outqueue may
            # block.  There is guaranteed to be no more than 2 sentinels.
            try:
                for i in range(10):
                    if not outqueue._reader.poll():
                        break
                    get()
            except (OSError, EOFError):
                pass

        util.debug('result handler exiting: len(cache)=%s, thread._state=%s',
                   len(cache), thread._state)

    @staticmethod
    def _get_tasks(func, it, size):
        # Yield (func, chunk_tuple) batches of at most `size` items
        it = iter(it)
        while 1:
            x = tuple(itertools.islice(it, size))
            if not x:
                return
            yield (func, x)

    def __reduce__(self):
        raise NotImplementedError(
              'pool objects cannot be passed between processes or pickled'
              )

    def close(self):
        # Stop accepting tasks; outstanding work still completes
        util.debug('closing pool')
        if self._state == RUN:
            self._state = CLOSE
            self._worker_handler._state = CLOSE

    def terminate(self):
        # Abort outstanding work and shut down immediately
        util.debug('terminating pool')
        self._state = TERMINATE
        self._worker_handler._state = TERMINATE
        self._terminate()

    def join(self):
        # Wait for handlers and workers; requires close() or terminate() first
        util.debug('joining pool')
        assert self._state in (CLOSE, TERMINATE)
        self._worker_handler.join()
        self._task_handler.join()
        self._result_handler.join()
        for p in self._pool:
            p.join()

    @staticmethod
    def _help_stuff_finish(inqueue, task_handler, size):
        # task_handler may be blocked trying to put items on inqueue
        util.debug('removing tasks from inqueue until task handler finished')
        inqueue._rlock.acquire()
        while task_handler.is_alive() and inqueue._reader.poll():
            inqueue._reader.recv()
            time.sleep(0)

    @classmethod
    def _terminate_pool(cls, taskqueue, inqueue, outqueue, pool,
                        worker_handler, task_handler, result_handler, cache):
        # this is guaranteed to only be called once
        util.debug('finalizing pool')

        worker_handler._state = TERMINATE
        task_handler._state = TERMINATE

        util.debug('helping task handler/workers to finish')
        cls._help_stuff_finish(inqueue, task_handler, len(pool))

        assert result_handler.is_alive() or len(cache) == 0

        result_handler._state = TERMINATE
        outqueue.put(None)                  # sentinel

        # We must wait for the worker handler to exit before terminating
        # workers because we don't want workers to be restarted behind our back.
        util.debug('joining worker handler')
        if threading.current_thread() is not worker_handler:
            worker_handler.join()

        # Terminate workers which haven't already finished.
        if pool and hasattr(pool[0], 'terminate'):
            util.debug('terminating workers')
            for p in pool:
                if p.exitcode is None:
                    p.terminate()

        util.debug('joining task handler')
        if threading.current_thread() is not task_handler:
            task_handler.join()

        util.debug('joining result handler')
        if threading.current_thread() is not result_handler:
            result_handler.join()

        if pool and hasattr(pool[0], 'terminate'):
            util.debug('joining pool workers')
            for p in pool:
                if p.is_alive():
                    # worker has not yet exited
                    util.debug('cleaning up worker %d' % p.pid)
                    p.join()

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Context-manager exit always terminates (matches stdlib behaviour)
        self.terminate()
#
# Class whose instances are returned by `Pool.apply_async()`
#
class ApplyResult(object):
    """Handle returned by ``Pool.apply_async()``; holds one job's result."""

    def __init__(self, cache, callback, error_callback):
        self._event = threading.Event()
        self._job = next(job_counter)
        self._cache = cache
        self._callback = callback
        self._error_callback = error_callback
        # Register so the result-handler thread can find us by job id
        cache[self._job] = self

    def ready(self):
        # True once the worker's result (or error) has arrived
        return self._event.is_set()

    def successful(self):
        # Only meaningful after ready(); True if no exception was raised
        assert self.ready()
        return self._success

    def wait(self, timeout=None):
        self._event.wait(timeout)

    def get(self, timeout=None):
        """Return the result, re-raising any remote exception;
        raises TimeoutError if not ready within ``timeout``."""
        self.wait(timeout)
        if not self.ready():
            raise TimeoutError
        if self._success:
            return self._value
        else:
            raise self._value

    def _set(self, i, obj):
        # Called from the result-handler thread with (success, value)
        self._success, self._value = obj
        if self._callback and self._success:
            self._callback(self._value)
        if self._error_callback and not self._success:
            self._error_callback(self._value)
        self._event.set()
        del self._cache[self._job]

AsyncResult = ApplyResult       # create alias -- see #17805
#
# Class whose instances are returned by `Pool.map_async()`
#
class MapResult(ApplyResult):
    """Handle returned by ``Pool.map_async()``; collects chunked results
    into a single ordered list."""

    def __init__(self, cache, chunksize, length, callback, error_callback):
        ApplyResult.__init__(self, cache, callback,
                             error_callback=error_callback)
        self._success = True
        self._value = [None] * length
        self._chunksize = chunksize
        if chunksize <= 0:
            # Empty input: there is nothing to wait for
            self._number_left = 0
            self._event.set()
            del cache[self._job]
        else:
            # Number of chunks still outstanding
            self._number_left = length//chunksize + bool(length % chunksize)

    def _set(self, i, success_result):
        self._number_left -= 1
        success, result = success_result
        if success and self._success:
            # Splice this chunk's results into their ordered slots
            self._value[i*self._chunksize:(i+1)*self._chunksize] = result
            if self._number_left == 0:
                if self._callback:
                    self._callback(self._value)
                del self._cache[self._job]
                self._event.set()
        else:
            if not success and self._success:
                # only store first exception
                self._success = False
                self._value = result
            if self._number_left == 0:
                # only consider the result ready once all jobs are done
                if self._error_callback:
                    self._error_callback(self._value)
                del self._cache[self._job]
                self._event.set()
#
# Class whose instances are returned by `Pool.imap()`
#
class IMapIterator(object):
    """Iterator returned by ``Pool.imap()``; yields results in task order,
    buffering out-of-order arrivals in ``_unsorted``."""

    def __init__(self, cache):
        self._cond = threading.Condition(threading.Lock())
        self._job = next(job_counter)
        self._cache = cache
        self._items = collections.deque()   # results ready to be yielded
        self._index = 0                     # index of the next result to yield
        self._length = None                 # total task count, set later
        self._unsorted = {}                 # arrived results waiting their turn
        cache[self._job] = self

    def __iter__(self):
        return self

    def next(self, timeout=None):
        with self._cond:
            try:
                item = self._items.popleft()
            except IndexError:
                if self._index == self._length:
                    raise StopIteration
                # Wait for the result handler to deliver the next item
                self._cond.wait(timeout)
                try:
                    item = self._items.popleft()
                except IndexError:
                    if self._index == self._length:
                        raise StopIteration
                    raise TimeoutError

        success, value = item
        if success:
            return value
        raise value

    __next__ = next                    # XXX

    def _set(self, i, obj):
        # Called from the result-handler thread for task index i
        with self._cond:
            if self._index == i:
                self._items.append(obj)
                self._index += 1
                # Flush any buffered results that are now in order
                while self._index in self._unsorted:
                    obj = self._unsorted.pop(self._index)
                    self._items.append(obj)
                    self._index += 1
                self._cond.notify()
            else:
                self._unsorted[i] = obj

            if self._index == self._length:
                del self._cache[self._job]

    def _set_length(self, length):
        # Called once the task generator is exhausted and the count is known
        with self._cond:
            self._length = length
            if self._index == self._length:
                self._cond.notify()
                del self._cache[self._job]
#
# Class whose instances are returned by `Pool.imap_unordered()`
#
class IMapUnorderedIterator(IMapIterator):
    """Iterator returned by ``Pool.imap_unordered()``; yields results in
    arrival order, so no reordering buffer is needed."""

    def _set(self, i, obj):
        with self._cond:
            self._items.append(obj)
            self._index += 1
            self._cond.notify()
            if self._index == self._length:
                del self._cache[self._job]
#
#
#
class ThreadPool(Pool):
    """Pool variant whose workers are threads rather than processes."""
    # No pickling between threads, so exceptions need no traceback wrapping
    _wrap_exception = False

    @staticmethod
    def Process(*args, **kwds):
        from .dummy import Process
        return Process(*args, **kwds)

    def __init__(self, processes=None, initializer=None, initargs=()):
        Pool.__init__(self, processes, initializer, initargs)

    def _setup_queues(self):
        # Plain thread-safe queues suffice; no pipes or pickling involved
        self._inqueue = queue.Queue()
        self._outqueue = queue.Queue()
        self._quick_put = self._inqueue.put
        self._quick_get = self._outqueue.get

    @staticmethod
    def _help_stuff_finish(inqueue, task_handler, size):
        # put sentinels at head of inqueue to make workers finish
        with inqueue.not_empty:
            inqueue.queue.clear()
            inqueue.queue.extend([None] * size)
            inqueue.not_empty.notify_all()
|
dstndstnREPO_NAMEtractorPATH_START.@tractor_extracted@tractor-main@.circleci@circleci-build-ubuntu18.04@pool.py@.PATH_END.py
|
{
"filename": "model.py",
"repo_name": "wilkinsdr/pylag",
"repo_path": "pylag_extracted/pylag-master/pylag/model.py",
"type": "Python"
}
|
"""
pylag.model
Provides Model classes for fitting functions to data
v1.0 05/07/2019 - D.R. Wilkins
"""
import numpy as np
import lmfit
import copy
def param2array(params, variable_only=False):
    """
    Convert a Parameters object into an array of the parameter values.

    If variable_only is True, only parameters with vary=True are included.
    """
    if variable_only:
        names = [p for p in params if params[p].vary]
    else:
        names = [p for p in params]
    return np.array([params[name].value for name in names])
def array2param(values, params, variable_only=False):
    """
    Create a new Parameters object from a prototype object params,
    with values taken from an array.

    If variable_only is True, the values are assigned only to the
    parameters with vary=True, in order.
    """
    out_params = copy.copy(params)
    if variable_only:
        names = [p for p in out_params if out_params[p].vary]
    else:
        names = list(params)
    for name, val in zip(names, values):
        out_params[name].value = val
    return out_params
def get_param_names(params, variable_only=False):
    """
    Return a list of the parameter names, optionally restricted to the
    free (vary=True) parameters.
    """
    return [k for k in params if not variable_only or params[k].vary]
class Model(object):
    """
    pylag.model.Model

    Base class for deriving models that can be fit to data. Models are defined as a class that inherits from this class.
    The model class contains definitions of all the model parameters, and the means to evaluate the model for some set
    of parameter values. Parameters are handled via lmfit Parameters objects to store parameter values and limits, and
    to enable parameters to be free or frozen during the fit.

    Each model class should define the following functions that override the functions in this base class:

    get_params(): returns a new Parameters() object containing all of the parameters required for this model.

    eval(params, x): evaluates the model for the parameter values stored in params at points x along the x-axis. The
    eval function should return an array containing the model value at each point x.

    Optionally, the class can also provide the method eval_gradient(params, x) to evaluate the derivative of the model
    with respect to each parameter. This function should return a 2-dimensional array of dimension (Nx, Npar), containing
    the derivative at each point x with respect to each parameter. Providing an eval_gradient method enables more
    precise analytic derivatives to be used during fitting, rather than having to evaluate numerical derivatives.
    """
    def __init__(self, component_name=None, **kwargs):
        # Prefix parameter names with the component name so that several
        # components can coexist in one Parameters object
        self.prefix = component_name + "_" if component_name is not None else ''
        self.params = self.get_params(**kwargs)

    def get_params(self):
        # Subclasses must override; should return an lmfit.Parameters object
        raise AssertionError("I should be overridden!")

    def get_param_names(self, par=None, variable_only=False):
        """Return the parameter names; only the free (vary) ones if requested."""
        if par is None:
            par = self.get_params()
        if variable_only:
            return [k for k in par if par[k].vary]
        else:
            return [k for k in par]

    def eval(self, params, x):
        # Subclasses must override (message typo preserved for compatibility)
        raise AssertionError("I should be overriden")

    def eval_gradient(self, params, x):
        # Subclasses must override (message typo preserved for compatibility)
        raise AssertionError("I should be overriden")

    def __call__(self, *args):
        # Allow model instances to be used as callables: model(params, x)
        return self.eval(*args)
class AdditiveModel(Model):
    """Model formed by summing several component Models."""

    def __init__(self, components):
        # components is a list of Model *classes* (not instances)
        # NOTE(review): Model.__init__ is never called here, so self.prefix
        # and self.params are not set on the AdditiveModel itself -- confirm
        # this is intended.
        self.component_names = [c.__name__ for c in components]
        # De-duplicate repeated component class names by appending the index
        for i in range(len(self.component_names)):
            if self.component_names[i] in self.component_names[:i]:
                self.component_names[i] += '_%0d' % i
        self.components = [c(component_name=n) for c, n in zip(components, self.component_names)]

    def get_params(self):
        # Combined Parameters object spanning all components
        params = lmfit.Parameters()
        for c in self.components:
            params = params + c.get_params()
        return params

    def eval(self, params, x):
        # Sum of the component model evaluations at each x
        return np.sum(np.vstack([c.eval(params, x) for c in self.components]), axis=0)

    def eval_gradient(self, params, x):
        # Concatenate the per-component gradients along the parameter axis
        return np.hstack([c.eval_gradient(params, x) for c in self.components])

    def __getitem__(self, item):
        """
        Overload the [] operator to return the specified component number
        """
        return self.components[item]
class Linear(Model):
    """Straight-line model: slope * x + intercept."""

    def get_params(self, slope=1., intercept=0.):
        """Return a fresh Parameters object holding the slope and intercept."""
        pars = lmfit.Parameters()
        pars.add('%sslope' % self.prefix, value=slope, min=-1e10, max=1e10)
        pars.add('%sintercept' % self.prefix, value=intercept, min=-1e10, max=1e10)
        return pars

    def eval(self, params, x):
        """Evaluate slope*x + intercept at the points x."""
        gradient = params['%sslope' % self.prefix].value
        offset = params['%sintercept' % self.prefix].value
        return gradient * x + offset

    def eval_gradient(self, params, x):
        """Derivatives w.r.t. each free parameter, shape (len(x), n_free)."""
        grads = []
        if params['%sslope' % self.prefix].vary:
            grads.append(x)                   # d/d(slope) = x
        if params['%sintercept' % self.prefix].vary:
            grads.append(np.ones_like(x))     # d/d(intercept) = 1
        return np.stack(grads, axis=-1)
class PowerLaw(Model):
    """Power-law model: exp(norm) * x**slope (the norm is stored as a log)."""

    def get_params(self, slope=1., intercept=0.):
        # NOTE(review): the (slope, intercept) signature looks copy-pasted
        # from Linear -- 'intercept' is unused and 'norm' is seeded from
        # 'slope'. Kept as-is for interface compatibility.
        pars = lmfit.Parameters()
        pars.add('%snorm' % self.prefix, value=slope, min=-50, max=50)
        pars.add('%sslope' % self.prefix, value=slope, min=-10, max=10)
        return pars

    def eval(self, params, x):
        """Evaluate exp(norm) * x**slope at the points x."""
        amplitude = np.exp(params['%snorm' % self.prefix].value)
        index = params['%sslope' % self.prefix].value
        return amplitude * x**index

    def eval_gradient(self, params, x):
        """Derivatives w.r.t. (norm, slope), shape (len(x), 2)."""
        amplitude = np.exp(params['%snorm' % self.prefix].value)
        index = params['%sslope' % self.prefix].value
        model = amplitude * x**index
        # norm enters through exp, so d/d(norm) = model; d/d(slope) = model*ln(x)
        return np.stack([model, model * np.log(x)], axis=-1)
class BendingPowerLaw(Model):
    """Bending power law: norm * x**slope1 / (1 + (x/fbend)**(slope1-slope2)).
    The norm is stored as a natural log and the bend frequency as log10."""

    def get_params(self, norm=1., slope1=0., fbend=-5., slope2=-2.):
        params = lmfit.Parameters()
        params.add('%snorm' % self.prefix, value=norm, min=-50, max=50)
        params.add('%sslope1' % self.prefix, value=slope1, min=-2, max=-0)
        params.add('%sfbend' % self.prefix, value=fbend, min=-6, max=-2)
        params.add('%sslope2' % self.prefix, value=slope2, min=-10, max=0)
        return params

    def eval(self, params, x):
        # Transform from the fitted (log) parameterisation
        norm = np.exp(params['%snorm' % self.prefix].value)
        slope1 = params['%sslope1' % self.prefix].value
        fbend = 10. ** params['%sfbend' % self.prefix].value
        slope2 = params['%sslope2' % self.prefix].value
        return norm * x ** slope1 / (1. + (x / fbend) ** (slope1 - slope2))

    def eval_gradient(self, params, x):
        """Analytic derivatives w.r.t. (norm, slope1, fbend, slope2),
        shape (len(x), 4). NOTE(review): the slope1/fbend expressions are
        intricate -- verify against a numerical gradient before relying
        on them."""
        norm = np.exp(params['%snorm' % self.prefix].value)
        slope1 = params['%sslope1' % self.prefix].value
        fbend = 10. ** params['%sfbend' % self.prefix].value
        slope2 = params['%sslope2' % self.prefix].value
        return np.stack([norm * x ** slope1 / (1. + (x / fbend) ** (slope1 - slope2)),
                         norm * x ** slope1 * (np.log(x) + (x / fbend) ** (slope1 - slope2) + np.log(fbend)) / (
                                     (1 + (x / fbend) ** (slope1 - slope2))**2),
                         norm * x ** (2*slope1 - slope2) * fbend ** (slope2 - slope1 - 1) * (slope2 - slope1) / (
                                     (1 + (x / fbend) ** (slope1 - slope2))**2),
                         norm * x ** slope1 * (x / fbend) ** (slope1 - slope2) * (np.log(x) - np.log(fbend)) / (
                                 (1 + (x / fbend) ** (slope1 - slope2)) ** 2)
                         ], axis=-1)
class Lorentzian(Model):
    """Lorentzian (Cauchy) profile scaled by an overall normalisation.

    f(x) = norm * (1/pi) * (width/2) / ((x - centre)**2 + (width/2)**2)

    Sampling spaces: 'norm' is ln(norm); 'centre' and 'width' are log10 of
    the physical values (eval() applies 10**value).
    """
    def get_params(self, norm=1., centre=-4, width=-3.):
        """Return the lmfit Parameters for this model.

        Parameters
        ----------
        norm : float
            Initial ln(normalisation).
        centre : float
            Initial log10 of the line centre (bounds [-6, -2]).
        width : float
            Initial log10 of the line width (bounds [-10, -3]).

        Bug fix: the previous default for 'width' was 1e-3 -- the *linear*
        width -- which lies outside the [-10, -3] bounds of this log10-space
        parameter. The default is now the equivalent log10 value (-3.).
        """
        params = lmfit.Parameters()
        params.add('%snorm' % self.prefix, value=norm, min=-50, max=50)
        params.add('%scentre' % self.prefix, value=centre, min=-6, max=-2)
        params.add('%swidth' % self.prefix, value=width, min=-10, max=-3)
        return params
    def eval(self, params, x):
        # Decode parameters from their sampling spaces, then evaluate the
        # unit-area Lorentzian times norm.
        norm = np.exp(params['%snorm' % self.prefix].value)
        centre = 10. ** params['%scentre' % self.prefix].value
        width = 10. ** params['%swidth' % self.prefix].value
        return norm * (1./np.pi) * 0.5 * width / ((x - centre)**2 + 0.25*width**2)
    def eval_gradient(self, params, x):
        # Partial derivatives stacked along the last axis, one column per
        # parameter in the order (ln norm, log10 centre, log10 width).
        # First column is d f / d ln(norm) = f; the log10 parameters pick up
        # a chain-rule factor of (value * ln 10).
        # NOTE(review): verify the centre/width terms against a numerical
        # gradient before relying on them.
        norm = np.exp(params['%snorm' % self.prefix].value)
        centre = 10. ** params['%scentre' % self.prefix].value
        width = 10. ** params['%swidth' % self.prefix].value
        return np.stack([norm * (1. / np.pi) * 0.5 * width / ((x - centre) ** 2 + 0.25 * width ** 2),
                         centre * np.log(10) * (norm * width / np.pi) * (x - centre) / (
                                 (x - centre) ** 2 + 0.25 * width ** 2) ** 2,
                         width * np.log(10) * norm * (1. / (2. * np.pi)) * (((x - centre) ** 2 + 0.25 * width ** 2) - width ** 2) / (
                                 (x - centre) ** 2 + 0.25 * width ** 2) ** 2
                         ], axis=-1)
class Constant(Model):
    """Model that evaluates to a single fitted constant everywhere."""
    def get_params(self, slope=1., intercept=0.):
        """Create the lmfit Parameters (a single '<prefix>constant' entry)."""
        pars = lmfit.Parameters()
        pars.add('%sconstant' % self.prefix, value=slope, min=-1e10, max=1e10)
        return pars
    def eval(self, params, x):
        """Return an array shaped like x, filled with the constant value."""
        level = params['%sconstant' % self.prefix].value
        return level * np.ones_like(x)
    def eval_gradient(self, params, x):
        """Gradient w.r.t. the constant: 1 everywhere.

        A trailing axis is appended so the shape matches the (n, n_params)
        gradient layout of the other models.
        """
        return np.ones_like(x)[:, np.newaxis]
class LogConstant(Model):
    """Constant model whose level is fitted in log10 space."""
    def get_params(self, slope=1., intercept=0.):
        """Create the lmfit Parameters (a single '<prefix>lgconstant' entry, log10 of the level)."""
        pars = lmfit.Parameters()
        pars.add('%slgconstant' % self.prefix, value=slope, min=-10, max=10)
        return pars
    def eval(self, params, x):
        """Return an array shaped like x, filled with 10**lgconstant."""
        level = 10. ** params['%slgconstant' % self.prefix].value
        return level * np.ones_like(x)
    def eval_gradient(self, params, x):
        """Gradient w.r.t. lgconstant: d(10**p)/dp = 10**p * ln(10).

        A trailing axis is appended so the shape matches the (n, n_params)
        gradient layout of the other models.
        """
        level = 10. ** params['%slgconstant' % self.prefix].value
        return level * np.log(10) * np.ones_like(x)[:, np.newaxis]
|
wilkinsdrREPO_NAMEpylagPATH_START.@pylag_extracted@pylag-master@pylag@model.py@.PATH_END.py
|
{
"filename": "_surfacecolor.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/scatter3d/_surfacecolor.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class SurfacecolorValidator(_plotly_utils.basevalidators.ColorValidator):
    """Validator for the 'surfacecolor' property of scatter3d traces."""
    def __init__(self, plotly_name="surfacecolor", parent_name="scatter3d", **kwargs):
        # Pull the defaults out of kwargs first so explicit overrides win.
        edit_type = kwargs.pop("edit_type", "calc")
        role = kwargs.pop("role", "style")
        super(SurfacecolorValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            role=role,
            **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@scatter3d@_surfacecolor.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "langchain-ai/langchain",
"repo_path": "langchain_extracted/langchain-master/libs/partners/groq/langchain_groq/__init__.py",
"type": "Python"
}
|
from langchain_groq.chat_models import ChatGroq
__all__ = ["ChatGroq"]
|
langchain-aiREPO_NAMElangchainPATH_START.@langchain_extracted@langchain-master@libs@partners@groq@langchain_groq@__init__.py@.PATH_END.py
|
{
"filename": "_textcasesrc.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/funnelarea/hoverlabel/font/_textcasesrc.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class TextcasesrcValidator(_plotly_utils.basevalidators.SrcValidator):
    """Validator for the 'textcasesrc' property of funnelarea hoverlabel fonts."""
    def __init__(
        self,
        plotly_name="textcasesrc",
        parent_name="funnelarea.hoverlabel.font",
        **kwargs,
    ):
        # Pull the default out of kwargs first so an explicit override wins.
        edit_type = kwargs.pop("edit_type", "none")
        super(TextcasesrcValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@funnelarea@hoverlabel@font@_textcasesrc.py@.PATH_END.py
|
{
"filename": "CentroidalVoronoiRelaxation.py",
"repo_name": "LLNL/spheral",
"repo_path": "spheral_extracted/spheral-main/src/NodeGenerators/CentroidalVoronoiRelaxation.py",
"type": "Python"
}
|
# Apply various centroidal Voronoi relaxation methods to optimize NodeGenerators.
from Spheral2d import *
from generateMesh import *
#-------------------------------------------------------------------------------
# Relax nodes on fixed radii.
#-------------------------------------------------------------------------------
class RadialCentroidalRelaxation:
    """Centroidal Voronoi relaxation constrained to fixed radii.

    Each node is moved toward the centroid direction of its Voronoi zone,
    but its distance from `origin` is held fixed, so nodes slide along
    their original radial shells.
    """
    def __init__(self, origin,
                 tolerance = 1.0e-5,
                 maxIterations = 100):
        # origin: Vector about which radii are measured.
        # tolerance: max node displacement below which iteration stops.
        # maxIterations: hard cap on relaxation sweeps.
        self.origin = origin
        self.tolerance = tolerance
        self.maxIterations = maxIterations
        return
    def __call__(self, generator):
        """Relax the positions stored in `generator` (its x/y lists are updated in place)."""
        n = generator.localNumNodes()
        # Temporary NodeList used only to build the polygonal mesh.
        nodes = makeVoidNodeList("temp void nodes",
                                 numInternal = n)
        pos = nodes.positions()
        # Record each node's fixed radius from the origin.
        rnodes = []
        for i in range(n):
            pos[i] = Vector(generator.x[i], generator.y[i])
            rnodes.append((pos[i] - self.origin).magnitude())
        assert len(rnodes) == n
        # Optional bounding box from the generator (currently not passed to
        # generatePolygonalMesh below -- see the None arguments).
        if generator.xmin:
            xmin = Vector(generator.xmin[0], generator.xmin[1])
        else:
            xmin = None
        if generator.xmax:
            xmax = Vector(generator.xmax[0], generator.xmax[1])
        else:
            xmax = None
        # Iterate until we either converge or hit the max iterations.
        maxDelta = 10.0*self.tolerance
        iter = 0
        while maxDelta > self.tolerance and iter < self.maxIterations:
            maxDelta = 0.0
            iter += 1
            # Rebuild the Voronoi (polygonal) mesh from the current positions.
            mesh, void = generatePolygonalMesh([nodes],
                                               xmin = None, # xmin,
                                               xmax = None, # xmax,
                                               generateVoid = False,
                                               removeBoundaryZones = True)
            assert mesh.numZones == n
            for izone in range(n):
                zone = mesh.zone(izone)
                centroid = zone.position()
                # Move along the centroid direction, but keep the node on its
                # original radius about the origin.
                rhat = (centroid - self.origin).unitVector()
                newpos = rnodes[izone]*rhat
                maxDelta = max(maxDelta, (newpos - pos[izone]).magnitude())
                pos[izone] = newpos
            # Global convergence measure across MPI ranks.
            # NOTE(review): `mpi` is not imported explicitly here -- presumably
            # provided by the `from Spheral2d import *` / `from generateMesh
            # import *` star imports; verify.
            maxDelta = mpi.allreduce(maxDelta, mpi.MAX)
            print("Iteration %i : max delta = %g" % (iter, maxDelta))
        # Assign the postions.
        generator.x = [pos[i].x for i in range(nodes.numInternalNodes)]
        generator.y = [pos[i].y for i in range(nodes.numInternalNodes)]
        return
|
LLNLREPO_NAMEspheralPATH_START.@spheral_extracted@spheral-main@src@NodeGenerators@CentroidalVoronoiRelaxation.py@.PATH_END.py
|
{
"filename": "ipkernel.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/ipykernel/py3/ipykernel/inprocess/ipkernel.py",
"type": "Python"
}
|
"""An in-process kernel"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
import logging
import sys
from contextlib import contextmanager
from IPython.core.interactiveshell import InteractiveShellABC
from traitlets import Any, Enum, Instance, List, Type, default
from ipykernel.ipkernel import IPythonKernel
from ipykernel.jsonutil import json_clean
from ipykernel.zmqshell import ZMQInteractiveShell
from ..iostream import BackgroundSocket, IOPubThread, OutStream
from .constants import INPROCESS_KEY
from .socket import DummySocket
# -----------------------------------------------------------------------------
# Main kernel class
# -----------------------------------------------------------------------------
class InProcessKernel(IPythonKernel):
    """An in-process kernel.

    Runs an IPython kernel inside the current process, replacing the usual
    ZMQ sockets with dummy/background equivalents so frontends in the same
    process can talk to it directly.
    """
    # -------------------------------------------------------------------------
    # InProcessKernel interface
    # -------------------------------------------------------------------------
    # The frontends connected to this kernel.
    frontends = List(Instance("ipykernel.inprocess.client.InProcessKernelClient", allow_none=True))
    # The GUI environment that the kernel is running under. This need not be
    # specified for the normal operation for the kernel, but is required for
    # IPython's GUI support (including pylab). The default is 'inline' because
    # it is safe under all GUI toolkits.
    gui = Enum(("tk", "gtk", "wx", "qt", "qt4", "inline"), default_value="inline")
    # Latest reply delivered by a frontend's stdin channel (see _input_request).
    raw_input_str = Any()
    # Replacement stdout/stderr streams used while executing (see _redirected_io).
    stdout = Any()
    stderr = Any()
    # -------------------------------------------------------------------------
    # Kernel interface
    # -------------------------------------------------------------------------
    shell_class = Type(allow_none=True)  # type:ignore[assignment]
    # In-process stand-ins for the kernel's ZMQ sockets.
    _underlying_iopub_socket = Instance(DummySocket, ())
    iopub_thread: IOPubThread = Instance(IOPubThread)  # type:ignore[assignment]
    shell_stream = Instance(DummySocket, ())  # type:ignore[arg-type]
    @default("iopub_thread")
    def _default_iopub_thread(self):
        # Start the IOPub pump thread lazily, on first access.
        thread = IOPubThread(self._underlying_iopub_socket)
        thread.start()
        return thread
    iopub_socket: BackgroundSocket = Instance(BackgroundSocket)  # type:ignore[assignment]
    @default("iopub_socket")
    def _default_iopub_socket(self):
        return self.iopub_thread.background_socket
    stdin_socket = Instance(DummySocket, ())  # type:ignore[assignment]
    def __init__(self, **traits):
        """Initialize the kernel."""
        super().__init__(**traits)
        # Dispatch IOPub messages straight to the in-process frontends.
        self._underlying_iopub_socket.observe(self._io_dispatch, names=["message_sent"])
        if self.shell:
            self.shell.kernel = self
    async def execute_request(self, stream, ident, parent):
        """Override for temporary IO redirection."""
        # Route stdout/stderr through the kernel's streams for the duration
        # of the execution only.
        with self._redirected_io():
            await super().execute_request(stream, ident, parent)
    def start(self):
        """Override registration of dispatchers for streams."""
        # No real sockets to register; just clear the exit flag.
        if self.shell:
            self.shell.exit_now = False
    def _abort_queues(self):
        """The in-process kernel doesn't abort requests."""
    async def _flush_control_queue(self):
        """No need to flush control queues for in-process"""
    def _input_request(self, prompt, ident, parent, password=False):
        """Request a line of input from the frontend that issued `parent`.

        Blocks (processing the frontend's stdin events) until the frontend
        sets raw_input_str, then returns it.
        """
        # Flush output before making the request.
        self.raw_input_str = None
        sys.stderr.flush()
        sys.stdout.flush()
        # Send the input request.
        content = json_clean(dict(prompt=prompt, password=password))
        assert self.session is not None
        msg = self.session.msg("input_request", content, parent)
        # Deliver only to the frontend whose session issued the request.
        for frontend in self.frontends:
            assert frontend is not None
            if frontend.session.session == parent["header"]["session"]:
                frontend.stdin_channel.call_handlers(msg)
                break
        else:
            logging.error("No frontend found for raw_input request")
            return ""
        # Await a response.
        while self.raw_input_str is None:
            frontend.stdin_channel.process_events()
        return self.raw_input_str  # type:ignore[unreachable]
    # -------------------------------------------------------------------------
    # Protected interface
    # -------------------------------------------------------------------------
    @contextmanager
    def _redirected_io(self):
        """Temporarily redirect IO to the kernel."""
        sys_stdout, sys_stderr = sys.stdout, sys.stderr
        try:
            sys.stdout, sys.stderr = self.stdout, self.stderr
            yield
        finally:
            # Always restore the original streams, even on error.
            sys.stdout, sys.stderr = sys_stdout, sys_stderr
    # ------ Trait change handlers --------------------------------------------
    def _io_dispatch(self, change):
        """Called when a message is sent to the IO socket."""
        # Pull the just-sent message back off the dummy socket and fan it out
        # to every connected frontend's iopub channel.
        assert self.iopub_socket.io_thread is not None
        assert self.session is not None
        ident, msg = self.session.recv(self.iopub_socket.io_thread.socket, copy=False)
        for frontend in self.frontends:
            assert frontend is not None
            frontend.iopub_channel.call_handlers(msg)
    # ------ Trait initializers -----------------------------------------------
    @default("log")
    def _default_log(self):
        return logging.getLogger(__name__)
    @default("session")
    def _default_session(self):
        from jupyter_client.session import Session
        return Session(parent=self, key=INPROCESS_KEY)
    @default("shell_class")
    def _default_shell_class(self):
        return InProcessInteractiveShell
    @default("stdout")
    def _default_stdout(self):
        # watchfd=False: no real file descriptor to monitor in-process.
        return OutStream(self.session, self.iopub_thread, "stdout", watchfd=False)
    @default("stderr")
    def _default_stderr(self):
        return OutStream(self.session, self.iopub_thread, "stderr", watchfd=False)
# -----------------------------------------------------------------------------
# Interactive shell subclass
# -----------------------------------------------------------------------------
class InProcessInteractiveShell(ZMQInteractiveShell):
    """An in-process interactive shell.

    The enable_* overrides fall back to the owning kernel's configured GUI
    toolkit whenever no explicit toolkit is supplied.
    """
    kernel: InProcessKernel = Instance(
        "ipykernel.inprocess.ipkernel.InProcessKernel", allow_none=True
    )  # type:ignore[assignment]
    # -------------------------------------------------------------------------
    # InteractiveShell interface
    # -------------------------------------------------------------------------
    def enable_gui(self, gui=None):
        """Enable GUI integration for the kernel."""
        self.active_eventloop = gui if gui else self.kernel.gui
    def enable_matplotlib(self, gui=None):
        """Enable matplotlib integration for the kernel."""
        return super().enable_matplotlib(gui if gui else self.kernel.gui)
    def enable_pylab(self, gui=None, import_all=True, welcome_message=False):
        """Activate pylab support at runtime."""
        return super().enable_pylab(gui if gui else self.kernel.gui,
                                    import_all, welcome_message)
InteractiveShellABC.register(InProcessInteractiveShell)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@ipykernel@py3@ipykernel@inprocess@ipkernel.py@.PATH_END.py
|
{
"filename": "tec.py",
"repo_name": "revoltek/losoto",
"repo_path": "losoto_extracted/losoto-master/losoto/operations/tec.py",
"type": "Python"
}
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import multiprocessing as mp
import numpy as np
import scipy.optimize
from scipy.interpolate import interp1d
from losoto.lib_unwrap import unwrap_2d
from losoto.lib_operations import *
from losoto._logging import logger as logging
logging.debug('Loading TEC module.')
def _run_parser(soltab, parser, step):
    """Read the TEC-step options from the parset and invoke run()."""
    out_name = parser.getstr(step, 'soltabOut', 'tec000')
    ref_ant = parser.getstr(step, 'refAnt', '')
    max_res_flag = parser.getfloat(step, 'maxResidualFlag', 2.5)
    max_res_prop = parser.getfloat(step, 'maxResidualProp', 1.)
    n_cpu = parser.getint('_global', 'ncpu', 0)
    parser.checkSpelling(step, soltab, ['soltabOut', 'refAnt', 'maxResidualFlag', 'maxResidualProp'])
    return run(soltab, out_name, ref_ant, max_res_flag, max_res_prop, n_cpu)
def mod(d):
    """Wrap phase value(s) into the interval [-pi, pi)."""
    two_pi = 2. * np.pi
    return np.mod(d + np.pi, two_pi) - np.pi
_noiseweight = None
def tec_merit_brute(dTEC, freq, phases, weight=True):
    """Scalar merit function for the brute-force dTEC grid search.

    Parameters
    ----------
    dTEC : float
        Trial differential TEC in TECU.
    freq : array of floats
        Frequencies in Hz.
    phases : array of floats
        Observed phases to fit, in rad.
    weight : bool, default True
        If True, weight the residuals by the (noise-based) _noiseweight curve.

    Returns
    -------
    float
        rms of the (optionally weighted) wrapped phase residuals.
    """
    residuals = mod(-8.44797245e9 * dTEC / freq - phases)
    if weight:
        w = _noiseweight(freq)
        w /= np.mean(w)
        residuals = w * residuals
    return np.std(residuals)
def tec_merit(dTEC, freq, phases, weight=True):
    """Residual vector for the least-squares dTEC fit.

    Parameters
    ----------
    dTEC : float
        Trial differential TEC in TECU.
    freq : array of floats
        Frequencies in Hz.
    phases : array of floats
        Observed phases to fit, in rad.
    weight : bool, default True
        If True, weight the residuals by the (noise-based) _noiseweight curve.

    Returns
    -------
    array of floats
        Wrapped (and optionally weighted) phase residuals, one per channel.
    """
    residuals = mod(-8.44797245e9 * dTEC / freq - phases)
    if weight:
        w = _noiseweight(freq)
        w /= np.mean(w)
        residuals = w * residuals
    return residuals
def fit_tec_to_phases(vals, weights, coord, refAnt, maxResidualFlag, maxResidualProp):
    """
    Fit dTEC to phases and frequencies for a range of times.

    For each time step a brute-force grid search locates the global minimum,
    which is then refined with a least-squares fit. When a time step fits
    well, the next step's grid search is narrowed around the current
    solution; a bad fit resets the search to the full default range.

    Parameters
    ----------
    vals: numpy array of floats, phases to fit in rad. Shape (n_times, n_freq)
    weights: numpy array of floats, weights of the phases. Shape (n_times, n_freq)
    coord: dict of coords of current selection. Contains time, freq, ant, (optional: dir)
    refAnt: string, reference antenna
    maxResidualFlag: float, default = 2.5 Maximum residual that is not flagged. 0=don't flag.
    maxResidualProp: float, default = 1. Maximum residual that is propagated. 0=propagate all.

    Returns
    -------
    [fitdTEC, fitweights]: list of numpy arrays of floats, dTEC results in TECU / weights
    """
    # Prepare output arrays
    fitdTEC = np.zeros(len(coord['time']))
    fitweights = np.ones(len(coord['time']))  # all unflagged to start
    # skip refAnt and ants in stationconstraint with refAnt
    # (all-zero phases indicate the antenna is tied to the reference);
    # for those, the zero dTEC / unit weight defaults are returned as-is.
    if coord['ant'] == refAnt or np.all(vals == 0.):
        pass
    else:
        # find flagged times, either fully flagged or less than 10 freq points...
        flagged_t = np.sum(weights, axis=1)
        flagged_t = flagged_t < 10
        flag_frac = np.sum(flagged_t) / len(flagged_t)
        if flag_frac > 0.1:
            logging.warning(f'''Times with less than 10 unflagged freqs: {coord['time'][flagged_t]}: percentage: {flag_frac:.1%}''')
        ranges, Ns = (-0.5, 0.5), 1000  # default range for brute grid minimization, size of grid
        freq = coord['freq'].copy()
        # Iterate times
        for t, (time, phases, w_t) in enumerate(zip(coord['time'],vals,weights)):
            # Use the weights as a boolean mask selecting usable channels.
            w_t = w_t.astype(bool)
            if sum(w_t) < 10:
                # Too few channels: flag this time step and move on.
                fitdTEC[t] = 0.
                fitweights[t] = 0
                continue
            # brute force to find global minimum
            dTEC_gridsearch = scipy.optimize.brute(tec_merit_brute, ranges=(ranges,), Ns=Ns, args=(freq[w_t], phases[w_t]))[0]
            # Refine the grid solution with a least-squares fit.
            result, success = scipy.optimize.leastsq(tec_merit, dTEC_gridsearch, args=(freq[w_t], phases[w_t]))
            # Quality of the refined fit: weighted rms of phase residuals.
            best_residual = tec_merit_brute(result, freq[w_t], phases[w_t])
            # logging.info(f'result {result} cost {best_residual}')
            fitdTEC[t] = result
            if maxResidualFlag == 0 or best_residual < maxResidualFlag:
                fitweights[t] = 1
                if maxResidualProp == 0 or best_residual < maxResidualProp:
                    # Good fit: narrow the next grid search around this solution.
                    ranges = (fitdTEC[t] - 0.05, fitdTEC[t] + 0.05)
                    Ns = 100
                else:
                    # Acceptable but not propagated: reset to the full range.
                    ranges = (-0.5, 0.5)
                    Ns = 1000
            else:
                # high residual, flag and reset initial guess
                if 'dir' in coord.keys():
                    logging.warning('Bad solution for ant: ' + coord['ant'] + '; dir: ' + coord['dir'] + ' (time: ' + str(
                        t) + ', resdiual: ' + str(best_residual) + ').')
                else:
                    logging.warning('Bad solution for ant: ' + coord['ant'] + ' (time: ' + str(t) + ', resdiual: ' + str(
                        best_residual) + ').')
                fitweights[t] = 0
                ranges = (-0.5, 0.5)
                Ns = 1000
            # Debug plot
            # doplot = False
            # if doplot and (coord['ant'] == 'RS509LBA' or coord['ant'] == 'RS210LBA') and t % 50 == 0:
            #     print("Plotting")
            #     if not 'matplotlib' in sys.modules:
            #         import matplotlib as mpl
            #
            #         mpl.rc('figure.subplot', left=0.05, bottom=0.05, right=0.95, top=0.95, wspace=0.22, hspace=0.22)
            #         mpl.use("Agg")
            #     import matplotlib.pyplot as plt
            #
            #     fig = plt.figure()
            #     fig.subplots_adjust(wspace=0)
            #     ax = fig.add_subplot(111)
            #
            #     # plot rm fit
            #     plotd = lambda d, freq: -8.44797245e9 * d / freq
            #     ax.plot(freq, plotd(fitresultd[0], freq[:]), "-", color='purple')
            #     ax.plot(freq, mod(plotd(fitresultd[0], freq[:])), ":", color='purple')
            #
            #     # ax.plot(freq, vals[idx,t], '.b' )
            #     # ax.plot(freq, phaseComb + numjumps * 2*np.pi, 'x', color='purple' )
            #     ax.plot(freq, phaseComb, 'o', color='purple')
            #
            #     residual = mod(plotd(fitd[t], freq[:]) - phaseComb)
            #     ax.plot(freq, residual, '.', color='orange')
            #
            #     ax.set_xlabel('freq')
            #     ax.set_ylabel('phase')
            #     # ax.set_ylim(ymin=-np.pi, ymax=np.pi)
            #
            #     logging.warning('Save pic: ' + str(t) + '_' + coord['ant'] + '.png')
            #     plt.savefig(str(t) + '_' + coord['ant'] + '.png', bbox_inches='tight')
            #     del fig
        if 'dir' in coord.keys():
            logging.info('%s; %s: average tec: %f TECU; std tec: %f TECU' % (coord['ant'], coord['dir'], np.mean(fitdTEC), np.std(fitdTEC)))  # prev. factor of 2? why?
        else:
            logging.info('%s: average tec: %f TECU; std tec: %f TECU' % (coord['ant'], np.mean(fitdTEC), np.std(fitdTEC)))
    return [fitdTEC, fitweights]
def run( soltab, soltabOut, refAnt, maxResidualFlag, maxResidualProp, ncpu ):
    """
    Bruteforce TEC extraction from phase solutions.

    Parameters
    ----------
    soltabOut : str, optional
        output table name (same solset), by deault "tec".
    refAnt : str, optional
        Reference antenna, by default 'auto'.
    maxResidualFlag : float, optional
        Max average residual in radians before flagging datapoint, by default 2.5 If 0: no check.
    maxResidualProp : float, optional
        Max average residual in radians before stop propagating solutions, by default 1. If 0: no check.

    Returns
    -------
    int
        0 on success, 1 on invalid input.
    """
    # TODO: does not work for global selection, i.e. ant=[...] in parset.
    # define weight function for fitting (global scope)
    global _noiseweight
    # Freq/SEFD estimate for LBA lofar2.0, TODO: move to data dir, add lofar outer/inner
    # Columns: frequency [Hz], SEFD [Jy]; used to derive per-channel noise weights.
    data_lba_all = np.array([[2.850000000000000000e+07, 2.994030080626945710e+04],
                             [2.903030303030303121e+07, 2.913874315792467314e+04],
                             [2.956060606060606241e+07, 2.848637311863995637e+04],
                             [3.009090909090908989e+07, 2.793457127624304485e+04],
                             [3.062121212121212110e+07, 2.726112344516822850e+04],
                             [3.115151515151515231e+07, 2.656675093925880356e+04],
                             [3.168181818181817979e+07, 2.597196672786273120e+04],
                             [3.221212121212121099e+07, 2.571364929933567691e+04],
                             [3.274242424242424220e+07, 2.546725999266218423e+04],
                             [3.327272727272727340e+07, 2.514084102040380822e+04],
                             [3.380303030303030461e+07, 2.486516674111211978e+04],
                             [3.433333333333333582e+07, 2.467008078555054453e+04],
                             [3.486363636363635957e+07, 2.456891934402090556e+04],
                             [3.539393939393939078e+07, 2.449906041042739525e+04],
                             [3.592424242424242198e+07, 2.441152557008936856e+04],
                             [3.645454545454545319e+07, 2.435284099484204125e+04],
                             [3.698484848484848440e+07, 2.428130362751187931e+04],
                             [3.751515151515151560e+07, 2.412378187290063579e+04],
                             [3.804545454545454681e+07, 2.398172239664983135e+04],
                             [3.857575757575757802e+07, 2.395988451670273935e+04],
                             [3.910606060606060922e+07, 2.430045490772785342e+04],
                             [3.963636363636364043e+07, 2.471031800485042186e+04],
                             [4.016666666666666418e+07, 2.386110364099146318e+04],
                             [4.069696969696969539e+07, 2.353690296084890724e+04],
                             [4.122727272727272660e+07, 2.336803344857854609e+04],
                             [4.175757575757575780e+07, 2.337549122132260163e+04],
                             [4.228787878787878901e+07, 2.282559852902076818e+04],
                             [4.281818181818181276e+07, 2.288857468625183537e+04],
                             [4.334848484848484397e+07, 2.293901290797947513e+04],
                             [4.387878787878787518e+07, 2.279201399700451293e+04],
                             [4.440909090909090638e+07, 2.263575546276593013e+04],
                             [4.493939393939393759e+07, 2.234381817033494008e+04],
                             [4.546969696969696879e+07, 2.201090887195047253e+04],
                             [4.600000000000000000e+07, 2.167511043603119106e+04],
                             [4.653030303030303121e+07, 2.126023280379924108e+04],
                             [4.706060606060606241e+07, 2.094471500899021703e+04],
                             [4.759090909090909362e+07, 2.076310086716620208e+04],
                             [4.812121212121212482e+07, 2.072452147961255469e+04],
                             [4.865151515151514858e+07, 2.072524703336137827e+04],
                             [4.918181818181817979e+07, 2.024623970337267383e+04],
                             [4.971212121212121099e+07, 2.024902352195448111e+04],
                             [5.024242424242424220e+07, 2.042157956576801007e+04],
                             [5.077272727272727340e+07, 2.061233436890421581e+04],
                             [5.130303030303029716e+07, 2.052460108698517070e+04],
                             [5.183333333333332837e+07, 2.044476653198705753e+04],
                             [5.236363636363635957e+07, 2.029609696197773519e+04],
                             [5.289393939393939078e+07, 2.011980530139794064e+04],
                             [5.342424242424242198e+07, 2.000652821604120982e+04],
                             [5.395454545454545319e+07, 1.972491939080056181e+04],
                             [5.448484848484848440e+07, 1.970762288441187411e+04],
                             [5.501515151515151560e+07, 1.963280298568139551e+04],
                             [5.554545454545454681e+07, 1.947178074763434779e+04],
                             [5.607575757575757802e+07, 1.930311130654076624e+04],
                             [5.660606060606060922e+07, 1.930880388910132751e+04],
                             [5.713636363636363298e+07, 1.896554359589192609e+04],
                             [5.766666666666666418e+07, 1.897491931565966297e+04],
                             [5.819696969696969539e+07, 1.899107079252671974e+04],
                             [5.872727272727272660e+07, 1.897387182282819413e+04],
                             [5.925757575757575780e+07, 1.910503545843373649e+04],
                             [5.978787878787878156e+07, 1.940954548637071275e+04],
                             [6.031818181818181276e+07, 1.984135252611626129e+04],
                             [6.084848484848484397e+07, 2.014750239689876980e+04],
                             [6.137878787878787518e+07, 2.045096111245894645e+04],
                             [6.190909090909090638e+07, 2.102328279043795555e+04],
                             [6.243939393939393759e+07, 2.125787547095409172e+04],
                             [6.296969696969696879e+07, 2.165665026710620077e+04],
                             [6.350000000000000000e+07, 2.219586934836382716e+04],
                             [6.403030303030303121e+07, 2.251618565684030182e+04],
                             [6.456060606060606241e+07, 2.256982609767890244e+04],
                             [6.509090909090908617e+07, 2.299487046699088387e+04],
                             [6.562121212121211737e+07, 2.360399101053923732e+04],
                             [6.615151515151514858e+07, 2.452839877523585892e+04],
                             [6.668181818181817979e+07, 2.475335631368393661e+04],
                             [6.721212121212121844e+07, 2.541818455542022275e+04],
                             [6.774242424242424965e+07, 2.632339058027763167e+04],
                             [6.827272727272728086e+07, 2.722260078658507700e+04],
                             [6.880303030303029716e+07, 2.809248882867760403e+04],
                             [6.933333333333332837e+07, 2.869324085220565030e+04],
                             [6.986363636363635957e+07, 2.937062652000980597e+04],
                             [7.039393939393939078e+07, 2.975876253021111552e+04],
                             [7.092424242424242198e+07, 3.023381387615679705e+04],
                             [7.145454545454545319e+07, 3.078095987549442361e+04],
                             [7.198484848484848440e+07, 3.136289409735724985e+04],
                             [7.251515151515151560e+07, 3.194191655745387106e+04],
                             [7.304545454545454681e+07, 3.256773533922632851e+04],
                             [7.357575757575756311e+07, 3.323538538253790466e+04],
                             [7.410606060606059432e+07, 3.408304345233587082e+04],
                             [7.463636363636362553e+07, 3.532438151289376401e+04],
                             [7.516666666666665673e+07, 3.611054891041758674e+04],
                             [7.569696969696968794e+07, 3.677927807149000728e+04],
                             [7.622727272727271914e+07, 3.742928695890052768e+04],
                             [7.675757575757575035e+07, 3.831019856693004840e+04],
                             [7.728787878787878156e+07, 3.942348853846100246e+04],
                             [7.781818181818181276e+07, 3.970818633546015189e+04],
                             [7.834848484848484397e+07, 4.055220533173037984e+04],
                             [7.887878787878787518e+07, 4.174217174788207922e+04],
                             [7.940909090909090638e+07, 4.371878037581651733e+04],
                             [7.993939393939393759e+07, 4.366688385026387550e+04],
                             [8.046969696969696879e+07, 4.441932939676849492e+04],
                             [8.100000000000000000e+07, 4.513826943235301587e+04]])
    sefd_lba_all, wfreq = data_lba_all[:,1], data_lba_all[:,0]
    # Weight = inverse SEFD, normalised to a maximum of 1.
    wgt = 1 / sefd_lba_all
    wgt /= wgt.max()
    # Below the table range the weight is 0, above it 1 (fill_value=(0, 1)).
    _noiseweight = interp1d(wfreq, wgt, bounds_error=False, fill_value=(0, 1))
    logging.info("Find TEC for soltab: "+soltab.name)
    # input check
    solType = soltab.getType()
    if solType != 'phase':
        logging.warning("Soltab type of "+soltab._v_name+" is of type "+solType+", should be phase. Ignoring.")
        return 1
    if 'pol' in soltab.getAxesNames():
        logging.warning("Soltab with pol axis not supported for TEC extraction. Ignoring.")
        return 1
    if refAnt == '': refAnt = 'auto'
    elif refAnt != 'closest' and refAnt != 'auto' and not refAnt in soltab.getAxisValues('ant', ignoreSelection = True):
        logging.warning('Reference antenna '+refAnt+' not found. Using: atomatic search.')
        refAnt = 'auto'
    # times and ants needs to be complete or selection is much slower
    times = soltab.getAxisValues('time')
    # create new table
    axes = soltab.getAxesNames() # ['time','freq','ant'] or ['time','freq','ant','dir']
    outaxes = axes.copy()
    outaxes.remove('freq') # ['time','ant'] or ['time','ant','dir']
    # Output arrays: one dTEC value (and weight) per remaining axis combination.
    results_dTEC = np.zeros(shape=tuple(soltab.getAxisLen(axisName) for axisName in outaxes))
    results_w = np.ones(shape=tuple(soltab.getAxisLen(axisName) for axisName in outaxes))
    solset = soltab.getSolset()
    # NOTE(review): existence check is hard-coded to 'tec000' while the table
    # is created with the soltabOut name -- confirm behaviour when
    # soltabOut != 'tec000'.
    if 'tec000' in solset.getSoltabNames():
        logging.warning('Soltab tec000 exists. Overwriting...')
        solset.getSoltab('tec000').delete()
    soltabout = solset.makeSoltab(soltype = 'tec', soltabName = soltabOut, axesNames=outaxes, \
                                  axesVals=[soltab.getAxisValues(axisName) for axisName in outaxes], \
                                  vals=np.zeros(shape=tuple(soltab.getAxisLen(axisName) for axisName in outaxes)), \
                                  weights=np.ones(shape=tuple(soltab.getAxisLen(axisName) for axisName in outaxes)) )
    soltabout.addHistory('Created by TEC operation from %s.' % soltab.name)
    # Collect arguments for pool.map()
    args = []
    selections = []
    for vals, weights, coord, selection in soltab.getValuesIter(returnAxes=['time','freq'], weight=True, refAnt=refAnt):
        if len(coord['freq']) < 10:
            logging.error('Delay estimation needs at least 10 frequency channels, preferably distributed over a wide range.')
            return 1
        args.append([vals, weights, coord, refAnt, maxResidualFlag, maxResidualProp])
        selections.append(selection)
    # ncpu == 0 means "use all available cores".
    if ncpu == 0:
        ncpu = nproc()
    with mp.Pool(ncpu) as pool:
        logging.info('Start TEC fitting.')
        results = pool.starmap(fit_tec_to_phases, args)
    # reorder results
    for selection, result in zip(selections,results):
        selection = tuple([axsel for axsel, ax in zip(selection,axes) if ax in outaxes]) # get rid of selection along freq axis
        results_dTEC[selection] = np.resize(result[0], results_dTEC[selection].shape)
        results_w[selection] = np.resize(result[1], results_dTEC[selection].shape)
    # write results
    soltabout.setValues( results_dTEC )
    soltabout.setValues( results_w, weight=True )
    return 0
|
revoltekREPO_NAMElosotoPATH_START.@losoto_extracted@losoto-master@losoto@operations@tec.py@.PATH_END.py
|
{
"filename": "combine_cut_FASTpsrfits_freq_time_splitpol.py",
"repo_name": "qianlivan/RPPPS",
"repo_path": "RPPPS_extracted/RPPPS-master/combine_cut_FASTpsrfits_freq_time_splitpol.py",
"type": "Python"
}
|
import numpy as np
import pyfits
import os
import datetime
import time
import sys
from array import array
import matplotlib as mpl
import matplotlib.pyplot as plt
from pylab import *
##############################################################
# 20161008 adapted from cut_FASTpsrfits_freq_time_splitpol.py
# output 2 pol and pol averaged data
# 20161009 dimension of DAT_OFFS changed from chnum*2 to chnum
# format of DAT_OFFS changed from dataformat3 to dataformat2
# size(float_data)/nline/nchan/npol=nsblk
##############################################################
if (len(sys.argv)<6):
print 'too few inputs!'
print 'example:'
print 'python cut_FASTpsrfits.py startchan endchan startn endn FAST.fits FAST2.fits'
sys.exit()
starttime=datetime.datetime.now()
startfreq=int(sys.argv[1])
endfreq=int(sys.argv[2])
startn=int(sys.argv[3])
endn=int(sys.argv[4])
filename=sys.argv[5]
filename2=sys.argv[6]
fileroot=filename[0:-5]
print fileroot
fileroot2=filename2[0:-5]
print fileroot2
#u19700101=62135683200.0
#==============================================================
# Open the first input PSRFITS file: primary HDU (header0) plus the
# SUBINT binary table (data1/header1).
hdulist = pyfits.open(filename)
hdu0 = hdulist[0]
data0 = hdu0.data
header0 = hdu0.header
print data0
hdu1 = hdulist[1]
data1 = hdu1.data
header1 = hdu1.header
# Observation layout read from file 1; file 2 is assumed to match.
nchan=header0['OBSNCHAN']     # total number of frequency channels
nsblk=header1['NSBLK']        # samples per subint row
npol=header1['NPOL']          # polarisations stored per row
tbin=header1['TBIN']          # sample time [s]
chan_bw=header1['CHAN_BW']    # channel bandwidth [MHz]
nline=header1['NAXIS2']       # number of subint rows in file 1
print header0['OBSBW']
print header0['OBSNCHAN']
#==============================================================
# Open the second input file the same way.
hdulist2 = pyfits.open(filename2)
hdu20 = hdulist2[0]
data20 = hdu20.data
header20 = hdu20.header
print data20
hdu21 = hdulist2[1]
data21 = hdu21.data
header21 = hdu21.header
nchan2=header20['OBSNCHAN']
nsblk2=header21['NSBLK']
npol2=header21['NPOL']
tbin2=header21['TBIN']
chan_bw2=header21['CHAN_BW']
nline2=header21['NAXIS2']
print header20['OBSBW']
print header20['OBSNCHAN']
#==============================================================
chnum=endfreq-startfreq+1     # number of channels kept
#linenum=endn-startn+1
#linenum1=(nline-startn+1)
linenum1=(nline-startn)       # rows taken from the tail of file 1
linenum2=(endn-0+1)           # rows taken from the head of file 2
linenum=linenum1+linenum2     # total output rows
# Rescale the centre frequency to the kept channel range.
# NOTE(review): this assumes OBSFREQ scales linearly with the mean kept
# channel index and that the channel bandwidth is 1 MHz -- confirm against
# the instrument setup.
freq=hdu0.header['OBSFREQ']
print 'hehe',hdu0.header['OBSFREQ']
hdu0.header['OBSFREQ']=((startfreq+endfreq)*1.0/2+1.0)/((nchan-1.0)*1.0/2+1.0)*freq
print 'hehe',hdu0.header['OBSFREQ']
hdu0.header['OBSBW']=chnum*1.0
hdu0.header['OBSNCHAN']=chnum
print hdu0.header['OBSBW']
print hdu0.header['OBSNCHAN']
float_tsubint=np.zeros(linenum)
float_tsubint[0:linenum1]=np.array(data1['TSUBINT'])[startn:nline]
float_tsubint[linenum1:linenum+1]=np.array(data21['TSUBINT'])[0:endn+1]
float_offs_sub=np.zeros(linenum)
float_offs_sub[0:linenum1]=np.array(data1['OFFS_SUB'])[startn:nline]
float_offs_sub[linenum1:linenum]=np.array(data21['OFFS_SUB'])[0:endn+1]
float_lst_sub=np.zeros(linenum)
float_lst_sub[0:linenum1]=np.array(data1['LST_SUB'])[startn:nline]
float_lst_sub[linenum1:linenum]=np.array(data21['LST_SUB'])[0:endn+1]
float_ra_sub=np.zeros(linenum)
float_ra_sub[0:linenum1]=np.array(data1['RA_SUB'])[startn:nline]
float_ra_sub[linenum1:linenum]=np.array(data21['RA_SUB'])[0:endn+1]
float_dec_sub=np.zeros(linenum)
float_dec_sub[0:linenum1]=np.array(data1['DEC_SUB'])[startn:nline]
float_dec_sub[linenum1:linenum]=np.array(data21['DEC_SUB'])[0:endn+1]
float_glon_sub=np.zeros(linenum)
float_glon_sub[0:linenum1]=np.array(data1['GLON_SUB'])[startn:nline]
float_glon_sub[linenum1:linenum]=np.array(data21['GLON_SUB'])[0:endn+1]
float_glat_sub=np.zeros(linenum)
float_glat_sub[0:linenum1]=np.array(data1['GLAT_SUB'])[startn:nline]
float_glat_sub[linenum1:linenum]=np.array(data21['GLAT_SUB'])[0:endn+1]
float_fd_ang=np.zeros(linenum)
float_fd_ang[0:linenum1]=np.array(data1['FD_ANG'])[startn:nline]
float_fd_ang[linenum1:linenum]=np.array(data21['FD_ANG'])[0:endn+1]
float_pos_ang=np.zeros(linenum)
float_pos_ang[0:linenum1]=np.array(data1['POS_ANG'])[startn:nline]
float_pos_ang[linenum1:linenum]=np.array(data21['POS_ANG'])[0:endn+1]
float_par_ang=np.zeros(linenum)
float_par_ang[0:linenum1]=np.array(data1['PAR_ANG'])[startn:nline]
float_par_ang[linenum1:linenum]=np.array(data21['PAR_ANG'])[0:endn+1]
float_tel_az=np.zeros(linenum)
float_tel_az[0:linenum1]=np.array(data1['TEL_AZ'])[startn:nline]
float_tel_az[linenum1:linenum]=np.array(data21['TEL_AZ'])[0:endn+1]
float_tel_zen=np.zeros(linenum)
float_tel_zen[0:linenum1]=np.array(data1['TEL_ZEN'])[startn:nline]
float_tel_zen[linenum1:linenum]=np.array(data21['TEL_ZEN'])[0:endn+1]
float_data=np.array(data1['DATA'])
float_data_2=np.array(data21['DATA'])
temp_float_dat_scl=np.array(data1['DAT_SCL'])
print size(float_data)
print size(temp_float_dat_scl)/npol/nchan
float_dat_freq=np.zeros([linenum,endfreq+1-startfreq])
float_dat_wts=np.zeros([linenum,endfreq+1-startfreq])
float_dat_freq[0:linenum1,:]=np.array(data1['DAT_FREQ'])[startn:nline,startfreq:endfreq+1]
float_dat_freq[linenum1:linenum,:]=np.array(data21['DAT_FREQ'])[0:endn+1,startfreq:endfreq+1]
float_dat_wts[0:linenum1,:]=np.array(data1['DAT_WTS'])[startn:nline,startfreq:endfreq+1]
float_dat_wts[linenum1:linenum,:]=np.array(data21['DAT_WTS'])[0:endn+1,startfreq:endfreq+1]
float_dat_offs=np.zeros([linenum,chnum])
float_dat_scl=np.zeros([linenum,chnum])
float_dat_offs[0:linenum1,:]=np.array(data1['DAT_OFFS'])[startn:nline,startfreq:endfreq+1]
float_dat_offs[linenum1:linenum,:]=np.array(data21['DAT_OFFS'])[0:endn+1,startfreq:endfreq+1]
float_dat_scl[0:linenum1,:]=np.array(data1['DAT_SCL'])[startn:nline,startfreq:endfreq+1]
float_dat_scl[linenum1:linenum,:]=np.array(data21['DAT_SCL'])[0:endn+1,startfreq:endfreq+1]
print size(float_dat_freq),size(np.array(data1['DAT_FREQ']))
float_data2=np.zeros([linenum,nsblk*chnum])
float_data3=np.zeros([linenum,nsblk*chnum])
float_data_tot=np.zeros([linenum,nsblk*chnum])
dataformat=str(nsblk*chnum)+'B'
print dataformat,size(float_data2),linenum,nline
for i in range(linenum1):
temp_data=float_data[i+startn,:].reshape([size(float_data[i+startn,:])/nchan/npol,npol*nchan])
temp_data2=temp_data[:,startfreq:endfreq+1].reshape(size(float_data[i+startn,:])/nchan/npol*chnum)
temp_data3=temp_data[:,nchan+startfreq:nchan+endfreq+1].reshape(size(float_data[i+startn,:])/nchan/npol*chnum)
temp_data_tot=(temp_data2+temp_data3)/2
float_data2[i, :]=temp_data2
float_data3[i, :]=temp_data3
float_data_tot[i, :]=temp_data_tot
for i in range(linenum2):
temp_data=float_data_2[i,:].reshape([size(float_data_2[i,:])/nchan/npol,npol*nchan])
temp_data2=temp_data[:,startfreq:endfreq+1].reshape(size(float_data_2[i,:])/nchan/npol*chnum)
temp_data3=temp_data[:,nchan+startfreq:nchan+endfreq+1].reshape(size(float_data_2[i,:])/nchan/npol*chnum)
temp_data_tot=(temp_data2+temp_data3)/2
float_data2[i+linenum1, :]=temp_data2
float_data3[i+linenum1, :]=temp_data3
float_data_tot[i+linenum1, :]=temp_data_tot
#dataformat=str(size(float_data)/nline/nchan*chnum)+'E'
dataformat2=str(chnum)+'E'
#dataformat3=str(chnum*2)+'E'
#dimformat='(1,'+str(chnum)+',1,2500)'
#print dataformat,dataformat2,dataformat3
print dataformat,dataformat2
#column1_data = pyfits.Column(name='INDEXVAL',format='1D',array=float_indexval)
column2_data = pyfits.Column(name='TSUBINT',format='1D',array=float_tsubint,unit='s')
column3_data = pyfits.Column(name='OFFS_SUB',format='1D',array=float_offs_sub,unit='s')
column4_data = pyfits.Column(name='LST_SUB',format='1D',array=float_lst_sub,unit='s')
column5_data = pyfits.Column(name='RA_SUB',format='1D',array=float_ra_sub,unit='deg')
column6_data = pyfits.Column(name='DEC_SUB',format='1D',array=float_dec_sub,unit='deg')
column7_data = pyfits.Column(name='GLON_SUB',format='1D',array=float_glon_sub,unit='deg')
column8_data = pyfits.Column(name='GLAT_SUB',format='1D',array=float_glat_sub,unit='deg')
column9_data = pyfits.Column(name='FD_ANG',format='1E',array=float_fd_ang,unit='deg')
column10_data = pyfits.Column(name='POS_ANG',format='1E',array=float_pos_ang,unit='deg')
column11_data = pyfits.Column(name='PAR_ANG',format='1E',array=float_par_ang,unit='deg')
column12_data = pyfits.Column(name='TEL_AZ',format='1E',array=float_tel_az,unit='deg')
column13_data = pyfits.Column(name='TEL_ZEN',format='1E',array=float_tel_zen,unit='deg')
#column14_data = pyfits.Column(name='AUX_DM',format='1E',array=float_aux_dm)
#column15_data = pyfits.Column(name='AUX_RM',format='1E',array=float_aux_rm)
#column16_data = pyfits.Column(name='DAT_FREQ',format=dataformat2,array=float_dat_freq)
column16_data = pyfits.Column(name='DAT_FREQ',format=dataformat2,array=float_dat_freq,unit='deg')
column17_data = pyfits.Column(name='DAT_WTS',format=dataformat2,array=float_dat_wts,unit='deg')
column18_data = pyfits.Column(name='DAT_OFFS',format=dataformat2,array=float_dat_offs,unit='deg')
column19_data = pyfits.Column(name='DAT_SCL',format=dataformat2,array=float_dat_scl,unit='MHz')
column20_data = pyfits.Column(name='DATA',format=dataformat,array=float_data2,unit='Jy')
#column20_data = pyfits.Column(name='DATA',format=dataformat,array=float_data2,unit='Jy')
print size(float_data2),size(float_data)
column20_data_2 = pyfits.Column(name='DATA',format=dataformat,array=float_data3,unit='Jy')
column20_data_tot = pyfits.Column(name='DATA',format=dataformat,array=float_data_tot,unit='Jy')
table_hdu = pyfits.new_table([column2_data,column3_data,column4_data,column5_data,column6_data,column7_data,column8_data,column9_data,column10_data,column11_data,column12_data,column13_data,column16_data,column17_data,column18_data,column19_data,column20_data])
table_hdu.header.append(('INT_TYPE','TIME','Time axis (TIME, BINPHSPERI, BINLNGASC, etc)'))
table_hdu.header.append(('INT_UNIT','SEC','Unit of time axis (SEC, PHS (0-1),DEG)'))
table_hdu.header.append(('SCALE','FluxDec','Intensiy units (FluxDec/RefFlux/Jansky)'))
table_hdu.header.append(('NPOL',1,'Nr of polarisations'))
table_hdu.header.append(('POL_TYPE','AABB','Polarisation identifier (e.g., AABBCRCI, AA+BB)'))
table_hdu.header.append(('TBIN',tbin,'[s] Time per bin or sample'))
table_hdu.header.append(('NBIN',1,'Nr of bins (PSR/CAL mode; else 1)'))
table_hdu.header.append(('NBIN_PRD',0,'Nr of bins/pulse period (for gated data)'))
table_hdu.header.append(('PHS_OFFS',0.0,'Phase offset of bin 0 for gated data'))
table_hdu.header.append(('NBITS',8,'Nr of bits/datum (SEARCH mode "X" data, else 1)'))
table_hdu.header.append(('NSUBOFFS',0,'Subint offset (Contiguous SEARCH-mode files)'))
table_hdu.header.append(('NCHAN',chnum,'Number of channels/sub-bands in this file'))
table_hdu.header.append(('CHAN_BW',chan_bw,'[MHz] Channel/sub-band width'))
table_hdu.header.append(('NCHNOFFS',0,'Channel/sub-band offset for split files'))
table_hdu.header.append(('NSBLK',nsblk,'Samples/row (SEARCH mode, else 1)'))
table_hdu.header.append(('EXTNAME','SUBINT ','name of this binary table extension'))
table_hdu2 = pyfits.new_table([column2_data,column3_data,column4_data,column5_data,column6_data,column7_data,column8_data,column9_data,column10_data,column11_data,column12_data,column13_data,column16_data,column17_data,column18_data,column19_data,column20_data_2])
table_hdu2.header.append(('INT_TYPE','TIME','Time axis (TIME, BINPHSPERI, BINLNGASC, etc)'))
table_hdu2.header.append(('INT_UNIT','SEC','Unit of time axis (SEC, PHS (0-1),DEG)'))
table_hdu2.header.append(('SCALE','FluxDec','Intensiy units (FluxDec/RefFlux/Jansky)'))
table_hdu2.header.append(('NPOL',1,'Nr of polarisations'))
table_hdu2.header.append(('POL_TYPE','AABB','Polarisation identifier (e.g., AABBCRCI, AA+BB)'))
table_hdu2.header.append(('TBIN',tbin,'[s] Time per bin or sample'))
table_hdu2.header.append(('NBIN',1,'Nr of bins (PSR/CAL mode; else 1)'))
table_hdu2.header.append(('NBIN_PRD',0,'Nr of bins/pulse period (for gated data)'))
table_hdu2.header.append(('PHS_OFFS',0.0,'Phase offset of bin 0 for gated data'))
table_hdu2.header.append(('NBITS',8,'Nr of bits/datum (SEARCH mode "X" data, else 1)'))
table_hdu2.header.append(('NSUBOFFS',0,'Subint offset (Contiguous SEARCH-mode files)'))
table_hdu2.header.append(('NCHAN',chnum,'Number of channels/sub-bands in this file'))
table_hdu2.header.append(('CHAN_BW',chan_bw,'[MHz] Channel/sub-band width'))
table_hdu2.header.append(('NCHNOFFS',0,'Channel/sub-band offset for split files'))
table_hdu2.header.append(('NSBLK',nsblk,'Samples/row (SEARCH mode, else 1)'))
table_hdu2.header.append(('EXTNAME','SUBINT ','name of this binary table extension'))
table_hdu3 = pyfits.new_table([column2_data,column3_data,column4_data,column5_data,column6_data,column7_data,column8_data,column9_data,column10_data,column11_data,column12_data,column13_data,column16_data,column17_data,column18_data,column19_data,column20_data_tot])
table_hdu3.header.append(('INT_TYPE','TIME','Time axis (TIME, BINPHSPERI, BINLNGASC, etc)'))
table_hdu3.header.append(('INT_UNIT','SEC','Unit of time axis (SEC, PHS (0-1),DEG)'))
table_hdu3.header.append(('SCALE','FluxDec','Intensiy units (FluxDec/RefFlux/Jansky)'))
table_hdu3.header.append(('NPOL',1,'Nr of polarisations'))
table_hdu3.header.append(('POL_TYPE','AABB','Polarisation identifier (e.g., AABBCRCI, AA+BB)'))
table_hdu3.header.append(('TBIN',tbin,'[s] Time per bin or sample'))
table_hdu3.header.append(('NBIN',1,'Nr of bins (PSR/CAL mode; else 1)'))
table_hdu3.header.append(('NBIN_PRD',0,'Nr of bins/pulse period (for gated data)'))
table_hdu3.header.append(('PHS_OFFS',0.0,'Phase offset of bin 0 for gated data'))
table_hdu3.header.append(('NBITS',8,'Nr of bits/datum (SEARCH mode "X" data, else 1)'))
table_hdu3.header.append(('NSUBOFFS',0,'Subint offset (Contiguous SEARCH-mode files)'))
table_hdu3.header.append(('NCHAN',chnum,'Number of channels/sub-bands in this file'))
table_hdu3.header.append(('CHAN_BW',chan_bw,'[MHz] Channel/sub-band width'))
table_hdu3.header.append(('NCHNOFFS',0,'Channel/sub-band offset for split files'))
table_hdu3.header.append(('NSBLK',nsblk,'Samples/row (SEARCH mode, else 1)'))
table_hdu3.header.append(('EXTNAME','SUBINT ','name of this binary table extension'))
# Assemble the output files.
# NOTE(review): the name `hdulist2` is reused here and shadows the open
# handle of the second input file; this looks harmless only because the
# input data were already copied into arrays above -- confirm.
hdulist2 = pyfits.HDUList([hdu0,table_hdu])
# Writing of the single-polarisation output files is currently disabled:
#outname1=fileroot+'_'+fileroot2+'_pol1_'+sys.argv[1]+'_'+sys.argv[2]+'_'+sys.argv[3]+'_'+sys.argv[4]+'.fits'
#rmcomm1='rm -f '+outname1
#os.system(rmcomm1)
#hdulist2.writeto(outname1)
#hdulist3 = pyfits.HDUList([hdu0,table_hdu2])
#outname2=fileroot+'_'+fileroot2+'_pol2_'+sys.argv[1]+'_'+sys.argv[2]+'_'+sys.argv[3]+'_'+sys.argv[4]+'.fits'
#rmcomm2='rm -f '+outname2
#os.system(rmcomm2)
#hdulist3.writeto(outname2)
# Only the polarisation-averaged table is written.
hdulist4 = pyfits.HDUList([hdu0,table_hdu3])
#outname3=fileroot+'_'+fileroot2+'_tot_'+sys.argv[1]+'_'+sys.argv[2]+'_'+sys.argv[3]+'_'+sys.argv[4]+'.fits'
outname3="combined.fits"
# Remove any previous output first, presumably because writeto() would
# refuse to overwrite an existing file -- TODO confirm for this pyfits
# version.
rmcomm3='rm -f '+outname3
os.system(rmcomm3)
hdulist4.writeto(outname3)
# Report total wall-clock run time.
print '--------------------------------------------'
print ' Finished! '
endtime=datetime.datetime.now()
print 'START:',starttime
print 'END:',endtime
duration=endtime-starttime
print 'DURATION:',duration.seconds,' sec'
|
qianlivanREPO_NAMERPPPSPATH_START.@RPPPS_extracted@RPPPS-master@combine_cut_FASTpsrfits_freq_time_splitpol.py@.PATH_END.py
|
{
"filename": "weaviate.py",
"repo_name": "langchain-ai/langchain",
"repo_path": "langchain_extracted/langchain-master/libs/langchain/langchain/retrievers/self_query/weaviate.py",
"type": "Python"
}
|
from typing import TYPE_CHECKING, Any

from langchain._api import create_importer

if TYPE_CHECKING:
    # Imported only for static type checkers; at runtime the symbol is
    # resolved lazily through `_import_attribute` below.
    from langchain_community.query_constructors.weaviate import WeaviateTranslator

# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
    # Old name (this module) -> new home in langchain-community.
    "WeaviateTranslator": "langchain_community.query_constructors.weaviate",
}

# Importer that emits a deprecation warning and redirects lookups of the
# names above to their new module.
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
    """Resolve module attribute *name* through the deprecation importer."""
    attribute = _import_attribute(name)
    return attribute
__all__ = ["WeaviateTranslator"]
|
langchain-aiREPO_NAMElangchainPATH_START.@langchain_extracted@langchain-master@libs@langchain@langchain@retrievers@self_query@weaviate.py@.PATH_END.py
|
{
"filename": "_trirefine.py",
"repo_name": "matplotlib/matplotlib",
"repo_path": "matplotlib_extracted/matplotlib-main/lib/matplotlib/tri/_trirefine.py",
"type": "Python"
}
|
"""
Mesh refinement for triangular grids.
"""
import numpy as np
from matplotlib import _api
from matplotlib.tri._triangulation import Triangulation
import matplotlib.tri._triinterpolate
class TriRefiner:
    """
    Abstract base class for classes implementing mesh refinement.

    A TriRefiner encapsulates a Triangulation object and provides tools for
    mesh refinement and interpolation.

    Derived classes must implement:

    - ``refine_triangulation(return_tri_index=False, **kwargs)`` , where
      the optional keyword arguments *kwargs* are defined in each
      TriRefiner concrete implementation, and which returns:

      - a refined triangulation,
      - optionally (depending on *return_tri_index*), for each
        point of the refined triangulation: the index of
        the initial triangulation triangle to which it belongs.

    - ``refine_field(z, triinterpolator=None, **kwargs)``, where:

      - *z* array of field values (to refine) defined at the base
        triangulation nodes,
      - *triinterpolator* is an optional `~matplotlib.tri.TriInterpolator`,
      - the other optional keyword arguments *kwargs* are defined in
        each TriRefiner concrete implementation;

      and which returns (as a tuple) a refined triangular mesh and the
      interpolated values of the field at the refined triangulation nodes.
    """

    def __init__(self, triangulation):
        # Reject anything that is not a Triangulation up front.
        _api.check_isinstance(Triangulation, triangulation=triangulation)
        self._triangulation = triangulation
class UniformTriRefiner(TriRefiner):
    """
    Uniform mesh refinement by recursive subdivisions.

    Parameters
    ----------
    triangulation : `~matplotlib.tri.Triangulation`
        The encapsulated triangulation (to be refined)

    See Also
    --------
    :class:`~matplotlib.tri.CubicTriInterpolator` and
    :class:`~matplotlib.tri.TriAnalyzer`.
    """

    def __init__(self, triangulation):
        super().__init__(triangulation)

    def refine_triangulation(self, return_tri_index=False, subdiv=3):
        """
        Compute a uniformly refined triangulation *refi_triangulation* of
        the encapsulated :attr:`triangulation`.

        This function refines the encapsulated triangulation by splitting each
        father triangle into 4 child sub-triangles built on the edges midside
        nodes, recursing *subdiv* times.  In the end, each triangle is hence
        divided into ``4**subdiv`` child triangles.

        Parameters
        ----------
        return_tri_index : bool, default: False
            Whether an index table indicating the father triangle index of
            each point is returned.
        subdiv : int, default: 3
            Recursion level for the subdivision.
            Each triangle is divided into ``4**subdiv`` child triangles;
            hence, the default results in 64 refined subtriangles for each
            triangle of the initial triangulation.

        Returns
        -------
        refi_triangulation : `~matplotlib.tri.Triangulation`
            The refined triangulation.
        found_index : int array
            Index of the initial triangulation containing triangle, for each
            point of *refi_triangulation*.
            Returned only if *return_tri_index* is set to True.
        """
        refi_triangulation = self._triangulation
        ntri = refi_triangulation.triangles.shape[0]

        # Computes the triangulation ancestors numbers in the reference
        # triangulation.
        ancestors = np.arange(ntri, dtype=np.int32)
        for _ in range(subdiv):
            refi_triangulation, ancestors = self._refine_triangulation_once(
                refi_triangulation, ancestors)
        refi_npts = refi_triangulation.x.shape[0]
        refi_triangles = refi_triangulation.triangles

        # Now we compute found_index table if needed
        if return_tri_index:
            # We have to initialize found_index with -1 because some nodes
            # may very well belong to no triangle at all, e.g., in case of
            # Delaunay Triangulation with DuplicatePointWarning.
            found_index = np.full(refi_npts, -1, dtype=np.int32)
            tri_mask = self._triangulation.mask
            if tri_mask is None:
                found_index[refi_triangles] = np.repeat(ancestors,
                                                        3).reshape(-1, 3)
            else:
                # There is a subtlety here: we want to avoid whenever possible
                # that refined points container is a masked triangle (which
                # would result in artifacts in plots).
                # So we impose the numbering from masked ancestors first,
                # then overwrite it with unmasked ancestor numbers.
                ancestor_mask = tri_mask[ancestors]
                found_index[refi_triangles[ancestor_mask, :]
                            ] = np.repeat(ancestors[ancestor_mask],
                                          3).reshape(-1, 3)
                found_index[refi_triangles[~ancestor_mask, :]
                            ] = np.repeat(ancestors[~ancestor_mask],
                                          3).reshape(-1, 3)
            return refi_triangulation, found_index
        else:
            return refi_triangulation

    def refine_field(self, z, triinterpolator=None, subdiv=3):
        """
        Refine a field defined on the encapsulated triangulation.

        Parameters
        ----------
        z : (npoints,) array-like
            Values of the field to refine, defined at the nodes of the
            encapsulated triangulation.  (``n_points`` is the number of
            points in the initial triangulation)
        triinterpolator : `~matplotlib.tri.TriInterpolator`, optional
            Interpolator used for field interpolation.  If not specified,
            a `~matplotlib.tri.CubicTriInterpolator` will be used.
        subdiv : int, default: 3
            Recursion level for the subdivision.
            Each triangle is divided into ``4**subdiv`` child triangles.

        Returns
        -------
        refi_tri : `~matplotlib.tri.Triangulation`
            The returned refined triangulation.
        refi_z : 1D array of length: *refi_tri* node count.
            The returned interpolated field (at *refi_tri* nodes).
        """
        if triinterpolator is None:
            # Default interpolator; the attribute is reachable thanks to the
            # module-level ``import matplotlib.tri._triinterpolate``.
            interp = matplotlib.tri.CubicTriInterpolator(
                self._triangulation, z)
        else:
            _api.check_isinstance(matplotlib.tri.TriInterpolator,
                                  triinterpolator=triinterpolator)
            interp = triinterpolator
        refi_tri, found_index = self.refine_triangulation(
            subdiv=subdiv, return_tri_index=True)
        refi_z = interp._interpolate_multikeys(
            refi_tri.x, refi_tri.y, tri_index=found_index)[0]
        return refi_tri, refi_z

    @staticmethod
    def _refine_triangulation_once(triangulation, ancestors=None):
        """
        Refine a `.Triangulation` by splitting each triangle into 4
        child-masked_triangles built on the edges midside nodes.

        Masked triangles, if present, are also split, but their children
        returned masked.

        If *ancestors* is not provided, returns only a new triangulation:
        child_triangulation.

        If the array-like key table *ancestor* is given, it shall be of shape
        (ntri,) where ntri is the number of *triangulation* masked_triangles.
        In this case, the function returns
        (child_triangulation, child_ancestors)
        child_ancestors is defined so that the 4 child masked_triangles share
        the same index as their father: child_ancestors.shape = (4 * ntri,).
        """
        x = triangulation.x
        y = triangulation.y

        # According to tri.triangulation doc:
        #         neighbors[i, j] is the triangle that is the neighbor
        #         to the edge from point index masked_triangles[i, j] to point
        #         index masked_triangles[i, (j+1)%3].
        neighbors = triangulation.neighbors
        triangles = triangulation.triangles
        npts = np.shape(x)[0]
        ntri = np.shape(triangles)[0]
        if ancestors is not None:
            ancestors = np.asarray(ancestors)
            if np.shape(ancestors) != (ntri,):
                raise ValueError(
                    "Incompatible shapes provide for "
                    "triangulation.masked_triangles and ancestors: "
                    f"{np.shape(triangles)} and {np.shape(ancestors)}")

        # Initiating tables refi_x and refi_y of the refined triangulation
        # points
        # hint: each apex is shared by 2 masked_triangles except the borders.
        borders = np.sum(neighbors == -1)
        added_pts = (3*ntri + borders) // 2
        refi_npts = npts + added_pts
        refi_x = np.zeros(refi_npts)
        refi_y = np.zeros(refi_npts)

        # First part of refi_x, refi_y is just the initial points
        refi_x[:npts] = x
        refi_y[:npts] = y

        # Second part contains the edge midside nodes.
        # Each edge belongs to 1 triangle (if border edge) or is shared by 2
        # masked_triangles (interior edge).
        # We first build 2 * ntri arrays of edge starting nodes (edge_elems,
        # edge_apexes); we then extract only the masters to avoid overlaps.
        # The so-called 'master' is the triangle with biggest index
        # The 'slave' is the triangle with lower index
        # (can be -1 if border edge)
        # For slave and master we will identify the apex pointing to the edge
        # start
        edge_elems = np.tile(np.arange(ntri, dtype=np.int32), 3)
        edge_apexes = np.repeat(np.arange(3, dtype=np.int32), ntri)
        edge_neighbors = neighbors[edge_elems, edge_apexes]
        mask_masters = (edge_elems > edge_neighbors)

        # Identifying the "masters" and adding to refi_x, refi_y vec
        masters = edge_elems[mask_masters]
        apex_masters = edge_apexes[mask_masters]
        # Midside node = average of the edge's two end points.
        x_add = (x[triangles[masters, apex_masters]] +
                 x[triangles[masters, (apex_masters+1) % 3]]) * 0.5
        y_add = (y[triangles[masters, apex_masters]] +
                 y[triangles[masters, (apex_masters+1) % 3]]) * 0.5
        refi_x[npts:] = x_add
        refi_y[npts:] = y_add

        # Building the new masked_triangles; each old masked_triangles hosts
        # 4 new masked_triangles
        # there are 6 pts to identify per 'old' triangle, 3 new_pt_corner and
        # 3 new_pt_midside
        new_pt_corner = triangles

        # What is the index in refi_x, refi_y of point at middle of apex
        # iapex of elem ielem ?
        # If ielem is the apex master: simple count, given the way refi_x was
        # built.
        # If ielem is the apex slave: yet we do not know; but we will soon
        # using the neighbors table.
        new_pt_midside = np.empty([ntri, 3], dtype=np.int32)
        cum_sum = npts
        for imid in range(3):
            mask_st_loc = (imid == apex_masters)
            n_masters_loc = np.sum(mask_st_loc)
            elem_masters_loc = masters[mask_st_loc]
            new_pt_midside[:, imid][elem_masters_loc] = np.arange(
                n_masters_loc, dtype=np.int32) + cum_sum
            cum_sum += n_masters_loc

        # Now dealing with slave elems.
        # for each slave element we identify the master and then the inode
        # once slave_masters is identified, slave_masters_apex is such that:
        # neighbors[slaves_masters, slave_masters_apex] == slaves
        mask_slaves = np.logical_not(mask_masters)
        slaves = edge_elems[mask_slaves]
        slaves_masters = edge_neighbors[mask_slaves]
        diff_table = np.abs(neighbors[slaves_masters, :] -
                            np.outer(slaves, np.ones(3, dtype=np.int32)))
        slave_masters_apex = np.argmin(diff_table, axis=1)
        slaves_apex = edge_apexes[mask_slaves]
        new_pt_midside[slaves, slaves_apex] = new_pt_midside[
            slaves_masters, slave_masters_apex]

        # Builds the 4 child masked_triangles
        child_triangles = np.empty([ntri*4, 3], dtype=np.int32)
        child_triangles[0::4, :] = np.vstack([
            new_pt_corner[:, 0], new_pt_midside[:, 0],
            new_pt_midside[:, 2]]).T
        child_triangles[1::4, :] = np.vstack([
            new_pt_corner[:, 1], new_pt_midside[:, 1],
            new_pt_midside[:, 0]]).T
        child_triangles[2::4, :] = np.vstack([
            new_pt_corner[:, 2], new_pt_midside[:, 2],
            new_pt_midside[:, 1]]).T
        child_triangles[3::4, :] = np.vstack([
            new_pt_midside[:, 0], new_pt_midside[:, 1],
            new_pt_midside[:, 2]]).T
        child_triangulation = Triangulation(refi_x, refi_y, child_triangles)

        # Builds the child mask
        if triangulation.mask is not None:
            child_triangulation.set_mask(np.repeat(triangulation.mask, 4))

        if ancestors is None:
            return child_triangulation
        else:
            return child_triangulation, np.repeat(ancestors, 4)
|
matplotlibREPO_NAMEmatplotlibPATH_START.@matplotlib_extracted@matplotlib-main@lib@matplotlib@tri@_trirefine.py@.PATH_END.py
|
{
"filename": "use_case_38_1L2S_qflux.py",
"repo_name": "rpoleski/MulensModel",
"repo_path": "MulensModel_extracted/MulensModel-master/examples/use_cases/use_case_38_1L2S_qflux.py",
"type": "Python"
}
|
"""
Fit a binary source event. Allow the flux ratio to be freely fit for KMTC data,
but then constrained for other datasets in the same band.

For example, suppose there is a short-term anomaly that is only covered by KMTC
data. Then, for a binary source fit, the KMTC data constrain q_flux but the
other datasets do not. However, for a self-consistent fit, fits to KMTA and
KMTS data must still track the contribution from the second source because even
if it doesn't *appear* to contribute to the other datasets, it might. Possibly
this is not something you would ever want to do In Real Life, but the point is,
you could if you wanted to.

This use case is not functional. To make it functional, someone needs to track
down an event with appropriate data.
"""
import MulensModel as mm

# Everything below is illustrative only; it stops here until real data exist.
raise NotImplementedError('Needs fake data.')

# define some fake data: one generic photometry file plus KMT I- and V-band
# files from the three sites (C, A, S).
files = ['phot.dat', 'KMTC_I.pysis', 'KMTA_I.pysis', 'KMTS_I.pysis',
         'KMTC_V.pysis', 'KMTA_V.pysis', 'KMTS_V.pysis']
bandpasses = ['I'] * 4 + ['V'] * 3   # bandpass of each file, in order
kwargs = {'phot_fmt': 'mag', 'usecols': range(3)}
datasets = [mm.MulensData(file_name=file_, bandpass=bandpass, **kwargs)
            for (file_, bandpass) in zip(files, bandpasses)]

# define the model: two sources (separate t_0, u_0 pairs) sharing one t_E.
binary_source_model = mm.Model(
    {'t_0_1': 2459000.0, 'u_0_1': 1.5, 't_0_2': 2459007.0, 'u_0_2': 0.01,
     't_E': 30.})
# Event subclass that ties the flux ratios of sibling datasets to KMTC.
class MyEvent(mm.Event):
    def fit_fluxes(self):
        """
        Fit the two source fluxes freely for the KMTC reference datasets
        (index 1 for I band, index 4 for V band), then constrain the flux
        ratio of every other dataset to the value obtained from the KMTC
        reference in the same bandpass.
        """
        self.fits = []
        reference_index = {'I': 1, 'V': 4}  # This simplifies the loop below.
        reference_fits = {
            idx: mm.FitData(model=self.model, dataset=self.datasets[idx])
            for idx in reference_index.values()}
        for index, dataset in enumerate(self.datasets):
            if index in reference_fits:
                self.fits.append(reference_fits[index])
                continue
            ratio = reference_fits[
                reference_index[dataset.bandpass]].source_flux_ratio
            self.fits.append(mm.FitData(model=self.model, dataset=dataset,
                                        fix_source_flux_ratio=ratio))
# Fit the fluxes and report goodness of fit plus the flux solutions.
event = MyEvent(model=binary_source_model, datasets=datasets)
print(event.chi2)
print(event.source_fluxes)
print(event.blend_fluxes)
|
rpoleskiREPO_NAMEMulensModelPATH_START.@MulensModel_extracted@MulensModel-master@examples@use_cases@use_case_38_1L2S_qflux.py@.PATH_END.py
|
{
"filename": "Scaling-Crossbar.io.md",
"repo_name": "crossbario/crossbar",
"repo_path": "crossbar_extracted/crossbar-master/docs-old/pages/administration/production/Scaling-Crossbar.io.md",
"type": "Markdown"
}
|
title: Scaling Crossbar.io
toc: [Documentation, Administration, Going to Production, Scaling Crossbar.io]
# Scaling Crossbar.io
The following discusses Crossbar.io scalability in terms of
* scale-up: utilizing faster and more cores on a single machine
* scale-out: utilizing multiple machines
and with regard to
* scaling WAMP application components
* scaling WAMP routing
## Scaling Application Components
Crossbar.io can host WAMP application components (which connect to WAMP routers) and supports scale-up and scale-out.
Application components are run in worker processes, and hence multiple cores on a single machine can be utilized by starting multiple, functionally different components or multiple instances of the same component.
Application components can also be spread across multiple machines, all connecting to the same router. Doing so allows you to scale-out and utilize the resources of multiple machines.
Above features already work today with the current Crossbar.io release.
## Scaling Routers
A Crossbar.io router worker process can manage multiple, independent realms and a single Crossbar.io node can run multiple router worker processes managing independent realms.
A *single* Crossbar.io router worker process already scales to 100-200k concurrently active connections.
You can utilize multiple cores on one machine for routing by starting *multiple* router worker processes, each managing *independent* realms.
The same works for scale-out, by running router workers on different machines, all managing different, independent realms.
Above features already work today with the current Crossbar.io release.
However, if you need to scale beyond 100-200k concurrently active connections on a *single realm*, this is not yet possible today.
For this, router workers will need to work together as a single logical router, managing the same realm. This feature is under development and currently scheduled for Q1/2016.
|
crossbarioREPO_NAMEcrossbarPATH_START.@crossbar_extracted@crossbar-master@docs-old@pages@administration@production@Scaling-Crossbar.io.md@.PATH_END.py
|
{
"filename": "rotation_measure_gauss.ipynb",
"repo_name": "me-manu/gammaALPs",
"repo_path": "gammaALPs_extracted/gammaALPs-master/docs/tutorials/rotation_measure_gauss.ipynb",
"type": "Jupyter Notebook"
}
|
[](https://colab.research.google.com/github/me-manu/gammaALPs/blob/master/docs/tutorials/rotation_measure_gauss.ipynb)
# Calculating the coherence length and rotation measure for Gaussian turbulent field
This tutorial demonstrates how to compute the coherence length and rotation measure of a magnetic field with Gaussian turbulence, using the example of the Perseus cluster with the central radio galaxy NGC 1275. The assumed B-field environments are the same as in <a href="http://inspirehep.net/record/1432667">Ajello et al. (2016)</a>.
If you haven't installed `gammaALPs` already, you can do so using `pip`. Just uncomment the line below:
```python
#!pip install gammaALPs
```
First some imports:
```python
from gammaALPs.core import Source, ALP, ModuleList
import numpy as np
import matplotlib.pyplot as plt
import time
from scipy.integrate import simps
from scipy.stats import norm
```
Next we set the source properties (redshift and sky coordinates), the ALP, and the energy range.
We only need those to init the `ModuleList` later -- we won't actually run the photon-ALP conversion probability calculation here.
```python
ngc1275 = Source(z=0.017559, ra='03h19m48.1s', dec='+41d30m42s')
alp = ALP(m=1., g=1.)
```
```python
ml = ModuleList(alp, ngc1275)
```
We add the propagation within the intra-cluster Gaussian turbulent field with the same values as in <a href="http://inspirehep.net/record/1432667">Ajello et al. (2016)</a>.
```python
ml.add_propagation("ICMGaussTurb",
0, # position of module counted from the source.
nsim=10, # number of random B-field realizations
B0=10., # rms of B field in muG
n0=3.9e-2, # normalization of electron density in cm-3
n2=4.05e-3, # second normalization of electron density, see Churazov et al. 2003, Eq. 4 on cm-3
r_abell=500., # extension of the cluster in kpc
r_core=80., # electron density parameter, see Churazov et al. 2003, Eq. 4 in kpc
r_core2=280., # electron density parameter, see Churazov et al. 2003, Eq. 4 in kpc
beta=1.2, # electron density parameter, see Churazov et al. 2003, Eq. 4
beta2=0.58, # electron density parameter, see Churazov et al. 2003, Eq. 4
eta=0.5, # scaling of B-field with electron denstiy
kL=0.18, # maximum turbulence scale in kpc^-1, taken from A2199 cool-core cluster, see Vacca et al. 2012
kH=9., # minimum turbulence scale, taken from A2199 cool-core cluster, see Vacca et al. 2012
q=-2.80, # turbulence spectral index, taken from A2199 cool-core cluster, see Vacca et al. 2012
seed=0 # random seed for reproducability, set to None for random seed.
)
```
[0;36menvirons.py:[0;35m 431[0;0m --- [1;36mINFO[1;0m: Using inputted chi
We peek at the transversal magnetic field and the electron density:
```python
plt.plot(ml.modules["ICMGaussTurb"].r,
ml.modules["ICMGaussTurb"].B * np.sin(ml.modules["ICMGaussTurb"].psi),
lw=1)
plt.plot(ml.modules["ICMGaussTurb"].r,
ml.modules["ICMGaussTurb"].B * np.cos(ml.modules["ICMGaussTurb"].psi),
lw=1, ls = '--')
plt.ylabel('$B$ field ($\mu$G)')
plt.xlabel('$r$ (kpc)')
```
Text(0.5, 0, '$r$ (kpc)')

```python
plt.loglog(ml.modules["ICMGaussTurb"].r,ml.modules["ICMGaussTurb"].nel * 1e-3)
plt.ylabel('$n_\mathrm{el}$ (cm$^{-3}$)')
plt.xlabel('$r$ (kpc)')
```
Text(0.5, 0, '$r$ (kpc)')

## Spatial correlation and coherence length
The `gammaALPs.bfields.Bgaussian` class has methods to calculate the spatial correlation of the magnetic field and the rotation measure. We can access these methods through the magnetic field model, available as `ml.modules['ICMGaussTurb'].Bfield_model`.
The spatial correlation $C(x_3) = \langle B_\perp(\vec{x}) B_\perp(\vec{x} + x_3 \vec{e}_3)\rangle$ of the transversal magnetic field along the line of sight $z$ is computed like this:
```python
z = np.linspace(0.,50.,1000) # distance in kpc from cluster center
c = ml.modules["ICMGaussTurb"].Bfield_model.spatial_correlation(z)
plt.plot(z, c / c[0])
plt.xlabel("$z$ (kpc)")
plt.ylabel("$C(z) / C(0)$")
plt.grid(True)
```

This is turn can be used to calculate the coherence length of the field,
$$ \Lambda_C = \frac{1}{C(0)} \int\limits_0^\infty C(z)dz. $$
```python
z = np.linspace(0.,1e3,1000) # distance in kpc from cluster center
c = ml.modules["ICMGaussTurb"].Bfield_model.spatial_correlation(z)
Lambda_c = simps(c, z) / c[0]
print ("Coherence length of the field is Lambda_C = {0:.3e} kpc".format(Lambda_c))
```
Coherence length of the field is Lambda_C = 1.492e+00 kpc
## Calculate the rotation measure of the field
The rotation measure describes the rotation of the polarization vector due to the propagation of an electromagnetic wave through a medium with magnetic field and an electron plasma.
The change in polarization angle $\Delta\phi$ is proportional to the square of the wavelength and the rotation measure $\mathrm{RM}$ which is given by the integral over the magnetic field parallel to the propagation direction multiplied with the electron density:
$$ \mathrm{RM} \approx 812\,\mathrm{rad}\,\mathrm{m}^{-2} \int\limits_0^z dz' B_{||}(z') n_\mathrm{el}(z'). $$
We can calculate the $\mathrm{RM}$ through the `ml.modules['ICMGaussTurb'].Bfield_model.rotation_measure` function which computes $B_{||}$ for a requested number `nsim` of random realizations.
In addition to the line of sight we're interested in, we also have to provide the electron density along $z$ and the scaling of the magnetic field with the electron density, i.e. the factor $(n_\mathrm{el}(z) / n_\mathrm{el}(z=0))^\eta$. The latter can be calculated through the `ml.modules["ICMGaussTurb"].nel_model.Bscale` function.
We calculate the electron density and the scaling:
```python
n_el = ml.modules["ICMGaussTurb"].nel
Bscale = ml.modules["ICMGaussTurb"].nel_model.Bscale(ml.modules["ICMGaussTurb"].r)
```
And provide this to the `rotation_measure` function. The loop over the realizations is rather slow, so we calculate the $\mathrm{RM}$ for only 500 random realizations:
```python
ml.modules["ICMGaussTurb"].Bfield_model.seed = 0
t1 = time.time()
nsim=500
rm = ml.modules["ICMGaussTurb"].Bfield_model.rotation_measure(ml.modules["ICMGaussTurb"].r,
n_el=n_el,
Bscale=Bscale,
nsim=nsim)
t2 = time.time()
print("Calculating RM for {0:d} realizations took {1:.2f} seconds".format(nsim, t2 - t1))
```
Calculating RM for 500 realizations took 17.96 seconds
Finally, we plot the histogram of the $\mathrm{RM}$ values. Since we're assuming a purely turbulent field, we expect a distribution that peaks close to zero and we can compare the spread to the $\mathrm{RM}$ values reported by <a href="https://ui.adsabs.harvard.edu/abs/2006MNRAS.368.1500T/abstract">Taylor et al. (2006)</a> who found RM values between 6500 and 7500 $\mathrm{rad}\,\mathrm{m}^{-2}$.
```python
n, bins, _ = plt.hist(np.sort((rm)), bins=30, density=True, label="Simulated RM")
mean = np.mean(rm) # mean
var = np.var(rm) # variance
print("RM mean +/- sqrt(var) in rad m^-2: {0:.2f} +/- {1:.2f}".format(mean, np.sqrt(var)))
plt.plot(bins, norm.pdf(bins, loc=mean, scale=np.sqrt(var)),
lw=2,
label="Gaussian Fit\n$\mu = {0:.2f}$\n$\sigma={1:.2f}$".format(mean, np.sqrt(var)))
plt.legend()
plt.gca().tick_params(labelleft=False, left=False, right=False, top=False)
plt.xlabel("Rotation Measure (rad m${}^{-2}$)")
plt.ylabel("Density")
```
RM mean +/- sqrt(var) in rad m^-2: 49.62 +/- 2707.52
Text(0, 0.5, 'Density')

With our chosen $B$ field, $\sigma(\mathrm{RM}) \sim 2700\,\mathrm{rad}\,\mathrm{m}^{-2}$.
```python
```
|
me-manuREPO_NAMEgammaALPsPATH_START.@gammaALPs_extracted@gammaALPs-master@docs@tutorials@rotation_measure_gauss.ipynb@.PATH_END.py
|
{
"filename": "file_leff.py",
"repo_name": "grand-mother/grand",
"repo_path": "grand_extracted/grand-main/grand/io/file_leff.py",
"type": "Python"
}
|
from __future__ import annotations
from dataclasses import dataclass, fields
from logging import getLogger
from pathlib import Path
from typing import Union, cast
from numbers import Number
import os.path as osp
import numpy
from grand.io import io_node as io
__all__ = ["DataTable", "TabulatedAntennaModel"]
logger = getLogger(__name__)
@dataclass
class DataTable:
    """Tabulated antenna response on a (frequency, phi, theta) grid.

    Holds the antenna impedance (``resistance``, ``reactance``) and the
    effective length split into modulus/phase for the theta and phi
    polarisations.  Phases are stored in degrees; radian copies are
    derived once in ``__post_init__``.
    """

    frequency: Union[Number, numpy.ndarray]
    theta: Union[Number, numpy.ndarray]
    phi: Union[Number, numpy.ndarray]
    resistance: Union[Number, numpy.ndarray]
    reactance: Union[Number, numpy.ndarray]
    leff_theta: Union[Number, numpy.ndarray]
    phase_theta: Union[Number, numpy.ndarray]
    leff_phi: Union[Number, numpy.ndarray]
    phase_phi: Union[Number, numpy.ndarray]

    def __post_init__(self):
        # Fix: the original file defined __post_init__ twice with identical
        # bodies; the first definition was silently shadowed by the second.
        # Only one definition is kept.
        logger.info(f"size phase {self.phase_theta.shape}")
        # Cache radian versions of the phases for downstream computations.
        self.phase_theta_rad = numpy.deg2rad(self.phase_theta)
        self.phase_phi_rad = numpy.deg2rad(self.phase_phi)

    def dump(self, node: io.DataNode) -> None:
        """Write every dataclass field to *node* as float32 ("f4")."""
        for field in fields(self):
            node.write(field.name, getattr(self, field.name), dtype="f4")

    @classmethod
    def load(cls, node: io.DataNode) -> DataTable:
        """Read every dataclass field back from *node* and build a table."""
        data = {}
        for field in fields(cls):
            data[field.name] = node.read(field.name)
        return DataTable(**data)
@dataclass
class TabulatedAntennaModel(object):
    """Antenna effective-length model backed by a :class:`DataTable`.

    Attributes
    ----------
    table:
        Tabulated antenna response (impedance and effective length).
    n_file:
        Basename of the file the model was loaded from; stays "TBD"
        until :meth:`load` has been called.
    """

    table: DataTable
    n_file: ... = "TBD"

    def __str__(self):
        ret = f"TabulatedAntennaModel, shape freq {self.table.frequency.shape}"
        ret += f"\nleff_theta: {self.table.leff_theta.shape} {self.table.leff_theta.dtype}"
        return ret

    def dump(self, destination: Union[str, Path, io.DataNode]) -> None:
        """Serialize the table into an already-open node or a new file."""
        # isinstance() instead of `type() ==` so DataNode subclasses are
        # accepted as well (PEP 8 recommendation).
        if isinstance(destination, io.DataNode):
            node = cast(io.DataNode, destination)
            self.table.dump(node)
        else:
            path = cast(Union[Path, str], destination)
            with io.open(path, "w") as node:
                self.table.dump(node)

    @classmethod
    def load(cls, source: Union[str, Path, io.DataNode]) -> TabulatedAntennaModel:
        """Load a model from an open :class:`io.DataNode`, a ``.npy`` dump
        or a grand data file, dispatching on the source type and suffix."""
        if isinstance(source, io.DataNode):
            source = cast(io.DataNode, source)
            filename = f"{source.filename}:{source.path}"
            loader = "_load_from_node"
        else:
            source = cast(Union[Path, str], source)
            filename = f"{source}:/"
            source = Path(source)
            if source.suffix == ".npy":
                loader = "_load_from_numpy"
            else:
                loader = "_load_from_datafile"
        # Fix: interpolate the previously-unused `filename` into the log
        # message; the original logged the literal text "(unknown)" while
        # `filename` was computed and then never used.
        logger.info(f"Loading tabulated antenna model from {filename}")
        load = getattr(cls, loader)
        self = load(source)
        # NOTE(review): when `source` is a DataNode, osp.basename() expects a
        # path-like object -- confirm DataNode supports that conversion.
        self.n_file = osp.basename(source)
        t = self.table
        n = t.frequency.size * t.theta.size * t.phi.size
        logger.info(f"Loaded {n} entries from {filename}")
        return self

    @classmethod
    def _load_from_datafile(cls, path: Union[Path, str]) -> TabulatedAntennaModel:
        """Open *path* as a grand data file and read the table from its root."""
        with io.open(path) as root:
            return cls._load_from_node(root)

    @classmethod
    def _load_from_node(cls, node: io.DataNode) -> TabulatedAntennaModel:
        """Build a model from a node holding a serialized :class:`DataTable`."""
        return cls(table=DataTable.load(node))

    @classmethod
    def _load_from_numpy(cls, path: Union[Path, str]) -> TabulatedAntennaModel:
        """Build a model from a raw NumPy dump of the simulation output.

        The dump is a single array packing, per frequency row:
        impedance (R, X), angles (theta, phi) and the effective-length
        modulus/phase for both polarisations on a flattened (phi, theta)
        grid, which is reshaped to (n_freq, n_phi, n_theta).
        """
        f, R, X, theta, phi, lefft, leffp, phaset, phasep = numpy.load(path)
        n_f = f.shape[0]
        n_theta = len(numpy.unique(theta[0, :]))
        n_phi = int(R.shape[1] / n_theta)
        shape = (n_f, n_phi, n_theta)
        dtype = "f4"
        f = f[:, 0].astype(dtype) * 1.0e6  # MHz --> Hz
        theta = theta[0, :n_theta].astype(dtype)  # deg
        phi = phi[0, ::n_theta].astype(dtype)  # deg
        R = R.reshape(shape).astype(dtype)  # Ohm
        X = X.reshape(shape).astype(dtype)  # Ohm
        lefft = lefft.reshape(shape).astype(dtype)  # m
        leffp = leffp.reshape(shape).astype(dtype)  # m
        # RK TODO: Make sure going from rad to deg does not affect calculations somewhere else.
        phaset = phaset.reshape(shape).astype(dtype)  # deg
        phasep = phasep.reshape(shape).astype(dtype)  # deg
        t = DataTable(
            frequency=f,
            theta=theta,
            phi=phi,
            resistance=R,
            reactance=X,
            leff_theta=lefft,
            phase_theta=phaset,
            leff_phi=leffp,
            phase_phi=phasep,
        )
        return cls(table=t)
|
grand-motherREPO_NAMEgrandPATH_START.@grand_extracted@grand-main@grand@io@file_leff.py@.PATH_END.py
|
{
"filename": "kde_plot4.py",
"repo_name": "scipy/scipy",
"repo_path": "scipy_extracted/scipy-main/doc/source/tutorial/stats/plots/kde_plot4.py",
"type": "Python"
}
|
from functools import partial
import numpy as np
from scipy import stats
import matplotlib.pyplot as plt
def my_kde_bandwidth(obj, fac=1./5):
    """Scott's Rule bandwidth, shrunk by a constant factor.

    Follows the ``bw_method`` callable protocol of
    ``scipy.stats.gaussian_kde``: *obj* is the KDE instance, which
    exposes the sample count ``n`` and the dimensionality ``d``.
    """
    exponent = -1. / (obj.d + 4)
    return np.power(obj.n, exponent) * fac
# Two-component Gaussian mixture sample: a broad mode at -2 (175 points)
# and a narrow mode at +2 (50 points).  No seed is set, so the draw
# differs between runs.
loc1, scale1, size1 = (-2, 1, 175)
loc2, scale2, size2 = (2, 0.2, 50)
x2 = np.concatenate([np.random.normal(loc=loc1, scale=scale1, size=size1),
                     np.random.normal(loc=loc2, scale=scale2, size=size2)])

# Evaluation grid extended one unit beyond the sample range.
x_eval = np.linspace(x2.min() - 1, x2.max() + 1, 500)

# Four KDEs: default Scott bandwidth, Silverman's rule, and two custom
# bandwidths (Scott scaled by 0.2 and 0.5) via the bw_method callable.
kde = stats.gaussian_kde(x2)
kde2 = stats.gaussian_kde(x2, bw_method='silverman')
kde3 = stats.gaussian_kde(x2, bw_method=partial(my_kde_bandwidth, fac=0.2))
kde4 = stats.gaussian_kde(x2, bw_method=partial(my_kde_bandwidth, fac=0.5))

# Exact mixture density for comparison, each component weighted by its
# fraction of the total sample.
pdf = stats.norm.pdf
bimodal_pdf = pdf(x_eval, loc=loc1, scale=scale1) * float(size1) / x2.size + \
              pdf(x_eval, loc=loc2, scale=scale2) * float(size2) / x2.size

fig = plt.figure(figsize=(8, 6))
ax = fig.add_subplot(111)

# Rug plot of the samples, the four KDE estimates and the true density.
ax.plot(x2, np.zeros(x2.shape), 'b+', ms=12)
ax.plot(x_eval, kde(x_eval), 'k-', label="Scott's Rule")
ax.plot(x_eval, kde2(x_eval), 'b-', label="Silverman's Rule")
ax.plot(x_eval, kde3(x_eval), 'g-', label="Scott * 0.2")
ax.plot(x_eval, kde4(x_eval), 'c-', label="Scott * 0.5")
ax.plot(x_eval, bimodal_pdf, 'r--', label="Actual PDF")

ax.set_xlim([x_eval.min(), x_eval.max()])
ax.legend(loc=2)
ax.set_xlabel('x')
ax.set_ylabel('Density')

plt.show()
|
scipyREPO_NAMEscipyPATH_START.@scipy_extracted@scipy-main@doc@source@tutorial@stats@plots@kde_plot4.py@.PATH_END.py
|
{
"filename": "_transition.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/graph_objs/layout/slider/_transition.py",
"type": "Python"
}
|
from plotly.basedatatypes import BaseLayoutHierarchyType as _BaseLayoutHierarchyType
import copy as _copy
class Transition(_BaseLayoutHierarchyType):
    """Transition settings (duration and easing) for a layout slider."""

    # class properties
    # --------------------
    _parent_path_str = "layout.slider"
    _path_str = "layout.slider.transition"
    _valid_props = {"duration", "easing"}

    # duration
    # --------
    @property
    def duration(self):
        """
        Sets the duration of the slider transition

        The 'duration' property is a number and may be specified as:
          - An int or float in the interval [0, inf]

        Returns
        -------
        int|float
        """
        return self["duration"]

    @duration.setter
    def duration(self, val):
        self["duration"] = val

    # easing
    # ------
    @property
    def easing(self):
        """
        Sets the easing function of the slider transition

        The 'easing' property is an enumeration that may be specified as:
          - One of the following enumeration values:
                ['linear', 'quad', 'cubic', 'sin', 'exp', 'circle',
                'elastic', 'back', 'bounce', 'linear-in', 'quad-in',
                'cubic-in', 'sin-in', 'exp-in', 'circle-in', 'elastic-in',
                'back-in', 'bounce-in', 'linear-out', 'quad-out',
                'cubic-out', 'sin-out', 'exp-out', 'circle-out',
                'elastic-out', 'back-out', 'bounce-out', 'linear-in-out',
                'quad-in-out', 'cubic-in-out', 'sin-in-out', 'exp-in-out',
                'circle-in-out', 'elastic-in-out', 'back-in-out',
                'bounce-in-out']

        Returns
        -------
        Any
        """
        return self["easing"]

    @easing.setter
    def easing(self, val):
        self["easing"] = val

    # Self properties description
    # ---------------------------
    @property
    def _prop_descriptions(self):
        return """\
        duration
            Sets the duration of the slider transition
        easing
            Sets the easing function of the slider transition
        """

    def __init__(self, arg=None, duration=None, easing=None, **kwargs):
        """
        Construct a new Transition object

        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of
            :class:`plotly.graph_objs.layout.slider.Transition`
        duration
            Sets the duration of the slider transition
        easing
            Sets the easing function of the slider transition

        Returns
        -------
        Transition
        """
        super(Transition, self).__init__("transition")
        # Short-circuit when constructed internally as a child node.
        if "_parent" in kwargs:
            self._parent = kwargs["_parent"]
            return

        # Normalise `arg` into a plain dict that can safely be popped from.
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            arg = _copy.copy(arg)
        else:
            raise ValueError(
                """\
The first argument to the plotly.graph_objs.layout.slider.Transition
constructor must be a dict or
an instance of :class:`plotly.graph_objs.layout.slider.Transition`"""
            )

        self._skip_invalid = kwargs.pop("skip_invalid", False)
        self._validate = kwargs.pop("_validate", True)

        # Populate each known property: an explicit keyword argument takes
        # precedence over the value carried inside `arg`.
        for prop, explicit in (("duration", duration), ("easing", easing)):
            value = arg.pop(prop, None)
            if explicit is not None:
                value = explicit
            if value is not None:
                self[prop] = value

        # Forward anything left over (unknown keys) to the base handler.
        self._process_kwargs(**dict(arg, **kwargs))

        self._skip_invalid = False
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@graph_objs@layout@slider@_transition.py@.PATH_END.py
|
{
"filename": "reproduce_pysm2_dust_pol.ipynb",
"repo_name": "galsci/pysm",
"repo_path": "pysm_extracted/pysm-main/docs/preprocess-templates/reproduce_pysm2_dust_pol.ipynb",
"type": "Jupyter Notebook"
}
|
# Reproduce PySM 2 small scales for dust polarization
The purpose of this notebook is to reproduce the analysis described in the [PySM 2 paper](https://arxiv.org/pdf/1608.02841.pdf) to prepare the input templates used in the Galactic dust and synchrotron models.
In summary we take input template maps from Planck or other sources, smooth them to remove noise and add small scale gaussian fluctuations.
```python
import os
os.environ["OMP_NUM_THREADS"] = "64"
```
```python
import os
import healpy as hp
import matplotlib.pyplot as plt
import numpy as np
from astropy.io import fits
%matplotlib inline
```
```python
hp.disable_warnings()
```
```python
plt.style.use("seaborn-talk")
```
```python
import pysm3 as pysm
import pysm3.units as u
```
```python
nside = 1024
lmax = 3 * nside - 1
```
## Masks
Using the Planck 80% Galactic mask (the WMAP ones is for synchrotron).
The Planck foreground mask is also available with apodization of 2 or 5 degrees, here is the one with no apodization
```python
planck_map_filename = "HFI_Mask_GalPlane-apo0_2048_R2.00.fits"
```
```python
if not os.path.exists(planck_map_filename):
!wget https://irsa.ipac.caltech.edu/data/Planck/release_2/ancillary-data/masks/$planck_map_filename
```
```python
fits.open(planck_map_filename)[1].header
```
XTENSION= 'BINTABLE' /Written by IDL: Tue Dec 9 16:45:44 2014
BITPIX = 8 /
NAXIS = 2 /Binary table
NAXIS1 = 8 /Number of bytes per row
NAXIS2 = 50331648 /Number of rows
PCOUNT = 0 /Random parameter count
GCOUNT = 1 /Group count
TFIELDS = 8 /Number of columns
COMMENT
COMMENT *** End of mandatory fields ***
COMMENT
EXTVER = 1 /Extension version
DATE = '2014-12-09' /Creation date
COMMENT
COMMENT *** Column names ***
COMMENT
TTYPE1 = 'GAL020 ' / 20% sky coverage
TTYPE2 = 'GAL040 ' / 40% sky coverage
TTYPE3 = 'GAL060 ' / 60% sky coverage
TTYPE4 = 'GAL070 ' / 70% sky coverage
TTYPE5 = 'GAL080 ' / 80% sky coverage
TTYPE6 = 'GAL090 ' / 90% sky coverage
TTYPE7 = 'GAL097 ' / 97% sky coverage
TTYPE8 = 'GAL099 ' / 99% sky coverage
COMMENT
COMMENT *** Column formats ***
COMMENT
TFORM1 = 'B ' /
TFORM2 = 'B ' /
TFORM3 = 'B ' /
TFORM4 = 'B ' /
TFORM5 = 'B ' /
TFORM6 = 'B ' /
TFORM7 = 'B ' /
TFORM8 = 'B ' /
COMMENT
COMMENT *** Planck params ***
COMMENT
EXTNAME = 'GAL-MASK' / Extension name
PIXTYPE = 'HEALPIX ' /
COORDSYS= 'GALACTIC' / Coordinate system
ORDERING= 'NESTED ' / Healpix ordering
NSIDE = 2048 / Healpix Nside
FIRSTPIX= 0 / First pixel # (0 based)
LASTPIX = 50331647 / Last pixel # (0 based)
FILENAME= 'HFI_Mask_GalPlane-apo0_2048_R2.00.fits' / FITS filename
PROCVER = 'test ' / Product version
COMMENT ------------------------------------------------------------------------
COMMENT Galactic emission masks, based on 353 GHz emission, apodized, and for
COMMENT various fractions of skycoverage. For general purpose usage.
COMMENT ------------------------------------------------------------------------
COMMENT For further details see Planck Explanatory Supplement at:
COMMENT http://www.cosmos.esa.int/wikiSI/planckpla
COMMENT ------------------------------------------------------------------------
```python
planck_gal80_mask = hp.read_map(planck_map_filename, ("GAL080",))
```
```python
hp.mollview(planck_gal80_mask, title="Planck galactic mask 80% no apodization")
```

We need to downgrade the Planck mask from 2048 to 1024, so we average 4 pixels, we consider a pixel un-masked if 3 of the 4 pixels are unmasked.
```python
# total_mask = np.logical_and(hp.ud_grade(wmap_mask, nside), hp.ud_grade(planck_gal80_mask, nside)>=.75)
```
We can check how many pixels have the value of 0,0.25,0.5,0.75,1
```python
np.bincount((4*hp.ud_grade(planck_gal80_mask, nside)).astype(np.int64))
```
array([ 2511882, 3288, 1665, 3317, 10062760])
```python
total_mask = hp.ud_grade(planck_gal80_mask, nside)>=.75
```
```python
hp.write_map("total_mask.fits", total_mask.astype(np.int), overwrite=True)
```
```python
hp.mollview(total_mask, title="Total mask")
```

## Download the dust polarization map from Planck / Commander
Download the dust polarization map from Commander, see:
https://irsa.ipac.caltech.edu/data/Planck/release_2/all-sky-maps/previews/COM_CompMap_DustPol-commander_1024_R2.00/index.html
We are trying to reproduce the PySM 2 results so we are using the Commander release 2 results. Later on we can switch to the last Planck release.
```python
commander_dust_map_filename = "COM_CompMap_DustPol-commander_1024_R2.00.fits"
```
```python
if not os.path.exists(commander_dust_map_filename):
!wget https://irsa.ipac.caltech.edu/data/Planck/release_2/all-sky-maps/maps/component-maps/foregrounds/$commander_dust_map_filename
```
The input map has no temperature, we repeat the Q component for T as well,
this doesn't impact the polarization spectra:
```python
m_planck,h = hp.read_map("./COM_CompMap_DustPol-commander_1024_R2.00.fits", (0,0,1), h=True)
```
```python
h
```
[('XTENSION', 'BINTABLE'),
('BITPIX', 8),
('NAXIS', 2),
('NAXIS1', 56),
('NAXIS2', 12582912),
('PCOUNT', 0),
('GCOUNT', 1),
('TFIELDS', 14),
('COMMENT', ''),
('COMMENT', ' *** End of mandatory fields ***'),
('COMMENT', ''),
('EXTNAME', 'COMP-MAP-DustPol'),
('EXTVER', 1),
('DATE', '2014-12-11'),
('COMMENT', ''),
('COMMENT', ' *** Column names ***'),
('COMMENT', ''),
('TTYPE1', 'Q_ML_FULL'),
('TTYPE2', 'U_ML_FULL'),
('TTYPE3', 'Q_ML_HM1'),
('TTYPE4', 'U_ML_HM1'),
('TTYPE5', 'Q_ML_HM2'),
('TTYPE6', 'U_ML_HM2'),
('TTYPE7', 'Q_ML_HR1'),
('TTYPE8', 'U_ML_HR1'),
('TTYPE9', 'Q_ML_HR2'),
('TTYPE10', 'U_ML_HR2'),
('TTYPE11', 'Q_ML_YR1'),
('TTYPE12', 'U_ML_YR1'),
('TTYPE13', 'Q_ML_YR2'),
('TTYPE14', 'U_ML_YR2'),
('COMMENT', ''),
('COMMENT', ' *** Column formats ***'),
('COMMENT', ''),
('TFORM1', 'E'),
('TFORM2', 'E'),
('TFORM3', 'E'),
('TFORM4', 'E'),
('TFORM5', 'E'),
('TFORM6', 'E'),
('TFORM7', 'E'),
('TFORM8', 'E'),
('TFORM9', 'E'),
('TFORM10', 'E'),
('TFORM11', 'E'),
('TFORM12', 'E'),
('TFORM13', 'E'),
('TFORM14', 'E'),
('COMMENT', ''),
('COMMENT', '*** Column units ***'),
('COMMENT', ''),
('TUNIT1', 'uK_RJ'),
('TUNIT2', 'uK_RJ'),
('TUNIT3', 'uK_RJ'),
('TUNIT4', 'uK_RJ'),
('TUNIT5', 'uK_RJ'),
('TUNIT6', 'uK_RJ'),
('TUNIT7', 'uK_RJ'),
('TUNIT8', 'uK_RJ'),
('TUNIT9', 'uK_RJ'),
('TUNIT10', 'uK_RJ'),
('TUNIT11', 'uK_RJ'),
('TUNIT12', 'uK_RJ'),
('TUNIT13', 'uK_RJ'),
('TUNIT14', 'uK_RJ'),
('COMMENT', ''),
('COMMENT', '*** Planck params ***'),
('COMMENT', ''),
('PIXTYPE', 'HEALPIX'),
('ORDERING', 'NESTED'),
('COORDSYS', 'GALACTIC'),
('POLCCONV', 'COSMO'),
('POLAR', 'True'),
('NSIDE', 1024),
('FIRSTPIX', 0),
('LASTPIX', 12582911),
('INDXSCHM', 'IMPLICIT'),
('BAD_DATA', -1.6375e+30),
('METHOD', 'COMMANDER'),
('AST-COMP', 'Dust-Polarization'),
('FWHM', 10.0),
('NU_REF', '353.0 GHz'),
('PROCVER', 'DX11D'),
('FILENAME', 'COM_CompMap_DustPol-commander_1024_R2.00.fits'),
('COMMENT', ''),
('COMMENT', ' Original Inputs'),
('COMMENT', '------------------------------------------------------------'),
('COMMENT', 'commander_dx11d2_pol_bpc_n1024_10arc_notempl_v1_full_dust.fits'),
('COMMENT', 'commander_dx11d2_pol_bpc_n1024_10arc_notempl_v1_hm1_dust.fits'),
('COMMENT', 'commander_dx11d2_pol_bpc_n1024_10arc_notempl_v1_hm2_dust.fits'),
('COMMENT', 'commander_dx11d2_pol_bpc_n1024_10arc_notempl_v1_hr1_dust.fits'),
('COMMENT', 'commander_dx11d2_pol_bpc_n1024_10arc_notempl_v1_hr2_dust.fits'),
('COMMENT', 'commander_dx11d2_pol_bpc_n1024_10arc_notempl_v1_yr1_dust.fits'),
('COMMENT', 'commander_dx11d2_pol_bpc_n1024_10arc_notempl_v1_yr2_dust.fits'),
('COMMENT', '------------------------------------------------------------'),
('COMMENT', 'For further details see Planck Explanatory Supplement at:'),
('COMMENT', ' http://www.cosmos.esa.int/wikiSI/planckpla'),
('COMMENT', '------------------------------------------------------------'),
('DATASUM', '2189663489'),
('CHECKSUM', 'ZAIMg2FMZ9FMf9FM')]
```python
for i_pol, pol in [(1, "Q"), (2, "U")]:
hp.mollview(m_planck[i_pol], title="Planck-Commander dust polarization " + pol, unit="uK_RJ", min=-300, max=300)
```


```python
# A T map set to 0 is not supported by PolSpice
m_planck[0] = 1
```
## Extend spectrum to small scales
### Angular power spectrum with NaMaster
```python
import pymaster as nmt
```
```python
hp.mollview(m_planck[1])
```

```python
def run_namaster(m, mask):
    """Compute the masked EE/BB pseudo-C_ell spectrum of a spin-2 map.

    ``m`` is a (2, npix) [Q, U] HEALPix map (here ``m_planck[1:]``) and
    ``mask`` the sky mask.  Returns ``(ell_arr, cl)`` where ``cl`` has
    shape (3, n_ell) with EE in row 1 and BB in row 2 (row 0 unused).
    """
    # One multipole per bandpower (linear binning of width 1).
    binning = nmt.NmtBin.from_nside_linear(hp.npix2nside(m.shape[-1]), 1)
    ell_arr = binning.get_effective_ells()
    # Prepend ell = 0, 1 so the array index matches the multipole number.
    ell_arr = np.concatenate([[0,0], ell_arr])
    f_2 = nmt.NmtField(mask, m.copy()) # namaster overwrites the map in place with the mask
    cl_22 = nmt.compute_full_master(f_2, f_2, binning)
    # Of the four spin-2 cross spectra returned, keep the first (EE) and
    # last (BB) rows, shifted by 2 to align with the prepended ells.
    cl = np.zeros((3, len(ell_arr)), dtype=np.double)
    cl[1, 2:] = cl_22[0]
    cl[2, 2:] = cl_22[3]
    return ell_arr, cl
```
```python
```
```python
ell_arr, spice_cl = run_namaster(m_planck[1:], total_mask)
```
```python
```
```python
plt.plot(ell_arr, spice_cl[1], 'b-', label='EE')
plt.plot(ell_arr, spice_cl[2], 'y-', label='BB')
plt.loglog()
plt.xlabel('$\\ell$', fontsize=16)
plt.ylabel('$C_\\ell$', fontsize=16)
plt.legend(loc='upper right', ncol=2, labelspacing=0.1)
plt.grid()
plt.xlim([0, 400])
plt.show()
```
/global/u2/z/zonca/condanamaster/lib/python3.7/site-packages/ipykernel_launcher.py:8: UserWarning: Attempted to set non-positive left xlim on a log-scaled axis.
Invalid limit will be ignored.

```python
ell = np.arange(spice_cl.shape[1])
cl_norm = ell*(ell+1)/np.pi/2
```
We plot the output power spectrum and also identify a range in $\ell$ before white noise starts dominating
and after the uncertainty at low-$\ell$.
The power spectrum features a power law behaviour $\ell < 200$ (linear in `loglog` axes), then white noise
starts picking up until $\ell=1000$ and then we see the smoothing applied to the maps (10 arcminutes).
```python
ell_fit_low = 50
ell_fit_high = 200
```
```python
plt.loglog(cl_norm * spice_cl[1], label="spice EE $C_\ell$")
plt.loglog(cl_norm * spice_cl[2], label="spice BB $C_\ell$")
plt.axvline(ell_fit_low, linestyle="--", color="black", label="$ \ell={} $".format(ell_fit_low))
plt.axvline(ell_fit_high, linestyle="--", color="gray", label="$ \ell={} $".format(ell_fit_high))
plt.legend()
plt.xlim([0, 400])
plt.grid();
```
/global/u2/z/zonca/condanamaster/lib/python3.7/site-packages/ipykernel_launcher.py:6: UserWarning: Attempted to set non-positive left xlim on a log-scaled axis.
Invalid limit will be ignored.

### Fit noise bias
We want to fit for the high-ell slope so that we can extend that to higher ells, but the spectrum
especially BB is dominated by noise, therefore we first fit an estimate of the noise at high ell
and substract it out.
To do this we first "unsmooth" the map by the 10 arcmin beam used by Commander and then
average where we see the spectrum flattening out.
There are many parameters here we can tweak, the target here is just to check we can recover something
similar to the PySM 2 paper.
```python
smoothing_beam = hp.gauss_beam(fwhm=(10 * u.arcmin).to_value(u.radian), lmax=lmax)
```
```python
noise_bias = (spice_cl[1:, 750:850] / smoothing_beam[750:850]**2).mean(axis=1)
```
```python
noise_bias = {"EE":noise_bias[0], "BB":noise_bias[1]}
```
```python
plt.title("EE")
plt.loglog(spice_cl[1], label="spice EE $C_\ell$")
#plt.loglog(spice_cl[2], label="spice BB $C_\ell$")
plt.loglog(spice_cl[1]/smoothing_beam**2, label="spice EE $C_\ell$")
plt.axvline(ell_fit_low, linestyle="--", color="black", label="$ \ell={} $".format(ell_fit_low))
plt.axvline(ell_fit_high, linestyle="--", color="gray", label="$ \ell={} $".format(ell_fit_high))
plt.legend()
for pol,color in zip(["EE", ], ["violet", ]):
plt.axhline(noise_bias[pol], color=color, label=f"noise bias {pol}")
#plt.xlim([0, 400])
plt.grid();
```

```python
noise_bias
```
{'EE': 0.00011287208896345207, 'BB': 0.00010224582025317983}
```python
plt.loglog(spice_cl[1]/smoothing_beam**2, label="spice EE $C_\ell$")
plt.loglog(spice_cl[2]/smoothing_beam**2, label="spice BB $C_\ell$")
plt.axvline(ell_fit_low, linestyle="--", color="black", label="$ \ell={} $".format(ell_fit_low))
plt.axvline(ell_fit_high, linestyle="--", color="gray", label="$ \ell={} $".format(ell_fit_high))
for pol,color in zip(["EE", "BB"], ["violet", "red"]):
plt.axhline(noise_bias[pol], color=color, label=f"noise bias {pol}")
plt.legend()
plt.grid();
```

```python
plt.loglog(spice_cl[1]/smoothing_beam**2, label="spice EE $C_\ell$")
plt.loglog(spice_cl[2]/smoothing_beam**2, label="spice BB $C_\ell$")
plt.axvline(ell_fit_low, linestyle="--", color="black", label="$ \ell={} $".format(ell_fit_low))
plt.axvline(ell_fit_high, linestyle="--", color="gray", label="$ \ell={} $".format(ell_fit_high))
for pol,color in zip(["EE", "BB"], ["violet", "red"]):
plt.axhline(noise_bias[pol], color=color, label=f"noise bias {pol}")
plt.legend()
plt.xlim([4e2, 2e3])
plt.ylim([8e-5, 5e-4])
plt.grid();
```

### Fit for the slope at high ell
We assume the same model from the paper and fit for an amplitude and a power law exponent (slope in log-log)
```python
from scipy.optimize import curve_fit
```
```python
def model(ell, A, gamma):
return A * ell ** gamma
```
```python
xdata = np.arange(ell_fit_low, ell_fit_high)
```
```python
A_fit, gamma_fit, A_fit_std, gamma_fit_std = {},{},{},{}
for pol,i_pol in [("EE",1),("BB",2)]:
ydata = xdata*(xdata+1)/np.pi/2 * (spice_cl[i_pol][xdata] - noise_bias[pol])
(A_fit[pol], gamma_fit[pol]), cov = curve_fit(model, xdata, ydata)
A_fit_std[pol], gamma_fit_std[pol] = np.sqrt(np.diag(cov))
plt.figure()
plt.loglog(ell*(ell+1)/np.pi/2 * (spice_cl[i_pol] ), label="spice $C_\ell$")
plt.loglog(A_fit[pol]*ell**gamma_fit[pol], label="model fit")
plt.axvline(ell_fit_low, linestyle="--", color="black", label="$ \ell={} $".format(ell_fit_low))
plt.axvline(ell_fit_high, linestyle="--", color="gray", label="$ \ell={} $".format(ell_fit_high))
plt.legend()
plt.grid()
plt.ylabel("$\ell(\ell+1)C_\ell/2\pi [\mu K_{RJ}]$")
plt.xlabel(("$\ell$"))
plt.title(f"{pol} power spectrum for dust")
#plt.xlim(0, 400)
#plt.ylim(1, 30);
```
/global/u2/z/zonca/condanamaster/lib/python3.7/site-packages/ipykernel_launcher.py:10: RuntimeWarning: divide by zero encountered in power
# Remove the CWD from sys.path while we load stuff.
/global/u2/z/zonca/condanamaster/lib/python3.7/site-packages/ipykernel_launcher.py:10: RuntimeWarning: divide by zero encountered in power
# Remove the CWD from sys.path while we load stuff.


```python
A_fit, A_fit_std
```
({'EE': 18.09672573027622, 'BB': 5.518135999977027},
{'EE': 2.216649476829414, 'BB': 0.6670108771575787})
```python
gamma_fit, gamma_fit_std
```
({'EE': -0.3957026535575404, 'BB': -0.22333811471731838},
{'EE': 0.026341585170764382, 'BB': 0.025694460083852243})
The paper mentions a $\gamma^{EE,dust} = -.31$ and a $\gamma^{BB,dust} = -.15$.
### Window function
The window function is used to smooth the input templates to remove the high $\ell$ noise and its inverse is used for the added small scales.
$\ell_*^{dust}$
```python
ell_star = 69
```
```python
theta_fwhm_deg = 180/ell_star
```
```python
theta_fwhm_deg
```
2.608695652173913
```python
theta_fwhm = np.radians(theta_fwhm_deg)
```
```python
w_ell = hp.gauss_beam(fwhm=theta_fwhm, lmax=lmax)
```
```python
w_ell.shape
```
(3072,)
### Process patches
This process doesn't have a large impact on the output spectra; the idea is that in each $N_{side}=2$ pixel we want to scale the gaussian fluctuations so that they are consistent with the power at low ell.
So we will have higher gaussian fluctuations on the galaxy where there is stronger dust emission.
```python
patch_indices = hp.ud_grade(np.arange(hp.nside2npix(2)), nside)
```
```python
hp.mollview(patch_indices)
```

```python
zeros = np.zeros(len(ell), dtype=np.double)
```
```python
inv_w_ell = 1 - w_ell**2
```
```python
nside_patches = 2
n_patches = hp.nside2npix(nside_patches)
```
```python
plt.loglog(inv_w_ell)
```
[<matplotlib.lines.Line2D at 0x2aace427a950>]

```python
hp.mollview(m_planck[1])
```

```python
m_sigma_G = hp.synfast([
zeros,
A_fit["EE"] * ell**gamma_fit["EE"] * inv_w_ell / cl_norm,A_fit["BB"] * ell**gamma_fit["BB"] * inv_w_ell / cl_norm,
zeros, zeros, zeros], nside, new=True)
```
/global/u2/z/zonca/condanamaster/lib/python3.7/site-packages/ipykernel_launcher.py:3: RuntimeWarning: divide by zero encountered in power
This is separate from the ipykernel package so we can avoid doing imports until
/global/u2/z/zonca/condanamaster/lib/python3.7/site-packages/ipykernel_launcher.py:3: RuntimeWarning: invalid value encountered in multiply
This is separate from the ipykernel package so we can avoid doing imports until
```python
hp.mollview(m_sigma_G[0])
```

```python
N = {i_pol:np.zeros(n_patches, dtype=np.double) for i_pol in [1,2]}
```
```python
m_planck[0] = 0
```
```python
m_planck_smoothed = hp.alm2map(hp.smoothalm(hp.map2alm(m_planck, use_pixel_weights=True), fwhm=theta_fwhm),
nside=nside)
```
```python
hp.mollview(m_planck_smoothed[1])
```

```python
for i_patch in range(n_patches):
print(i_patch)
m_patch = np.zeros_like(m_planck_smoothed)
m_patch[1:, patch_indices == i_patch] = m_planck_smoothed[1:, patch_indices == i_patch]
cl_patch = hp.anafast(m_patch, lmax=2*ell_star, use_pixel_weights=True)
for pol,i_pol in [("EE", 1),("BB",2)]:
N[i_pol][i_patch] = np.sqrt(cl_patch[i_pol][ell_star] / n_patches / (A_fit[pol] * ell_star ** gamma_fit[pol]))
```
0
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
```python
plt.loglog(cl_patch[1])
plt.axvline(ell_star)
```
<matplotlib.lines.Line2D at 0x2aacdc39aa50>

```python
hp.mollview(N[1])
```

```python
m_zeros = np.zeros(hp.nside2npix(nside), dtype=np.double)
```
```python
N_smoothed = hp.smoothing([m_zeros, hp.ud_grade(N[1], nside), hp.ud_grade(N[2], nside)], fwhm=np.radians(10))
```
```python
N_smoothed[1] /= N_smoothed[1].mean()
```
```python
N_smoothed[2] /= N_smoothed[2].mean()
```
```python
hp.mollview(N_smoothed[1], cmap="jet")
```

This also is quite different from Figure 9 in the paper, but it is not the main issue, possibly I need to use PolSpice instead of anafast?
```python
hp.mollview(N_smoothed[1], min=0, max=6, cmap="jet")
```

## Run PolSpice on the total map and just on the small scales
Always using the same Gal80 Planck mask
```python
m_total = m_planck_smoothed + m_sigma_G * N_smoothed
```
```python
m_total[0] = 1
```
```python
_, cl_total = run_namaster(m_total[1:], total_mask)
```
```python
m_sigma_G[0]=1
```
```python
N_smoothed[0]=1
```
```python
_, cl_sigma_G_uniform = run_namaster(m_sigma_G[1:], total_mask)
```
## Download PySM 2 templates
```python
for comp in "tqu":
filename = f"dust_{comp}_new.fits"
if not os.path.exists(filename):
!wget https://portal.nersc.gov/project/cmb/pysm-data/pysm_2/$filename
```
```python
total_mask.shape
```
(12582912,)
```python
total_mask_512 = hp.ud_grade(total_mask, 512)>=.75
```
```python
m_pysm2 = np.array([hp.read_map(f"dust_{comp}_new.fits") for comp in "tqu"])
```
```python
ell_arr_512, cl_pysm2 = run_namaster(m_pysm2[1:], total_mask_512)
```
### Check the impact of modifying the amplitude across the sky
It has the effect of increasing how steeply the spectrum decreases at low-ell
this has the same impact we can see on the plot in the paper copied at the bottom
of the Notebook, the red line which is the fitted spectrum is less steep than
the actual small scale realization.
```python
for pol, i_pol in [("EE",1),("BB",2)]:
plt.figure(figsize=(10,6))
#plt.loglog(cl_norm[:cl_pysm2.shape[1]]*cl_pysm2[i_pol], label="pysm2")
plt.loglog(cl_norm*cl_sigma_G_uniform[i_pol], label="SS uniform")
plt.loglog(cl_norm*cl_total[i_pol], label="This notebook", alpha=.5)
#plt.loglog(cl_norm*spice_cl[i_pol], label="original")
plt.loglog(A_fit[pol] * ell**gamma_fit[pol], label="spectrum fit")
plt.axvline(ell_star, color="black")
plt.title(pol)
plt.legend()
plt.xlim([1,1000])
plt.ylim([1e-1, 1e1])
plt.ylabel("$\ell(\ell+1)C_\ell/2\pi$")
plt.xlabel("$\ell$")
plt.grid();
```
/global/u2/z/zonca/condanamaster/lib/python3.7/site-packages/ipykernel_launcher.py:9: RuntimeWarning: divide by zero encountered in power
if __name__ == '__main__':
/global/u2/z/zonca/condanamaster/lib/python3.7/site-packages/ipykernel_launcher.py:9: RuntimeWarning: divide by zero encountered in power
if __name__ == '__main__':


## Compare PySM 2, the input and the output
```python
for pol, i_pol in [("EE",1),("BB",2)]:
plt.figure(figsize=(10,6))
plt.loglog(cl_norm[:cl_pysm2.shape[1]]*cl_pysm2[i_pol], label="pysm2")
#plt.loglog(cl_norm*cl_sigma_G_uniform[i_pol], label="SS uniform")
plt.loglog(cl_norm*cl_total[i_pol], label="This notebook", alpha=.7)
plt.loglog(cl_norm*spice_cl[i_pol], label="original")
plt.loglog(A_fit[pol] * ell**gamma_fit[pol], label="spectrum fit")
plt.axvline(ell_star, color="black")
plt.title(pol)
plt.legend()
plt.xlim([1,1000])
plt.ylim([1e-1, 1e1])
plt.ylabel("$\ell(\ell+1)C_\ell/2\pi$")
plt.xlabel("$\ell$")
plt.grid();
```
/global/u2/z/zonca/condanamaster/lib/python3.7/site-packages/ipykernel_launcher.py:9: RuntimeWarning: divide by zero encountered in power
if __name__ == '__main__':
/global/u2/z/zonca/condanamaster/lib/python3.7/site-packages/ipykernel_launcher.py:9: RuntimeWarning: divide by zero encountered in power
if __name__ == '__main__':


We can also compare with the dust BB plot (Figure 7) from the PySM 2 paper below
```python
from IPython.display import Image
Image("BB_dust_PySM_2_paper.png")
```

#
|
galsciREPO_NAMEpysmPATH_START.@pysm_extracted@pysm-main@docs@preprocess-templates@reproduce_pysm2_dust_pol.ipynb@.PATH_END.py
|
{
"filename": "_alignsrc.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/pointcloud/hoverlabel/_alignsrc.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class AlignsrcValidator(_plotly_utils.basevalidators.SrcValidator):
    """Validator for the ``pointcloud.hoverlabel.alignsrc`` source property."""

    def __init__(
        self, plotly_name="alignsrc", parent_name="pointcloud.hoverlabel", **kwargs
    ):
        # Fill in defaults only when the caller has not overridden them,
        # then forward everything to the base SrcValidator.
        kwargs.setdefault("edit_type", "none")
        kwargs.setdefault("role", "info")
        super(AlignsrcValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@pointcloud@hoverlabel@_alignsrc.py@.PATH_END.py
|
{
"filename": "bin_ground_schedule.ipynb",
"repo_name": "hpc4cmb/toast",
"repo_path": "toast_extracted/toast-main/tutorial/02_Simulated_Scan_Strategies/bin_ground_schedule.ipynb",
"type": "Jupyter Notebook"
}
|
# Binning a ground schedule
In this notebook, we take an observing schedule from `toast_ground_sim.py` and translate it into a depth map.
```python
# Capture C++ output in the jupyter cells
%reload_ext wurlitzer
```
First, we need a focalplane. If one does not already exist, TOAST `pipelines` includes a tool for generating mock hexagonal focalplanes:
```python
! toast_fake_focalplane.py --help
```
Here we create a focalplane with 10-degree FOV and a minimum of 20 pixels:
```python
! toast_fake_focalplane.py \
--minpix 20 \
--out focalplane \
--fwhm 30 \
--fov 10 \
--psd_fknee 5e-2 \
--psd_NET 1e-3 \
--psd_alpha 1 \
--psd_fmin 1e-5
```
The actual focalplane ends up having 37 pixels, instead of the minimum of 20. This is because regular packing of the hexagon is quantized. Notice that the final name of the focalplane is `focalplane_37.pkl`. We'll need the name to run the simulation script. We also need a schedule file. This may already exist if you previously ran the `simscan_ground` notebook, but we'll re-create it here just in case:
```python
! toast_ground_schedule.py \
--site-lat "-22.958064" \
--site-lon "-67.786222" \
--site-alt 5200 \
--site-name Atacama \
--telescope LAT \
--start "2020-01-01 00:00:00" \
--stop "2020-01-01 12:00:00" \
--patch-coord C \
--patch small_patch,1,40,-40,44,-44 \
--out schedule.txt
```
We will use the versatile ground simulation pipeline, `toast_ground_sim.py`, to bin the map. It will be covered in detail in lesson 7 so here we simply write out a parameter file:
```python
%%writefile bin_schedule.par
--sample-rate
10.0
--scan-rate
0.3
--scan-accel
10.0
--nside
64
--focalplane
focalplane_37.pkl
--schedule
schedule.txt
--out
out
--simulate-noise
--freq
100
--no-destripe
--no-binmap
--hits
--wcov
```
Now we run the pipeline with multiple MPI processes that divide into processing groups:
```python
! toast_ground_sim.py @bin_schedule.par
```
Let's examine the resulting hits and depth map
```python
import matplotlib.pyplot as plt
%matplotlib inline
import healpy
hits = healpy.read_map("out/00000000/100/toast_100_telescope_all_time_all_hmap.fits")
hits[hits == 0] = healpy.UNSEEN
healpy.mollview(hits, unit="hits", title="Total hits")
healpy.graticule(22.5, verbose=False)
```
```python
wcov = healpy.read_map("out/00000000/100/toast_100_telescope_all_time_all_wcov.fits")
wcov *= 1e12 # from K^2 to uK^2
wcov[wcov == 0] = healpy.UNSEEN
healpy.mollview(wcov, unit="$\mu$K$^2$", title="White noise variance", min=1e0, max=1e3)
healpy.graticule(22.5, verbose=False)
```
```python
```
|
hpc4cmbREPO_NAMEtoastPATH_START.@toast_extracted@toast-main@tutorial@02_Simulated_Scan_Strategies@bin_ground_schedule.ipynb@.PATH_END.py
|
{
"filename": "README.md",
"repo_name": "lee-group-cmu/cdetools",
"repo_path": "cdetools_extracted/cdetools-master/r/README.md",
"type": "Markdown"
}
|
cdetools: Tools for Conditional Density Estimates
===
Provides tools for evaluating conditional density estimates.
Calculates CDE loss, coverge, and HPD coverage.
Installation
===
Use the `devtools` package to install from GitHub
```{r}
devtools::install_github("tpospisi/cdetools/r")
```
|
lee-group-cmuREPO_NAMEcdetoolsPATH_START.@cdetools_extracted@cdetools-master@r@README.md@.PATH_END.py
|
{
"filename": "_y.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/heatmap/_y.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class YValidator(_plotly_utils.basevalidators.DataArrayValidator):
    """Validator for the ``heatmap.y`` data-array property."""

    def __init__(self, plotly_name="y", parent_name="heatmap", **kwargs):
        # Defaults below may be overridden by the caller through **kwargs.
        kwargs.setdefault("edit_type", "calc+clearAxisTypes")
        kwargs.setdefault("implied_edits", {"ytype": "array"})
        kwargs.setdefault("role", "data")
        super(YValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@heatmap@_y.py@.PATH_END.py
|
{
"filename": "config_test_12.py",
"repo_name": "swagnercarena/paltas",
"repo_path": "paltas_extracted/paltas-main/paltas/Configs/paper_2203_00690/config_test_12.py",
"type": "Python"
}
|
# Test-set configuration 12 for the paper-2203.00690 analysis: start from the
# shared validation configuration and pin every sampled parameter to a narrow
# distribution centered on one fiducial lens realization.
from paltas.Configs.paper_2203_00690.config_val import *

# Deep copy so the module-level config_dict imported above is not mutated.
config_dict = copy.deepcopy(config_dict)
# NOTE(review): `copy`, `np`, `norm` and `truncnorm` are presumably provided
# by the star import above (scipy.stats etc.) -- confirm against config_val.
# Subhalo population normalization.
config_dict['subhalo']['parameters']['sigma_sub'] = norm(loc=2.4e-3,
    scale=1.5e-4).rvs
# Main-deflector mass-profile slope; with scipy's truncnorm convention the
# lower bound is loc + a*scale = 1.972 - 197.2*0.01 = 0, i.e. gamma > 0.
config_dict['main_deflector']['parameters']['gamma'] = truncnorm(-197.2,
    np.inf,loc=1.972,scale=0.01).rvs
# Einstein radius, likewise truncated at zero.
config_dict['main_deflector']['parameters']['theta_E'] = truncnorm(-70.9,
    np.inf,loc=1.063,scale=0.015).rvs
# Ellipticity components of the main deflector.
config_dict['main_deflector']['parameters']['e1'] = norm(loc=0.043,
    scale=0.01).rvs
config_dict['main_deflector']['parameters']['e2'] = norm(loc=0.040,
    scale=0.01).rvs
# Lens center position.
config_dict['main_deflector']['parameters']['center_x'] = norm(loc=-0.057,
    scale=0.016).rvs
config_dict['main_deflector']['parameters']['center_y'] = norm(loc=-0.075,
    scale=0.016).rvs
# External shear components.
config_dict['main_deflector']['parameters']['gamma1'] = norm(loc=0.003,
    scale=0.005).rvs
config_dict['main_deflector']['parameters']['gamma2'] = norm(loc=-0.005,
    scale=0.005).rvs
|
swagnercarenaREPO_NAMEpaltasPATH_START.@paltas_extracted@paltas-main@paltas@Configs@paper_2203_00690@config_test_12.py@.PATH_END.py
|
{
"filename": "utility.py",
"repo_name": "sheydenreich/threepoint",
"repo_path": "threepoint_extracted/threepoint-main/python_scripts/utility.py",
"type": "Python"
}
|
""" Useful functions
"""
import numpy as np
def D(npix=4096, pixsize=1.):
    """Calculate the D kernel of the Kaiser-Squires relation on a grid.

    Args:
        npix (int, optional): Number of pixels in one direction. Defaults to 4096.
        pixsize (float, optional): Length of a pixel. Defaults to 1..

    Returns:
        np.array: Complex (npix, npix) grid of D values, with the origin
        pixel set to 0 (the kernel is singular there).
    """
    xs1, xs2 = np.indices((npix, npix))
    # Center the coordinate grid on the map and scale to physical units.
    xs1 = (xs1 - npix / 2) * pixsize
    xs2 = (xs2 - npix / 2) * pixsize
    r2 = xs1**2 + xs2**2
    # Guard the division at the origin so no RuntimeWarning is emitted;
    # that pixel is overwritten below. Mirrors the guard in Dhat_func.
    with np.errstate(divide="ignore", invalid="ignore"):
        a = (-xs1**2 + xs2**2 - xs1 * xs2 * 2.j) / (r2**2)
    a[r2 == 0] = 0
    return a
def Dhat_func(npix = 4096,pixsize = 1.):
    """Fourier-space D_hat kernel of the Kaiser-Squires relation on a grid.

    Args:
        npix (int, optional): Number of pixels in one direction. Defaults to 4096.
        pixsize (float, optional): Length of a pixel. Defaults to 1..

    Returns:
        np.array: Complex (npix, npix) grid of D_hat values.
    """
    grid_u, grid_v = np.indices((npix, npix))
    grid_u = (grid_u - npix / 2) * pixsize
    grid_v = (grid_v - npix / 2) * pixsize
    radius_sq = grid_u ** 2 + grid_v ** 2
    numerator = grid_u ** 2 - grid_v ** 2 + 2.j * grid_u * grid_v
    # Suppress warnings from the singular origin pixel; it is reset below.
    with np.errstate(divide="ignore", invalid="ignore"):
        kernel = numerator / radius_sq
    kernel[radius_sq == 0] = 0
    return kernel
def create_gamma_field(kappa_field,Dhat=None):
    """Convert a convergence (kappa) map into a shear (gamma) map.

    Implements the Kaiser-Squires relation: multiply the Fourier transform
    of kappa by the D_hat kernel and transform back.

    Args:
        kappa_field (np.array): Square kappa grid.
        Dhat (np.array, optional): Precomputed D_hat kernel. Defaults to
            None, in which case it is computed from the field size.

    Returns:
        np.array: Complex gamma grid with the same shape as ``kappa_field``.
    """
    # Build the kernel on demand when the caller did not supply one.
    kernel = Dhat if Dhat is not None else Dhat_func(npix=kappa_field.shape[0])
    # kappa -> Fourier space, shifted to the kernel's origin-centered layout.
    kappa_hat = np.fft.fftshift(np.fft.fft2(kappa_field))
    # Apply the Kaiser-Squires kernel and transform back to real space.
    return np.fft.ifft2(np.fft.ifftshift(kappa_hat * kernel))
def is_triangle(l1,l2,l3):
    """Check whether l1, l2 and l3 can form a closed triangle.

    Args:
        l1 (float): sidelength
        l2 (float): sidelength
        l3 (float): sidelength

    Returns:
        bool: True if the lengths satisfy every triangle inequality
        (degenerate, zero-area triangles are accepted), False otherwise.
    """
    # Each side must lie between the absolute difference and the sum of
    # the other two sides.
    cyclic = [(l1, l2, l3), (l2, l3, l1), (l3, l1, l2)]
    return all(np.abs(a - b) <= c <= a + b for a, b, c in cyclic)
def create_triangle(r1, r2, r3, offset=(0, 0), yscale=1.):
    """Place a triangle with side lengths r1, r2, r3 in the plane.

    Vertex 1 sits at the origin and vertex 2 at (r1, 0); vertex 3 is
    solved from the law of cosines. If the side lengths violate the
    triangle inequality the third vertex contains NaN.

    Args:
        r1 (float): Side length between vertices 1 and 2.
        r2 (float): Side length between vertices 1 and 3.
        r3 (float): Side length between vertices 2 and 3.
        offset (sequence of float, optional): (x, y) shift applied to all
            vertices. Defaults to (0, 0). (Immutable tuple replaces the
            previous mutable-list default; any 2-sequence is accepted.)
        yscale (float, optional): Factor applied to the y coordinates
            after the offset. Defaults to 1..

    Returns:
        np.array: (3, 2) array with one vertex per row.
    """
    x1 = np.array([0, 0])
    x2 = np.array([r1, 0])
    # Law of cosines gives the projection of vertex 3 onto the base ...
    y = (r2**2 + r1**2 - r3**2) / (2 * r1)
    # ... and Pythagoras its height (NaN when no such triangle exists).
    x = np.sqrt(r2**2 - y**2)
    x3 = np.array([y, x])
    shift = np.array(offset)
    result = np.array([x1 + shift, x2 + shift, x3 + shift])
    result[:, 1] *= yscale
    return result
|
sheydenreichREPO_NAMEthreepointPATH_START.@threepoint_extracted@threepoint-main@python_scripts@utility.py@.PATH_END.py
|
{
"filename": "test_advanced_analysis.py",
"repo_name": "LSSTDESC/BlendingToolKit",
"repo_path": "BlendingToolKit_extracted/BlendingToolKit-main/tests/test_advanced_analysis.py",
"type": "Python"
}
|
"""We have this unittests to avoid running the very time consuming advanced notebook."""
import multiprocessing as mp
import numpy as np
import btk
def get_psf_size(survey: btk.survey.Survey) -> float:
    """Return the r-band PSF FWHM of *survey*, converted to pixels."""
    # Express both the PSF FWHM and the pixel scale in arcsec, then divide.
    fwhm_arcsec = survey.get_filter("r").psf_fwhm.to_value("arcsec")
    return fwhm_arcsec / survey.pixel_scale.to_value("arcsec")
def _setup_generator(data_dir):
    """Build the CatsimGenerator and settings shared by all tests in this file.

    Returns a dict holding the blend generator, the LSST survey object,
    the maximum number of sources per blend and the batch size.
    """
    max_n_sources = 10
    min_n_sources = 0
    stamp_size = 24.0  # stamp side length, arcsec
    max_shift = 3.0  # shift from center is 3 arcsecs = 15 pixels, so blends are likely.
    seed = 0  # fixed seed keeps sampling and noise deterministic across runs
    # NOTE(review): assumes data_dir provides an 'input_catalog.fits'
    # CatSim-format fixture -- confirm against the test harness.
    catalog = btk.catalog.CatsimCatalog.from_file(data_dir / "input_catalog.fits")
    sampling_function = btk.sampling_functions.DefaultSampling(
        max_number=max_n_sources,
        min_number=min_n_sources,
        stamp_size=stamp_size,
        max_shift=max_shift,
        min_mag=18,
        max_mag=27,
        mag_name="i_ab",  # cutting on i-band
        seed=seed,
    )
    survey = btk.survey.get_surveys("LSST")
    batch_size = 10
    draw_generator = btk.draw_blends.CatsimGenerator(
        catalog,
        sampling_function,
        survey,
        batch_size=batch_size,
        njobs=1,
        add_noise="background",
        seed=seed,  # use same seed here
    )
    return {
        "draw_generator": draw_generator,
        "survey": survey,
        "max_n_sources": max_n_sources,
        "batch_size": batch_size,
    }
def test_efficiency_matrix(data_dir):
    """Accumulate detection-efficiency matrices for two deblenders.

    Runs the peak-finder and SEP deblenders over two batches, matches
    their detections to the truth catalogs and feeds the counts into
    btk's Efficiency metric (exercised for coverage; results unused).
    """
    from surveycodex.utilities import mean_sky_level
    from btk.deblend import PeakLocalMax, SepSingleBand
    from btk.match import PixelHungarianMatcher
    from btk.metrics.detection import Efficiency

    setup_dict = _setup_generator(data_dir)
    draw_generator = setup_dict["draw_generator"]
    survey = setup_dict["survey"]
    max_n_sources = setup_dict["max_n_sources"]
    batch_size = setup_dict["batch_size"]
    # sky level in electrons, used as the noise floor for peak finding
    sky_level = mean_sky_level(survey, survey.get_filter("r")).to_value("electron")  # gain = 1
    # use psf size as minimum distance between peaks (in pixels) for the peak-finding algorithm.
    min_distance = int(get_psf_size(survey))  # needs to be an integer
    # standard values for SEP that work well for blended galaxy scenes
    thresh = 1.5
    min_area = 3
    # setup both deblenders; max_n_sources is padded so over-detections fit
    peak_finder = PeakLocalMax(
        max_n_sources=max_n_sources + 10,
        sky_level=sky_level,
        threshold_scale=5,
        min_distance=min_distance * 2,
        use_band=2,  # r-band
    )
    sep = SepSingleBand(
        max_n_sources=max_n_sources + 10, thresh=thresh, min_area=min_area, use_band=2
    )
    # matcher pairing detections with truth within one PSF width
    matcher = PixelHungarianMatcher(pixel_max_sep=min_distance)
    # setup efficiency matrix metric, one accumulator per deblender
    eff_matrix_peak = Efficiency(batch_size)
    eff_matrix_sep = Efficiency(batch_size)
    for _ in range(2):
        blend_batch = next(draw_generator)
        peak_batch = peak_finder(blend_batch)
        sep_batch = sep(blend_batch)
        matching_peak = matcher(blend_batch.catalog_list, peak_batch.catalog_list)
        matching_sep = matcher(blend_batch.catalog_list, sep_batch.catalog_list)
        # feed (true positives, truths, predictions) into each accumulator
        eff_matrix_peak(matching_peak.tp, matching_peak.t, matching_peak.p)
        eff_matrix_sep(matching_sep.tp, matching_sep.t, matching_sep.p)
    # get efficiency matrices and normalize (values discarded on purpose)
    _ = eff_matrix_peak.aggregate()
    _ = eff_matrix_sep.aggregate()
def test_recall_curves(data_dir):
    """Accumulate recall-vs-SNR curves for the peak-finder and SEP deblenders.

    One Recall accumulator is kept per SNR bin; truths below the bin's
    lower edge are filtered out before the counts are recorded.
    """
    from surveycodex.utilities import mean_sky_level
    from btk.deblend import PeakLocalMax, SepSingleBand

    setup_dict = _setup_generator(data_dir)
    draw_generator = setup_dict["draw_generator"]
    survey = setup_dict["survey"]
    max_n_sources = setup_dict["max_n_sources"]
    batch_size = setup_dict["batch_size"]
    # sky level in electrons, also used for the SNR computation below
    sky_level = mean_sky_level(survey, survey.get_filter("r")).to_value("electron")  # gain = 1
    # use psf size as minimum distance between peaks (in pixels).
    min_distance = int(get_psf_size(survey))  # needs to be an integer
    # setup both deblenders
    peak_finder = PeakLocalMax(
        max_n_sources=max_n_sources + 10,
        sky_level=sky_level,
        threshold_scale=5,
        min_distance=min_distance * 2,
        use_band=2,  # r-band
    )
    sep = SepSingleBand(max_n_sources=max_n_sources + 10, thresh=1.5, min_area=3, use_band=2)
    from btk.match import PixelHungarianMatcher

    # matcher pairing detections with truth within one PSF width
    matcher = PixelHungarianMatcher(pixel_max_sep=min_distance)
    snr_bins = np.linspace(0, 100, 21)
    from btk.measure import get_snr
    from btk.metrics.detection import Recall

    # we create one recall metric object per bin
    # each of them will automatically aggregate results over batches
    recalls_peaks = [Recall(batch_size) for _ in range(1, len(snr_bins))]
    recalls_sep = [Recall(batch_size) for _ in range(1, len(snr_bins))]
    for _ in range(2):
        blend_batch = next(draw_generator)
        iso_images = blend_batch.isolated_images[:, :, 2]  # pick 'r' band
        snr_r = get_snr(iso_images, sky_level)
        # run deblenders and matches
        peak_batch = peak_finder(blend_batch)
        sep_batch = sep(blend_batch)
        matching_peak = matcher(blend_batch.catalog_list, peak_batch.catalog_list)
        matching_sep = matcher(blend_batch.catalog_list, sep_batch.catalog_list)
        for jj in range(1, len(snr_bins)):
            # NOTE(review): only the lower bin edge is used, so each bin's
            # recall is cumulative above min_snr rather than per-interval --
            # confirm this is intentional.
            min_snr, _ = snr_bins[jj - 1], snr_bins[jj]
            mask = snr_r > min_snr
            matching_peak_new = matching_peak.filter_by_true(mask)
            matching_sep_new = matching_sep.filter_by_true(mask)
            recalls_peaks[jj - 1](matching_peak_new.tp, matching_peak_new.t, matching_peak_new.p)
            recalls_sep[jj - 1](matching_sep_new.tp, matching_sep_new.t, matching_sep_new.p)
    # aggregate each bin's accumulator (results discarded on purpose)
    _ = np.array([recall.aggregate() for recall in recalls_peaks])
    _ = np.array([recall.aggregate() for recall in recalls_sep])
def test_reconstruction_histograms(data_dir):
    """Accumulate per-source reconstruction metrics for SEP and Scarlet.

    Deblends two batches with both deblenders, matches the deblended
    stamps to the truth and feeds the matched pairs into the MSE, PSNR
    and SSIM metrics; the per-metric values are then concatenated across
    batches (exercised for coverage; results unused).
    """
    from btk.deblend import Scarlet, SepSingleBand
    from btk.match import PixelHungarianMatcher
    from btk.metrics.reconstruction import MSE, PSNR, StructSim

    setup_dict = _setup_generator(data_dir)
    draw_generator = setup_dict["draw_generator"]
    survey = setup_dict["survey"]
    max_n_sources = setup_dict["max_n_sources"]
    batch_size = setup_dict["batch_size"]
    # one accumulator per metric, per deblender
    metrics_sep = {"mse": MSE(batch_size), "psnr": PSNR(batch_size), "ssim": StructSim(batch_size)}
    metrics_scarlet = {
        "mse": MSE(batch_size),
        "psnr": PSNR(batch_size),
        "ssim": StructSim(batch_size),
    }
    # same as before
    thresh = 1.5
    min_area = 3
    # use psf size as minimum distance between peaks (in pixels).
    min_distance = int(get_psf_size(survey))
    sep = SepSingleBand(max_n_sources=max_n_sources, thresh=thresh, use_band=2, min_area=min_area)
    scarlet = Scarlet(max_n_sources)
    matcher = PixelHungarianMatcher(min_distance)
    # cap worker count on small machines, leaving one CPU free
    njobs = 4 if mp.cpu_count() > 4 else mp.cpu_count() - 1
    for ii in range(2):
        blend_batch = next(draw_generator)
        sep_batch = sep(blend_batch)
        # Scarlet is seeded with SEP's detections as reference positions
        scarlet_batch = scarlet(
            blend_batch,  # this line takes a while
            reference_catalogs=sep_batch.catalog_list,
            njobs=njobs,
        )
        matching_sep = matcher(blend_batch.catalog_list, sep_batch.catalog_list)
        matching_scarlet = matcher(blend_batch.catalog_list, scarlet_batch.catalog_list)
        true_iso_images = blend_batch.isolated_images[:, :, 2]  # pick 'r' band
        iso_images_sep = sep_batch.deblended_images[
            :, :, 0
        ]  # pick the only band which is the 'r' band
        iso_images_scarlet = scarlet_batch.deblended_images[:, :, 2]  # pick 'r' band
        # reorder truth/prediction stamps so matched sources line up
        iso_images1 = matching_sep.match_true_arrays(true_iso_images)
        iso_images2 = matching_scarlet.match_true_arrays(true_iso_images)
        iso_images_sep = matching_sep.match_pred_arrays(iso_images_sep)
        iso_images_scarlet = matching_scarlet.match_pred_arrays(iso_images_scarlet)
        for metric in metrics_sep.values():
            metric(iso_images1, iso_images_sep)
        for metric in metrics_scarlet.values():
            metric(iso_images2, iso_images_scarlet)
    # join data from all batches into single array
    # sep
    all_sep = {"mse": np.array([]), "psnr": np.array([]), "ssim": np.array([])}
    for metric_name, metric in metrics_sep.items():
        for mvalues in metric.all_data:
            all_sep[metric_name] = np.concatenate([all_sep[metric_name], mvalues[metric_name]])
    # scarlet
    all_scarlet = {"mse": np.array([]), "psnr": np.array([]), "ssim": np.array([])}
    for metric_name, metric in metrics_scarlet.items():
        for mvalues in metric.all_data:
            all_scarlet[metric_name] = np.concatenate(
                [all_scarlet[metric_name], mvalues[metric_name]]
            )
def test_ellipticity_residuals(data_dir):
    """Measure KSB ellipticities on true vs Scarlet-deblended stamps.

    Deblends two batches with Scarlet (seeded from the truth catalog),
    measures ellipticity, SNR and blendedness per matched source, then
    flattens everything and keeps only entries with finite ellipticities
    and 0 < SNR < 100 (exercised for coverage; results unused).
    """
    from surveycodex.utilities import mean_sky_level
    from btk.deblend import Scarlet
    from btk.match import PixelHungarianMatcher
    from btk.measure import get_blendedness, get_ksb_ellipticity, get_snr

    setup_dict = _setup_generator(data_dir)
    draw_generator = setup_dict["draw_generator"]
    survey = setup_dict["survey"]
    max_n_sources = setup_dict["max_n_sources"]
    # we will continue using 'r' band
    sky_level = mean_sky_level(survey, survey.get_filter("r")).to_value("electron")  # gain = 1
    # use psf size as minimum distance between peaks (in pixels).
    min_distance = int(get_psf_size(survey))
    scarlet = Scarlet(max_n_sources)
    matcher = PixelHungarianMatcher(min_distance)
    # per-batch accumulators: true/pred ellipticities, SNRs, blendedness
    es1 = []
    es2 = []
    snrs = []
    bs = []
    # scarlet is slow, so we use less batches for this example.
    for _ in range(2):
        blend_batch = next(draw_generator)
        scarlet_batch = scarlet(
            blend_batch,
            reference_catalogs=None,  # uses truth catalog
            njobs=1,
        )
        matching_scarlet = matcher(blend_batch.catalog_list, scarlet_batch.catalog_list)
        # need their centroids to measure ellipticity; pack the per-blend
        # catalogs into fixed-size (batch, max_sources, 2) arrays
        b, ms1, _, _, _ = blend_batch.isolated_images.shape
        centroids1 = np.zeros((b, ms1, 2))
        for jj, t in enumerate(blend_batch.catalog_list):
            n_sources = len(t)
            if n_sources > 0:
                centroids1[jj, :n_sources, 0] = t["x_peak"].value
                centroids1[jj, :n_sources, 1] = t["y_peak"].value
        b, ms2, _, _, _ = scarlet_batch.deblended_images.shape
        centroids2 = np.zeros((b, ms2, 2))
        for kk, t in enumerate(scarlet_batch.catalog_list):
            n_sources = len(t)
            if n_sources > 0:
                centroids2[kk, :n_sources, 0] = t["x_peak"].value
                centroids2[kk, :n_sources, 1] = t["y_peak"].value
        psf_r = blend_batch.psf[2]  # psf in r-band
        true_iso_images = blend_batch.isolated_images[:, :, 2]  # pick 'r' band
        iso_images_scarlet = scarlet_batch.deblended_images[:, :, 2]  # pick 'r' band
        # reorder stamps and centroids so matched sources line up
        iso_images1, xy1 = matching_scarlet.match_true_arrays(true_iso_images, centroids1)
        iso_images2, xy2 = matching_scarlet.match_pred_arrays(iso_images_scarlet, centroids2)
        # NOTE(review): pixel_scale=0.2 is hard-coded -- presumably the LSST
        # pixel scale in arcsec; confirm against the survey object.
        ellips1 = get_ksb_ellipticity(iso_images1, xy1, psf_r, pixel_scale=0.2)
        ellips2 = get_ksb_ellipticity(iso_images2, xy2, psf_r, pixel_scale=0.2)
        snr = get_snr(iso_images1, sky_level)
        blendedness = get_blendedness(iso_images1)
        es1.append(ellips1)
        es2.append(ellips2)
        snrs.append(snr)
        bs.append(blendedness)
    # flatten both ellipticity components of truth (e1x) and Scarlet (e2x)
    e11 = np.concatenate(es1)[:, :, 0].flatten()
    e12 = np.concatenate(es1)[:, :, 1].flatten()
    e21 = np.concatenate(es2)[:, :, 0].flatten()
    e22 = np.concatenate(es2)[:, :, 1].flatten()
    snr = np.concatenate(snrs).flatten()
    bdd = np.concatenate(bs).flatten()
    # keep only sources with finite ellipticities and moderate SNR
    cond1 = ~np.isnan(e11)
    cond2 = ~np.isnan(e12)
    cond3 = ~np.isnan(e21)
    cond4 = ~np.isnan(e22)
    cond5 = (snr > 0) & (snr < 100)
    cond = cond1 & cond2 & cond3 & cond4 & cond5
    e11 = e11[cond]
    e12 = e12[cond]
    e21 = e21[cond]
    e22 = e22[cond]
    snr = snr[cond]
    bdd = bdd[cond]
|
LSSTDESCREPO_NAMEBlendingToolKitPATH_START.@BlendingToolKit_extracted@BlendingToolKit-main@tests@test_advanced_analysis.py@.PATH_END.py
|
{
"filename": "_align.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/sankey/node/hoverlabel/_align.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class AlignValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    """Validator for the ``sankey.node.hoverlabel.align`` enumerated property."""

    def __init__(
        self, plotly_name="align", parent_name="sankey.node.hoverlabel", **kwargs
    ):
        # Apply defaults unless the caller already supplied an override.
        kwargs.setdefault("array_ok", True)
        kwargs.setdefault("edit_type", "calc")
        kwargs.setdefault("role", "style")
        kwargs.setdefault("values", ["left", "right", "auto"])
        super(AlignValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@sankey@node@hoverlabel@_align.py@.PATH_END.py
|
{
"filename": "_family.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/layout/polar/radialaxis/title/font/_family.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class FamilyValidator(_plotly_utils.basevalidators.StringValidator):
    """Validator for the ``layout.polar.radialaxis.title.font.family`` string."""

    def __init__(
        self,
        plotly_name="family",
        parent_name="layout.polar.radialaxis.title.font",
        **kwargs
    ):
        # Apply defaults unless the caller already supplied an override.
        kwargs.setdefault("edit_type", "ticks")
        kwargs.setdefault("no_blank", True)
        kwargs.setdefault("role", "style")
        kwargs.setdefault("strict", True)
        super(FamilyValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@layout@polar@radialaxis@title@font@_family.py@.PATH_END.py
|
{
"filename": "_textcase.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/scattermapbox/marker/colorbar/title/font/_textcase.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class TextcaseValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    """Validator for ``scattermapbox.marker.colorbar.title.font.textcase``."""

    def __init__(
        self,
        plotly_name="textcase",
        parent_name="scattermapbox.marker.colorbar.title.font",
        **kwargs,
    ):
        # Apply defaults unless the caller already supplied an override.
        kwargs.setdefault("edit_type", "calc")
        kwargs.setdefault("values", ["normal", "word caps", "upper", "lower"])
        super(TextcaseValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@scattermapbox@marker@colorbar@title@font@_textcase.py@.PATH_END.py
|
{
"filename": "JAX_CIGALE_emulator-kasia.ipynb",
"repo_name": "H-E-L-P/XID_plus",
"repo_path": "XID_plus_extracted/XID_plus-master/docs/build/doctrees/nbsphinx/notebooks/examples/SED_emulator/JAX_CIGALE_emulator-kasia.ipynb",
"type": "Jupyter Notebook"
}
|
# The JAX emulator: CIGALE prototype
In this notebook, I will prototype my idea for emulating radiative transfer codes with a Deepnet in order for it to be used inside xidplus. As `numpyro` uses JAX, the Deepnet wil ideally be trained with a JAX network. I will use CIGALE
### Advice from Kasia
Use the following modules:
* `Dale 2014` dust module with one parameter ($\alpha$) however, $\alpha$ can only take certian values in Cigale
* 0.0625, 0.1250, 0.1875, 0.2500,0.3125, 0.3750, 0.4375, 0.5000, 0.5625, 0.6250, 0.6875, 0.7500,0.8125, 0.8750, 0.9375, 1.0000, 1.0625, 1.1250, 1.1875, 1.2500,1.3125, 1.3750, 1.4375, 1.5000, 1.5625, 1.6250, 1.6875, 1.7500, 1.8125, 1.8750, 1.9375, 2.0000, 2.0625, 2.1250, 2.1875, 2.2500,2.3125, 2.3750, 2.4375, 2.5000, 2.5625, 2.6250, 2.6875, 2.7500,2.8125, 2.8750, 2.9375, 3.0000, 3.0625, 3.1250, 3.1875, 3.2500, 3.3125, 3.3750, 3.4375, 3.5000, 3.5625, 3.6250, 3.6875, 3.7500, 3.8125, 3.8750, 3.9375, 4.0000
* `sfhdelayed` starforamtion history module. Has parameters $\tau$ (500-6500) ($age$ can be calculated from redshift). $f_{burst}$ is set to 0
* `bc03`stellar population synthesis module (don't change parameters)
* `dustatt_2powerlaws`
* set $Av_BC$ the V band attenuation in the birth clouds to between 0 - 4
* set `BC_to_ISM_factor` to 0.7
Final parameters: $alpha$, $AV_BC$,$\tau$,$z$,$SFR$,$AGN$
Ideally, I would generate values from prior. I can do that for $AV_BC$,$\tau$,$z$,$SFR$,$AGN$ but not $\alpha$ given that there are fixed values.
```python
from astropy.cosmology import WMAP9 as cosmo
import jax
import numpy as onp
import pylab as plt
import astropy.units as u
import scipy.integrate as integrate
%matplotlib inline
import jax.numpy as np
from jax import grad, jit, vmap, value_and_grad
from jax import random
from jax import vmap # for auto-vectorizing functions
from functools import partial # for use with vmap
from jax import jit # for compiling functions for speedup
from jax.experimental import stax # neural network library
from jax.experimental.stax import Conv, Dense, MaxPool, Relu, Flatten, LogSoftmax, LeakyRelu # neural network layers
from jax.experimental import optimizers
from jax.tree_util import tree_multimap # Element-wise manipulation of collections of numpy arrays
import matplotlib.pyplot as plt # visualization
# Generate key which is used to generate random numbers
key = random.PRNGKey(2)
from xidplus import cigale
```
/usr/local/lib/python3.7/dist-packages/jax/experimental/stax.py:30: FutureWarning: jax.experimental.stax is deprecated, import jax.example_libraries.stax instead
FutureWarning)
/usr/local/lib/python3.7/dist-packages/jax/experimental/optimizers.py:30: FutureWarning: jax.experimental.optimizers is deprecated, import jax.example_libraries.optimizers instead
FutureWarning)
WARNING:absl:No GPU/TPU found, falling back to CPU. (Set TF_CPP_MIN_LOG_LEVEL=0 and rerun for more info.)
---------------------------------------------------------------------------
ModuleNotFoundError Traceback (most recent call last)
<ipython-input-1-347a2da2ba01> in <module>()
22 # Generate key which is used to generate random numbers
23 key = random.PRNGKey(2)
---> 24 from xidplus import cigale
ModuleNotFoundError: No module named 'xidplus'
---------------------------------------------------------------------------
NOTE: If your import is failing due to a missing package, you can
manually install dependencies using either !pip or !apt.
To view examples of installing some common dependencies, click the
"Open Examples" button below.
---------------------------------------------------------------------------
```python
onp.random.seed(2)
```
### Generate CIGALE SEDs
```python
from astropy.io import fits
from astropy.table import Table
import scipy.stats as stats
```
```python
# Discrete grid of Dale 2014 dust-emission alpha values: CIGALE only accepts
# these fixed steps of 0.0625 between 0.0625 and 4.0, so alpha cannot be drawn
# from a continuous prior.
alpha=onp.array([0.0625, 0.1250, 0.1875, 0.2500,0.3125, 0.3750, 0.4375, 0.5000, 0.5625, 0.6250, 0.6875, 0.7500,0.8125, 0.8750, 0.9375, 1.0000, 1.0625, 1.1250, 1.1875, 1.2500,1.3125, 1.3750, 1.4375, 1.5000, 1.5625, 1.6250, 1.6875, 1.7500, 1.8125, 1.8750, 1.9375, 2.0000, 2.0625, 2.1250, 2.1875, 2.2500,2.3125, 2.3750, 2.4375, 2.5000, 2.5625, 2.6250, 2.6875, 2.7500,2.8125, 2.8750, 2.9375, 3.0000, 3.0625, 3.1250, 3.1875, 3.2500, 3.3125, 3.3750, 3.4375, 3.5000, 3.5625, 3.6250, 3.6875, 3.7500, 3.8125, 3.8750, 3.9375, 4.0000])
# Sample alpha by drawing a random index into the fixed grid above.
alpha_rv = stats.randint(0, len(alpha))
# Birth-cloud V-band attenuation Av_BC.
# NOTE(review): scipy's uniform(loc, scale) spans [loc, loc + scale], so this
# is [0.1, 4.1], not the 0-4 range quoted in the advice above -- confirm bounds.
av_bc_rv=stats.uniform(0.1,4.0)
# SFH e-folding time tau in Myr; randint's upper bound is exclusive (max 6499).
tau_rv=stats.randint(500,6500)
# Redshift prior: uniform on [0.01, 6.01] (same loc + scale convention).
z_rv=stats.uniform(0.01,6)
# Star-formation rate prior: log-uniform between 0.01 and 30000 Msun/yr.
sfr_rv=stats.loguniform(0.01,30000)
# AGN fraction prior: Beta(1, 3), skewed towards low AGN contributions.
agn_frac_rv=stats.beta(1,3)
```
```python
from astropy.cosmology import Planck13
```
```python
# Draw a single redshift from the prior and sanity-check the derived
# quantities that will be fed to CIGALE.
z=z_rv.rvs(1)[0]
# Age of the Universe at z, converted from Gyr to Myr.  The builtin int()
# replaces onp.int, a numpy alias deprecated in NumPy 1.20 and removed in 1.24.
int(Planck13.age(z).value*1000)
# Example draw of an allowed Dale 2014 alpha value from the fixed grid.
alpha[alpha_rv.rvs(1)[0]]
```
2.875
```python
nsamp=1
from astropy.constants import L_sun, M_sun
from astropy.table import vstack
# Columns that scale linearly with the overall normalisation (the SFR).
col_scale=['spire_250','spire_350','spire_500','dust.luminosity','sfh.sfr','stellar.m_star']
parameter_names=onp.array(['tau_main','age_main','Av_BC','alpha','fracAGN','redshift'])
all_SEDs=[]
for i in range(0,nsamp):
    # Draw one set of physical parameters from the priors defined above.
    z=z_rv.rvs(1)[0]
    # int() replaces onp.int (deprecated NumPy 1.20, removed 1.24); age_main is
    # the age of the Universe at z in Myr.
    parameters={'tau_main':[tau_rv.rvs(1)[0]],'age_main':[int(Planck13.age(z).value*1000)],
                'Av_BC':[av_bc_rv.rvs(1)[0]],'alpha':[alpha[alpha_rv.rvs(1)[0]]],'fracAGN':[agn_frac_rv.rvs(1)[0]],'redshift':[z]}
    path_to_cigale='/Volumes/pdh_storage/cigale/'
    path_to_ini_file='pcigale_kasia_nn.ini'
    SEDs=cigale.generate_SEDs(parameter_names, parameters, path_to_cigale, path_to_ini_file, filename = 'tmp_single')
    #set more appropriate units for dust (W -> L_sun)
    SEDs['dust.luminosity']=SEDs['dust.luminosity']/L_sun.value
    # Normalise the SED to SFR = 1, then rescale every linear column by ONE
    # SFR draw.  BUG FIX: the original drew a fresh sfr_rv sample inside the
    # column loop, giving each flux/mass column a different, mutually
    # inconsistent normalisation.
    scale=1.0/SEDs['sfh.sfr']
    sfr_sample=sfr_rv.rvs(1)[0]
    for c in col_scale:
        SEDs[c]=SEDs[c]*scale*sfr_sample
    all_SEDs.append(SEDs)
    # Periodically flush accumulated SEDs to disk to bound memory use.
    if i and i % 100 == 0:
        tmp_SEDs=vstack(all_SEDs)
        tmp_SEDs.write('kasia_gen_SEDs_{}.fits'.format(i),overwrite=True)
        all_SEDs=[]
```
---
```python
all_SEDs[0]
```
<i>Table length=1</i>
<table id="table140660779053296" class="table-striped table-bordered table-condensed">
<thead><tr><th>id</th><th>spire_250</th><th>spire_350</th><th>spire_500</th><th>dust.luminosity</th><th>sfh.sfr</th><th>stellar.m_star</th><th>agn.fracAGN</th><th>attenuation.Av_BC</th><th>dust.alpha</th><th>sfh.tau_main</th><th>universe.redshift</th></tr></thead>
<thead><tr><th></th><th>mJy</th><th>mJy</th><th>mJy</th><th>W</th><th>solMass / yr</th><th>solMass</th><th></th><th>mag</th><th></th><th>Myr</th><th></th></tr></thead>
<thead><tr><th>int64</th><th>float64</th><th>float64</th><th>float64</th><th>float64</th><th>float64</th><th>float64</th><th>float64</th><th>float64</th><th>float64</th><th>float64</th><th>float64</th></tr></thead>
<tr><td>0</td><td>0.0022648338953013686</td><td>0.6101981528070004</td><td>0.13285984543787135</td><td>486811658.0261648</td><td>21.20839313475317</td><td>71919897.44752046</td><td>0.100848769635</td><td>2.038996387135</td><td>0.5</td><td>3014.0</td><td>5.5992451949267</td></tr>
</table>
### Generate values for CIGALE
Redshift
```python
onp.array2string(10.0**np.arange(-2.5,0.77,0.1), separator=',',formatter={'float_kind':lambda x: "%.4f" % x}).replace('\n','')
```
'[0.0032,0.0040,0.0050,0.0063,0.0079,0.0100,0.0126,0.0158,0.0200,0.0251, 0.0316,0.0398,0.0501,0.0631,0.0794,0.1000,0.1259,0.1585,0.1995,0.2512, 0.3162,0.3981,0.5012,0.6310,0.7943,1.0000,1.2589,1.5849,1.9953,2.5119, 3.1623,3.9810,5.0118]'
```python
onp.array2string(np.arange(0.1,4,0.3),separator=',',formatter={'float_kind':lambda x: "%.4f" % x}).replace('\n','')
```
'[0.1000,0.4000,0.7000,1.0000,1.3000,1.6000,1.9000,2.2000,2.5000,2.8000, 3.1000,3.4000,3.7000]'
AGN frac
```python
onp.array2string(np.arange(0.001,1,0.075),separator=',',formatter={'float_kind':lambda x: "%.3f" % x}).replace('\n','')
```
'[0.001,0.076,0.151,0.226,0.301,0.376,0.451,0.526,0.601,0.676,0.751,0.826, 0.901,0.976]'
```python
SEDs=Table.read('/Volumes/pdh_storage/cigale/out/models-block-0.fits')
#set more appropriate units for dust
from astropy.constants import L_sun, M_sun
SEDs['dust.luminosity']=SEDs['dust.luminosity']/L_sun.value
```
```python
SEDs=SEDs[onp.isfinite(SEDs['spire_250'])]
```
```python
SEDs
```
<i>Table length=766584</i>
<table id="table140374676319928" class="table-striped table-bordered table-condensed">
<thead><tr><th>id</th><th>spire_250</th><th>spire_350</th><th>spire_500</th><th>dust.luminosity</th><th>sfh.sfr</th><th>stellar.m_star</th><th>agn.fracAGN</th><th>attenuation.Av_BC</th><th>dust.alpha</th><th>sfh.tau_main</th><th>universe.redshift</th></tr></thead>
<thead><tr><th></th><th>mJy</th><th>mJy</th><th>mJy</th><th>W</th><th>solMass / yr</th><th>solMass</th><th></th><th>mag</th><th></th><th>Myr</th><th></th></tr></thead>
<thead><tr><th>int64</th><th>float64</th><th>float64</th><th>float64</th><th>float64</th><th>float64</th><th>float64</th><th>float64</th><th>float64</th><th>float64</th><th>float64</th><th>float64</th></tr></thead>
<tr><td>0</td><td>6.664787981156792e-09</td><td>2.579075857126304e-09</td><td>9.154350728346564e-10</td><td>0.038722519393397305</td><td>9.100896611075192e-13</td><td>0.7106245236562091</td><td>0.001</td><td>0.1</td><td>0.0625</td><td>500.0</td><td>0.0032</td></tr>
<tr><td>1</td><td>4.272567065611162e-09</td><td>1.6536408901353577e-09</td><td>5.87030971959381e-10</td><td>0.038722519393397305</td><td>9.100896611075192e-13</td><td>0.7106245236562091</td><td>0.001</td><td>0.1</td><td>0.0625</td><td>500.0</td><td>0.004</td></tr>
<tr><td>2</td><td>2.7401305723303493e-09</td><td>1.0607611243982099e-09</td><td>3.766230281788533e-10</td><td>0.038722519393397305</td><td>9.100896611075192e-13</td><td>0.7106245236562091</td><td>0.001</td><td>0.1</td><td>0.0625</td><td>500.0</td><td>0.005</td></tr>
<tr><td>3</td><td>1.7306250478806803e-09</td><td>6.701473917718022e-10</td><td>2.379845841843187e-10</td><td>0.038722519393397305</td><td>9.100896611075192e-13</td><td>0.7106245236562091</td><td>0.001</td><td>0.1</td><td>0.0625</td><td>500.0</td><td>0.0063</td></tr>
<tr><td>4</td><td>1.1042574710404118e-09</td><td>4.277475805942584e-10</td><td>1.5194164617231545e-10</td><td>0.038722519393397305</td><td>9.100896611075192e-13</td><td>0.7106245236562091</td><td>0.001</td><td>0.1</td><td>0.0625</td><td>500.0</td><td>0.0079</td></tr>
<tr><td>5</td><td>6.921688128956865e-10</td><td>2.682413523259305e-10</td><td>9.531300799051766e-11</td><td>0.038722519393397305</td><td>9.100896611075192e-13</td><td>0.7106245236562091</td><td>0.001</td><td>0.1</td><td>0.0625</td><td>500.0</td><td>0.01</td></tr>
<tr><td>6</td><td>4.383324492387039e-10</td><td>1.6996450724863183e-10</td><td>6.041772738676337e-11</td><td>0.038722519393397305</td><td>9.100896611075192e-13</td><td>0.7106245236562091</td><td>0.001</td><td>0.1</td><td>0.0625</td><td>500.0</td><td>0.0126</td></tr>
<tr><td>7</td><td>2.8060358442643693e-10</td><td>1.0887903487586143e-10</td><td>3.872338294196482e-11</td><td>0.038722519393397305</td><td>9.100896611075192e-13</td><td>0.7106245236562091</td><td>0.001</td><td>0.1</td><td>0.0625</td><td>500.0</td><td>0.0158</td></tr>
<tr><td>8</td><td>1.76640824425344e-10</td><td>6.860116573487778e-11</td><td>2.4414330913992916e-11</td><td>0.038722519393397305</td><td>9.100896611075192e-13</td><td>0.7106245236562091</td><td>0.001</td><td>0.1</td><td>0.0625</td><td>500.0</td><td>0.02</td></tr>
<tr><td>...</td><td>...</td><td>...</td><td>...</td><td>...</td><td>...</td><td>...</td><td>...</td><td>...</td><td>...</td><td>...</td><td>...</td></tr>
<tr><td>936920</td><td>4.180120188131009e-09</td><td>2.279554068131366e-09</td><td>8.862791987996791e-10</td><td>2.7215645330526343</td><td>3.043543887708004e-10</td><td>0.7458014251288891</td><td>0.976</td><td>3.7</td><td>3.8125</td><td>6500.0</td><td>0.1585</td></tr>
<tr><td>936921</td><td>2.6973612030185035e-09</td><td>1.53319068418658e-09</td><td>6.107980206774365e-10</td><td>2.7215645330526343</td><td>3.043543887708004e-10</td><td>0.7458014251288891</td><td>0.976</td><td>3.7</td><td>3.8125</td><td>6500.0</td><td>0.1995</td></tr>
<tr><td>936922</td><td>1.739194959219224e-09</td><td>1.0405509059273366e-09</td><td>4.2836867144943303e-10</td><td>2.7215645330526343</td><td>3.043543887708004e-10</td><td>0.7458014251288891</td><td>0.976</td><td>3.7</td><td>3.8125</td><td>6500.0</td><td>0.2512</td></tr>
<tr><td>936923</td><td>1.1203244047978009e-09</td><td>7.117275303365271e-10</td><td>3.0684724728393533e-10</td><td>2.7215645330526343</td><td>3.043543887708004e-10</td><td>0.7458014251288891</td><td>0.976</td><td>3.7</td><td>3.8125</td><td>6500.0</td><td>0.3162</td></tr>
<tr><td>936924</td><td>7.184718848946818e-10</td><td>4.904663179404575e-10</td><td>2.2385185558941362e-10</td><td>2.7215645330526343</td><td>3.043543887708004e-10</td><td>0.7458014251288891</td><td>0.976</td><td>3.7</td><td>3.8125</td><td>6500.0</td><td>0.3981</td></tr>
<tr><td>936925</td><td>4.5472465481360163e-10</td><td>3.4056288922272863e-10</td><td>1.6601722094507004e-10</td><td>2.7215645330526343</td><td>3.043543887708004e-10</td><td>0.7458014251288891</td><td>0.976</td><td>3.7</td><td>3.8125</td><td>6500.0</td><td>0.5012</td></tr>
<tr><td>936926</td><td>2.809778318620063e-10</td><td>2.365965208434481e-10</td><td>1.2557261930427586e-10</td><td>2.7215645330526343</td><td>3.043543887708004e-10</td><td>0.7458014251288891</td><td>0.976</td><td>3.7</td><td>3.8125</td><td>6500.0</td><td>0.631</td></tr>
<tr><td>936927</td><td>1.6942009956054125e-10</td><td>1.6188532123400123e-10</td><td>9.648888800160375e-11</td><td>2.7215645330526343</td><td>3.043543887708004e-10</td><td>0.7458014251288891</td><td>0.976</td><td>3.7</td><td>3.8125</td><td>6500.0</td><td>0.7943</td></tr>
<tr><td>936928</td><td>9.770824072814998e-11</td><td>1.0890000383211227e-10</td><td>7.3871444850475e-11</td><td>2.7215645330526343</td><td>3.043543887708004e-10</td><td>0.7458014251288891</td><td>0.976</td><td>3.7</td><td>3.8125</td><td>6500.0</td><td>1.0</td></tr>
<tr><td>936929</td><td>5.431522249781972e-11</td><td>7.013899714623192e-11</td><td>5.648483209645334e-11</td><td>2.7215645330526343</td><td>3.043543887708004e-10</td><td>0.7458014251288891</td><td>0.976</td><td>3.7</td><td>3.8125</td><td>6500.0</td><td>1.2589</td></tr>
</table>
```python
from astropy.table import vstack
```
```python
(1.0/dataset['sfh.sfr'])*dataset['sfh.sfr']*10.0**scale_table
```
DeviceArray([1.0000000e+08, 1.0000000e+08, 1.0000000e+08, ...,
5.6234133e+13, 5.6234133e+13, 5.6234133e+13], dtype=float32)
```python
# define a range of scales
# Each SED is replicated at every log10(normalisation) in [8, 14) with step
# 0.25; the linear columns below are then multiplied by 10**scale.
scale=np.arange(8,14,0.25)
#repeat the SED table by the number of scale steps
dataset=vstack([SEDs for i in range(0,scale.size)])
#repeat the scale range by the number of entries in table (so I can easily multiply each column)
scale_table=np.repeat(scale,len(SEDs))
#parameters to scale
col_scale=['spire_250','spire_350','spire_500','dust.luminosity','sfh.sfr','stellar.m_star']
for c in col_scale:
    dataset[c]=dataset[c]*10.0**scale_table
# Derived quantities used as network inputs/targets.
dataset['log10_sfh.sfr']=onp.log10(dataset['sfh.sfr'])
dataset['log10_universe.redshift']=onp.log10(dataset['universe.redshift'])
# transform AGN fraction to logit scale
dataset['logit_agnfrac']=onp.log(dataset['agn.fracAGN']/(1-dataset['agn.fracAGN']))
#shuffle dataset
# A random permutation without replacement is equivalent to shuffling the rows.
dataset=dataset[onp.random.choice(len(dataset), len(dataset), replace=False)]
```
```python
plt.hist(dataset['log10_sfh.sfr'],bins=(np.arange(0,14)));
```

```python
dataset
```
<i>Table length=18398016</i>
<table id="table140374676318752" class="table-striped table-bordered table-condensed">
<thead><tr><th>id</th><th>spire_250</th><th>spire_350</th><th>spire_500</th><th>dust.luminosity</th><th>sfh.sfr</th><th>stellar.m_star</th><th>agn.fracAGN</th><th>attenuation.Av_BC</th><th>dust.alpha</th><th>sfh.tau_main</th><th>universe.redshift</th><th>log10_sfh.sfr</th><th>log10_universe.redshift</th><th>logit_agnfrac</th></tr></thead>
<thead><tr><th></th><th></th><th></th><th></th><th></th><th></th><th></th><th></th><th>mag</th><th></th><th>Myr</th><th></th><th></th><th></th><th></th></tr></thead>
<thead><tr><th>int64</th><th>float32</th><th>float32</th><th>float32</th><th>float32</th><th>float32</th><th>float32</th><th>float64</th><th>float64</th><th>float64</th><th>float64</th><th>float64</th><th>float32</th><th>float64</th><th>float64</th></tr></thead>
<tr><td>490252</td><td>1160.6235</td><td>483.27942</td><td>163.47063</td><td>6990731300.0</td><td>0.740176</td><td>2342796500.0</td><td>0.151</td><td>3.1</td><td>1.8125</td><td>3500.0</td><td>0.0079</td><td>-0.13066499</td><td>-2.1023729087095586</td><td>-1.726779349496423</td></tr>
<tr><td>428429</td><td>0.006685692</td><td>0.0031328278</td><td>0.0012310973</td><td>370134140.0</td><td>0.037604094</td><td>131436680.0</td><td>0.301</td><td>3.7</td><td>1.0625</td><td>3000.0</td><td>0.631</td><td>-1.4247649</td><td>-0.19997064075586568</td><td>-0.8425404774849347</td></tr>
<tr><td>652959</td><td>31.126167</td><td>20.64707</td><td>9.253553</td><td>128308100000.0</td><td>49.521706</td><td>132313310000.0</td><td>0.301</td><td>0.1</td><td>3.0625</td><td>5000.0</td><td>0.3981</td><td>1.6947956</td><td>-0.40000782241590205</td><td>-0.8425404774849347</td></tr>
<tr><td>442192</td><td>0.079513066</td><td>0.0880193</td><td>0.057997216</td><td>2390347000.0</td><td>0.41623157</td><td>1317451300.0</td><td>0.076</td><td>0.4</td><td>3.0625</td><td>3500.0</td><td>1.0</td><td>-0.38066497</td><td>0.0</td><td>-2.497978731355353</td></tr>
<tr><td>731545</td><td>3343.0225</td><td>1588.2834</td><td>559.03064</td><td>1607026000.0</td><td>0.2885123</td><td>744739500.0</td><td>0.451</td><td>0.4</td><td>3.8125</td><td>5500.0</td><td>0.004</td><td>-0.5398357</td><td>-2.3979400086720375</td><td>-0.19663110200685208</td></tr>
<tr><td>297069</td><td>64940.26</td><td>29118.043</td><td>10031.254</td><td>109114300000.0</td><td>18.232462</td><td>73675686000.0</td><td>0.001</td><td>0.4</td><td>2.4375</td><td>2500.0</td><td>0.0063</td><td>1.2608453</td><td>-2.2006594505464183</td><td>-6.906754778648554</td></tr>
<tr><td>651641</td><td>15.117687</td><td>10.061898</td><td>4.695546</td><td>228167660000.0</td><td>88.06343</td><td>235290030000.0</td><td>0.451</td><td>0.1</td><td>2.0625</td><td>5000.0</td><td>0.631</td><td>1.9447956</td><td>-0.19997064075586568</td><td>-0.19663110200685208</td></tr>
<tr><td>422446</td><td>1345.552</td><td>524.6432</td><td>184.69086</td><td>2069125900000.0</td><td>211.46336</td><td>739122800000.0</td><td>0.376</td><td>3.4</td><td>0.8125</td><td>3000.0</td><td>0.0631</td><td>2.3252351</td><td>-1.1999706407558657</td><td>-0.5065612249795332</td></tr>
<tr><td>15840</td><td>10975444.0</td><td>5164211.5</td><td>1809722.2</td><td>3510720300000.0</td><td>16.183937</td><td>12636890000000.0</td><td>0.301</td><td>0.7</td><td>3.4375</td><td>500.0</td><td>0.0032</td><td>1.2090842</td><td>-2.494850021680094</td><td>-0.8425404774849347</td></tr>
<tr><td>...</td><td>...</td><td>...</td><td>...</td><td>...</td><td>...</td><td>...</td><td>...</td><td>...</td><td>...</td><td>...</td><td>...</td><td>...</td><td>...</td><td>...</td></tr>
<tr><td>910821</td><td>0.28527886</td><td>0.12397792</td><td>0.046376057</td><td>8344027600.0</td><td>0.962453</td><td>2358431200.0</td><td>0.451</td><td>2.5</td><td>1.0625</td><td>6500.0</td><td>0.3981</td><td>-0.016620466</td><td>-0.40000782241590205</td><td>-0.19663110200685208</td></tr>
<tr><td>58962</td><td>65.922844</td><td>54.73664</td><td>29.82822</td><td>1228408800000.0</td><td>2.8779564</td><td>2247192000000.0</td><td>0.601</td><td>3.1</td><td>2.4375</td><td>500.0</td><td>0.7943</td><td>0.4590842</td><td>-0.100015437450609</td><td>0.409633517645344</td></tr>
<tr><td>629153</td><td>20246.586</td><td>8485.822</td><td>2887.1501</td><td>762779100000.0</td><td>84.27728</td><td>235025710000.0</td><td>0.826</td><td>2.8</td><td>1.8125</td><td>4500.0</td><td>0.02</td><td>1.9257106</td><td>-1.6989700043360187</td><td>1.5575394743064488</td></tr>
<tr><td>312169</td><td>71.154</td><td>31.868464</td><td>12.169908</td><td>2863594200000.0</td><td>324.2241</td><td>1310159500000.0</td><td>0.676</td><td>1.3</td><td>1.0625</td><td>2500.0</td><td>0.5012</td><td>2.5108452</td><td>-0.29998893767788765</td><td>0.7354495602506349</td></tr>
<tr><td>756017</td><td>0.033807244</td><td>0.015279059</td><td>0.005686972</td><td>435289900.0</td><td>0.051305547</td><td>132435496.0</td><td>0.376</td><td>1.9</td><td>1.4375</td><td>5500.0</td><td>0.3162</td><td>-1.2898357</td><td>-0.5000381344038097</td><td>-0.5065612249795332</td></tr>
<tr><td>793732</td><td>0.24976146</td><td>0.09897569</td><td>0.035220087</td><td>1360693600.0</td><td>0.5282062</td><td>1325376600.0</td><td>0.001</td><td>0.1</td><td>0.8125</td><td>6000.0</td><td>0.1259</td><td>-0.27719647</td><td>-0.8999742698921374</td><td>-6.906754778648554</td></tr>
<tr><td>686684</td><td>93.57015</td><td>59.161423</td><td>25.373728</td><td>238183660000.0</td><td>27.8481</td><td>74405240000.0</td><td>0.301</td><td>1.9</td><td>3.4375</td><td>5000.0</td><td>0.3162</td><td>1.4447956</td><td>-0.5000381344038097</td><td>-0.8425404774849347</td></tr>
<tr><td>381481</td><td>317.95352</td><td>147.73785</td><td>51.53464</td><td>168833300.0</td><td>0.021146337</td><td>73912280.0</td><td>0.751</td><td>1.0</td><td>3.0625</td><td>3000.0</td><td>0.004</td><td>-1.6747649</td><td>-2.3979400086720375</td><td>1.1039527552994273</td></tr>
<tr><td>278641</td><td>69.425896</td><td>31.19034</td><td>11.923465</td><td>2824174600000.0</td><td>256.11032</td><td>1304143600000.0</td><td>0.076</td><td>3.4</td><td>1.0625</td><td>2000.0</td><td>0.5012</td><td>2.408427</td><td>-0.29998893767788765</td><td>-2.497978731355353</td></tr>
<tr><td>631975</td><td>4519.575</td><td>5312.5625</td><td>3645.5122</td><td>135643430000000.0</td><td>14986.8545</td><td>41794140000000.0</td><td>0.901</td><td>2.8</td><td>3.8125</td><td>4500.0</td><td>1.0</td><td>4.1757107</td><td>0.0</td><td>2.2083854074737483</td></tr>
</table>
## DeepNet building
I will build a multi-input, multi-output deepnet model as my emulator, with parameters as input and the observed fluxes as outputs. I will train on log10 flux to make the model easier to train, and have already standardised the input parameters. I will be using `stax`, which can be thought of as the `Keras` equivalent for `JAX`. This [blog](https://blog.evjang.com/2019/02/maml-jax.html) was a useful starting point.
I will use batches to help train the network
```python
dataset=dataset[0:18000000]
```
```python
len(dataset)/1200
```
15000.0
```python
split=0.75
inner_batch_size=1200
# Cut the (already shuffled) dataset into train/validation.  FIX: use the
# `split` variable instead of re-hard-coding the literal 0.75.
train_ind=onp.round(split*len(dataset)).astype(int)
train=dataset[0:train_ind]
validation=dataset[train_ind:]
input_cols=['log10_sfh.sfr','agn.fracAGN','universe.redshift', 'attenuation.Av_BC','dust.alpha','sfh.tau_main']
output_cols=['spire_250','spire_350','spire_500']

def make_batches(table, cols, log=False):
    """Stack `cols` of `table` into a (n_batches, inner_batch_size, n_cols)
    float array; targets (log=True) are stored as natural-log flux.

    NOTE(review): the reshape-then-.T reproduces the original layout exactly,
    but reshape's C ordering means rows are interleaved across batches rather
    than taken contiguously -- harmless here because the table was shuffled.
    """
    data = np.asarray([np.log(col.data) if log else col.data for col in table[cols].values()])
    n_batches = onp.round(len(table)/inner_batch_size).astype(int)
    return data.reshape(len(cols), inner_batch_size, n_batches).T.astype(float)

train_batch_X=make_batches(train, input_cols)
train_batch_Y=make_batches(train, output_cols, log=True)
validation_batch_X=make_batches(validation, input_cols)
validation_batch_Y=make_batches(validation, output_cols, log=True)
```
```python
# Use stax to set up network initialization and evaluation functions
# Four hidden Dense(128) layers with LeakyRelu activations (Relu on the last
# hidden layer), followed by a linear output of one unit per photometric band.
net_init, net_apply = stax.serial(
    Dense(128), LeakyRelu,
    Dense(128), LeakyRelu,
    Dense(128), LeakyRelu,
    Dense(128), Relu,
    Dense(len(output_cols))
)
# Batch dimension is left free (-1); one input feature per physical parameter.
in_shape = (-1, len(input_cols),)
# net_init returns the output shape and the randomly initialised parameters.
out_shape, net_params = net_init(key,in_shape)
```
```python
```
```python
def loss(params, inputs, targets):
    """Mean-squared error between the network outputs and the targets."""
    preds = net_apply(params, inputs)
    residual = targets - preds
    return np.mean(residual ** 2)

def batch_loss(p, x_b, y_b):
    """Average of `loss` over the leading batch axis, evaluated with vmap."""
    per_batch = vmap(partial(loss, p))(x_b, y_b)
    return np.mean(per_batch)
```
```python
# Adam optimiser with a fixed learning rate; optimizers.adam returns the
# (init, update, get_params) triple of pure functions.
opt_init, opt_update, get_params= optimizers.adam(step_size=5e-4)
# Re-initialise the network weights so training starts from scratch.
out_shape, net_params = net_init(key,in_shape)
opt_state = opt_init(net_params)
@jit
def step(i, opt_state, x1, y1):
    """One optimiser step: returns the updated state and the training loss."""
    p = get_params(opt_state)
    # value_and_grad evaluates the loss and its gradient in a single pass,
    # instead of calling batch_loss twice as the original did.
    loss_tmp, g = value_and_grad(batch_loss)(p, x1, y1)
    return opt_update(i, g, opt_state),loss_tmp
np_batched_loss_1 = []
valid_loss=[]
for i in range(10000):
    opt_state, l = step(i, opt_state, train_batch_X, train_batch_Y)
    p = get_params(opt_state)
    # Track validation loss every iteration (un-jitted, so comparatively slow).
    valid_loss.append(batch_loss(p,validation_batch_X,validation_batch_Y))
    np_batched_loss_1.append(l)
    if i % 100 == 0:
        print(i)
net_params = get_params(opt_state)
```
0
```python
# Continue training for a further 2000 iterations.  The optimiser step index
# must keep counting from where the first loop stopped: Adam in
# jax.experimental.optimizers uses the step number for its bias-correction
# terms, so restarting at 0 (as the original did) re-applies the warm-up
# correction to an already-warm optimiser state.
start = len(np_batched_loss_1)
for i in range(2000):
    opt_state, l = step(start + i, opt_state, train_batch_X, train_batch_Y)
    p = get_params(opt_state)
    valid_loss.append(batch_loss(p,validation_batch_X,validation_batch_Y))
    np_batched_loss_1.append(l)
    if i % 100 == 0:
        print(i)
net_params = get_params(opt_state)
```
```python
plt.figure(figsize=(20,10))
plt.semilogy(np_batched_loss_1,label='Training loss')
plt.semilogy(valid_loss,label='Validation loss')
plt.xlabel('Iteration')
plt.ylabel('Loss (MSE)')
plt.legend()
```
<matplotlib.legend.Legend at 0x7fbc8e8fedd8>

## Investigate performance of each band of emulator
To visualise the performance of the trained emulator, I will show the difference between the real and emulated fluxes for each band.
```python
net_params = get_params(opt_state)
predictions = net_apply(net_params,validation_batch_X)
```
```python
validation_batch_X.shape
```
(41, 1200, 3)
```python
validation_batch_X[0,:,:].shape
```
(1200, 3)
```python
# Fractional residuals between emulated and true fluxes; both arrays hold
# natural-log flux, hence np.exp to return to linear units first.
res=((np.exp(predictions)-np.exp(validation_batch_Y))/(np.exp(validation_batch_Y)))
fig,axes=plt.subplots(1,len(output_cols),figsize=(50,len(output_cols)))
for i in range(0,len(output_cols)):
    # Histogram of the percentage error for this band, restricted to +/-10%.
    axes[i].hist(res[:,:,i].flatten()*100.0,np.arange(-10,10,0.1))
    axes[i].set_title(output_cols[i])
    axes[i].set_xlabel(r'$\frac{f_{pred} - f_{True}}{f_{True}} \ \%$ error')
plt.subplots_adjust(wspace=0.5)
```

## Save network
Having trained and validated network, I need to save the network and relevant functions
```python
import cloudpickle
```
```python
# Serialise the trained network with cloudpickle: unlike plain pickle it can
# capture the stax closures (net_init/net_apply) alongside the parameters, so
# the emulator can later be reloaded by xidplus.numpyro_fit.misc.load_emulator.
with open('CIGALE_emulator_20210330_log10sfr_uniformAGN_z.pkl', 'wb') as f:
    cloudpickle.dump({'net_init':net_init,'net_apply': net_apply,'params':net_params}, f)
net_init, net_apply
```
(<function jax.experimental.stax.serial.<locals>.init_fun(rng, input_shape)>,
<function jax.experimental.stax.serial.<locals>.apply_fun(params, inputs, **kwargs)>)
## Does SED look right?
```python
wave=np.array([250,350,500])
```
```python
plt.loglog(wave,np.exp(net_apply(net_params,np.array([2.95, 0.801, 0.1]))),'o')
#plt.loglog(wave,10.0**net_apply(net_params,np.array([3.0,0.0,0.0])),'o')
plt.loglog(wave,dataset[(dataset['universe.redshift']==0.1) & (dataset['agn.fracAGN'] == 0.801) & (dataset['sfh.sfr']>900) & (dataset['sfh.sfr']<1100)][output_cols].values())
```
[<matplotlib.lines.Line2D at 0x7fbc8a5d20b8>]

```python
dataset[(dataset['universe.redshift']==0.1) & (dataset['agn.fracAGN'] == 0.801) & (dataset['sfh.sfr']>900) & (dataset['sfh.sfr']<1100)]
```
<i>Table length=1</i>
<table id="table140447713941040" class="table-striped table-bordered table-condensed">
<thead><tr><th>id</th><th>IRAC1</th><th>megacam_g</th><th>megacam_i</th><th>megacam_r</th><th>megacam_u</th><th>megacam_y</th><th>megacam_z</th><th>mips_24</th><th>spire_250</th><th>spire_350</th><th>spire_500</th><th>dust.luminosity</th><th>dust.mass</th><th>sfh.sfr</th><th>stellar.m_star</th><th>agn.fracAGN</th><th>attenuation.BC_to_ISM_factor</th><th>attenuation.bessell_b</th><th>attenuation.galex_fuv</th><th>attenuation.slope_BC</th><th>attenuation.slope_ISM</th><th>dust.qpah</th><th>dust.umin</th><th>sfh.age</th><th>sfh.burst_age</th><th>sfh.f_burst</th><th>sfh.tau_main</th><th>universe.redshift</th><th>log10_sfh.sfr</th><th>log10_universe.redshift</th><th>logit_agnfrac</th></tr></thead>
<thead><tr><th></th><th></th><th></th><th></th><th></th><th></th><th></th><th></th><th></th><th></th><th></th><th></th><th></th><th></th><th></th><th></th><th></th><th></th><th>mag</th><th>mag</th><th>mag</th><th></th><th></th><th></th><th>Myr</th><th></th><th></th><th></th><th></th><th></th><th></th><th></th></tr></thead>
<thead><tr><th>int64</th><th>float32</th><th>float32</th><th>float32</th><th>float32</th><th>float32</th><th>float32</th><th>float32</th><th>float32</th><th>float32</th><th>float32</th><th>float32</th><th>float32</th><th>float32</th><th>float32</th><th>float32</th><th>float64</th><th>float64</th><th>float64</th><th>float64</th><th>float64</th><th>float64</th><th>float64</th><th>float64</th><th>float64</th><th>float64</th><th>float64</th><th>float64</th><th>float64</th><th>float32</th><th>float64</th><th>float64</th></tr></thead>
<tr><td>2699</td><td>3561.7131</td><td>0.81075984</td><td>2.373399</td><td>1.5528784</td><td>0.25492316</td><td>2.4790812</td><td>3.5117917</td><td>12396.599</td><td>8839.021</td><td>3535.3625</td><td>1214.4366</td><td>8037312500000.0</td><td>4314639400.0</td><td>904.5928</td><td>175056060000.0</td><td>0.801</td><td>0.5</td><td>4.376290746740257</td><td>9.392077535240679</td><td>-0.7</td><td>-0.7</td><td>1.12</td><td>10.0</td><td>1000.0</td><td>50.0</td><td>0.1</td><td>3000.0</td><td>0.1</td><td>2.956453</td><td>-1.0</td><td>1.3925561223438672</td></tr>
</table>
```python
import xidplus
```
/Users/pdh21/anaconda3/envs/xidplus/lib/python3.6/site-packages/dask/config.py:168: YAMLLoadWarning: calling yaml.load() without Loader=... is deprecated, as the default Loader is unsafe. Please read https://msg.pyyaml.org/load for full details.
data = yaml.load(f.read()) or {}
WARNING: AstropyDeprecationWarning: block_reduce was moved to the astropy.nddata.blocks module. Please update your import statement. [astropy.nddata.utils]
```python
from xidplus.numpyro_fit.misc import load_emulator
```
```python
obj=load_emulator('CIGALE_emulator_20210330_log10sfr_uniformAGN_z.pkl')
```
```python
type(obj['params'])
```
list
```python
import json
```
```python
import numpy as np
```
```python
# Save the emulator parameter list to an .npz archive for sharing.
# FIX: np.savez() has no `allow_pickle` parameter -- every extra keyword
# argument is stored as an additional array named after it, so the original
# call (`np.savez(file, obj['params'], allow_pickle=True)`) silently wrote a
# spurious boolean array called "allow_pickle" into the archive.  The
# positional array is stored under the default key 'arr_0', matching the
# `x['arr_0']` access in the reload cell below.  Because the parameter list
# becomes an object-dtype array, np.load() must be called with
# allow_pickle=True (as it is below) to read it back.
np.savez('CIGALE_emulator_20210610_kasia', obj['params'])
```
```python
# IPython line magic: list files in the working directory (not valid plain Python)
ls
```
CIGALE_emulator_20210303.pkl
CIGALE_emulator_20210305.pkl
CIGALE_emulator_20210310_log10sfr_uniformAGN_log10z.pkl
CIGALE_emulator_20210311_log10sfr_uniformAGN_log10z_irac1.pkl
CIGALE_emulator_20210315_log10sfr_uniformAGN_log10z_irac1_megacam.pkl
CIGALE_emulator_20210329_log10sfr_uniformAGN_z_log_irac1_megacam.pkl
CIGALE_emulator_20210330_log10sfr_uniformAGN_z.pkl
CIGALE_emulator_20210420_log10sfr_uniformAGN_z.pkl.npz
GB_emulator_20210106.pkl
GB_emulator_20210209.pkl
GB_emulator_20210323.pkl
GB_emulator_20210324_notlog10z_T.pkl
Greybody_emulator.ipynb
JAX_CIGALE_emulator.ipynb
JAX_greybody_emulator.ipynb
Prior_pred_fits_0.pkl
Prior_pred_fits_1.pkl
Prior_pred_fits_2.pkl
Prior_pred_fits_3.pkl
Prior_pred_fits_4.pkl
Prior_pred_fits_5.pkl
Prior_pred_fits_6.pkl
Prior_pred_fits_7.pkl
Prior_pred_fits_8.pkl
Prior_pred_fits_9.pkl
XID+SED_Model_Building.ipynb
XID+SED_Principled_Workflow-VI.ipynb
XID+SED_Principled_Workflow.ipynb
XID+example_run_script-Prior_predictive_checks.ipynb
XID+posterior_analysis_validation-sed_emulator.ipynb
params_save.npz
params_save.txt
prior_pred_dist.m4v
prior_pred_dist_fit.m4v
test.fits
test_multivariate_cholesky.ipynb
test_numpyro_sed.pkl
test_numpyro_sed_prior_pred.pkl
```python
# Reload a previously saved archive; allow_pickle=True is required because
# the stored parameter list is an object-dtype array (pickled on write)
x = np.load('params_save.npz', allow_pickle=True)
```
```python
# Arrays saved positionally with np.savez get keys 'arr_0', 'arr_1', ...;
# convert the round-tripped object array back to the original list of tuples
x['arr_0'].tolist()
```
[(array([[ 1.53069824e-01, -2.10883975e-01, 2.01293573e-01,
2.00886243e-05, 8.39339555e-05, -1.59104392e-01,
-3.11721444e-01, 8.15778226e-02, -1.30519122e-01,
2.66870164e-04, 2.02755541e-01, 2.00401381e-01,
2.55665570e-01, 9.49345157e-03, -2.87445990e-04,
2.97707498e-01, 2.03495666e-01, 6.32490441e-02,
1.99170873e-01, 1.80119430e-04, 1.20747246e-01,
-1.67233795e-01, -2.98334770e-02, -3.50672485e-06,
1.22021355e-01, 1.01019315e-01, 1.96768597e-01,
-1.26161918e-01, 3.03663150e-03, 2.84228637e-03,
1.29882827e-01, -7.99915791e-02, -1.29280478e-01,
4.20932584e-02, -8.54701623e-02, 5.27626090e-02,
-1.17510051e-01, 1.01156078e-01, 4.60322537e-02,
8.14467445e-02, 5.08916415e-02, 1.00427233e-01,
9.20651853e-03, 4.08653691e-02, -4.04455874e-04,
-8.63167420e-02, -1.63388662e-02, -6.32612929e-02,
-4.41273190e-02, 1.39217749e-01, 1.20777473e-01,
-2.40870282e-01, 2.55658120e-01, 1.09948285e-01,
-3.47286537e-02, 2.67806593e-02, 9.29918215e-02,
1.18900443e-05, -1.36361867e-01, 8.80834250e-06,
8.88264403e-02, -9.57389420e-05, -9.50625315e-02,
2.03881428e-01, -1.61554329e-02, -1.62480952e-04,
-2.17100427e-01, 6.52862415e-02, -2.11271066e-02,
-8.55357293e-03, 1.26932263e-01, -1.71124235e-01,
-5.88358827e-02, -5.16286343e-02, -1.39274625e-05,
5.35593834e-04, 9.61367711e-02, 1.18201040e-01,
-4.81214654e-03, 9.21703850e-06, -6.21714629e-02,
-8.35517049e-02, 1.48129642e-01, 9.29927846e-05,
1.15547804e-02, -1.89512312e-01, 4.78157401e-02,
1.76594645e-01, 1.74783200e-01, 5.14295287e-02,
8.22573304e-02, 9.21452641e-02, 4.19164710e-02,
-2.96897590e-01, -3.95684547e-05, -1.62488356e-01,
-5.28006442e-03, -2.46928513e-01, 1.66877523e-01,
-7.30078593e-02, -5.51951900e-02, -2.95307606e-01,
5.55741927e-03, -1.36754781e-01, 3.92581522e-02,
-6.33591320e-04, 2.59832561e-01, -1.40803531e-01,
2.81424731e-01, 2.48177692e-01, -2.55979836e-01,
-2.00387673e-03, 9.41594772e-06, 4.10088524e-02,
-2.16915846e-01, 2.05238029e-01, 5.90725578e-02,
-1.07257450e-02, 4.88407686e-02, 1.20324254e-01,
-3.98441069e-02, -8.82441998e-02, -6.40106574e-02,
9.10342392e-03, 3.94968502e-03, 6.52407557e-02,
-4.41694545e-04, 2.32200782e-05],
[-9.56135169e-02, -4.26747017e-02, 2.31635168e-01,
-4.34760768e-05, -1.41721021e-03, -1.05505474e-01,
-1.71429455e-01, 2.83397526e-01, -2.90613860e-01,
1.34984814e-02, -2.04954833e-01, -1.09364532e-01,
2.36716960e-02, 1.02085277e-01, -7.57721486e-04,
3.01513523e-01, -7.43361935e-02, 6.67992011e-02,
-2.26764724e-01, 1.24011317e-03, -6.82956874e-02,
-2.27028634e-02, -3.38605016e-01, -7.59451223e-06,
2.47587293e-01, -1.05594315e-01, 6.80167899e-02,
-3.21082734e-02, 2.73950666e-01, 2.28774473e-02,
1.39960796e-01, 1.27722230e-02, -1.43947393e-01,
3.11916452e-02, -1.90591291e-01, -8.27737749e-02,
-1.02607206e-01, 1.33461252e-01, -1.05700260e-02,
-1.02083698e-01, 5.75763918e-02, 1.34090975e-01,
2.15719789e-01, 2.12458178e-01, -4.04632092e-03,
-3.49481136e-01, 2.00065210e-01, 2.42629960e-01,
1.93042323e-01, 2.50296980e-01, 8.94507915e-02,
-9.58382115e-02, -2.28494462e-02, 2.36369818e-01,
-1.27528459e-01, -3.69563773e-02, -4.67114002e-02,
2.74982722e-03, -9.03799571e-03, -4.51136657e-05,
2.58287728e-01, 3.40779708e-03, 9.73177031e-02,
-6.85589537e-02, 6.38271775e-03, 1.67258189e-03,
1.97855867e-02, -9.07262564e-02, -2.09668845e-01,
-4.53785360e-02, 1.67807788e-01, -8.33287239e-02,
3.52400422e-01, 1.22029133e-01, 8.00654367e-02,
9.65318363e-03, 1.72288101e-02, -1.01203807e-02,
1.56366780e-01, -1.68244774e-03, -2.37100329e-02,
-1.73389837e-01, 1.95295420e-02, -1.99440308e-03,
8.00025314e-02, -5.56620248e-02, 1.02638401e-01,
-1.10902466e-01, 3.06838099e-02, -4.44191992e-02,
-8.43552127e-02, -1.01193162e-02, 2.33832449e-02,
-6.29129075e-03, 4.06327425e-03, -8.32595825e-02,
-3.15990485e-02, -1.56893134e-02, 2.82309711e-01,
2.82361269e-01, -5.72224781e-02, 1.26476690e-01,
-2.54863471e-01, 2.34438434e-01, 2.19621658e-01,
-3.14361751e-01, 1.25776291e-01, 3.03557932e-01,
5.59490025e-02, 1.51903592e-02, -2.60025859e-02,
-5.54666389e-03, -1.84743130e-05, -6.40727952e-02,
-2.44256482e-01, -4.81589586e-02, -2.26609573e-01,
1.23653643e-01, 1.95894942e-01, -6.13540150e-02,
1.45041913e-01, -2.85595238e-01, -4.65233065e-02,
5.48471771e-02, 1.05303042e-01, 2.79892176e-01,
5.61308339e-02, -1.89330840e-05],
[ 1.41446022e-02, -2.01077163e-01, -1.00045651e-01,
-8.03999066e-01, -8.95831764e-01, 4.21975218e-02,
-1.63626343e-01, -2.76833862e-01, -9.76875201e-02,
5.06179988e-01, 4.87757809e-02, 1.41420245e-01,
-2.07441241e-01, 4.01056767e-01, -8.05710256e-01,
6.44692872e-03, 2.21009120e-01, -1.69436529e-01,
-6.67921826e-02, -1.05775523e+00, 8.38957876e-02,
-2.98910160e-02, -1.46367446e-01, -6.53301001e-01,
-1.04626469e-01, -1.58328131e-01, 2.53040403e-01,
6.34190068e-02, 4.76733029e-01, -5.84724784e-01,
3.33633602e-01, -1.21292204e-01, -2.23500072e-03,
2.00990856e-01, -1.68379713e-02, 1.11320294e-01,
-1.31700095e-02, -1.03982717e-01, -4.40700680e-01,
2.74899751e-01, 2.59439200e-01, 2.15766475e-01,
3.26091349e-01, -6.15844578e-02, -6.39787376e-01,
1.57167748e-01, -1.39899984e-01, 8.18154663e-02,
-2.17399538e-01, -6.46281019e-02, 1.34346440e-01,
2.12652043e-01, -9.76891369e-02, 3.79553400e-02,
1.54967695e-01, -4.33932215e-01, -1.18579738e-01,
-1.39027762e+00, 2.06093520e-01, -7.32906520e-01,
-2.59851754e-01, -9.46300507e-01, 5.03597856e-02,
-7.38284066e-02, -3.75461280e-01, -8.37107539e-01,
-2.82007039e-01, -1.31407663e-01, 1.10969849e-01,
2.02970564e-01, -7.02738762e-02, 3.28801498e-02,
1.80798143e-01, -3.50165248e-01, 4.46210891e-01,
-6.59628510e-01, -2.87261099e-01, 2.80246645e-01,
2.50837177e-01, -1.16137397e+00, -1.68976769e-01,
-8.80870670e-02, -4.61860485e-02, -9.82364774e-01,
3.16690683e-01, 1.90806836e-01, 5.66026270e-02,
-1.44116074e-01, -2.78943963e-03, 2.78720170e-01,
-8.42702761e-02, -4.45272118e-01, 2.31863722e-01,
3.17606293e-02, -1.17060268e+00, 8.20184126e-02,
-5.30681014e-01, 1.83141917e-01, -4.02189136e-01,
-2.00566091e-03, 8.20839126e-03, 2.02945963e-01,
1.09732404e-01, 2.25487888e-01, 4.87290174e-02,
-1.38762947e-02, 1.79363459e-01, 1.78430855e-01,
-2.75342762e-02, -1.79975152e-01, -2.09120149e-03,
-6.71783090e-01, -6.95732594e-01, 2.93099910e-01,
-2.42147833e-01, -2.94341862e-01, 5.07360250e-02,
3.43546301e-01, 6.06366359e-02, 9.93927941e-02,
1.48736641e-01, 2.39289686e-01, 1.30173057e-01,
3.49565655e-01, 1.92776322e-01, -5.48308827e-02,
-4.40925032e-01, -8.10449481e-01]], dtype=float32),
array([ 0.04993482, -0.01212634, 0.0791207 , 0.00492666, 0.01319074,
-0.10325487, -0.1346548 , 0.10120684, -0.0575023 , -0.02011754,
0.06913678, 0.04907846, 0.04280977, 0.00189588, 0.00704372,
0.05355757, 0.06565716, 0.08904282, 0.03973582, 0.00688254,
0.06209717, -0.08303745, -0.03966172, 0.00399084, 0.06877296,
0.12979364, 0.06964204, 0.31554383, 0.13802093, 0.02556979,
0.4289254 , -0.0503752 , -0.07069702, 0.03091657, -0.06025747,
0.0746284 , -0.08132786, 0.04863249, 0.06814621, 0.0508422 ,
0.08750687, 0.06362367, 0.05117381, 0.07356688, 0.02762729,
0.09656563, 0.07198024, -0.26193607, -0.0602057 , 0.08606211,
0.07281027, -0.06211648, 0.07160514, 0.06034514, 0.30626503,
0.0429852 , 0.14285904, 0.03020917, 0.13712466, 0.00450039,
0.09005794, 0.0123495 , 0.36532483, 0.08149678, 0.18150629,
0.00565592, -0.11370263, 0.09517168, 0.21982178, 0.08685449,
0.08191731, -0.07257076, 0.2534638 , 0.3897213 , -0.00143844,
0.01895614, 0.10273186, 0.11818916, 0.00264214, 0.0128728 ,
-0.08194415, -0.0661905 , 0.07294855, 0.00806362, -0.00107712,
-0.00505454, 0.06896237, 0.0813387 , 0.06515121, -0.07235986,
0.07138455, 0.10467188, 0.16391854, -0.0606044 , 0.010386 ,
0.19612047, 0.10880241, -0.08573751, 0.07287953, 0.3737235 ,
-0.02502817, -0.07157979, -0.01277431, 0.14055003, 0.05926196,
0.29733676, 0.05263878, -0.1505707 , 0.05544473, 0.07868218,
-0.07834595, 0.05206577, 0.00429451, 0.05404095, -0.09889918,
-0.0151882 , 0.09757402, 0.19194539, 0.07196058, 0.03609553,
-0.3547578 , 0.04935162, 0.14975967, 0.01198729, 0.01796428,
0.02070589, 0.04502893, 0.00497391], dtype=float32)),
(),
(array([[-0.03422838, -0.02585936, 0.07256419, ..., 0.10604588,
0.07892369, 0.1075211 ],
[-0.02576409, -0.04324246, -0.14388034, ..., -0.14141543,
-0.14632596, -0.13177887],
[-0.03215259, 0.08260014, 0.04717227, ..., 0.12862837,
-0.10882525, 0.09364428],
...,
[-0.08749322, -0.0168451 , -0.05433867, ..., -0.03359725,
-0.00382601, 0.15514964],
[-0.10678113, 0.2651945 , -0.29697 , ..., -0.05284815,
-0.02245768, 0.07594308],
[-0.8964345 , 0.4518805 , -0.8790541 , ..., 0.1999891 ,
-0.5511756 , 0.20147882]], dtype=float32),
array([ 0.04318503, 0.06401413, 0.10552775, 0.1473793 , 0.03431362,
-0.04497027, 0.04367442, 0.07699274, 0.04165394, 0.07037516,
0.08304454, 0.0683172 , 0.00262007, 0.05374337, 0.03608707,
0.16427258, 0.04042302, -0.00634741, -0.02428112, 0.06873304,
0.0518212 , 0.04075989, 0.06424611, 0.05797005, 0.0316513 ,
0.2345811 , -0.02713604, 0.04995387, -0.04329855, 0.1378327 ,
0.05642162, 0.06783304, 0.0543987 , 0.05031141, -0.02864387,
0.1944591 , 0.20191945, -0.23888457, -0.00107816, 0.05796514,
0.06788929, 0.06220463, 0.04600336, 0.04127879, 0.23460324,
0.02737976, -0.02148593, 0.12680675, 0.07559889, 0.09663132,
0.09608997, 0.02774669, 0.05582898, 0.01978187, 0.0213549 ,
0.15664043, 0.05999393, -0.03036105, 0.07454889, 0.12296706,
0.00878114, 0.04319701, 0.03167537, 0.05068729, -0.01067725,
0.06127113, 0.14654745, -0.01772743, 0.04000608, 0.07163013,
0.05569566, 0.06486893, -0.03135101, 0.04571848, 0.07377528,
0.06323048, 0.05599957, -0.00325564, 0.05290375, 0.18670882,
0.04943348, 0.05839921, -0.01254695, 0.00374835, 0.04091755,
0.06934862, 0.05295921, 0.12965381, 0.26518968, 0.02922123,
0.0759641 , 0.06503931, -0.02622866, 0.08374478, -0.05003466,
0.04679668, 0.00959557, -0.00260884, -0.01219916, 0.0059711 ,
0.05894898, 0.04760699, 0.04448665, 0.14146328, 0.04171022,
0.00039654, 0.14147858, 0.05612815, 0.03840605, 0.04201587,
0.0598927 , 0.0404181 , -0.0121826 , 0.002291 , 0.02991664,
0.06338561, 0.05918598, 0.10408899, -0.05643817, 0.04786403,
0.17926623, 0.04988811, 0.05208017, 0.05186151, 0.0571902 ,
0.05716937, -0.02336271, 0.04936297], dtype=float32)),
(),
(array([[-4.78656381e-01, -4.84813273e-01, -5.50405860e-01],
[ 3.35031658e-01, 1.76466942e-01, 1.48843542e-01],
[-3.45053434e-01, -6.55529320e-01, -2.37278402e-01],
[-9.41956460e-01, -9.29954112e-01, -8.69155824e-01],
[ 2.55623072e-01, -3.67746176e-03, 1.26485467e-01],
[-5.47391735e-02, -1.23208657e-01, -1.38787791e-01],
[ 2.61397302e-01, 2.81228703e-02, 6.48772046e-02],
[ 2.01461002e-01, 1.97006389e-01, 1.24496259e-01],
[ 1.88138280e-02, 2.08875373e-01, 2.70739555e-01],
[ 2.38008395e-01, 2.37070963e-01, 4.67405587e-01],
[ 1.37017727e-01, 3.44284713e-01, 1.73349738e-01],
[ 2.18866974e-01, 2.30267376e-01, -6.88656420e-02],
[-6.76326990e-01, -6.99695110e-01, -1.38253331e-01],
[ 7.44611695e-02, 3.46906662e-01, 1.94446564e-01],
[ 1.27352834e-01, 3.37143987e-02, 7.34688267e-02],
[-1.10179520e+00, -9.15884078e-01, -9.38202620e-01],
[ 2.16434851e-01, 1.61506251e-01, 4.67592180e-02],
[-8.43614116e-02, -1.60350174e-01, -4.02493781e-04],
[-2.11055398e-01, -8.97642747e-02, 5.72317876e-02],
[ 2.15646937e-01, 2.57510275e-01, 1.78803712e-01],
[ 2.71565050e-01, -5.99395931e-02, 1.79622740e-01],
[ 7.15189651e-02, 2.59319156e-01, 2.61098385e-01],
[ 1.86891183e-01, 2.48789236e-01, 2.18168437e-01],
[ 1.84291024e-02, 2.42671505e-01, 2.19747853e-02],
[ 2.03428015e-01, 3.54187727e-01, 2.82053471e-01],
[ 7.03351319e-01, 8.09539080e-01, 7.72811830e-01],
[ 9.11437124e-02, -1.44656196e-01, 2.22388536e-01],
[ 2.26557896e-01, -1.10934088e-02, 2.13816017e-01],
[-1.12496905e-01, -1.87045932e-01, -5.97019009e-02],
[-7.61769235e-01, -7.72123635e-01, -9.75890219e-01],
[ 3.05190325e-01, 1.32497445e-01, 2.17855945e-01],
[ 1.31501675e-01, 1.12660259e-01, 1.65270157e-02],
[-5.56640387e-01, -4.99232233e-01, -2.92703152e-01],
[ 2.19774038e-01, 3.95686179e-02, 2.31017187e-01],
[-1.77604914e-01, -6.38101920e-02, -4.09325399e-02],
[-1.03784680e+00, -9.10168588e-01, -1.07743847e+00],
[-1.00170982e+00, -1.04068625e+00, -8.59334171e-01],
[ 8.45464468e-01, 2.08024696e-01, -4.88651544e-02],
[-4.31112051e-01, -1.91040486e-01, -3.56914222e-01],
[ 2.79779643e-01, 2.50112742e-01, 2.09560946e-01],
[ 5.63736148e-02, 2.27240503e-01, 2.81559199e-01],
[-7.16340959e-01, -4.02274430e-01, -5.70875406e-01],
[ 2.46527553e-01, -8.15471187e-02, 2.10321933e-01],
[ 1.37722507e-01, -1.67197183e-01, 3.23957533e-01],
[-8.45782340e-01, -1.05558038e+00, -1.12943280e+00],
[-3.96439135e-01, -2.55469799e-01, -4.35159922e-01],
[-2.39666134e-01, 1.18508175e-01, -3.05553824e-01],
[-6.16362095e-01, -5.21952748e-01, -6.37968063e-01],
[ 1.39270779e-02, 2.06092358e-01, 2.47910649e-01],
[-6.25008523e-01, -6.01777017e-01, -5.92815816e-01],
[ 1.96474746e-01, 1.98963687e-01, -1.47557515e-03],
[ 1.63086876e-01, 1.93513006e-01, 2.37006247e-01],
[ 1.29517719e-01, 1.98047608e-01, 2.71610498e-01],
[-1.42343462e-01, 7.90357590e-02, -8.58355612e-02],
[-5.27097523e-01, -3.60084742e-01, -4.36733454e-01],
[-8.70184779e-01, -2.06790432e-01, -1.00296281e-01],
[ 1.94708794e-01, 6.13125302e-02, 4.15276922e-02],
[ 1.23108901e-01, -2.63996720e-01, -2.07525179e-01],
[ 8.23504180e-02, 2.37032309e-01, 1.45315722e-01],
[-3.39615971e-01, -5.39651752e-01, -5.03607452e-01],
[-1.77049398e-01, -7.85850435e-02, -1.31696507e-01],
[ 4.49967720e-02, 3.20405841e-01, 1.52601779e-01],
[ 2.26537839e-01, 2.06401661e-01, -1.26141608e-01],
[-9.73755196e-02, 3.00717533e-01, 2.03305751e-01],
[-1.77933872e-02, -1.02676630e-01, -3.10888700e-02],
[ 2.64976263e-01, 2.50689447e-01, 1.67717174e-01],
[-2.89391667e-01, -5.38424611e-01, -5.13043404e-01],
[-1.12035535e-01, -2.91938297e-02, 5.71100600e-02],
[-6.81093931e-01, -7.79274285e-01, -8.95485103e-01],
[ 7.11113261e-03, 2.08368346e-01, 1.09029703e-01],
[ 2.67163366e-01, 9.01261866e-02, 2.32021391e-01],
[ 7.61629567e-02, 1.28208548e-01, 1.77933559e-01],
[-2.42862012e-02, -1.42082199e-01, 9.67771187e-02],
[ 1.93336159e-01, 8.01900402e-02, 1.84268087e-01],
[ 3.80656809e-01, 1.73509553e-01, 3.48442614e-01],
[-4.12368387e-01, -5.81474602e-01, -3.72003049e-01],
[ 2.31199354e-01, 9.51954871e-02, 9.01686102e-02],
[-2.30810836e-01, 3.19271423e-02, -2.17658013e-01],
[ 1.37645110e-01, 1.10636175e-01, 1.41822726e-01],
[-8.80627453e-01, -1.19069552e+00, -1.06030595e+00],
[ 1.34634405e-01, 2.07669675e-01, 3.21988791e-01],
[ 1.95426956e-01, 2.12161362e-01, 1.53630190e-02],
[-1.22733235e-01, 2.31809001e-02, -2.72758193e-02],
[ 3.04959603e-02, -6.61364049e-02, -2.81280249e-01],
[ 1.67098984e-01, 5.76136671e-02, 2.03342289e-01],
[ 2.42621675e-01, 4.24271375e-01, 3.72970521e-01],
[ 3.52295369e-01, 1.11584626e-01, -6.89266324e-02],
[-4.39384192e-01, -4.31890994e-01, -6.38583899e-01],
[ 5.71117282e-01, 4.89224911e-01, 3.75584066e-01],
[-3.42314690e-01, -6.00230023e-02, 3.19120228e-01],
[-5.33911943e-01, -4.10487771e-01, -4.53867435e-01],
[ 6.27191663e-02, 1.59758374e-01, 2.63538938e-02],
[ 1.95248798e-01, 3.79788578e-02, 2.10875288e-01],
[-5.45423508e-01, -3.39853108e-01, -3.81522626e-01],
[-2.18804523e-01, -5.64338192e-02, -2.85992641e-02],
[-5.62148280e-02, 2.20772356e-01, 8.28550979e-02],
[-4.09969002e-01, -3.73499215e-01, -1.92373544e-01],
[-5.19106209e-01, -1.23261869e-01, -1.96268767e-01],
[-1.57062620e-01, -1.10868244e-02, 6.20706715e-02],
[-1.57321051e-01, -3.06500614e-01, -2.30024591e-01],
[ 3.59437205e-02, 3.52994859e-01, 2.38882601e-01],
[ 1.36312008e-01, 1.38561815e-01, 2.23111033e-01],
[ 1.66426688e-01, 1.44103363e-01, 1.08864978e-01],
[-4.31933135e-01, -4.29439902e-01, -5.28308690e-01],
[-4.10688035e-02, 2.73244053e-01, 1.24709614e-01],
[-1.84629008e-01, -1.24577269e-01, 5.33142388e-02],
[-1.08149683e+00, -8.98993015e-01, -1.14275980e+00],
[ 2.08761305e-01, 1.56309798e-01, -1.25824418e-02],
[-4.95516181e-01, -3.53564203e-01, -1.31432191e-01],
[ 3.55273299e-02, 1.20648474e-01, 2.36399055e-01],
[ 8.50410089e-02, -1.47752538e-02, 2.15277553e-01],
[ 2.11633042e-01, 1.01077348e-01, 3.30886878e-02],
[-1.06111094e-01, 1.07693918e-01, -9.53614414e-02],
[-1.75395906e-01, -3.53530228e-01, -1.19211458e-01],
[ 2.68203467e-01, 2.82129109e-01, -1.54477522e-01],
[ 1.70596585e-01, 1.52597874e-01, 6.76318184e-02],
[ 5.46143129e-02, 1.23157211e-01, 8.29520896e-02],
[-5.31307220e-01, -1.78458467e-01, -5.32431602e-01],
[-1.97006300e-01, -1.22562185e-01, -7.49048591e-02],
[ 2.81229258e-01, -6.29485771e-02, 2.03068763e-01],
[-4.78337079e-01, -7.24266708e-01, -7.22120702e-01],
[ 3.39150280e-01, 6.52333498e-02, 1.42264619e-01],
[ 1.33290380e-01, 3.07468604e-02, 2.74779916e-01],
[-7.64167402e-04, 4.02096054e-03, 2.88234293e-01],
[ 2.67466605e-01, 2.52607644e-01, 2.24442825e-01],
[ 1.75580591e-01, 1.17959879e-01, 2.54109591e-01],
[-1.54530272e-01, -1.21206172e-01, 6.84056804e-02],
[ 3.03603262e-01, 1.25999466e-01, 8.91618878e-02]], dtype=float32),
array([0.02325003, 0.04449695, 0.04215444], dtype=float32))]
```python
# Display the in-memory parameters to compare against the round-tripped copy above
obj['params']
```
[(array([[ 1.53069824e-01, -2.10883975e-01, 2.01293573e-01,
2.00886243e-05, 8.39339555e-05, -1.59104392e-01,
-3.11721444e-01, 8.15778226e-02, -1.30519122e-01,
2.66870164e-04, 2.02755541e-01, 2.00401381e-01,
2.55665570e-01, 9.49345157e-03, -2.87445990e-04,
2.97707498e-01, 2.03495666e-01, 6.32490441e-02,
1.99170873e-01, 1.80119430e-04, 1.20747246e-01,
-1.67233795e-01, -2.98334770e-02, -3.50672485e-06,
1.22021355e-01, 1.01019315e-01, 1.96768597e-01,
-1.26161918e-01, 3.03663150e-03, 2.84228637e-03,
1.29882827e-01, -7.99915791e-02, -1.29280478e-01,
4.20932584e-02, -8.54701623e-02, 5.27626090e-02,
-1.17510051e-01, 1.01156078e-01, 4.60322537e-02,
8.14467445e-02, 5.08916415e-02, 1.00427233e-01,
9.20651853e-03, 4.08653691e-02, -4.04455874e-04,
-8.63167420e-02, -1.63388662e-02, -6.32612929e-02,
-4.41273190e-02, 1.39217749e-01, 1.20777473e-01,
-2.40870282e-01, 2.55658120e-01, 1.09948285e-01,
-3.47286537e-02, 2.67806593e-02, 9.29918215e-02,
1.18900443e-05, -1.36361867e-01, 8.80834250e-06,
8.88264403e-02, -9.57389420e-05, -9.50625315e-02,
2.03881428e-01, -1.61554329e-02, -1.62480952e-04,
-2.17100427e-01, 6.52862415e-02, -2.11271066e-02,
-8.55357293e-03, 1.26932263e-01, -1.71124235e-01,
-5.88358827e-02, -5.16286343e-02, -1.39274625e-05,
5.35593834e-04, 9.61367711e-02, 1.18201040e-01,
-4.81214654e-03, 9.21703850e-06, -6.21714629e-02,
-8.35517049e-02, 1.48129642e-01, 9.29927846e-05,
1.15547804e-02, -1.89512312e-01, 4.78157401e-02,
1.76594645e-01, 1.74783200e-01, 5.14295287e-02,
8.22573304e-02, 9.21452641e-02, 4.19164710e-02,
-2.96897590e-01, -3.95684547e-05, -1.62488356e-01,
-5.28006442e-03, -2.46928513e-01, 1.66877523e-01,
-7.30078593e-02, -5.51951900e-02, -2.95307606e-01,
5.55741927e-03, -1.36754781e-01, 3.92581522e-02,
-6.33591320e-04, 2.59832561e-01, -1.40803531e-01,
2.81424731e-01, 2.48177692e-01, -2.55979836e-01,
-2.00387673e-03, 9.41594772e-06, 4.10088524e-02,
-2.16915846e-01, 2.05238029e-01, 5.90725578e-02,
-1.07257450e-02, 4.88407686e-02, 1.20324254e-01,
-3.98441069e-02, -8.82441998e-02, -6.40106574e-02,
9.10342392e-03, 3.94968502e-03, 6.52407557e-02,
-4.41694545e-04, 2.32200782e-05],
[-9.56135169e-02, -4.26747017e-02, 2.31635168e-01,
-4.34760768e-05, -1.41721021e-03, -1.05505474e-01,
-1.71429455e-01, 2.83397526e-01, -2.90613860e-01,
1.34984814e-02, -2.04954833e-01, -1.09364532e-01,
2.36716960e-02, 1.02085277e-01, -7.57721486e-04,
3.01513523e-01, -7.43361935e-02, 6.67992011e-02,
-2.26764724e-01, 1.24011317e-03, -6.82956874e-02,
-2.27028634e-02, -3.38605016e-01, -7.59451223e-06,
2.47587293e-01, -1.05594315e-01, 6.80167899e-02,
-3.21082734e-02, 2.73950666e-01, 2.28774473e-02,
1.39960796e-01, 1.27722230e-02, -1.43947393e-01,
3.11916452e-02, -1.90591291e-01, -8.27737749e-02,
-1.02607206e-01, 1.33461252e-01, -1.05700260e-02,
-1.02083698e-01, 5.75763918e-02, 1.34090975e-01,
2.15719789e-01, 2.12458178e-01, -4.04632092e-03,
-3.49481136e-01, 2.00065210e-01, 2.42629960e-01,
1.93042323e-01, 2.50296980e-01, 8.94507915e-02,
-9.58382115e-02, -2.28494462e-02, 2.36369818e-01,
-1.27528459e-01, -3.69563773e-02, -4.67114002e-02,
2.74982722e-03, -9.03799571e-03, -4.51136657e-05,
2.58287728e-01, 3.40779708e-03, 9.73177031e-02,
-6.85589537e-02, 6.38271775e-03, 1.67258189e-03,
1.97855867e-02, -9.07262564e-02, -2.09668845e-01,
-4.53785360e-02, 1.67807788e-01, -8.33287239e-02,
3.52400422e-01, 1.22029133e-01, 8.00654367e-02,
9.65318363e-03, 1.72288101e-02, -1.01203807e-02,
1.56366780e-01, -1.68244774e-03, -2.37100329e-02,
-1.73389837e-01, 1.95295420e-02, -1.99440308e-03,
8.00025314e-02, -5.56620248e-02, 1.02638401e-01,
-1.10902466e-01, 3.06838099e-02, -4.44191992e-02,
-8.43552127e-02, -1.01193162e-02, 2.33832449e-02,
-6.29129075e-03, 4.06327425e-03, -8.32595825e-02,
-3.15990485e-02, -1.56893134e-02, 2.82309711e-01,
2.82361269e-01, -5.72224781e-02, 1.26476690e-01,
-2.54863471e-01, 2.34438434e-01, 2.19621658e-01,
-3.14361751e-01, 1.25776291e-01, 3.03557932e-01,
5.59490025e-02, 1.51903592e-02, -2.60025859e-02,
-5.54666389e-03, -1.84743130e-05, -6.40727952e-02,
-2.44256482e-01, -4.81589586e-02, -2.26609573e-01,
1.23653643e-01, 1.95894942e-01, -6.13540150e-02,
1.45041913e-01, -2.85595238e-01, -4.65233065e-02,
5.48471771e-02, 1.05303042e-01, 2.79892176e-01,
5.61308339e-02, -1.89330840e-05],
[ 1.41446022e-02, -2.01077163e-01, -1.00045651e-01,
-8.03999066e-01, -8.95831764e-01, 4.21975218e-02,
-1.63626343e-01, -2.76833862e-01, -9.76875201e-02,
5.06179988e-01, 4.87757809e-02, 1.41420245e-01,
-2.07441241e-01, 4.01056767e-01, -8.05710256e-01,
6.44692872e-03, 2.21009120e-01, -1.69436529e-01,
-6.67921826e-02, -1.05775523e+00, 8.38957876e-02,
-2.98910160e-02, -1.46367446e-01, -6.53301001e-01,
-1.04626469e-01, -1.58328131e-01, 2.53040403e-01,
6.34190068e-02, 4.76733029e-01, -5.84724784e-01,
3.33633602e-01, -1.21292204e-01, -2.23500072e-03,
2.00990856e-01, -1.68379713e-02, 1.11320294e-01,
-1.31700095e-02, -1.03982717e-01, -4.40700680e-01,
2.74899751e-01, 2.59439200e-01, 2.15766475e-01,
3.26091349e-01, -6.15844578e-02, -6.39787376e-01,
1.57167748e-01, -1.39899984e-01, 8.18154663e-02,
-2.17399538e-01, -6.46281019e-02, 1.34346440e-01,
2.12652043e-01, -9.76891369e-02, 3.79553400e-02,
1.54967695e-01, -4.33932215e-01, -1.18579738e-01,
-1.39027762e+00, 2.06093520e-01, -7.32906520e-01,
-2.59851754e-01, -9.46300507e-01, 5.03597856e-02,
-7.38284066e-02, -3.75461280e-01, -8.37107539e-01,
-2.82007039e-01, -1.31407663e-01, 1.10969849e-01,
2.02970564e-01, -7.02738762e-02, 3.28801498e-02,
1.80798143e-01, -3.50165248e-01, 4.46210891e-01,
-6.59628510e-01, -2.87261099e-01, 2.80246645e-01,
2.50837177e-01, -1.16137397e+00, -1.68976769e-01,
-8.80870670e-02, -4.61860485e-02, -9.82364774e-01,
3.16690683e-01, 1.90806836e-01, 5.66026270e-02,
-1.44116074e-01, -2.78943963e-03, 2.78720170e-01,
-8.42702761e-02, -4.45272118e-01, 2.31863722e-01,
3.17606293e-02, -1.17060268e+00, 8.20184126e-02,
-5.30681014e-01, 1.83141917e-01, -4.02189136e-01,
-2.00566091e-03, 8.20839126e-03, 2.02945963e-01,
1.09732404e-01, 2.25487888e-01, 4.87290174e-02,
-1.38762947e-02, 1.79363459e-01, 1.78430855e-01,
-2.75342762e-02, -1.79975152e-01, -2.09120149e-03,
-6.71783090e-01, -6.95732594e-01, 2.93099910e-01,
-2.42147833e-01, -2.94341862e-01, 5.07360250e-02,
3.43546301e-01, 6.06366359e-02, 9.93927941e-02,
1.48736641e-01, 2.39289686e-01, 1.30173057e-01,
3.49565655e-01, 1.92776322e-01, -5.48308827e-02,
-4.40925032e-01, -8.10449481e-01]], dtype=float32),
array([ 0.04993482, -0.01212634, 0.0791207 , 0.00492666, 0.01319074,
-0.10325487, -0.1346548 , 0.10120684, -0.0575023 , -0.02011754,
0.06913678, 0.04907846, 0.04280977, 0.00189588, 0.00704372,
0.05355757, 0.06565716, 0.08904282, 0.03973582, 0.00688254,
0.06209717, -0.08303745, -0.03966172, 0.00399084, 0.06877296,
0.12979364, 0.06964204, 0.31554383, 0.13802093, 0.02556979,
0.4289254 , -0.0503752 , -0.07069702, 0.03091657, -0.06025747,
0.0746284 , -0.08132786, 0.04863249, 0.06814621, 0.0508422 ,
0.08750687, 0.06362367, 0.05117381, 0.07356688, 0.02762729,
0.09656563, 0.07198024, -0.26193607, -0.0602057 , 0.08606211,
0.07281027, -0.06211648, 0.07160514, 0.06034514, 0.30626503,
0.0429852 , 0.14285904, 0.03020917, 0.13712466, 0.00450039,
0.09005794, 0.0123495 , 0.36532483, 0.08149678, 0.18150629,
0.00565592, -0.11370263, 0.09517168, 0.21982178, 0.08685449,
0.08191731, -0.07257076, 0.2534638 , 0.3897213 , -0.00143844,
0.01895614, 0.10273186, 0.11818916, 0.00264214, 0.0128728 ,
-0.08194415, -0.0661905 , 0.07294855, 0.00806362, -0.00107712,
-0.00505454, 0.06896237, 0.0813387 , 0.06515121, -0.07235986,
0.07138455, 0.10467188, 0.16391854, -0.0606044 , 0.010386 ,
0.19612047, 0.10880241, -0.08573751, 0.07287953, 0.3737235 ,
-0.02502817, -0.07157979, -0.01277431, 0.14055003, 0.05926196,
0.29733676, 0.05263878, -0.1505707 , 0.05544473, 0.07868218,
-0.07834595, 0.05206577, 0.00429451, 0.05404095, -0.09889918,
-0.0151882 , 0.09757402, 0.19194539, 0.07196058, 0.03609553,
-0.3547578 , 0.04935162, 0.14975967, 0.01198729, 0.01796428,
0.02070589, 0.04502893, 0.00497391], dtype=float32)),
(),
(array([[-0.03422838, -0.02585936, 0.07256419, ..., 0.10604588,
0.07892369, 0.1075211 ],
[-0.02576409, -0.04324246, -0.14388034, ..., -0.14141543,
-0.14632596, -0.13177887],
[-0.03215259, 0.08260014, 0.04717227, ..., 0.12862837,
-0.10882525, 0.09364428],
...,
[-0.08749322, -0.0168451 , -0.05433867, ..., -0.03359725,
-0.00382601, 0.15514964],
[-0.10678113, 0.2651945 , -0.29697 , ..., -0.05284815,
-0.02245768, 0.07594308],
[-0.8964345 , 0.4518805 , -0.8790541 , ..., 0.1999891 ,
-0.5511756 , 0.20147882]], dtype=float32),
array([ 0.04318503, 0.06401413, 0.10552775, 0.1473793 , 0.03431362,
-0.04497027, 0.04367442, 0.07699274, 0.04165394, 0.07037516,
0.08304454, 0.0683172 , 0.00262007, 0.05374337, 0.03608707,
0.16427258, 0.04042302, -0.00634741, -0.02428112, 0.06873304,
0.0518212 , 0.04075989, 0.06424611, 0.05797005, 0.0316513 ,
0.2345811 , -0.02713604, 0.04995387, -0.04329855, 0.1378327 ,
0.05642162, 0.06783304, 0.0543987 , 0.05031141, -0.02864387,
0.1944591 , 0.20191945, -0.23888457, -0.00107816, 0.05796514,
0.06788929, 0.06220463, 0.04600336, 0.04127879, 0.23460324,
0.02737976, -0.02148593, 0.12680675, 0.07559889, 0.09663132,
0.09608997, 0.02774669, 0.05582898, 0.01978187, 0.0213549 ,
0.15664043, 0.05999393, -0.03036105, 0.07454889, 0.12296706,
0.00878114, 0.04319701, 0.03167537, 0.05068729, -0.01067725,
0.06127113, 0.14654745, -0.01772743, 0.04000608, 0.07163013,
0.05569566, 0.06486893, -0.03135101, 0.04571848, 0.07377528,
0.06323048, 0.05599957, -0.00325564, 0.05290375, 0.18670882,
0.04943348, 0.05839921, -0.01254695, 0.00374835, 0.04091755,
0.06934862, 0.05295921, 0.12965381, 0.26518968, 0.02922123,
0.0759641 , 0.06503931, -0.02622866, 0.08374478, -0.05003466,
0.04679668, 0.00959557, -0.00260884, -0.01219916, 0.0059711 ,
0.05894898, 0.04760699, 0.04448665, 0.14146328, 0.04171022,
0.00039654, 0.14147858, 0.05612815, 0.03840605, 0.04201587,
0.0598927 , 0.0404181 , -0.0121826 , 0.002291 , 0.02991664,
0.06338561, 0.05918598, 0.10408899, -0.05643817, 0.04786403,
0.17926623, 0.04988811, 0.05208017, 0.05186151, 0.0571902 ,
0.05716937, -0.02336271, 0.04936297], dtype=float32)),
(),
(array([[-4.78656381e-01, -4.84813273e-01, -5.50405860e-01],
[ 3.35031658e-01, 1.76466942e-01, 1.48843542e-01],
[-3.45053434e-01, -6.55529320e-01, -2.37278402e-01],
[-9.41956460e-01, -9.29954112e-01, -8.69155824e-01],
[ 2.55623072e-01, -3.67746176e-03, 1.26485467e-01],
[-5.47391735e-02, -1.23208657e-01, -1.38787791e-01],
[ 2.61397302e-01, 2.81228703e-02, 6.48772046e-02],
[ 2.01461002e-01, 1.97006389e-01, 1.24496259e-01],
[ 1.88138280e-02, 2.08875373e-01, 2.70739555e-01],
[ 2.38008395e-01, 2.37070963e-01, 4.67405587e-01],
[ 1.37017727e-01, 3.44284713e-01, 1.73349738e-01],
[ 2.18866974e-01, 2.30267376e-01, -6.88656420e-02],
[-6.76326990e-01, -6.99695110e-01, -1.38253331e-01],
[ 7.44611695e-02, 3.46906662e-01, 1.94446564e-01],
[ 1.27352834e-01, 3.37143987e-02, 7.34688267e-02],
[-1.10179520e+00, -9.15884078e-01, -9.38202620e-01],
[ 2.16434851e-01, 1.61506251e-01, 4.67592180e-02],
[-8.43614116e-02, -1.60350174e-01, -4.02493781e-04],
[-2.11055398e-01, -8.97642747e-02, 5.72317876e-02],
[ 2.15646937e-01, 2.57510275e-01, 1.78803712e-01],
[ 2.71565050e-01, -5.99395931e-02, 1.79622740e-01],
[ 7.15189651e-02, 2.59319156e-01, 2.61098385e-01],
[ 1.86891183e-01, 2.48789236e-01, 2.18168437e-01],
[ 1.84291024e-02, 2.42671505e-01, 2.19747853e-02],
[ 2.03428015e-01, 3.54187727e-01, 2.82053471e-01],
[ 7.03351319e-01, 8.09539080e-01, 7.72811830e-01],
[ 9.11437124e-02, -1.44656196e-01, 2.22388536e-01],
[ 2.26557896e-01, -1.10934088e-02, 2.13816017e-01],
[-1.12496905e-01, -1.87045932e-01, -5.97019009e-02],
[-7.61769235e-01, -7.72123635e-01, -9.75890219e-01],
[ 3.05190325e-01, 1.32497445e-01, 2.17855945e-01],
[ 1.31501675e-01, 1.12660259e-01, 1.65270157e-02],
[-5.56640387e-01, -4.99232233e-01, -2.92703152e-01],
[ 2.19774038e-01, 3.95686179e-02, 2.31017187e-01],
[-1.77604914e-01, -6.38101920e-02, -4.09325399e-02],
[-1.03784680e+00, -9.10168588e-01, -1.07743847e+00],
[-1.00170982e+00, -1.04068625e+00, -8.59334171e-01],
[ 8.45464468e-01, 2.08024696e-01, -4.88651544e-02],
[-4.31112051e-01, -1.91040486e-01, -3.56914222e-01],
[ 2.79779643e-01, 2.50112742e-01, 2.09560946e-01],
[ 5.63736148e-02, 2.27240503e-01, 2.81559199e-01],
[-7.16340959e-01, -4.02274430e-01, -5.70875406e-01],
[ 2.46527553e-01, -8.15471187e-02, 2.10321933e-01],
[ 1.37722507e-01, -1.67197183e-01, 3.23957533e-01],
[-8.45782340e-01, -1.05558038e+00, -1.12943280e+00],
[-3.96439135e-01, -2.55469799e-01, -4.35159922e-01],
[-2.39666134e-01, 1.18508175e-01, -3.05553824e-01],
[-6.16362095e-01, -5.21952748e-01, -6.37968063e-01],
[ 1.39270779e-02, 2.06092358e-01, 2.47910649e-01],
[-6.25008523e-01, -6.01777017e-01, -5.92815816e-01],
[ 1.96474746e-01, 1.98963687e-01, -1.47557515e-03],
[ 1.63086876e-01, 1.93513006e-01, 2.37006247e-01],
[ 1.29517719e-01, 1.98047608e-01, 2.71610498e-01],
[-1.42343462e-01, 7.90357590e-02, -8.58355612e-02],
[-5.27097523e-01, -3.60084742e-01, -4.36733454e-01],
[-8.70184779e-01, -2.06790432e-01, -1.00296281e-01],
[ 1.94708794e-01, 6.13125302e-02, 4.15276922e-02],
[ 1.23108901e-01, -2.63996720e-01, -2.07525179e-01],
[ 8.23504180e-02, 2.37032309e-01, 1.45315722e-01],
[-3.39615971e-01, -5.39651752e-01, -5.03607452e-01],
[-1.77049398e-01, -7.85850435e-02, -1.31696507e-01],
[ 4.49967720e-02, 3.20405841e-01, 1.52601779e-01],
[ 2.26537839e-01, 2.06401661e-01, -1.26141608e-01],
[-9.73755196e-02, 3.00717533e-01, 2.03305751e-01],
[-1.77933872e-02, -1.02676630e-01, -3.10888700e-02],
[ 2.64976263e-01, 2.50689447e-01, 1.67717174e-01],
[-2.89391667e-01, -5.38424611e-01, -5.13043404e-01],
[-1.12035535e-01, -2.91938297e-02, 5.71100600e-02],
[-6.81093931e-01, -7.79274285e-01, -8.95485103e-01],
[ 7.11113261e-03, 2.08368346e-01, 1.09029703e-01],
[ 2.67163366e-01, 9.01261866e-02, 2.32021391e-01],
[ 7.61629567e-02, 1.28208548e-01, 1.77933559e-01],
[-2.42862012e-02, -1.42082199e-01, 9.67771187e-02],
[ 1.93336159e-01, 8.01900402e-02, 1.84268087e-01],
[ 3.80656809e-01, 1.73509553e-01, 3.48442614e-01],
[-4.12368387e-01, -5.81474602e-01, -3.72003049e-01],
[ 2.31199354e-01, 9.51954871e-02, 9.01686102e-02],
[-2.30810836e-01, 3.19271423e-02, -2.17658013e-01],
[ 1.37645110e-01, 1.10636175e-01, 1.41822726e-01],
[-8.80627453e-01, -1.19069552e+00, -1.06030595e+00],
[ 1.34634405e-01, 2.07669675e-01, 3.21988791e-01],
[ 1.95426956e-01, 2.12161362e-01, 1.53630190e-02],
[-1.22733235e-01, 2.31809001e-02, -2.72758193e-02],
[ 3.04959603e-02, -6.61364049e-02, -2.81280249e-01],
[ 1.67098984e-01, 5.76136671e-02, 2.03342289e-01],
[ 2.42621675e-01, 4.24271375e-01, 3.72970521e-01],
[ 3.52295369e-01, 1.11584626e-01, -6.89266324e-02],
[-4.39384192e-01, -4.31890994e-01, -6.38583899e-01],
[ 5.71117282e-01, 4.89224911e-01, 3.75584066e-01],
[-3.42314690e-01, -6.00230023e-02, 3.19120228e-01],
[-5.33911943e-01, -4.10487771e-01, -4.53867435e-01],
[ 6.27191663e-02, 1.59758374e-01, 2.63538938e-02],
[ 1.95248798e-01, 3.79788578e-02, 2.10875288e-01],
[-5.45423508e-01, -3.39853108e-01, -3.81522626e-01],
[-2.18804523e-01, -5.64338192e-02, -2.85992641e-02],
[-5.62148280e-02, 2.20772356e-01, 8.28550979e-02],
[-4.09969002e-01, -3.73499215e-01, -1.92373544e-01],
[-5.19106209e-01, -1.23261869e-01, -1.96268767e-01],
[-1.57062620e-01, -1.10868244e-02, 6.20706715e-02],
[-1.57321051e-01, -3.06500614e-01, -2.30024591e-01],
[ 3.59437205e-02, 3.52994859e-01, 2.38882601e-01],
[ 1.36312008e-01, 1.38561815e-01, 2.23111033e-01],
[ 1.66426688e-01, 1.44103363e-01, 1.08864978e-01],
[-4.31933135e-01, -4.29439902e-01, -5.28308690e-01],
[-4.10688035e-02, 2.73244053e-01, 1.24709614e-01],
[-1.84629008e-01, -1.24577269e-01, 5.33142388e-02],
[-1.08149683e+00, -8.98993015e-01, -1.14275980e+00],
[ 2.08761305e-01, 1.56309798e-01, -1.25824418e-02],
[-4.95516181e-01, -3.53564203e-01, -1.31432191e-01],
[ 3.55273299e-02, 1.20648474e-01, 2.36399055e-01],
[ 8.50410089e-02, -1.47752538e-02, 2.15277553e-01],
[ 2.11633042e-01, 1.01077348e-01, 3.30886878e-02],
[-1.06111094e-01, 1.07693918e-01, -9.53614414e-02],
[-1.75395906e-01, -3.53530228e-01, -1.19211458e-01],
[ 2.68203467e-01, 2.82129109e-01, -1.54477522e-01],
[ 1.70596585e-01, 1.52597874e-01, 6.76318184e-02],
[ 5.46143129e-02, 1.23157211e-01, 8.29520896e-02],
[-5.31307220e-01, -1.78458467e-01, -5.32431602e-01],
[-1.97006300e-01, -1.22562185e-01, -7.49048591e-02],
[ 2.81229258e-01, -6.29485771e-02, 2.03068763e-01],
[-4.78337079e-01, -7.24266708e-01, -7.22120702e-01],
[ 3.39150280e-01, 6.52333498e-02, 1.42264619e-01],
[ 1.33290380e-01, 3.07468604e-02, 2.74779916e-01],
[-7.64167402e-04, 4.02096054e-03, 2.88234293e-01],
[ 2.67466605e-01, 2.52607644e-01, 2.24442825e-01],
[ 1.75580591e-01, 1.17959879e-01, 2.54109591e-01],
[-1.54530272e-01, -1.21206172e-01, 6.84056804e-02],
[ 3.03603262e-01, 1.25999466e-01, 8.91618878e-02]], dtype=float32),
array([0.02325003, 0.04449695, 0.04215444], dtype=float32))]
```python
```
|
H-E-L-PREPO_NAMEXID_plusPATH_START.@XID_plus_extracted@XID_plus-master@docs@build@doctrees@nbsphinx@notebooks@examples@SED_emulator@JAX_CIGALE_emulator-kasia.ipynb@.PATH_END.py
|
{
"filename": "PN-Hamiltonian-Spin-Orbit.ipynb",
"repo_name": "zachetienne/nrpytutorial",
"repo_path": "nrpytutorial_extracted/nrpytutorial-master/NRPyPN/PN-Hamiltonian-Spin-Orbit.ipynb",
"type": "Jupyter Notebook"
}
|
<script async src="https://www.googletagmanager.com/gtag/js?id=UA-59152712-8"></script>
<script>
window.dataLayer = window.dataLayer || [];
function gtag(){dataLayer.push(arguments);}
gtag('js', new Date());
gtag('config', 'UA-59152712-8');
</script>
# $H_{\rm SO}$, up to and including 3.5 post-Newtonian order
## Author: Zach Etienne
## This notebook constructs the spin-orbit coupling terms in the Hamiltonian up to 3.5 post-Newtonian order.
**Notebook Status:** <font color='green'><b> Validated </b></font>
**Validation Notes:** All expressions in this notebook were transcribed twice by hand on separate occasions, and expressions were corrected as needed to ensure consistency with published PN expressions. In addition, this tutorial notebook has been confirmed to be self-consistent with its corresponding NRPy+ module, as documented [below](#code_validation). **Additional validation tests may have been performed, but are as yet, undocumented.**
### This notebook exists as the following Python module:
1. [PN_Hamiltonian_SO.py](../../edit/NRPyPN/PN_Hamiltonian_SO.py)
### This notebook & corresponding Python module depend on the following NRPy+/NRPyPN Python modules:
1. [indexedexp.py](../../edit/indexedexp.py): [**documentation+tutorial**](../Tutorial-Indexed_Expressions.ipynb)
1. [NRPyPN_shortcuts.py](../../edit/NRPyPN/NRPyPN_shortcuts.py): [**documentation**](NRPyPN_shortcuts.ipynb)
<a id='toc'></a>
# Table of Contents
$$\label{toc}$$
1. Part 1: [$H_{\rm SO, 1.5PN}$](#onept5pn), as summarized in [Damour, Jaranowski, and Schäfer (2008)](https://arxiv.org/abs/0711.1048) (see references therein for sources)
1. Part 2: [$H_{\rm SO, 2.5PN}$](#twopt5pn), as derived by [Damour, Jaranowski, and Schäfer (2008)](https://arxiv.org/abs/0711.1048)
1. Part 3: [$H_{\rm SO, 3.5PN}$](#threept5pn), as derived in [Hartung and Steinhoff (2011)](https://arxiv.org/abs/1104.3079)
1. Part 4: [Validation against second transcription and corresponding Python module](#code_validation)
1. Part 5: [LaTeX PDF output](#latex_pdf_output): $\LaTeX$ PDF Output
<a id='onept5pn'></a>
# Part 1: $H_{\rm SO, 1.5PN}$, as summarized in [Damour, Jaranowski, and Schäfer (2008)](https://arxiv.org/abs/0711.1048) (see references therein for sources) \[Back to [top](#toc)\]
$$\label{onept5pn}$$
As described in the [nonspinning Hamiltonian notebook](PN-Hamiltonian-Nonspinning.ipynb), the basic physical system assumes two point particles of mass $m_1$ and $m_2$ with corresponding momentum vectors $\mathbf{P}_1$ and $\mathbf{P}_2$, and displacement vectors $\mathbf{X}_1$ and $\mathbf{X}_2$ with respect to the center of mass. Here we also consider the spin vectors of each point mass $\mathbf{S}_1$ and $\mathbf{S}_2$, respectively.
[Damour, Jaranowski, and Schäfer (2008)](https://arxiv.org/abs/0711.1048) adopt the notation
\begin{align}
\mathbf{r}_{12} &= (\mathbf{X}_1-\mathbf{X}_2)\\
r_{12} = r_{21} &= |\mathbf{r}_{12}|\\
\mathbf{n}_{12} &= \frac{\mathbf{r}_{12}}{r_{12}},
\end{align}
and when the numbers in subscripts are flipped, the particles are interchanged.
The spin-orbit terms of the Hamiltonian up to and including 3.5 PN order are generally given by:
$$
H_{\rm SO} = \mathbf{\Omega}_1 S^i_1 + \mathbf{\Omega}_2 S^i_2,
$$
where we need only define $\mathbf{\Omega}_1$ within a function, as $\mathbf{\Omega}_2$ is defined simply by interchanging $1\leftrightarrow 2$ in the $\mathbf{\Omega}_1$ expression.
At 1.5PN order (as summarized in [Damour, Jaranowski, and Schäfer (2008)](https://arxiv.org/abs/0711.1048), Eq 4.11a), we have
$$
\mathbf{\Omega}_{1,SO,1.5PN} = \frac{1}{r_{12}^2}\bigg( \frac{3m_2}{2m_1}{\mathbf{n}}_{12}\times{\mathbf{p}}_1 - 2 {\mathbf{n}}_{12}\times{\mathbf{p}}_2 \bigg),
$$
```python
# Step 0: Add NRPy's directory to the path
# https://stackoverflow.com/questions/16780014/import-file-from-parent-directory
import os, sys # Standard Python modules for multiplatform OS-level functions
import indexedexpNRPyPN as ixp # NRPy+: Symbolic indexed expression (e.g., tensors, vectors, etc.) support
from NRPyPN_shortcuts import div,dot,cross # NRPyPN: shortcuts for e.g., vector operations
# 1.5PN spin-orbit coupling term, from Eq. 4.11a of
# Damour, Jaranowski, and Schäfer (2008)
# https://arxiv.org/abs/0711.1048
def f_H_SO_1p5PN(m1,m2, n12U,n21U, S1U, S2U, p1U,p2U, r12):
    """Assemble the 1.5PN spin-orbit Hamiltonian H = Omega_1.S_1 + Omega_2.S_2.

    Omega_1 follows Eq. 4.11a of Damour, Jaranowski, and Schäfer (2008),
    https://arxiv.org/abs/0711.1048; Omega_2 is obtained from Omega_1 by the
    particle interchange 1 <-> 2.  The result is stored in the module-level
    global H_SO_1p5PN.
    """
    def leading_order_Omega(mA, mB, nUnitU, pAU, pBU, sep):
        # Eq. 4.11a: Omega_A = ( (3 mB)/(2 mA) n x p_A  -  2 n x p_B ) / r^2
        n_cross_pA = cross(nUnitU, pAU)
        n_cross_pB = cross(nUnitU, pBU)
        OmegaAU = ixp.zerorank1()
        for j in range(3):
            OmegaAU[j] = (div(3,2)*mB/mA*n_cross_pA[j] - 2*n_cross_pB[j])/sep**2
        return OmegaAU
    global H_SO_1p5PN
    # Omega_2 = Omega_1 with the interchange 1 <-> 2 applied to all inputs.
    H_SO_1p5PN = (+dot(leading_order_Omega(m1,m2, n12U, p1U,p2U, r12), S1U)
                  +dot(leading_order_Omega(m2,m1, n21U, p2U,p1U, r12), S2U))
```
```python
# Second version, used for validation purposes only.
def f_H_SO_1p5PNv2(m1,m2, n12U,n21U, S1U, S2U, p1U,p2U, r12):
    """Second, independent transcription of H_SO at 1.5PN (validation only).

    Deliberately kept separate from f_H_SO_1p5PN so the two hand
    transcriptions of Eq. 4.11a of https://arxiv.org/abs/0711.1048 can be
    cross-checked below.  Stores the result in the global H_SO_1p5PNv2.
    """
    def f_Omega_SO_1p5PN(m1,m2, n12U, p1U,p2U, r12):
        # Omega_1 = ( (3 m2)/(2 m1) n12 x p1 - 2 n12 x p2 ) / r12^2
        Omega1U = ixp.zerorank1()
        for i in range(3):
            Omega1U[i] = (div(3,2)*m2/m1 * cross(n12U,p1U)[i] - 2*cross(n12U,p2U)[i])/r12**2
        return Omega1U
    # Omega_2 follows from Omega_1 under the particle interchange 1 <-> 2.
    Omega1_1p5PNU = f_Omega_SO_1p5PN(m1,m2, n12U, p1U,p2U, r12)
    Omega2_1p5PNU = f_Omega_SO_1p5PN(m2,m1, n21U, p2U,p1U, r12)
    global H_SO_1p5PNv2
    H_SO_1p5PNv2 = dot(Omega1_1p5PNU,S1U) + dot(Omega2_1p5PNU,S2U)
```
<a id='twopt5pn'></a>
# Part 2: $H_{\rm SO, 2.5PN}$, as derived by [Damour, Jaranowski, and Schäfer (2008)](https://arxiv.org/abs/0711.1048) \[Back to [top](#toc)\]
$$\label{twopt5pn}$$
To reduce the possibility of copying errors, equations are taken directly from the arXiv LaTeX source code of Eq 4.11b in [Damour, Jaranowski, and Schäfer (2008)](https://arxiv.org/abs/0711.1048), and only mildly formatted to (1) improve presentation in Jupyter notebooks and (2) to ensure some degree of consistency in notation across different terms in other Hamiltonian notebooks:
\begin{align}
\mathbf{\Omega}^{\rm NLO}_{1} &= \frac{G^2}{c^4r_{12}^3} \Bigg(
\bigg(-\frac{11}{2}m_2-5\frac{m_2^2}{m_1}\bigg){\mathbf{n}}_{12}\times{\mathbf{p}}_1
+ \bigg(6m_1+\frac{15}{2}m_2\bigg){\mathbf{n}}_{12}\times{\mathbf{p}}_2 \Bigg)\\
&\quad
+ \frac{G}{c^4r_{12}^2} \Bigg( \bigg(
- \frac{5m_2{\bf p}_1^2}{8m_1^3} - \frac{3({\mathbf{p}}_1\cdot{\mathbf{p}}_2)}{4m_1^2}
+ \frac{3{\bf p}_2^2}{4m_1m_2}
- \frac{3(\mathbf{n}_{12}\cdot\mathbf{p}_1)(\mathbf{n}_{12}\cdot\mathbf{p}_2)}{4m_1^2} - \frac{3(\mathbf{n}_{12}\cdot\mathbf{p}_2)^2}{2m_1m_2} \bigg){\mathbf{n}}_{12}\times{\mathbf{p}}_1
\\
&\quad\quad\quad\quad + \bigg(\frac{({\mathbf{p}}_1\cdot{\mathbf{p}}_2)}{m_1m_2}+\frac{3(\mathbf{n}_{12}\cdot\mathbf{p}_1)(\mathbf{n}_{12}\cdot\mathbf{p}_2)}{m_1m_2}\bigg){\mathbf{n}}_{12}\times{\mathbf{p}}_2
+ \bigg( \frac{3(\mathbf{n}_{12}\cdot\mathbf{p}_1)}{4m_1^2} - \frac{2(\mathbf{n}_{12}\cdot\mathbf{p}_2)}{m_1m_2} \bigg){\mathbf{p}}_1\times{\mathbf{p}}_2
\Bigg).
\end{align}
```python
# 2.5PN spin-orbit coupling term, from Eq. 4.11b of
# Damour, Jaranowski, and Schäfer (2008)
# https://arxiv.org/abs/0711.1048
def f_H_SO_2p5PN(m1,m2, n12U,n21U, S1U, S2U, p1U,p2U, r12):
    """Assemble the 2.5PN (NLO) spin-orbit Hamiltonian H_SO_2p5PN.

    Omega_1 is transcribed term-by-term from Eq. 4.11b of Damour,
    Jaranowski, and Schäfer (2008), https://arxiv.org/abs/0711.1048;
    Omega_2 is the 1 <-> 2 interchange.  Result stored in the module
    global H_SO_2p5PN.  Inputs follow NRPyPN conventions: masses m1,m2;
    unit vector n12U (and n21U) from particle 2 to 1 (1 to 2); spins
    S1U,S2U; momenta p1U,p2U; separation r12.
    """
    def f_Omega_SO_2p5PN(m1,m2, n12U, p1U,p2U, r12):
        # First parenthesized group: the G^2/r^3 terms of Eq. 4.11b;
        # second group: the G/r^2 terms.
        Omega1 = ixp.zerorank1()
        for i in range(3):
            Omega1[i] = (+(+(-div(11,2)*m2-5*m2**2/m1)*cross(n12U,p1U)[i]
                           +(6*m1 + div(15,2)*m2) *cross(n12U,p2U)[i])/r12**3
                         +(+(-div(5,8)*m2*dot(p1U,p1U)/m1**3
                             -div(3,4)*dot(p1U,p2U)/m1**2
                             +div(3,4)*dot(p2U,p2U)/(m1*m2)
                             -div(3,4)*dot(n12U,p1U)*dot(n12U,p2U)/m1**2
                             -div(3,2)*dot(n12U,p2U)**2/(m1*m2))*cross(n12U,p1U)[i]
                           +(dot(p1U,p2U)/(m1*m2) + 3*dot(n12U,p1U)*dot(n12U,p2U)/(m1*m2))*cross(n12U,p2U)[i]
                           +(div(3,4)*dot(n12U,p1U)/m1**2 - 2*dot(n12U,p2U)/(m1*m2))*cross(p1U,p2U)[i])/r12**2)
        return Omega1
    Omega1_2p5PNU = f_Omega_SO_2p5PN(m1,m2, n12U, p1U,p2U, r12)
    Omega2_2p5PNU = f_Omega_SO_2p5PN(m2,m1, n21U, p2U,p1U, r12)
    global H_SO_2p5PN
    H_SO_2p5PN = dot(Omega1_2p5PNU,S1U) + dot(Omega2_2p5PNU,S2U)
```
```python
# Second version, used for validation purposes only.
def f_H_SO_2p5PNv2(m1,m2, n12U,n21U, S1U, S2U, p1U,p2U, r12):
    """Second, independent transcription of H_SO at 2.5PN (validation only).

    The "# line N" comments track the display lines of Eq. 4.11b of
    https://arxiv.org/abs/0711.1048.  Stores the result in the module
    global H_SO_2p5PNv2.
    """
    def f_Omega_SO_2p5PNv2(m1,m2, n12U, p1U,p2U, r12):
        Omega1 = ixp.zerorank1()
        # Hoist the two cross products reused across all three lines.
        n12_cross_p1 = cross(n12U,p1U)
        n12_cross_p2 = cross(n12U,p2U)
        for i in range(3):
            Omega1[i] = ( (-div(11,2)*m2 - 5*m2**2/m1)*n12_cross_p1[i] + # line 1
                          (6*m1 + div(15,2)*m2) *n12_cross_p2[i] ) / r12**3 # line 1
            Omega1[i]+= (( -div(5,8)*m2*dot(p1U,p1U)/m1**3 # line 2
                           -div(3,4)*dot(p1U,p2U)/m1**2 # line 2
                           +div(3,4)*dot(p2U,p2U)/(m1*m2) # line 2
                           -div(3,4)*dot(n12U,p1U)*dot(n12U,p2U)/m1**2 # line 2
                           -div(3,2)*dot(n12U,p2U)**2/(m1*m2) )*n12_cross_p1[i] + # line 2
                        ( dot(p1U,p2U)/(m1*m2) + 3*dot(n12U,p1U)*dot(n12U,p2U)/(m1*m2) )*n12_cross_p2[i] + # line 3
                        (+div(3,4)*dot(n12U,p1U)/m1**2 - 2*dot(n12U,p2U)/(m1*m2) )*cross(p1U,p2U)[i] )/r12**2 # line 3
        return Omega1
    Omega1_2p5PNU = f_Omega_SO_2p5PNv2(m1,m2, n12U, p1U,p2U, r12)
    Omega2_2p5PNU = f_Omega_SO_2p5PNv2(m2,m1, n21U, p2U,p1U, r12)
    global H_SO_2p5PNv2
    H_SO_2p5PNv2 = dot(Omega1_2p5PNU,S1U) + dot(Omega2_2p5PNU,S2U)
```
<a id='threept5pn'></a>
# Part 3: $H_{\rm SO, 3.5PN}$, as derived in [Hartung and Steinhoff (2011)](https://arxiv.org/abs/1104.3079) \[Back to [top](#toc)\]
$$\label{threept5pn}$$
To reduce the possibility of copying errors, equations are taken directly from the arXiv LaTeX source code of Eq 5 in [Hartung and Steinhoff (2011)](https://arxiv.org/abs/1104.3079), and only mildly formatted to (1) improve presentation in Jupyter notebooks and (2) to ensure some degree of consistency in notation across different terms in other Hamiltonian notebooks:
\begin{align}
H^{\text{NNLO}}_{\text{SO}} & = \frac{1}{r_{12}^2} \biggl[
\biggl(
\frac{7 m_2 (\mathbf{P}_1^2)^2}{16 m_1^5}
+ \frac{9 (\mathbf{n}_{12}\cdot\mathbf{P}_1)(\mathbf{n}_{12}\cdot\mathbf{P}_2)\mathbf{P}_1^2}{16 m_1^4}
+ \frac{3 \mathbf{P}_1^2 (\mathbf{n}_{12}\cdot\mathbf{P}_2)^2}{4 m_1^3 m_2}\nonumber\\
&\quad\quad\quad + \frac{45 (\mathbf{n}_{12}\cdot\mathbf{P}_1)(\mathbf{n}_{12}\cdot\mathbf{P}_2)^3}{16 m_1^2 m_2^2}
+ \frac{9 \mathbf{P}_1^2 (\mathbf{P}_1\cdot\mathbf{P}_2)}{16 m_1^4}
- \frac{3 (\mathbf{n}_{12}\cdot\mathbf{P}_2)^2 (\mathbf{P}_1\cdot\mathbf{P}_2)}{16 m_1^2 m_2^2}\nonumber\\
&\quad\quad\quad - \frac{3 (\mathbf{P}_1^2) (\mathbf{P}_2^2)}{16 m_1^3 m_2}
- \frac{15 (\mathbf{n}_{12}\cdot\mathbf{P}_1)(\mathbf{n}_{12}\cdot\mathbf{P}_2) \mathbf{P}_2^2}{16 m_1^2 m_2^2}
+ \frac{3 (\mathbf{n}_{12}\cdot\mathbf{P}_2)^2 \mathbf{P}_2^2}{4 m_1 m_2^3}\nonumber\\
&\quad\quad\quad - \frac{3 (\mathbf{P}_1\cdot\mathbf{P}_2) \mathbf{P}_2^2}{16 m_1^2 m_2^2}
- \frac{3 (\mathbf{P}_2^2)^2}{16 m_1 m_2^3}
\biggr)((\mathbf{n}_{12} \times \mathbf{P}_1)\mathbf{S}_1)\\
&\quad\quad\quad +\biggl(
- \frac{3 (\mathbf{n}_{12}\cdot\mathbf{P}_1)(\mathbf{n}_{12}\cdot\mathbf{P}_2)\mathbf{P}_1^2}{2 m_1^3 m_2}\nonumber\\
&\quad\quad\quad - \frac{15 (\mathbf{n}_{12}\cdot\mathbf{P}_1)^2(\mathbf{n}_{12}\cdot\mathbf{P}_2)^2}{4 m_1^2 m_2^2}
+ \frac{3 \mathbf{P}_1^2 (\mathbf{n}_{12}\cdot\mathbf{P}_2)^2}{4 m_1^2 m_2^2}
- \frac{\mathbf{P}_1^2 (\mathbf{P}_1\cdot\mathbf{P}_2)}{2 m_1^3 m_2}
+ \frac{(\mathbf{P}_1\cdot\mathbf{P}_2)^2}{2 m_1^2 m_2^2}\nonumber\\
&\quad\quad\quad + \frac{3 (\mathbf{n}_{12}\cdot\mathbf{P}_1)^2 \mathbf{P}_2^2}{4 m_1^2 m_2^2}
- \frac{(\mathbf{P}_1^2) (\mathbf{P}_2^2)}{4 m_1^2 m_2^2}
- \frac{3 (\mathbf{n}_{12}\cdot\mathbf{P}_1)(\mathbf{n}_{12}\cdot\mathbf{P}_2)\mathbf{P}_2^2}{2 m_1 m_2^3}\nonumber\\
&\quad\quad\quad - \frac{(\mathbf{P}_1\cdot\mathbf{P}_2) \mathbf{P}_2^2}{2 m_1 m_2^3}
\biggr)((\mathbf{n}_{12} \times \mathbf{P}_2)\mathbf{S}_1)\\
&\quad\quad\quad +\biggl(
- \frac{9 (\mathbf{n}_{12}\cdot\mathbf{P}_1) \mathbf{P}_1^2}{16 m_1^4}
+ \frac{\mathbf{P}_1^2 (\mathbf{n}_{12}\cdot\mathbf{P}_2)}{m_1^3 m_2}\nonumber\\
&\quad\quad\quad + \frac{27 (\mathbf{n}_{12}\cdot\mathbf{P}_1)(\mathbf{n}_{12}\cdot\mathbf{P}_2)^2}{16 m_1^2 m_2^2}
- \frac{(\mathbf{n}_{12}\cdot\mathbf{P}_2)(\mathbf{P}_1\cdot\mathbf{P}_2)}{8 m_1^2 m_2^2}
-\frac{5 (\mathbf{n}_{12}\cdot\mathbf{P}_1) \mathbf{P}_2^2}{16 m_1^2 m_2^2}\nonumber\\
&\quad\quad\quad + \frac{(\mathbf{n}_{12}\cdot\mathbf{P}_2)\mathbf{P}_2^2}{m_1 m_2^3}
\biggr)((\mathbf{P}_1 \times \mathbf{P}_2)\mathbf{S}_1)
\biggr] \nonumber\\
&+ \frac{1}{r_{12}^3} \biggl[
\biggl(
-\frac{3 m_2 (\mathbf{n}_{12}\cdot\mathbf{P}_1)^2}{2 m_1^2}
+\left(
-\frac{3 m_2}{2 m_1^2}
+\frac{27 m_2^2}{8 m_1^3}
\right) \mathbf{P}_1^2
+\left(
\frac{177}{16 m_1}
+\frac{11}{m_2}
\right) (\mathbf{n}_{12}\cdot\mathbf{P}_2)^2\nonumber\\
&\quad\quad\quad +\left(
\frac{11}{2 m_1}
+\frac{9 m_2}{2 m_1^2}
\right) (\mathbf{n}_{12}\cdot\mathbf{P}_1) (\mathbf{n}_{12}\cdot\mathbf{P}_2)
+\left(
\frac{23}{4 m_1}
+\frac{9 m_2}{2 m_1^2}
\right) (\mathbf{P}_1\cdot\mathbf{P}_2)\nonumber\\
&\quad\quad\quad -\left(
\frac{159}{16 m_1}
+\frac{37}{8 m_2}
\right) \mathbf{P}_2^2
\biggr)((\mathbf{n}_{12} \times \mathbf{P}_1)\mathbf{S}_1)
+\biggl(
\frac{4 (\mathbf{n}_{12}\cdot\mathbf{P}_1)^2}{m_1}
+\frac{13 \mathbf{P}_1^2}{2 m_1}\nonumber\\
&\quad\quad\quad +\frac{5 (\mathbf{n}_{12}\cdot\mathbf{P}_2)^2}{m_2}
+\frac{53 \mathbf{P}_2^2}{8 m_2}
- \left(
\frac{211}{8 m_1}
+\frac{22}{m_2}
\right) (\mathbf{n}_{12}\cdot\mathbf{P}_1) (\mathbf{n}_{12}\cdot\mathbf{P}_2)\nonumber\\
&\quad\quad\quad -\left(
\frac{47}{8 m_1}
+\frac{5}{m_2}
\right)(\mathbf{P}_1\cdot\mathbf{P}_2)
\biggr)((\mathbf{n}_{12} \times \mathbf{P}_2)\mathbf{S}_1)
+\biggl(
-\left(
\frac{8}{m_1}
+\frac{9 m_2}{2 m_1^2}
\right)(\mathbf{n}_{12}\cdot\mathbf{P}_1)\nonumber\\
&\quad\quad\quad +\left(
\frac{59}{4 m_1}
+\frac{27}{2 m_2}
\right)(\mathbf{n}_{12}\cdot\mathbf{P}_2)
\biggr)((\mathbf{P}_1 \times \mathbf{P}_2)\mathbf{S}_1)
\biggr]\nonumber\\
&+\frac{1}{r_{12}^4} \biggl[
\left(
\frac{181 m_1 m_2}{16}
+ \frac{95 m_2^2}{4}
+ \frac{75 m_2^3}{8 m_1}
\right) ((\mathbf{n}_{12} \times \mathbf{P}_1)\mathbf{S}_1)\nonumber\\
&\quad\quad\quad - \left(
\frac{21 m_1^2}{2}
+ \frac{473 m_1 m_2}{16}
+ \frac{63 m_2^2}{4}
\right)((\mathbf{n}_{12} \times \mathbf{P}_2)\mathbf{S}_1)
\biggr]
+ (1\leftrightarrow2)\,.
\end{align}
Let's split the above into more bite-sized pieces. First:
\begin{align}
H^a_{SO,3.5PN} &= \frac{1}{r_{12}^2} \biggl[
\biggl(
\frac{7 m_2 (\mathbf{P}_1^2)^2}{16 m_1^5}
+ \frac{9 (\mathbf{n}_{12}\cdot\mathbf{P}_1)(\mathbf{n}_{12}\cdot\mathbf{P}_2)\mathbf{P}_1^2}{16 m_1^4}
+ \frac{3 \mathbf{P}_1^2 (\mathbf{n}_{12}\cdot\mathbf{P}_2)^2}{4 m_1^3 m_2}\nonumber\\
&\quad\quad\quad + \frac{45 (\mathbf{n}_{12}\cdot\mathbf{P}_1)(\mathbf{n}_{12}\cdot\mathbf{P}_2)^3}{16 m_1^2 m_2^2}
+ \frac{9 \mathbf{P}_1^2 (\mathbf{P}_1\cdot\mathbf{P}_2)}{16 m_1^4}
- \frac{3 (\mathbf{n}_{12}\cdot\mathbf{P}_2)^2 (\mathbf{P}_1\cdot\mathbf{P}_2)}{16 m_1^2 m_2^2}\nonumber\\
&\quad\quad\quad - \frac{3 (\mathbf{P}_1^2) (\mathbf{P}_2^2)}{16 m_1^3 m_2}
- \frac{15 (\mathbf{n}_{12}\cdot\mathbf{P}_1)(\mathbf{n}_{12}\cdot\mathbf{P}_2) \mathbf{P}_2^2}{16 m_1^2 m_2^2}
+ \frac{3 (\mathbf{n}_{12}\cdot\mathbf{P}_2)^2 \mathbf{P}_2^2}{4 m_1 m_2^3}\nonumber\\
&\quad\quad\quad - \frac{3 (\mathbf{P}_1\cdot\mathbf{P}_2) \mathbf{P}_2^2}{16 m_1^2 m_2^2}
- \frac{3 (\mathbf{P}_2^2)^2}{16 m_1 m_2^3}
\biggr)((\mathbf{n}_{12} \times \mathbf{P}_1)\mathbf{S}_1)\biggr]
\end{align}
```python
# 3.5PN spin-orbit coupling term, from Eq. 5 of
# Hartung and Steinhoff (2011)
# https://arxiv.org/abs/1104.3079
# 3.5PN H_SO: Omega_1, part 1:
def HS2011_Omega_SO_3p5PN_pt1(m1,m2, n12U, p1U,p2U, r12):
    """Piece 1 of the NNLO (3.5PN) spin-orbit Omega_1: the (n12 x p1)
    contribution at order 1/r^2, transcribed from Eq. 5 of Hartung and
    Steinhoff (2011), https://arxiv.org/abs/1104.3079.

    Inputs follow NRPyPN conventions (masses m1,m2; unit vector n12U from
    particle 2 to 1; momenta p1U,p2U; separation r12); returns a 3-vector.
    """
    Omega1 = ixp.zerorank1()
    for i in range(3):
        Omega1[i] = ((+7*m2*dot(p1U,p1U)**2/(16*m1**5)
                      +9*dot(n12U,p1U)*dot(n12U,p2U)*dot(p1U,p1U)/(16*m1**4)
                      +3*dot(p1U,p1U)*dot(n12U,p2U)**2/(4*m1**3*m2)
                      +45*dot(n12U,p1U)*dot(n12U,p2U)**3/(16*m1**2*m2**2)
                      +9*dot(p1U,p1U)*dot(p1U,p2U)/(16*m1**4)
                      -3*dot(n12U,p2U)**2*dot(p1U,p2U)/(16*m1**2*m2**2)
                      -3*dot(p1U,p1U)*dot(p2U,p2U)/(16*m1**3*m2)
                      -15*dot(n12U,p1U)*dot(n12U,p2U)*dot(p2U,p2U)/(16*m1**2*m2**2)
                      +3*dot(n12U,p2U)**2*dot(p2U,p2U)/(4*m1*m2**3)
                      -3*dot(p1U,p2U)*dot(p2U,p2U)/(16*m1**2*m2**2)
                      -3*dot(p2U,p2U)**2/(16*m1*m2**3))*cross(n12U,p1U)[i])/r12**2
    return Omega1
```
```python
# Second version, used for validation purposes only.
def HS2011_Omega_SO_3p5PN_pt1v2(m1,m2, n12U, p1U,p2U, q):
    """Second, independent transcription of piece 1 of the 3.5PN Omega_1
    (validation only); here q denotes the particle separation r12.
    """
    Omega1 = ixp.zerorank1()
    for i in range(3):
        Omega1[i] = ( (+div(7,16)*m2*dot(p1U,p1U)**2/m1**5
                       +div(9,16)*dot(n12U,p1U)*dot(n12U,p2U)*dot(p1U,p1U)/m1**4
                       +div(3,4) *dot(p1U,p1U)*dot(n12U,p2U)**2/(m1**3*m2)
                       +div(45,16)*dot(n12U,p1U)*dot(n12U,p2U)**3/(m1**2*m2**2)
                       +div(9,16)*dot(p1U,p1U)*dot(p1U,p2U)/m1**4
                       -div(3,16)*dot(n12U,p2U)**2*dot(p1U,p2U)/(m1**2*m2**2)
                       -div(3,16)*dot(p1U,p1U)*dot(p2U,p2U)/(m1**3*m2)
                       -div(15,16)*dot(n12U,p1U)*dot(n12U,p2U)*dot(p2U,p2U)/(m1**2*m2**2)
                       +div(3,4)*dot(n12U,p2U)**2*dot(p2U,p2U)/(m1*m2**3)
                       -div(3,16)*dot(p1U,p2U)*dot(p2U,p2U)/(m1**2*m2**2)
                       -div(3,16)*dot(p2U,p2U)**2/(m1*m2**3)) * cross(n12U,p1U)[i] )/q**2
    return Omega1
```
Next,
\begin{align}
H^b_{SO,3.5PN} &= \frac{1}{r_{12}^2} \biggl[
+\biggl(
- \frac{3 (\mathbf{n}_{12}\cdot\mathbf{P}_1)(\mathbf{n}_{12}\cdot\mathbf{P}_2)\mathbf{P}_1^2}{2 m_1^3 m_2}\nonumber\\
&\quad\quad\quad - \frac{15 (\mathbf{n}_{12}\cdot\mathbf{P}_1)^2(\mathbf{n}_{12}\cdot\mathbf{P}_2)^2}{4 m_1^2 m_2^2}
+ \frac{3 \mathbf{P}_1^2 (\mathbf{n}_{12}\cdot\mathbf{P}_2)^2}{4 m_1^2 m_2^2}
- \frac{\mathbf{P}_1^2 (\mathbf{P}_1\cdot\mathbf{P}_2)}{2 m_1^3 m_2}
+ \frac{(\mathbf{P}_1\cdot\mathbf{P}_2)^2}{2 m_1^2 m_2^2}\nonumber\\
&\quad\quad\quad + \frac{3 (\mathbf{n}_{12}\cdot\mathbf{P}_1)^2 \mathbf{P}_2^2}{4 m_1^2 m_2^2}
- \frac{(\mathbf{P}_1^2) (\mathbf{P}_2^2)}{4 m_1^2 m_2^2}
- \frac{3 (\mathbf{n}_{12}\cdot\mathbf{P}_1)(\mathbf{n}_{12}\cdot\mathbf{P}_2)\mathbf{P}_2^2}{2 m_1 m_2^3}\nonumber\\
&\quad\quad\quad - \frac{(\mathbf{P}_1\cdot\mathbf{P}_2) \mathbf{P}_2^2}{2 m_1 m_2^3}
\biggr)((\mathbf{n}_{12} \times \mathbf{P}_2)\mathbf{S}_1)
\biggr]
\end{align}
```python
# 3.5PN H_SO: Omega_1, part 2:
def HS2011_Omega_SO_3p5PN_pt2(m1,m2, n12U, p1U,p2U, r12):
    """Piece 2 of the NNLO (3.5PN) spin-orbit Omega_1: the (n12 x p2)
    contribution at order 1/r^2, from Eq. 5 of Hartung and Steinhoff
    (2011), https://arxiv.org/abs/1104.3079.  Returns a 3-vector.
    """
    Omega1 = ixp.zerorank1()
    for i in range(3):
        Omega1[i] = (+(-3*dot(n12U,p1U)*dot(n12U,p2U)*dot(p1U,p1U)/(2*m1**3*m2)
                       -15*dot(n12U,p1U)**2*dot(n12U,p2U)**2/(4*m1**2*m2**2)
                       +3*dot(p1U,p1U)*dot(n12U,p2U)**2/(4*m1**2*m2**2)
                       -dot(p1U,p1U)*dot(p1U,p2U)/(2*m1**3*m2)
                       +dot(p1U,p2U)**2/(2*m1**2*m2**2)
                       +3*dot(n12U,p1U)**2*dot(p2U,p2U)/(4*m1**2*m2**2)
                       -dot(p1U,p1U)*dot(p2U,p2U)/(4*m1**2*m2**2)
                       -3*dot(n12U,p1U)*dot(n12U,p2U)*dot(p2U,p2U)/(2*m1*m2**3)
                       -dot(p1U,p2U)*dot(p2U,p2U)/(2*m1*m2**3))*cross(n12U,p2U)[i])/r12**2
    return Omega1
```
```python
# Second version, used for validation purposes only.
def HS2011_Omega_SO_3p5PN_pt2v2(m1,m2, n12U, p1U,p2U, q):
    """Second, independent transcription of piece 2 of the 3.5PN Omega_1
    (validation only); q denotes the particle separation r12.
    """
    Omega1 = ixp.zerorank1()
    for i in range(3):
        Omega1[i] = ( (-div(3,2)*dot(n12U,p1U)*dot(n12U,p2U)*dot(p1U,p1U)/(m1**3*m2)
                       -div(15,4)*dot(n12U,p1U)**2*dot(n12U,p2U)**2/(m1**2*m2**2)
                       +div(3,4)*dot(p1U,p1U)*dot(n12U,p2U)**2/(m1**2*m2**2)
                       -div(1,2)*dot(p1U,p1U)*dot(p1U,p2U)/(m1**3*m2)
                       +div(1,2)*dot(p1U,p2U)**2/(m1**2*m2**2)
                       +div(3,4)*dot(n12U,p1U)**2*dot(p2U,p2U)/(m1**2*m2**2)
                       -div(1,4)*dot(p1U,p1U)*dot(p2U,p2U)/(m1**2*m2**2)
                       -div(3,2)*dot(n12U,p1U)*dot(n12U,p2U)*dot(p2U,p2U)/(m1*m2**3)
                       -div(1,2)*dot(p1U,p2U)*dot(p2U,p2U)/(m1*m2**3))*cross(n12U,p2U)[i] )/q**2
    return Omega1
```
Part 3:
\begin{align}
H^c_{SO,3.5PN} &= \frac{1}{r_{12}^2} \biggl[
+\biggl(
- \frac{9 (\mathbf{n}_{12}\cdot\mathbf{P}_1) \mathbf{P}_1^2}{16 m_1^4}
+ \frac{\mathbf{P}_1^2 (\mathbf{n}_{12}\cdot\mathbf{P}_2)}{m_1^3 m_2}\nonumber\\
&\quad\quad\quad + \frac{27 (\mathbf{n}_{12}\cdot\mathbf{P}_1)(\mathbf{n}_{12}\cdot\mathbf{P}_2)^2}{16 m_1^2 m_2^2}
- \frac{(\mathbf{n}_{12}\cdot\mathbf{P}_2)(\mathbf{P}_1\cdot\mathbf{P}_2)}{8 m_1^2 m_2^2}
-\frac{5 (\mathbf{n}_{12}\cdot\mathbf{P}_1) \mathbf{P}_2^2}{16 m_1^2 m_2^2}\nonumber\\
&\quad\quad\quad + \frac{(\mathbf{n}_{12}\cdot\mathbf{P}_2)\mathbf{P}_2^2}{m_1 m_2^3}
\biggr)((\mathbf{P}_1 \times \mathbf{P}_2)\mathbf{S}_1)
\biggr]
\end{align}
```python
# 3.5PN H_SO: Omega_1, part 3:
def HS2011_Omega_SO_3p5PN_pt3(m1,m2, n12U, p1U,p2U, r12):
    """Piece 3 of the NNLO (3.5PN) spin-orbit Omega_1: the (p1 x p2)
    contribution at order 1/r^2, transcribed from Eq. 5 of Hartung and
    Steinhoff (2011), https://arxiv.org/abs/1104.3079 (the H^c piece
    displayed above).

    Inputs follow NRPyPN conventions (masses m1,m2; unit vector n12U from
    particle 2 to 1; momenta p1U,p2U; separation r12); returns a 3-vector.
    """
    Omega1 = ixp.zerorank1()
    for i in range(3):
        # BUGFIX: restore the final +(n12.p2) p2^2/(m1 m2^3) term of the
        # published expression, which was missing from this transcription
        # (it is present both in the LaTeX above and in
        # HS2011_Omega_SO_3p5PN_pt3v2 below).
        Omega1[i] = (+(-9*dot(n12U,p1U)*dot(p1U,p1U)/(16*m1**4)
                       +dot(p1U,p1U)*dot(n12U,p2U)/(m1**3*m2)
                       +27*dot(n12U,p1U)*dot(n12U,p2U)**2/(16*m1**2*m2**2)
                       -dot(n12U,p2U)*dot(p1U,p2U)/(8*m1**2*m2**2)
                       -5*dot(n12U,p1U)*dot(p2U,p2U)/(16*m1**2*m2**2)
                       +dot(n12U,p2U)*dot(p2U,p2U)/(m1*m2**3))*cross(p1U,p2U)[i])/r12**2
    return Omega1
```
```python
# Second version, used for validation purposes only.
def HS2011_Omega_SO_3p5PN_pt3v2(m1,m2, n12U, p1U,p2U, q):
    """Second, independent transcription of piece 3 of the 3.5PN Omega_1
    (validation only); q denotes the particle separation r12.
    """
    Omega1 = ixp.zerorank1()
    for i in range(3):
        Omega1[i] = ( (-div(9,16)*dot(n12U,p1U)*dot(p1U,p1U)/m1**4
                       + dot(p1U,p1U)*dot(n12U,p2U)/(m1**3*m2)
                       +div(27,16)*dot(n12U,p1U)*dot(n12U,p2U)**2/(m1**2*m2**2)
                       -div(1,8)*dot(n12U,p2U)*dot(p1U,p2U)/(m1**2*m2**2)
                       -div(5,16)*dot(n12U,p1U)*dot(p2U,p2U)/(m1**2*m2**2)
                       + dot(n12U,p2U)*dot(p2U,p2U)/(m1*m2**3))*cross(p1U,p2U)[i] )/q**2
    return Omega1
```
Part 4, the first $1/r_{12}^3$ term:
\begin{align}
H^d_{SO,3.5PN} &= \frac{1}{r_{12}^3} \biggl[
\biggl(
-\frac{3 m_2 (\mathbf{n}_{12}\cdot\mathbf{P}_1)^2}{2 m_1^2}
+\left(
-\frac{3 m_2}{2 m_1^2}
+\frac{27 m_2^2}{8 m_1^3}
\right) \mathbf{P}_1^2
+\left(
\frac{177}{16 m_1}
+\frac{11}{m_2}
\right) (\mathbf{n}_{12}\cdot\mathbf{P}_2)^2\nonumber\\
&\quad\quad\quad +\left(
\frac{11}{2 m_1}
+\frac{9 m_2}{2 m_1^2}
\right) (\mathbf{n}_{12}\cdot\mathbf{P}_1) (\mathbf{n}_{12}\cdot\mathbf{P}_2)
+\left(
\frac{23}{4 m_1}
+\frac{9 m_2}{2 m_1^2}
\right) (\mathbf{P}_1\cdot\mathbf{P}_2)\nonumber\\
&\quad\quad\quad -\left(
\frac{159}{16 m_1}
+\frac{37}{8 m_2}
\right) \mathbf{P}_2^2
\biggr)((\mathbf{n}_{12} \times \mathbf{P}_1)\mathbf{S}_1)
\biggr]
\end{align}
```python
# 3.5PN H_SO: Omega_1, part 4:
def HS2011_Omega_SO_3p5PN_pt4(m1,m2, n12U, p1U,p2U, r12):
    """Piece 4 of the NNLO (3.5PN) spin-orbit Omega_1: the first
    (n12 x p1) contribution at order 1/r^3, from Eq. 5 of Hartung and
    Steinhoff (2011), https://arxiv.org/abs/1104.3079.  Returns a 3-vector.
    """
    Omega1 = ixp.zerorank1()
    for i in range(3):
        # Note: 177/(16*m1) etc. divide an int by a symbolic product, so
        # SymPy keeps the coefficients exact (no float division occurs).
        Omega1[i] = (+(-3*m2*dot(n12U,p1U)**2/(2*m1**2)
                       +((-3*m2)/(2*m1**2) + 27*m2**2/(8*m1**3))*dot(p1U,p1U)
                       +(177/(16*m1) + 11/m2)*dot(n12U,p2U)**2
                       +(11/(2*m1) + 9*m2/(2*m1**2))*dot(n12U,p1U)*dot(n12U,p2U)
                       +(23/(4*m1) + 9*m2/(2*m1**2))*dot(p1U,p2U)
                       -(159/(16*m1) + 37/(8*m2))*dot(p2U,p2U))*cross(n12U,p1U)[i])/r12**3
    return Omega1
```
```python
# Second version, used for validation purposes only.
def HS2011_Omega_SO_3p5PN_pt4v2(m1,m2, n12U, p1U,p2U, q):
    """Second, independent transcription of piece 4 of the 3.5PN Omega_1
    (validation only); q denotes the particle separation r12.
    """
    Omega1 = ixp.zerorank1()
    for i in range(3):
        Omega1[i] = ( (-div(3,2)*m2*dot(n12U,p1U)**2/m1**2
                       +(-div(3,2)*m2/m1**2 + div(27,8)*m2**2/m1**3)*dot(p1U,p1U)
                       +(+div(177,16)/m1 + 11/m2)*dot(n12U,p2U)**2
                       +(+div(11,2)/m1 + div(9,2)*m2/m1**2)*dot(n12U,p1U)*dot(n12U,p2U)
                       +(+div(23,4)/m1 + div(9,2)*m2/m1**2)*dot(p1U,p2U)
                       -(+div(159,16)/m1 + div(37,8)/m2)*dot(p2U,p2U) )*cross(n12U,p1U)[i] )/q**3
    return Omega1
```
Part 5, the second $1/r_{12}^3$ term:
\begin{align}
H^e_{SO,3.5PN} &= \frac{1}{r_{12}^3} \biggl[
+\biggl(
\frac{4 (\mathbf{n}_{12}\cdot\mathbf{P}_1)^2}{m_1}
+\frac{13 \mathbf{P}_1^2}{2 m_1}\nonumber\\
&\quad\quad\quad +\frac{5 (\mathbf{n}_{12}\cdot\mathbf{P}_2)^2}{m_2}
+\frac{53 \mathbf{P}_2^2}{8 m_2}
- \left(
\frac{211}{8 m_1}
+\frac{22}{m_2}
\right) (\mathbf{n}_{12}\cdot\mathbf{P}_1) (\mathbf{n}_{12}\cdot\mathbf{P}_2)\nonumber\\
&\quad\quad\quad -\left(
\frac{47}{8 m_1}
+\frac{5}{m_2}
\right)(\mathbf{P}_1\cdot\mathbf{P}_2)
\biggr)((\mathbf{n}_{12} \times \mathbf{P}_2)\mathbf{S}_1)
\biggr]
\end{align}
```python
# 3.5PN H_SO: Omega_1, part 5:
def HS2011_Omega_SO_3p5PN_pt5(m1,m2, n12U, p1U,p2U, r12):
    """Piece 5 of the NNLO (3.5PN) spin-orbit Omega_1: the (n12 x p2)
    contribution at order 1/r^3, from Eq. 5 of Hartung and Steinhoff
    (2011), https://arxiv.org/abs/1104.3079.  Returns a 3-vector.
    """
    Omega1 = ixp.zerorank1()
    for i in range(3):
        Omega1[i] = (+(+4*dot(n12U,p1U)**2/m1
                       +13*dot(p1U,p1U)/(2*m1)
                       +5*dot(n12U,p2U)**2/m2
                       +53*dot(p2U,p2U)/(8*m2)
                       -(211/(8*m1) + 22/m2)*dot(n12U,p1U)*dot(n12U,p2U)
                       -(47/(8*m1) + 5/m2)*dot(p1U,p2U))*cross(n12U,p2U)[i])/r12**3
    return Omega1
```
```python
# Second version, used for validation purposes only.
def HS2011_Omega_SO_3p5PN_pt5v2(m1,m2, n12U, p1U,p2U, q):
    """Second, independent transcription of piece 5 of the 3.5PN Omega_1
    (validation only); q denotes the particle separation r12.
    """
    Omega1 = ixp.zerorank1()
    for i in range(3):
        Omega1[i] = ( (+4*dot(n12U,p1U)**2/m1
                       +div(13,2)*dot(p1U,p1U)/m1
                       +5*dot(n12U,p2U)**2/m2
                       +div(53,8)*dot(p2U,p2U)/m2
                       -(div(211,8)/m1+22/m2)*dot(n12U,p1U)*dot(n12U,p2U)
                       -(div(47,8)/m1+5/m2)*dot(p1U,p2U)) * cross(n12U,p2U)[i] )/q**3
    return Omega1
```
Part 6, the third $1/r_{12}^3$ term:
\begin{align}
H^f_{SO,3.5PN} &= \frac{1}{r_{12}^3} \biggl[
+\biggl(
-\left(
\frac{8}{m_1}
+\frac{9 m_2}{2 m_1^2}
\right)(\mathbf{n}_{12}\cdot\mathbf{P}_1)\nonumber\\
&\quad\quad\quad +\left(
\frac{59}{4 m_1}
+\frac{27}{2 m_2}
\right)(\mathbf{n}_{12}\cdot\mathbf{P}_2)
\biggr)((\mathbf{P}_1 \times \mathbf{P}_2)\mathbf{S}_1)
\biggr]
\end{align}
```python
# 3.5PN H_SO: Omega_1, part 6:
def HS2011_Omega_SO_3p5PN_pt6(m1,m2, n12U, p1U,p2U, r12):
    """Piece 6 of the NNLO (3.5PN) spin-orbit Omega_1: the (p1 x p2)
    contribution at order 1/r^3, from Eq. 5 of Hartung and Steinhoff
    (2011), https://arxiv.org/abs/1104.3079.  Returns a 3-vector.
    """
    # The whole piece is a single scalar prefactor multiplying p1 x p2,
    # so evaluate the prefactor and the cross product once.
    prefactor = (-(8/m1 + 9*m2/(2*m1**2))*dot(n12U,p1U)
                 +(59/(4*m1) + 27/(2*m2))*dot(n12U,p2U))
    p1_cross_p2 = cross(p1U,p2U)
    Omega1 = ixp.zerorank1()
    for i in range(3):
        Omega1[i] = prefactor*p1_cross_p2[i]/r12**3
    return Omega1
```
```python
# Second version, used for validation purposes only.
def HS2011_Omega_SO_3p5PN_pt6v2(m1,m2, n12U, p1U,p2U, q):
    """Second, independent transcription of piece 6 of the 3.5PN Omega_1
    (validation only); q denotes the particle separation r12.
    """
    Omega1 = ixp.zerorank1()
    for i in range(3):
        Omega1[i] = ( (-( 8/m1 + div(9,2)*m2/m1**2)*dot(n12U,p1U)
                       +(div(59,4)/m1 + div(27,2)/m2) *dot(n12U,p2U))*cross(p1U,p2U)[i] )/q**3
    return Omega1
```
Finally part 7, the $1/r_{12}^4$ term:
\begin{align}
H^g_{SO,3.5PN} &= \frac{1}{r_{12}^4} \biggl[
\left(
\frac{181 m_1 m_2}{16}
+ \frac{95 m_2^2}{4}
+ \frac{75 m_2^3}{8 m_1}
\right) ((\mathbf{n}_{12} \times \mathbf{P}_1)\mathbf{S}_1)\nonumber\\
&\quad\quad\quad - \left(
\frac{21 m_1^2}{2}
+ \frac{473 m_1 m_2}{16}
+ \frac{63 m_2^2}{4}
\right)((\mathbf{n}_{12} \times \mathbf{P}_2)\mathbf{S}_1)
\biggr]
\end{align}
```python
# 3.5PN H_SO: Omega_1, part 7:
def HS2011_Omega_SO_3p5PN_pt7(m1,m2, n12U, p1U,p2U, r12):
    """Piece 7 of the NNLO (3.5PN) spin-orbit Omega_1: the full 1/r^4
    contribution, from Eq. 5 of Hartung and Steinhoff (2011),
    https://arxiv.org/abs/1104.3079.  Returns a 3-vector.
    """
    # Mass-only coefficients of the two cross-product terms.
    coeff_n_cross_p1 = 181*m1*m2/16 + 95*m2**2/4 + 75*m2**3/(8*m1)
    coeff_n_cross_p2 = 21*m1**2/2 + 473*m1*m2/16 + 63*m2**2/4
    n_cross_p1 = cross(n12U,p1U)
    n_cross_p2 = cross(n12U,p2U)
    Omega1 = ixp.zerorank1()
    for i in range(3):
        Omega1[i] = (coeff_n_cross_p1*n_cross_p1[i]
                     - coeff_n_cross_p2*n_cross_p2[i])/r12**4
    return Omega1
```
```python
# Second version, used for validation purposes only.
def HS2011_Omega_SO_3p5PN_pt7v2(m1,m2, n12U, p1U,p2U, q):
    """Second, independent transcription of piece 7 of the 3.5PN Omega_1
    (validation only); q denotes the particle separation r12.
    """
    Omega1 = ixp.zerorank1()
    for i in range(3):
        Omega1[i] = ( +(div(181,16)*m1*m2 + div(95,4)*m2**2 + div(75,8)*m2**3/m1)*cross(n12U,p1U)[i]
                      -(div(21,2)*m1**2 + div(473,16)*m1*m2 + div(63,4)*m2**2 )*cross(n12U,p2U)[i] )/q**4
    return Omega1
```
Now we put all the $\Omega$ terms together:
```python
def f_H_SO_3p5PN(m1,m2, n12U,n21U, S1U, S2U, p1U,p2U, r12):
    """Assemble the complete 3.5PN spin-orbit Hamiltonian H_SO_3p5PN.

    Sums the seven Omega_1 pieces of Eq. 5 of Hartung and Steinhoff (2011),
    https://arxiv.org/abs/1104.3079, dots Omega_1 into S1U and the
    1 <-> 2 interchanged Omega_2 into S2U, and stores the result in the
    module global H_SO_3p5PN.

    Inputs follow NRPyPN conventions: masses m1,m2; unit vectors
    n12U/n21U from particle 2 to 1 / 1 to 2; spins S1U,S2U; momenta
    p1U,p2U; separation r12.
    """
    # Evaluate each of the seven Omega pieces exactly once.  The previous
    # implementation re-called every helper inside the component loop,
    # recomputing each full 3-vector three times.
    pieces = [HS2011_Omega_SO_3p5PN_pt1, HS2011_Omega_SO_3p5PN_pt2,
              HS2011_Omega_SO_3p5PN_pt3, HS2011_Omega_SO_3p5PN_pt4,
              HS2011_Omega_SO_3p5PN_pt5, HS2011_Omega_SO_3p5PN_pt6,
              HS2011_Omega_SO_3p5PN_pt7]
    Omega1_3p5PNU = ixp.zerorank1()
    Omega2_3p5PNU = ixp.zerorank1()
    for piece in pieces:
        Omega1_pieceU = piece(m1,m2, n12U, p1U,p2U, r12)
        Omega2_pieceU = piece(m2,m1, n21U, p2U,p1U, r12)  # 1 <-> 2 interchange
        for i in range(3):
            Omega1_3p5PNU[i] += Omega1_pieceU[i]
            Omega2_3p5PNU[i] += Omega2_pieceU[i]
    global H_SO_3p5PN
    H_SO_3p5PN = dot(Omega1_3p5PNU,S1U) + dot(Omega2_3p5PNU,S2U)
```
```python
# For validation purposes only:
def f_H_SO_3p5PNv2(m1,m2, n12U,n21U, S1U, S2U, p1U,p2U, r12):
    """Assemble H_SO at 3.5PN from the second (v2) transcriptions of the
    seven Omega pieces (validation only); stores the result in the module
    global H_SO_3p5PNv2.
    """
    # Evaluate each v2 Omega piece exactly once rather than once per
    # vector component, as the original loop structure did.
    pieces = [HS2011_Omega_SO_3p5PN_pt1v2, HS2011_Omega_SO_3p5PN_pt2v2,
              HS2011_Omega_SO_3p5PN_pt3v2, HS2011_Omega_SO_3p5PN_pt4v2,
              HS2011_Omega_SO_3p5PN_pt5v2, HS2011_Omega_SO_3p5PN_pt6v2,
              HS2011_Omega_SO_3p5PN_pt7v2]
    Omega1_3p5PNU = ixp.zerorank1()
    Omega2_3p5PNU = ixp.zerorank1()
    for piece in pieces:
        Omega1_pieceU = piece(m1,m2, n12U, p1U,p2U, r12)
        Omega2_pieceU = piece(m2,m1, n21U, p2U,p1U, r12)  # 1 <-> 2 interchange
        for i in range(3):
            Omega1_3p5PNU[i] += Omega1_pieceU[i]
            Omega2_3p5PNU[i] += Omega2_pieceU[i]
    global H_SO_3p5PNv2
    H_SO_3p5PNv2 = dot(Omega1_3p5PNU,S1U) + dot(Omega2_3p5PNU,S2U)
```
<a id='code_validation'></a>
# Part 4: Validation against second transcription and corresponding Python module \[Back to [top](#toc)\]
$$\label{code_validation}$$
As a code validation check, we verify agreement between
* the SymPy expressions transcribed from the cited published work on two separate occasions, and
* the SymPy expressions generated in this notebook, and the corresponding Python module.
```python
# Populate the notebook-global Hamiltonian expressions using the generic
# symbolic inputs from NRPyPN_shortcuts (q denotes the particle separation).
from NRPyPN_shortcuts import m1,m2, n12U,n21U, S1U, S2U, p1U,p2U, q # Import needed input variables
import sympy as sp # SymPy: The Python computer algebra package upon which NRPy+ depends
f_H_SO_1p5PN(m1,m2, n12U,n21U, S1U, S2U, p1U,p2U, q)
f_H_SO_2p5PN(m1,m2, n12U,n21U, S1U, S2U, p1U,p2U, q)
f_H_SO_3p5PN(m1,m2, n12U,n21U, S1U, S2U, p1U,p2U, q)
def error(varname):
    # Abort the notebook run on the first expression mismatch.
    print("ERROR: When comparing Python module & notebook, "+varname+" was found not to match.")
    sys.exit(1)
# Validation against second transcription of the expressions:
f_H_SO_1p5PNv2(m1,m2, n12U,n21U, S1U, S2U, p1U,p2U, q)
f_H_SO_2p5PNv2(m1,m2, n12U,n21U, S1U, S2U, p1U,p2U, q)
f_H_SO_3p5PNv2(m1,m2, n12U,n21U, S1U, S2U, p1U,p2U, q)
# A difference that sp.simplify() reduces to zero means the two independent
# transcriptions agree symbolically.
if sp.simplify(H_SO_1p5PN - H_SO_1p5PNv2) != 0: error("H_SO_1p5PNv2")
if sp.simplify(H_SO_2p5PN - H_SO_2p5PNv2) != 0: error("H_SO_2p5PNv2")
if sp.simplify(H_SO_3p5PN - H_SO_3p5PNv2) != 0: error("H_SO_3p5PNv2")
# Validation against corresponding Python module:
import PN_Hamiltonian_SO as HSO
HSO.f_H_SO_1p5PN(m1,m2, n12U,n21U, S1U, S2U, p1U,p2U, q)
HSO.f_H_SO_2p5PN(m1,m2, n12U,n21U, S1U, S2U, p1U,p2U, q)
HSO.f_H_SO_3p5PN(m1,m2, n12U,n21U, S1U, S2U, p1U,p2U, q)
if sp.simplify(H_SO_1p5PN - HSO.H_SO_1p5PN) != 0: error("H_SO_1p5PN")
if sp.simplify(H_SO_2p5PN - HSO.H_SO_2p5PN) != 0: error("H_SO_2p5PN")
if sp.simplify(H_SO_3p5PN - HSO.H_SO_3p5PN) != 0: error("H_SO_3p5PN")
print("ALL TESTS PASS")
```
ALL TESTS PASS
<a id='latex_pdf_output'></a>
# Part 5: Output this notebook to $\LaTeX$-formatted PDF file \[Back to [top](#toc)\]
$$\label{latex_pdf_output}$$
The following code cell converts this Jupyter notebook into a proper, clickable $\LaTeX$-formatted PDF file. After the cell is successfully run, the generated PDF may be found in the root NRPy+ tutorial directory, with filename
[PN-Hamiltonian-Spin-Orbit.pdf](PN-Hamiltonian-Spin-Orbit.pdf) (Note that clicking on this link may not work; you may need to open the PDF file through another means.)
```python
import cmdline_helperNRPyPN as cmd # NRPy+: Multi-platform Python command-line interface
# Convert this notebook to LaTeX and compile to PN-Hamiltonian-Spin-Orbit.pdf.
cmd.output_Jupyter_notebook_to_LaTeXed_PDF("PN-Hamiltonian-Spin-Orbit")
```
Created PN-Hamiltonian-Spin-Orbit.tex, and compiled LaTeX file to PDF file
PN-Hamiltonian-Spin-Orbit.pdf
|
zachetienneREPO_NAMEnrpytutorialPATH_START.@nrpytutorial_extracted@nrpytutorial-master@NRPyPN@PN-Hamiltonian-Spin-Orbit.ipynb@.PATH_END.py
|
{
"filename": "main.py",
"repo_name": "ML4GW/aframe",
"repo_path": "aframe_extracted/aframe-main/projects/plots/plots/legacy/main.py",
"type": "Python"
}
|
import logging
from pathlib import Path
from typing import Callable, List, Optional
import h5py
import jsonargparse
import numpy as np
from bokeh.io import save
from bokeh.layouts import gridplot
from ledger.events import EventSet, RecoveredInjectionSet
from ledger.injections import InjectionParameterSet
from plots.legacy import compute, tools
from plots.legacy.gwtc3 import main as gwtc3_pipeline_sv
from plots.vetos import VETO_CATEGORIES, VetoParser, get_catalog_vetos
from priors.priors import log_normal_masses
from utils.cosmology import DEFAULT_COSMOLOGY, get_astrophysical_volume
from utils.logging import configure_logging
logging.getLogger("urllib3").setLevel(logging.WARNING)
def get_prob(prior, ledger):
    """Evaluate the prior probability of each event's component masses.

    ``ledger`` must expose ``mass_1`` and ``mass_2`` arrays; ``prior`` is
    any object with a ``prob(sample, axis=...)`` method.
    """
    masses = {"mass_1": ledger.mass_1, "mass_2": ledger.mass_2}
    return prior.prob(masses, axis=0)
def normalize_path(path):
    """Return *path* as a ``Path``, resolving relative inputs against
    the directory containing this module (so the script works from any
    working directory)."""
    candidate = Path(path)
    if candidate.is_absolute():
        return candidate
    return Path(__file__).resolve().parent / candidate
# O3 injection campaign file used for the GWTC-3 pipeline comparison;
# resolved relative to this module so the script is CWD-independent.
INJECTION_FILE = normalize_path(
    "endo3_mixture-LIGO-T2100113-v12-1256655642-12905976.hdf5"
)
# O3 CBC veto definer file (H1/L1).
VETO_DEFINER_FILE = normalize_path("../vetos/H1L1-HOFT_C01_O3_CBC.xml")
# Per-interferometer O3 gating segment lists keyed by detector name.
GATE_PATHS = {
    "H1": normalize_path("../vetos/H1-O3_GATES_1238166018-31197600.txt"),
    "L1": normalize_path("../vetos/L1-O3_GATES_1238166018-31197600.txt"),
}
def main(
    background: Path,
    foreground: Path,
    rejected_params: Path,
    ifos: List[str],
    mass_combos: List[tuple],
    source_prior: Callable,
    output_dir: Path,
    log_file: Optional[Path] = None,
    dt: Optional[float] = None,
    max_far: float = 365,
    sigma: float = 0.1,
    verbose: bool = False,
    vetos: Optional[List[VETO_CATEGORIES]] = None,
):
    """
    Compute and plot the sensitive volume of an aframe analysis

    Args:
        background:
            Path to the background event set. Should be an HDF5 file
            readable by `ledger.events.EventSet.read`
        foreground:
            Path to the foreground event set. Should be an HDF5 file
            readable by `ledger.injections.RecoveredInjectionSet.read`
        rejected_params:
            Path to the rejected parameter set. Should be an HDF5 file
            readable by `ledger.injections.InjectionParameterSet.read`
        ifos:
            Interferometer names (e.g. `["H1", "L1"]`) used when
            applying per-detector vetos
        mass_combos:
            `(m1, m2)` tuples; a log-normal mass distribution is
            centered on each combination when reweighting injections
        source_prior:
            Callable that, given a cosmology, returns the prior the
            injections were drawn from (second return value unused here)
        output_dir:
            Path to the directory to save the output plots and data
        log_file:
            Path to the log file. If not provided, will log to stdout
        dt:
            If provided, enforce a recovery time delta of `dt` seconds
            between injected and recovered events. Note that your `dt`
            should be greater than 1 / `inference_sampling_rate`.
        max_far:
            The maximum FAR to compute the sensitive volume out to in
            units of years^-1
        sigma:
            The width of the log normal mass distribution to use
        verbose:
            If true, log at the debug level
        vetos:
            Optional list of veto categories to apply to both the
            background and foreground events before analysis
    """
    configure_logging(log_file, verbose)

    logging.info("Reading in inference outputs")
    background = EventSet.read(background)
    foreground = RecoveredInjectionSet.read(foreground)

    # Filter unphysical waveforms generated by IMRPhenomXPHM glitch
    # https://git.ligo.org/reed.essick/rpo4-injection-triage/-/blob/main/rpo4a/README.md?ref_type=heads # noqa
    mask = foreground.snr > 1e4
    num_unphysical = sum(mask)
    if num_unphysical > 0:
        foreground = foreground[~mask]
        logging.info(
            f"Removed {num_unphysical} foreground events with SNR > 10,000"
        )

    rejected_params = InjectionParameterSet.read(rejected_params)

    # Convert detector-frame masses to source frame for prior evaluation.
    for i in range(2):
        mass = f"mass_{i + 1}"
        for ledger in [foreground, rejected_params]:
            val = getattr(ledger, mass)
            setattr(ledger, mass, val / (1 + ledger.redshift))

    logging.info("Read in:")
    logging.info(f"\t{len(background)} background events")
    logging.info(f"\t{len(foreground)} foreground events")
    logging.info(f"\t{len(rejected_params)} rejected events")

    start, stop = (
        background.detection_time.min(),
        background.detection_time.max(),
    )
    logging.info(f"Loading in vetoes from {start} to {stop}")

    # optionally apply vetos if the user passed a list of veto categories
    if vetos is not None:
        veto_parser = VetoParser(
            VETO_DEFINER_FILE,
            GATE_PATHS,
            start,
            stop,
            ifos,
        )
        catalog_vetos = get_catalog_vetos(start, stop)
        for cat in vetos:
            for i, ifo in enumerate(ifos):
                # NOTE: this loop previously rebound the loop source
                # `vetos`, which only worked because the iterator had
                # already been created; use a dedicated name instead.
                if cat == "CATALOG":
                    segments = catalog_vetos
                else:
                    segments = veto_parser.get_vetos(cat)[ifo]
                back_count = len(background)
                fore_count = len(foreground)
                if len(segments) > 0:
                    background = background.apply_vetos(segments, i)
                    foreground = foreground.apply_vetos(segments, i)
                logging.info(
                    f"\t{back_count - len(background)} {cat} "
                    f"background events removed for ifo {ifo}"
                )
                logging.info(
                    f"\t{fore_count - len(foreground)} {cat} "
                    f"foreground events removed for ifo {ifo}"
                )

    logging.info("Computing data likelihood under source prior")
    source, _ = source_prior(DEFAULT_COSMOLOGY)
    source_probs = get_prob(source, foreground)
    source_rejected_probs = get_prob(source, rejected_params)

    logging.info("Computing maximum astrophysical volume")
    zprior = source["redshift"]
    zmin, zmax = zprior.minimum, zprior.maximum
    try:
        decprior = source["dec"]
    except KeyError:
        # No declination prior -> full sky assumed downstream
        decrange = None
    else:
        decrange = (decprior.minimum, decprior.maximum)
    v0 = get_astrophysical_volume(zmin, zmax, DEFAULT_COSMOLOGY, decrange)
    v0 /= 10**9  # Mpc^3 -> Gpc^3

    # FAR grid: one point per background event, out to max_far / year
    Tb = background.Tb / tools.SECONDS_PER_YEAR
    max_events = int(max_far * Tb)
    x = np.arange(1, max_events + 1) / Tb
    thresholds = np.sort(background.detection_statistic)[-max_events:][::-1]

    # Importance weights re-expressing the injection set under each
    # target log-normal mass distribution
    weights = np.zeros((len(mass_combos), len(source_probs)))
    for i, combo in enumerate(mass_combos):
        logging.info(f"Computing likelihoods under {combo} log normal")
        prior, _ = log_normal_masses(
            *combo, sigma=sigma, cosmology=DEFAULT_COSMOLOGY
        )
        prob = get_prob(prior, foreground)
        rejected_prob = get_prob(prior, rejected_params)

        weight = prob / source_probs
        rejected_weights = rejected_prob / source_rejected_probs
        # Rejected injections count toward the normalization but can
        # never be recovered
        norm = weight.sum() + rejected_weights.sum()
        weight /= norm

        # finally, enforce recovery time delta by setting weights to 0
        # for events outside of the delta t
        if dt is not None:
            logging.info(f"Enforcing recovery time delta of {dt} seconds")
            mask = (
                np.abs(foreground.detection_time - foreground.injection_time)
                <= dt
            )
            weight[~mask] = 0
        weights[i] = weight

    logging.info("Computing sensitive volume at thresholds")
    y, err = compute.sensitive_volume(
        foreground.detection_statistic, weights, thresholds
    )
    y *= v0
    err *= v0

    output_dir.mkdir(exist_ok=True, parents=True)
    with h5py.File(output_dir / "sensitive_volume.h5", "w") as f:
        f.create_dataset("thresholds", data=thresholds)
        f.create_dataset("fars", data=x)
        for i, combo in enumerate(mass_combos):
            g = f.create_group("-".join(map(str, combo)))
            g.create_dataset("sv", data=y[i])
            g.create_dataset("err", data=err[i])

    logging.info("Calculating SV vs FAR for GWTC-3 pipelines")
    gwtc3_sv, gwtc3_err = gwtc3_pipeline_sv(
        mass_combos=mass_combos,
        injection_file=INJECTION_FILE,
        detection_criterion="far",
        detection_thresholds=x,
        output_dir=output_dir,
    )

    plots = tools.make_grid(mass_combos)
    for i, p in enumerate(plots):
        color = tools.palette[0]
        # only include a legend on the top left
        kwargs = {}
        if i == 0:
            kwargs["legend_label"] = "aframe"
        p.line(x, y[i], line_width=1.5, line_color=color, **kwargs)
        tools.plot_err_bands(
            p,
            x,
            y[i],
            err[i],
            line_color=color,
            line_width=0.8,
            fill_color=color,
            fill_alpha=0.4,
        )

        for pipeline, color in zip(gwtc3_sv.keys(), tools.palette[1:]):
            m1, m2 = mass_combos[i]
            mass_key = f"{m1}-{m2}"
            pipeline_sv = gwtc3_sv[pipeline][mass_key]
            # BUG FIX: this previously rebound `err`, clobbering the
            # aframe uncertainty array so that `err[i]` indexed into the
            # wrong data on every panel after the first.
            pipeline_err = gwtc3_err[pipeline][mass_key]
            if i == 0:
                kwargs["legend_label"] = pipeline
            p.line(x, pipeline_sv, line_width=1.5, line_color=color, **kwargs)
            tools.plot_err_bands(
                p,
                x,
                pipeline_sv,
                pipeline_err,
                line_color=color,
                line_width=0.8,
                fill_color=color,
                fill_alpha=0.4,
            )

    # style the legend on the top left plot
    legend = plots[0].legend
    legend.ncols = 2
    # style legend position
    legend.location = "top_left"
    legend.margin = 4
    legend.padding = 2
    # style individual glyphs
    legend.glyph_height = 6
    legend.label_text_font_size = "8pt"
    legend.label_height = 8

    grid = gridplot(plots, toolbar_location="right", ncols=2)
    save(grid, filename=output_dir / "sensitive_volume.html")
if __name__ == "__main__":
    # Build a CLI whose flags mirror main()'s signature: jsonargparse
    # introspects the function's annotations and docstring to generate
    # the arguments and their help text.
    parser = jsonargparse.ArgumentParser()
    parser.add_function_arguments(main)
    args = parser.parse_args()
    main(**vars(args))
|
ML4GWREPO_NAMEaframePATH_START.@aframe_extracted@aframe-main@projects@plots@plots@legacy@main.py@.PATH_END.py
|
{
"filename": "_tickvals.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/parcoords/dimension/_tickvals.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class TickvalsValidator(_plotly_utils.basevalidators.DataArrayValidator):
    """Data-array validator for the ``tickvals`` attribute of
    ``parcoords.dimension`` (auto-generated-style Plotly validator)."""

    def __init__(
        self, plotly_name="tickvals", parent_name="parcoords.dimension", **kwargs
    ):
        # Apply the generated defaults only when the caller has not
        # overridden them, then forward everything to the base class.
        kwargs.setdefault("edit_type", "plot")
        kwargs.setdefault("role", "data")
        super(TickvalsValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@parcoords@dimension@_tickvals.py@.PATH_END.py
|
{
"filename": "getpota_daily_ran.py",
"repo_name": "desihub/LSS",
"repo_path": "LSS_extracted/LSS-main/scripts/getpota_daily_ran.py",
"type": "Python"
}
|
'''
Find all of the potential assignments for randoms in all archived tiles
'''
import numpy as np
import os
from astropy.table import Table, join
import argparse
from fiberassign.hardware import load_hardware
from fiberassign.tiles import load_tiles
from fiberassign.targets import Targets, TargetsAvailable, LocationsAvailable, create_tagalong, load_target_file, targets_in_tiles
from fiberassign.assign import Assignment
from fiberassign.utils import Logger
import fitsio
import LSS.common_tools as common
from LSS.globals import main
parser = argparse.ArgumentParser()
# Observing program whose tiles should be processed.
parser.add_argument("--prog", choices=['DARK','BRIGHT'])
# Whether to also flag available (location, target) pairs that are in
# collision ('y'/'n').
parser.add_argument("--getcoll", choices=['n','y'],default='y')
# Process random catalogs with indices in [minr, maxr).
parser.add_argument("--minr",default=0,type=int)
parser.add_argument("--maxr",default=4,type=int)
args = parser.parse_args()

#tiletab = Table.read('/global/cfs/cdirs/desi/survey/catalogs/Y1/LSS/tiles-'+args.prog+'.fits')

# Positioner/petal/GFA exclusion margins forwarded to load_hardware
# (units per the fiberassign convention — TODO confirm).
margins = dict(pos=0.05,
               petal=0.4,
               gfa=0.4)
#def main():
#    from LSS.mkCat_singletile.fa4lsscat import getfatiles
#    getfatiles()
#    return

log = Logger.get()

# Module-level globals read by getcoll(); rann is rebound per random
# catalog in the __main__ loop below.
rann = 0
n = 0

# Survey-wide settings for the daily reductions of the chosen program.
mainp = main(args.prog.lower(),'daily')

mt = mainp.mtld
tiles = mainp.tiles
imbits = mainp.imbits #mask bits applied to targeting
ebits = mainp.ebits #extra mask bits we think should be applied
tsnrcut = mainp.tsnrcut
dchi2 = mainp.dchi2
tnsrcol = mainp.tsnrcol
zmin = mainp.zmin
zmax = mainp.zmax
badfib = mainp.badfib

# Select main-survey tiles of the requested program with redshifts done.
wd = mt['SURVEY'] == 'main'
wd &= mt['ZDONE'] == 'true'
wd &= mt['FAPRGRM'] == args.prog.lower()
mtld = mt[wd]
print('found '+str(len(mtld))+' '+args.prog+' time main survey tiles with zdone true for daily version of reduced spectra')

selt = np.isin(tiles['TILEID'],mtld['TILEID'])

# Minimal tile table (ID plus center coordinates) consumed by getcoll().
ta = Table()
ta['TILEID'] = tiles[selt]['TILEID']
ta['RA'] = tiles[selt]['RA']
ta['DEC'] =tiles[selt]['DEC']
def getcoll(ind):
    """Compute the potential (available) random-target assignments for one tile.

    Reads the module-level globals ``ta`` (tile table, indexed by ``ind``),
    ``rann`` (random catalog number), ``args`` and ``margins``. Replays the
    tile's original fiberassign hardware state and returns an astropy Table
    with one row per (LOCATION, TARGETID) pair reachable on the tile,
    optionally flagged for positioner collisions.
    """
    #tile = 1230
    tile = ta[ind]['TILEID']
    ts = '%06i' % tile

    # The archived fiberassign header carries the run date and field
    # geometry needed to reproduce the hardware state for this tile.
    fbah = fitsio.read_header('/global/cfs/cdirs/desi/target/fiberassign/tiles/trunk/'+ts[:3]+'/fiberassign-'+ts+'.fits.gz')
    dt = fbah['RUNDATE']#[:19]
    pr = args.prog
    t = Table(ta[ind])
    # OBSCONDITIONS=516 is a bitmask value — TODO confirm its meaning.
    t['OBSCONDITIONS'] = 516
    t['IN_DESI'] = 1
    t['MTLTIME'] = fbah['MTLTIME']
    t['FA_RUN'] = fbah['FA_RUN']
    t['PROGRAM'] = pr
    obsha = fbah['FA_HA']
    obstheta = fbah['FIELDROT']
    # Focal-plane model as of the original assignment run date.
    hw = load_hardware(rundate=dt, add_margins=margins)
    # Write a single-row tiles file for load_tiles to consume.
    t.write(os.environ['SCRATCH']+'/rantiles/'+str(tile)+'-'+str(rann)+'-tiles.fits', overwrite=True)
    tiles = load_tiles(
        tiles_file=os.environ['SCRATCH']+'/rantiles/'+str(tile)+'-'+str(rann)+'-tiles.fits',obsha=obsha,obstheta=obstheta,
        select=[tile])
    tids = tiles.id
    print('Tile ids:', tids)
    I = np.flatnonzero(np.array(tids) == tile)
    assert(len(I) == 1)
    i = I[0]
    tile_ra = tiles.ra[i]
    tile_dec = tiles.dec[i]

    # Create empty target list
    tgs = Targets()
    # Create structure for carrying along auxiliary target data not needed by C++.
    plate_radec=True
    tagalong = create_tagalong(plate_radec=plate_radec)
    # Load target files...
    load_target_file(tgs, tagalong, '/global/cfs/cdirs/desi/survey/catalogs/main/LSS/random'+str(rann)+'/tilenofa-%i.fits' % tile)
    #loading it again straight to table format because I can't quickly figure out exactly where targetid,ra,dec gets stored
    tar_tab = fitsio.read('/global/cfs/cdirs/desi/survey/catalogs/main/LSS/random'+str(rann)+'/tilenofa-%i.fits' % tile,columns =['TARGETID','RA','DEC'])

    # Find targets within tiles, and project their RA,Dec positions
    # into focal-plane coordinates.
    tile_targetids, tile_x, tile_y, tile_xy_cs5 = targets_in_tiles(hw, tgs, tiles, tagalong)
    # Compute the targets available to each fiber for each tile.
    tgsavail = TargetsAvailable(hw, tiles, tile_targetids, tile_x, tile_y)
    # Compute the fibers on all tiles available for each target and sky
    favail = LocationsAvailable(tgsavail)
    # FAKE stucksky
    stucksky = {}
    # Create assignment object
    asgn = Assignment(tgs, tgsavail, favail, stucksky)
    tgsavail = asgn.targets_avail()
    avail = tgsavail.tile_data(tile)
    navail = np.sum([len(avail[x]) for x in avail.keys()])
    fibers = dict(hw.loc_fiber)
    # Flatten the per-location availability map into a table.
    fdata = Table()
    fdata['LOCATION'] = np.zeros(navail,dtype=int)
    fdata['FIBER'] = np.zeros(navail,dtype=int)
    fdata['TARGETID'] = np.zeros(navail,dtype=int)
    off = 0
    # The "FAVAIL" (available targets) HDU is sorted first by LOCATION,
    # then by TARGETID.
    for lid in sorted(avail.keys()):
        # lid (location id) is a scalar, tg (target ids) is an array
        tg = avail[lid]
        fdata['LOCATION'][off:off+len(tg)] = lid
        fdata['FIBER'] [off:off+len(tg)] = fibers[lid]
        fdata['TARGETID'][off:off+len(tg)] = sorted(tg)
        off += len(tg)
    # Attach RA/DEC for each available target.
    fdata = join(fdata,tar_tab,keys=['TARGETID'],join_type='left')
    if args.getcoll == 'y':
        coll = asgn.check_avail_collisions(tile)
        kl = np.array(list(coll.keys())).transpose()
        locs = kl[0]
        ids = kl[1]
        # Encode (target, location) pairs into one integer key;
        # NOTE(review): assumes LOCATION < 10000 — confirm.
        locids = ids*10000+locs
        print('N collisions:', len(coll))
        locidsin = np.isin(fdata['LOCATION']+10000*fdata['TARGETID'],locids)
        print('N collisions original:',np.sum(locidsin),len(fdata))
        fdata['COLLISION'] = locidsin
        #colltab = Table(forig[locidsin])
    fdata['TILEID'] = tile
    return fdata
if __name__ == '__main__':
    from multiprocessing import Pool

    tls = list(ta['TILEID'])#[:10])
    inds = np.arange(len(tls))
    # Loop over the requested random catalogs. ``rann`` is a module-level
    # global read by getcoll(); the workers see each new value because a
    # fresh Pool is created after it is rebound (relies on the 'fork'
    # start method — TODO confirm on non-Linux platforms).
    for rann in range(args.minr,args.maxr):
        with Pool(processes=128) as pool:
            res = pool.map(getcoll, inds)
        # Stack the per-tile tables into one catalog of potential
        # assignments for this random realization.
        colltot = np.concatenate(res)
        if args.getcoll == 'y':
            print(len(colltot),np.sum(colltot['COLLISION']))
        common.write_LSS_scratchcp(colltot,'/global/cfs/cdirs/desi/survey/catalogs/main/LSS/random'+str(rann)+'/pota-'+args.prog+'.fits')
|
desihubREPO_NAMELSSPATH_START.@LSS_extracted@LSS-main@scripts@getpota_daily_ran.py@.PATH_END.py
|
{
"filename": "SV1xi.ipynb",
"repo_name": "desihub/LSS",
"repo_path": "LSS_extracted/LSS-main/Sandbox/SV1xi.ipynb",
"type": "Jupyter Notebook"
}
|
This notebook should work if you have run `git clone https://github.com/desihub/LSS.git` and either edited the `sys.path.append` line below to point at the repository's `py` directory, or are running it from within `LSS/Sandbox`.
```python
import sys, os, glob, time
import numpy as np
import matplotlib.pyplot as plt
import fitsio
```
```python
sys.path.append('../py') #this works if you are in the Sandbox directory, check with os.getcwd()
```
```python
from LSS.mkCat_singletile import xitools
```
```python
import subprocess
```
```python
import importlib
```
```python
importlib.reload(xitools) #this is just for when I developed the code during the notebook creation
```
<module 'LSS.mkCat_singletile.xitools' from '../py/LSS/mkCat_singletile/xitools.py'>
```python
#directories for intermediate files
dirpcadw = os.environ['CSCRATCH']+'/pcadw/'
dirpc = os.environ['CSCRATCH']+'/paircounts/'
if not os.path.exists(dirpc):
os.mkdir(dirpcadw)
if not os.path.exists(dirpc):
os.mkdir(dirpc)
```
```python
rmax = 10 # number of random files to use
```
```python
tp='ELG'
tile = '80606'
night = 'deep'
zmin = 0.8
zmax=1.1
catdir = '/global/cfs/cdirs/desi/survey/catalogs/SV1/LSS/LSScats/v0/'
gf = xitools.createSourcesrd_ad(tp,tile,night,zmin=zmin,zmax=zmax,datadir=catdir)
```
995 data objects going out for paircounts
3169 random objects going out for paircounts
```python
subprocess.run(['chmod','+x','dopc'+gf+'.sh'])
subprocess.run('./dopc'+gf+'.sh')
```
CompletedProcess(args='./dopcgELG80606_deep_zm0.8zx1.1.sh', returncode=0)
```python
#above did first random file, do the rest
for i in range(1,rmax):
gf = xitools.createSourcesrd_ari(tp,tile,night,i,zmin=zmin,zmax=zmax,datadir=catdir)
subprocess.run(['chmod','+x','dopc'+gf+'.sh'])
subprocess.run('./dopc'+gf+'.sh')
```
3200 random objects going out for paircounts
3157 random objects going out for paircounts
3156 random objects going out for paircounts
3157 random objects going out for paircounts
3092 random objects going out for paircounts
3109 random objects going out for paircounts
3100 random objects going out for paircounts
3028 random objects going out for paircounts
3243 random objects going out for paircounts
```python
#calculates xi0, writes to cwd
xitools.ppxilcalc_LSDfjack_bs(tp,tile,night,zmin=zmin,zmax=zmax,bs=5,nran=rmax,wmu='')
```

wrote results to xi0ELG80606_deep_zm0.8zx1.15st0.dat
array([ 1.5366768 , 0.433039 , 0.15283732, 0.07482333, 0.09376429,
0.03483154, 0.00229998, 0.00457256, -0.02105441, -0.01085485],
dtype=float32)
```python
#repeat for the rest of the ELG tiles (could have used all,
#but with five tiles far apart, might as well go tile by tile)
```
```python
tiles = ['80608','80610','80621','80623']
for tile in tiles:
gf = xitools.createSourcesrd_ad(tp,tile,night,zmin=zmin,zmax=zmax,datadir=catdir)
subprocess.run(['chmod','+x','dopc'+gf+'.sh'])
subprocess.run('./dopc'+gf+'.sh')
for i in range(1,rmax):
gf = xitools.createSourcesrd_ari(tp,tile,night,i,zmin=zmin,zmax=zmax,datadir=catdir)
subprocess.run(['chmod','+x','dopc'+gf+'.sh'])
subprocess.run('./dopc'+gf+'.sh')
xitools.ppxilcalc_LSDfjack_bs(tp,tile,night,zmin=zmin,zmax=zmax,bs=5,nran=rmax,wmu='')
```
824 data objects going out for paircounts
2700 random objects going out for paircounts
2563 random objects going out for paircounts
2712 random objects going out for paircounts
2647 random objects going out for paircounts
2592 random objects going out for paircounts
2630 random objects going out for paircounts
2657 random objects going out for paircounts
2637 random objects going out for paircounts
2576 random objects going out for paircounts
2690 random objects going out for paircounts

wrote results to xi0ELG80608_deep_zm0.8zx1.15st0.dat
920 data objects going out for paircounts
3008 random objects going out for paircounts
2943 random objects going out for paircounts
2938 random objects going out for paircounts
2954 random objects going out for paircounts
2916 random objects going out for paircounts
2884 random objects going out for paircounts
2769 random objects going out for paircounts
2937 random objects going out for paircounts
3048 random objects going out for paircounts
2957 random objects going out for paircounts

wrote results to xi0ELG80610_deep_zm0.8zx1.15st0.dat
882 data objects going out for paircounts
2780 random objects going out for paircounts
2753 random objects going out for paircounts
2785 random objects going out for paircounts
2815 random objects going out for paircounts
2838 random objects going out for paircounts
2772 random objects going out for paircounts
2796 random objects going out for paircounts
2834 random objects going out for paircounts
2770 random objects going out for paircounts
2816 random objects going out for paircounts

wrote results to xi0ELG80621_deep_zm0.8zx1.15st0.dat
730 data objects going out for paircounts
2322 random objects going out for paircounts
2371 random objects going out for paircounts
2477 random objects going out for paircounts
2380 random objects going out for paircounts
2295 random objects going out for paircounts
2430 random objects going out for paircounts
2400 random objects going out for paircounts
2334 random objects going out for paircounts
2315 random objects going out for paircounts
2374 random objects going out for paircounts

wrote results to xi0ELG80623_deep_zm0.8zx1.15st0.dat
```python
bs=5
xidir=''
d1 = np.loadtxt(xidir+'xi0ELG80606_deep_zm0.8zx1.1'+str(bs)+'st0.dat').transpose()
d2 = np.loadtxt(xidir+'xi0ELG80608_deep_zm0.8zx1.1'+str(bs)+'st0.dat').transpose()
d3 = np.loadtxt(xidir+'xi0ELG80610_deep_zm0.8zx1.1'+str(bs)+'st0.dat').transpose()
d4 = np.loadtxt(xidir+'xi0ELG80621_deep_zm0.8zx1.1'+str(bs)+'st0.dat').transpose()
d5 = np.loadtxt(xidir+'xi0ELG80623_deep_zm0.8zx1.1'+str(bs)+'st0.dat').transpose()
dme = (d1[1]+d2[1]+d3[1]+d4[1]+d5[1])/5. #just take mean
eme = 0.5/np.sqrt(5)*((d1[1]-dme)**2.+(d2[1]-dme)**2.+(d3[1]-dme)**2.+(d4[1]-dme)**2.+(d5[1]-dme)**2.)**.5 #standard deviation
```
```python
#check what this looks like
plt.errorbar(d1[0],(dme+0.02)*d1[0]**2.,eme*d1[0]**2.,fmt='-',color='b',label='0.8<z<1.1')
plt.xlim(0,50)
plt.ylim(-25,59)
plt.xlabel('s (Mpc/h)')
plt.ylabel(r'$s^2\xi_0$')
plt.show()
```

```python
#repeat for higher redshift range
zmin = 1.1
zmax = 1.6
tiles = ['80606','80608','80610','80621','80623']
for tile in tiles:
gf = xitools.createSourcesrd_ad(tp,tile,night,zmin=zmin,zmax=zmax,datadir=catdir)
subprocess.run(['chmod','+x','dopc'+gf+'.sh'])
subprocess.run('./dopc'+gf+'.sh')
for i in range(1,rmax):
gf = xitools.createSourcesrd_ari(tp,tile,night,i,zmin=zmin,zmax=zmax,datadir=catdir)
subprocess.run(['chmod','+x','dopc'+gf+'.sh'])
subprocess.run('./dopc'+gf+'.sh')
xitools.ppxilcalc_LSDfjack_bs(tp,tile,night,zmin=zmin,zmax=zmax,bs=5,nran=rmax,wmu='')
```
862 data objects going out for paircounts
2779 random objects going out for paircounts
2733 random objects going out for paircounts
2745 random objects going out for paircounts
2755 random objects going out for paircounts
2787 random objects going out for paircounts
2726 random objects going out for paircounts
2735 random objects going out for paircounts
2770 random objects going out for paircounts
2767 random objects going out for paircounts
2669 random objects going out for paircounts

wrote results to xi0ELG80606_deep_zm1.1zx1.65st0.dat
747 data objects going out for paircounts
2237 random objects going out for paircounts
2345 random objects going out for paircounts
2381 random objects going out for paircounts
2342 random objects going out for paircounts
2385 random objects going out for paircounts
2406 random objects going out for paircounts
2328 random objects going out for paircounts
2354 random objects going out for paircounts
2358 random objects going out for paircounts
2351 random objects going out for paircounts

wrote results to xi0ELG80608_deep_zm1.1zx1.65st0.dat
771 data objects going out for paircounts
2419 random objects going out for paircounts
2368 random objects going out for paircounts
2492 random objects going out for paircounts
2462 random objects going out for paircounts
2497 random objects going out for paircounts
2547 random objects going out for paircounts
2382 random objects going out for paircounts
2445 random objects going out for paircounts
2399 random objects going out for paircounts
2448 random objects going out for paircounts

wrote results to xi0ELG80610_deep_zm1.1zx1.65st0.dat
695 data objects going out for paircounts
2228 random objects going out for paircounts
2207 random objects going out for paircounts
2183 random objects going out for paircounts
2269 random objects going out for paircounts
2287 random objects going out for paircounts
2193 random objects going out for paircounts
2166 random objects going out for paircounts
2172 random objects going out for paircounts
2133 random objects going out for paircounts
2209 random objects going out for paircounts

wrote results to xi0ELG80621_deep_zm1.1zx1.65st0.dat
667 data objects going out for paircounts
2117 random objects going out for paircounts
2119 random objects going out for paircounts
2149 random objects going out for paircounts
2122 random objects going out for paircounts
2253 random objects going out for paircounts
2191 random objects going out for paircounts
2202 random objects going out for paircounts
2173 random objects going out for paircounts
2131 random objects going out for paircounts
2217 random objects going out for paircounts

wrote results to xi0ELG80623_deep_zm1.1zx1.65st0.dat
```python
bs=5
xidir=''
# Load the per-tile high-z (1.1<z<1.6) monopole measurements
d1 = np.loadtxt(xidir+'xi0ELG80606_deep_zm1.1zx1.6'+str(bs)+'st0.dat').transpose()
d2 = np.loadtxt(xidir+'xi0ELG80608_deep_zm1.1zx1.6'+str(bs)+'st0.dat').transpose()
d3 = np.loadtxt(xidir+'xi0ELG80610_deep_zm1.1zx1.6'+str(bs)+'st0.dat').transpose()
d4 = np.loadtxt(xidir+'xi0ELG80621_deep_zm1.1zx1.6'+str(bs)+'st0.dat').transpose()
d5 = np.loadtxt(xidir+'xi0ELG80623_deep_zm1.1zx1.6'+str(bs)+'st0.dat').transpose()
dh = (d1[1]+d2[1]+d3[1]+d4[1]+d5[1])/5. #just take mean
# BUG FIX: the deviations must be taken about the high-z mean ``dh``,
# not ``dme`` (the 0.8<z<1.1 mean carried over from the earlier cell).
eh = 0.5/np.sqrt(5)*((d1[1]-dh)**2.+(d2[1]-dh)**2.+(d3[1]-dh)**2.+(d4[1]-dh)**2.+(d5[1]-dh)**2.)**.5 #standard deviation
```
```python
#run the alltiles version to compare
tile = 'alltiles'
gf = xitools.createSourcesrd_ad(tp,tile,night,zmin=zmin,zmax=zmax,datadir=catdir)
subprocess.run(['chmod','+x','dopc'+gf+'.sh'])
subprocess.run('./dopc'+gf+'.sh')
for i in range(1,rmax):
gf = xitools.createSourcesrd_ari(tp,tile,night,i,zmin=zmin,zmax=zmax,datadir=catdir)
subprocess.run(['chmod','+x','dopc'+gf+'.sh'])
subprocess.run('./dopc'+gf+'.sh')
xitools.ppxilcalc_LSDfjack_bs(tp,tile,night,zmin=zmin,zmax=zmax,bs=5,nran=rmax,wmu='')
```
3742 data objects going out for paircounts
11780 random objects going out for paircounts
11772 random objects going out for paircounts
11950 random objects going out for paircounts
11950 random objects going out for paircounts
12209 random objects going out for paircounts
12063 random objects going out for paircounts
11813 random objects going out for paircounts
11914 random objects going out for paircounts
11788 random objects going out for paircounts
11894 random objects going out for paircounts

wrote results to xi0ELGalltiles_deep_zm1.1zx1.65st0.dat
array([ 0.99269664, 0.25191352, 0.10171105, 0.08814581, 0.05382847,
-0.03308081, -0.01791456, -0.00646897, -0.0064632 , -0.00310062],
dtype=float32)
```python
dha = np.loadtxt('xi0ELGalltiles_deep_zm1.1zx1.65st0.dat').transpose()
plt.errorbar(d1[0]+.5,(dh+0.02)*d1[0]**2.,eh*d1[0]**2.,fmt='-',color='b',label='1.1<z<1.6, tile by tile')
# BUG FIX: ``d`` was never defined in this notebook (NameError as written);
# the alltiles curve must be plotted against its own separation grid dha[0].
plt.plot(dha[0],(dha[1]+0.02)*dha[0]**2.,'-',color='g',label='1.1<z<1.6, alltiles')
plt.xlabel('s (Mpc/h)')
plt.ylabel(r'$s^2\xi_0$')
plt.xlim(0,50)
plt.ylim(-25,59)
plt.legend()
plt.show()
```

```python
xilin = np.loadtxt(os.environ['HOME']+'/BAOtemplates/xi0Challenge_matterpower0.563.04.07.015.00.dat').transpose()
plt.errorbar(d1[0],(dme+0.02)*d1[0]**2.,eme*d1[0]**2.,fmt='-',color='b',label='0.8<z<1.1')
plt.errorbar(d1[0]+.5,(dh+0.02)*d1[0]**2.,eh*d1[0]**2.,fmt='-',color='purple',label='1.1<z<1.6')
plt.plot(xilin[0],xilin[0]**2.*xilin[1]*.7,'k:',label=r'$\xi_{\rm 0}(z=0),b/D(z)=\sqrt{0.7},\beta=0.56$')
plt.plot(xilin[0],xilin[0]**2.*xilin[1]*.5,'k-.',label=r'$\xi_{\rm 0}(z=0),b/D(z)=\sqrt{0.5},\beta=0.56$')
plt.xlabel('s (Mpc/h)')
plt.ylabel(r'$s^2\xi_0$')
plt.xlim(0,50)
plt.ylim(-29,59)
plt.legend()
plt.show()
```

Below looks at finer division in redshift to see if anything obvious pops out
```python
tile = 'alltiles'
zms = [0.6,.8,1.,1.2,1.4]
for zmin in zms:
zmax = round(zmin + 0.2,1)
print(zmin,zmax)
gf = xitools.createSourcesrd_ad(tp,tile,night,zmin=zmin,zmax=zmax,datadir=catdir)
subprocess.run(['chmod','+x','dopc'+gf+'.sh'])
subprocess.run('./dopc'+gf+'.sh')
for i in range(1,rmax):
gf = xitools.createSourcesrd_ari(tp,tile,night,i,zmin=zmin,zmax=zmax,datadir=catdir)
subprocess.run(['chmod','+x','dopc'+gf+'.sh'])
subprocess.run('./dopc'+gf+'.sh')
xitools.ppxilcalc_LSDfjack_bs(tp,tile,night,zmin=zmin,zmax=zmax,bs=5,nran=rmax,wmu='')
```
0.6 0.8
1818 data objects going out for paircounts
5732 random objects going out for paircounts
5640 random objects going out for paircounts
5783 random objects going out for paircounts
5810 random objects going out for paircounts
5719 random objects going out for paircounts
5653 random objects going out for paircounts
5836 random objects going out for paircounts
5880 random objects going out for paircounts
5742 random objects going out for paircounts
5725 random objects going out for paircounts

wrote results to xi0ELGalltiles_deep_zm0.6zx0.85st0.dat
0.8 1.0
3256 data objects going out for paircounts
10507 random objects going out for paircounts
10338 random objects going out for paircounts
10523 random objects going out for paircounts
10444 random objects going out for paircounts
10264 random objects going out for paircounts
10400 random objects going out for paircounts
10240 random objects going out for paircounts
10421 random objects going out for paircounts
10218 random objects going out for paircounts
10551 random objects going out for paircounts

wrote results to xi0ELGalltiles_deep_zm0.8zx1.05st0.dat
1.0 1.2
1981 data objects going out for paircounts
6234 random objects going out for paircounts
6306 random objects going out for paircounts
6292 random objects going out for paircounts
6312 random objects going out for paircounts
6421 random objects going out for paircounts
6285 random objects going out for paircounts
6336 random objects going out for paircounts
6225 random objects going out for paircounts
6338 random objects going out for paircounts
6347 random objects going out for paircounts

wrote results to xi0ELGalltiles_deep_zm1.0zx1.25st0.dat
1.2 1.4
1475 data objects going out for paircounts
4648 random objects going out for paircounts
4545 random objects going out for paircounts
4812 random objects going out for paircounts
4700 random objects going out for paircounts
4779 random objects going out for paircounts
4795 random objects going out for paircounts
4585 random objects going out for paircounts
4778 random objects going out for paircounts
4682 random objects going out for paircounts
4782 random objects going out for paircounts

wrote results to xi0ELGalltiles_deep_zm1.2zx1.45st0.dat
1.4 1.6
1381 data objects going out for paircounts
4370 random objects going out for paircounts
4413 random objects going out for paircounts
4392 random objects going out for paircounts
4446 random objects going out for paircounts
4543 random objects going out for paircounts
4391 random objects going out for paircounts
4383 random objects going out for paircounts
4332 random objects going out for paircounts
4287 random objects going out for paircounts
4294 random objects going out for paircounts

wrote results to xi0ELGalltiles_deep_zm1.4zx1.65st0.dat
```python
```
```python
d1 = np.loadtxt('xi0ELGalltiles_deep_zm0.6zx0.85st0.dat').transpose()
d2 = np.loadtxt('xi0ELGalltiles_deep_zm0.8zx1.05st0.dat').transpose()
d3 = np.loadtxt('xi0ELGalltiles_deep_zm1.0zx1.25st0.dat').transpose()
d4 = np.loadtxt('xi0ELGalltiles_deep_zm1.2zx1.45st0.dat').transpose()
d5 = np.loadtxt('xi0ELGalltiles_deep_zm1.4zx1.65st0.dat').transpose()
plt.plot(d1[0],(d1[1]+0.02)*d1[0]**2.,label='0.6<z<0.8')
plt.plot(d1[0],(d2[1]+0.02)*d1[0]**2.,label='0.8<z<1.0')
plt.plot(d1[0],(d3[1]+0.02)*d1[0]**2.,label='1.0<z<1.2')
plt.plot(d1[0],(d4[1]+0.02)*d1[0]**2.,label='1.2<z<1.4')
plt.plot(d1[0],(d5[1]+0.02)*d1[0]**2.,label='1.4<z<1.6')
plt.plot(xilin[0],xilin[0]**2.*xilin[1]*.7,'k:',label=r'$\xi_{\rm 0}(z=0),b/D(z)=\sqrt{0.7},\beta=0.56$')
plt.plot(xilin[0],xilin[0]**2.*xilin[1]*.5,'k-.',label=r'$\xi_{\rm 0}(z=0),b/D(z)=\sqrt{0.5},\beta=0.56$')
plt.xlabel('s (Mpc/h)')
plt.ylabel(r'$s^2\xi_0$')
plt.xlim(0,50)
plt.ylim(-79,79)
plt.legend()
plt.show()
```

```python
```
|
desihubREPO_NAMELSSPATH_START.@LSS_extracted@LSS-main@Sandbox@SV1xi.ipynb@.PATH_END.py
|
{
"filename": "_cluster.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/scattermapbox/_cluster.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class ClusterValidator(_plotly_utils.basevalidators.CompoundValidator):
    """Compound validator for the ``cluster`` attribute of ``scattermapbox``
    traces; delegates construction to the generated ``Cluster`` class."""

    def __init__(self, plotly_name="cluster", parent_name="scattermapbox", **kwargs):
        # Fill in the generated defaults only when the caller has not
        # supplied overrides, then forward everything to the base class.
        kwargs.setdefault("data_class_str", "Cluster")
        kwargs.setdefault(
            "data_docs",
            """
            color
                Sets the color for each cluster step.
            colorsrc
                Sets the source reference on Chart Studio Cloud
                for `color`.
            enabled
                Determines whether clustering is enabled or
                disabled.
            maxzoom
                Sets the maximum zoom level. At zoom levels
                equal to or greater than this, points will
                never be clustered.
            opacity
                Sets the marker opacity.
            opacitysrc
                Sets the source reference on Chart Studio Cloud
                for `opacity`.
            size
                Sets the size for each cluster step.
            sizesrc
                Sets the source reference on Chart Studio Cloud
                for `size`.
            step
                Sets how many points it takes to create a
                cluster or advance to the next cluster step.
                Use this in conjunction with arrays for `size`
                and / or `color`. If an integer, steps start at
                multiples of this number. If an array, each
                step extends from the given value until one
                less than the next value.
            stepsrc
                Sets the source reference on Chart Studio Cloud
                for `step`.
""",
        )
        super(ClusterValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            **kwargs,
        )
|
{
"filename": "_stream.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/graph_objs/scattermapbox/_stream.py",
"type": "Python"
}
|
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Stream(_BaseTraceHierarchyType):
    """Streaming configuration for scattermapbox traces.

    Auto-generated plotly graph-object wrapper: property reads/writes are
    delegated to the dict-like machinery of BaseTraceHierarchyType via
    ``self["..."]``, which validates values against ``_valid_props``.
    """

    # class properties
    # --------------------
    _parent_path_str = "scattermapbox"
    _path_str = "scattermapbox.stream"
    _valid_props = {"maxpoints", "token"}

    # maxpoints
    # ---------
    @property
    def maxpoints(self):
        """
        Sets the maximum number of points to keep on the plots from an
        incoming stream. If `maxpoints` is set to 50, only the newest
        50 points will be displayed on the plot.

        The 'maxpoints' property is a number and may be specified as:
          - An int or float in the interval [0, 10000]

        Returns
        -------
        int|float
        """
        return self["maxpoints"]

    @maxpoints.setter
    def maxpoints(self, val):
        self["maxpoints"] = val

    # token
    # -----
    @property
    def token(self):
        """
        The stream id number links a data trace on a plot with a
        stream. See https://chart-studio.plotly.com/settings for more
        details.

        The 'token' property is a string and must be specified as:
          - A non-empty string

        Returns
        -------
        str
        """
        return self["token"]

    @token.setter
    def token(self, val):
        self["token"] = val

    # Self properties description
    # ---------------------------
    @property
    def _prop_descriptions(self):
        return """\
        maxpoints
            Sets the maximum number of points to keep on the plots
            from an incoming stream. If `maxpoints` is set to 50,
            only the newest 50 points will be displayed on the
            plot.
        token
            The stream id number links a data trace on a plot with
            a stream. See https://chart-studio.plotly.com/settings
            for more details.
        """

    def __init__(self, arg=None, maxpoints=None, token=None, **kwargs):
        """
        Construct a new Stream object

        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of
            :class:`plotly.graph_objs.scattermapbox.Stream`
        maxpoints
            Sets the maximum number of points to keep on the plots
            from an incoming stream. If `maxpoints` is set to 50,
            only the newest 50 points will be displayed on the
            plot.
        token
            The stream id number links a data trace on a plot with
            a stream. See https://chart-studio.plotly.com/settings
            for more details.

        Returns
        -------
        Stream
        """
        super(Stream, self).__init__("stream")

        # Fast path used internally when re-parenting an existing node:
        # adopt the parent and skip all validation/population below.
        if "_parent" in kwargs:
            self._parent = kwargs["_parent"]
            return

        # Validate arg
        # ------------
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            # shallow copy so popping keys below does not mutate the caller's dict
            arg = _copy.copy(arg)
        else:
            raise ValueError(
                """\
The first argument to the plotly.graph_objs.scattermapbox.Stream
constructor must be a dict or
an instance of :class:`plotly.graph_objs.scattermapbox.Stream`"""
            )

        # Handle skip_invalid
        # -------------------
        self._skip_invalid = kwargs.pop("skip_invalid", False)
        self._validate = kwargs.pop("_validate", True)

        # Populate data dict with properties
        # ----------------------------------
        # Explicit keyword arguments take precedence over values found in arg.
        _v = arg.pop("maxpoints", None)
        _v = maxpoints if maxpoints is not None else _v
        if _v is not None:
            self["maxpoints"] = _v
        _v = arg.pop("token", None)
        _v = token if token is not None else _v
        if _v is not None:
            self["token"] = _v

        # Process unknown kwargs
        # ----------------------
        self._process_kwargs(**dict(arg, **kwargs))

        # Reset skip_invalid
        # ------------------
        self._skip_invalid = False
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@graph_objs@scattermapbox@_stream.py@.PATH_END.py
|
{
"filename": "generalized_linear_model.py",
"repo_name": "dmlc/xgboost",
"repo_path": "xgboost_extracted/xgboost-master/demo/guide-python/generalized_linear_model.py",
"type": "Python"
}
|
"""
Demo for GLM
============
"""
import os
import xgboost as xgb
##
# this script demonstrate how to fit generalized linear model in xgboost
# basically, we are using linear model, instead of tree for our boosters
##
CURRENT_DIR = os.path.dirname(__file__)
dtrain = xgb.DMatrix(
os.path.join(CURRENT_DIR, "../data/agaricus.txt.train?format=libsvm")
)
dtest = xgb.DMatrix(
os.path.join(CURRENT_DIR, "../data/agaricus.txt.test?format=libsvm")
)
# change booster to gblinear, so that we are fitting a linear model
# alpha is the L1 regularizer
# lambda is the L2 regularizer
# you can also set lambda_bias which is L2 regularizer on the bias term
param = {
"objective": "binary:logistic",
"booster": "gblinear",
"alpha": 0.0001,
"lambda": 1,
}
# normally, you do not need to set eta (step_size)
# XGBoost uses a parallel coordinate descent algorithm (shotgun),
# there could be affection on convergence with parallelization on certain cases
# setting eta to be smaller value, e.g 0.5 can make the optimization more stable
# param['eta'] = 1
##
# the rest of settings are the same
##
watchlist = [(dtest, "eval"), (dtrain, "train")]
num_round = 4
bst = xgb.train(param, dtrain, num_round, watchlist)
preds = bst.predict(dtest)
labels = dtest.get_label()
print(
"error=%f"
% (
sum(1 for i in range(len(preds)) if int(preds[i] > 0.5) != labels[i])
/ float(len(preds))
)
)
|
dmlcREPO_NAMExgboostPATH_START.@xgboost_extracted@xgboost-master@demo@guide-python@generalized_linear_model.py@.PATH_END.py
|
{
"filename": "trisurf.md",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/doc/python/trisurf.md",
"type": "Markdown"
}
|
---
jupyter:
jupytext:
notebook_metadata_filter: all
text_representation:
extension: .md
format_name: markdown
format_version: '1.2'
jupytext_version: 1.4.2
kernelspec:
display_name: Python 3
language: python
name: python3
language_info:
codemirror_mode:
name: ipython
version: 3
file_extension: .py
mimetype: text/x-python
name: python
nbconvert_exporter: python
pygments_lexer: ipython3
version: 3.7.7
plotly:
description: How to make tri-surf plots in Python with Plotly. Trisurfs are formed
by replacing the boundaries of a compact surface by touching triangles.
display_as: 3d_charts
language: python
layout: base
name: Trisurf Plots
order: 8
permalink: python/trisurf/
thumbnail: thumbnail/trisurf.jpg
---
Trisurf plots can be made using a [figure factory](/python/figure-factories/) as detailed on this page.
#### Torus
```python
import plotly.figure_factory as ff
import numpy as np
from scipy.spatial import Delaunay
u = np.linspace(0, 2*np.pi, 20)
v = np.linspace(0, 2*np.pi, 20)
u,v = np.meshgrid(u,v)
u = u.flatten()
v = v.flatten()
x = (3 + (np.cos(v)))*np.cos(u)
y = (3 + (np.cos(v)))*np.sin(u)
z = np.sin(v)
points2D = np.vstack([u,v]).T
tri = Delaunay(points2D)
simplices = tri.simplices
fig = ff.create_trisurf(x=x, y=y, z=z,
simplices=simplices,
title="Torus", aspectratio=dict(x=1, y=1, z=0.3))
fig.show()
```
#### Mobius Band
```python
import plotly.figure_factory as ff
import numpy as np
from scipy.spatial import Delaunay
u = np.linspace(0, 2*np.pi, 24)
v = np.linspace(-1, 1, 8)
u,v = np.meshgrid(u,v)
u = u.flatten()
v = v.flatten()
tp = 1 + 0.5*v*np.cos(u/2.)
x = tp*np.cos(u)
y = tp*np.sin(u)
z = 0.5*v*np.sin(u/2.)
points2D = np.vstack([u,v]).T
tri = Delaunay(points2D)
simplices = tri.simplices
fig = ff.create_trisurf(x=x, y=y, z=z,
colormap="Portland",
simplices=simplices,
title="Mobius Band")
fig.show()
```
#### Boy's Surface
```python
import plotly.figure_factory as ff
import numpy as np
from scipy.spatial import Delaunay
u=np.linspace(-np.pi/2, np.pi/2, 60)
v=np.linspace(0, np.pi, 60)
u,v=np.meshgrid(u,v)
u=u.flatten()
v=v.flatten()
x = (np.sqrt(2)*(np.cos(v)*np.cos(v))*np.cos(2*u) + np.cos(u)*np.sin(2*v))/(2 - np.sqrt(2)*np.sin(3*u)*np.sin(2*v))
y = (np.sqrt(2)*(np.cos(v)*np.cos(v))*np.sin(2*u) - np.sin(u)*np.sin(2*v))/(2 - np.sqrt(2)*np.sin(3*u)*np.sin(2*v))
z = (3*(np.cos(v)*np.cos(v)))/(2 - np.sqrt(2)*np.sin(3*u)*np.sin(2*v))
points2D = np.vstack([u, v]).T
tri = Delaunay(points2D)
simplices = tri.simplices
fig = ff.create_trisurf(x=x, y=y, z=z,
colormap=['rgb(50, 0, 75)', 'rgb(200, 0, 200)', '#c8dcc8'],
show_colorbar=True,
simplices=simplices,
title="Boy's Surface")
fig.show()
```
#### Reference
For more info on `ff.create_trisurf()`, see the [full function reference](https://plotly.com/python-api-reference/generated/plotly.figure_factory.create_trisurf.html)
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@doc@python@trisurf.md@.PATH_END.py
|
{
"filename": "feature_extractor_byol.py",
"repo_name": "SKA-INAF/sclassifier",
"repo_path": "sclassifier_extracted/sclassifier-master/sclassifier/feature_extractor_byol.py",
"type": "Python"
}
|
#!/usr/bin/env python
from __future__ import print_function
##################################################
### MODULE IMPORT
##################################################
## STANDARD MODULES
import os
import sys
import subprocess
import string
import time
import signal
from threading import Thread
import datetime
import numpy as np
import random
import math
import logging
import collections
import csv
import pickle
from copy import deepcopy
from pathlib import Path
##############################
## GLOBAL VARS
##############################
from sclassifier import logger
## TENSORFLOW & KERAS MODULES
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras import models
from tensorflow.keras import optimizers
try:
from tensorflow.keras.utils import plot_model
except:
from tensorflow.keras.utils.vis_utils import plot_model
from tensorflow.keras import backend as K
from tensorflow.keras.models import Model
from tensorflow.keras.models import load_model
from tensorflow.keras.models import model_from_json
try:
from tensorflow.keras.layers import BatchNormalization
except Exception as e:
logger.warn("Failed to import BatchNormalization (err=%s), trying in another way ..." % str(e))
from tensorflow.keras.layers.normalization import BatchNormalization
from tensorflow.keras.layers import Conv2D
from tensorflow.keras.layers import MaxPooling2D, UpSampling2D
from tensorflow.keras.layers import Activation
from tensorflow.keras.layers import Dropout
from tensorflow.keras.layers import Lambda
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import Flatten
from tensorflow.keras.layers import Input
from tensorflow.keras.utils import get_custom_objects
from tensorflow.keras.losses import mse, binary_crossentropy
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.image import convert_image_dtype
from tensorflow.python.ops.image_ops_impl import _fspecial_gauss, _ssim_helper, _verify_compatible_image_shapes
from tensorflow.keras.regularizers import l1
from tensorflow.keras.callbacks import (
ModelCheckpoint,
EarlyStopping,
ReduceLROnPlateau,
)
from tensorflow.python.framework.ops import disable_eager_execution, enable_eager_execution
#disable_eager_execution()
#enable_eager_execution()
## SCIKIT MODULES
from skimage.metrics import mean_squared_error
from skimage.metrics import structural_similarity
from skimage.util import img_as_float64
from PIL import Image
## GRAPHICS MODULES
import matplotlib
import matplotlib.pyplot as plt
matplotlib.use('Agg')
## PACKAGE MODULES
from .utils import Utils
from .tf_utils import byol_loss
##from .models import ResNet18, ResNet34
from .models import resnet18, resnet34
from .models import ProjectionHead, ClassificationHead
################################
## FeatExtractorByol CLASS
################################
# - Implementation following:
# https://github.com/garder14/byol-tensorflow2/blob/main/pretraining.py
# https://www.kaggle.com/code/vedantj/byol-tensorflow-2-0/notebook
class FeatExtractorByol(object):
""" Class to create and train a feature extractor based on Byol contrastive learning framework
Arguments:
- DataGenerator class
"""
def __init__(self, data_generator):
    """ Return a feature extractor Byol object

    Arguments:
        data_generator: DataGenerator instance providing image batches
            (augmented pairs for BYOL pre-training)
    """
    # - Train data generator; validation/test generators are deep copies
    #   of it created later in __set_data (with augmentation disabled)
    self.dg= data_generator
    self.dg_cv= None
    self.has_cvdata= False

    # *****************************
    # ** Input data
    # *****************************
    self.nsamples= 0
    self.nsamples_cv= 0
    self.nx= 64   # image width in pixels
    self.ny= 64   # image height in pixels
    self.nchannels= 0   # 0 means "take from data generator" (see __set_data)
    self.inputs= None
    self.inputs_train= None
    self.input_labels= {}
    self.source_names= []
    self.input_data_dim= 0
    self.encoded_data= None
    self.train_data_generator= None
    self.crossval_data_generator= None
    self.test_data_generator= None
    self.test_data_generator_embeddings= None
    self.augmentation= False
    self.validation_steps= 10
    self.use_multiprocessing= True
    self.nworkers= 0

    # *****************************
    # ** Model
    # *****************************
    # - NN architecture: BYOL uses an online network (encoder f, projector g,
    #   predictor q) and a target network (encoder f, projector g) updated by EMA
    #self.model= None
    #self.modelfile= ""
    #self.weightfile= ""
    self.modelfile_encoder= ""
    self.weightfile_encoder= ""
    self.modelfile_projector= ""
    self.weightfile_projector= ""
    self.modelfile_predictor= ""
    self.weightfile_predictor= ""
    #self.fitout= None
    self.f_online= None
    self.g_online= None
    self.q_online= None
    self.f_target= None
    self.g_target= None
    self.add_channorm_layer= False
    self.channorm_min= 0.0
    self.channorm_max= 1.0
    # - Custom CNN backbone hyper-parameters (used when use_predefined_arch=False)
    self.nfilters_cnn= [32,64,128]
    self.kernsizes_cnn= [3,5,7]
    self.strides_cnn= [2,2,2]
    self.add_max_pooling= False
    self.pool_size= 2
    self.add_leakyrelu= False
    self.leakyrelu_alpha= 0.2
    self.add_batchnorm= True
    self.activation_fcn_cnn= "relu"
    self.add_dense= False
    self.add_dropout_layer= False
    self.add_conv_dropout_layer= False
    self.conv_dropout_rate= 0.2
    self.dense_layer_sizes= [256,128]   # projector/predictor MLP layer sizes
    self.dense_layer_activation= 'relu'
    self.latent_dim= 2
    self.use_global_avg_pooling= False
    self.use_predefined_arch= False
    self.predefined_arch= "resnet50"   # one of: resnet50, resnet101, resnet18, resnet34

    # - Training options
    self.nepochs= 10
    self.batch_size= 32
    self.learning_rate= 1.e-4
    self.optimizer= tf.keras.optimizers.Adam(learning_rate=self.learning_rate)
    self.ph_regul= 0.005
    self.loss_type= "categorical_crossentropy"
    self.weight_init_seed= None
    self.shuffle_train_data= True
    self.augment_scale_factor= 1
    self.load_cv_data_in_batches= True
    self.balance_classes= False
    self.class_probs= {}

    # *****************************
    # ** Output
    # *****************************
    self.outfile_loss= 'losses.png'
    self.outfile_nnout_metrics= 'losses.dat'
    self.outfile_encoded_data= 'latent_data.dat'
    self.save_embeddings= True
    self.save_tb_embeddings= False
    self.nembeddings_save= 1000
    self.img_embedding_scale= 1.0
    self.shuffle_embeddings= False
    self.outfile_tb_embeddings= 'feature_vecs.tsv'
#####################################
## SETTERS/GETTERS
#####################################
def set_image_size(self, nx, ny):
    """Store the input image width (nx) and height (ny) in pixels."""
    self.nx, self.ny = nx, ny
def set_optimizer(self, opt, learning_rate=1.e-4):
    """Select the training optimizer by name ('rmsprop' or 'adam').

    Unknown names leave the current optimizer untouched (a warning is logged).
    """
    if opt == "rmsprop":
        logger.info("Setting rmsprop optimizer with lr=%f ..." % (learning_rate))
        self.optimizer = tf.keras.optimizers.RMSprop(learning_rate=learning_rate)
        return
    if opt == "adam":
        logger.info("Setting adam optimizer with lr=%f ..." % (learning_rate))
        self.optimizer = tf.keras.optimizers.Adam(learning_rate=learning_rate)
        return
    logger.warn("Unknown optimizer selected (%s), won't change the default ..." % (opt))
def set_reproducible_model(self):
    """Configure training for reproducibility.

    Disables train-data shuffling and pins the weight-initialization seed
    (to 1) when no seed was set by the user.
    """
    logger.info("Set reproducible model ...")

    # Deterministic batch order
    self.shuffle_train_data = False

    # Deterministic weight initialization
    if self.weight_init_seed is None:
        self.weight_init_seed = 1
#####################################
## SET TRAIN DATA
#####################################
def __set_data(self):
    """ Set train data & generators from loader.

    Builds four generators from the configured DataGenerator:
    train (BYOL pairs), cross-validation (BYOL pairs, no augmentation),
    test (CAE data, no augmentation) and embeddings (plain data).

    Returns:
        0 on success (kept for consistency with other setup methods)
    """
    # - Retrieve info from data loader.
    #   BUGFIX: check for None BEFORE the numeric comparison; the original
    #   order (`self.nchannels<=0 or self.nchannels is None`) raised a
    #   TypeError in Python 3 when nchannels was None (None <= 0 is invalid).
    if self.nchannels is None or self.nchannels <= 0:
        self.nchannels= self.dg.nchannels # NB: if pre-processor modifies the tensor dimension, you must explicitly set nchannels!
    self.source_labels= self.dg.labels
    self.source_ids= self.dg.classids
    self.source_names= self.dg.snames
    self.nsamples= len(self.source_labels)

    # - Create train data generator (yields augmented view pairs for BYOL)
    self.train_data_generator= self.dg.generate_byol_data(
        batch_size=self.batch_size,
        shuffle=self.shuffle_train_data,
        balance_classes=self.balance_classes, class_probs=self.class_probs
    )

    # - Create cross validation data generator.
    #   If no dedicated CV loader was given, keep a (non-augmented) copy of
    #   the train loader around but mark that no real CV data exists.
    if self.dg_cv is None:
        logger.info("Creating validation data generator (deep-copying train data generator) ...")
        self.dg_cv= deepcopy(self.dg)
        logger.info("Disabling data augmentation in validation data generator ...")
        self.dg_cv.disable_augmentation()
        self.has_cvdata= False
        self.nsamples_cv= 0
        batch_size_cv= 0
        self.crossval_data_generator= None
    else:
        self.has_cvdata= True
        self.nsamples_cv= len(self.dg_cv.labels)
        logger.info("#nsamples_cv=%d" % (self.nsamples_cv))

        # Either stream the CV set in train-sized batches or load it whole
        if self.load_cv_data_in_batches:
            batch_size_cv= self.batch_size
        else:
            batch_size_cv= self.nsamples_cv
        logger.info("Loading cv data in batches? %d (batch_size_cv=%d)" % (self.load_cv_data_in_batches, batch_size_cv))

        self.crossval_data_generator= self.dg_cv.generate_byol_data(
            batch_size=batch_size_cv,
            shuffle=False
        )

    # - Create test data generator (single samples, no augmentation)
    logger.info("Creating test data generator (deep-copying train data generator) ...")
    self.dg_test= deepcopy(self.dg)
    logger.info("Disabling data augmentation in test data generator ...")
    self.dg_test.disable_augmentation()

    self.test_data_generator= self.dg_test.generate_cae_data(
        batch_size=1,
        shuffle=False
    )

    # - Create embeddings data generator (single samples, optional shuffle)
    logger.info("Creating test data generator for embeddings (deep-copying train data generator) ...")
    self.dg_test_embeddings= deepcopy(self.dg)
    logger.info("Disabling data augmentation in test data generator for embeddings ...")
    self.dg_test_embeddings.disable_augmentation()

    self.test_data_generator_embeddings= self.dg_test_embeddings.generate_data(
        batch_size=1,
        shuffle=self.shuffle_embeddings
    )

    return 0
#####################################
## CREATE BASE MODEL (CUSTOM)
#####################################
def __create_custom_base_model(self, inputShape, model_name='base_model'):
    """ Create the encoder base model using a custom parametrized CNN

    Stacks len(self.nfilters_cnn) Conv2D blocks (optionally with batch norm,
    (Leaky)ReLU, max pooling and dropout), then flattens (or global-average
    pools) and optionally adds a final dense layer of size self.latent_dim.
    Returns a keras Model mapping inputShape -> feature vector.
    """
    #===========================
    #== INIT WEIGHTS
    #===========================
    logger.info("Initializing weights ...")
    # HeUniform was renamed across TF versions; fall back to the old name
    try:
        weight_initializer = tf.keras.initializers.HeUniform(seed=self.weight_init_seed)
    except:
        logger.info("Failed to find tf.keras.initializers.HeUniform, trying with tf.keras.initializers.he_uniform ...")
        weight_initializer= tf.keras.initializers.he_uniform(seed=self.weight_init_seed)

    #===========================
    #== INPUT LAYER
    #===========================
    inputs= Input(shape=inputShape)
    input_data_dim= K.int_shape(inputs)
    x= inputs

    print("Base model input data dim=", input_data_dim)

    #===========================
    #== CONV LAYER
    #===========================
    # - Create encoder or base model
    for k in range(len(self.nfilters_cnn)):
        # - Add a Convolutional 2D layer
        padding= "same"
        if k==0:
            # - Set the explicit weight initializer only on the first layer
            x = layers.Conv2D(self.nfilters_cnn[k], (self.kernsizes_cnn[k], self.kernsizes_cnn[k]), strides=self.strides_cnn[k], padding=padding, kernel_initializer=weight_initializer)(x)
        else:
            x = layers.Conv2D(self.nfilters_cnn[k], (self.kernsizes_cnn[k], self.kernsizes_cnn[k]), strides=self.strides_cnn[k], padding=padding)(x)

        # - Add batch normalization?
        if self.add_batchnorm:
            x = BatchNormalization(axis=-1)(x)

        # - Add Leaky RELU (or plain ReLU)?
        if self.add_leakyrelu:
            x = layers.LeakyReLU(alpha=self.leakyrelu_alpha)(x)
        else:
            x = layers.ReLU()(x)

        # - Add max pooling?
        if self.add_max_pooling:
            padding= "valid"
            x = layers.MaxPooling2D(pool_size=(self.pool_size,self.pool_size), strides=None, padding=padding)(x)

        # - Add dropout?
        if self.add_conv_dropout_layer:
            x= layers.Dropout(self.conv_dropout_rate)(x)

    #===========================
    #== FLATTEN LAYER
    #===========================
    if self.use_global_avg_pooling:
        x= layers.GlobalAveragePooling2D()(x)
    else:
        x = layers.Flatten()(x)

    #===========================
    #== DENSE LAYER
    #===========================
    if self.add_dense:
        x = layers.Dense(self.latent_dim, activation=self.dense_layer_activation)(x)

    #===========================
    #== BUILD MODEL
    #===========================
    model= Model(inputs, x, name=model_name)

    return model
########################################
## CREATE BASE MODEL (PREDEFINED)
########################################
def __create_predefined_base_model(self, inputShape, model_name='base_model'):
    """ Create the encoder base model from a predefined backbone

    Supported self.predefined_arch values: resnet50, resnet101 (keras
    applications, randomly initialized, global-average pooled) and
    resnet18/resnet34 (local implementations from .models).
    Returns a keras Model, or None for an unknown architecture name.
    """
    #===========================
    #== INIT WEIGHTS
    #===========================
    # NOTE(review): weight_initializer is created here but never passed to
    # the backbone builders below — presumably dead code, kept as-is.
    logger.info("Initializing weights ...")
    try:
        weight_initializer = tf.keras.initializers.HeUniform(seed=self.weight_init_seed)
    except:
        logger.info("Failed to find tf.keras.initializers.HeUniform, trying with tf.keras.initializers.he_uniform ...")
        weight_initializer= tf.keras.initializers.he_uniform(seed=self.weight_init_seed)

    #===========================
    #== INPUT LAYER
    #===========================
    inputs= Input(shape=inputShape)
    input_data_dim= K.int_shape(inputs)
    x= inputs

    print("Base model input data dim=", input_data_dim)

    #===========================
    #== RES NET
    #===========================
    if self.predefined_arch=="resnet50":
        logger.info("Using resnet50 as base encoder ...")
        resnet50= tf.keras.applications.resnet50.ResNet50(
            include_top=False, # discard the fully-connected layer as we are training from scratch
            weights=None, # random initialization
            input_tensor=inputs,
            input_shape=inputShape,
            pooling="avg" #global average pooling will be applied to the output of the last convolutional block
        )
        x= resnet50(x)

    elif self.predefined_arch=="resnet101":
        logger.info("Using resnet101 as base encoder ...")
        resnet101= tf.keras.applications.resnet.ResNet101(
            include_top=False, # discard the fully-connected layer as we are training from scratch
            weights=None, # random initialization
            input_tensor=inputs,
            input_shape=inputShape,
            pooling="avg" #global average pooling will be applied to the output of the last convolutional block
        )
        x= resnet101(x)

    elif self.predefined_arch=="resnet18":
        logger.info("Using resnet18 as base encoder ...")
        x= resnet18(x, include_top=False)

    elif self.predefined_arch=="resnet34":
        logger.info("Using resnet34 as base encoder ...")
        x= resnet34(x, include_top=False)

    else:
        logger.error("Unknown/unsupported predefined backbone architecture given (%s)!" % (self.predefined_arch))
        return None

    #===========================
    #== FLATTEN LAYER
    #===========================
    ###x = layers.Flatten()(x) # done already inside resnet block
    ###x= layers.GlobalAveragePooling2D()(x) # done already inside pooling

    #===========================
    #== DENSE LAYER
    #===========================
    # - Needed only to reduce a bit the resnet output size (e.g. 2048)
    if self.add_dense:
        x = layers.Dense(self.latent_dim, activation=self.dense_layer_activation)(x)

    #===========================
    #== BUILD MODEL
    #===========================
    model= Model(inputs, x, name=model_name)

    return model
#####################################
## CREATE BASE MODEL
#####################################
def __create_base_model(self, inputShape, model_name='base_model'):
    """Build the encoder backbone.

    Dispatches to the predefined-architecture builder (resnet variants) when
    self.use_predefined_arch is set, otherwise to the custom CNN builder.
    """
    builder = (
        self.__create_predefined_base_model
        if self.use_predefined_arch
        else self.__create_custom_base_model
    )
    return builder(inputShape, model_name)
##########################################
## CREATE PREDICTOR/PROJECTOR MODEL
##########################################
def __create_proj_model(self, inputShape, model_name='proj_model'):
    """ Create the projector (or predictor) MLP head

    Builds an MLP with layer sizes from self.dense_layer_sizes: every layer
    except the last is linear Dense + optional BatchNorm + (Leaky)ReLU; the
    final layer is a plain linear Dense (no activation), as in the original
    BYOL architecture. The same builder is reused for g_online, q_online and
    g_target.
    """
    #===========================
    #== INPUT LAYER
    #===========================
    inputs= Input(shape=inputShape)
    input_data_dim= K.int_shape(inputs)
    x= inputs

    #===========================
    #== DENSE LAYER
    #===========================
    # - Original implementation seems to have 2 layers, e.g. 256-128
    num_layers_ph= len(self.dense_layer_sizes)

    for j in range(num_layers_ph):
        layer_size= self.dense_layer_sizes[j]
        if j < num_layers_ph - 1:
            # - Add linear dense layer (non-linearity added separately below)
            x = layers.Dense(layer_size)(x)
            ###x = layers.Dense(layer_size, activation=self.dense_layer_activation, kernel_regularizer=l1(self.ph_regul))(x) # probably wrong, activation function is linear in original work?
            ###if self.add_dropout_layer:
            ###	x= layers.Dropout(self.dropout_rate)(x)

            # - Add batch normalization?
            if self.add_batchnorm:
                x = BatchNormalization()(x)

            # - Add activation (RELU to make non-linear)
            if self.add_leakyrelu:
                x = layers.LeakyReLU(alpha=self.leakyrelu_alpha)(x)
            else:
                x = layers.ReLU()(x)

        else:
            # - Add final linear dense layer (no activation)
            x = layers.Dense(layer_size)(x)

    #===========================
    #== BUILD MODEL
    #===========================
    model= Model(inputs, x, name=model_name)

    return model
#####################################
## CREATE MODEL
#####################################
def __create_model(self):
    """ Create the BYOL model

    Instantiates the five sub-networks: online encoder f_online, online
    projector g_online, online predictor q_online, target encoder f_target
    and target projector g_target (the target branch has no predictor).
    Head input sizes are inferred by running a dummy forward pass through
    the preceding network.

    Returns:
        0 on success
    """
    # - Create inputs
    inputShape = (self.ny, self.nx, self.nchannels)
    self.inputs= Input(shape=inputShape, dtype='float', name='inputs')
    self.input_data_dim= K.int_shape(self.inputs)
    print("Input data dim=", self.input_data_dim)
    print("inputs shape")
    print(K.int_shape(self.inputs))

    # - Create online encoder base model (f_online)
    logger.info("Creating online encoder base model ...")
    self.f_online= self.__create_base_model(inputShape, 'f_online')
    self.f_online.summary()
    h= self.f_online(self.inputs)   # dummy pass to get encoder output size

    # - Create online projector model (input size = encoder output size)
    logger.info("Creating online projector model ...")
    self.g_online= self.__create_proj_model(h.shape[-1], 'g_online')
    self.g_online.summary()
    z= self.g_online(h)

    # - Create online predictor model (input size = projector output size)
    logger.info("Creating online predictor model ...")
    self.q_online= self.__create_proj_model(z.shape[-1], 'q_online')
    self.q_online.summary()
    p= self.q_online(z)

    # - Create target encoder base model (f_target)
    logger.info("Creating target encoder base model ...")
    self.f_target= self.__create_base_model(inputShape, 'f_target')
    self.f_target.summary()
    h = self.f_target(self.inputs)

    # - Create target projector model
    logger.info("Creating target projector model ...")
    self.g_target= self.__create_proj_model(h.shape[-1], 'g_target')
    self.g_target.summary()

    return 0
#####################################
## RUN TRAIN
#####################################
def run_train(self, modelfile_encoder="", weightfile_encoder="", modelfile_projector="", weightfile_projector="", modelfile_predictor="", weightfile_predictor=""):
    """ Run network training

    Sets up the data generators, then either loads a previously saved
    encoder/projector/predictor (when ALL three model files are given) or
    builds the networks from scratch, and finally trains the BYOL model.

    Args:
        modelfile_encoder/weightfile_encoder: encoder architecture/weights files
        modelfile_projector/weightfile_projector: projector architecture/weights files
        modelfile_predictor/weightfile_predictor: predictor architecture/weights files

    Returns:
        0 on success, -1 on failure
    """
    #===========================
    #== SET TRAINING DATA
    #===========================
    logger.info("Setting training data from data loader ...")
    status= self.__set_data()
    if status<0:
        logger.error("Train data set failed!")
        return -1

    #===========================
    #== BUILD MODEL
    #===========================
    # - Load model architecture & weights from input files
    #   (only when all three model files are provided; otherwise rebuild)
    #   NOTE(review): __load_encoder/__load_projector/__load_predictor are
    #   defined elsewhere in this class.
    if modelfile_encoder!="" and modelfile_projector!="" and modelfile_predictor!="":
        # - Load encoder
        logger.info("Loading encoder model architecture from file: %s, %s ..." % (modelfile_encoder, weightfile_encoder))
        if self.__load_encoder(modelfile_encoder, weightfile_encoder)<0:
            logger.error("Encoder model loading failed!")
            return -1
        self.modelfile_encoder= modelfile_encoder
        self.weightfile_encoder= weightfile_encoder

        # - Load projector
        logger.info("Loading projector model architecture from file: %s, %s ..." % (modelfile_projector, weightfile_projector))
        if self.__load_projector(modelfile_projector, weightfile_projector)<0:
            logger.error("Projector model loading failed!")
            return -1
        self.modelfile_projector= modelfile_projector
        self.weightfile_projector= weightfile_projector

        # - Load predictor
        logger.info("Loading predictor model architecture from file: %s, %s ..." % (modelfile_predictor, weightfile_predictor))
        if self.__load_predictor(modelfile_predictor, weightfile_predictor)<0:
            logger.error("Predictor model loading failed!")
            return -1
        self.modelfile_predictor= modelfile_predictor
        self.weightfile_predictor= weightfile_predictor

    else:
        # - Build model from scratch
        logger.info("Building network architecture ...")
        if self.__create_model()<0:
            logger.error("Model build failed!")
            return -1

    #===========================
    #== TRAIN MODEL
    #===========================
    logger.info("Training BYOL model ...")
    status= self.__train_network()
    if status<0:
        logger.error("Model training failed!")
        return -1

    logger.info("End training run")

    return 0
#####################################
## TRAIN NN
#####################################
@tf.function
def __train_step_pretraining(self, x1, x2): # (bs, 32, 32, 3), (bs, 32, 32, 3)
    """ Train step pretraining

    One BYOL optimization step on a pair of augmented views (x1, x2).
    The target networks are run OUTSIDE the gradient tape: they are not
    trained by gradient descent, only via the EMA update performed by the
    caller (__train_network).
    NOTE(review): the target forward pass uses training=True, which keeps
    BatchNorm in training mode for the target nets too — confirm intended.
    """
    # Forward pass (target branch; no gradients recorded)
    h_target_1 = self.f_target(x1, training=True)
    z_target_1 = self.g_target(h_target_1, training=True)

    h_target_2 = self.f_target(x2, training=True)
    z_target_2 = self.g_target(h_target_2, training=True)

    # persistent=True: tape.gradient is called three times below
    with tf.GradientTape(persistent=True) as tape:
        h_online_1 = self.f_online(x1, training=True)
        z_online_1 = self.g_online(h_online_1, training=True)
        p_online_1 = self.q_online(z_online_1, training=True)

        h_online_2 = self.f_online(x2, training=True)
        z_online_2 = self.g_online(h_online_2, training=True)
        p_online_2 = self.q_online(z_online_2, training=True)

        # Symmetrized loss: predictions of view 1 match targets of view 2
        # and vice versa (hence the swapped concat order)
        p_online = tf.concat([p_online_1, p_online_2], axis=0)
        z_target = tf.concat([z_target_2, z_target_1], axis=0)

        loss = byol_loss(p_online, z_target)

    # Backward pass (update online networks only)
    grads = tape.gradient(loss, self.f_online.trainable_variables)
    self.optimizer.apply_gradients(zip(grads, self.f_online.trainable_variables))
    grads = tape.gradient(loss, self.g_online.trainable_variables)
    self.optimizer.apply_gradients(zip(grads, self.g_online.trainable_variables))
    grads = tape.gradient(loss, self.q_online.trainable_variables)
    self.optimizer.apply_gradients(zip(grads, self.q_online.trainable_variables))
    del tape  # release persistent tape resources

    return loss
@tf.function
def __val_step_pretraining(self, x1, x2): # (bs, 32, 32, 3), (bs, 32, 32, 3)
    """ Validation step pretraining

    Computes the symmetrized BYOL loss on a pair of augmented views
    without recording gradients or updating any weights. Mirrors the
    forward pass of __train_step_pretraining (inference mode here).
    """
    # Target branch
    h_target_1 = self.f_target(x1)
    z_target_1 = self.g_target(h_target_1)

    h_target_2 = self.f_target(x2)
    z_target_2 = self.g_target(h_target_2)

    # Online branch (encoder -> projector -> predictor)
    h_online_1 = self.f_online(x1)
    z_online_1 = self.g_online(h_online_1)
    p_online_1 = self.q_online(z_online_1)

    h_online_2 = self.f_online(x2)
    z_online_2 = self.g_online(h_online_2)
    p_online_2 = self.q_online(z_online_2)

    # Symmetrized pairing: predictions of one view vs targets of the other
    p_online = tf.concat([p_online_1, p_online_2], axis=0)
    z_target = tf.concat([z_target_2, z_target_1], axis=0)

    loss = byol_loss(p_online, z_target)

    return loss
def __train_network(self):
    """ Train BYOL model

    Custom training loop: per epoch, draws augmented pairs from the train
    generator, applies __train_step_pretraining, and after every batch
    updates the target encoder/projector as an exponential moving average
    (beta=0.99) of the online networks. Optionally evaluates on
    cross-validation data, then saves the online networks, metrics and
    (optionally) embeddings. Returns 0 on success, -1 on failure.
    """
    #===========================
    #== INIT
    #===========================
    # - Initialize train/test loss vs epoch
    # NOTE(review): this array is allocated but never filled below;
    # losses are tracked in the local lists instead — confirm it can go.
    self.train_loss_vs_epoch= np.zeros((1,self.nepochs))
    steps_per_epoch= self.nsamples // self.batch_size
    # - Set validation steps
    val_steps_per_epoch= self.validation_steps
    if self.has_cvdata:
        if self.load_cv_data_in_batches:
            val_steps_per_epoch= self.nsamples_cv // self.batch_size
        else:
            # Whole CV set delivered in a single batch
            val_steps_per_epoch= 1
    #===========================
    #== TRAIN NETWORK
    #===========================
    # - Train model
    logger.info("Start BYOL training (dataset_size=%d, batch_size=%d, steps_per_epoch=%d, val_steps_per_epoch=%d) ..." % (self.nsamples, self.batch_size, steps_per_epoch, val_steps_per_epoch))
    losses_train = []
    losses_val = []
    log_every= 1
    for epoch_id in range(self.nepochs):
        # - Run train
        losses_train_batch= []
        for batch_id in range(steps_per_epoch):
            # - Fetch train data from generator (two augmented views)
            x1, x2= next(self.train_data_generator)
            loss = self.__train_step_pretraining(x1, x2)
            losses_train_batch.append(float(loss))
            # - Update target networks (exponential moving average of online
            #   networks). Note: only encoder and projector have targets;
            #   the predictor q_online has no target counterpart (BYOL).
            beta = 0.99
            f_target_weights = self.f_target.get_weights()
            f_online_weights = self.f_online.get_weights()
            for i in range(len(f_online_weights)):
                f_target_weights[i] = beta * f_target_weights[i] + (1 - beta) * f_online_weights[i]
            self.f_target.set_weights(f_target_weights)
            g_target_weights = self.g_target.get_weights()
            g_online_weights = self.g_online.get_weights()
            for i in range(len(g_online_weights)):
                g_target_weights[i] = beta * g_target_weights[i] + (1 - beta) * g_online_weights[i]
            self.g_target.set_weights(g_target_weights)
            # - Print train losses in batch
            if (batch_id + 1) % log_every == 0:
                logger.info("Epoch %d/%d [Batch %d/%d]: train_loss=%f" % (epoch_id+1, self.nepochs, batch_id+1, steps_per_epoch, loss))
        # - Run validation?
        losses_val_batch= []
        if self.has_cvdata:
            for batch_id in range(val_steps_per_epoch):
                x1, x2= next(self.crossval_data_generator)
                loss = self.__val_step_pretraining(x1, x2)
                losses_val_batch.append(float(loss))
                if (batch_id + 1) % log_every == 0:
                    logger.info("Epoch %d/%d [Batch %d/%d]: val_loss=%f" % (epoch_id+1, self.nepochs, batch_id+1, val_steps_per_epoch, loss))
        # - Compute average train & validation loss (NaN-safe mean)
        loss_train= np.nanmean(losses_train_batch)
        losses_train.append(loss_train)
        if losses_val_batch:
            loss_val= np.nanmean(losses_val_batch)
            losses_val.append(loss_val)
            logger.info("Epoch %d/%d: train_loss=%f, val_loss=%f " % (epoch_id+1, self.nepochs, loss_train, loss_val))
        else:
            logger.info("Epoch %d/%d: train_loss=%f" % (epoch_id+1, self.nepochs, loss_train))
    #===========================
    #== SAVE NN
    #===========================
    # - Save encoder base (weights, json architecture, and full h5 model)
    if self.f_online:
        logger.info("Saving online encoder network/weights to file ...")
        self.f_online.save_weights('encoder_weights.h5')
        with open('encoder_architecture.json', 'w') as f:
            f.write(self.f_online.to_json())
        self.f_online.save('encoder.h5')
    # - Save online projector
    if self.g_online:
        logger.info("Saving online projector network/weights to file ...")
        self.g_online.save_weights('projector_weights.h5')
        with open('projector_architecture.json', 'w') as f:
            f.write(self.g_online.to_json())
        self.g_online.save('projector.h5')
    # - Save online predictor
    if self.q_online:
        logger.info("Saving online predictor network/weights to file ...")
        self.q_online.save_weights('predictor_weights.h5')
        with open('predictor_architecture.json', 'w') as f:
            f.write(self.q_online.to_json())
        self.q_online.save('predictor.h5')
    #================================
    #== SAVE TRAIN METRICS
    #================================
    # - Saving losses to file (val column padded with zeros when absent)
    logger.info("Saving train/val losses to file ...")
    N= len(losses_train)
    if not losses_val:
        losses_val= [0]*N
    epoch_ids= np.array(range(N))
    epoch_ids+= 1
    epoch_ids= epoch_ids.reshape(N,1)
    metrics_data= np.concatenate(
        (epoch_ids, np.array(losses_train).reshape(N,1), np.array(losses_val).reshape(N,1)),
        axis=1
    )
    head= '# epoch loss loss_val'
    Utils.write_ascii(metrics_data,self.outfile_nnout_metrics,head)
    #================================
    #== PREDICT & SAVE EMBEDDINGS
    #================================
    if self.save_embeddings and self.__save_embeddings()<0:
        logger.warn("Failed to save latent space embeddings to file ...")
        return -1
    return 0
#####################################
## RUN PREDICT
#####################################
def run_predict(self, modelfile_encoder="", weightfile_encoder=""):
    """Load a saved encoder and run it over the input data.

    Args:
        modelfile_encoder: path to the encoder architecture file (h5).
        weightfile_encoder: optional path to the encoder weights file.

    Returns:
        0 on success, -1 on failure.
    """
    # Pull the input data through the configured data loader.
    logger.info("Setting input data from data loader ...")
    if self.__set_data() < 0:
        logger.error("Input data set failed!")
        return -1

    # Restore the encoder architecture (and optionally its weights).
    logger.info("Loading encoder model architecture from file: %s, %s ..." % (modelfile_encoder, weightfile_encoder))
    if self.__load_encoder(modelfile_encoder, weightfile_encoder) < 0:
        logger.error("Encoder model loading failed!")
        return -1
    self.modelfile_encoder = modelfile_encoder
    self.weightfile_encoder = weightfile_encoder

    # Compute latent embeddings and persist them to file.
    if self.save_embeddings and self.__save_embeddings() < 0:
        logger.warn("Failed to save latent space embeddings to file ...")
        return -1

    # Optionally also export embeddings in TensorBoard projector format
    # (failure here is non-fatal).
    if self.save_tb_embeddings and self.__save_tb_embeddings() < 0:
        logger.warn("Failed to save embeddings in tensorboard format ...")

    logger.info("End predict run")
    return 0
########################################
## SAVE EMBEDDINGS
########################################
def __save_embeddings(self):
    """Run the online encoder over the test data and write the latent
    vectors to an ascii table (one row per source: name, z1..zN, id).

    Returns:
        0 on success.
    """
    # - Run the encoder over the whole test set
    logger.info("Running BYOL prediction on input data ...")
    predout = self.f_online.predict(
        x=self.test_data_generator,
        steps=self.nsamples,  # NOTE(review): Keras 'steps' counts batches — verify the generator yields one sample per step
        verbose=2,
        workers=self.nworkers,
        use_multiprocessing=self.use_multiprocessing
    )

    # Multi-output models return a tuple; keep only the first output.
    if type(predout) == tuple and len(predout) > 0:
        self.encoded_data = predout[0]
    else:
        self.encoded_data = predout

    print("encoded_data shape")
    print(self.encoded_data.shape)
    nrows = self.encoded_data.shape[0]
    nvars = self.encoded_data.shape[1]

    # - Prepend source names and append source ids to the latent vectors
    logger.info("Adding source info data to encoded data ...")
    names_col = np.array(self.source_names).reshape(nrows, 1)
    ids_col = np.array(self.source_ids).reshape(nrows, 1)
    enc_data = np.concatenate(
        (names_col, self.encoded_data, ids_col),
        axis=1
    )

    # - Write the table with a descriptive header ("# sname z1 ... zN id")
    logger.info("Saving predicted latent data to file %s ..." % (self.outfile_encoded_data))
    znames_counter = list(range(1, nvars + 1))
    znames = '{}{}'.format('z', ' z'.join(str(item) for item in znames_counter))
    head = '{} {} {}'.format("# sname", znames, "id")
    Utils.write_ascii(enc_data, self.outfile_encoded_data, head)

    return 0
########################################
## SAVE EMBEDDINGS TENSORBOARD
########################################
def __save_tb_embeddings(self):
    """ Save embeddings for tensorboard visualization

    Runs the online encoder over up to ``nembeddings_save`` generator
    items, writes the embedding tensor as a tf checkpoint, a label
    metadata.tsv, and (for 1- or 3-channel images) a sprite image, then
    emits a TensorBoard projector config. Returns 0 on success, -1 if no
    embeddings were produced.
    """
    # - Set nembeddings to be saved: -1=ALL
    if self.nembeddings_save==-1 or self.nembeddings_save>self.nsamples:
        n_embeddings= self.nsamples
    else:
        n_embeddings= self.nembeddings_save
    # - Loop and save
    imgs= []
    img_embeddings= []
    labels= []
    for i in range(n_embeddings):
        # - Get data from generator; sdata carries source metadata
        data, sdata= next(self.test_data_generator_embeddings)
        class_id= sdata.id
        class_name= sdata.label
        nimgs= data.shape[0]
        nchannels= data.shape[3]
        # - Get latent data for this output
        predout= self.f_online.predict(
            x= data,
            batch_size=1,
            verbose=2,
            workers=self.nworkers,
            use_multiprocessing=self.use_multiprocessing
        )
        # - Save embeddings & labels (one label per image in the batch)
        for j in range(nimgs):
            img_embeddings.append(predout[j])
            labels.append(class_id)
        # - Save images (only if nchan=1 or nchan=3, i.e. displayable)
        if nchannels==1 or nchannels==3:
            for j in range(nimgs):
                data_arr= data[j]
                if nchannels==1:
                    data_arr= data_arr[:,:,0]
                img_h= data_arr.shape[0]
                img_w= data_arr.shape[1]
                # - Downscale image previews (best-effort; failure keeps original size)
                scale= self.img_embedding_scale
                if scale>0 and scale<1 and scale!=1:
                    try:
                        data_resized= Utils.resize_img(img_as_float64(data_arr), (round(img_h * scale), round(img_w * scale)), preserve_range=True, order=1, anti_aliasing=True)
                        data_arr= data_resized
                    except Exception as e:
                        logger.error("Failed to resize image with scale=%f (err=%s)!" % (self.img_embedding_scale, str(e)))
                # - Convert data to [0,255] range and create PIL image (convert to RGB)
                # NOTE(review): min==max (flat image) would divide by zero here — confirm inputs exclude that case
                data_norm= (data_arr-np.min(data_arr))/(np.max(data_arr)-np.min(data_arr))
                data_norm= data_norm*255
                img= Image.fromarray(data_norm.astype('uint8')).convert('RGB')
                imgs.append(img)
    # - Check there are embedding data
    if not img_embeddings:
        logger.warn("No embeddings retrieved from generator, nothing to be saved ...")
        return -1
    # - Create output log directory (logs/embeddings under the cwd)
    currdir= os.getcwd()
    savedir= os.path.join(currdir, 'logs', 'embeddings')
    logger.info("Creating embedding save dir %s ..." % (savedir))
    p= Path(savedir)
    p.mkdir(parents=True, exist_ok= True)
    # - Save embeddings
    logger.info("Save embeddings to file %s ..." % (self.outfile_tb_embeddings))
    outfile_fullpath= os.path.join(savedir, self.outfile_tb_embeddings)
    #with open(outfile_fullpath, 'w') as fw:
    #   csv_writer = csv.writer(fw, delimiter='\t')
    #   csv_writer.writerows(img_embeddings)
    embeddings_variable = tf.Variable(img_embeddings) # Create a checkpoint from embedding, the filename and key are the # name of the tensor.
    checkpoint = tf.train.Checkpoint(embedding=embeddings_variable)
    checkpoint.save(os.path.join(savedir, "embedding.ckpt"))
    # - Save labels, one per line, aligned with the embedding rows
    outfile_labels_fullpath= os.path.join(savedir, 'metadata.tsv')
    logger.info("Saving label metadata to file %s ..." % (outfile_labels_fullpath))
    with open(outfile_labels_fullpath, 'w') as fp:
        for label in labels:
            #fp.write(f"{label}\n")
            fp.write("{}\n".format(label))
    # - Create config projector
    # The name of the tensor will be suffixed by `/.ATTRIBUTES/VARIABLE_VALUE`.
    config = projector.ProjectorConfig()
    embedding = config.embeddings.add()
    embedding.tensor_name = "embedding/.ATTRIBUTES/VARIABLE_VALUE"
    embedding.metadata_path = 'metadata.tsv'
    # - Save image sprite (if imgs available)
    nimgs= len(imgs)  # NB: rebinds the per-batch loop variable; now total image count
    if nimgs>0:
        # - Set the width and height of a single thumbnail
        nx= imgs[0].width
        ny= imgs[0].height
        embedding.sprite.image_path = 'sprite.jpg' # Specify the width and height of a single thumbnail.
        embedding.sprite.single_image_dim.extend([ny, nx])
        # Square grid large enough to hold all thumbnails.
        # NOTE(review): master_width uses ny (height) and master_height uses
        # nx (width) — looks swapped; harmless for square thumbnails, confirm
        # for non-square ones.
        one_square_size = int(np.ceil(np.sqrt(nimgs)))
        master_width = ny * one_square_size
        master_height = nx * one_square_size
        spriteimage = Image.new(
            mode='RGBA',
            size=(master_width, master_height),
            color=(0,0,0,0) # fully transparent
        )
        for count, image in enumerate(imgs):
            div, mod = divmod(count, one_square_size)
            h_loc = nx * div
            w_loc = ny * mod
            spriteimage.paste(image, (w_loc, h_loc))
        outfile_sprite_fullpath= os.path.join(savedir, 'sprite.jpg')
        logger.info("Saving sprite image to file %s ..." % (outfile_sprite_fullpath))
        spriteimage.convert("RGB").save(outfile_sprite_fullpath, transparency=0)
    # - Visualize embeddings (writes projector_config.pbtxt for TensorBoard)
    logger.info("Visualize embeddings ...")
    projector.visualize_embeddings(savedir, config)
    return 0
#####################################
## LOAD ENCODER MODEL
#####################################
def __load_encoder(self, modelfile, weightfile=""):
    """Load encoder model (and optionally weights) from input h5 files.

    The same architecture file is instantiated twice: once for the online
    encoder (self.f_online) and once for the target encoder
    (self.f_target). When ``weightfile`` is non-empty the same weights are
    loaded into both copies.

    Args:
        modelfile: path to the saved keras model architecture (h5).
        weightfile: optional path to the weights file; skipped when empty.

    Returns:
        0 on success, -1 on failure.
    """
    #==============================
    #== LOAD MODEL ARCHITECTURE
    #==============================
    # - Load online encoder
    logger.info("Loading online encoder from file %s ..." % (modelfile))
    try:
        self.f_online= load_model(modelfile)
    except Exception as e:
        logger.warn("Failed to load online encoder model from file %s (err=%s)!" % (modelfile, str(e)))
        return -1
    # Fixed: previous check "not self.f_online or self.f_online is None"
    # was redundant (second clause unreachable) and relied on model
    # truthiness; an explicit is-None test is sufficient.
    if self.f_online is None:
        logger.error("Encoder online model object is None, loading failed!")
        return -1
    # - Load target encoder (independent second copy of the same model)
    logger.info("Loading target encoder from file %s ..." % (modelfile))
    try:
        self.f_target= load_model(modelfile)
    except Exception as e:
        logger.warn("Failed to load target encoder model from file %s (err=%s)!" % (modelfile, str(e)))
        return -1
    if self.f_target is None:
        logger.error("Encoder target model object is None, loading failed!")
        return -1
    #==============================
    #== LOAD MODEL WEIGHTS
    #==============================
    if weightfile:
        # - Load online encoder weights
        logger.info("Loading online encoder weights from file %s ..." % (weightfile))
        try:
            self.f_online.load_weights(weightfile)
        except Exception as e:
            logger.warn("Failed to load online encoder model weights from file %s (err=%s)!" % (weightfile, str(e)))
            return -1
        # - Load target encoder weights
        logger.info("Loading target encoder weights from file %s ..." % (weightfile))
        try:
            self.f_target.load_weights(weightfile)
        except Exception as e:
            logger.warn("Failed to load target encoder model weights from file %s (err=%s)!" % (weightfile, str(e)))
            return -1
    return 0
#####################################
## LOAD PROJECTOR MODEL
#####################################
def __load_projector(self, modelfile, weightfile=""):
    """Load projector model (and optionally weights) from input h5 files.

    Instantiates both the online (g_online) and target (g_target)
    projectors from the same architecture file; when ``weightfile`` is
    non-empty the same weights are loaded into both.

    Args:
        modelfile: path to the saved keras model architecture (h5).
        weightfile: optional path to the weights file; skipped when empty.

    Returns:
        0 on success, -1 on failure.
    """
    #==============================
    #== LOAD MODEL ARCHITECTURE
    #==============================
    try:
        self.g_online= load_model(modelfile)
        self.g_target= load_model(modelfile)
    except Exception as e:
        logger.warn("Failed to load projector model from file %s (err=%s)!" % (modelfile, str(e)))
        return -1
    # Fixed: the old "not x or x is None" checks were redundant; an
    # explicit is-None test is sufficient.
    if self.g_online is None:
        logger.error("Projector online model object is None, loading failed!")
        return -1
    if self.g_target is None:
        logger.error("Projector target model object is None, loading failed!")
        return -1
    #==============================
    #== LOAD MODEL WEIGHTS
    #==============================
    if weightfile:
        try:
            self.g_online.load_weights(weightfile)
            self.g_target.load_weights(weightfile)
        except Exception as e:
            logger.warn("Failed to load projector model weights from file %s (err=%s)!" % (weightfile, str(e)))
            return -1
    return 0
#####################################
## LOAD PREDICTOR MODEL
#####################################
def __load_predictor(self, modelfile, weightfile=""):
    """Load predictor model (and optionally weights) from input h5 files.

    Only the online predictor (q_online) exists — BYOL has no target
    predictor.

    Args:
        modelfile: path to the saved keras model architecture (h5).
        weightfile: optional path to the weights file; skipped when empty.

    Returns:
        0 on success, -1 on failure.
    """
    #==============================
    #== LOAD MODEL ARCHITECTURE
    #==============================
    try:
        self.q_online= load_model(modelfile)
    except Exception as e:
        logger.warn("Failed to load predictor model from file %s (err=%s)!" % (modelfile, str(e)))
        return -1
    # Fixed: the old "not x or x is None" check was redundant; an explicit
    # is-None test is sufficient.
    if self.q_online is None:
        logger.error("Predictor model object is None, loading failed!")
        return -1
    #==============================
    #== LOAD MODEL WEIGHTS
    #==============================
    if weightfile:
        try:
            self.q_online.load_weights(weightfile)
        except Exception as e:
            logger.warn("Failed to load predictor model weights from file %s (err=%s)!" % (weightfile, str(e)))
            return -1
    return 0
|
SKA-INAFREPO_NAMEsclassifierPATH_START.@sclassifier_extracted@sclassifier-master@sclassifier@feature_extractor_byol.py@.PATH_END.py
|
{
"filename": "_minexponent.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/layout/polar/radialaxis/_minexponent.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class MinexponentValidator(_plotly_utils.basevalidators.NumberValidator):
    """Number validator for ``layout.polar.radialaxis.minexponent``.

    Defaults: edit_type='plot', min=0 (overridable via kwargs).
    """

    def __init__(
        self, plotly_name="minexponent", parent_name="layout.polar.radialaxis", **kwargs
    ):
        # setdefault + **kwargs is equivalent to the pop-and-pass idiom.
        kwargs.setdefault("edit_type", "plot")
        kwargs.setdefault("min", 0)
        super().__init__(plotly_name=plotly_name, parent_name=parent_name, **kwargs)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@layout@polar@radialaxis@_minexponent.py@.PATH_END.py
|
{
"filename": "random_cat_runner.py",
"repo_name": "CosmoStat/shapepipe",
"repo_path": "shapepipe_extracted/shapepipe-master/shapepipe/modules/random_cat_runner.py",
"type": "Python"
}
|
"""RANDOM CAT RUNNER.
Module runner for ``random_cat``.
:Author: Martin Kilbinger <martin.kilbinger@cea.fr>
"""
from shapepipe.modules.module_decorator import module_runner
from shapepipe.modules.random_cat_package.random_cat import RandomCat
@module_runner(
    version='1.1',
    file_pattern=['image', 'pipeline_flag'],
    file_ext=['.fits', 'fits'],  # NOTE(review): second entry lacks the leading dot — confirm intended
    depends=['astropy'],
    numbering_scheme='_0',
)
def random_cat_runner(
    input_file_list,
    run_dirs,
    file_number_string,
    config,
    module_config_sec,
    w_log,
):
    """Define The Random Catalogue Runner.

    Reads module options from ``config``, builds a RandomCat instance for
    the given image/mask pair, and runs its processing. Returns
    ``(None, None)`` as required by the module runner protocol.
    """
    # Get input file names of image and mask
    input_image_name = input_file_list[0]
    input_mask_name = input_file_list[1]

    # Set output file name
    if config.has_option(module_config_sec, 'OUTPUT_FILE_PATTERN'):
        output_file_pattern = config.get(
            module_config_sec,
            'OUTPUT_FILE_PATTERN'
        )
    else:
        output_file_pattern = 'random_cat'

    # Get number of random objects requested on output
    n_rand = config.getfloat(module_config_sec, 'N_RANDOM')

    # Flag whether n_rand is total (DENSITY=False, default)
    # or per square degree (DENSITY=True)
    if config.has_option(module_config_sec, 'DENSITY'):
        density = config.getboolean(module_config_sec, 'DENSITY')
    else:
        density = False

    # Get healpix output options.
    # BUGFIX: healpix_options was previously only assigned inside the
    # "if save_mask_as_healpix" branch, so the RandomCat() call below
    # raised NameError whenever SAVE_MASK_AS_HEALPIX was False.
    save_mask_as_healpix = config.getboolean(
        module_config_sec,
        'SAVE_MASK_AS_HEALPIX'
    )
    healpix_options = {}
    if save_mask_as_healpix:
        for option_trunc in ['FILE_BASE', 'NSIDE']:
            option = f'HEALPIX_OUT_{option_trunc}'
            healpix_options[option_trunc] = config.get(
                module_config_sec,
                option
            )

    # Create rand cat class instance
    rand_cat_inst = RandomCat(
        input_image_name,
        input_mask_name,
        run_dirs['output'],
        file_number_string,
        output_file_pattern,
        n_rand,
        density,
        w_log,
        healpix_options,
    )

    # Run processing
    rand_cat_inst.process()

    # No return objects
    return None, None
|
CosmoStatREPO_NAMEshapepipePATH_START.@shapepipe_extracted@shapepipe-master@shapepipe@modules@random_cat_runner.py@.PATH_END.py
|
{
"filename": "_legendrank.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/densitymapbox/_legendrank.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class LegendrankValidator(_plotly_utils.basevalidators.NumberValidator):
    """Number validator for ``densitymapbox.legendrank``.

    Default: edit_type='style' (overridable via kwargs).
    """

    def __init__(self, plotly_name="legendrank", parent_name="densitymapbox", **kwargs):
        # setdefault + **kwargs is equivalent to the pop-and-pass idiom.
        kwargs.setdefault("edit_type", "style")
        super().__init__(plotly_name=plotly_name, parent_name=parent_name, **kwargs)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@densitymapbox@_legendrank.py@.PATH_END.py
|
{
"filename": "image_array.py",
"repo_name": "rennehan/yt-swift",
"repo_path": "yt-swift_extracted/yt-swift-main/yt/data_objects/image_array.py",
"type": "Python"
}
|
import numpy as np
from unyt import unyt_array
from yt.config import ytcfg
from yt.visualization.image_writer import write_bitmap, write_image
class ImageArray(unyt_array):
    r"""A custom Numpy ndarray used for images.

    This differs from ndarray in that you can optionally specify an
    info dictionary which is used later in saving, and can be accessed with
    ImageArray.info.

    Parameters
    ----------
    input_array: array_like
        A numpy ndarray, or list.

    Other Parameters
    ----------------
    info: dictionary
        Contains information to be stored with image.

    Returns
    -------
    obj: ImageArray object

    See Also
    --------
    numpy.ndarray : Inherits

    Examples
    --------
    >>> im = np.zeros([64, 128, 3])
    >>> for i in range(im.shape[0]):
    ...     for k in range(im.shape[2]):
    ...         im[i, :, k] = np.linspace(0.0, 0.3 * k, im.shape[1])
    >>> myinfo = {
    ...     "field": "dinosaurs",
    ...     "east_vector": np.array([1.0, 0.0, 0.0]),
    ...     "north_vector": np.array([0.0, 0.0, 1.0]),
    ...     "normal_vector": np.array([0.0, 1.0, 0.0]),
    ...     "width": 0.245,
    ...     "units": "cm",
    ...     "type": "rendering",
    ... }
    >>> im_arr = ImageArray(im, info=myinfo)
    >>> im_arr.save("test_ImageArray")

    Numpy ndarray documentation appended:
    """
def __new__(
    cls,
    input_array,
    units=None,
    registry=None,
    info=None,
    bypass_validation=False,
):
    """Construct the array through unyt_array and attach an ``info`` dict
    (empty dict when none is supplied)."""
    obj = super().__new__(
        cls, input_array, units, registry, bypass_validation=bypass_validation
    )
    obj.info = {} if info is None else info
    return obj
def __array_finalize__(self, obj):
    # see InfoArray.__array_finalize__ for comments
    # Propagate the ``info`` attribute through numpy view/slice creation;
    # falls back to None when the source object has no info.
    super().__array_finalize__(obj)
    self.info = getattr(obj, "info", None)
def write_hdf5(self, filename, dataset_name=None):
    r"""Write this image to an hdf5 file.

    Parameters
    ----------
    filename : string
        The filename to create and write a dataset to.
    dataset_name : string, optional
        The dataset name; defaults to ``self.info["name"]`` or "image".

    Examples
    --------
    >>> im_arr = ImageArray(np.zeros([64, 128, 3]))
    >>> im_arr.write_hdf5("test_ImageArray.h5")
    """
    # Pick the dataset name from the info dict when not given explicitly.
    name = dataset_name if dataset_name is not None else self.info.get("name", "image")
    super().write_hdf5(filename, dataset_name=name, info=self.info)
def add_background_color(self, background="black", inline=True):
    r"""Alpha-blend a background color into a 4-channel image.

    The image must already be normalized to the [0, 1] range.

    Parameters
    ----------
    background :
        One of:

        * ``'white'``: white background, opaque
        * ``'black'``: black background, opaque
        * ``None``: transparent background
        * 4-element array [r, g, b, a]: arbitrary rgba setting

        Default: 'black'.
    inline : boolean, optional
        If True, this ImageArray is modified in place; if False a copy is
        modified and returned. Default: True.

    Returns
    -------
    out : ImageArray
        The image with the background blended in.
    """
    assert self.shape[-1] == 4

    # Resolve the named backgrounds; unknown values pass through unchanged.
    if background is None or isinstance(background, str):
        background = {
            None: (0.0, 0.0, 0.0, 0.0),
            "white": (1.0, 1.0, 1.0, 1.0),
            "black": (0.0, 0.0, 0.0, 1.0),
        }.get(background, background)

    out = self if inline else self.copy()

    # Standard "over" compositing; the alpha channel is written last so
    # the in-place case still reads the original alpha for every channel.
    alpha = self[:, :, 3]
    for c in range(3):
        out[:, :, c] = self[:, :, c] * alpha
        out[:, :, c] += background[c] * background[3] * (1.0 - alpha)
    out[:, :, 3] = alpha + background[3] * (1.0 - alpha)
    return out
def rescale(self, cmax=None, amax=None, inline=True):
    r"""Rescale the image into the [0, 1] range.

    Parameters
    ----------
    cmax : float, optional
        Normalization for the rgb channels; defaults to the maximum of
        the per-pixel rgb sums.
    amax : float, optional
        Normalization for the alpha channel; defaults to its maximum.
    inline : boolean, optional
        Rescale in place (True) or on a returned copy (False).
        Default: True.

    Returns
    -------
    out : ImageArray
        The rescaled image, clipped to [0, 1].

    Notes
    -----
    Requires a 3D array whose last axis has size >= 3; an alpha channel
    (size 4) is rescaled separately.
    """
    assert len(self.shape) == 3
    assert self.shape[2] >= 3

    out = self if inline else self.copy()

    # Normalize rgb by the brightest summed pixel (or the given cmax);
    # a non-positive normalization leaves the channels untouched.
    rgb = self[:, :, :3]
    if cmax is None:
        cmax = rgb.sum(axis=2).max()
    if cmax > 0.0:
        np.multiply(rgb, 1.0 / cmax, out[:, :, :3])

    # Normalize alpha independently, when present.
    if self.shape[2] == 4:
        alpha = self[:, :, 3]
        if amax is None:
            amax = alpha.max()
        if amax > 0.0:
            np.multiply(alpha, 1.0 / amax, out[:, :, 3])

    np.clip(out, 0.0, 1.0, out)
    return out
def write_png(
    self,
    filename,
    sigma_clip=None,
    background="black",
    rescale=True,
):
    r"""Write the image to a png file.

    Parameters
    ----------
    filename : string or None
        Filename to save to ('.png' appended if missing). If None, the
        png contents are returned instead of written.
    sigma_clip : float, optional
        Clip the image at mean + sigma_clip * std of its nonzero rgb
        values before saving. Default: None.
    background :
        Background for 4-channel images: 'white', 'black', None
        (transparent), or a 4-element [r, g, b, a]. Default: 'black'.
    rescale : boolean, optional
        Write a rescaled copy, leaving this array unmodified.
        Default: True.
    """
    scaled = self.rescale(inline=False) if rescale else self

    # Background blending only applies when an alpha channel exists.
    if self.shape[-1] == 4:
        out = scaled.add_background_color(background, inline=False)
    else:
        out = scaled

    if filename is not None and filename[-4:] != ".png":
        filename += ".png"

    # write_bitmap expects (x, y) ordering, hence the axis swap.
    if sigma_clip is not None:
        clip_value = self._clipping_value(sigma_clip, im=out)
        return write_bitmap(out.swapaxes(0, 1), filename, clip_value)
    return write_bitmap(out.swapaxes(0, 1), filename)
def write_image(
    self,
    filename,
    color_bounds=None,
    channel=None,
    cmap_name=None,
    func=lambda x: x,
):
    r"""Write a single channel of the image to a colormapped png file.

    Parameters
    ----------
    filename : string or None
        Output filename ('.png' appended if missing).
    color_bounds : tuple of floats, optional
        The min and max to scale between; outlying values are clipped.
    channel : int, optional
        Which channel to write; None writes the full 2D buffer.
    cmap_name : string, optional
        Colormap name; defaults to the configured yt default colormap.
    func : callable, optional
        A transform applied to the buffer before applying the colormap.

    Returns
    -------
    scaled_image : uint8 image that has been saved.
    """
    if cmap_name is None:
        cmap_name = ytcfg.get("yt", "default_colormap")
    if filename is not None and filename[-4:] != ".png":
        filename += ".png"

    # TODO: Write info dict as png metadata
    buf = self.swapaxes(0, 1)
    if channel is not None:
        buf = buf[:, :, channel]
    return write_image(
        buf.to_ndarray(),
        filename,
        color_bounds=color_bounds,
        cmap_name=cmap_name,
        func=func,
    )
def save(self, filename, png=True, hdf5=True, dataset_name=None):
    """
    Saves ImageArray.

    Arguments:
      filename: string
        This should not contain the extension type (.png, .h5, ...)

    Optional Arguments:
      png: boolean, default True
        Save to a png

      hdf5: boolean, default True
        Save to hdf5 file, including info dictionary as attributes.

    NOTE(review): when both png and hdf5 are True, the hdf5 output is
    named '<filename>.png.h5' because '.h5' is appended to the already
    png-suffixed name — confirm this is intended before changing.
    """
    if png:
        if not filename.endswith(".png"):
            filename = filename + ".png"
        # 2D buffers go through the colormapped writer; 3D through write_png.
        if len(self.shape) > 2:
            self.write_png(filename)
        else:
            self.write_image(filename)
    if hdf5:
        if not filename.endswith(".h5"):
            filename = filename + ".h5"
        self.write_hdf5(filename, dataset_name)
def _clipping_value(self, sigma_clip, im=None):
    # Max value to clip at for the given sigma_clip: mean + sigma_clip*std
    # over the nonzero rgb entries of im (defaults to this instance).
    target = self if im is None else im
    rgb = target[:, :, :3]
    nonzero = rgb[rgb.nonzero()]
    return nonzero.mean() + sigma_clip * nonzero.std()
|
rennehanREPO_NAMEyt-swiftPATH_START.@yt-swift_extracted@yt-swift-main@yt@data_objects@image_array.py@.PATH_END.py
|
{
"filename": "_token.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/pointcloud/stream/_token.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class TokenValidator(_plotly_utils.basevalidators.StringValidator):
    """String validator for ``pointcloud.stream.token``.

    Defaults: edit_type='calc', no_blank=True, strict=True
    (overridable via kwargs).
    """

    def __init__(self, plotly_name="token", parent_name="pointcloud.stream", **kwargs):
        # setdefault + **kwargs is equivalent to the pop-and-pass idiom.
        kwargs.setdefault("edit_type", "calc")
        kwargs.setdefault("no_blank", True)
        kwargs.setdefault("strict", True)
        super().__init__(plotly_name=plotly_name, parent_name=parent_name, **kwargs)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@pointcloud@stream@_token.py@.PATH_END.py
|
{
"filename": "thirdai_neuraldb.ipynb",
"repo_name": "langchain-ai/langchain",
"repo_path": "langchain_extracted/langchain-master/docs/docs/integrations/vectorstores/thirdai_neuraldb.ipynb",
"type": "Jupyter Notebook"
}
|
# ThirdAI NeuralDB
>[NeuralDB](https://www.thirdai.com/neuraldb-enterprise/) is a CPU-friendly and fine-tunable vector store developed by [ThirdAI](https://www.thirdai.com/).
## Initialization
There are two initialization methods:
- From Scratch: Basic model
- From Checkpoint: Load a model that was previously saved
For all of the following initialization methods, the `thirdai_key` parameter can be omitted if the `THIRDAI_KEY` environment variable is set.
ThirdAI API keys can be obtained at https://www.thirdai.com/try-bolt/
You'll need to install `langchain-community` with `pip install -qU langchain-community` to use this integration
```python
from langchain_community.vectorstores import NeuralDBVectorStore
# From scratch
vectorstore = NeuralDBVectorStore.from_scratch(thirdai_key="your-thirdai-key")
# From checkpoint
vectorstore = NeuralDBVectorStore.from_checkpoint(
# Path to a NeuralDB checkpoint. For example, if you call
# vectorstore.save("/path/to/checkpoint.ndb") in one script, then you can
# call NeuralDBVectorStore.from_checkpoint("/path/to/checkpoint.ndb") in
# another script to load the saved model.
checkpoint="/path/to/checkpoint.ndb",
thirdai_key="your-thirdai-key",
)
```
## Inserting document sources
```python
vectorstore.insert(
# If you have PDF, DOCX, or CSV files, you can directly pass the paths to the documents
sources=["/path/to/doc.pdf", "/path/to/doc.docx", "/path/to/doc.csv"],
# When True this means that the underlying model in the NeuralDB will
# undergo unsupervised pretraining on the inserted files. Defaults to True.
train=True,
    # Much faster insertion, at the cost of a slight drop in retrieval accuracy. Defaults to True.
fast_mode=True,
)
from thirdai import neural_db as ndb
vectorstore.insert(
# If you have files in other formats, or prefer to configure how
# your files are parsed, then you can pass in NeuralDB document objects
# like this.
sources=[
ndb.PDF(
"/path/to/doc.pdf",
version="v2",
chunk_size=100,
metadata={"published": 2022},
),
ndb.Unstructured("/path/to/deck.pptx"),
]
)
```
## Similarity search
To query the vectorstore, you can use the standard LangChain vectorstore method `similarity_search`, which returns a list of LangChain Document objects. Each document object represents a chunk of text from the indexed files. For example, it may contain a paragraph from one of the indexed PDF files. In addition to the text, the document's metadata field contains information such as the document's ID, the source of this document (which file it came from), and the score of the document.
```python
# This returns a list of LangChain Document objects
documents = vectorstore.similarity_search("query", k=10)
```
## Fine tuning
NeuralDBVectorStore can be fine-tuned to user behavior and domain-specific knowledge. It can be fine-tuned in two ways:
1. Association: the vectorstore associates a source phrase with a target phrase. When the vectorstore sees the source phrase, it will also consider results that are relevant to the target phrase.
2. Upvoting: the vectorstore upweights the score of a document for a specific query. This is useful when you want to fine-tune the vectorstore to user behavior. For example, if a user searches "how is a car manufactured" and likes the returned document with id 52, then we can upvote the document with id 52 for the query "how is a car manufactured".
```python
vectorstore.associate(source="source phrase", target="target phrase")
vectorstore.associate_batch(
[
("source phrase 1", "target phrase 1"),
("source phrase 2", "target phrase 2"),
]
)
vectorstore.upvote(query="how is a car manufactured", document_id=52)
vectorstore.upvote_batch(
[
("query 1", 52),
("query 2", 20),
]
)
```
|
langchain-aiREPO_NAMElangchainPATH_START.@langchain_extracted@langchain-master@docs@docs@integrations@vectorstores@thirdai_neuraldb.ipynb@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "purmortal/galcraft",
"repo_path": "galcraft_extracted/galcraft-main/GalCraft/modules/__init__.py",
"type": "Python"
}
|
purmortalREPO_NAMEgalcraftPATH_START.@galcraft_extracted@galcraft-main@GalCraft@modules@__init__.py@.PATH_END.py
|
|
{
"filename": "e33c1d5684cf_changed_parts_paper_table_to_parts.py",
"repo_name": "HERA-Team/hera_mc",
"repo_path": "hera_mc_extracted/hera_mc-main/alembic/versions/e33c1d5684cf_changed_parts_paper_table_to_parts.py",
"type": "Python"
}
|
"""changed parts_paper table to parts
Revision ID: e33c1d5684cf
Revises: 3d3c72ecbc0d
Create Date: 2018-01-30 01:02:58.347378+00:00
"""
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = "e33c1d5684cf"
down_revision = "3d3c72ecbc0d"
branch_labels = None
depends_on = None
# Hand-edited -- see http://petegraham.co.uk/rename-postgres-table-with-alembic/
def upgrade():
    """Apply this revision: rename table ``parts_paper`` to ``parts``."""
    # ### commands hand-edited to only rename_table ###
    op.rename_table("parts_paper", "parts")
    # ### end Alembic commands ###
def downgrade():
    """Revert this revision: rename table ``parts`` back to ``parts_paper``."""
    # ### commands hand-edited to apply rename_table ###
    op.rename_table("parts", "parts_paper")
    # ### end Alembic commands ###
|
HERA-TeamREPO_NAMEhera_mcPATH_START.@hera_mc_extracted@hera_mc-main@alembic@versions@e33c1d5684cf_changed_parts_paper_table_to_parts.py@.PATH_END.py
|
{
"filename": "test_galkin.py",
"repo_name": "lenstronomy/lenstronomy",
"repo_path": "lenstronomy_extracted/lenstronomy-main/test/test_GalKin/test_galkin.py",
"type": "Python"
}
|
"""Tests for `galkin` module."""
import pytest
import unittest
import copy
import numpy.testing as npt
import numpy as np
import scipy.integrate as integrate
from lenstronomy.GalKin.galkin import Galkin
from lenstronomy.GalKin.light_profile import LightProfile
import lenstronomy.Util.param_util as param_util
from lenstronomy.Util import constants as const
class TestRaise(unittest.TestCase):
    """Tests asserting that ``Galkin`` raises on invalid configurations."""

    def test_raise(self):
        """Invalid model/numerics combinations must raise ``ValueError``."""
        with self.assertRaises(ValueError):
            # model specifying only an anisotropy choice, with analytic
            # kinematics enabled -- presumably rejected by Galkin's input
            # validation (TODO confirm the exact check)
            kwargs_model = {"anisotropy_model": "const"}
            kwargs_aperture = {
                "center_ra": 0,
                "width": 1,
                "length": 1,
                "angle": 0,
                "center_dec": 0,
                "aperture_type": "slit",
            }
            kwargs_cosmo = {"d_d": 1000, "d_s": 1500, "d_ds": 800}
            kwargs_psf = {"psf_type": "GAUSSIAN", "fwhm": 1}
            Galkin(
                kwargs_model,
                kwargs_aperture,
                kwargs_psf,
                kwargs_cosmo,
                kwargs_numerics={},
                analytic_kinematics=True,
            )
        with self.assertRaises(ValueError):
            # IFU-grid dispersion map with luminosity-weighted integration
            # disabled -- expected to be rejected when calling
            # dispersion_map_grid_convolved
            kwargs_model = {
                "mass_profile_list": ["SIS"],
                "light_profile_list": ["HERNQUIST"],
                "anisotropy_model": "OM",
            }
            x_grid, y_grid = np.meshgrid(np.linspace(-1, 1, 2), np.linspace(-1, 1, 2))
            kwargs_aperture = {
                "x_grid": x_grid,
                "y_grid": y_grid,
                "aperture_type": "IFU_grid",
            }
            kwargs_cosmo = {"d_d": 1000, "d_s": 1500, "d_ds": 800}
            kwargs_psf = {"psf_type": "GAUSSIAN", "fwhm": 1}
            galkin = Galkin(
                kwargs_model,
                kwargs_aperture,
                kwargs_psf,
                kwargs_cosmo,
                kwargs_numerics={"lum_weight_int_method": False},
                analytic_kinematics=False,
            )
            galkin.dispersion_map_grid_convolved(
                kwargs_mass=[{"theta_E": 1}],
                kwargs_light=[{"amp": 1, "Rs": 1}],
                kwargs_anisotropy={"r_ani": 1},
                supersampling_factor=1,
            )
class TestGalkin(object):
    def setup_method(self):
        """Build a shared IFU-grid Galkin instance used by the helper tests."""
        # fixed seed so any stochastic rendering inside Galkin is reproducible
        np.random.seed(42)
        kwargs_model = {
            "mass_profile_list": ["SIS"],
            "light_profile_list": ["HERNQUIST"],
            "anisotropy_model": "OM",
        }
        # 10x10 grid with 0.1 arcsec spacing, centered on the origin
        # (shape checked by test_get_grid / test_delta_pix_xy)
        x_grid, y_grid = np.meshgrid(
            np.arange(-0.45, 0.5, 0.1),
            np.arange(-0.45, 0.5, 0.1),
        )
        kwargs_aperture = {
            "x_grid": x_grid,
            "y_grid": y_grid,
            "aperture_type": "IFU_grid",
        }
        kwargs_cosmo = {"d_d": 1000, "d_s": 1500, "d_ds": 800}
        kwargs_psf = {"psf_type": "GAUSSIAN", "fwhm": 1}
        self.galkin_ifu_grid = Galkin(
            kwargs_model,
            kwargs_aperture,
            kwargs_psf,
            kwargs_cosmo,
            kwargs_numerics={"lum_weight_int_method": True},
            analytic_kinematics=False,
        )
def test_compare_power_law(self):
"""Compare power-law profiles analytical vs.
numerical
:return:
"""
# light profile
light_profile_list = ["HERNQUIST"]
r_eff = 1.5
kwargs_light = [
{"Rs": 0.551 * r_eff, "amp": 1.0}
] # effective half light radius (2d projected) in arcsec
# 0.551 *
# mass profile
mass_profile_list = ["SPP"]
theta_E = 1.2
gamma = 2.0
kwargs_profile = [
{"theta_E": theta_E, "gamma": gamma}
] # Einstein radius (arcsec) and power-law slope
# anisotropy profile
anisotropy_type = "OM"
r_ani = 2.0
kwargs_anisotropy = {"r_ani": r_ani} # anisotropy radius [arcsec]
# aperture as slit
aperture_type = "slit"
length = 1.0
width = 0.3
kwargs_aperture = {
"aperture_type": aperture_type,
"length": length,
"width": width,
"center_ra": 0,
"center_dec": 0,
"angle": 0,
}
psf_fwhm = 1.0 # Gaussian FWHM psf
kwargs_cosmo = {"d_d": 1000, "d_s": 1500, "d_ds": 800}
kwargs_numerics = {
"interpol_grid_num": 1000,
"max_integrate": 1000,
"min_integrate": 0.001,
}
kwargs_model = {
"mass_profile_list": mass_profile_list,
"light_profile_list": light_profile_list,
"anisotropy_model": anisotropy_type,
}
kwargs_psf = {"psf_type": "GAUSSIAN", "fwhm": psf_fwhm}
galkin_analytic = Galkin(
kwargs_model=kwargs_model,
kwargs_aperture=kwargs_aperture,
kwargs_psf=kwargs_psf,
kwargs_cosmo=kwargs_cosmo,
kwargs_numerics=kwargs_numerics,
analytic_kinematics=True,
)
sigma_v_analytic = galkin_analytic.dispersion(
kwargs_mass={"gamma": gamma, "theta_E": theta_E},
kwargs_light={"r_eff": r_eff},
kwargs_anisotropy={"r_ani": r_ani},
sampling_number=1000,
)
kwargs_numerics["lum_weight_int_method"] = False
galkin_num_3d = Galkin(
kwargs_model=kwargs_model,
kwargs_aperture=kwargs_aperture,
kwargs_psf=kwargs_psf,
kwargs_cosmo=kwargs_cosmo,
kwargs_numerics=kwargs_numerics,
analytic_kinematics=False,
)
sigma_v_num_3d = galkin_num_3d.dispersion(
kwargs_profile, kwargs_light, kwargs_anisotropy, sampling_number=1000
)
npt.assert_almost_equal(sigma_v_num_3d / sigma_v_analytic, 1, decimal=2)
# 2d projected integral calculation
kwargs_numerics = {
"interpol_grid_num": 1000,
"max_integrate": 1000,
"min_integrate": 0.000001,
"lum_weight_int_method": True,
"log_integration": True,
}
galkin_num_log_proj = Galkin(
kwargs_model=kwargs_model,
kwargs_aperture=kwargs_aperture,
kwargs_psf=kwargs_psf,
kwargs_cosmo=kwargs_cosmo,
kwargs_numerics=kwargs_numerics,
analytic_kinematics=False,
)
sigma_v_num_log_proj = galkin_num_log_proj.dispersion(
kwargs_profile, kwargs_light, kwargs_anisotropy, sampling_number=1000
)
kwargs_numerics = {
"interpol_grid_num": 10000,
"max_integrate": 1000,
"min_integrate": 0.0001,
"lum_weight_int_method": True,
"log_integration": False,
}
galkin_num_lin_proj = Galkin(
kwargs_model=kwargs_model,
kwargs_aperture=kwargs_aperture,
kwargs_psf=kwargs_psf,
kwargs_cosmo=kwargs_cosmo,
kwargs_numerics=kwargs_numerics,
analytic_kinematics=False,
)
sigma_v_num_lin_proj = galkin_num_lin_proj.dispersion(
kwargs_profile, kwargs_light, kwargs_anisotropy, sampling_number=1000
)
npt.assert_almost_equal(sigma_v_num_log_proj / sigma_v_analytic, 1, decimal=2)
npt.assert_almost_equal(sigma_v_num_lin_proj / sigma_v_analytic, 1, decimal=2)
def test_log_vs_linear_integral(self):
"""Here we test logarithmic vs linear integral in an end-to-end fashion.
We do not demand the highest level of precisions here!!! We are using the
luminosity-weighted velocity dispersion integration calculation in this test.
"""
# light profile
light_profile_list = ["HERNQUIST"]
Rs = 0.5
kwargs_light = [
{"Rs": Rs, "amp": 1.0}
] # effective half light radius (2d projected) in arcsec
# 0.551 *
# mass profile
mass_profile_list = ["SPP"]
theta_E = 1.2
gamma = 2.0
kwargs_profile = [
{"theta_E": theta_E, "gamma": gamma}
] # Einstein radius (arcsec) and power-law slope
# anisotropy profile
anisotropy_type = "OM"
r_ani = 2.0
kwargs_anisotropy = {"r_ani": r_ani} # anisotropy radius [arcsec]
# aperture as slit
aperture_type = "slit"
length = 3.8
width = 0.9
kwargs_aperture = {
"aperture_type": aperture_type,
"length": length,
"width": width,
"center_ra": 0,
"center_dec": 0,
"angle": 0,
}
psf_fwhm = 0.7 # Gaussian FWHM psf
kwargs_cosmo = {"d_d": 1000, "d_s": 1500, "d_ds": 800}
kwargs_numerics_log = {
"interpol_grid_num": 1000,
"log_integration": True,
"max_integrate": 10,
"min_integrate": 0.001,
"lum_weight_int_method": True,
}
kwargs_numerics_linear = {
"interpol_grid_num": 1000,
"log_integration": False,
"max_integrate": 10,
"min_integrate": 0.001,
"lum_weight_int_method": True,
}
kwargs_psf = {"psf_type": "GAUSSIAN", "fwhm": psf_fwhm}
kwargs_model = {
"mass_profile_list": mass_profile_list,
"light_profile_list": light_profile_list,
"anisotropy_model": anisotropy_type,
}
galkin_linear = Galkin(
kwargs_model=kwargs_model,
kwargs_aperture=kwargs_aperture,
kwargs_psf=kwargs_psf,
kwargs_cosmo=kwargs_cosmo,
kwargs_numerics=kwargs_numerics_linear,
)
sigma_v_lin = galkin_linear.dispersion(
kwargs_profile, kwargs_light, kwargs_anisotropy, sampling_number=1000
)
galkin_log = Galkin(
kwargs_model=kwargs_model,
kwargs_aperture=kwargs_aperture,
kwargs_psf=kwargs_psf,
kwargs_cosmo=kwargs_cosmo,
kwargs_numerics=kwargs_numerics_log,
)
sigma_v_log = galkin_log.dispersion(
kwargs_profile, kwargs_light, kwargs_anisotropy, sampling_number=1000
)
npt.assert_almost_equal(sigma_v_lin / sigma_v_log, 1, decimal=2)
    def test_projected_light_integral_hernquist(self):
        """The 2d (projected) Hernquist profile must match the numerical
        line-of-sight integral of the 3d profile.

        :return:
        """
        light_profile_list = ["HERNQUIST"]
        Rs = 1.0
        kwargs_light = [
            {"Rs": Rs, "amp": 1.0}
        ]  # effective half light radius (2d projected) in arcsec
        lightProfile = LightProfile(light_profile_list)
        R = 2
        light2d = lightProfile.light_2d(R=R, kwargs_list=kwargs_light)
        # integrate the 3d profile along the line of sight; factor 2 below
        # accounts for integrating only over positive offsets
        out = integrate.quad(
            lambda x: lightProfile.light_3d(np.sqrt(R**2 + x**2), kwargs_light),
            0,
            100,
        )
        npt.assert_almost_equal(light2d, out[0] * 2, decimal=3)
def test_projected_light_integral_hernquist_ellipse(self):
"""
:return:
"""
light_profile_list = ["HERNQUIST_ELLIPSE"]
Rs = 1.0
phi, q = 1, 0.8
e1, e2 = param_util.phi_q2_ellipticity(phi, q)
kwargs_light = [
{"Rs": Rs, "amp": 1.0, "e1": e1, "e2": e2}
] # effective half light radius (2d projected) in arcsec
lightProfile = LightProfile(light_profile_list)
R = 2
light2d = lightProfile.light_2d(R=R, kwargs_list=kwargs_light)
out = integrate.quad(
lambda x: lightProfile.light_3d(np.sqrt(R**2 + x**2), kwargs_light),
0,
10,
)
npt.assert_almost_equal(light2d, out[0] * 2, decimal=3)
def test_projected_light_integral_pjaffe(self):
"""
:return:
"""
light_profile_list = ["PJAFFE"]
kwargs_light = [
{"Rs": 0.5, "Ra": 0.01, "amp": 1.0}
] # effective half light radius (2d projected) in arcsec
lightProfile = LightProfile(light_profile_list)
R = 0.01
light2d = lightProfile.light_2d(R=R, kwargs_list=kwargs_light)
out = integrate.quad(
lambda x: lightProfile.light_3d(np.sqrt(R**2 + x**2), kwargs_light),
0,
100,
)
npt.assert_almost_equal(light2d / (out[0] * 2), 1.0, decimal=3)
def test_realistic_0(self):
"""Realistic test example :return:"""
light_profile_list = ["HERNQUIST"]
kwargs_light = [
{
"Rs": 0.10535462602138289,
"center_x": -0.02678473951679429,
"center_y": 0.88691126347462712,
"amp": 3.7114695634960109,
}
]
lightProfile = LightProfile(light_profile_list)
R = 0.01
light2d = lightProfile.light_2d(R=R, kwargs_list=kwargs_light)
out = integrate.quad(
lambda x: lightProfile.light_3d(np.sqrt(R**2 + x**2), kwargs_light),
0,
100,
)
npt.assert_almost_equal(light2d / (out[0] * 2), 1.0, decimal=3)
def test_realistic_1(self):
"""Realistic test example :return:"""
light_profile_list = ["HERNQUIST_ELLIPSE"]
phi, q = 0.74260706384506325, 0.46728323131925864
e1, e2 = param_util.phi_q2_ellipticity(phi, q)
kwargs_light = [
{
"Rs": 0.10535462602138289,
"e1": e1,
"e2": e2,
"center_x": -0.02678473951679429,
"center_y": 0.88691126347462712,
"amp": 3.7114695634960109,
}
]
lightProfile = LightProfile(light_profile_list)
R = 0.01
light2d = lightProfile.light_2d(R=R, kwargs_list=kwargs_light)
out = integrate.quad(
lambda x: lightProfile.light_3d(np.sqrt(R**2 + x**2), kwargs_light),
0,
100,
)
npt.assert_almost_equal(light2d / (out[0] * 2), 1.0, decimal=3)
def test_realistic(self):
"""Realistic test example :return:"""
light_profile_list = ["HERNQUIST_ELLIPSE", "PJAFFE_ELLIPSE"]
phi, q = 0.74260706384506325, 0.46728323131925864
e1, e2 = param_util.phi_q2_ellipticity(phi, q)
phi2, q2 = -0.33379268413794494, 0.66582356813012267
e12, e22 = param_util.phi_q2_ellipticity(phi2, q2)
kwargs_light = [
{
"Rs": 0.10535462602138289,
"e1": e1,
"e2": e2,
"center_x": -0.02678473951679429,
"center_y": 0.88691126347462712,
"amp": 3.7114695634960109,
},
{
"Rs": 0.44955054610388684,
"e1": e12,
"e2": e22,
"center_x": 0.019536801118136753,
"center_y": 0.0218888643537157,
"Ra": 0.0010000053334891974,
"amp": 967.00280526319796,
},
]
light_profile = LightProfile(light_profile_list)
R = 0.01
light2d = light_profile.light_2d(R=R, kwargs_list=kwargs_light)
out = integrate.quad(
lambda x: light_profile.light_3d(np.sqrt(R**2 + x**2), kwargs_light),
0,
100,
)
npt.assert_almost_equal(light2d / (out[0] * 2), 1.0, decimal=3)
def test_dispersion_map(self):
"""Tests whether the old and new version provide the same answer."""
# light profile
light_profile_list = ["HERNQUIST"]
r_eff = 1.5
kwargs_light = [
{"Rs": r_eff, "amp": 1.0}
] # effective half light radius (2d projected) in arcsec
# 0.551 *
# mass profile
mass_profile_list = ["SPP"]
theta_E = 1.2
gamma = 2.0
kwargs_mass = [
{"theta_E": theta_E, "gamma": gamma}
] # Einstein radius (arcsec) and power-law slope
# anisotropy profile
anisotropy_type = "OM"
r_ani = 2.0
kwargs_anisotropy = {"r_ani": r_ani} # anisotropy radius [arcsec]
# aperture as shell
# aperture_type = 'shell'
# kwargs_aperture_inner = {'r_in': 0., 'r_out': 0.2, 'center_dec': 0, 'center_ra': 0}
# kwargs_aperture_outer = {'r_in': 0., 'r_out': 1.5, 'center_dec': 0, 'center_ra': 0}
# aperture as slit
r_bins = np.linspace(0, 2, 3)
kwargs_ifu = {
"r_bins": r_bins,
"center_ra": 0,
"center_dec": 0,
"aperture_type": "IFU_shells",
}
kwargs_aperture = {
"aperture_type": "shell",
"r_in": r_bins[0],
"r_out": r_bins[1],
"center_ra": 0,
"center_dec": 0,
}
psf_fwhm = 1.0 # Gaussian FWHM psf
kwargs_cosmo = {"d_d": 1000, "d_s": 1500, "d_ds": 800}
kwargs_numerics = {
"interpol_grid_num": 500,
"log_integration": True,
"max_integrate": 100,
}
kwargs_model = {
"mass_profile_list": mass_profile_list,
"light_profile_list": light_profile_list,
"anisotropy_model": anisotropy_type,
}
kwargs_psf = {"psf_type": "GAUSSIAN", "fwhm": psf_fwhm}
galkinIFU = Galkin(
kwargs_aperture=kwargs_ifu,
kwargs_psf=kwargs_psf,
kwargs_cosmo=kwargs_cosmo,
kwargs_model=kwargs_model,
kwargs_numerics=kwargs_numerics,
analytic_kinematics=True,
)
sigma_v_ifu = galkinIFU.dispersion_map(
kwargs_mass={"theta_E": theta_E, "gamma": gamma},
kwargs_light={"r_eff": r_eff},
kwargs_anisotropy=kwargs_anisotropy,
num_kin_sampling=1000,
)
galkin = Galkin(
kwargs_model,
kwargs_aperture,
kwargs_psf,
kwargs_cosmo,
kwargs_numerics,
analytic_kinematics=True,
)
sigma_v = galkin.dispersion(
kwargs_mass={"theta_E": theta_E, "gamma": gamma},
kwargs_light={"r_eff": r_eff},
kwargs_anisotropy=kwargs_anisotropy,
sampling_number=1000,
)
npt.assert_almost_equal(sigma_v, sigma_v_ifu[0], decimal=-1)
def test_dispersion_map_grid_convolved(self):
"""Test whether the old and new version using direct PSF convolution provide the
same answer."""
# light profile
light_profile_list = ["HERNQUIST"]
r_eff = 1.0
kwargs_light = {
"r_eff": r_eff, # effective half light radius (2d
# projected) in arcsec 0.551 * mass profile
"amp": 1.0,
"center_x": 0.0,
"center_y": 0.0,
}
mass_profile_list = ["PEMD"]
theta_E = 1.0
gamma = 2.0
kwargs_mass = {
"theta_E": theta_E,
"center_x": 0.0,
"center_y": 0.0,
"gamma": gamma,
} # Einstein radius (arcsec) and power-law slope
# anisotropy profile
anisotropy_type = "OM"
r_ani = 1.5
kwargs_anisotropy = {"r_ani": r_ani} # anisotropy radius [arcsec]
# aperture as grid
# aperture_type = 'shell'
# kwargs_aperture_inner = {'r_in': 0., 'r_out': 0.2, 'center_dec': 0, 'center_ra': 0}
# kwargs_aperture_outer = {'r_in': 0., 'r_out': 1.5, 'center_dec': 0, 'center_ra': 0}
# aperture as slit
x_grid, y_grid = np.meshgrid(
np.arange(-1.9 * 2, 1.91 * 2, 0.4), np.arange(-1.9 * 2, 1.91 * 2, 0.4)
)
kwargs_ifu = {
"aperture_type": "IFU_grid",
"x_grid": x_grid,
"y_grid": y_grid,
}
kwargs_aperture = {
"aperture_type": "slit",
"width": 0.4,
"length": 0.4,
"center_ra": 0,
"center_dec": 0,
}
psf_fwhm = 0.8 # Gaussian FWHM psf
kwargs_cosmo = {"d_d": 1000, "d_s": 1500, "d_ds": 800}
kwargs_numerics = { #'sampling_number': 1000,
"interpol_grid_num": 1000,
"log_integration": True,
"max_integrate": 1000,
"min_integrate": 0.001,
}
kwargs_model = {
"mass_profile_list": mass_profile_list,
"light_profile_list": light_profile_list,
"anisotropy_model": anisotropy_type,
}
kwargs_psf = {"psf_type": "GAUSSIAN", "fwhm": psf_fwhm}
galkinIFU = Galkin(
kwargs_aperture=kwargs_ifu,
kwargs_psf=kwargs_psf,
kwargs_cosmo=kwargs_cosmo,
kwargs_model=kwargs_model,
kwargs_numerics=kwargs_numerics,
analytic_kinematics=True,
)
sigma_v_ifu = galkinIFU.dispersion_map_grid_convolved(
kwargs_mass=kwargs_mass,
kwargs_light=kwargs_light,
kwargs_anisotropy=kwargs_anisotropy,
supersampling_factor=21,
)
for i in range(9, 12):
for j in range(9, 12):
kwargs_aperture["center_ra"] = x_grid[i, j]
kwargs_aperture["center_dec"] = y_grid[i, j]
galkin = Galkin(
kwargs_model,
kwargs_aperture,
kwargs_psf,
kwargs_cosmo,
kwargs_numerics,
analytic_kinematics=True,
)
sigma_v = galkin.dispersion(
kwargs_mass=kwargs_mass, # {'theta_E': theta_E, 'gamma':
# gamma},
kwargs_light=kwargs_light, # {'r_eff': r_eff},
kwargs_anisotropy=kwargs_anisotropy,
sampling_number=1000,
)
npt.assert_almost_equal(sigma_v, sigma_v_ifu[i, j], decimal=-1)
# test for voronoi binning
voronoi_bins = np.zeros_like(x_grid) - 1
voronoi_bins[8:12, 8:12] = 0
kwargs_aperture = {
"aperture_type": "slit",
"width": 1.6,
"length": 1.6,
"center_ra": 0,
"center_dec": 0,
}
sigma_v_ifu = galkinIFU.dispersion_map_grid_convolved(
kwargs_mass=kwargs_mass,
kwargs_light=kwargs_light,
kwargs_anisotropy=kwargs_anisotropy,
supersampling_factor=21,
voronoi_bins=voronoi_bins,
)
galkin = Galkin(
kwargs_model,
kwargs_aperture,
kwargs_psf,
kwargs_cosmo,
kwargs_numerics,
analytic_kinematics=True,
)
sigma_v = galkin.dispersion(
kwargs_mass=kwargs_mass, # {'theta_E': theta_E, 'gamma':
# gamma},
kwargs_light=kwargs_light, # {'r_eff': r_eff},
kwargs_anisotropy=kwargs_anisotropy,
sampling_number=1000,
)
npt.assert_almost_equal(sigma_v, sigma_v_ifu[0], decimal=-1)
    def test_extract_center(self):
        """Test the extraction of the center of the IFU map."""
        # list-of-kwargs input with explicit center keys
        assert Galkin._extract_center([{"center_x": 1, "center_y": 2}]) == (1, 2)
        # missing center keys fall back to the origin
        assert Galkin._extract_center([{}]) == (0, 0)
        # a bare kwargs dict (not wrapped in a list) is accepted as well
        assert Galkin._extract_center({"center_x": 1, "center_y": 2}) == (1, 2)
        assert Galkin._extract_center({}) == (0, 0)
def test_projected_integral_vs_3d_rendering(self):
lum_weight_int_method = True
# light profile
light_profile_list = ["HERNQUIST"]
r_eff = 1.5
kwargs_light = [
{"Rs": 0.551 * r_eff, "amp": 1.0}
] # effective half light radius (2d projected) in arcsec
# 0.551 *
# mass profile
mass_profile_list = ["SPP"]
theta_E = 1.2
gamma = 2.0
kwargs_profile = [
{"theta_E": theta_E, "gamma": gamma}
] # Einstein radius (arcsec) and power-law slope
# anisotropy profile
anisotropy_type = "OM"
r_ani = 2.0
kwargs_anisotropy = {"r_ani": r_ani} # anisotropy radius [arcsec]
# aperture as slit
aperture_type = "slit"
length = 1.0
width = 0.3
kwargs_aperture = {
"aperture_type": aperture_type,
"length": length,
"width": width,
"center_ra": 0,
"center_dec": 0,
"angle": 0,
}
psf_fwhm = 1.0 # Gaussian FWHM psf
kwargs_cosmo = {"d_d": 1000, "d_s": 1500, "d_ds": 800}
kwargs_numerics_3d = {
"interpol_grid_num": 2000,
"log_integration": True,
"max_integrate": 1000,
"min_integrate": 0.00001,
"lum_weight_int_method": False,
}
kwargs_model = {
"mass_profile_list": mass_profile_list,
"light_profile_list": light_profile_list,
"anisotropy_model": anisotropy_type,
}
kwargs_psf = {"psf_type": "GAUSSIAN", "fwhm": psf_fwhm}
galkin = Galkin(
kwargs_model=kwargs_model,
kwargs_aperture=kwargs_aperture,
kwargs_psf=kwargs_psf,
kwargs_cosmo=kwargs_cosmo,
kwargs_numerics=kwargs_numerics_3d,
)
sigma_v = galkin.dispersion(
kwargs_profile, kwargs_light, kwargs_anisotropy, sampling_number=1000
)
kwargs_numerics_2d = {
"interpol_grid_num": 2000,
"log_integration": True,
"max_integrate": 1000,
"min_integrate": 0.00001,
"lum_weight_int_method": True,
}
galkin = Galkin(
kwargs_model=kwargs_model,
kwargs_aperture=kwargs_aperture,
kwargs_psf=kwargs_psf,
kwargs_cosmo=kwargs_cosmo,
kwargs_numerics=kwargs_numerics_2d,
analytic_kinematics=False,
)
sigma_v_int_method = galkin.dispersion(
kwargs_profile, kwargs_light, kwargs_anisotropy, sampling_number=1000
)
npt.assert_almost_equal(sigma_v_int_method / sigma_v, 1, decimal=2)
def test_2d_vs_3d_power_law(self):
# set up power-law light profile
light_model = ["POWER_LAW"]
kwargs_light = [{"gamma": 2, "amp": 1, "e1": 0, "e2": 0}]
lens_model = ["SIS"]
kwargs_mass = [{"theta_E": 1}]
anisotropy_type = "isotropic"
kwargs_anisotropy = {}
kwargs_model = {
"mass_profile_list": lens_model,
"light_profile_list": light_model,
"anisotropy_model": anisotropy_type,
}
kwargs_numerics = {
"interpol_grid_num": 2000,
"log_integration": True,
"max_integrate": 50,
"min_integrate": 0.0001,
}
kwargs_numerics_3d = copy.deepcopy(kwargs_numerics)
kwargs_numerics_3d["lum_weight_int_method"] = False
kwargs_numerics_2d = copy.deepcopy(kwargs_numerics)
kwargs_numerics_2d["lum_weight_int_method"] = True
kwargs_cosmo = {"d_d": 1000, "d_s": 1500, "d_ds": 800}
# compute analytic velocity dispersion of SIS profile
v_sigma_c2 = (
kwargs_mass[0]["theta_E"]
* const.arcsec
/ (4 * np.pi)
* kwargs_cosmo["d_s"]
/ kwargs_cosmo["d_ds"]
)
v_sigma_true = np.sqrt(v_sigma_c2) * const.c / 1000
# aperture as slit
aperture_type = "slit"
length = 1.0
width = 0.3
kwargs_aperture = {
"aperture_type": aperture_type,
"length": length,
"width": width,
"center_ra": 0,
"center_dec": 0,
"angle": 0,
}
kwargs_psf = {"psf_type": "GAUSSIAN", "fwhm": 0.5}
galkin3d = Galkin(
kwargs_model=kwargs_model,
kwargs_aperture=kwargs_aperture,
kwargs_psf=kwargs_psf,
kwargs_cosmo=kwargs_cosmo,
kwargs_numerics=kwargs_numerics_3d,
)
galkin2d = Galkin(
kwargs_model=kwargs_model,
kwargs_aperture=kwargs_aperture,
kwargs_psf=kwargs_psf,
kwargs_cosmo=kwargs_cosmo,
kwargs_numerics=kwargs_numerics_2d,
)
sigma_draw_list = []
for i in range(100):
sigma_v_draw = galkin3d._draw_one_sigma2(
kwargs_mass, kwargs_light, kwargs_anisotropy
)
sigma_draw_list.append(sigma_v_draw)
# import matplotlib.pyplot as plt
# plt.plot(np.sqrt(sigma_draw_list) / 1000 / v_sigma_true)
# plt.show()
# assert 1 == 0
sigma_v_2d = galkin2d.dispersion(
kwargs_mass, kwargs_light, kwargs_anisotropy, sampling_number=1000
)
sigma_v_3d = galkin3d.dispersion(
kwargs_mass, kwargs_light, kwargs_anisotropy, sampling_number=1000
)
npt.assert_almost_equal(sigma_v_2d / v_sigma_true, 1, decimal=2)
npt.assert_almost_equal(sigma_v_3d / v_sigma_true, 1, decimal=2)
    def test_get_convolution_kernel(self):
        """The PSF convolution kernel is returned as a square 2d array."""
        psf = self.galkin_ifu_grid._get_convolution_kernel(supersampling_factor=1)
        assert psf.shape == (61, 61)
    def test_get_grid(self):
        """Grid construction honors the requested supersampling factor."""
        kwargs_mass = [{"theta_E": 1.2, "gamma": 2}]
        (
            x_grid,
            y_grid,
            log10_radial_distance_from_center,
        ) = self.galkin_ifu_grid._get_grid(kwargs_mass, supersampling_factor=1)
        # base IFU grid from setup_method is 10x10
        assert x_grid.shape == (10, 10)
        assert y_grid.shape == (10, 10)
        (
            x_grid,
            y_grid,
            log10_radial_distance_from_center,
        ) = self.galkin_ifu_grid._get_grid(kwargs_mass, supersampling_factor=3)
        # supersampling by 3 triples each grid dimension
        assert x_grid.shape == (30, 30)
        assert y_grid.shape == (30, 30)
    def test_delta_pix_xy(self):
        """Pixel scale of the IFU grid built in setup_method is 0.1 in both
        axes."""
        delta_x, delta_y = self.galkin_ifu_grid._delta_pix_xy()
        npt.assert_almost_equal(delta_x, 0.1, decimal=5)
        npt.assert_almost_equal(delta_y, 0.1, decimal=5)
# Allow executing this test module directly as a script.
if __name__ == "__main__":
    pytest.main()
|
lenstronomyREPO_NAMElenstronomyPATH_START.@lenstronomy_extracted@lenstronomy-main@test@test_GalKin@test_galkin.py@.PATH_END.py
|
{
"filename": "gethdutype.py",
"repo_name": "Fermipy/fermipy",
"repo_path": "fermipy_extracted/fermipy-master/fermipy/scripts/gethdutype.py",
"type": "Python"
}
|
#!/usr/bin/env python
#
""" Identify the type of image stored in an HDU
"""
__facility__ = "gethdutype.py"
__abstract__ = __doc__
__author__ = "E. Charles"
__date__ = "$Date: 2015/05/06 21:20:31 $"
__version__ = "$Revision: 1.4 $, $Author: echarles $"
__release__ = "$Name: $"
import sys
import argparse
import os
import numpy
from astropy.io import fits
def tryprint(header, key):
    """Print ``key = value`` for *key* in *header*, or a fallback message
    when the keyword is absent."""
    try:
        value = header[key]
    except KeyError:
        print("No key %s" % (key))
        return
    print("%s = %s" % (key, value))
def gethdutype(hdu):
    """Report whether *hdu* holds a plain WCS image or a HEALPix map, and
    dump the relevant header keywords to stdout."""
    if hdu.is_image:
        # plain WCS image: only the dimensionality is reported
        print("WCS Image, naxis = %i" % (hdu.header['NAXIS']))
        return
    header = hdu.header
    try:
        pixtype = header['PIXTYPE']
    except KeyError:
        print("Unknown image type, PIXTYPE keyword is absent")
        return
    if pixtype != "HEALPIX":
        # note: the HEALPix keywords below are still dumped in this case
        print("Unknown image type PIXTYPE = %s" % pixtype)
    for keyword in ("HPX_CONV", "INDXSCHM", "COORDSYS", "NSIDE", "ORDERING"):
        tryprint(header, keyword)
def main():
    """Command-line entry point: parse arguments, open the FITS file and
    report the type of the selected HDU (first HDU by default)."""
    # Argument definition
    usage = "usage: %(prog)s [options]"
    description = "Identify the type of image stored in an HDU"
    # BUG FIX: ``usage`` was previously passed positionally, which argparse
    # interprets as ``prog`` (garbling the help output), and the local
    # ``description`` was built but never used (``__abstract__`` was passed
    # instead). Pass both explicitly by keyword.
    parser = argparse.ArgumentParser(usage=usage, description=description)
    parser.add_argument("-i", "--input", type=argparse.FileType('r'), required=True,
                        help="Input file")
    parser.add_argument("--hdu", type=str, default=None,
                        help="FITS HDU with map")
    # Parse the command line
    args = parser.parse_args(sys.argv[1:])
    # NOTE(review): the file is re-opened by name here even though argparse
    # already opened it via FileType -- kept for backward compatibility
    f = fits.open(args.input.name)
    hdu = f[0] if args.hdu is None else f[args.hdu]
    gethdutype(hdu)
if __name__ == "__main__":
    main()
|
FermipyREPO_NAMEfermipyPATH_START.@fermipy_extracted@fermipy-master@fermipy@scripts@gethdutype.py@.PATH_END.py
|
{
"filename": "_showlegend.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/heatmap/_showlegend.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class ShowlegendValidator(_plotly_utils.basevalidators.BooleanValidator):
    """Validator for the boolean ``showlegend`` property of ``heatmap``
    traces (plotly validator boilerplate)."""
    def __init__(self, plotly_name="showlegend", parent_name="heatmap", **kwargs):
        super(ShowlegendValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            # defaults below are only used when the caller does not override
            # them via **kwargs
            edit_type=kwargs.pop("edit_type", "style"),
            role=kwargs.pop("role", "info"),
            **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@heatmap@_showlegend.py@.PATH_END.py
|
{
"filename": "varying_pann.ipynb",
"repo_name": "miguelzuma/hi_class_public",
"repo_path": "hi_class_public_extracted/hi_class_public-master/notebooks/varying_pann.ipynb",
"type": "Jupyter Notebook"
}
|
```python
# import necessary modules
# uncomment to get plots displayed in notebook
%matplotlib inline
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from classy import Class
from scipy.optimize import fsolve
import math
```
```python
# esthetic definitions for the plots
font = {'size' : 16, 'family':'STIXGeneral'}
axislabelfontsize='large'
matplotlib.rc('font', **font)
matplotlib.mathtext.rcParams['legend.fontsize']='medium'
plt.rcParams["figure.figsize"] = [8.0,6.0]
```
```python
############################################
#
# Varying parameter (others fixed to default)
#
var_name = 'annihilation'
var_array = np.linspace(0,1.e-5,5)
var_num = len(var_array)
var_legend = r'$p_\mathrm{ann}$'
var_figname = 'pann'
#
#############################################
#
# Fixed settings
#
common_settings = {'output':'tCl,pCl,lCl,mPk',
'lensing':'yes',
# LambdaCDM parameters
'h':0.67556,
'omega_b':0.022032,
'omega_cdm':0.12038,
'A_s':2.215e-9,
'n_s':0.9619,
'tau_reio':0.0925,
# Take fixed value for primordial Helium (instead of automatic BBN adjustment)
'YHe':0.246,
# other output and precision parameters
'P_k_max_1/Mpc':3.0,
'l_switch_limber':9}
#'background_verbose':1}
#
# arrays for output
#
kvec = np.logspace(-4,np.log10(3),1000)
legarray = []
twopi = 2.*math.pi
#
# Create figures
#
fig_Pk, ax_Pk = plt.subplots()
fig_TT, ax_TT = plt.subplots()
fig_EE, ax_EE = plt.subplots()
fig_PP, ax_PP = plt.subplots()
#
# loop over varying parameter values
#
# Loop over the varying parameter values: run CLASS for each, then overlay
# P(k) and the lensed CMB spectra on the shared figures.
for i, var in enumerate(var_array):
    #
    # BUG FIX: Python 2 print statement replaced by the print() function so
    # this cell also runs under Python 3.
    print(' * Compute with %s=%e' % (var_name, var))
    #
    # deal with colors and legends
    #
    if i == 0:
        # reference model drawn in black
        var_color = 'k'
        var_alpha = 1.
        legarray.append(r'ref. $\Lambda CDM$')
    else:
        # varying models drawn in red with opacity increasing with i
        var_color = 'r'
        var_alpha = 1.*i/(var_num-1.)
    if i == var_num-1:
        legarray.append(var_legend)
    #
    # call CLASS
    #
    M = Class()
    M.set(common_settings)
    M.set({var_name: var})
    M.compute()
    #
    # get Cls (multipoles l >= 2 only)
    #
    clM = M.lensed_cl(2500)
    ll = clM['ell'][2:]
    clTT = clM['tt'][2:]
    clEE = clM['ee'][2:]
    clPP = clM['pp'][2:]
    #
    # get P(k) for common k values
    #
    pkM = []
    for k in kvec:
        pkM.append(M.pk(k, 0.))
    #
    # plot P(k)
    #
    ax_Pk.loglog(kvec, np.array(pkM), color=var_color, alpha=var_alpha, linestyle='-')
    #
    # plot C_l^TT
    #
    ax_TT.semilogx(ll, clTT*ll*(ll+1)/twopi, color=var_color, alpha=var_alpha, linestyle='-')
    #
    # plot Cl EE
    #
    ax_EE.loglog(ll, clEE*ll*(ll+1)/twopi, color=var_color, alpha=var_alpha, linestyle='-')
    #
    # plot Cl phiphi
    #
    ax_PP.loglog(ll, clPP*ll*(ll+1)*ll*(ll+1)/twopi, color=var_color, alpha=var_alpha, linestyle='-')
    #
    # reset CLASS so the next iteration starts from a clean state
    #
    M.struct_cleanup()
    M.empty()
#
# output of P(k) figure
#
ax_Pk.set_xlim([1.e-4,3.])
ax_Pk.set_xlabel(r'$k \,\,\,\, [h/\mathrm{Mpc}]$')
ax_Pk.set_ylabel(r'$P(k) \,\,\,\, [\mathrm{Mpc}/h]^3$')
ax_Pk.legend(legarray)
fig_Pk.tight_layout()
fig_Pk.savefig('spectra_%s_Pk.pdf' % var_figname)
#
# output of C_l^TT figure
#
ax_TT.set_xlim([2,2500])
ax_TT.set_xlabel(r'$\ell$')
ax_TT.set_ylabel(r'$[\ell(\ell+1)/2\pi] C_\ell^\mathrm{TT}$')
ax_TT.legend(legarray)
fig_TT.tight_layout()
fig_TT.savefig('spectra_%s_cltt.pdf' % var_figname)
#
# output of C_l^EE figure
#
ax_EE.set_xlim([2,2500])
ax_EE.set_xlabel(r'$\ell$')
ax_EE.set_ylabel(r'$[\ell(\ell+1)/2\pi] C_\ell^\mathrm{EE}$')
ax_EE.legend(legarray)
fig_EE.tight_layout()
fig_EE.savefig('spectra_%s_clee.pdf' % var_figname)
#
# output of C_l^pp figure
#
ax_PP.set_xlim([10,2500])
ax_PP.set_xlabel(r'$\ell$')
ax_PP.set_ylabel(r'$[\ell^2(\ell+1)^2/2\pi] C_\ell^\mathrm{\phi \phi}$')
ax_PP.legend(legarray)
fig_PP.tight_layout()
fig_PP.savefig('spectra_%s_clpp.pdf' % var_figname)
```
```python
```
|
miguelzumaREPO_NAMEhi_class_publicPATH_START.@hi_class_public_extracted@hi_class_public-master@notebooks@varying_pann.ipynb@.PATH_END.py
|
{
"filename": "metrics.py",
"repo_name": "aimalz/qp",
"repo_path": "qp_extracted/qp-master/qp/metrics.py",
"type": "Python"
}
|
import numpy as np
import qp
def calculate_moment(p, N, using=None, limits=None, dx=0.01, vb=False):
    """
    Calculates a moment of a qp.PDF object

    Parameters
    ----------
    p: qp.PDF object
        the PDF whose moment will be calculated
    N: int
        order of the moment to be calculated
    using: string, optional
        parametrization of `p` to evaluate; defaults to `p.first`
    limits: tuple of floats, optional
        endpoints of integration interval over which to calculate moments;
        defaults to `p.limits`
    dx: float
        resolution of integration grid
    vb: Boolean
        print progress to stdout?

    Returns
    -------
    M: float
        value of the moment
    """
    if limits is None:
        limits = p.limits
    if using is None:
        using = p.first
    # Make a grid from the limits and resolution; guard against a dx larger
    # than the interval, which would otherwise produce a degenerate grid and
    # a division by zero below.
    d = max(int((limits[-1] - limits[0]) / dx), 2)
    grid = np.linspace(limits[0], limits[1], d)
    # Recompute dx as the actual spacing of the grid just constructed.
    dx = (limits[-1] - limits[0]) / (d - 1)
    # Evaluate the PDF on the grid; evaluate() returns a (grid, values) pair.
    pe = p.evaluate(grid, using=using, vb=vb)[1]
    # Approximate int x^N p(x) dx by Riemann summation.
    grid_to_N = grid ** N
    M = quick_moment(pe, grid_to_N, dx)
    return M
def quick_moment(p_eval, grid_to_N, dx):
    """
    Calculates a moment of an evaluated PDF

    Parameters
    ----------
    p_eval: numpy.ndarray, float
        the values of a probability distribution evaluated on a regular grid
    grid_to_N: numpy.ndarray, float
        the grid upon which p_eval was evaluated, already raised to the power
        of the desired moment order
    dx: float
        the difference between regular grid points

    Returns
    -------
    M: float
        value of the moment
    """
    # Riemann-sum approximation of int x^N p(x) dx, written as a dot product.
    M = np.dot(grid_to_N, p_eval) * dx
    return M
def calculate_kld(p, q, limits=qp.utils.lims, dx=0.01, vb=False):
    """
    Calculates the Kullback-Leibler Divergence between two qp.PDF objects.

    Parameters
    ----------
    p: PDF object
        probability distribution whose distance _from_ `q` will be calculated.
    q: PDF object
        probability distribution whose distance _to_ `p` will be calculated.
    limits: tuple of floats
        endpoints of integration interval in which to calculate KLD
    dx: float
        resolution of integration grid
    vb: boolean
        report on progress to stdout?

    Returns
    -------
    Dpq: float
        the value of the Kullback-Leibler Divergence from `q` to `p`

    Notes
    -----
    TO DO: have this take number of points not dx!
    """
    # Make a grid from the limits and resolution
    N = int((limits[-1] - limits[0]) / dx)
    grid = np.linspace(limits[0], limits[1], N)
    dx = (limits[-1] - limits[0]) / (N - 1)
    # Evaluate both PDFs on the grid, normalized so the integral can be
    # approximated by simple summation; evaluate() returns (grid, values).
    pn = p.evaluate(grid, vb=vb, norm=True)[1]
    qn = q.evaluate(grid, vb=vb, norm=True)[1]
    # Calculate the KLD from q to p
    Dpq = quick_kld(pn, qn, dx=dx)
    if Dpq < 0.:
        # A negative KLD indicates a numerical problem (e.g. badly normalized
        # evaluations); report it and clip to a tiny positive value.
        print('broken KLD: '+str((Dpq, pn, qn, dx)))
        Dpq = qp.utils.epsilon
    return Dpq
def quick_kld(p_eval, q_eval, dx=0.01):
    """
    Calculates the Kullback-Leibler Divergence between two evaluations of PDFs.

    Parameters
    ----------
    p_eval: numpy.ndarray, float
        evaluations of probability distribution whose distance _from_ `q` will be calculated
    q_eval: numpy.ndarray, float
        evaluations of probability distribution whose distance _to_ `p` will be calculated.
    dx: float
        resolution of integration grid

    Returns
    -------
    Dpq: float
        the value of the Kullback-Leibler Divergence from `q` to `p`
    """
    # safelog guards against log(0) for zero-valued evaluations.
    logquotient = qp.utils.safelog(p_eval) - qp.utils.safelog(q_eval)
    # Riemann-sum approximation of int p(x) log(p(x)/q(x)) dx.
    Dpq = dx * np.sum(p_eval * logquotient)
    return Dpq
def calculate_rmse(p, q, limits=qp.utils.lims, dx=0.01, vb=False):
    """
    Calculates the Root Mean Square Error between two qp.PDF objects.

    Parameters
    ----------
    p: PDF object
        probability distribution function whose distance between its truth and
        the approximation of `q` will be calculated.
    q: PDF object
        probability distribution function whose distance between its
        approximation and the truth of `p` will be calculated.
    limits: tuple of floats
        endpoints of integration interval in which to calculate RMS
    dx: float
        resolution of integration grid
    vb: boolean
        report on progress to stdout?

    Returns
    -------
    rms: float
        the value of the RMS error between `q` and `p`

    Notes
    -----
    TO DO: change dx to N
    """
    # Make a grid from the limits and resolution
    N = int((limits[-1] - limits[0]) / dx)
    grid = np.linspace(limits[0], limits[1], N)
    dx = (limits[-1] - limits[0]) / (N - 1)
    # Evaluate the functions on the grid; evaluate() returns (grid, values).
    pe = p.evaluate(grid, vb=vb)[1]
    qe = q.evaluate(grid, vb=vb)[1]
    # Calculate the RMS between p and q
    rms = quick_rmse(pe, qe, N)
    return rms
def quick_rmse(p_eval, q_eval, N):
    """
    Calculates the Root Mean Square Error between two evaluations of PDFs.

    Parameters
    ----------
    p_eval: numpy.ndarray, float
        evaluation of probability distribution function whose distance between
        its truth and the approximation of `q` will be calculated.
    q_eval: numpy.ndarray, float
        evaluation of probability distribution function whose distance between
        its approximation and the truth of `p` will be calculated.
    N: int
        number of points at which PDFs were evaluated

    Returns
    -------
    rms: float
        the value of the RMS error between `q` and `p`
    """
    # Root of the mean squared pointwise difference, written as a dot product
    # of the residual vector with itself.
    residual = p_eval - q_eval
    return np.sqrt(np.dot(residual, residual) / N)
|
aimalzREPO_NAMEqpPATH_START.@qp_extracted@qp-master@qp@metrics.py@.PATH_END.py
|
{
"filename": "MasterPlot.py",
"repo_name": "mmicromegas/ransX",
"repo_path": "ransX_extracted/ransX-master/UTILS/RANSX/MasterPlot.py",
"type": "Python"
}
|
from EQUATIONS.ContinuityEquationWithMassFlux import ContinuityEquationWithMassFlux
from EQUATIONS.ContinuityEquationWithFavrianDilatation import ContinuityEquationWithFavrianDilatation
from EQUATIONS.MomentumEquationX import MomentumEquationX
from EQUATIONS.MomentumEquationY import MomentumEquationY
from EQUATIONS.MomentumEquationZ import MomentumEquationZ
from EQUATIONS.ReynoldsStressXXequation import ReynoldsStressXXequation
from EQUATIONS.ReynoldsStressYYequation import ReynoldsStressYYequation
from EQUATIONS.ReynoldsStressZZequation import ReynoldsStressZZequation
from EQUATIONS.TurbulentKineticEnergyEquation import TurbulentKineticEnergyEquation
from EQUATIONS.TurbulentKineticEnergyEquationRadial import TurbulentKineticEnergyEquationRadial
from EQUATIONS.TurbulentKineticEnergyEquationHorizontal import TurbulentKineticEnergyEquationHorizontal
from EQUATIONS.InternalEnergyEquation import InternalEnergyEquation
from EQUATIONS.InternalEnergyFluxEquation import InternalEnergyFluxEquation
from EQUATIONS.InternalEnergyVarianceEquation import InternalEnergyVarianceEquation
from EQUATIONS.KineticEnergyEquation import KineticEnergyEquation
from EQUATIONS.TotalEnergyEquation import TotalEnergyEquation
from EQUATIONS.EntropyEquation import EntropyEquation
from EQUATIONS.EntropyFluxEquation import EntropyFluxEquation
from EQUATIONS.EntropyVarianceEquation import EntropyVarianceEquation
from EQUATIONS.PressureEquation import PressureEquation
from EQUATIONS.PressureFluxXequation import PressureFluxXequation
from EQUATIONS.PressureFluxYequation import PressureFluxYequation
from EQUATIONS.PressureFluxZequation import PressureFluxZequation
from EQUATIONS.PressureVarianceEquation import PressureVarianceEquation
from EQUATIONS.TemperatureEquation import TemperatureEquation
from EQUATIONS.TemperatureFluxEquation import TemperatureFluxEquation
from EQUATIONS.TemperatureVarianceEquation import TemperatureVarianceEquation
from EQUATIONS.EnthalpyEquation import EnthalpyEquation
from EQUATIONS.EnthalpyFluxEquation import EnthalpyFluxEquation
from EQUATIONS.EnthalpyVarianceEquation import EnthalpyVarianceEquation
from EQUATIONS.DensityVarianceEquation import DensityVarianceEquation
from EQUATIONS.TurbulentMassFluxEquation import TurbulentMassFluxEquation
from EQUATIONS.DensitySpecificVolumeCovarianceEquation import DensitySpecificVolumeCovarianceEquation
from EQUATIONS.XtransportEquation import XtransportEquation
from EQUATIONS.XfluxXequation import XfluxXequation
from EQUATIONS.XfluxYequation import XfluxYequation
from EQUATIONS.XfluxZequation import XfluxZequation
from EQUATIONS.XvarianceEquation import XvarianceEquation
from EQUATIONS.Xdiffusivity import Xdiffusivity
from EQUATIONS.XdamkohlerNumber import XdamkohlerNumber
from EQUATIONS.AbarTransportEquation import AbarTransportEquation
from EQUATIONS.ZbarTransportEquation import ZbarTransportEquation
from EQUATIONS.AbarFluxTransportEquation import AbarFluxTransportEquation
from EQUATIONS.ZbarFluxTransportEquation import ZbarFluxTransportEquation
from EQUATIONS.TemperatureDensity import TemperatureDensity
from EQUATIONS.PressureInternalEnergy import PressureInternalEnergy
from EQUATIONS.NuclearEnergyProduction import NuclearEnergyProduction
from EQUATIONS.Gravity import Gravity
from EQUATIONS.TemperatureGradients import TemperatureGradients
from EQUATIONS.Degeneracy import Degeneracy
from EQUATIONS.VelocitiesMeanExp import VelocitiesMeanExp
from EQUATIONS.VelocitiesMLTturb import VelocitiesMLTturb
from EQUATIONS.RelativeRMSflct import RelativeRMSflct
from EQUATIONS.AbarZbar import AbarZbar
from EQUATIONS.BruntVaisalla import BruntVaisalla
from EQUATIONS.Buoyancy import Buoyancy
# import classes for hydrodynamic stellar structure equations
from EQUATIONS.HsseContinuityEquation import HsseContinuityEquation
from EQUATIONS.HsseMomentumEquationX import HsseMomentumEquationX
from EQUATIONS.HsseTemperatureEquation import HsseTemperatureEquation
from EQUATIONS.HsseLuminosityEquation import HsseLuminosityEquation
from EQUATIONS.HsseXtransportEquation import HsseXtransportEquation
# from class for full turbulence velocity field hypothesis
from EQUATIONS.FullTurbulenceVelocityFieldHypothesisX import FullTurbulenceVelocityFieldHypothesisX
from EQUATIONS.FullTurbulenceVelocityFieldHypothesisY import FullTurbulenceVelocityFieldHypothesisY
from EQUATIONS.FullTurbulenceVelocityFieldHypothesisZ import FullTurbulenceVelocityFieldHypothesisZ
from EQUATIONS.UxfpdIdentity import UxfpdIdentity
from EQUATIONS.UyfpdIdentity import UyfpdIdentity
from EQUATIONS.UzfpdIdentity import UzfpdIdentity
from EQUATIONS.DivuDilatation import DivuDilatation
import matplotlib.pyplot as plt
class MasterPlot():
    def __init__(self, params):
        # params: parameter container queried via getForProp / getForEqs /
        # getForEqsBar by every exec* plotting method of this class.
        self.params = params
def execRho(self, bconv, tconv):
params = self.params
# instantiate
ransCONT = ContinuityEquationWithFavrianDilatation(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['fext'],
params.getForProp('prop')['intc'],
params.getForProp('prop')['nsdim'],
params.getForProp('prop')['prefix'])
# plot density
ransCONT.plot_rho(params.getForProp('prop')['laxis'],
bconv, tconv,
params.getForEqs('rho')['xbl'],
params.getForEqs('rho')['xbr'],
params.getForEqs('rho')['ybu'],
params.getForEqs('rho')['ybd'],
params.getForEqs('rho')['ilg'])
# ransCONT.plot_mm_vs_MM(params.getForProp('prop')['laxis'],
# params.getForEqs('rho')['xbl'],
# params.getForEqs('rho')['xbr'],
# params.getForEqs('rho')['ybu'],
# params.getForEqs('rho')['ybd'],
# params.getForEqs('rho')['ilg'])
def execContEq(self, bconv, tconv):
params = self.params
# instantiate
ransCONT = ContinuityEquationWithFavrianDilatation(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['fext'],
params.getForProp('prop')['intc'],
params.getForProp('prop')['nsdim'],
params.getForProp('prop')['prefix'])
# plot continuity equation
ransCONT.plot_continuity_equation(params.getForProp('prop')['laxis'],
bconv, tconv,
params.getForEqs('conteq')['xbl'],
params.getForEqs('conteq')['xbr'],
params.getForEqs('conteq')['ybu'],
params.getForEqs('conteq')['ybd'],
params.getForEqs('conteq')['ilg'])
def execContEqBar(self):
params = self.params
# instantiate
ransCONT = ContinuityEquationWithFavrianDilatation(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['fext'],
params.getForProp('prop')['intc'],
params.getForProp('prop')['nsdim'],
params.getForProp('prop')['prefix'])
# plot continuity equation integral budget
ransCONT.plot_continuity_equation_integral_budget(params.getForProp('prop')['laxis'],
params.getForEqsBar('conteqBar')['xbl'],
params.getForEqsBar('conteqBar')['xbr'],
params.getForEqsBar('conteqBar')['ybu'],
params.getForEqsBar('conteqBar')['ybd'])
def execContFddEq(self, bconv, tconv):
params = self.params
# instantiate
ransCONTfdd = ContinuityEquationWithMassFlux(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['fext'],
params.getForProp('prop')['intc'],
params.getForProp('prop')['nsdim'],
params.getForProp('prop')['prefix'])
# plot continuity equation
ransCONTfdd.plot_continuity_equation(params.getForProp('prop')['laxis'],
bconv, tconv,
params.getForEqs('conteqfdd')['xbl'],
params.getForEqs('conteqfdd')['xbr'],
params.getForEqs('conteqfdd')['ybu'],
params.getForEqs('conteqfdd')['ybd'],
params.getForEqs('conteqfdd')['ilg'])
# ransCONTfdd.plot_Frho_space_time(params.getForProp('prop')['laxis'],
# bconv, tconv,
# params.getForEqs('conteqfdd')['xbl'],
# params.getForEqs('conteqfdd')['xbr'],
# params.getForEqs('conteqfdd')['ybu'],
# params.getForEqs('conteqfdd')['ybd'],
# params.getForEqs('conteqfdd')['ilg'])
def execContFddEqBar(self):
params = self.params
# instantiate
ransCONTfdd = ContinuityEquationWithMassFlux(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['fext'],
params.getForProp('prop')['intc'],
params.getForProp('prop')['nsdim'],
params.getForProp('prop')['prefix'])
# plot continuity equation integral budget
ransCONTfdd.plot_continuity_equation_integral_budget(params.getForProp('prop')['laxis'],
params.getForEqsBar('conteqfddBar')['xbl'],
params.getForEqsBar('conteqfddBar')['xbr'],
params.getForEqsBar('conteqfddBar')['ybu'],
params.getForEqsBar('conteqfddBar')['ybd'])
def execHssContEq(self, bconv, tconv):
params = self.params
# instantiate
ranshssecont = HsseContinuityEquation(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['ieos'],
params.getForProp('prop')['fext'],
params.getForProp('prop')['intc'],
params.getForProp('prop')['prefix'],
bconv, tconv)
# plot continuity equation
ranshssecont.plot_continuity_equation(params.getForProp('prop')['laxis'],
params.getForEqs('cteqhsse')['xbl'],
params.getForEqs('cteqhsse')['xbr'],
params.getForEqs('cteqhsse')['ybu'],
params.getForEqs('cteqhsse')['ybd'],
params.getForEqs('cteqhsse')['ilg'])
# plot continuity equation alternative
ranshssecont.plot_continuity_equation_2(params.getForProp('prop')['laxis'],
params.getForEqs('cteqhsse')['xbl'],
params.getForEqs('cteqhsse')['xbr'],
params.getForEqs('cteqhsse')['ybu'],
params.getForEqs('cteqhsse')['ybd'],
params.getForEqs('cteqhsse')['ilg'])
# plot continuity equation alternative simplified
ranshssecont.plot_continuity_equation_3(params.getForProp('prop')['laxis'],
params.getForEqs('cteqhsse')['xbl'],
params.getForEqs('cteqhsse')['xbr'],
params.getForEqs('cteqhsse')['ybu'],
params.getForEqs('cteqhsse')['ybd'],
params.getForEqs('cteqhsse')['ilg'])
# plot continuity equation alternative simplified - cracking on velocities
# ranshssecont.plot_velocities(params.getForProp('prop')['laxis'],\
# params.getForEqs('cteqhsse')['xbl'],\
# params.getForEqs('cteqhsse')['xbr'],\
# params.getForEqs('cteqhsse')['ybu'],\
# params.getForEqs('cteqhsse')['ybd'],\
# params.getForEqs('cteqhsse')['ilg'])
ranshssecont.plot_dilatation_flux(params.getForProp('prop')['laxis'],
params.getForEqs('cteqhsse')['xbl'],
params.getForEqs('cteqhsse')['xbr'],
params.getForEqs('cteqhsse')['ybu'],
params.getForEqs('cteqhsse')['ybd'],
params.getForEqs('cteqhsse')['ilg'])
# ranshssecont.plot_mass_flux_acceleration(params.getForProp('prop')['laxis'],\
# params.getForEqs('cteqhsse')['xbl'],\
# params.getForEqs('cteqhsse')['xbr'],\
# params.getForEqs('cteqhsse')['ybu'],\
# params.getForEqs('cteqhsse')['ybd'],\
# params.getForEqs('cteqhsse')['ilg'])
def execHssMomxEq(self, bconv, tconv):
params = self.params
# instantiate
ranshssemomx = HsseMomentumEquationX(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['ieos'],
params.getForProp('prop')['fext'],
params.getForProp('prop')['intc'],
params.getForProp('prop')['prefix'],
bconv, tconv)
# plot hsse momentm equation
ranshssemomx.plot_momentum_equation_x(params.getForProp('prop')['laxis'],
params.getForEqs('mxeqhsse')['xbl'],
params.getForEqs('mxeqhsse')['xbr'],
params.getForEqs('mxeqhsse')['ybu'],
params.getForEqs('mxeqhsse')['ybd'],
params.getForEqs('mxeqhsse')['ilg'])
# plot hsse momentm equation alternative
ranshssemomx.plot_momentum_equation_x_2(params.getForProp('prop')['laxis'],
params.getForEqs('mxeqhsse')['xbl'],
params.getForEqs('mxeqhsse')['xbr'],
params.getForEqs('mxeqhsse')['ybu'],
params.getForEqs('mxeqhsse')['ybd'],
params.getForEqs('mxeqhsse')['ilg'])
# plot hsse momentm equation alternative simplified
ranshssemomx.plot_momentum_equation_x_3(params.getForProp('prop')['laxis'],
params.getForEqs('mxeqhsse')['xbl'],
params.getForEqs('mxeqhsse')['xbr'],
params.getForEqs('mxeqhsse')['ybu'],
params.getForEqs('mxeqhsse')['ybd'],
params.getForEqs('mxeqhsse')['ilg'])
def execHssTempEq(self, tke_diss, bconv, tconv):
params = self.params
# instantiate
ranshssetemp = HsseTemperatureEquation(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['ieos'],
params.getForProp('prop')['fext'],
params.getForProp('prop')['intc'],
tke_diss, bconv, tconv,
params.getForProp('prop')['prefix'])
# plot hsse temperature equation
ranshssetemp.plot_tt_equation(params.getForProp('prop')['laxis'],
params.getForEqs('tpeqhsse')['xbl'],
params.getForEqs('tpeqhsse')['xbr'],
params.getForEqs('tpeqhsse')['ybu'],
params.getForEqs('tpeqhsse')['ybd'],
params.getForEqs('tpeqhsse')['ilg'])
# plot hsse temperature equation alternative
ranshssetemp.plot_tt_equation_2(params.getForProp('prop')['laxis'],
params.getForEqs('tpeqhsse')['xbl'],
params.getForEqs('tpeqhsse')['xbr'],
params.getForEqs('tpeqhsse')['ybu'],
params.getForEqs('tpeqhsse')['ybd'],
params.getForEqs('tpeqhsse')['ilg'])
# plot hsse temperature equation alternative simplified
ranshssetemp.plot_tt_equation_3(params.getForProp('prop')['laxis'],
params.getForEqs('tpeqhsse')['xbl'],
params.getForEqs('tpeqhsse')['xbr'],
params.getForEqs('tpeqhsse')['ybu'],
params.getForEqs('tpeqhsse')['ybd'],
params.getForEqs('tpeqhsse')['ilg'])
def execHssLumiEq(self, tke_diss, bconv, tconv):
params = self.params
# instantiate
ranshsselumi = HsseLuminosityEquation(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['ieos'],
params.getForProp('prop')['fext'],
params.getForProp('prop')['intc'],
tke_diss, bconv, tconv,
params.getForProp('prop')['prefix'])
# plot hsse luminosity equation
# ranshsselumi.plot_luminosity_equation(params.getForProp('prop')['laxis'],
# params.getForEqs('lueqhsse')['xbl'],
# params.getForEqs('lueqhsse')['xbr'],
# params.getForEqs('lueqhsse')['ybu'],
# params.getForEqs('lueqhsse')['ybd'],
# params.getForEqs('lueqhsse')['ilg'])
# plot hsse luminosity equation exact
ranshsselumi.plot_luminosity_equation_exact(params.getForProp('prop')['laxis'],
params.getForEqs('lueqhsse')['xbl'],
params.getForEqs('lueqhsse')['xbr'],
params.getForEqs('lueqhsse')['ybu'],
params.getForEqs('lueqhsse')['ybd'],
params.getForEqs('lueqhsse')['ilg'])
# plot hsse luminosity equation exact 2
ranshsselumi.plot_luminosity_equation_exact2(params.getForProp('prop')['laxis'],
params.getForEqs('lueqhsse')['xbl'],
params.getForEqs('lueqhsse')['xbr'],
params.getForEqs('lueqhsse')['ybu'],
params.getForEqs('lueqhsse')['ybd'],
params.getForEqs('lueqhsse')['ilg'])
# plot hsse luminosity equation alternative
# ranshsselumi.plot_luminosity_equation_2(params.getForProp('prop')['laxis'],
# params.getForEqs('lueqhsse')['xbl'],
# params.getForEqs('lueqhsse')['xbr'],
# params.getForEqs('lueqhsse')['ybu'],
# params.getForEqs('lueqhsse')['ybd'],
# params.getForEqs('lueqhsse')['ilg'])
# plot hsse luminosity equation alternative simplified
# ranshsselumi.plot_luminosity_equation_3(params.getForProp('prop')['laxis'],
# params.getForEqs('lueqhsse')['xbl'],
# params.getForEqs('lueqhsse')['xbr'],
# params.getForEqs('lueqhsse')['ybu'],
# params.getForEqs('lueqhsse')['ybd'],
# params.getForEqs('lueqhsse')['ilg'])
def execHssCompEq(self, inuc, element, x, bconv, tconv):
params = self.params
# instantiate
ranshssecomp = HsseXtransportEquation(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['fext'],
inuc, element, bconv, tconv,
params.getForProp('prop')['intc'],
params.getForProp('prop')['prefix'])
ranshssecomp.plot_Xtransport_equation(params.getForProp('prop')['laxis'],
params.getForEqs(x)['xbl'],
params.getForEqs(x)['xbr'],
params.getForEqs(x)['ybu'],
params.getForEqs(x)['ybd'],
params.getForEqs(x)['ilg'])
def execXrho(self, inuc, element, x, bconv, tconv, super_ad_i, super_ad_o):
params = self.params
# instantiate
ransXtra = XtransportEquation(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['plabel'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['fext'],
inuc, element, bconv, tconv, super_ad_i, super_ad_o,
params.getForProp('prop')['intc'],
params.getForProp('prop')['nsdim'],
params.getForProp('prop')['prefix'])
ransXtra.plot_Xrho(params.getForProp('prop')['laxis'],
params.getForEqs(x)['xbl'],
params.getForEqs(x)['xbr'],
params.getForEqs(x)['ybu'],
params.getForEqs(x)['ybd'],
params.getForEqs(x)['ilg'])
# ransXtra.plot_X(params.getForProp('prop')['laxis'], \
# params.getForEqs(x)['xbl'], \
# params.getForEqs(x)['xbr'], \
# params.getForEqs(x)['ybu'], \
# params.getForEqs(x)['ybd'], \
# params.getForEqs(x)['ilg'])
# ransXtra.plot_gradX(params.getForProp('prop')['laxis'],\
# params.getForEqs(x)['xbl'],\
# params.getForEqs(x)['xbr'],\
# params.getForEqs(x)['ybu'],\
# params.getForEqs(x)['ybd'],\
# params.getForEqs(x)['ilg'])
def execX(self, inuc, element, x, bconv, tconv, super_ad_i, super_ad_o):
params = self.params
# instantiate
ransXtra = XtransportEquation(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['plabel'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['fext'],
inuc, element, bconv, tconv, super_ad_i, super_ad_o,
params.getForProp('prop')['intc'],
params.getForProp('prop')['nsdim'],
params.getForProp('prop')['prefix'])
if params.getForProp('prop')['plabel'] == "oburn":
ransXtra.plot_X_with_MM(params.getForProp('prop')['laxis'],
params.getForEqs(x)['xbl'],
params.getForEqs(x)['xbr'],
params.getForEqs(x)['ybu'],
params.getForEqs(x)['ybd'],
params.getForEqs(x)['ilg'])
else:
ransXtra.plot_X(params.getForProp('prop')['laxis'],
params.getForEqs(x)['xbl'],
params.getForEqs(x)['xbr'],
params.getForEqs(x)['ybu'],
params.getForEqs(x)['ybd'],
params.getForEqs(x)['ilg'])
#ransXtra.plot_X_space_time(params.getForProp('prop')['laxis'],
# params.getForEqs(x)['xbl'],
# params.getForEqs(x)['xbr'],
# params.getForEqs(x)['ybu'],
# params.getForEqs(x)['ybd'],
# params.getForEqs(x)['ilg'])
#ransXtra.plot_rhoX_space_time(params.getForProp('prop')['laxis'],
# params.getForEqs(x)['xbl'],
# params.getForEqs(x)['xbr'],
# params.getForEqs(x)['ybu'],
# params.getForEqs(x)['ybd'],
# params.getForEqs(x)['ilg'])
# ransXtra.plot_Xm_with_MM(params.getForProp('prop')['laxis'],
# params.getForEqs(x)['xbl'],
# params.getForEqs(x)['xbr'],
# params.getForEqs(x)['ybu'],
# params.getForEqs(x)['ybd'],
# params.getForEqs(x)['ilg'])
def execXtrsEq(self, inuc, element, x, bconv, tconv, super_ad_i, super_ad_o):
params = self.params
# instantiate
ransXtra = XtransportEquation(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['plabel'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['fext'],
inuc, element, bconv, tconv, super_ad_i, super_ad_o,
params.getForProp('prop')['intc'],
params.getForProp('prop')['nsdim'],
params.getForProp('prop')['prefix'])
ransXtra.plot_Xtransport_equation(params.getForProp('prop')['laxis'],
params.getForEqs(x)['xbl'],
params.getForEqs(x)['xbr'],
params.getForEqs(x)['ybu'],
params.getForEqs(x)['ybd'],
params.getForEqs(x)['ilg'])
def execXtrsEqBar(self, inuc, element, x, bconv, tconv, super_ad_i, super_ad_o):
params = self.params
# instantiate
ransXtra = XtransportEquation(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['plabel'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['fext'],
inuc, element, bconv, tconv, super_ad_i, super_ad_o,
params.getForProp('prop')['intc'],
params.getForProp('prop')['nsdim'],
params.getForProp('prop')['prefix'])
# plot X transport equation integral budget
ransXtra.plot_Xtransport_equation_integral_budget(params.getForProp('prop')['laxis'],
params.getForEqsBar(x)['xbl'],
params.getForEqsBar(x)['xbr'],
params.getForEqsBar(x)['ybu'],
params.getForEqsBar(x)['ybd'])
def execXflxx(self, inuc, element, x, bconv, tconv, tke_diss, tauL, cnvz_in_hp):
params = self.params
# instantiate
ransXflxx = XfluxXequation(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['ieos'],
params.getForProp('prop')['fext'],
inuc, element, bconv, tconv, tke_diss, tauL, cnvz_in_hp,
params.getForProp('prop')['intc'],
params.getForProp('prop')['nsdim'],
params.getForProp('prop')['prefix'])
# ransXflxx.plot_XfluxX(params.getForProp('prop')['laxis'],
# params.getForEqs(x)['xbl'],
# params.getForEqs(x)['xbr'],
# params.getForEqs(x)['ybu'],
# params.getForEqs(x)['ybd'],
# params.getForEqs(x)['ilg'])
ransXflxx.plot_alphaX(params.getForProp('prop')['laxis'],
params.getForEqs(x)['xbl'],
params.getForEqs(x)['xbr'],
params.getForEqs(x)['ybu'],
params.getForEqs(x)['ybd'],
params.getForEqs(x)['ilg'])
# ransXflxx.plot_XfluxxX(params.getForProp('prop')['laxis'],
# params.getForEqs(x)['xbl'],
# params.getForEqs(x)['xbr'],
# params.getForEqs(x)['ybu'],
# params.getForEqs(x)['ybd'],
# params.getForEqs(x)['ilg'])
# ransXflxx.plot_XfluxXRogers1989(params.getForProp('prop')['laxis'],
# params.getForEqs(x)['xbl'],
# params.getForEqs(x)['xbr'],
# params.getForEqs(x)['ybu'],
# params.getForEqs(x)['ybd'],
# params.getForEqs(x)['ilg'])
# ransXflxx.plot_Xflux_gradient(params.getForProp('prop')['laxis'],
# params.getForEqs(x)['xbl'],
# params.getForEqs(x)['xbr'],
# params.getForEqs(x)['ybu'],
# params.getForEqs(x)['ybd'],
# params.getForEqs(x)['ilg'])
# ransXflxx.plot_XfluxX2(params.getForProp('prop')['laxis'],
# params.getForEqs(x)['xbl'],
# params.getForEqs(x)['xbr'],
# params.getForEqs(x)['ybu'],
# params.getForEqs(x)['ybd'],
# params.getForEqs(x)['ilg'])
def execXflxXeq(self, inuc, element, x, bconv, tconv, tke_diss, tauL, cnvz_in_hp):
params = self.params
# instantiate
ransXflxx = XfluxXequation(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['ieos'],
params.getForProp('prop')['fext'],
inuc, element, bconv, tconv, tke_diss, tauL, cnvz_in_hp,
params.getForProp('prop')['intc'],
params.getForProp('prop')['nsdim'],
params.getForProp('prop')['prefix'])
ransXflxx.plot_XfluxX_equation(params.getForProp('prop')['laxis'],
params.getForEqs(x)['xbl'],
params.getForEqs(x)['xbr'],
params.getForEqs(x)['ybu'],
params.getForEqs(x)['ybd'],
params.getForEqs(x)['ilg'])
# ransXflxx.plot_XfluxX_equation2(params.getForProp('prop')['laxis'], \
# params.getForEqs(x)['xbl'], \
# params.getForEqs(x)['xbr'], \
# params.getForEqs(x)['ybu'], \
# params.getForEqs(x)['ybd'], \
# params.getForEqs(x)['ilg'])
def execXflxy(self, inuc, element, x, bconv, tconv, tke_diss, tauL):
params = self.params
# instantiate
ransXflxy = XfluxYequation(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
inuc, element, bconv, tconv, tke_diss, tauL,
params.getForProp('prop')['intc'],
params.getForProp('prop')['prefix'])
ransXflxy.plot_XfluxY(params.getForProp('prop')['laxis'],
params.getForEqs(x)['xbl'],
params.getForEqs(x)['xbr'],
params.getForEqs(x)['ybu'],
params.getForEqs(x)['ybd'],
params.getForEqs(x)['ilg'])
def execXflxYeq(self, inuc, element, x, bconv, tconv, tke_diss, tauL):
params = self.params
# instantiate
ransXflxy = XfluxYequation(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
inuc, element, bconv, tconv, tke_diss, tauL,
params.getForProp('prop')['intc'],
params.getForProp('prop')['prefix'])
ransXflxy.plot_XfluxY_equation(params.getForProp('prop')['laxis'],
params.getForEqs(x)['xbl'],
params.getForEqs(x)['xbr'],
params.getForEqs(x)['ybu'],
params.getForEqs(x)['ybd'],
params.getForEqs(x)['ilg'])
def execXflxz(self, inuc, element, x, bconv, tconv, tke_diss, tauL):
params = self.params
# instantiate
ransXflxz = XfluxZequation(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
inuc, element, bconv, tconv, tke_diss, tauL,
params.getForProp('prop')['intc'],
params.getForProp('prop')['prefix'])
ransXflxz.plot_XfluxZ(params.getForProp('prop')['laxis'],
params.getForEqs(x)['xbl'],
params.getForEqs(x)['xbr'],
params.getForEqs(x)['ybu'],
params.getForEqs(x)['ybd'],
params.getForEqs(x)['ilg'])
def execXflxZeq(self, inuc, element, x, bconv, tconv, tke_diss, tauL):
params = self.params
# instantiate
ransXflxz = XfluxZequation(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
inuc, element, bconv, tconv, tke_diss, tauL,
params.getForProp('prop')['intc'],
params.getForProp('prop')['prefix'])
ransXflxz.plot_XfluxZ_equation(params.getForProp('prop')['laxis'],
params.getForEqs(x)['xbl'],
params.getForEqs(x)['xbr'],
params.getForEqs(x)['ybu'],
params.getForEqs(x)['ybd'],
params.getForEqs(x)['ilg'])
def execXvar(self, inuc, element, x, bconv, tconv):
params = self.params
tauL = 1.
# instantiate
ransXvar = XvarianceEquation(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
inuc, element, tauL, bconv, tconv,
params.getForProp('prop')['intc'],
params.getForProp('prop')['nsdim'],
params.getForProp('prop')['prefix'])
ransXvar.plot_Xvariance(params.getForProp('prop')['laxis'],
params.getForEqs(x)['xbl'],
params.getForEqs(x)['xbr'],
params.getForEqs(x)['ybu'],
params.getForEqs(x)['ybd'],
params.getForEqs(x)['ilg'])
def execXvarEq(self, inuc, element, x, tauL, bconv, tconv):
params = self.params
# instantiate
ransXvar = XvarianceEquation(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
inuc, element, tauL, bconv, tconv,
params.getForProp('prop')['intc'],
params.getForProp('prop')['nsdim'],
params.getForProp('prop')['prefix'])
ransXvar.plot_Xvariance_equation(params.getForProp('prop')['laxis'],
params.getForEqs(x)['xbl'],
params.getForEqs(x)['xbr'],
params.getForEqs(x)['ybu'],
params.getForEqs(x)['ybd'],
params.getForEqs(x)['ilg'])
def execDiff(self, inuc, element, x, lc, uconv, bconv, tconv, tke_diss, tauL, super_ad_i, super_ad_o, cnvz_in_hp):
params = self.params
# instantiate
ransXdiff = Xdiffusivity(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
params.getForProp('prop')['fext'],
params.getForProp('prop')['ieos'],
inuc, element, lc, uconv, bconv, tconv, cnvz_in_hp,
tke_diss, tauL, super_ad_i, super_ad_o,
params.getForProp('prop')['intc'],
params.getForProp('prop')['prefix'])
# ransXdiff.plot_X_Ediffusivity(params.getForProp('prop')['laxis'],
# params.getForEqs(x)['xbl'],
# params.getForEqs(x)['xbr'],
# params.getForEqs(x)['ybu'],
# params.getForEqs(x)['ybd'],
# params.getForEqs(x)['ilg'])
ransXdiff.plot_X_Ediffusivity2(params.getForProp('prop')['laxis'],
params.getForEqs(x)['xbl'],
params.getForEqs(x)['xbr'],
params.getForEqs(x)['ybu'],
params.getForEqs(x)['ybd'],
params.getForEqs(x)['ilg'])
def execXda(self, inuc, element, x, bconv, tconv):
params = self.params
# instantiate
ransXda = XdamkohlerNumber(params.getForProp('prop')['eht_data'],
params.getForProp('prop')['ig'],
inuc, element, bconv, tconv,
params.getForProp('prop')['intc'],
params.getForProp('prop')['prefix'])
ransXda.plot_Xda(params.getForProp('prop')['laxis'],
params.getForEqs(x)['xbl'],
params.getForEqs(x)['xbr'],
params.getForEqs(x)['ybu'],
params.getForEqs(x)['ybd'],
params.getForEqs(x)['ilg'])
def execTke(self, kolmdissrate, bconv, tconv, super_ad_i, super_ad_o):
    """Plot the turbulent kinetic energy profile."""
    prop = self.params.getForProp('prop')   # run-wide properties
    eqs = self.params.getForEqs('tkie')     # axis limits for this plot
    ransTke = TurbulentKineticEnergyEquation(prop['eht_data'], prop['ig'],
                                             prop['intc'], prop['nsdim'],
                                             kolmdissrate, bconv, tconv,
                                             super_ad_i, super_ad_o,
                                             prop['prefix'])
    # NOTE: space-time, evolution and boundary-location renderers are
    # available on ransTke but intentionally disabled here
    ransTke.plot_tke(prop['laxis'], bconv, tconv,
                     eqs['xbl'], eqs['xbr'],
                     eqs['ybu'], eqs['ybd'], eqs['ilg'])
def execTkeEq(self, kolmdissrate, bconv, tconv, super_ad_i, super_ad_o):
    """Plot the turbulent kinetic energy equation budget."""
    prop = self.params.getForProp('prop')   # run-wide properties
    eqs = self.params.getForEqs('tkeeq')    # axis limits for this plot
    ransTke = TurbulentKineticEnergyEquation(prop['eht_data'], prop['ig'],
                                             prop['intc'], prop['nsdim'],
                                             kolmdissrate, bconv, tconv,
                                             super_ad_i, super_ad_o,
                                             prop['prefix'])
    ransTke.plot_tke_equation(prop['laxis'],
                              eqs['xbl'], eqs['xbr'],
                              eqs['ybu'], eqs['ybd'], eqs['ilg'])
def execTkeEqBar(self, kolmdissrate, bconv, tconv, super_ad_i, super_ad_o):
    """Plot the integral budget of the turbulent kinetic energy equation."""
    prop = self.params.getForProp('prop')     # run-wide properties
    eqs = self.params.getForEqs('tkeeqBar')   # axis limits for this plot
    ransTke = TurbulentKineticEnergyEquation(prop['eht_data'], prop['ig'],
                                             prop['intc'], prop['nsdim'],
                                             kolmdissrate, bconv, tconv,
                                             super_ad_i, super_ad_o,
                                             prop['prefix'])
    ransTke.plot_tke_equation_integral_budget(prop['laxis'],
                                              eqs['xbl'], eqs['xbr'],
                                              eqs['ybu'], eqs['ybd'])
def execTkeRadial(self, kolmdissrate, bconv, tconv, super_ad_i, super_ad_o):
    """Plot the radial component of the turbulent kinetic energy."""
    prop = self.params.getForProp('prop')   # run-wide properties
    eqs = self.params.getForEqs('tkieR')    # axis limits for this plot
    ransTkeR = TurbulentKineticEnergyEquationRadial(prop['eht_data'], prop['ig'],
                                                    prop['intc'], prop['nsdim'],
                                                    kolmdissrate, bconv, tconv,
                                                    super_ad_i, super_ad_o,
                                                    prop['prefix'])
    # NOTE: space-time, evolution and boundary-location renderers are
    # available on ransTkeR but intentionally disabled here
    ransTkeR.plot_tkeRadial(prop['laxis'], bconv, tconv,
                            eqs['xbl'], eqs['xbr'],
                            eqs['ybu'], eqs['ybd'], eqs['ilg'])
def execTkeEqRadial(self, kolmdissrate, bconv, tconv, super_ad_i, super_ad_o):
    """Plot the radial turbulent kinetic energy equation budget."""
    prop = self.params.getForProp('prop')   # run-wide properties
    eqs = self.params.getForEqs('tkeReq')   # axis limits for this plot
    ransTkeR = TurbulentKineticEnergyEquationRadial(prop['eht_data'], prop['ig'],
                                                    prop['intc'], prop['nsdim'],
                                                    kolmdissrate, bconv, tconv,
                                                    super_ad_i, super_ad_o,
                                                    prop['prefix'])
    ransTkeR.plot_tkeRadial_equation(prop['laxis'],
                                     eqs['xbl'], eqs['xbr'],
                                     eqs['ybu'], eqs['ybd'], eqs['ilg'])
def execTkeEqRadialBar(self, kolmdissrate, bconv, tconv, super_ad_i, super_ad_o):
    """Plot the integral budget of the radial turbulent kinetic energy equation."""
    prop = self.params.getForProp('prop')      # run-wide properties
    eqs = self.params.getForEqs('tkeReqBar')   # axis limits for this plot
    ransTkeR = TurbulentKineticEnergyEquationRadial(prop['eht_data'], prop['ig'],
                                                    prop['intc'], prop['nsdim'],
                                                    kolmdissrate, bconv, tconv,
                                                    super_ad_i, super_ad_o,
                                                    prop['prefix'])
    ransTkeR.plot_tkeRadial_equation_integral_budget(prop['laxis'],
                                                     eqs['xbl'], eqs['xbr'],
                                                     eqs['ybu'], eqs['ybd'])
def execTkeHorizontal(self, kolmdissrate, bconv, tconv, super_ad_i, super_ad_o):
    """Plot the horizontal component of the turbulent kinetic energy."""
    prop = self.params.getForProp('prop')   # run-wide properties
    eqs = self.params.getForEqs('tkieH')    # axis limits for this plot
    ransTkeH = TurbulentKineticEnergyEquationHorizontal(prop['eht_data'], prop['ig'],
                                                        prop['intc'], prop['nsdim'],
                                                        kolmdissrate, bconv, tconv,
                                                        super_ad_i, super_ad_o,
                                                        prop['prefix'])
    # NOTE: space-time, evolution and boundary-location renderers are
    # available on ransTkeH but intentionally disabled here
    ransTkeH.plot_tkeHorizontal(prop['laxis'], bconv, tconv,
                                eqs['xbl'], eqs['xbr'],
                                eqs['ybu'], eqs['ybd'], eqs['ilg'])
def execTkeEqHorizontal(self, kolmdissrate, bconv, tconv, super_ad_i, super_ad_o):
    """Plot the horizontal turbulent kinetic energy equation budget."""
    prop = self.params.getForProp('prop')   # run-wide properties
    eqs = self.params.getForEqs('tkeHeq')   # axis limits for this plot
    ransTkeH = TurbulentKineticEnergyEquationHorizontal(prop['eht_data'], prop['ig'],
                                                        prop['intc'], prop['nsdim'],
                                                        kolmdissrate, bconv, tconv,
                                                        super_ad_i, super_ad_o,
                                                        prop['prefix'])
    ransTkeH.plot_tkeHorizontal_equation(prop['laxis'],
                                         eqs['xbl'], eqs['xbr'],
                                         eqs['ybu'], eqs['ybd'], eqs['ilg'])
def execTkeEqHorizontalBar(self, kolmdissrate, bconv, tconv, super_ad_i, super_ad_o):
    """Plot the integral budget of the horizontal turbulent kinetic energy equation."""
    prop = self.params.getForProp('prop')      # run-wide properties
    eqs = self.params.getForEqs('tkeHeqBar')   # axis limits for this plot
    ransTkeH = TurbulentKineticEnergyEquationHorizontal(prop['eht_data'], prop['ig'],
                                                        prop['intc'], prop['nsdim'],
                                                        kolmdissrate, bconv, tconv,
                                                        super_ad_i, super_ad_o,
                                                        prop['prefix'])
    ransTkeH.plot_tkeHorizontal_equation_integral_budget(prop['laxis'],
                                                         eqs['xbl'], eqs['xbr'],
                                                         eqs['ybu'], eqs['ybd'])
def execMomx(self, bconv, tconv):
    """Plot the mean X-momentum profile."""
    prop = self.params.getForProp('prop')   # run-wide properties
    eqs = self.params.getForEqs('momex')    # axis limits for this plot
    ransMomx = MomentumEquationX(prop['eht_data'], prop['ig'], prop['fext'],
                                 prop['intc'], prop['nsdim'], prop['prefix'])
    ransMomx.plot_momentum_x(prop['laxis'], bconv, tconv,
                             eqs['xbl'], eqs['xbr'],
                             eqs['ybu'], eqs['ybd'], eqs['ilg'])
def execMomxEq(self, bconv, tconv):
    """Plot the X-momentum equation budget."""
    prop = self.params.getForProp('prop')   # run-wide properties
    eqs = self.params.getForEqs('momxeq')   # axis limits for this plot
    ransMomx = MomentumEquationX(prop['eht_data'], prop['ig'], prop['fext'],
                                 prop['intc'], prop['nsdim'], prop['prefix'])
    ransMomx.plot_momentum_equation_x(prop['laxis'], bconv, tconv,
                                      eqs['xbl'], eqs['xbr'],
                                      eqs['ybu'], eqs['ybd'], eqs['ilg'])
def execMomy(self, bconv, tconv):
    """Plot the mean Y-momentum profile."""
    prop = self.params.getForProp('prop')   # run-wide properties
    eqs = self.params.getForEqs('momey')    # axis limits for this plot
    ransMomy = MomentumEquationY(prop['eht_data'], prop['ig'], prop['fext'],
                                 prop['intc'], prop['prefix'])
    ransMomy.plot_momentum_y(prop['laxis'], bconv, tconv,
                             eqs['xbl'], eqs['xbr'],
                             eqs['ybu'], eqs['ybd'], eqs['ilg'])
def execMomyEq(self, bconv, tconv):
    """Plot the Y-momentum equation budget."""
    prop = self.params.getForProp('prop')   # run-wide properties
    eqs = self.params.getForEqs('momyeq')   # axis limits for this plot
    ransMomy = MomentumEquationY(prop['eht_data'], prop['ig'], prop['fext'],
                                 prop['intc'], prop['prefix'])
    ransMomy.plot_momentum_equation_y(prop['laxis'], bconv, tconv,
                                      eqs['xbl'], eqs['xbr'],
                                      eqs['ybu'], eqs['ybd'], eqs['ilg'])
def execMomz(self, bconv, tconv):
    """Plot the mean Z-momentum profile."""
    prop = self.params.getForProp('prop')   # run-wide properties
    eqs = self.params.getForEqs('momez')    # axis limits for this plot
    ransMomz = MomentumEquationZ(prop['eht_data'], prop['ig'], prop['fext'],
                                 prop['intc'], prop['prefix'])
    ransMomz.plot_momentum_z(prop['laxis'], bconv, tconv,
                             eqs['xbl'], eqs['xbr'],
                             eqs['ybu'], eqs['ybd'], eqs['ilg'])
def execMomzEq(self, bconv, tconv):
    """Plot the Z-momentum equation budget."""
    prop = self.params.getForProp('prop')   # run-wide properties
    eqs = self.params.getForEqs('momzeq')   # axis limits for this plot
    ransMomz = MomentumEquationZ(prop['eht_data'], prop['ig'], prop['fext'],
                                 prop['intc'], prop['prefix'])
    ransMomz.plot_momentum_equation_z(prop['laxis'], bconv, tconv,
                                      eqs['xbl'], eqs['xbr'],
                                      eqs['ybu'], eqs['ybd'], eqs['ilg'])
def execEi(self, bconv, tconv):
    """Plot the mean internal energy profile."""
    prop = self.params.getForProp('prop')   # run-wide properties
    eqs = self.params.getForEqs('eint')     # axis limits for this plot
    tke_diss = 0.  # dissipation not needed for the profile plot
    ransEi = InternalEnergyEquation(prop['eht_data'], prop['ig'], prop['fext'],
                                    prop['intc'], tke_diss, prop['prefix'])
    ransEi.plot_ei(prop['laxis'], bconv, tconv,
                   eqs['xbl'], eqs['xbr'],
                   eqs['ybu'], eqs['ybd'], eqs['ilg'])
def execEiEq(self, tke_diss, bconv, tconv):
    """Plot the internal energy equation budget."""
    prop = self.params.getForProp('prop')   # run-wide properties
    eqs = self.params.getForEqs('eieq')     # axis limits for this plot
    ransEi = InternalEnergyEquation(prop['eht_data'], prop['ig'], prop['fext'],
                                    prop['intc'], tke_diss, prop['prefix'])
    ransEi.plot_ei_equation(prop['laxis'], bconv, tconv,
                            eqs['xbl'], eqs['xbr'],
                            eqs['ybu'], eqs['ybd'], eqs['ilg'])
def execEiFlx(self, bconv, tconv):
    """Plot the internal energy flux profile."""
    prop = self.params.getForProp('prop')   # run-wide properties
    eqs = self.params.getForEqs('eintflx')  # axis limits for this plot
    tke_diss = 0.  # dissipation not needed for the profile plot
    ransEiFlx = InternalEnergyFluxEquation(prop['eht_data'], prop['ig'],
                                           prop['intc'], tke_diss,
                                           prop['prefix'])
    ransEiFlx.plot_fei(prop['laxis'], bconv, tconv,
                       eqs['xbl'], eqs['xbr'],
                       eqs['ybu'], eqs['ybd'], eqs['ilg'])
def execEiFlxEq(self, tke_diss, bconv, tconv):
    """Plot the internal energy flux equation budget (both renderers)."""
    prop = self.params.getForProp('prop')   # run-wide properties
    eqs = self.params.getForEqs('eiflxeq')  # axis limits for this plot
    ransEiFlx = InternalEnergyFluxEquation(prop['eht_data'], prop['ig'],
                                           prop['intc'], tke_diss,
                                           prop['prefix'])
    # two renderings of the same budget, identical axis limits
    ransEiFlx.plot_fei_equation(prop['laxis'], bconv, tconv,
                                eqs['xbl'], eqs['xbr'],
                                eqs['ybu'], eqs['ybd'], eqs['ilg'])
    ransEiFlx.plot_fei_equation2(prop['laxis'], bconv, tconv,
                                 eqs['xbl'], eqs['xbr'],
                                 eqs['ybu'], eqs['ybd'], eqs['ilg'])
def execHHflx(self, bconv, tconv):
    """Plot the enthalpy flux profile."""
    prop = self.params.getForProp('prop')   # run-wide properties
    eqs = self.params.getForEqs('enthflx')  # axis limits for this plot
    tke_diss = 0.  # dissipation not needed for the profile plot
    ransHHflx = EnthalpyFluxEquation(prop['eht_data'], prop['ig'],
                                     prop['ieos'], prop['intc'],
                                     tke_diss, prop['prefix'])
    ransHHflx.plot_fhh(prop['laxis'], bconv, tconv,
                       eqs['xbl'], eqs['xbr'],
                       eqs['ybu'], eqs['ybd'], eqs['ilg'])
def execHHflxEq(self, tke_diss, bconv, tconv):
    """Plot the enthalpy flux equation budget."""
    prop = self.params.getForProp('prop')   # run-wide properties
    eqs = self.params.getForEqs('hhflxeq')  # axis limits for this plot
    ransHHflx = EnthalpyFluxEquation(prop['eht_data'], prop['ig'],
                                     prop['ieos'], prop['intc'],
                                     tke_diss, prop['prefix'])
    ransHHflx.plot_fhh_equation(prop['laxis'], bconv, tconv,
                                eqs['xbl'], eqs['xbr'],
                                eqs['ybu'], eqs['ybd'], eqs['ilg'])
def execHHvar(self, bconv, tconv):
    """Plot the enthalpy variance profile."""
    prop = self.params.getForProp('prop')   # run-wide properties
    eqs = self.params.getForEqs('enthvar')  # axis limits for this plot
    tke_diss = 0.  # dissipation not needed for the profile plot
    tauL = 1.      # placeholder timescale for the profile plot
    ransHHvar = EnthalpyVarianceEquation(prop['eht_data'], prop['ig'],
                                         prop['ieos'], prop['intc'],
                                         tke_diss, tauL, prop['prefix'])
    ransHHvar.plot_sigma_hh(prop['laxis'], bconv, tconv,
                            eqs['xbl'], eqs['xbr'],
                            eqs['ybu'], eqs['ybd'], eqs['ilg'])
def execHHvarEq(self, tke_diss, tauL, bconv, tconv):
    """Plot the enthalpy variance equation budget."""
    prop = self.params.getForProp('prop')   # run-wide properties
    eqs = self.params.getForEqs('hhvareq')  # axis limits for this plot
    ransHHvar = EnthalpyVarianceEquation(prop['eht_data'], prop['ig'],
                                         prop['ieos'], prop['intc'],
                                         tke_diss, tauL, prop['prefix'])
    ransHHvar.plot_sigma_hh_equation(prop['laxis'], bconv, tconv,
                                     eqs['xbl'], eqs['xbr'],
                                     eqs['ybu'], eqs['ybd'], eqs['ilg'])
def execEiVar(self, bconv, tconv):
    """Plot the internal energy variance profile."""
    prop = self.params.getForProp('prop')   # run-wide properties
    eqs = self.params.getForEqs('eintvar')  # axis limits for this plot
    tke_diss = 0.  # dissipation not needed for the profile plot
    tauL = 1.      # placeholder timescale for the profile plot
    ransEiVar = InternalEnergyVarianceEquation(prop['eht_data'], prop['ig'],
                                               prop['ieos'], prop['intc'],
                                               tke_diss, tauL, prop['prefix'])
    ransEiVar.plot_sigma_ei(prop['laxis'], bconv, tconv,
                            eqs['xbl'], eqs['xbr'],
                            eqs['ybu'], eqs['ybd'], eqs['ilg'])
def execEiVarEq(self, tke_diss, tauL, bconv, tconv):
    """Plot the internal energy variance equation budget."""
    prop = self.params.getForProp('prop')   # run-wide properties
    eqs = self.params.getForEqs('eivareq')  # axis limits for this plot
    ransEiVar = InternalEnergyVarianceEquation(prop['eht_data'], prop['ig'],
                                               prop['ieos'], prop['intc'],
                                               tke_diss, tauL, prop['prefix'])
    ransEiVar.plot_sigma_ei_equation(prop['laxis'], bconv, tconv,
                                     eqs['xbl'], eqs['xbr'],
                                     eqs['ybu'], eqs['ybd'], eqs['ilg'])
def execSS(self, bconv, tconv):
    """Plot the mean entropy profile."""
    prop = self.params.getForProp('prop')   # run-wide properties
    eqs = self.params.getForEqs('entr')     # axis limits for this plot
    tke_diss = 0.  # dissipation not needed for the profile plot
    ransSS = EntropyEquation(prop['eht_data'], prop['ig'], prop['fext'],
                             prop['intc'], prop['nsdim'],
                             tke_diss, prop['prefix'])
    ransSS.plot_ss(prop['laxis'], bconv, tconv,
                   eqs['xbl'], eqs['xbr'],
                   eqs['ybu'], eqs['ybd'], eqs['ilg'])
def execSSeq(self, tke_diss, bconv, tconv):
    """Plot the entropy equation budget."""
    prop = self.params.getForProp('prop')   # run-wide properties
    eqs = self.params.getForEqs('sseq')     # axis limits for this plot
    ransSS = EntropyEquation(prop['eht_data'], prop['ig'], prop['fext'],
                             prop['intc'], prop['nsdim'],
                             tke_diss, prop['prefix'])
    ransSS.plot_ss_equation(prop['laxis'], bconv, tconv,
                            eqs['xbl'], eqs['xbr'],
                            eqs['ybu'], eqs['ybd'], eqs['ilg'])
def execSSflx(self, bconv, tconv):
    """Plot the entropy flux profile."""
    prop = self.params.getForProp('prop')   # run-wide properties
    eqs = self.params.getForEqs('entrflx')  # axis limits for this plot
    tke_diss = 0.  # dissipation not needed for the profile plot
    ransSSflx = EntropyFluxEquation(prop['eht_data'], prop['ig'],
                                    prop['intc'], tke_diss,
                                    prop['prefix'])
    ransSSflx.plot_fss(prop['laxis'], bconv, tconv,
                       eqs['xbl'], eqs['xbr'],
                       eqs['ybu'], eqs['ybd'], eqs['ilg'])
def execSSflxEq(self, tke_diss, bconv, tconv):
    """Plot the entropy flux equation budget (both renderers)."""
    prop = self.params.getForProp('prop')   # run-wide properties
    eqs = self.params.getForEqs('ssflxeq')  # axis limits for this plot
    ransSSflx = EntropyFluxEquation(prop['eht_data'], prop['ig'],
                                    prop['intc'], tke_diss,
                                    prop['prefix'])
    # two renderings of the same budget, identical axis limits
    ransSSflx.plot_fss_equation(prop['laxis'], bconv, tconv,
                                eqs['xbl'], eqs['xbr'],
                                eqs['ybu'], eqs['ybd'], eqs['ilg'])
    ransSSflx.plot_fss_equation2(prop['laxis'], bconv, tconv,
                                 eqs['xbl'], eqs['xbr'],
                                 eqs['ybu'], eqs['ybd'], eqs['ilg'])
def execSSvar(self, bconv, tconv):
    """Plot the entropy variance profile."""
    prop = self.params.getForProp('prop')   # run-wide properties
    eqs = self.params.getForEqs('entrvar')  # axis limits for this plot
    tke_diss = 0.  # dissipation not needed for the profile plot
    tauL = 1.      # placeholder timescale for the profile plot
    ransSSvar = EntropyVarianceEquation(prop['eht_data'], prop['ig'],
                                        prop['intc'], prop['nsdim'],
                                        tke_diss, tauL, prop['prefix'])
    ransSSvar.plot_sigma_ss(prop['laxis'], bconv, tconv,
                            eqs['xbl'], eqs['xbr'],
                            eqs['ybu'], eqs['ybd'], eqs['ilg'])
def execSSvarEq(self, tke_diss, tauL, bconv, tconv):
    """Plot the entropy variance equation budget."""
    prop = self.params.getForProp('prop')   # run-wide properties
    eqs = self.params.getForEqs('ssvareq')  # axis limits for this plot
    ransSSvar = EntropyVarianceEquation(prop['eht_data'], prop['ig'],
                                        prop['intc'], prop['nsdim'],
                                        tke_diss, tauL, prop['prefix'])
    ransSSvar.plot_sigma_ss_equation(prop['laxis'], bconv, tconv,
                                     eqs['xbl'], eqs['xbr'],
                                     eqs['ybu'], eqs['ybd'], eqs['ilg'])
def execDDvar(self, bconv, tconv):
    """Plot the density variance profile."""
    prop = self.params.getForProp('prop')   # run-wide properties
    eqs = self.params.getForEqs('densvar')  # axis limits for this plot
    tauL = 1.  # placeholder timescale for the profile plot
    ransDDvar = DensityVarianceEquation(prop['eht_data'], prop['ig'],
                                        prop['intc'], tauL,
                                        prop['prefix'])
    ransDDvar.plot_sigma_dd(prop['laxis'], bconv, tconv,
                            eqs['xbl'], eqs['xbr'],
                            eqs['ybu'], eqs['ybd'], eqs['ilg'])
def execDDvarEq(self, tauL, bconv, tconv):
    """Plot the density variance equation budget."""
    prop = self.params.getForProp('prop')   # run-wide properties
    eqs = self.params.getForEqs('ddvareq')  # axis limits for this plot
    # renamed from the copy-pasted 'ransSSvar' — this is a density, not
    # entropy, variance handler
    ransDDvar = DensityVarianceEquation(prop['eht_data'], prop['ig'],
                                        prop['intc'], tauL,
                                        prop['prefix'])
    ransDDvar.plot_sigma_dd_equation(prop['laxis'], bconv, tconv,
                                     eqs['xbl'], eqs['xbr'],
                                     eqs['ybu'], eqs['ybd'], eqs['ilg'])
def execTMSflx(self, bconv, tconv, lc):
    """Plot the turbulent mass flux profile."""
    prop = self.params.getForProp('prop')   # run-wide properties
    eqs = self.params.getForEqs('tmsflx')   # axis limits for this plot
    ransTMSflx = TurbulentMassFluxEquation(prop['eht_data'], prop['ig'],
                                           prop['intc'], prop['prefix'],
                                           lc)
    ransTMSflx.plot_a(prop['laxis'], bconv, tconv,
                      eqs['xbl'], eqs['xbr'],
                      eqs['ybu'], eqs['ybd'], eqs['ilg'])
def execAeq(self, bconv, tconv, lc):
    """Plot the turbulent mass flux (a) equation budget."""
    prop = self.params.getForProp('prop')   # run-wide properties
    eqs = self.params.getForEqs('aeq')      # axis limits for this plot
    ransTMSflx = TurbulentMassFluxEquation(prop['eht_data'], prop['ig'],
                                           prop['intc'], prop['prefix'],
                                           lc)
    ransTMSflx.plot_a_equation(prop['laxis'], bconv, tconv,
                               eqs['xbl'], eqs['xbr'],
                               eqs['ybu'], eqs['ybd'], eqs['ilg'])
def execDSVC(self, bconv, tconv):
    """Plot the density-specific volume covariance (b) profile."""
    prop = self.params.getForProp('prop')   # run-wide properties
    eqs = self.params.getForEqs('dsvc')     # axis limits for this plot
    ransDSVC = DensitySpecificVolumeCovarianceEquation(prop['eht_data'],
                                                       prop['ig'],
                                                       prop['intc'],
                                                       prop['prefix'])
    ransDSVC.plot_b(prop['laxis'], bconv, tconv,
                    eqs['xbl'], eqs['xbr'],
                    eqs['ybu'], eqs['ybd'], eqs['ilg'])
def execBeq(self, bconv, tconv):
    """Plot the density-specific volume covariance (b) equation budget."""
    prop = self.params.getForProp('prop')   # run-wide properties
    eqs = self.params.getForEqs('beq')      # axis limits for this plot
    ransDSVC = DensitySpecificVolumeCovarianceEquation(prop['eht_data'],
                                                       prop['ig'],
                                                       prop['intc'],
                                                       prop['prefix'])
    ransDSVC.plot_b_equation(prop['laxis'], bconv, tconv,
                             eqs['xbl'], eqs['xbr'],
                             eqs['ybu'], eqs['ybd'], eqs['ilg'])
def execRhoTemp(self, bconv, tconv):
    """Plot the mean temperature and density profiles."""
    prop = self.params.getForProp('prop')   # run-wide properties
    eqs = self.params.getForEqs('ttdd')     # axis limits for this plot
    ransTempRho = TemperatureDensity(prop['eht_data'], prop['ig'],
                                     prop['intc'], prop['prefix'])
    ransTempRho.plot_ttdd(prop['laxis'], bconv, tconv,
                          eqs['xbl'], eqs['xbr'],
                          eqs['ybu'], eqs['ybd'], eqs['ilg'])
def execPressEi(self, bconv, tconv):
    """Plot the mean pressure and internal energy profiles."""
    prop = self.params.getForProp('prop')   # run-wide properties
    eqs = self.params.getForEqs('ppei')     # axis limits for this plot
    ransPressEi = PressureInternalEnergy(prop['eht_data'], prop['ig'],
                                         prop['intc'], prop['prefix'])
    ransPressEi.plot_ppei(prop['laxis'], bconv, tconv,
                          eqs['xbl'], eqs['xbr'],
                          eqs['ybu'], eqs['ybd'], eqs['ilg'])
def execEnuc(self, bconv, tconv):
    """Plot the nuclear energy production profile."""
    prop = self.params.getForProp('prop')   # run-wide properties
    eqs = self.params.getForEqs('enuc')     # axis limits for this plot
    ransEnuc = NuclearEnergyProduction(prop['eht_data'], prop['ig'],
                                       prop['intc'], prop['prefix'])
    # NOTE: plot_enuc and plot_enuc_per_volume are alternative renderers,
    # intentionally disabled in favour of plot_enuc2
    ransEnuc.plot_enuc2(prop['laxis'], bconv, tconv,
                        eqs['xbl'], eqs['xbr'],
                        eqs['ybu'], eqs['ybd'], eqs['ilg'])
def execGrav(self, bconv, tconv):
    """Plot the gravitational acceleration profile."""
    prop = self.params.getForProp('prop')   # run-wide properties
    eqs = self.params.getForEqs('grav')     # axis limits for this plot
    ransGrav = Gravity(prop['eht_data'], prop['ig'],
                       prop['intc'], prop['prefix'])
    ransGrav.plot_grav(prop['laxis'], bconv, tconv,
                       eqs['xbl'], eqs['xbr'],
                       eqs['ybu'], eqs['ybd'], eqs['ilg'])
def execNablas(self, bconv, tconv, super_ad_i, super_ad_o):
    """Plot the temperature gradient (nabla) profiles."""
    prop = self.params.getForProp('prop')   # run-wide properties
    eqs = self.params.getForEqs('nablas')   # axis limits for this plot
    ransNablas = TemperatureGradients(prop['eht_data'], prop['ig'],
                                      prop['fext'], prop['ieos'],
                                      prop['intc'], prop['prefix'])
    # NOTE: plot_nablas2 is an alternative renderer, intentionally disabled
    ransNablas.plot_nablas(prop['laxis'],
                           bconv, tconv, super_ad_i, super_ad_o,
                           eqs['xbl'], eqs['xbr'],
                           eqs['ybu'], eqs['ybd'], eqs['ilg'])
def execDegeneracy(self):
    """Plot the degeneracy parameter (psi) profile."""
    prop = self.params.getForProp('prop')   # run-wide properties
    eqs = self.params.getForEqs('psi')      # axis limits for this plot
    ransDeg = Degeneracy(prop['eht_data'], prop['ig'],
                         prop['intc'], prop['prefix'])
    ransDeg.plot_degeneracy(prop['laxis'],
                            eqs['xbl'], eqs['xbr'],
                            eqs['ybu'], eqs['ybd'], eqs['ilg'])
def execVelocitiesMeanExp(self, bconv, tconv):
    """Plot mean/expansion velocity profiles."""
    prop = self.params.getForProp('prop')   # run-wide properties
    eqs = self.params.getForEqs('velbgr')   # axis limits for this plot
    ransVelmeanExp = VelocitiesMeanExp(prop['eht_data'], prop['ig'],
                                       prop['fext'], prop['intc'],
                                       prop['nsdim'], prop['prefix'])
    ransVelmeanExp.plot_velocities(prop['laxis'], bconv, tconv,
                                   eqs['xbl'], eqs['xbr'],
                                   eqs['ybu'], eqs['ybd'], eqs['ilg'])
def execVelocitiesMLTturb(self, bconv, tconv, uconv, super_ad_i, super_ad_o):
    """Plot MLT/turbulent velocity profiles."""
    prop = self.params.getForProp('prop')   # run-wide properties
    eqs = self.params.getForEqs('velmlt')   # axis limits for this plot
    ransVelMLTturb = VelocitiesMLTturb(prop['eht_data'], prop['ig'],
                                       prop['fext'], prop['ieos'],
                                       bconv, tconv, uconv,
                                       super_ad_i, super_ad_o,
                                       prop['intc'], prop['nsdim'],
                                       prop['prefix'])
    ransVelMLTturb.plot_velocities(prop['laxis'],
                                   eqs['xbl'], eqs['xbr'],
                                   eqs['ybu'], eqs['ybd'], eqs['ilg'])
def execBruntV(self, bconv, tconv):
    """Plot the Brunt-Vaisalla frequency (N^2) profile."""
    prop = self.params.getForProp('prop')   # run-wide properties
    eqs = self.params.getForEqs('nsq')      # axis limits for this plot
    ransBruntV = BruntVaisalla(prop['eht_data'], prop['ig'],
                               prop['ieos'], prop['intc'],
                               prop['prefix'])
    # NOTE: plot_ri (Richardson number) is an alternative renderer,
    # intentionally disabled
    ransBruntV.plot_bruntvaisalla(prop['laxis'], bconv, tconv,
                                  eqs['xbl'], eqs['xbr'],
                                  eqs['ybu'], eqs['ybd'], eqs['ilg'])
def execBuoyancy(self, bconv, tconv):
    """Plot the buoyancy profile."""
    prop = self.params.getForProp('prop')   # run-wide properties
    eqs = self.params.getForEqs('buo')      # axis limits for this plot
    ransBuo = Buoyancy(prop['eht_data'], prop['ig'],
                       prop['ieos'], prop['intc'],
                       prop['prefix'])
    ransBuo.plot_buoyancy(prop['laxis'], bconv, tconv,
                          eqs['xbl'], eqs['xbr'],
                          eqs['ybu'], eqs['ybd'], eqs['ilg'])
def execRelativeRmsFlct(self, bconv, tconv):
    """Plot relative RMS fluctuation profiles."""
    prop = self.params.getForProp('prop')       # run-wide properties
    eqs = self.params.getForEqs('relrmsflct')   # axis limits for this plot
    ransRms = RelativeRMSflct(prop['eht_data'], prop['ig'],
                              prop['ieos'], prop['intc'],
                              prop['nsdim'], prop['prefix'])
    # NOTE: plot_relative_rms_flct2 is an alternative renderer,
    # intentionally disabled
    ransRms.plot_relative_rms_flct(prop['laxis'], bconv, tconv,
                                   eqs['xbl'], eqs['xbr'],
                                   eqs['ybu'], eqs['ybd'], eqs['ilg'])
def execAbarZbar(self, bconv, tconv):
    """Plot mean atomic mass (abar) and charge (zbar) profiles."""
    prop = self.params.getForProp('prop')   # run-wide properties
    eqs = self.params.getForEqs('abzb')     # axis limits for this plot
    ransAZ = AbarZbar(prop['eht_data'], prop['ig'],
                      prop['intc'], prop['prefix'])
    ransAZ.plot_abarzbar(prop['laxis'], bconv, tconv,
                         eqs['xbl'], eqs['xbr'],
                         eqs['ybu'], eqs['ybd'], eqs['ilg'])
def execKe(self, bconv, tconv):
    """Plot the kinetic energy profile."""
    prop = self.params.getForProp('prop')   # run-wide properties
    eqs = self.params.getForEqs('kine')     # axis limits for this plot
    kolmrate = 0.  # Kolmogorov rate not needed for the profile plot
    ransKe = KineticEnergyEquation(prop['eht_data'], prop['ig'],
                                   prop['fext'], prop['intc'],
                                   -kolmrate,  # sign convention: dissipation enters negated
                                   prop['prefix'])
    ransKe.plot_ke(prop['laxis'], bconv, tconv,
                   eqs['xbl'], eqs['xbr'],
                   eqs['ybu'], eqs['ybd'], eqs['ilg'])
def execKeEq(self, kolmrate, bconv, tconv):
    """Plot the kinetic energy equation budget."""
    prop = self.params.getForProp('prop')   # run-wide properties
    eqs = self.params.getForEqs('kieq')     # axis limits for this plot
    ransKe = KineticEnergyEquation(prop['eht_data'], prop['ig'],
                                   prop['fext'], prop['intc'],
                                   -kolmrate,  # sign convention: dissipation enters negated
                                   prop['prefix'])
    ransKe.plot_ke_equation(prop['laxis'], bconv, tconv,
                            eqs['xbl'], eqs['xbr'],
                            eqs['ybu'], eqs['ybd'], eqs['ilg'])
def execTe(self, bconv, tconv):
    # Plot the mean total-energy profile; dissipation rate unused here.
    kolmrate = 0.
    prop = self.params.getForProp('prop')
    axes = self.params.getForEqs('toe')
    ransTe = TotalEnergyEquation(prop['eht_data'], prop['ig'], prop['fext'],
                                 prop['intc'], prop['nsdim'], -kolmrate,
                                 prop['prefix'])
    ransTe.plot_et(prop['laxis'], bconv, tconv, axes['xbl'], axes['xbr'],
                   axes['ybu'], axes['ybd'], axes['ilg'])
def execTeEq(self, kolmrate, bconv, tconv):
    # Plot the total-energy equation budget.
    prop = self.params.getForProp('prop')
    axes = self.params.getForEqs('teeq')
    ransTe = TotalEnergyEquation(prop['eht_data'], prop['ig'], prop['fext'],
                                 prop['intc'], prop['nsdim'], -kolmrate,
                                 prop['prefix'])
    ransTe.plot_et_equation(prop['laxis'], bconv, tconv, axes['xbl'], axes['xbr'],
                            axes['ybu'], axes['ybd'], axes['ilg'])
def execRxx(self, bconv, tconv):
    # Plot the Reynolds-stress xx profile; dissipation rate unused here.
    kolmrate = 0.
    prop = self.params.getForProp('prop')
    axes = self.params.getForEqs('rxx')
    ransRxx = ReynoldsStressXXequation(prop['eht_data'], prop['ig'], prop['fext'],
                                       prop['intc'], -kolmrate, prop['prefix'])
    ransRxx.plot_rxx(prop['laxis'], bconv, tconv, axes['xbl'], axes['xbr'],
                     axes['ybu'], axes['ybd'], axes['ilg'])
def execRxxEq(self, kolmrate, bconv, tconv):
    # Plot the Reynolds-stress xx equation budget.
    prop = self.params.getForProp('prop')
    axes = self.params.getForEqs('rexxeq')
    ransRxx = ReynoldsStressXXequation(prop['eht_data'], prop['ig'], prop['fext'],
                                       prop['intc'], -kolmrate, prop['prefix'])
    ransRxx.plot_rxx_equation(prop['laxis'], bconv, tconv, axes['xbl'],
                              axes['xbr'], axes['ybu'], axes['ybd'], axes['ilg'])
def execRyy(self, bconv, tconv):
    # Plot the Reynolds-stress yy profile; dissipation rate unused here.
    kolmrate = 0.
    prop = self.params.getForProp('prop')
    axes = self.params.getForEqs('ryy')
    ransRyy = ReynoldsStressYYequation(prop['eht_data'], prop['ig'], prop['intc'],
                                       -kolmrate, prop['prefix'])
    ransRyy.plot_ryy(prop['laxis'], bconv, tconv, axes['xbl'], axes['xbr'],
                     axes['ybu'], axes['ybd'], axes['ilg'])
def execRyyEq(self, kolmrate, bconv, tconv):
    # Plot the Reynolds-stress yy equation budget.
    prop = self.params.getForProp('prop')
    axes = self.params.getForEqs('reyyeq')
    ransRyy = ReynoldsStressYYequation(prop['eht_data'], prop['ig'], prop['intc'],
                                       -kolmrate, prop['prefix'])
    ransRyy.plot_ryy_equation(prop['laxis'], bconv, tconv, axes['xbl'],
                              axes['xbr'], axes['ybu'], axes['ybd'], axes['ilg'])
def execRzz(self, bconv, tconv):
    # Plot the Reynolds-stress zz profile; dissipation rate unused here.
    kolmrate = 0.
    prop = self.params.getForProp('prop')
    axes = self.params.getForEqs('rzz')
    ransRzz = ReynoldsStressZZequation(prop['eht_data'], prop['ig'], prop['intc'],
                                       -kolmrate, prop['prefix'])
    ransRzz.plot_rzz(prop['laxis'], bconv, tconv, axes['xbl'], axes['xbr'],
                     axes['ybu'], axes['ybd'], axes['ilg'])
def execRzzEq(self, kolmrate, bconv, tconv):
    # Plot the Reynolds-stress zz equation budget.
    prop = self.params.getForProp('prop')
    axes = self.params.getForEqs('rezzeq')
    ransRzz = ReynoldsStressZZequation(prop['eht_data'], prop['ig'], prop['intc'],
                                       -kolmrate, prop['prefix'])
    ransRzz.plot_rzz_equation(prop['laxis'], bconv, tconv, axes['xbl'],
                              axes['xbr'], axes['ybu'], axes['ybd'], axes['ilg'])
def execAbar(self, bconv, tconv):
    # Plot the mean mass-number (abar) profile.
    prop = self.params.getForProp('prop')
    axes = self.params.getForEqs('abar')
    ransAbar = AbarTransportEquation(prop['eht_data'], prop['ig'], prop['intc'],
                                     prop['nsdim'], prop['prefix'])
    ransAbar.plot_abar(prop['laxis'], bconv, tconv, axes['xbl'], axes['xbr'],
                       axes['ybu'], axes['ybd'], axes['ilg'])
def execAbarEq(self, bconv, tconv):
    # Plot the abar transport-equation budget.
    prop = self.params.getForProp('prop')
    axes = self.params.getForEqs('abreq')
    ransAbar = AbarTransportEquation(prop['eht_data'], prop['ig'], prop['intc'],
                                     prop['nsdim'], prop['prefix'])
    ransAbar.plot_abar_equation(prop['laxis'], bconv, tconv, axes['xbl'],
                                axes['xbr'], axes['ybu'], axes['ybd'], axes['ilg'])
def execFabarx(self, bconv, tconv):
    # Plot the radial abar flux profile.
    prop = self.params.getForProp('prop')
    axes = self.params.getForEqs('abflx')
    ransFabarx = AbarFluxTransportEquation(prop['eht_data'], prop['ig'],
                                           prop['intc'], prop['nsdim'],
                                           prop['prefix'])
    ransFabarx.plot_abarflux(prop['laxis'], bconv, tconv, axes['xbl'],
                             axes['xbr'], axes['ybu'], axes['ybd'], axes['ilg'])
def execFabarxEq(self, bconv, tconv):
    # Plot the abar flux transport-equation budget.
    prop = self.params.getForProp('prop')
    axes = self.params.getForEqs('fabxeq')
    ransFabarx = AbarFluxTransportEquation(prop['eht_data'], prop['ig'],
                                           prop['intc'], prop['nsdim'],
                                           prop['prefix'])
    ransFabarx.plot_abarflux_equation(prop['laxis'], bconv, tconv, axes['xbl'],
                                      axes['xbr'], axes['ybu'], axes['ybd'],
                                      axes['ilg'])
def execZbar(self, bconv, tconv):
    # Plot the mean charge (zbar) profile.
    prop = self.params.getForProp('prop')
    axes = self.params.getForEqs('zbar')
    ransZbar = ZbarTransportEquation(prop['eht_data'], prop['ig'], prop['intc'],
                                     prop['prefix'])
    ransZbar.plot_zbar(prop['laxis'], bconv, tconv, axes['xbl'], axes['xbr'],
                       axes['ybu'], axes['ybd'], axes['ilg'])
def execZbarEq(self, bconv, tconv):
    # Plot the zbar transport-equation budget.
    prop = self.params.getForProp('prop')
    axes = self.params.getForEqs('zbreq')
    ransZbar = ZbarTransportEquation(prop['eht_data'], prop['ig'], prop['intc'],
                                     prop['prefix'])
    ransZbar.plot_zbar_equation(prop['laxis'], bconv, tconv, axes['xbl'],
                                axes['xbr'], axes['ybu'], axes['ybd'], axes['ilg'])
def execFzbarx(self, bconv, tconv):
    # Plot the radial zbar flux profile.
    prop = self.params.getForProp('prop')
    axes = self.params.getForEqs('zbflx')
    ransFzbarx = ZbarFluxTransportEquation(prop['eht_data'], prop['ig'],
                                           prop['intc'], prop['prefix'])
    ransFzbarx.plot_zbarflux(prop['laxis'], bconv, tconv, axes['xbl'],
                             axes['xbr'], axes['ybu'], axes['ybd'], axes['ilg'])
def execFzbarxEq(self, bconv, tconv):
    # Plot the zbar flux transport-equation budget.
    prop = self.params.getForProp('prop')
    axes = self.params.getForEqs('fzbxeq')
    ransFzbarx = ZbarFluxTransportEquation(prop['eht_data'], prop['ig'],
                                           prop['intc'], prop['prefix'])
    ransFzbarx.plot_zbarflux_equation(prop['laxis'], bconv, tconv, axes['xbl'],
                                      axes['xbr'], axes['ybu'], axes['ybd'],
                                      axes['ilg'])
def execPP(self, bconv, tconv):
    # Plot the mean pressure profile; TKE dissipation is not needed for the
    # profile plot, so pass zero.
    tke_diss = 0.
    prop = self.params.getForProp('prop')
    axes = self.params.getForEqs('press')
    ransPP = PressureEquation(prop['eht_data'], prop['ig'], prop['fext'],
                              prop['ieos'], prop['intc'], prop['nsdim'],
                              tke_diss, prop['prefix'])
    ransPP.plot_pp(prop['laxis'], bconv, tconv, axes['xbl'], axes['xbr'],
                   axes['ybu'], axes['ybd'], axes['ilg'])
def execPPeq(self, tke_diss, bconv, tconv):
    # Plot the pressure-equation budget.
    prop = self.params.getForProp('prop')
    axes = self.params.getForEqs('ppeq')
    ransPP = PressureEquation(prop['eht_data'], prop['ig'], prop['fext'],
                              prop['ieos'], prop['intc'], prop['nsdim'],
                              tke_diss, prop['prefix'])
    ransPP.plot_pp_equation(prop['laxis'], bconv, tconv, axes['xbl'],
                            axes['xbr'], axes['ybu'], axes['ybd'], axes['ilg'])
def execPPxflx(self, bconv, tconv):
    # Plot the radial pressure-flux profile; TKE dissipation unused here.
    tke_diss = 0.
    prop = self.params.getForProp('prop')
    axes = self.params.getForEqs('pressxflx')
    ransPPxflx = PressureFluxXequation(prop['eht_data'], prop['ig'], prop['ieos'],
                                       prop['intc'], tke_diss, prop['prefix'])
    ransPPxflx.plot_fppx(prop['laxis'], bconv, tconv, axes['xbl'], axes['xbr'],
                         axes['ybu'], axes['ybd'], axes['ilg'])
def execPPxflxEq(self, tke_diss, bconv, tconv):
    # Plot the x pressure-flux equation budget.
    prop = self.params.getForProp('prop')
    axes = self.params.getForEqs('ppxflxeq')
    ransPPxflx = PressureFluxXequation(prop['eht_data'], prop['ig'], prop['ieos'],
                                       prop['intc'], tke_diss, prop['prefix'])
    ransPPxflx.plot_fppx_equation(prop['laxis'], bconv, tconv, axes['xbl'],
                                  axes['xbr'], axes['ybu'], axes['ybd'],
                                  axes['ilg'])
def execPPyflx(self, bconv, tconv):
    # Plot the y pressure-flux profile; TKE dissipation unused here.
    tke_diss = 0.
    prop = self.params.getForProp('prop')
    axes = self.params.getForEqs('pressyflx')
    ransPPyflx = PressureFluxYequation(prop['eht_data'], prop['ig'], prop['ieos'],
                                       prop['intc'], tke_diss, prop['prefix'])
    ransPPyflx.plot_fppy(prop['laxis'], bconv, tconv, axes['xbl'], axes['xbr'],
                         axes['ybu'], axes['ybd'], axes['ilg'])
def execPPyflxEq(self, tke_diss, bconv, tconv):
    # Plot the y pressure-flux equation budget.
    prop = self.params.getForProp('prop')
    axes = self.params.getForEqs('ppyflxeq')
    ransPPyflx = PressureFluxYequation(prop['eht_data'], prop['ig'], prop['ieos'],
                                       prop['intc'], tke_diss, prop['prefix'])
    ransPPyflx.plot_fppy_equation(prop['laxis'], bconv, tconv, axes['xbl'],
                                  axes['xbr'], axes['ybu'], axes['ybd'],
                                  axes['ilg'])
def execPPzflx(self, bconv, tconv):
    # Plot the z pressure-flux profile; TKE dissipation unused here.
    tke_diss = 0.
    prop = self.params.getForProp('prop')
    axes = self.params.getForEqs('presszflx')
    ransPPzflx = PressureFluxZequation(prop['eht_data'], prop['ig'], prop['ieos'],
                                       prop['intc'], tke_diss, prop['prefix'])
    ransPPzflx.plot_fppz(prop['laxis'], bconv, tconv, axes['xbl'], axes['xbr'],
                         axes['ybu'], axes['ybd'], axes['ilg'])
def execPPzflxEq(self, tke_diss, bconv, tconv):
    # Plot the z pressure-flux equation budget.
    prop = self.params.getForProp('prop')
    axes = self.params.getForEqs('ppzflxeq')
    ransPPzflx = PressureFluxZequation(prop['eht_data'], prop['ig'], prop['ieos'],
                                       prop['intc'], tke_diss, prop['prefix'])
    ransPPzflx.plot_fppz_equation(prop['laxis'], bconv, tconv, axes['xbl'],
                                  axes['xbr'], axes['ybu'], axes['ybd'],
                                  axes['ilg'])
def execPPvar(self, bconv, tconv):
    # Plot the pressure-variance profile; dissipation and timescale are not
    # needed for the profile plot, so pass neutral values.
    tke_diss = 0.
    tauL = 1.
    prop = self.params.getForProp('prop')
    axes = self.params.getForEqs('pressvar')
    ransPPvar = PressureVarianceEquation(prop['eht_data'], prop['ig'],
                                         prop['ieos'], prop['intc'],
                                         tke_diss, tauL, prop['prefix'])
    ransPPvar.plot_sigma_pp(prop['laxis'], bconv, tconv, axes['xbl'],
                            axes['xbr'], axes['ybu'], axes['ybd'], axes['ilg'])
def execPPvarEq(self, tke_diss, tauL, bconv, tconv):
    # Plot the pressure-variance equation budget.
    prop = self.params.getForProp('prop')
    axes = self.params.getForEqs('ppvareq')
    ransPPvar = PressureVarianceEquation(prop['eht_data'], prop['ig'],
                                         prop['ieos'], prop['intc'],
                                         tke_diss, tauL, prop['prefix'])
    ransPPvar.plot_sigma_pp_equation(prop['laxis'], bconv, tconv, axes['xbl'],
                                     axes['xbr'], axes['ybu'], axes['ybd'],
                                     axes['ilg'])
def execTT(self, bconv, tconv):
    # Plot the mean temperature profile; TKE dissipation unused here.
    tke_diss = 0.
    prop = self.params.getForProp('prop')
    axes = self.params.getForEqs('temp')
    ransTT = TemperatureEquation(prop['eht_data'], prop['ig'], prop['fext'],
                                 prop['ieos'], prop['intc'], prop['nsdim'],
                                 tke_diss, prop['prefix'])
    ransTT.plot_tt(prop['laxis'], bconv, tconv, axes['xbl'], axes['xbr'],
                   axes['ybu'], axes['ybd'], axes['ilg'])
def execTTeq(self, tke_diss, bconv, tconv):
    # Plot the temperature-equation budget.
    prop = self.params.getForProp('prop')
    axes = self.params.getForEqs('tteq')
    ransTT = TemperatureEquation(prop['eht_data'], prop['ig'], prop['fext'],
                                 prop['ieos'], prop['intc'], prop['nsdim'],
                                 tke_diss, prop['prefix'])
    ransTT.plot_tt_equation(prop['laxis'], bconv, tconv, axes['xbl'],
                            axes['xbr'], axes['ybu'], axes['ybd'], axes['ilg'])
def execTTvar(self, bconv, tconv):
    # Plot the temperature-variance profile; dissipation and timescale are
    # not needed for the profile plot, so pass neutral values.
    tke_diss = 0.
    tauL = 1.
    prop = self.params.getForProp('prop')
    axes = self.params.getForEqs('tempvar')
    ransTTvar = TemperatureVarianceEquation(prop['eht_data'], prop['ig'],
                                            prop['fext'], prop['ieos'],
                                            prop['intc'], tke_diss, tauL,
                                            prop['prefix'])
    ransTTvar.plot_sigma_tt(prop['laxis'], bconv, tconv, axes['xbl'],
                            axes['xbr'], axes['ybu'], axes['ybd'], axes['ilg'])
def execTTvarEq(self, tke_diss, tauL, bconv, tconv):
    # Plot the temperature-variance equation budget.
    prop = self.params.getForProp('prop')
    axes = self.params.getForEqs('ttvareq')
    ransTTvar = TemperatureVarianceEquation(prop['eht_data'], prop['ig'],
                                            prop['fext'], prop['ieos'],
                                            prop['intc'], tke_diss, tauL,
                                            prop['prefix'])
    ransTTvar.plot_sigma_tt_equation(prop['laxis'], bconv, tconv, axes['xbl'],
                                     axes['xbr'], axes['ybu'], axes['ybd'],
                                     axes['ilg'])
def execTTflx(self, bconv, tconv):
    # Plot the temperature-flux profile; TKE dissipation unused here.
    tke_diss = 0.
    prop = self.params.getForProp('prop')
    axes = self.params.getForEqs('tempflx')
    ransTTflx = TemperatureFluxEquation(prop['eht_data'], prop['ig'],
                                        prop['fext'], prop['ieos'],
                                        prop['intc'], tke_diss, prop['prefix'])
    ransTTflx.plot_ftt(prop['laxis'], bconv, tconv, axes['xbl'], axes['xbr'],
                       axes['ybu'], axes['ybd'], axes['ilg'])
def execTTflxEq(self, tke_diss, bconv, tconv):
    # Plot the temperature-flux equation budget.
    prop = self.params.getForProp('prop')
    axes = self.params.getForEqs('ttflxeq')
    ransTTflx = TemperatureFluxEquation(prop['eht_data'], prop['ig'],
                                        prop['fext'], prop['ieos'],
                                        prop['intc'], tke_diss, prop['prefix'])
    ransTTflx.plot_ftt_equation(prop['laxis'], bconv, tconv, axes['xbl'],
                                axes['xbr'], axes['ybu'], axes['ybd'],
                                axes['ilg'])
def execHH(self, bconv, tconv):
    # Plot the mean enthalpy profile; TKE dissipation unused here.
    tke_diss = 0.
    prop = self.params.getForProp('prop')
    axes = self.params.getForEqs('enth')
    ransHH = EnthalpyEquation(prop['eht_data'], prop['ig'], prop['fext'],
                              prop['ieos'], prop['intc'], prop['nsdim'],
                              tke_diss, prop['prefix'])
    ransHH.plot_hh(prop['laxis'], bconv, tconv, axes['xbl'], axes['xbr'],
                   axes['ybu'], axes['ybd'], axes['ilg'])
def execHHeq(self, tke_diss, bconv, tconv):
    # Plot the enthalpy-equation budget.
    prop = self.params.getForProp('prop')
    axes = self.params.getForEqs('hheq')
    ransHH = EnthalpyEquation(prop['eht_data'], prop['ig'], prop['fext'],
                              prop['ieos'], prop['intc'], prop['nsdim'],
                              tke_diss, prop['prefix'])
    ransHH.plot_hh_equation(prop['laxis'], bconv, tconv, axes['xbl'],
                            axes['xbr'], axes['ybu'], axes['ybd'], axes['ilg'])
def execFtvfhX(self, bconv, tconv):
    # Plot the full-turbulence velocity-field hypothesis (x component).
    # Note: bconv/tconv go to the constructor, not the plot call.
    prop = self.params.getForProp('prop')
    axes = self.params.getForEqs('ftvfh_x')
    ransFtvfhX = FullTurbulenceVelocityFieldHypothesisX(
        prop['eht_data'], prop['ig'], prop['fext'], prop['ieos'],
        prop['intc'], prop['prefix'], bconv, tconv)
    ransFtvfhX.plot_ftvfhX_equation(prop['laxis'], axes['xbl'], axes['xbr'],
                                    axes['ybu'], axes['ybd'], axes['ilg'])
def execFtvfhY(self, bconv, tconv):
    # Plot the full-turbulence velocity-field hypothesis (y component).
    prop = self.params.getForProp('prop')
    axes = self.params.getForEqs('ftvfh_y')
    ransFtvfhY = FullTurbulenceVelocityFieldHypothesisY(
        prop['eht_data'], prop['ig'], prop['fext'], prop['ieos'],
        prop['intc'], prop['prefix'], bconv, tconv)
    ransFtvfhY.plot_ftvfhY_equation(prop['laxis'], axes['xbl'], axes['xbr'],
                                    axes['ybu'], axes['ybd'], axes['ilg'])
def execFtvfhZ(self, bconv, tconv):
    # Plot the full-turbulence velocity-field hypothesis (z component).
    prop = self.params.getForProp('prop')
    axes = self.params.getForEqs('ftvfh_z')
    ransFtvfhZ = FullTurbulenceVelocityFieldHypothesisZ(
        prop['eht_data'], prop['ig'], prop['fext'], prop['ieos'],
        prop['intc'], prop['prefix'], bconv, tconv)
    ransFtvfhZ.plot_ftvfhZ_equation(prop['laxis'], axes['xbl'], axes['xbr'],
                                    axes['ybu'], axes['ybd'], axes['ilg'])
def execUxfpd(self, bconv, tconv):
    # Plot the ux-fpd identity check.
    prop = self.params.getForProp('prop')
    axes = self.params.getForEqs('uxfpd')
    ransUxfpd = UxfpdIdentity(prop['eht_data'], prop['ig'], prop['fext'],
                              prop['ieos'], prop['intc'], prop['prefix'],
                              bconv, tconv)
    ransUxfpd.plot_uxfpd_identity(prop['laxis'], axes['xbl'], axes['xbr'],
                                  axes['ybu'], axes['ybd'], axes['ilg'])
def execUyfpd(self, bconv, tconv):
    # Plot the uy-fpd identity check.
    prop = self.params.getForProp('prop')
    axes = self.params.getForEqs('uyfpd')
    ransUyfpd = UyfpdIdentity(prop['eht_data'], prop['ig'], prop['fext'],
                              prop['ieos'], prop['intc'], prop['prefix'],
                              bconv, tconv)
    ransUyfpd.plot_uyfpd_identity(prop['laxis'], axes['xbl'], axes['xbr'],
                                  axes['ybu'], axes['ybd'], axes['ilg'])
def execUzfpd(self, bconv, tconv):
    # Plot the uz-fpd identity check.
    prop = self.params.getForProp('prop')
    axes = self.params.getForEqs('uzfpd')
    ransUzfpd = UzfpdIdentity(prop['eht_data'], prop['ig'], prop['fext'],
                              prop['ieos'], prop['intc'], prop['prefix'],
                              bconv, tconv)
    ransUzfpd.plot_uzfpd_identity(prop['laxis'], axes['xbl'], axes['xbr'],
                                  axes['ybu'], axes['ybd'], axes['ilg'])
def execDivu(self, bconv, tconv):
    # Plot the mean dilatation (div u) profile.
    prop = self.params.getForProp('prop')
    axes = self.params.getForEqs('divu')
    ransDivu = DivuDilatation(prop['eht_data'], prop['ig'], prop['fext'],
                              prop['ieos'], prop['intc'], prop['prefix'],
                              bconv, tconv)
    ransDivu.plot_divu(prop['laxis'], axes['xbl'], axes['xbr'],
                       axes['ybu'], axes['ybd'], axes['ilg'])
def SetMatplotlibParams(self):
    """Set standard matplotlib values for publication-quality figures."""
    # Serif font and heavier lines; usetex/Helvetica variants intentionally
    # left disabled.
    plt.rc('font', **{'family': 'serif', 'serif': ['Times New Roman']})
    plt.rc('font', size=16.)
    plt.rc('lines', linewidth=2, markeredgewidth=2., markersize=12)
    plt.rc('axes', linewidth=1.5)
    plt.rcParams.update({'xtick.major.size': 8.,
                         'xtick.minor.size': 4.,
                         'figure.subplot.bottom': 0.15,
                         'figure.subplot.left': 0.17,
                         'figure.subplot.right': 0.85,
                         'figure.max_open_warning': 0})
|
mmicromegasREPO_NAMEransXPATH_START.@ransX_extracted@ransX-master@UTILS@RANSX@MasterPlot.py@.PATH_END.py
|
{
"filename": "naive_multiband.py",
"repo_name": "astroML/gatspy",
"repo_path": "gatspy_extracted/gatspy-master/gatspy/periodic/naive_multiband.py",
"type": "Python"
}
|
"""
Naive Multiband Methods
This basically amounts to a band-by-band single band approach, followed by
some sort of majority vote among the peaks of the individual periodograms.
"""
from __future__ import division, print_function, absolute_import
__all__ = ['NaiveMultiband']
import numpy as np
from scipy.stats import mode
from .modeler import PeriodicModelerMultiband
from .lomb_scargle import LombScargle
def mode_in_range(a, axis=0, tol=1E-3):
    """Find the mode of values to within a certain range.

    Values are binned with width *tol*; the mean (along *axis*) of the
    entries falling in the most populated bin is returned.
    """
    binned = a // tol
    modal_bin, _ = mode(binned, axis)
    in_modal_bin = (binned == modal_bin)
    # mean of the selected entries along the requested axis
    return np.sum(a * in_modal_bin, axis) / np.sum(in_modal_bin, axis)
class NaiveMultiband(PeriodicModelerMultiband):
    """Naive version of multiband fitting

    An independent single-band model is fit to the data from each band;
    multiband quantities (e.g. ``best_period``) are derived by an
    approximate majority vote among the per-band best periods.

    Parameters
    ----------
    optimizer : PeriodicOptimizer instance
        Optimizer to use to find the best period. If not specified, the
        LinearScanOptimizer will be used.
    BaseModel : PeriodicModeler instance
        Single-band model to use on data from each band.
    fit_period : bool (optional)
        If True, then fit for the best period when fit() method is called.
    optimizer_kwds : dict (optional)
        Dictionary of keyword arguments for constructing the optimizer. For
        example, silence optimizer output with `optimizer_kwds={"quiet": True}`.
    *args, **kwargs :
        additional arguments are passed to BaseModel on construction.
    """
    def __init__(self, optimizer=None, BaseModel=LombScargle,
                 fit_period=False, optimizer_kwds=None,
                 *args, **kwargs):
        self.BaseModel = BaseModel
        self.args = args
        self.kwargs = kwargs
        PeriodicModelerMultiband.__init__(self, optimizer,
                                          fit_period=fit_period,
                                          optimizer_kwds=optimizer_kwds)

    def _fit(self, t, y, dy, filts):
        # Fit one BaseModel per unique filter, on that filter's points only.
        t, y, dy, filts = np.broadcast_arrays(t, y, dy, filts)
        unique_filts = np.unique(filts)
        masks = [(filts == filt) for filt in unique_filts]
        self.models_ = dict([(filt,
                              self.BaseModel(self.optimizer,
                                             *self.args,
                                             **self.kwargs).fit(t[mask],
                                                                y[mask],
                                                                dy[mask]))
                             for filt, mask in zip(unique_filts, masks)])

    def _predict(self, t, filts, period):
        # Delegate each sample to the model fit for its own band.
        result = np.zeros_like(t)
        for filt, model in self.models_.items():
            mask = (filts == filt)
            result[mask] = model.predict(t[mask], period=period)
        return result

    def _score(self, periods):
        # A single combined score is ill-defined for this naive approach;
        # use scores() for the per-band periodograms instead.
        raise NotImplementedError("score is not implemented for NaiveMultiband")

    def scores(self, periods):
        """Compute the scores under the various models

        Parameters
        ----------
        periods : array_like
            array of periods at which to compute scores

        Returns
        -------
        scores : dict
            Dictionary of scores. Dictionary keys are the unique filter names
            passed to fit()
        """
        return dict([(filt, model.score(periods))
                     for (filt, model) in self.models_.items()])

    def best_periods(self):
        """Compute the best period of each single-band model

        Returns
        -------
        best_periods : dict
            Dictionary of best periods. Dictionary keys are the unique filter
            names passed to fit()
        """
        # Share this object's optimizer with every single-band model before
        # asking each one for its best period.
        for (key, model) in self.models_.items():
            model.optimizer = self.optimizer
        return dict((filt, model.best_period)
                    for (filt, model) in self.models_.items())

    @property
    def best_period(self):
        # Approximate majority vote among the per-band best periods.
        best_periods = np.asarray(list(self.best_periods().values()))
        return mode_in_range(best_periods, tol=1E-2)
|
astroMLREPO_NAMEgatspyPATH_START.@gatspy_extracted@gatspy-master@gatspy@periodic@naive_multiband.py@.PATH_END.py
|
{
"filename": "README.md",
"repo_name": "yaojian95/ForSEplus",
"repo_path": "ForSEplus_extracted/ForSEplus-main/README.md",
"type": "Markdown"
}
|
# ForSEplus

\*Different realizations of small scales at 12 arcminutes in the second column.
- Simulate **stochastic, polarized(QU), non-Gaussian** thermal dust emission at **353GHz** up to **3 arcminutes**.
- plus version of https://github.com/ai4cmb/ForSE.
- If you want to use the generated maps directly, we also offer some pre-generated realizations of the maps (download link to be added).
# Installations
## Dependencies
[](https://www.astropy.org)
[](https://healpy.readthedocs.io/en/latest/)
[](https://www.tensorflow.org)
[](https://namaster.readthedocs.io/en/latest/)
[](https://pypi.org/project/reproject/)
[](http://numba.pydata.org/)
- Namaster: to compute power spectra to normalize the small scales from neural networks
- reproject: only needed to perform projection from Healpix maps to flat patches and vice versa
- numba: only needed to accelearte the calculation of Minkowski functionals for a given patch
We assume you already have your own Python virtual environment.
The first thing to do is to install the dependencies and the main difficulty is to install the `Namaster` package, which has its own dependencies. You can install it with `conda` and if you want to install `Namaster` dependencies from source codes, we prepared a [install_dependencies.sh](install_dependencies.sh) file for you.
With all dependencies solved, you have two ways to install this package.
## from source
Download the source code, then
(venv) $ python -m pip install . --user
## from pip (not updated with the latest source code yet)
(venv) $ python -m pip install ForSEplus --user
# Ancillary data (Compulsory)
The zipped complete ancillary data can be downloaded [here](https://drive.google.com/file/d/1dqRQBc2832HpQHQx6dQzkwTkT71kSQiw/view?usp=sharing) (4GB, 9.4GB after decompression). Then decompress the files into a directory, whose path should be given to `dir_data` when running the pipeline. If you are on NERSC, you can use `dir_data = /pscratch/sd/j/jianyao/ForSE_plus_data_32/`, which I have already made accessible to all users on NERSC.
# Usage
Once installed, import the `forseplus` as:
from ForSEplus.forseplus_class import forseplus
Then initialize an instance to generate maps:
fp = forseplus(dir_data = '/pscratch/sd/j/jianyao/ForSE_plus_data_32/',
return_12 = True,
go_3 = True,
correct_EB = False,
seed = None)
You can choose to return stochastic maps at 12 arcmin only (`return_12 = True, go_3 = False`), or maps at 3 arcmin only (`return_12 = False, go_3 = True`), or both (`return_12 = True, go_3 = True`); in any case the 12-arcmin maps will be generated, since they are the input used to generate the maps at 3 arcmin.
Then you can run:
maps_12amin, maps_3amin = fp.run()
to obtain QU Maps, which will be in `uK_CMB` units. Returned QU maps at 12amin will be (2, 12\*2048\*\*2), 3amin will be (2, 12*4096\*\*2).
If set `correct_EB = True`, it will apply the E/B ratio correction proposed in Yao et al. to artificially tune the Cl_EE/Cl_BB = 2 for the generated small scales. Otherwise, Cl_EE/Cl_BB = 1. Refer to Section 4.1 of Yao et al. for more details.
The `seed` parameter defines the random seed used to generate the random components which are added to the fixed, observed large scales. If you want to generate many realizations, just put the `fp.run()` inside a loop and make sure `seed = None`.
## Memory needed (Peak memory) and time cost (with `correct_EB = False`, test on Perlmutter Jupyter login node)
| Case | CPU | GPU | Time |
| :--------------: | :---: | :-: | :------: |
| only 12amin | 16GB | 10GB | ~15 secs |
| also go to 3amin | 63.62GB* | 18GB | ~5 mins |
\* This number is after a careful optimization which doesn't exceed the memory limit (64GB) on Perlmutter login node:-). However if you have other notebooks running, there will be a 'kernel restarting' error caused by the memory limit, so you may open an Exclusive GPU node or submit the job to a compute node (which of course needs allocation hours, which is a bit troublesome(っ◞‸◟ c), but we will continue to optimize the memory usage during the running).
# Citation
[](https://doi.org/10.1051/0004-6361/202449827)
@ARTICLE{2024A&A...686A.290Y,
author = {{Yao}, Jian and {Krachmalnicoff}, Nicoletta and {Foschi}, Marianna and {Puglisi}, Giuseppe and {Baccigalupi}, Carlo},
title = "{FORSE+: Simulating non-Gaussian CMB foregrounds at 3 arcmin in a stochastic way based on a generative adversarial network}",
journal = {\aap},
keywords = {methods: data analysis, cosmic background radiation, diffuse radiation, Astrophysics - Cosmology and Nongalactic Astrophysics, Astrophysics - Instrumentation and Methods for Astrophysics},
year = 2024,
month = jun,
volume = {686},
eid = {A290},
pages = {A290},
doi = {10.1051/0004-6361/202449827},
archivePrefix = {arXiv},
eprint = {2406.14519},
primaryClass = {astro-ph.CO},
adsurl = {https://ui.adsabs.harvard.edu/abs/2024A&A...686A.290Y},
adsnote = {Provided by the SAO/NASA Astrophysics Data System}
}
@ARTICLE{2021ApJ...911...42K,
author = {{Krachmalnicoff}, Nicoletta and {Puglisi}, Giuseppe},
title = "{ForSE: A GAN-based Algorithm for Extending CMB Foreground Models to Subdegree Angular Scales}",
journal = {\apj},
keywords = {Cosmic microwave background radiation, Neural networks, Diffuse radiation, 322, 1933, 383, Astrophysics - Cosmology and Nongalactic Astrophysics, Astrophysics - Instrumentation and Methods for Astrophysics},
year = 2021,
month = apr,
volume = {911},
number = {1},
eid = {42},
pages = {42},
doi = {10.3847/1538-4357/abe71c},
archivePrefix = {arXiv},
eprint = {2011.02221},
primaryClass = {astro-ph.CO},
adsurl = {https://ui.adsabs.harvard.edu/abs/2021ApJ...911...42K},
adsnote = {Provided by the SAO/NASA Astrophysics Data System}
}
|
yaojian95REPO_NAMEForSEplusPATH_START.@ForSEplus_extracted@ForSEplus-main@README.md@.PATH_END.py
|
{
"filename": "SampleFileUtil.py",
"repo_name": "cosmo-ethz/CosmoHammer",
"repo_path": "CosmoHammer_extracted/CosmoHammer-master/cosmoHammer/util/SampleFileUtil.py",
"type": "Python"
}
|
import pickle
import numpy as np
import cosmoHammer.Constants as c
class SampleFileUtil(object):
    """
    Util for handling sample files.

    Only the master instance opens the burn-in and sampling output files;
    non-master instances hold no file handles.

    :param filePrefix: the prefix to use
    :param master: True if the sampler instance is the master
    :param reuseBurnin: True if the burn in data from a previous run should be used
    """

    def __init__(self, filePrefix, master=True, reuseBurnin=False):
        self.filePrefix = filePrefix

        # Always define the handles so that close() is safe to call on
        # non-master instances (previously this raised AttributeError).
        self.samplesFileBurnin = None
        self.probFileBurnin = None
        self.samplesFile = None
        self.probFile = None

        if master:
            # When reusing a previous burn-in, open read-only so the old
            # data can be imported; otherwise truncate and write fresh.
            mode = "r" if reuseBurnin else "w"
            self.samplesFileBurnin = open(self.filePrefix + c.BURNIN_SUFFIX, mode)
            self.probFileBurnin = open(self.filePrefix + c.BURNIN_PROB_SUFFIX, mode)
            self.samplesFile = open(self.filePrefix + c.FILE_SUFFIX, "w")
            self.probFile = open(self.filePrefix + c.PROB_SUFFIX, "w")

    def importFromFile(self, filePath):
        """Load a whitespace-separated table of floats from ``filePath``."""
        return np.loadtxt(filePath, dtype=float)

    def storeRandomState(self, filePath, randomState):
        """Pickle ``randomState`` to ``filePath``."""
        with open(filePath, 'wb') as f:
            pickle.dump(randomState, f)

    def importRandomState(self, filePath):
        """Unpickle and return a state previously saved with storeRandomState."""
        with open(filePath, 'rb') as f:
            state = pickle.load(f)
        return state

    def persistBurninValues(self, pos, prob, data):
        """Append burn-in walker positions and likelihoods to the burn-in files."""
        self.persistValues(self.samplesFileBurnin, self.probFileBurnin, pos, prob, data)

    def persistSamplingValues(self, pos, prob, data):
        """Append sampling walker positions and likelihoods to the sample files."""
        self.persistValues(self.samplesFile, self.probFile, pos, prob, data)

    def persistValues(self, posFile, probFile, pos, prob, data):
        """
        Writes the walker positions and the likelihood to the disk.

        :param posFile: open file for walker positions (one walker per line,
            tab-separated values)
        :param probFile: open file for likelihoods (one value per line)
        :param pos: 2-d iterable of walker positions
        :param prob: iterable of likelihood values
        :param data: unused here; kept for interface compatibility
        """
        posFile.write("\n".join("\t".join(str(q) for q in p) for p in pos))
        posFile.write("\n")
        posFile.flush()

        probFile.write("\n".join(str(p) for p in prob))
        probFile.write("\n")
        probFile.flush()

    def close(self):
        """Close any files opened by this instance; a no-op for non-masters."""
        for fileHandle in (self.samplesFileBurnin, self.probFileBurnin,
                           self.samplesFile, self.probFile):
            if fileHandle is not None:
                fileHandle.close()

    def __str__(self, *args, **kwargs):
        return "SampleFileUtil"
|
cosmo-ethzREPO_NAMECosmoHammerPATH_START.@CosmoHammer_extracted@CosmoHammer-master@cosmoHammer@util@SampleFileUtil.py@.PATH_END.py
|
{
"filename": "ruth4.py",
"repo_name": "adrn/gala",
"repo_path": "gala_extracted/gala-main/gala/integrate/pyintegrators/ruth4.py",
"type": "Python"
}
|
""" Leapfrog integration. """
# Project
from ..core import Integrator
from ..timespec import parse_time_specification
__all__ = ["Ruth4Integrator"]
class Ruth4Integrator(Integrator):
    r"""
    A 4th order symplectic integrator.

    Given a function for computing time derivatives of the phase-space
    coordinates, this object computes the orbit at specified times.

    .. seealso::

        - https://en.wikipedia.org/wiki/Symplectic_integrator#A_fourth-order_example

    Naming convention for variables::

        im1 = i-1
        im1_2 = i-1/2
        ip1 = i+1
        ip1_2 = i+1/2

    Examples
    --------

    Using ``q`` as our coordinate variable and ``p`` as the conjugate
    momentum, we want to numerically solve for an orbit in the
    potential (Hamiltonian)

    .. math::

        \Phi &= \frac{1}{2}q^2\\
        H(q, p) &= \frac{1}{2}(p^2 + q^2)

    In this system,

    .. math::

        \dot{q} &= \frac{\partial \Phi}{\partial p} = p \\
        \dot{p} &= -\frac{\partial \Phi}{\partial q} = -q

    We will use the variable ``w`` to represent the full phase-space vector,
    :math:`w = (q, p)`. We define a function that computes the time derivates
    at any given time, ``t``, and phase-space position, ``w``::

        def F(t, w):
            dw = [w[1], -w[0]]
            return dw

    .. note::

        The force here is not time dependent, but this function always has
        to accept the independent variable (e.g., time) as the
        first argument.

    To create an integrator object, just pass this acceleration function in
    to the constructor, and then we can integrate orbits from a given vector
    of initial conditions::

        integrator = Ruth4Integrator(acceleration)
        times, ws = integrator(w0=[1., 0.], dt=0.1, n_steps=1000)

    .. note::

        When integrating a single vector of initial conditions, the return
        array will have 2 axes. In the above example, the returned array will
        have shape ``(2, 1001)``. If an array of initial conditions are passed
        in, the return array will have 3 axes, where the last axis is for the
        individual orbits.

    Parameters
    ----------
    func : func
        A callable object that computes the phase-space time derivatives
        at a time and point in phase space.
    func_args : tuple (optional)
        Any extra arguments for the derivative function.
    func_units : `~gala.units.UnitSystem` (optional)
        If using units, this is the unit system assumed by the
        integrand function.
    """

    # Drift (c_j) and kick (d_j) coefficients of the classic 4th-order
    # Ruth scheme; note d_0 = 0, so each step starts with a pure drift.
    # From: https://en.wikipedia.org/wiki/Symplectic_integrator
    _cs = [
        1 / (2 * (2 - 2 ** (1 / 3))),
        (1 - 2 ** (1 / 3)) / (2 * (2 - 2 ** (1 / 3))),
        (1 - 2 ** (1 / 3)) / (2 * (2 - 2 ** (1 / 3))),
        1 / (2 * (2 - 2 ** (1 / 3))),
    ]
    _ds = [
        0,
        1 / (2 - 2 ** (1 / 3)),
        -(2 ** (1 / 3)) / (2 - 2 ** (1 / 3)),
        1 / (2 - 2 ** (1 / 3)),
    ]

    def step(self, t, w, dt):
        """
        Step forward the positions and velocities by the given timestep.

        Parameters
        ----------
        t : numeric
            Time passed through to the derivative function ``F``.
        w : array_like
            Phase-space vector: first ``ndim`` entries are coordinates,
            the rest are momenta/velocities.
        dt : numeric
            The timestep to move forward.
        """
        w_i = w.copy()
        # Alternate velocity "kicks" (d_j) and position "drifts" (c_j).
        # The drift uses the just-kicked velocities — this ordering is what
        # makes the composition symplectic; do not reorder.
        for cj, dj in zip(self._cs, self._ds):
            F_i = self.F(t, w_i, *self._func_args)
            a_i = F_i[self.ndim:]  # acceleration half of the derivative
            w_i[self.ndim:] += dj * a_i * dt
            w_i[: self.ndim] += cj * w_i[self.ndim:] * dt

        # NOTE(review): F is evaluated at the step's end time t for every
        # substep — fine for time-independent forces; confirm if a
        # time-dependent F is ever used.
        return w_i

    def __call__(self, w0, mmap=None, **time_spec):
        """
        Integrate from initial conditions ``w0`` over the times described by
        ``time_spec`` (e.g. ``dt`` and ``n_steps``).
        """
        # generate the array of times
        times = parse_time_specification(self._func_units, **time_spec)
        n_steps = len(times) - 1
        dt = times[1] - times[0]

        w0_obj, w0, ws = self._prepare_ws(w0, mmap, n_steps=n_steps)

        # Set first step to the initial conditions
        if self.store_all:
            ws[:, 0] = w0

        w = w0.copy()
        range_ = self._get_range_func()
        for ii in range_(1, n_steps + 1):
            w = self.step(times[ii], w, dt)

            if self.store_all:
                ws[:, ii] = w

        if not self.store_all:
            # Only the final phase-space position (and time) is returned.
            ws = w
            times = times[-1:]

        return self._handle_output(w0_obj, times, ws)
|
adrnREPO_NAMEgalaPATH_START.@gala_extracted@gala-main@gala@integrate@pyintegrators@ruth4.py@.PATH_END.py
|
{
"filename": "_title.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/graph_objs/streamtube/colorbar/_title.py",
"type": "Python"
}
|
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Title(_BaseTraceHierarchyType):
    """
    Title of a streamtube trace's color bar.

    NOTE(review): this class follows the auto-generated Plotly
    graph-object pattern — property storage and validation are delegated
    to ``_BaseTraceHierarchyType`` via ``self[...]`` item access.
    """

    # class properties
    # --------------------
    _parent_path_str = "streamtube.colorbar"
    _path_str = "streamtube.colorbar.title"
    _valid_props = {"font", "side", "text"}

    # font
    # ----
    @property
    def font(self):
        """
        Sets this color bar's title font. Note that the title's font
        used to be set by the now deprecated `titlefont` attribute.

        The 'font' property is an instance of Font
        that may be specified as:
          - An instance of :class:`plotly.graph_objs.streamtube.colorbar.title.Font`
          - A dict of string/value properties that will be passed
            to the Font constructor

            Supported dict properties:

                color
                family
                    HTML font family - the typeface that will be
                    applied by the web browser. The web browser
                    will only be able to apply a font if it is
                    available on the system which it operates.
                    Provide multiple font families, separated by
                    commas, to indicate the preference in which to
                    apply fonts if they aren't available on the
                    system. The Chart Studio Cloud (at
                    https://chart-studio.plotly.com or on-premise)
                    generates images on a server, where only a
                    select number of fonts are installed and
                    supported. These include "Arial", "Balto",
                    "Courier New", "Droid Sans",, "Droid Serif",
                    "Droid Sans Mono", "Gravitas One", "Old
                    Standard TT", "Open Sans", "Overpass", "PT Sans
                    Narrow", "Raleway", "Times New Roman".
                size

        Returns
        -------
        plotly.graph_objs.streamtube.colorbar.title.Font
        """
        return self["font"]

    @font.setter
    def font(self, val):
        self["font"] = val

    # side
    # ----
    @property
    def side(self):
        """
        Determines the location of color bar's title with respect to
        the color bar. Note that the title's location used to be set by
        the now deprecated `titleside` attribute.

        The 'side' property is an enumeration that may be specified as:
          - One of the following enumeration values:
                ['right', 'top', 'bottom']

        Returns
        -------
        Any
        """
        return self["side"]

    @side.setter
    def side(self, val):
        self["side"] = val

    # text
    # ----
    @property
    def text(self):
        """
        Sets the title of the color bar. Note that before the existence
        of `title.text`, the title's contents used to be defined as the
        `title` attribute itself. This behavior has been deprecated.

        The 'text' property is a string and must be specified as:
          - A string
          - A number that will be converted to a string

        Returns
        -------
        str
        """
        return self["text"]

    @text.setter
    def text(self, val):
        self["text"] = val

    # Self properties description
    # ---------------------------
    @property
    def _prop_descriptions(self):
        return """\
        font
            Sets this color bar's title font. Note that the title's
            font used to be set by the now deprecated `titlefont`
            attribute.
        side
            Determines the location of color bar's title with
            respect to the color bar. Note that the title's
            location used to be set by the now deprecated
            `titleside` attribute.
        text
            Sets the title of the color bar. Note that before the
            existence of `title.text`, the title's contents used to
            be defined as the `title` attribute itself. This
            behavior has been deprecated.
        """

    def __init__(self, arg=None, font=None, side=None, text=None, **kwargs):
        """
        Construct a new Title object

        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of
            :class:`plotly.graph_objs.streamtube.colorbar.Title`
        font
            Sets this color bar's title font. Note that the title's
            font used to be set by the now deprecated `titlefont`
            attribute.
        side
            Determines the location of color bar's title with
            respect to the color bar. Note that the title's
            location used to be set by the now deprecated
            `titleside` attribute.
        text
            Sets the title of the color bar. Note that before the
            existence of `title.text`, the title's contents used to
            be defined as the `title` attribute itself. This
            behavior has been deprecated.

        Returns
        -------
        Title
        """
        super(Title, self).__init__("title")

        # Internal construction path: the parent object injects itself.
        if "_parent" in kwargs:
            self._parent = kwargs["_parent"]
            return

        # Validate arg
        # ------------
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            arg = _copy.copy(arg)
        else:
            raise ValueError(
                """\
The first argument to the plotly.graph_objs.streamtube.colorbar.Title
constructor must be a dict or
an instance of :class:`plotly.graph_objs.streamtube.colorbar.Title`"""
            )

        # Handle skip_invalid
        # -------------------
        self._skip_invalid = kwargs.pop("skip_invalid", False)
        self._validate = kwargs.pop("_validate", True)

        # Populate data dict with properties
        # ----------------------------------
        # Explicit keyword arguments take precedence over entries in `arg`.
        _v = arg.pop("font", None)
        _v = font if font is not None else _v
        if _v is not None:
            self["font"] = _v
        _v = arg.pop("side", None)
        _v = side if side is not None else _v
        if _v is not None:
            self["side"] = _v
        _v = arg.pop("text", None)
        _v = text if text is not None else _v
        if _v is not None:
            self["text"] = _v

        # Process unknown kwargs
        # ----------------------
        self._process_kwargs(**dict(arg, **kwargs))

        # Reset skip_invalid
        # ------------------
        self._skip_invalid = False
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@graph_objs@streamtube@colorbar@_title.py@.PATH_END.py
|
{
"filename": "agama_stream.ipynb",
"repo_name": "ybillchen/particle_spray",
"repo_path": "particle_spray_extracted/particle_spray-main/agama_stream.ipynb",
"type": "Jupyter Notebook"
}
|
# Particle spray algorithm by Chen et al. (2024) via `agama`
Author: Yingtian "Bill" Chen
We provide a notebook to generate streams using the Chen+24 ([arXiv:2408.01496](https://arxiv.org/abs/2408.01496)) model via `agama`. This implementation is based on Eugene Vasiliev's [tutorial notebook](https://github.com/GalacticDynamics-Oxford/Agama/blob/master/py/tutorial_streams.ipynb) during the [Streams24](https://stellarstreams.org/streams24/) workshop at Durham.
```python
import numpy as np
import matplotlib.pyplot as plt
import agama
agama.setUnits(length=1, velocity=1, mass=1)
timeUnitGyr = agama.getUnits()['time'] / 1e3 # time unit is 1 kpc / (1 km/s)
print('time unit: %.3f Gyr' % timeUnitGyr)
```
time unit: 0.978 Gyr
## First, define the particle spray methods
```python
# Useful utility functions
def get_rot_mat(x, y, z, vx, vy, vz):
    """Rotation matrices from the host frame to the satellite frame.

    For each trajectory point, row 0 of R is the radial unit vector,
    row 2 is the orbital angular-momentum unit vector, and row 1
    completes the right-handed orthonormal triad.

    Returns (R, L, r): the (N, 3, 3) matrices plus |L| and |r| per point.
    """
    # Orbital angular momentum L = r x v, component by component.
    Lx = y * vz - z * vy
    Ly = z * vx - x * vz
    Lz = x * vy - y * vx
    r = (x*x + y*y + z*z)**0.5
    L = (Lx*Lx + Ly*Ly + Lz*Lz)**0.5

    R = np.zeros((len(x), 3, 3))
    # Row 0: radial direction.
    R[:, 0, 0], R[:, 0, 1], R[:, 0, 2] = x / r, y / r, z / r
    # Row 2: angular-momentum direction.
    R[:, 2, 0], R[:, 2, 1], R[:, 2, 2] = Lx / L, Ly / L, Lz / L
    # Row 1 = (row 2) x (row 0), closing the right-handed basis.
    R[:, 1, 0] = R[:, 0, 2] * R[:, 2, 1] - R[:, 0, 1] * R[:, 2, 2]
    R[:, 1, 1] = R[:, 0, 0] * R[:, 2, 2] - R[:, 0, 2] * R[:, 2, 0]
    R[:, 1, 2] = R[:, 0, 1] * R[:, 2, 0] - R[:, 0, 0] * R[:, 2, 1]
    return R, L, r
def get_d2Phi_dr2(pot_host, x, y, z):
    """Second derivative of the host potential w.r.t. spherical radius.

    Projects the force-derivative tensor returned by
    ``pot_host.forceDeriv`` onto the radial direction at each point
    (the leading minus sign converts force derivatives to potential
    derivatives).
    """
    r = (x*x + y*y + z*z)**0.5
    der = pot_host.forceDeriv(np.column_stack([x, y, z]))[1]
    # Columns 0-2 are the diagonal terms, 3-5 the mixed (xy, yz, zx) ones.
    radial_proj = (x**2 * der[:, 0] + y**2 * der[:, 1] + z**2 * der[:, 2]
                   + 2*x*y * der[:, 3] + 2*y*z * der[:, 4] + 2*z*x * der[:, 5])
    return -radial_proj / r**2
```
```python
def create_ic_chen24(rng, pot_host, orb_sat, mass_sat):
    """Stream-particle initial conditions following Chen et al. (2024).

    Position/velocity offsets from the satellite are drawn from the
    correlated 6-D Gaussian of Chen+24 (arXiv:2408.01496) in spherical
    coordinates around the tidal radius, then rotated into the host frame
    at both Lagrange points.

    :param rng: numpy random Generator (supplies multivariate_normal)
    :param pot_host: host potential (consumed by get_d2Phi_dr2)
    :param orb_sat: (N, 6) satellite trajectory [x, y, z, vx, vy, vz]
    :param mass_sat: satellite mass
    :return: (2N, 6) array — even rows trailing arm, odd rows leading arm
    """
    N = len(orb_sat)
    x, y, z, vx, vy, vz = orb_sat.T
    R, L, r = get_rot_mat(x, y, z, vx, vy, vz)
    d2Phi_dr2 = get_d2Phi_dr2(pot_host, x, y, z)

    # compute the tidal radius at this radius for each point on the trajectory
    Omega = L / r**2
    r_tidal = (agama.G * mass_sat / (Omega**2 - d2Phi_dr2))**(1./3)

    # assign positions and velocities (in the satellite reference frame) of particles
    r_tidal = np.repeat(r_tidal, 2)  # one value per (trailing, leading) particle

    # Mean/covariance of (Dr/r_t, phi, theta, Dv/v_esc, alpha, beta);
    # angular entries are in degrees. Values are the Chen+24 fit; the zero
    # variance of the 4th component fixes |Dv| = v_esc exactly.
    mean = np.array([1.6, -30, 0, 1, 20, 0])
    cov = np.array([
        [0.1225, 0, 0, 0, -4.9, 0],
        [ 0, 529, 0, 0, 0, 0],
        [ 0, 0, 144, 0, 0, 0],
        [ 0, 0, 0, 0, 0, 0],
        [ -4.9, 0, 0, 0, 400, 0],
        [ 0, 0, 0, 0, 0, 484],
    ])
    posvel = rng.multivariate_normal(mean, cov, size=2*N)

    Dr = posvel[:, 0] * r_tidal
    # Escape speed at the sampled offset radius sets the velocity scale.
    v_esc = np.sqrt(2 * agama.G * mass_sat / Dr)
    Dv = posvel[:, 3] * v_esc

    # convert degrees to radians
    phi = posvel[:, 1] * np.pi / 180
    theta = posvel[:, 2] * np.pi / 180
    alpha = posvel[:, 4] * np.pi / 180
    beta = posvel[:, 5] * np.pi / 180

    # Spherical -> Cartesian offsets in the satellite frame.
    dx = Dr * np.cos(theta) * np.cos(phi)
    dy = Dr * np.cos(theta) * np.sin(phi)
    dz = Dr * np.sin(theta)
    dvx = Dv * np.cos(beta) * np.cos(alpha)
    dvy = Dv * np.cos(beta) * np.sin(alpha)
    dvz = Dv * np.sin(beta)

    dq = np.column_stack([dx, dy, dz])
    dp = np.column_stack([dvx, dvy, dvz])

    ic_stream = np.tile(orb_sat, 2).reshape(2*N, 6)
    # trailing arm
    ic_stream[::2, 0:3] += np.einsum('ni,nij->nj', dq[::2], R)
    ic_stream[::2, 3:6] += np.einsum('ni,nij->nj', dp[::2], R)
    # leading arm (mirrored offsets)
    ic_stream[1::2, 0:3] += np.einsum('ni,nij->nj', -dq[1::2], R)
    ic_stream[1::2, 3:6] += np.einsum('ni,nij->nj', -dp[1::2], R)
    return ic_stream
# For comparison Fardal+15 method
# Originally implemented by Eugene
def create_ic_fardal15(rng, pot_host, orb_sat, mass_sat, gala_modified=False):
    """Stream-particle initial conditions following Fardal et al. (2015).

    Particles leave from both Lagrange points with independent Gaussian
    offsets in the satellite frame. ``gala_modified=True`` switches to the
    slightly different dispersions used by gala's implementation.

    :return: (2N, 6) array of phase-space initial conditions.
    """
    N = len(orb_sat)
    x, y, z, vx, vy, vz = orb_sat.T
    R, L, r = get_rot_mat(x, y, z, vx, vy, vz)
    d2Phi_dr2 = get_d2Phi_dr2(pot_host, x, y, z)

    # compute the Jacobi radius and the relative velocity at this radius for each point on the trajectory
    Omega = L / r**2
    rj = (agama.G * mass_sat / (Omega**2 - d2Phi_dr2))**(1./3)
    vj = Omega * rj

    # assign positions and velocities (in the satellite reference frame) of particles
    # leaving the satellite at both lagrange points.
    # The alternating sign sends consecutive particles to opposite points.
    rj = np.repeat(rj, 2) * np.tile([1, -1], N)
    vj = np.repeat(vj, 2) * np.tile([1, -1], N)

    # Offset means/dispersions from Fardal+15; gala's variant also scales
    # the tangential velocity offset by the radial offset rx.
    mean_x = 2.0
    disp_x = 0.5 if gala_modified else 0.4

    disp_z = 0.5
    mean_vy = 0.3
    disp_vy = 0.5 if gala_modified else 0.4

    disp_vz = 0.5

    rx = rng.normal(size=2*N) * disp_x + mean_x
    rz = rng.normal(size=2*N) * disp_z * rj
    rvy = (rng.normal(size=2*N) * disp_vy + mean_vy) * vj * (rx if gala_modified else 1)
    rvz = rng.normal(size=2*N) * disp_vz * vj
    rx *= rj

    ic_stream = np.tile(orb_sat, 2).reshape(2*N, 6)
    # Rotate the satellite-frame offsets into the host frame and add them.
    ic_stream[:, 0:3] += np.einsum('ni,nij->nj',
        np.column_stack([rx, rx*0, rz]), np.repeat(R, 2, axis=0))
    ic_stream[:, 3:6] += np.einsum('ni,nij->nj',
        np.column_stack([rx*0, rvy, rvz]), np.repeat(R, 2, axis=0))
    return ic_stream
```
```python
def create_stream(create_ic_method, rng, time_total, num_particles, pot_host, posvel_sat, mass_sat, pot_sat=None, **kwargs):
    """Generate a stellar stream: progenitor orbit, release, particle orbits.

    :param create_ic_method: create_ic_chen24 or create_ic_fardal15
    :param time_total: integration interval; negative integrates backward
        from the present-day progenitor phase-space position.
    :param num_particles: number of stream particles (two per orbit point)
    :param pot_sat: optional progenitor potential carried along its orbit
    :return: (time_sat, orbit_sat, xv_stream, ic_stream)
    """
    # integrate the orbit of the progenitor from its present-day posvel (at time t=0)
    # back in time for an interval time_total, storing the trajectory at num_steps points
    time_sat, orbit_sat = agama.orbit(potential=pot_host, ic=posvel_sat,
        time=time_total, trajsize=num_particles//2)
    if time_total < 0:
        # reverse the arrays to make them increasing in time
        time_sat = time_sat[::-1]
        orbit_sat = orbit_sat[::-1]

    # at each point on the trajectory, create a pair of seed initial conditions
    # for particles released at Lagrange points
    ic_stream = create_ic_method(rng, pot_host, orbit_sat, mass_sat, **kwargs)
    time_seed = np.repeat(time_sat, 2)

    if pot_sat is None:
        pot_tot = pot_host
    else:
        # include the progenitor's potential, moving along its trajectory
        traj = np.column_stack([time_sat, orbit_sat])
        pot_traj = agama.Potential(potential=pot_sat, center=traj)
        pot_tot = agama.Potential(pot_host, pot_traj)

    # Each particle is integrated from its release time up to the common
    # final time; only the endpoint (trajsize=1) is kept.
    xv_stream = np.vstack(agama.orbit(potential=pot_tot,
        ic=ic_stream, time=-time_seed if time_total < 0 else time_total-time_seed, timestart=time_seed, trajsize=1)[:, 1])
    return time_sat, orbit_sat, xv_stream, ic_stream
```
## Now, do particle spray and orbit integration
Here is the case of backward integration, which is more commonly used for stream modeling. But you can do forward integration by simply flipping the sign of `time_total`.
```python
pot = agama.Potential("data/MWPotential2014.ini")
prog_w0 = [40,0,0,0,100,0]
prog_mass = 1e5
```
```python
time_total = -3.0 / timeUnitGyr # in time units (0.978 Gyr)
num_particles = 1000 # number of particles in the stream
%time time_sat_f15, orbit_sat_f15, xv_stream_f15, ic_stream_f15 = \
create_stream(create_ic_fardal15, np.random.default_rng(0), time_total, num_particles, pot, prog_w0, prog_mass, gala_modified=False)
%time time_sat_c24, orbit_sat_c24, xv_stream_c24, ic_stream_c24 = \
create_stream(create_ic_chen24, np.random.default_rng(0), time_total, num_particles, pot, prog_w0, prog_mass)
prog_pot = agama.Potential(type='Plummer', mass=prog_mass, scaleRadius=4e-3)
%time time_sat_c24b, orbit_sat_c24b, xv_stream_c24b, ic_stream_c24b = \
create_stream(create_ic_chen24, np.random.default_rng(0), time_total, num_particles, pot, prog_w0, prog_mass, pot_sat=prog_pot)
```
1000 orbits complete (5.556e+04 orbits/s)
CPU times: user 257 ms, sys: 385 µs, total: 257 ms
Wall time: 25.1 ms
1000 orbits complete (6.667e+04 orbits/s)
CPU times: user 210 ms, sys: 11.7 ms, total: 222 ms
Wall time: 18.3 ms
1000 orbits complete (2.941e+04 orbits/s)
CPU times: user 420 ms, sys: 0 ns, total: 420 ms
Wall time: 42.8 ms
```python
plt.scatter(xv_stream_f15[:,0]-5, xv_stream_f15[:,1], s=1, alpha=0.5, label='Fardal+15')
plt.scatter(xv_stream_c24[:,0], xv_stream_c24[:,1], s=1, alpha=0.5, label='Chen+24 no prog.')
plt.scatter(xv_stream_c24b[:,0]+5, xv_stream_c24b[:,1], s=1, alpha=0.5, label='Chen+24 with prog.')
plt.xlabel(r'$x\ ({\rm kpc})$')
plt.ylabel(r'$y\ ({\rm kpc})$')
plt.xlim(25, 50)
plt.ylim(-15, 15)
plt.legend(loc='lower left')
plt.gca().set_aspect(1)
plt.show()
```

## Action space distribution
```python
actFinder = agama.ActionFinder(pot)
def get_action(stream, prog, actFinder):
    """Offsets of the stream particles' (J_phi, J_r) from the progenitor's.

    ``actFinder`` maps phase-space points to actions where index 0 is J_r
    and index 2 is J_phi. Returns (DJphi, DJr) arrays.
    """
    prog_actions = actFinder(prog)
    stream_actions = actFinder(stream)
    # Subtract the progenitor's actions component-wise.
    DJphi = stream_actions[:, 2] - prog_actions[2]
    DJr = stream_actions[:, 0] - prog_actions[0]
    return DJphi, DJr
DJphi, DJr = get_action(xv_stream_f15, orbit_sat_f15[-1], actFinder)
plt.scatter(DJphi, DJr, s=1, alpha=0.5, label='Fardal+15')
DJphi, DJr = get_action(xv_stream_c24, orbit_sat_c24[-1], actFinder)
plt.scatter(DJphi, DJr, s=1, alpha=0.5, label='Chen+24 no prog.')
DJphi, DJr = get_action(xv_stream_c24b, orbit_sat_c24b[-1], actFinder)
plt.scatter(DJphi, DJr, s=1, alpha=0.5, label='Chen+24 with prog.')
plt.xlabel(r'$\Delta J_\phi\ ({\rm kpc\,km/s})$')
plt.ylabel(r'$\Delta J_r\ ({\rm kpc\,km/s})$')
plt.xlim(-120, 120)
plt.ylim(-100, 100)
plt.legend(loc='lower right')
plt.gca().set_aspect(1)
plt.show()
```

|
ybillchenREPO_NAMEparticle_sprayPATH_START.@particle_spray_extracted@particle_spray-main@agama_stream.ipynb@.PATH_END.py
|
{
"filename": "_lineposition.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/contourcarpet/colorbar/title/font/_lineposition.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class LinepositionValidator(_plotly_utils.basevalidators.FlaglistValidator):
    """Flaglist validator for `contourcarpet.colorbar.title.font.lineposition`."""

    def __init__(
        self,
        plotly_name="lineposition",
        parent_name="contourcarpet.colorbar.title.font",
        **kwargs,
    ):
        # Defaults mirror the plotly schema; explicit kwargs override them.
        edit_type = kwargs.pop("edit_type", "colorbars")
        extras = kwargs.pop("extras", ["none"])
        flags = kwargs.pop("flags", ["under", "over", "through"])
        super(LinepositionValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            extras=extras,
            flags=flags,
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@contourcarpet@colorbar@title@font@_lineposition.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "desihub/desisim",
"repo_path": "desisim_extracted/desisim-main/py/desisim/test/__init__.py",
"type": "Python"
}
|
from __future__ import absolute_import, division, print_function
import unittest
def test_suite():
    """Returns unittest.TestSuite of desisim tests for use by setup.py"""
    from os.path import dirname

    # Discover every test module under the desisim package directory,
    # anchoring discovery one level above the package.
    package_dir = dirname(dirname(__file__))
    print(package_dir)
    return unittest.defaultTestLoader.discover(
        package_dir, top_level_dir=dirname(package_dir))
|
desihubREPO_NAMEdesisimPATH_START.@desisim_extracted@desisim-main@py@desisim@test@__init__.py@.PATH_END.py
|
{
"filename": "biased_isotropic_velocity.py",
"repo_name": "astropy/halotools",
"repo_path": "halotools_extracted/halotools-master/halotools/empirical_models/phase_space_models/analytic_models/satellites/nfw/kernels/biased_isotropic_velocity.py",
"type": "Python"
}
|
"""
"""
import numpy as np
from scipy.integrate import quad as quad_integration
from .mass_profile import _g_integral
__all__ = ("dimensionless_radial_velocity_dispersion",)
def _jeans_integrand_term1(y, *args):
r"""First term in the Jeans integrand"""
bias_ratio = args[0] # = halo_conc/gal_conc
return np.log(1 + bias_ratio * y) / (y**3 * (1 + y) ** 2)
def _jeans_integrand_term2(y, *args):
r"""Second term in the Jeans integrand"""
bias_ratio = args[0] # = halo_conc/gal_conc
numerator = bias_ratio
denominator = (y**2) * ((1 + y) ** 2) * (1 + bias_ratio * y)
return numerator / denominator
def dimensionless_radial_velocity_dispersion(
    scaled_radius, halo_conc, gal_conc, profile_integration_tol=1e-4
):
    r"""
    Analytical solution to the isotropic jeans equation for an NFW potential,
    rendered dimensionless via scaling by the virial velocity.

    :math:`\tilde{\sigma}^{2}_{r}(\tilde{r})\equiv\sigma^{2}_{r}(\tilde{r})/V_{\rm vir}^{2} = \frac{c^{2}\tilde{r}(1 + c\tilde{r})^{2}}{g(c)}\int_{c\tilde{r}}^{\infty}{\rm d}y\frac{g(y)}{y^{3}(1 + y)^{2}}`

    See :ref:`nfw_jeans_velocity_profile_derivations` for derivations and implementation details.

    Parameters
    -----------
    scaled_radius : array_like
        Length-Ngals numpy array storing the halo-centric distance
        *r* scaled by the halo boundary :math:`R_{\Delta}`, so that
        :math:`0 <= \tilde{r} \equiv r/R_{\Delta} <= 1`.

    halo_conc : float
        Concentration of the halo.

    gal_conc : float
        Concentration of the tracer (galaxy) profile; the ratio
        halo_conc/gal_conc enters the integrands as the bias parameter.

    profile_integration_tol : float, optional
        Relative tolerance ``epsrel`` passed to scipy.integrate.quad.
        Default is 1e-4.

    Returns
    -------
    result : array_like
        Radial velocity dispersion profile scaled by the virial velocity.
        The returned result has the same dimension as the input ``scaled_radius``.
    """
    x = np.atleast_1d(scaled_radius).astype(np.float64)
    result = np.zeros_like(x)

    # Prefactor c_g^2 * x * (1 + c_g x)^2 / g(c_h) multiplying the integral.
    prefactor = (
        gal_conc * gal_conc * x * (1.0 + gal_conc * x) ** 2 / _g_integral(halo_conc)
    )

    # NOTE(review): a length-1 float32 array is passed as quad's `args`,
    # which unpacks to the scalar bias ratio. The "f4" cast slightly
    # truncates precision — presumably intentional/historical; confirm.
    extra_args = halo_conc / np.atleast_1d(gal_conc).astype("f4")

    lower_limit = gal_conc * x
    upper_limit = float("inf")

    # Integrate the two Jeans terms separately for each radius.
    for i in range(len(x)):
        term1, _ = quad_integration(
            _jeans_integrand_term1,
            lower_limit[i],
            upper_limit,
            epsrel=profile_integration_tol,
            args=extra_args,
        )
        term2, _ = quad_integration(
            _jeans_integrand_term2,
            lower_limit[i],
            upper_limit,
            epsrel=profile_integration_tol,
            args=extra_args,
        )
        result[i] = term1 - term2

    return np.sqrt(result * prefactor)
|
astropyREPO_NAMEhalotoolsPATH_START.@halotools_extracted@halotools-master@halotools@empirical_models@phase_space_models@analytic_models@satellites@nfw@kernels@biased_isotropic_velocity.py@.PATH_END.py
|
{
"filename": "modelparameters.py",
"repo_name": "rpoleski/MulensModel",
"repo_path": "MulensModel_extracted/MulensModel-master/source/MulensModel/modelparameters.py",
"type": "Python"
}
|
import numpy as np
from MulensModel.uniformcausticsampling import UniformCausticSampling
from MulensModel.orbits.orbit import Orbit
class ModelParameters(object):
"""
A class for the basic microlensing model parameters (t_0, u_0,
t_E, s, q, alpha, etc.). Can handle point lens or binary lens.
The pi_E assumes NE coordinates (Parallel, Perpendicular
coordinates are not supported).
Arguments :
parameters: *dictionary*
A dictionary of parameters and their values. See
:py:func:`which_parameters()` for valid parameter combinations.
Attributes :
parameters: *dictionary*
A dictionary of parameters and their values. Do not use it to
change parameter values, instead use e.g.:
``model_parameters.u_0 = 0.1`` or
``setattr(model_parameters, 'u_0', 0.1)``.
Example:
Define a point lens model:
``params = ModelParameters({'t_0': 2450000., 'u_0': 0.3,
't_E': 35.})``
Then you can print the parameters:
``print(params)``
"""
# parameters that may be defined for a given source
_primary_source_params_head = ['t_0', 'u_0']
_finite_source_params_head = ['rho', 't_star']
_all_source_params_head = np.hstack((_primary_source_params_head, _finite_source_params_head))
_t_0_ref_types = ['par', 'kep', 'xi']
def __init__(self, parameters):
    """
    Validate the input dictionary and set up single- or multi-source
    microlensing parameters.

    :param parameters: *dict* of parameter names and values.
    """
    if not isinstance(parameters, dict):
        raise TypeError('ModelParameters must be initialized with dict as a parameter\ne.g., '
                        "ModelParameters({'t_0': 2456789.0, 'u_0': 0.123, 't_E': 23.45})")

    # Determine model topology (number of sources/lenses, model type)
    # purely from the parameter names before value-level checks.
    self._count_sources(parameters.keys())
    self._count_lenses(parameters.keys())
    self._set_type(parameters.keys())
    self._check_types('alpha' in parameters.keys())

    if self.n_sources == 1:
        self._check_valid_combination_1_source(parameters.keys())
        if self._type['Cassan08']:
            # Cassan (2008) caustic-crossing parameterization is converted
            # to standard parameters lazily; caches start empty.
            self._uniform_caustic = None
            self._standard_parameters = None
        if self.is_xallarap:
            self._check_for_extra_source_parameters(parameters.keys())
            # Reference position of the source on its xallarap orbit.
            delta_1 = self._get_xallarap_position(parameters)
            self._xallarap_reference_position = delta_1
    elif self.n_sources > 1:
        self._check_valid_combination_of_sources(parameters.keys())
        if 't_E' not in parameters.keys():
            raise KeyError('Currently, the binary source calculations ' +
                           'require t_E to be directly defined, i.e., ' +
                           'has to be the same for both sources.')
        # Split parameters per source and validate each source as a
        # stand-alone single-source ModelParameters instance.
        source_params = self._divide_parameters(parameters)
        for (i, params_i) in enumerate(source_params):
            # This try/except block forces checks from ._init_1_source()
            # to be run on each source parameters separately.
            try:
                self.__setattr__('_source_{0}_parameters'.format(i + 1), ModelParameters(params_i))
            except Exception:
                print("ERROR IN INITIALIZING SOURCE {0}".format(i + 1))
                raise
        if self.is_xallarap:
            self._update_sources_xallarap_reference()
    else:
        msg = 'wrong number of sources. Your parameters: {0:}'
        raise ValueError(msg.format(parameters))

    self._set_parameters(parameters)
def _update_sources_xallarap_reference(self):
    """
    Update .xallarap_reference_position for each source parameters

    Note: below we're calling private function and set private
    properties NOT of self, but self._source_X_parameters,
    which both are of the same type as self.
    """
    # Both sources share source 1's reference position so their xallarap
    # orbits are measured from a common zero point.
    delta_1 = self._source_1_parameters._get_xallarap_position()
    self._source_1_parameters._xallarap_reference_position = delta_1
    self._source_2_parameters._xallarap_reference_position = delta_1
def _get_xallarap_position(self, parameters=None):
    """
    Get position at t_0_xi from xallarap Orbit object.

    Note: this function is called in 2 different ways:
    - directly, i.e., self._get_xallarap_orbit(), and
    - indirectly, i.e., self._source_1_parameters._get_xallarap_orbit().

    :param parameters: *dict* or *None*; defaults to self.parameters.
    """
    if parameters is None:
        parameters = self.parameters

    # t_0_xi defaults to t_0 when not explicitly provided.
    t_0_xi = parameters.get('t_0_xi', parameters['t_0'])

    # Collect all 'xi_*' parameters, stripping the 'xi_' prefix to form
    # the keyword arguments of the Orbit constructor.
    zip_ = parameters.items()
    orbit_parameters = {key[3:]: value
                        for (key, value) in zip_ if key[:3] == "xi_"}
    orbit_parameters['epoch_reference'] = t_0_xi
    orbit = Orbit(**orbit_parameters)
    return orbit.get_reference_plane_position([t_0_xi])
def __getattr__(self, item):
    """
    Resolve source-suffixed parameters (e.g. ``t_0_2`` -> source 2's
    ``t_0``) and public ``source_N_parameters`` aliases; otherwise fall
    back to normal attribute lookup.
    """
    (head, end) = self._split_parameter_name(item)
    if end is not None:
        # e.g. 'u_0_2' -> attribute 'u_0' of self._source_2_parameters.
        return self.__getattr__('_source_{:}_parameters'.format(end)).__getattribute__(head)
    elif item.startswith("source_") and item.endswith("_parameters"):
        # Public alias for the private per-source parameter objects.
        return object.__getattribute__(self, "_" + item)
    else:
        return object.__getattribute__(self, item)
def _split_parameter_name(self, parameter):
    """
    Split ABC_DEF_n into ABC_DEF (str) and n (int). For parameters like t_0 or rho, n is None.

    :param parameter: *str* full parameter name, e.g. 'u_0_2'.
    :return: *(str, int or None)* head and source index.
    """
    end = parameter.split('_')[-1]
    if end.isnumeric() and int(end) > 0:
        # Positive numeric suffix -> per-source parameter; strip '_n'.
        head = parameter[:-len(end)-1]
        end = int(end)
    else:
        head = parameter
        end = None

    return (head, end)
def _count_sources(self, keys):
    """
    How many luminous sources are there?

    Sets self._n_sources from the largest numeric suffix found among the
    parameter names (e.g. 'u_0_2' implies at least 2 sources).
    """
    self._n_sources = 1
    for key in keys:
        # _split_parameter_name returns (head, suffix); suffix is the
        # source index or None for un-suffixed parameters.
        n = self._split_parameter_name(key)[1]
        if n is not None:
            if n > self._n_sources:
                self._n_sources = n

    # 'q_source' is a xallarap parameter (see _set_type) implying exactly
    # two sources; reject suffix counts beyond 2 that also carry explicit
    # second-source finite-source parameters.
    if 'q_source' in keys:
        if self._n_sources != 1:
            if self._n_sources != 2 and ('rho_2' in keys or 't_star_2' in keys):
                raise RuntimeError('wrong set of parametes: ' + str(keys))
        self._n_sources = 2
def _count_lenses(self, keys):
    """How many lenses there are?"""
    # Presence of separation 's' or mass ratio 'q' marks a binary lens.
    self._n_lenses = 1
    if 's' in keys or 'q' in keys:
        self._n_lenses = 2
    # Both standard and Cassen08 parameterizations require s and q
def _set_type(self, keys):
"""
sets self._type property, which indicates what type of a model we have
"""
types = ['finite source', 'parallax', 'Cassan08',
'lens 2-parameter orbital motion', 'mass sheet', 'xallarap']
out = {type_: False for type_ in types}
temp = {
'finite source': 'rho t_star rho_1 rho_2 t_star_1 t_star_2',
'parallax': 'pi_E_N pi_E_E',
'Cassan08': 'x_caustic_in x_caustic_out t_caustic_in t_caustic_out',
'lens 2-parameter orbital motion': 'dalpha_dt ds_dt',
'mass sheet': 'convergence_K shear_G',
'xallarap': ('xi_period xi_semimajor_axis xi_inclination '
'xi_Omega_node xi_argument_of_latitude_reference '
'xi_eccentricity xi_omega_periapsis q_source')}
parameter_to_type = dict()
for (key, values) in temp.items():
for value in values.split():
parameter_to_type[value] = key
for key in keys:
if key in parameter_to_type:
out[parameter_to_type[key]] = True
self._type = out
    def _check_types(self, alpha_defined):
        """
        Check if self._type values make sense.

        Parameters :
            alpha_defined: *bool*
                Was the parameter alpha provided?
        """
        n_lenses = self._n_lenses
        # Lens orbital motion requires binary lens:
        if self._type['lens 2-parameter orbital motion'] and n_lenses == 1:
            raise KeyError('Orbital motion of the lens requires two lens '
                           'components but only one was provided.')

        self._check_types_for_Cassan08()

        # alpha for a single lens makes sense only with a mass sheet.
        if alpha_defined:
            if self._n_lenses == 1 and not self._type['mass sheet']:
                raise KeyError(
                    'You defined alpha for single lens model '
                    'without external mass sheet. This is not allowed.')
    def _check_valid_combination_1_source(self, keys):
        """
        Check that the user hasn't over-defined the ModelParameters.
        First reject unknown parameter names, then run the checks
        specific to the parameterization (Cassan 2008 vs. standard).
        """
        # Make sure that there are no unwanted keys
        allowed_keys = set((
            't_0 u_0 t_E t_eff rho t_star pi_E_N pi_E_E t_0_par '
            's q alpha dalpha_dt ds_dt t_0_kep convergence_K shear_G '
            't_0_1 t_0_2 u_0_1 u_0_2 rho_1 rho_2 t_star_1 t_star_2 '
            'x_caustic_in x_caustic_out t_caustic_in t_caustic_out '
            'xi_period xi_semimajor_axis xi_inclination xi_Omega_node '
            'xi_argument_of_latitude_reference xi_eccentricity '
            'xi_omega_periapsis t_0_xi q_source').split())
        difference = set(keys) - allowed_keys
        if len(difference) > 0:
            # Give a dedicated hint for derived (read-only) parameters.
            derived_1 = ['gamma', 'gamma_perp', 'gamma_parallel']
            if set(keys).intersection(derived_1):
                msg = ('You cannot set gamma, gamma_perp, ' +
                       'or gamma_parallel. These are derived parameters. ' +
                       'You can set ds_dt and dalpha_dt instead.\n')
            else:
                msg = ""
            msg += 'Unrecognized parameters: {:}'.format(difference)
            raise KeyError(msg)

        if self._type['Cassan08']:
            self._check_valid_combination_1_source_Cassan08(keys)
        else:
            self._check_valid_combination_1_source_standard(keys)
def _check_types_for_Cassan08(self):
"""
Check if Cassan08 is used and if so, then make sure that
the trajectory is rectilinear and there is only one source.
"""
if not self._type['Cassan08']:
return
types = ['parallax', 'xallarap', 'lens 2-parameter orbital motion']
for type_ in types:
if self._type[type_]:
raise NotImplementedError(
'Currently we do not allow Cassan (2008) '
'parameterization of binary lens and ' + type_)
if self._n_sources > 1:
raise NotImplementedError(
"Cassan (2008) parameterization doesn't work for "
"multi sources models")
    def _divide_parameters(self, parameters):
        """
        Divide an input dict into each source separately.
        Parameters without a trailing source index are copied to every
        per-source dict; q_source is skipped because it is not a
        parameter of a single source.
        """
        skipped_parameters = ['q_source']

        source_parameters = []
        for i in range(self.n_sources):
            params_i = dict()
            for (key, value) in parameters.items():
                if key in skipped_parameters:
                    continue
                (head, end) = self._split_parameter_name(key)
                if end is None:
                    # Shared parameter - copied as-is.
                    params_i[key] = value
                elif end == i + 1:
                    # Source-specific parameter - index stripped.
                    params_i[head] = value
            source_parameters.append(params_i)

        # For a 2-source xallarap model the orbit of the second source
        # is re-derived from q_source.
        if self.n_sources == 2 and self._type['xallarap']:
            self._set_changed_parameters_2nd_source(parameters['q_source'], source_parameters[1])

        return source_parameters
def _set_changed_parameters_2nd_source(self, q_source, parameters_2):
"""
For xallarap model with 2 sources, the orbit of the second source
must have 2 parameters changed.
Functions starts with tests of input
"""
if q_source <= 0.:
raise ValueError('q_source cannot be negative')
check_keys = ['xi_semimajor_axis', 'xi_argument_of_latitude_reference']
for key in check_keys:
if key not in parameters_2:
raise KeyError('xallarap model with 2 sources requires ' + key)
parameters_2['xi_semimajor_axis'] /= q_source
parameters_2['xi_argument_of_latitude_reference'] += 180.
if parameters_2['xi_argument_of_latitude_reference'] > 360.:
parameters_2['xi_argument_of_latitude_reference'] -= 360.
    def __repr__(self):
        """A nice way to represent a ModelParameters object as a string"""
        out = self._get_main_parameters_to_print()
        if self.is_xallarap:
            # Also report the (fixed) reference position of the source
            # orbit, so printed output fully specifies the model.
            fmt = "\nxallarap reference position: ({:.4f}, {:.4f})"
            if self.n_sources == 1:
                source = self
            else:
                source = self._source_1_parameters
            position = source.xallarap_reference_position
            out += fmt.format(position[0, 0], position[1, 0])

        return out
    def _get_main_parameters_to_print(self):
        """
        Prepare all the standard parameters to be printed.
        Builds one header line and one value line per group; group 0
        holds parameters shared by all sources, groups 1..n_sources hold
        the per-source parameters.
        """
        keys = self._get_keys_for_repr()
        formats = self._get_formats_dict_for_repr()
        ordered_keys = self._get_ordered_keys_for_repr()

        variables = [''] * (self._n_sources + 1)
        values = [''] * (self._n_sources + 1)
        for key in ordered_keys:
            if key not in keys:
                continue
            index = self._split_parameter_name(key)[1]
            index = 0 if index is None else index  # 0 == shared parameters
            (full_name, value) = self._get_values_for_repr(formats[key], key)
            (fmt_1, fmt_2) = self._get_formats_for_repr(formats[key], full_name)
            variables[index] += fmt_1.format(full_name)
            values[index] += fmt_2.format(value)

        print_msg = ''
        for (i, variable) in enumerate(variables):
            if variable and values[i]:
                print_msg += "{:}\n{:}".format(variable, values[i])
            # Blank line between consecutive non-empty groups.
            if i < self.n_sources and variables[i+1]:
                print_msg += "\n"
        return print_msg
def _get_keys_for_repr(self):
"""
get all the keys that will be printed
"""
keys = set(self.parameters.keys())
if 'pi_E_E' in keys or 'pi_E_N' in keys:
keys |= {'t_0_par'}
if 'ds_dt' in keys or 'dalpha_dt' in keys:
keys |= {'t_0_kep'}
if self.is_xallarap:
keys |= {'t_0_xi'}
return keys
    def _get_formats_dict_for_repr(self):
        """
        Define formats that define how to print the numbers.
        Returns a *dict* of *dict*s; keys of the inner ones: 'width',
        'precision', and optional: 'unit' and 'name'.
        """
        formats = {
            't_0': {'width': 13, 'precision': 5, 'unit': 'HJD'},
            'u_0': {'width': 9, 'precision': 6},
            't_eff': {'width': 10, 'precision': 6, 'unit': 'd'},
            't_E': {'width': 10, 'precision': 4, 'unit': 'd'},
            'rho': {'width': 7, 'precision': 5},
            't_star': {'width': 13, 'precision': 6, 'unit': 'd'},
            'pi_E_N': {'width': 9, 'precision': 5},
            'pi_E_E': {'width': 9, 'precision': 5},
            't_0_par': {'width': 13, 'precision': 5, 'unit': 'HJD'},
            's': {'width': 9, 'precision': 5},
            'q': {'width': 12, 'precision': 8},
            'alpha': {'width': 11, 'precision': 5, 'unit': 'deg'},
            'convergence_K': {'width': 12, 'precision': 8},
            'shear_G': {'width': 12, 'precision': 8},
            'ds_dt': {
                'width': 11, 'precision': 5, 'unit': '/yr', 'name': 'ds/dt'},
            'dalpha_dt': {
                'width': 18, 'precision': 5, 'unit': 'deg/yr',
                'name': 'dalpha/dt'},
            't_0_kep': {'width': 13, 'precision': 5, 'unit': 'HJD'},
            'x_caustic_in': {'width': 13, 'precision': 7},
            'x_caustic_out': {'width': 13, 'precision': 7},
            't_caustic_in': {'width': 13, 'precision': 5, 'unit': 'HJD'},
            't_caustic_out': {'width': 13, 'precision': 5, 'unit': 'HJD'},
            'xi_period': {'width': 10, 'precision': 4,
                          'unit': 'd', 'name': 'xallarap period'},
            'xi_semimajor_axis': {'width': 9, 'precision': 6,
                                  'name': 'xallarap semimajor axis'},
            'xi_inclination': {'width': 11, 'precision': 5, 'unit': 'deg',
                               'name': 'xallarap inclination'},
            'xi_Omega_node': {'width': 11, 'precision': 5, 'unit': 'deg',
                              'name': 'xallarap Omega node'},
            'xi_argument_of_latitude_reference': {
                'width': 11, 'precision': 5, 'unit': 'deg',
                'name': 'xallarap argument of latitude reference'},
            'xi_eccentricity': {'width': 8, 'precision': 6,
                                'name': 'xallarap eccentricity'},
            'xi_omega_periapsis': {'width': 11, 'precision': 5, 'unit': 'deg',
                                   'name': 'xallarap omega periapsis'},
            'q_source': {'width': 12, 'precision': 8},
            't_0_xi': {'width': 13, 'precision': 5, 'unit': 'HJD'},
        }
        # Add multiple source parameters with the same settings.
        if self.n_sources > 1:
            for i in range(self.n_sources):
                for param_head in self._all_source_params_head:
                    form = formats[param_head]
                    key = '{0}_{1}'.format(param_head, i+1)
                    formats[key] = {'width': form['width'],
                                    'precision': form['precision']}
                    if 'unit' in form:
                        formats[key]['unit'] = form['unit']
                    # Per-source parameters are not expected to carry a
                    # custom display name.
                    if 'name' in form:
                        raise KeyError('internal issue: {:}'.format(key))
        return formats
def _get_ordered_keys_for_repr(self):
"""
define the default order of parameters
"""
basic_keys = ['t_0', 'u_0', 't_E', 'rho', 't_star']
additional_keys = [
'pi_E_N', 'pi_E_E', 't_0_par', 's', 'q', 'alpha',
'convergence_K', 'shear_G', 'ds_dt', 'dalpha_dt', 't_0_kep',
'x_caustic_in', 'x_caustic_out', 't_caustic_in', 't_caustic_out',
'xi_period', 'xi_semimajor_axis', 'xi_inclination',
'xi_Omega_node', 'xi_argument_of_latitude_reference',
'xi_eccentricity', 'xi_omega_periapsis', 'q_source', 't_0_xi'
]
ordered_keys = []
if self.n_sources > 1:
for param_head in basic_keys:
if param_head == 't_E':
ordered_keys.append(param_head)
else:
for i in range(self.n_sources):
ordered_keys.append('{0}_{1}'.format(param_head, i + 1))
else:
ordered_keys = basic_keys
for key in additional_keys:
ordered_keys.append(key)
return ordered_keys
def _get_values_for_repr(self, form, key):
"""
Get full name of the parameter and its value (float)
to be used by __rerp__().
"""
full_name = form.get('name', key)
if 'unit' in form:
full_name += " ({:})".format(form['unit'])
value = getattr(self, key)
return (full_name, value)
def _get_formats_for_repr(self, form, full_name):
"""
Extract formats to be used by __repr__().
"""
fmt_1 = '{:>' + str(max([form['width'], len(full_name)]))
fmt_2 = fmt_1 + '.' + str(form['precision']) + 'f} '
fmt_1 += '} '
return (fmt_1, fmt_2)
def _check_valid_combination_of_sources(self, keys):
"""
make sure that there is no conflict between t_0 and t_0_1 etc.
Also make sure that t_0 and u_0 are defined for all sources.
"""
self._check_for_incompatible_source_parameters(keys)
self._check_for_missing_source_parameters(keys)
self._check_for_extra_source_parameters(keys)
    def _check_for_incompatible_source_parameters(self, keys):
        """
        Make sure that there is no conflict between shared and
        per-source parameter names (e.g. t_0 and t_0_1), that
        finite-source parameters carry an explicit source index, and
        that the parameters are compatible with xallarap (if used).
        """
        for parameter in self._primary_source_params_head:
            if parameter in keys:
                # conflict between t_0 and t_0_1
                for i in range(self.n_sources):
                    if '{0}_{1}'.format(parameter, i+1) in keys:
                        msg = 'You cannot set both {:} and {:}'
                        raise KeyError(msg.format(parameter, '{0}_{1}'.format(parameter, i+1)))

        # Finite-source parameters (e.g. rho) must specify their source.
        for parameter in self._finite_source_params_head:
            if parameter in keys:
                raise KeyError('You must specify which source {0} goes with'.format(parameter))

        if self.is_xallarap:
            self._check_for_parameters_incompatible_with_xallarap(keys)
def _check_for_parameters_incompatible_with_xallarap(self, keys):
"""
Check for additional source parameters with xallarap (bad).
"""
for parameter in keys:
try:
key_num = parameter.split('_')[-1]
if ((int(key_num) > 1) and (parameter[0:-len(key_num) - 1] in self._primary_source_params_head)):
msg = 'xallarap parameters cannot be mixed with {:}'
raise NotImplementedError(msg.format(parameter))
except ValueError:
pass
    def _check_for_missing_source_parameters(self, keys):
        """
        Make sure that t_0 and u_0 (or t_eff) are defined for all
        sources. When q_source is given, these checks are skipped.
        """
        if (self.n_sources > 1) and ('q_source' not in keys):
            for i in range(self.n_sources):
                if 't_0_{0}'.format(i + 1) not in keys:
                    raise KeyError(
                        't_0_{0} is missing from parameters.'.format(i+1) +
                        'Your parameters: {0}'.format(keys))
            for i in range(self.n_sources):
                if ('u_0_{0}'.format(i + 1) not in keys) and ('t_eff_{0}'.format(i + 1) not in keys):
                    raise KeyError(
                        'Either u_0_{0} or t_eff_{0} must be specified.'.format(i+1) +
                        'Your parameters: {0}'.format(keys))
def _check_for_extra_source_parameters(self, keys):
"""
Check if parameters have been set for sources that don't exist.
"""
for key in keys:
key_parts = key.split('_')
if len(key_parts) > 1:
try:
if int(key_parts[1]) > self.n_sources:
raise KeyError(
'{0} is defined but there are only '.format(key) +
'{0} sources.'.format(self.n_sources))
except ValueError:
pass
    def _check_valid_combination_1_source_standard(self, keys):
        """
        Here we check parameters for non-Cassan08 parameterization.
        Each helper below validates one group of related parameters.
        """
        self._check_valid_combination_1_source_t_0_u_0(keys)
        self._check_valid_combination_1_source_t_E(keys)
        self._check_valid_combination_1_source_parallax(keys)
        self._check_valid_combination_1_source_mass_sheet(keys)
        self._check_valid_combination_1_source_binary_lens(keys)
        self._check_valid_combination_1_source_xallarap(keys)
def _check_valid_combination_1_source_t_0_u_0(self, keys):
"""
Make sure that t_0 and u_0 are defined.
"""
if 't_0' not in keys:
raise KeyError('t_0 must be defined')
if ('u_0' not in keys) and ('t_eff' not in keys):
raise KeyError('not enough information to calculate u_0')
def _check_valid_combination_1_source_t_E(self, keys):
"""
Make sure that t_E is defined and that it's not overdefined.
"""
if (('t_E' not in keys) and
(('u_0' not in keys) or ('t_eff' not in keys)) and
(('rho' not in keys) or ('t_star' not in keys))):
raise KeyError('not enough information to calculate t_E')
if (('rho' in keys) and ('t_star' in keys) and ('u_0' in keys) and
('t_eff' in keys)):
raise KeyError('You cannot define rho, t_star, u_0, and t_eff')
if ('t_E' in keys) and ('rho' in keys) and ('t_star' in keys):
raise KeyError('Only 1 or 2 of (t_E, rho, t_star) may be defined.')
if ('t_E' in keys) and ('u_0' in keys) and ('t_eff' in keys):
raise KeyError('Only 1 or 2 of (u_0, t_E, t_eff) may be defined.')
def _check_valid_combination_1_source_parallax(self, keys):
"""
Here we check parallax parameters for non-Cassan08 parameterization.
"""
# If parallax is defined, then both components must be set:
if ('pi_E_N' in keys) != ('pi_E_E' in keys):
raise KeyError(
'You have to define either both or none of (pi_E_N, pi_E_E).')
# t_0_par makes sense only when parallax is defined.
if 't_0_par' in keys and not self._type['parallax']:
raise KeyError('t_0_par makes sense only when parallax is defined')
# Parallax needs reference epoch:
if 'pi_E_N' in keys:
if 't_0' not in keys and 't_0_par' not in keys:
raise KeyError(
'Parallax is defined, hence either t_0 or t_0_par has ' +
'to be set.')
def _check_valid_combination_1_source_mass_sheet(self, keys):
"""
Make sure that alpha is defined if shear_G is defined,
but not if only convergence_K is defined.
"""
if ('shear_G' in keys) and ('alpha' not in keys):
raise KeyError(
'A model with external mass sheet shear requires alpha.')
if ('shear_G' not in keys) and ('convergence_K' in keys):
if 'alpha' in keys:
raise KeyError(
'A model with external mass sheet convergence and '
'no shear cannot have alpha defined.')
def _check_valid_combination_1_source_binary_lens(self, keys):
"""
Here we check binary lens parameters for non-Cassan08 parameterization.
"""
# s, q, and alpha must all be defined if s or q are defined
if ('s' in keys) or ('q' in keys):
if (('s' not in keys) or
('q' not in keys) or ('alpha' not in keys)):
raise KeyError(
'A binary model requires all three of (s, q, alpha).')
# If ds_dt is defined, dalpha_dt must be defined
if ('ds_dt' in keys) or ('dalpha_dt' in keys):
if ('ds_dt' not in keys) or ('dalpha_dt' not in keys):
raise KeyError(
'Lens orbital motion requires both ds_dt and dalpha_dt.' +
'\nNote that you can set either of them to 0.')
# If orbital motion is defined, then reference epoch has to be set.
if 't_0' not in keys and 't_0_kep' not in keys:
raise KeyError(
'Orbital motion requires reference epoch, ' +
'i.e., t_0 or t_0_kep')
# t_0_kep makes sense only when orbital motion is defined.
if 't_0_kep' in keys:
if 'ds_dt' not in keys or 'dalpha_dt' not in keys:
raise KeyError(
't_0_kep makes sense only when orbital motion is defined.')
def _check_valid_combination_1_source_xallarap(self, keys):
"""
If xallarap parameters are defined,
then make sure there are all required parameters
"""
if not self._type['xallarap']:
return
self._check_orbit_parameters(keys, "xi_")
def _check_orbit_parameters(self, keys, prefix):
"""
check if orbit is properly defined; prefix is added to
checked orbit parameters
"""
required = ('period semimajor_axis inclination Omega_node argument_of_latitude_reference').split()
required = [prefix + req for req in required]
for parameter in required:
if parameter not in keys:
raise KeyError(parameter)
allowed = set([prefix + 'eccentricity', prefix + 'omega_periapsis'])
n_used = len(set(keys).intersection(allowed))
if n_used not in [0, len(allowed)]:
raise KeyError(
'Error in defining ' + prefix + 'eccentricity and ' +
prefix + 'omega_periapsis. ' +
'Both of them or neither should be defined.')
def _check_valid_combination_1_source_Cassan08(self, keys):
"""
Check parameters defined for Cassan 2008 parameterization.
Currently, only static models are accepted.
"""
# Check that all required parameters are defined.
parameters = ['s', 'q', 'x_caustic_in', 'x_caustic_out',
't_caustic_in', 't_caustic_out']
for parameter in parameters:
if parameter not in keys:
raise KeyError(
'If you use Cassan 2008 parameterization, then all ' +
'these parameters have to be defined:\n' +
' \n'.join(parameters))
# Source size cannot be over-defined.
if ('rho' in keys) and ('t_star' in keys):
raise KeyError('Both rho and t_star cannot be defined for ' +
'Cassan 08 parameterization.')
def _check_valid_parameter_values(self, parameters):
"""
Prevent user from setting negative (unphysical) values for
t_E, t_star, rho etc. Shear_G should be complex.
Also, check that all values are scalars.
"""
full_names = {
't_E': 'Einstein timescale', 't_star': 'Source crossing time',
'rho': 'Source size', 's': 'separation'}
for (name, full) in full_names.items():
if name in parameters.keys():
if parameters[name] < 0.:
fmt = "{:} cannot be negative: {:}"
raise ValueError(fmt.format(full, parameters[name]))
for (key, value) in parameters.items():
if not np.isscalar(value) or isinstance(value, str):
msg = "{:} must be a scalar: {:}, {:}"
raise TypeError(msg.format(key, value, type(value)))
for name in ['x_caustic_in', 'x_caustic_out']:
if name in parameters.keys():
if parameters[name] < 0. or parameters[name] > 1.:
msg = "Parameter {:} has to be in [0, 1] range, not {:}"
raise ValueError(msg.format(name, parameters[name]))
for name in ['q']:
if name in parameters.keys():
if parameters[name] <= 0.:
msg = "Parameter {:} has to be larger than 0, not {:}"
raise ValueError(msg.format(name, parameters[name]))
for name in ['xi_eccentricity']:
if name in parameters.keys():
if parameters[name] < 0. or parameters[name] >= 1.:
msg = "Parameter {:} has to be in [0, 1) range, not {:}"
raise ValueError(msg.format(name, parameters[name]))
if 'shear_G' in parameters.keys():
if not isinstance(parameters['shear_G'], complex):
raise TypeError("External shear (shear_G) must be complex")
    def _set_parameters(self, parameters):
        """
        Check if parameter values make sense and remember a copy of the
        dict, so later external changes to the input do not affect this
        object.
        """
        self._check_valid_parameter_values(parameters)
        self.parameters = dict(parameters)
def _update_sources(self, parameter, value):
"""
For multi-source models, update the values for all sources.
Note that pi_E_N and pi_E_E are changed separately.
"""
if self.n_sources == 1:
return
for i in range(self.n_sources):
source = self.__getattr__('_source_{0}_parameters'.format(i+1))
try:
source.__getattr__(parameter)
source.__setattr__(parameter, value)
except KeyError:
continue
if self.is_xallarap:
if parameter == 'q_source':
value_ = self.parameters['xi_semimajor_axis'] / value
setattr(self._source_2_parameters, 'xi_semimajor_axis', value_)
elif parameter == 'xi_semimajor_axis':
value /= self.parameters['q_source']
setattr(self._source_2_parameters, parameter, value)
elif parameter == 'xi_argument_of_latitude_reference':
value += 180.
setattr(self._source_2_parameters, parameter, value)
self._update_sources_xallarap_reference()
    def _get_uniform_caustic_sampling(self):
        """
        Sets self._uniform_caustic if that is required.
        Also resets self._standard_parameters.
        The cached object is re-created only when s or q changed.
        """
        recalculate = (self._uniform_caustic is None or
                       self.s != self._uniform_caustic.s or
                       self.q != self._uniform_caustic.q)
        if recalculate:
            self._uniform_caustic = UniformCausticSampling(s=self.s, q=self.q)
            self._standard_parameters = None
    def _get_standard_parameters_from_Cassan08(self):
        """
        Calculate these parameters:
        t_0 u_0 t_E alpha
        based on:
        x_caustic_in x_caustic_out t_caustic_in t_caustic_out
        using transformation that depends on:
        s q
        The result is cached in self._standard_parameters.
        """
        self._get_uniform_caustic_sampling()

        if self._standard_parameters is None:
            keys = ['x_caustic_in', 'x_caustic_out',
                    't_caustic_in', 't_caustic_out']
            kwargs = {key: self.parameters[key] for key in keys}
            self._standard_parameters = (
                self._uniform_caustic.get_standard_parameters(**kwargs))
    @property
    def t_0(self):
        """
        *float*

        The time of minimum projected separation between the source
        and the lens center of mass.
        """
        if self._type['Cassan08']:
            # Derived from Cassan (2008) parameters; cached internally.
            self._get_standard_parameters_from_Cassan08()
            return self._standard_parameters['t_0']

        return self.parameters['t_0']

    @t_0.setter
    def t_0(self, new_t_0):
        # t_0 is a derived quantity in the Cassan (2008) parameterization.
        if self._type['Cassan08']:
            raise ValueError('t_0 cannot be set for model using ' +
                             'Cassan (2008) parameterization')

        self.parameters['t_0'] = new_t_0
        self._update_sources('t_0', new_t_0)
    @property
    def u_0(self):
        """
        *float*

        The minimum projected separation between the source
        and the lens center of mass.
        """
        if self._type['Cassan08']:
            self._get_standard_parameters_from_Cassan08()
            return self._standard_parameters['u_0']

        if 'u_0' in self.parameters.keys():
            return self.parameters['u_0']
        else:
            try:
                # Fall back on u_0 = t_eff / t_E.
                u_0_quantity = self.parameters['t_eff'] / self.parameters['t_E']
                return u_0_quantity
            except KeyError:
                raise AttributeError('u_0 is not defined for these parameters: {0}'.format(self.parameters.keys()))

    @u_0.setter
    def u_0(self, new_u_0):
        if self._type['Cassan08']:
            raise ValueError('u_0 cannot be set for model using ' +
                             'Cassan (2008) parameterization')

        if 'u_0' in self.parameters.keys():
            self.parameters['u_0'] = new_u_0
            self._update_sources('u_0', new_u_0)
        else:
            raise AttributeError('u_0 is not a parameter of this model.')
    @property
    def t_star(self):
        """
        *float*

        t_star = rho * t_E = source radius crossing time in days
        """
        if 't_star' in self.parameters.keys():
            return self.parameters['t_star']
        elif ('rho' in self.parameters.keys() and self._type['Cassan08']):
            # Here t_E is derived from the Cassan (2008) parameters.
            return self.rho * self.t_E
        else:
            try:
                return (self.parameters['t_E'] * self.parameters['rho'])
            except KeyError:
                raise AttributeError(
                    't_star is not defined for these parameters: {0}'.format(
                        self.parameters.keys()))
@t_star.setter
def t_star(self, new_t_star):
if 't_star' in self.parameters.keys():
self.parameters['t_star'] = new_t_star
self._update_sources('t_star', new_t_star)
else:
raise AttributeError('t_star is not a parameter of this model.')
if new_t_star < 0.:
raise ValueError(
'Source crossing time cannot be negative:', new_t_star)
    @property
    def t_eff(self):
        """
        *float*

        t_eff = u_0 * t_E = effective timescale in days
        """
        if 't_eff' in self.parameters.keys():
            return self.parameters['t_eff']
        else:
            try:
                # Fall back on t_eff = t_E * u_0.
                return (self.parameters['t_E'] * self.parameters['u_0'])
            except KeyError:
                raise AttributeError(
                    't_eff is not defined for these parameters: {0}'.format(
                        self.parameters.keys()))

    @t_eff.setter
    def t_eff(self, new_t_eff):
        if 't_eff' in self.parameters.keys():
            self.parameters['t_eff'] = new_t_eff
            self._update_sources('t_eff', new_t_eff)
        else:
            raise AttributeError('t_eff is not a parameter of this model.')
    @property
    def t_E(self):
        """
        *float*

        The Einstein timescale in days.
        """
        if self._type['Cassan08']:
            self._get_standard_parameters_from_Cassan08()
            return self._standard_parameters['t_E']

        if 't_E' in self.parameters.keys():
            return self.parameters['t_E']
        elif ('t_star' in self.parameters.keys() and
                'rho' in self.parameters.keys()):
            return self.t_star / self.rho
        elif ('t_eff' in self.parameters.keys() and
                'u_0' in self.parameters.keys()):
            # abs() because u_0 may be negative.
            return self.t_eff / abs(self.u_0)
        else:
            raise AttributeError("You're trying to access t_E that was not set")

    @t_E.setter
    def t_E(self, new_t_E):
        if self._type['Cassan08']:
            raise ValueError('t_E cannot be set for model using ' +
                             'Cassan (2008) parameterization')

        if new_t_E is None:
            raise ValueError('Must provide a value')

        if new_t_E < 0.:
            raise ValueError('Einstein timescale cannot be negative:', new_t_E)

        if 't_E' in self.parameters.keys():
            self.parameters['t_E'] = new_t_E
            self._update_sources('t_E', new_t_E)
        else:
            raise AttributeError('t_E is not a parameter of this model.')
    @property
    def rho(self):
        """
        *float*

        source size as a fraction of the Einstein radius
        """
        if 'rho' in self.parameters.keys():
            return self.parameters['rho']
        elif 't_star' in self.parameters.keys() and 't_E' in self.parameters.keys():
            return self.t_star / self.t_E
        elif 't_star' in self.parameters.keys() and self._type['Cassan08']:
            # Here t_E is derived from the Cassan (2008) parameters.
            return self.t_star / self.t_E
        else:
            raise AttributeError("rho is not defined and cannot be calculated")

    @rho.setter
    def rho(self, new_rho):
        if 'rho' in self.parameters.keys():
            # Validate before storing.
            if new_rho < 0.:
                raise ValueError('source size (rho) cannot be negative')
            self.parameters['rho'] = new_rho
            self._update_sources('rho', new_rho)
        else:
            raise AttributeError('rho is not a parameter of this model.')
    @property
    def alpha(self):
        """
        *float*

        The angle of the source trajectory relative to the binary lens
        axis (or primary-secondary axis). Measured counterclockwise,
        i.e., according to convention advocated by
        `Skowron et al. 2011 (ApJ, 738, 87) <https://ui.adsabs.harvard.edu/abs/2011ApJ...738...87S/abstract>`_,
        but shifted by 180 deg.
        """
        if self._type['Cassan08']:
            self._get_standard_parameters_from_Cassan08()
            return self._standard_parameters['alpha']

        return self.parameters['alpha']

    @alpha.setter
    def alpha(self, new_alpha):
        # alpha is a derived quantity in Cassan (2008) parameterization.
        if self._type['Cassan08']:
            raise ValueError('alpha cannot be set for model using Cassan (2008) parameterization')

        self.parameters['alpha'] = new_alpha
        self._update_sources('alpha', new_alpha)
    @property
    def q(self):
        """
        *float*

        mass ratio of the two lens components. Only 2 bodies allowed.
        """
        return self.parameters['q']

    @q.setter
    def q(self, new_q):
        # Validate before storing.
        if new_q <= 0.:
            raise ValueError('mass ratio q has to be larger than 0')
        self.parameters['q'] = new_q
        self._update_sources('q', new_q)
    @property
    def convergence_K(self):
        """
        *float*

        Convergence of external mass sheet.
        """
        if 'convergence_K' in self.parameters.keys():
            return self.parameters['convergence_K']
        else:
            raise AttributeError('convergence_K is not a parameter of this model.')

    @convergence_K.setter
    def convergence_K(self, new_K):
        if 'convergence_K' in self.parameters.keys():
            self.parameters['convergence_K'] = new_K
            self._update_sources('convergence_K', new_K)
        else:
            raise AttributeError('convergence_K is not a parameter of this model.')
    @property
    def shear_G(self):
        """
        *complex*

        Shear of external mass sheet.
        """
        if 'shear_G' in self.parameters.keys():
            return self.parameters['shear_G']
        else:
            raise AttributeError('shear_G is not a parameter of this model.')

    @shear_G.setter
    def shear_G(self, new_G):
        if 'shear_G' in self.parameters.keys():
            self.parameters['shear_G'] = new_G
            self._update_sources('shear_G', new_G)
        else:
            raise AttributeError('shear_G is not a parameter of this model.')
    @property
    def s(self):
        """
        *float*

        separation of the two lens components relative to Einstein ring size
        """
        return self.parameters['s']

    @s.setter
    def s(self, new_s):
        # Validate before storing.
        if new_s < 0.:
            raise ValueError(
                'Binary lens separation cannot be negative:', new_s)
        self.parameters['s'] = new_s
        self._update_sources('s', new_s)
    @property
    def pi_E_N(self):
        """
        *float*

        The North component of the microlensing parallax vector.
        """
        if 'pi_E_N' in self.parameters.keys():
            return self.parameters['pi_E_N']
        elif 'pi_E' in self.parameters.keys():
            # NOTE(review): 'pi_E' is not in the allowed-keys list and the
            # pi_E property raises - this branch looks like a legacy
            # leftover; confirm before removing.
            return self.parameters['pi_E'][0]
        else:
            raise AttributeError('pi_E_N not defined for this model')

    @pi_E_N.setter
    def pi_E_N(self, new_value):
        if 'pi_E_N' in self.parameters.keys():
            self.parameters['pi_E_N'] = new_value
            self._update_sources('pi_E_N', new_value)
        else:
            raise AttributeError('pi_E_N is not a parameter of this model.')
@property
def pi_E_E(self):
"""
*float*
The East component of the microlensing parallax vector.
"""
if 'pi_E_E' in self.parameters.keys():
return self.parameters['pi_E_E']
else:
raise AttributeError('pi_E_N not defined for this model')
    @pi_E_E.setter
    def pi_E_E(self, new_value):
        # Only allowed if the model was defined with pi_E_E.
        if 'pi_E_E' in self.parameters.keys():
            self.parameters['pi_E_E'] = new_value
            self._update_sources('pi_E_E', new_value)
        else:
            raise AttributeError('pi_E_E is not a parameter of this model.')
    @property
    def pi_E(self):
        """
        Not defined.
        It was used in previous versions. Use :py:attr:`~pi_E_N` and :py:attr:`~pi_E_E` instead.
        """
        raise AttributeError('pi_E is not defined. Use pi_E_N and pi_E_E instead')

    @pi_E.setter
    def pi_E(self, new_value):
        # Deliberately disabled - see the property docstring.
        raise AttributeError('pi_E is not defined. Use pi_E_N and pi_E_E instead')
    @property
    def t_0_par(self):
        """
        *float*

        The reference time for the calculation of parallax. If not set
        explicitly, then it is assumed t_0_par = t_0. If there are multiple sources,
        t_0_1 is used.

        Note that this is a reference value and not the fitting parameter.
        It is best to fix it at the begin of calculations.
        """
        if 't_0_par' not in self.parameters.keys():
            # Fall back on t_0_kep, t_0, or t_0_1 - in this order.
            if 't_0_kep' in self.parameters.keys():
                return self.parameters['t_0_kep']
            elif 't_0' in self.parameters.keys():
                return self.parameters['t_0']
            elif self.n_sources > 1:
                return self.t_0_1
            else:
                raise AttributeError('No valid value for setting t_0_par', self.parameters)
        else:
            return self.parameters['t_0_par']

    @t_0_par.setter
    def t_0_par(self, new_t_0_par):
        self.parameters['t_0_par'] = new_t_0_par
        self._update_sources('t_0_par', new_t_0_par)
@property
def pi_E_mag(self):
"""
*float*
The magnitude of the microlensing parallax vector.
"""
if 'pi_E_N' in self.parameters.keys() and 'pi_E_E' in self.parameters.keys():
pi_E_N = self.parameters['pi_E_N']
pi_E_E = self.parameters['pi_E_E']
else:
raise AttributeError('pi_E not defined for this model')
return np.sqrt(pi_E_N**2 + pi_E_E**2)
    @property
    def x_caustic_in(self):
        """
        *float*

        Curvelinear coordinate (in `Cassan (2008) parameterization
        <https://ui.adsabs.harvard.edu/abs/2008A%26A...491..587C/abstract>`_)
        of caustic entrance for a static binary lens model. See
        :py:class:`~MulensModel.uniformcausticsampling.UniformCausticSampling`.
        """
        return self.parameters['x_caustic_in']

    @x_caustic_in.setter
    def x_caustic_in(self, new_value):
        if new_value < 0. or new_value > 1.:
            msg = "x_caustic_in must be between 0 and 1, not {:}"
            raise ValueError(msg.format(new_value))
        # Invalidate cached standard parameters (t_0, u_0, t_E, alpha).
        self._standard_parameters = None
        self.parameters['x_caustic_in'] = new_value
    @property
    def x_caustic_out(self):
        """
        *float*

        Curvelinear coordinate (in `Cassan (2008) parameterization
        <https://ui.adsabs.harvard.edu/abs/2008A%26A...491..587C/abstract>`_)
        of caustic exit for a static binary lens model. See
        :py:class:`~MulensModel.uniformcausticsampling.UniformCausticSampling`.
        """
        return self.parameters['x_caustic_out']

    @x_caustic_out.setter
    def x_caustic_out(self, new_value):
        if new_value < 0. or new_value > 1.:
            msg = "x_caustic_out must be between 0 and 1, not {:}"
            raise ValueError(msg.format(new_value))
        # Invalidate cached standard parameters (t_0, u_0, t_E, alpha).
        self._standard_parameters = None
        self.parameters['x_caustic_out'] = new_value
    @property
    def t_caustic_in(self):
        """
        *float*

        Epoch of caustic entrance for a static binary lens model in
        `Cassan (2008) parameterization
        <https://ui.adsabs.harvard.edu/abs/2008A%26A...491..587C/abstract>`_)
        See
        :py:class:`~MulensModel.uniformcausticsampling.UniformCausticSampling`.
        """
        return self.parameters['t_caustic_in']

    @t_caustic_in.setter
    def t_caustic_in(self, new_value):
        # Invalidate cached standard parameters (t_0, u_0, t_E, alpha).
        self._standard_parameters = None
        self.parameters['t_caustic_in'] = new_value
    @property
    def t_caustic_out(self):
        """
        *float*

        Epoch of caustic exit for a static binary lens model in
        `Cassan (2008) parameterization
        <https://ui.adsabs.harvard.edu/abs/2008A%26A...491..587C/abstract>`_)
        See
        :py:class:`~MulensModel.uniformcausticsampling.UniformCausticSampling`.
        """
        return self.parameters['t_caustic_out']

    @t_caustic_out.setter
    def t_caustic_out(self, new_value):
        # Invalidate cached standard parameters (t_0, u_0, t_E, alpha).
        self._standard_parameters = None
        self.parameters['t_caustic_out'] = new_value
@property
def ds_dt(self):
    """
    *float*

    Change rate of separation :py:attr:`~s` in 1/year.
    """
    return self.parameters['ds_dt']

@ds_dt.setter
def ds_dt(self, new_ds_dt):
    key = 'ds_dt'
    self.parameters[key] = new_ds_dt
    self._update_sources(key, new_ds_dt)

@property
def dalpha_dt(self):
    """
    *float*

    Change rate of angle :py:attr:`~alpha` in deg/year.
    """
    return self.parameters['dalpha_dt']

@dalpha_dt.setter
def dalpha_dt(self, new_dalpha_dt):
    key = 'dalpha_dt'
    self.parameters[key] = new_dalpha_dt
    self._update_sources(key, new_dalpha_dt)
@property
def t_0_kep(self):
    """
    *float*

    The reference time for the calculation of lens orbital motion.
    If not set explicitly, it falls back to *t_0_par*, then *t_0*
    (or *t_0_1* for multi-source models), in that order.

    Note that this is a reference value and not the fitting parameter.
    It is best to fix it at the begin of calculations.
    """
    # Fallback chain when t_0_kep was not set explicitly:
    # t_0_par -> t_0 -> t_0_1 (multi-source models).
    if 't_0_kep' not in self.parameters.keys():
        if 't_0_par' in self.parameters.keys():
            return self.parameters['t_0_par']
        elif 't_0' in self.parameters.keys():
            return self.parameters['t_0']
        elif self.n_sources > 1:
            return self.t_0_1
        # NOTE(review): if none of the fallbacks applies, this getter
        # falls through and implicitly returns None -- confirm intended.
    else:
        return self.parameters['t_0_kep']

@t_0_kep.setter
def t_0_kep(self, new):
    self.parameters['t_0_kep'] = new
    # Presumably keeps per-source parameter objects in sync
    # (name suggests so) -- verify against _update_sources().
    self._update_sources('t_0_kep', new)
@property
def xi_period(self):
    """
    *float*

    Orbital period of the source system (xallarap) in days.
    """
    return self.parameters['xi_period']

@xi_period.setter
def xi_period(self, new_value):
    # A period below zero is unphysical.
    if new_value < 0.:
        raise ValueError('Xallarap period cannot be negative')
    key = 'xi_period'
    self.parameters[key] = new_value
    self._update_sources(key, new_value)

@property
def xi_semimajor_axis(self):
    """
    *float*

    Semi-major axis of the source orbit (xallarap) in the theta_E units.
    """
    return self.parameters['xi_semimajor_axis']

@xi_semimajor_axis.setter
def xi_semimajor_axis(self, new_value):
    # A semi-major axis below zero is unphysical.
    if new_value < 0.:
        raise ValueError('Xallarap semimajor axis cannot be negative')
    key = 'xi_semimajor_axis'
    self.parameters[key] = new_value
    self._update_sources(key, new_value)
@property
def xi_Omega_node(self):
    """
    *float*

    The longitude of the ascending node of the xallarap orbit, i.e.,
    the angle from the relative lens-source proper motion direction
    to the ascending node direction.
    The units are degrees.
    """
    return self.parameters['xi_Omega_node']

@xi_Omega_node.setter
def xi_Omega_node(self, new_value):
    key = 'xi_Omega_node'
    self.parameters[key] = new_value
    self._update_sources(key, new_value)

@property
def xi_inclination(self):
    """
    *float*

    The inclination of the xallarap orbit, i.e.,
    the angle between the source-orbit plane and the sky plane.
    The units are degrees.
    """
    return self.parameters['xi_inclination']

@xi_inclination.setter
def xi_inclination(self, new_value):
    key = 'xi_inclination'
    self.parameters[key] = new_value
    self._update_sources(key, new_value)

@property
def xi_argument_of_latitude_reference(self):
    """
    *float*

    The argument of latitude for the xallarap orbit at :py:attr:`~t_0_xi`.
    The argument of latitude is the sum of the true anomaly and
    the argument of periapsis. In standard notation: u = nu + omega.
    This parameter is internally used to calculate the perihelion passage
    (T_0 in standard notation).
    The units are degrees.
    """
    return self.parameters['xi_argument_of_latitude_reference']

@xi_argument_of_latitude_reference.setter
def xi_argument_of_latitude_reference(self, new_value):
    key = 'xi_argument_of_latitude_reference'
    self.parameters[key] = new_value
    self._update_sources(key, new_value)
@property
def xi_eccentricity(self):
    """
    *float*

    The eccentricity of the xallarap orbit. Has to be in [0, 1) range.
    """
    return self.parameters['xi_eccentricity']

@xi_eccentricity.setter
def xi_eccentricity(self, new_value):
    # Only bound orbits with e in [0, 1) are supported.
    if new_value >= 1. or new_value < 0.:
        raise ValueError('xallarap eccentricity has to be between 0 and 1')
    key = 'xi_eccentricity'
    self.parameters[key] = new_value
    self._update_sources(key, new_value)

@property
def xi_omega_periapsis(self):
    """
    *float*

    The argument of periapsis of the xallarap orbit, i.e., the angle
    between the ascending node and the periapsis measured in
    the direction of motion.
    The units are degrees.
    """
    return self.parameters['xi_omega_periapsis']

@xi_omega_periapsis.setter
def xi_omega_periapsis(self, new_value):
    key = 'xi_omega_periapsis'
    self.parameters[key] = new_value
    self._update_sources(key, new_value)

@property
def t_0_xi(self):
    """
    *float*

    Reference epoch for the xallarap orbit.
    If not provided, then it defaults to :py:attr:`~t_0`.
    """
    if 't_0_xi' in self.parameters.keys():
        return self.parameters['t_0_xi']
    return self.parameters['t_0']

@t_0_xi.setter
def t_0_xi(self, new_value):
    key = 't_0_xi'
    self.parameters[key] = new_value
    self._update_sources(key, new_value)
@property
def q_source(self):
    """
    *float*

    The mass ratio of the second and the first source.
    This value must be positive and can be > 1.
    Defined only for xallarap binary-source models because it does not
    affect the magnification for binary-source models without xallarap.
    """
    return self.parameters['q_source']

@q_source.setter
def q_source(self, new_value):
    # Mass ratios are non-negative by definition.
    if new_value < 0.:
        raise ValueError('q_source cannot be negative')
    key = 'q_source'
    self.parameters[key] = new_value
    self._update_sources(key, new_value)

@property
def xallarap_reference_position(self):
    """
    *np.ndarray* of shape (2, 1)

    The position of the first source at :py:attr:`~t_0_xi` relative to
    the source center of mass. It is a 2D vector that is subtracted from
    the source position along the orbit in order to calculate the shift
    caused by xallarap.
    """
    return self._xallarap_reference_position
@property
def t_0_1(self):
    """
    *float*

    The time of minimum projected separation between the source no. 1
    and the lens center of mass.
    """
    return self.parameters['t_0_1']

@t_0_1.setter
def t_0_1(self, new_t_0_1):
    # Update both the joint parameter dict and the per-source object.
    self.parameters['t_0_1'] = self._source_1_parameters.t_0 = new_t_0_1

@property
def t_0_2(self):
    """
    *float*

    The time of minimum projected separation between the source no. 2
    and the lens center of mass.
    """
    return self.parameters['t_0_2']

@t_0_2.setter
def t_0_2(self, new_t_0_2):
    # Update both the joint parameter dict and the per-source object.
    self.parameters['t_0_2'] = self._source_2_parameters.t_0 = new_t_0_2
@property
def u_0_1(self):
    """
    *float*

    The minimum projected separation between the source no. 1
    and the lens center of mass.
    """
    if 'u_0_1' in self.parameters.keys():
        return self.parameters['u_0_1']
    # Fall back to t_eff / t_E of source no. 1 when u_0_1 is not given.
    source = self._source_1_parameters.parameters
    try:
        return source['t_eff'] / source['t_E']
    except KeyError:
        raise AttributeError(
            'u_0_1 is not defined for these parameters: {0}'.format(
                self.parameters.keys()))

@u_0_1.setter
def u_0_1(self, new_u_0_1):
    if 'u_0_1' not in self.parameters.keys():
        raise AttributeError('u_0_1 is not a parameter of this model.')
    self.parameters['u_0_1'] = new_u_0_1
    self._source_1_parameters.u_0 = new_u_0_1

@property
def u_0_2(self):
    """
    *float*

    The minimum projected separation between the source no. 2
    and the lens center of mass.
    """
    if 'u_0_2' in self.parameters.keys():
        return self.parameters['u_0_2']
    # Fall back to t_eff / t_E of source no. 2 when u_0_2 is not given.
    source = self._source_2_parameters.parameters
    try:
        return source['t_eff'] / source['t_E']
    except KeyError:
        raise AttributeError(
            'u_0_2 is not defined for these parameters: {0}'.format(
                self.parameters.keys()))

@u_0_2.setter
def u_0_2(self, new_u_0_2):
    if 'u_0_2' not in self.parameters.keys():
        raise AttributeError('u_0_2 is not a parameter of this model.')
    self.parameters['u_0_2'] = new_u_0_2
    self._source_2_parameters.u_0 = new_u_0_2
@property
def t_star_1(self):
    """
    *float*

    t_star_1 = rho_1 * t_E_1 = source no. 1 radius crossing time in days.

    Raises AttributeError if neither *t_star_1* nor both *t_E* and *rho*
    of source no. 1 are defined.
    """
    if 't_star_1' in self.parameters.keys():
        return self.parameters['t_star_1']
    else:
        try:
            t_E = self._source_1_parameters.parameters['t_E']
            rho = self._source_1_parameters.parameters['rho']
            return t_E * rho
        except KeyError:
            raise AttributeError(
                't_star_1 is not defined for these parameters: {0}'.format(
                    self.parameters.keys()))

@t_star_1.setter
def t_star_1(self, new_t_star_1):
    if 't_star_1' in self.parameters.keys():
        # Fix: validate BEFORE mutating state. Previously the
        # negative-value check ran after the assignments, so the
        # parameters dict and source no. 1 were already updated when
        # the ValueError was raised, leaving inconsistent state.
        if new_t_star_1 < 0.:
            raise ValueError(
                'Source crossing time cannot be negative:', new_t_star_1)
        self.parameters['t_star_1'] = new_t_star_1
        self._source_1_parameters.t_star = new_t_star_1
    else:
        raise AttributeError('t_star_1 is not a parameter of this model.')
@property
def t_star_2(self):
    """
    *float*

    t_star_2 = rho_2 * t_E_2 = source no. 2 radius crossing time in days.

    Raises AttributeError if neither *t_star_2* nor both *t_E* and *rho*
    of source no. 2 are defined.
    """
    if 't_star_2' in self.parameters.keys():
        return self.parameters['t_star_2']
    else:
        try:
            t_E = self._source_2_parameters.parameters['t_E']
            rho = self._source_2_parameters.parameters['rho']
            return t_E * rho
        except KeyError:
            raise AttributeError(
                't_star_2 is not defined for these parameters: {0}'.format(
                    self.parameters.keys()))

@t_star_2.setter
def t_star_2(self, new_t_star_2):
    if 't_star_2' in self.parameters.keys():
        # Fix: validate BEFORE mutating state. Previously the
        # negative-value check ran after the assignments, so the
        # parameters dict and source no. 2 were already updated when
        # the ValueError was raised, leaving inconsistent state.
        if new_t_star_2 < 0.:
            raise ValueError(
                'Source crossing time cannot be negative:', new_t_star_2)
        self.parameters['t_star_2'] = new_t_star_2
        self._source_2_parameters.t_star = new_t_star_2
    else:
        raise AttributeError('t_star_2 is not a parameter of this model.')
@property
def rho_1(self):
    """
    *float*

    Source no. 1 size as a fraction of the Einstein radius.
    """
    if 'rho_1' in self.parameters.keys():
        return self.parameters['rho_1']
    # Fall back to t_star / t_E of source no. 1.
    source = self._source_1_parameters
    if ('t_star' in source.parameters.keys() and
            't_E' in source.parameters.keys()):
        return source.t_star / source.t_E
    raise AttributeError(
        'rho_1 is not defined for these parameters: {:}'.format(
            self.parameters.keys()))

@rho_1.setter
def rho_1(self, new_rho_1):
    if 'rho_1' not in self.parameters.keys():
        raise AttributeError('rho_1 is not a parameter of this model.')
    if new_rho_1 < 0.:
        raise ValueError('source size (rho_1) cannot be negative')
    self.parameters['rho_1'] = new_rho_1
    self._source_1_parameters.rho = new_rho_1

@property
def rho_2(self):
    """
    *float*

    Source no. 2 size as a fraction of the Einstein radius.
    """
    if 'rho_2' in self.parameters.keys():
        return self.parameters['rho_2']
    # Fall back to t_star / t_E of source no. 2.
    source = self._source_2_parameters
    if ('t_star' in source.parameters.keys() and
            't_E' in source.parameters.keys()):
        return source.t_star / source.t_E
    raise AttributeError(
        'rho_2 is not defined for these parameters: {0}'.format(
            self.parameters.keys()))

@rho_2.setter
def rho_2(self, new_rho_2):
    if 'rho_2' not in self.parameters.keys():
        raise AttributeError('rho_2 is not a parameter of this model.')
    if new_rho_2 < 0.:
        raise ValueError('source size (rho_2) cannot be negative')
    self.parameters['rho_2'] = new_rho_2
    self._source_2_parameters.rho = new_rho_2
def get_s(self, epoch):
    """
    Return the separation :py:attr:`~s` at the given epoch or epochs
    (linear extrapolation when orbital motion parameters are set).

    Arguments :
        epoch: *float*, *list*, *np.ndarray*
            The time(s) at which to calculate :py:attr:`~s`.

    Returns :
        separation: *float* or *np.ndarray*
            Value(s) of separation for given epochs.
    """
    # Static model: separation does not depend on time.
    if 'ds_dt' not in self.parameters.keys():
        return self.s

    if isinstance(epoch, list):
        epoch = np.array(epoch)
    elapsed = epoch - self.t_0_kep
    # ds_dt is in 1/year; epochs are in days.
    return self.s + self.ds_dt * elapsed / 365.25

def get_alpha(self, epoch):
    """
    Return the angle :py:attr:`~alpha` at the given epoch or epochs
    (linear extrapolation when orbital motion parameters are set).

    Arguments :
        epoch: *float*, *list*, *np.ndarray*
            The time(s) at which to calculate :py:attr:`~alpha`.

    Returns :
        angle: *float*
            Value(s) of angle for given epochs in degrees.
    """
    # Static model: angle does not depend on time.
    if 'dalpha_dt' not in self.parameters.keys():
        return self.alpha

    if isinstance(epoch, list):
        epoch = np.array(epoch)
    elapsed = epoch - self.t_0_kep
    # dalpha_dt is in deg/year; epochs are in days.
    return self.alpha + self.dalpha_dt * elapsed / 365.25
@property
def gamma_parallel(self):
    """
    *float*

    Parallel component of instantaneous velocity of the secondary
    relative to the primary in 1/year.
    It is parallel to the primary-secondary axis.
    Equals :py:attr:`~ds_dt`/:py:attr:`~s`. Cannot be set.
    """
    return self.ds_dt / self.s

@property
def gamma_perp(self):
    """
    *float*

    Perpendicular component of instantaneous velocity of the secondary
    relative to the primary. It is perpendicular to the primary-secondary
    axis. It has sign opposite to :py:attr:`~dalpha_dt`
    and is in rad/yr, not deg/yr. Cannot be set.
    """
    # Sign flip and deg -> rad conversion (dalpha_dt is in deg/year).
    return -self.dalpha_dt * (np.pi / 180.)

@property
def gamma(self):
    """
    *float*

    Instantaneous velocity of the secondary relative to the primary in
    1/year. Cannot be set.
    """
    # Euclidean norm of the two velocity components.
    return (self.gamma_parallel**2 + self.gamma_perp**2)**0.5

def is_finite_source(self):
    """
    Checks if model has finite source. For binary source models it checks
    if either of the sources is finite.

    Returns:
        is_finite_source: *boolean*
            *True* if at least one source has finite size.
    """
    return self._type['finite source']

def is_static(self):
    """
    Checks if model is static, i.e., orbital motion parameters are not set.

    Returns :
        is_static: *boolean*
            *True* if neither *dalpha_dt* nor *ds_dt* is set.
    """
    return not self._type['lens 2-parameter orbital motion']
@property
def n_lenses(self):
    """
    *int*

    Number of objects in the lens system.
    """
    return self._n_lenses

@property
def n_sources(self):
    """
    *int*

    Number of luminous sources.
    It can be 1 for a xallarap model.
    """
    return self._n_sources

@property
def is_external_mass_sheet(self):
    """
    *bool*

    Whether an external mass sheet is included in the model.
    """
    return self._type['mass sheet']

@property
def is_external_mass_sheet_with_shear(self):
    """
    *bool*

    Whether an external mass sheet is included in the
    model with non-zero shear.
    """
    # Requires shear_G to be both present and non-zero.
    return (('shear_G' in self.parameters.keys()) and
            (self.parameters['shear_G'] != 0))

@property
def is_xallarap(self):
    """
    *bool*

    Whether the parameters include the xallarap or not.
    """
    return self._type['xallarap']
@property
def source_1_parameters(self):
    """
    :py:class:`~MulensModel.modelparameters.ModelParameters`

    Parameters of source 1 in multi-source model.

    **Do not change returned values.** To change
    parameters of the source 1, simply change the parameters of double
    source instance.
    """
    # Per-source parameter objects exist only for multi-source models.
    if self.n_sources == 1:
        raise ValueError('source_1_parameters cannot be accessed for ' +
                         'single source models')
    return self._source_1_parameters

@property
def source_2_parameters(self):
    """
    :py:class:`~MulensModel.modelparameters.ModelParameters`

    Parameters of source 2 in multi-source model.

    **Do not change returned values.** To change
    parameters of the source 2, simply change the parameters of double
    source instance.
    """
    # Per-source parameter objects exist only for multi-source models.
    if self.n_sources == 1:
        raise ValueError('source_2_parameters cannot be accessed for ' +
                         'single source models')
    return self._source_2_parameters
@property
def uniform_caustic_sampling(self):
    """
    :py:class:`~MulensModel.uniformcausticsampling.UniformCausticSampling`

    An instance of the class
    :py:class:`~MulensModel.uniformcausticsampling.UniformCausticSampling`
    that is used to calculate standard parameters based on
    the curvelinear coordinates.
    The main usage is access to the *jacobian()* function.
    In most cases, you do not need to access this property directly.
    """
    if not self._type['Cassan08']:
        raise ValueError(
            'These parameters are not in curvelinear parameterization. ' +
            'Hence you cannot access uniform_caustic_sampling property.')

    # Presumably lazily builds/refreshes self._uniform_caustic -- the
    # implementation is not visible here; verify if behavior matters.
    self._get_uniform_caustic_sampling()

    return self._uniform_caustic

def as_dict(self):
    """
    Give parameters as a dict.

    Returns :
        dictionary: *dict*
            The dictionary of model parameters.

    Note: this returns the internal dict itself (not a copy), so
    mutating it mutates the model parameters.
    """
    return self.parameters
|
rpoleskiREPO_NAMEMulensModelPATH_START.@MulensModel_extracted@MulensModel-master@source@MulensModel@modelparameters.py@.PATH_END.py
|
{
"filename": "core.py",
"repo_name": "samuelyeewl/specmatch-emp",
"repo_path": "specmatch-emp_extracted/specmatch-emp-master/specmatchemp/core.py",
"type": "Python"
}
|
"""
@filename core.py
SpecMatch-Emp core functions
"""
import os
import sys
from shutil import copy
import logging
import numpy as np
from matplotlib.backends.backend_pdf import PdfPages
import matplotlib.pyplot as plt
from specmatchemp import SPECMATCHDIR
from specmatchemp import SHIFT_REFERENCES
from specmatchemp import spectrum
from specmatchemp import shift
from specmatchemp import specmatch
from specmatchemp import library
def specmatch_spectrum(specpath, plot_level=0, inlib=False, outdir="./",
                       num_best=5, suffix="", wavlim='all', lib_subset=None,
                       name=None, n_lib_subset=None):
    """Perform the specmatch on a given spectrum.

    Runs the full pipeline: read, shift, match against the library,
    linear combination, then writes results and (optionally) plots.

    Args:
        specpath (str): Path to target spectrum (HIRES fits file)
        plot_level (int, 0-2): Level of plots
            0 - No plots saved, 1 - Representative plots, 2 - All plots
        inlib (str or False): String to search within library for to exclude
            from matching process
        outdir (str): Output file directory
        num_best (int): Number of best matches to use at lincomb stage
        suffix (str): String to append to output file names
        wavlim: 'all' for the full spectrum, or a (min, max) wavelength
            tuple used to cut the spectrum and restrict matching
        lib_subset: passed to library.read_hdf as lib_index_subset
            (ignored/overwritten when n_lib_subset is given)
        name (str): Explicit target name; defaults to the file basename
        n_lib_subset (int): If given, match against a random subset of
            the library of this size

    Returns:
        specmatch.SpecMatch object
    """
    if not os.path.exists(specpath):
        raise ValueError(specpath + " does not exist!")

    # Read the target, optionally restricted to the requested range.
    if wavlim == 'all':
        target = spectrum.read_hires_fits(specpath)
    else:
        target = spectrum.read_hires_fits(specpath).cut(*wavlim)

    # Determine the name of the target
    if inlib:
        name = inlib
    elif name is None:
        # Strip the '.fits' extension (5 characters).
        name = os.path.basename(specpath)[:-5]

    # Optionally draw a random library subset (no fixed seed, so the
    # subset differs between runs).
    if n_lib_subset is not None:
        lib = library.read_hdf(wavlim='none')
        lib_subset = lib.library_params.lib_index
        lib_subset = np.random.choice(
            lib_subset, size=n_lib_subset, replace=False
        )

    lib = library.read_hdf(wavlim=wavlim, lib_index_subset=lib_subset)
    sm = specmatch.SpecMatch(target, lib)
    sm.shift()

    # When the target itself is in the library, exclude it from matching
    # and keep its library parameters for the report.
    if inlib:
        targ_idx = lib.get_index(inlib)
        targ_param, targ_spec = lib[targ_idx]
        sm.match(ignore=targ_idx, wavlim=wavlim)
    else:
        targ_param = None
        sm.match(wavlim=wavlim)

    sm.target.name = name  # attach target name
    sm.lincomb(num_best)

    # Print results
    print("SpecMatch Results for {0}".format(name))
    sm.results_to_txt(sys.stdout)

    # All outputs go into a per-target subdirectory.
    outdir = os.path.join(outdir, name)
    if not os.path.exists(outdir):
        os.makedirs(outdir)

    # Save final results
    outpath = os.path.join(outdir, name + suffix + '_results.txt')
    with open(outpath, 'w') as f:
        if inlib:
            # Prepend the known library parameters for comparison.
            f.write('Library Parameters\n')
            f.write('------------------\n')
            f.write('Teff: {0:.0f} +/- {1:.0f} K\n'.format(
                targ_param['Teff'], targ_param['u_Teff']))
            f.write('Radius: {0:.3f} +/- {1:.3f} Rsun\n'.format(
                targ_param['radius'], targ_param['u_radius']))
            f.write('[Fe/H]: {0:.2f} +/- {1:.2f} dex\n'.format(
                targ_param['feh'], targ_param['u_feh']))
            f.write('\n')
        sm.results_to_txt(f, verbose=True)
    print("created {}".format(outpath))

    # Save full results
    outpath = os.path.join(outdir, name + suffix + '_sm.hdf')
    sm.to_hdf(outpath)
    print("created {}".format(outpath))

    # Create representative plots
    if plot_level is not None and plot_level > 0:
        plotspath = os.path.join(outdir, name + suffix + '_plots.pdf')
        with PdfPages(plotspath) as pdf:
            region = (5100, 5200)
            # NOTE: shadows the 'wavlim' parameter from here on.
            wavlim = (5160, 5190)
            order = 2
            plot_shifts(sm, pdf, order, wavlim)
            plot_match(sm, pdf, region, wavlim, targ_param)
            plot_lincomb(sm, pdf, region, wavlim, targ_param)
        print("created {}".format(plotspath))

    # Create full plots
    if plot_level == 2:
        shiftplotspath = os.path.join(outdir, name + suffix +
                                      '_shift_plots.pdf')
        with PdfPages(shiftplotspath) as pdf:
            # One shift plot per HIRES order.
            for order in range(np.shape(sm.target_unshifted.w)[0]):
                plot_shifts(sm, pdf, order, wavlim='all')
        matchplotspath = os.path.join(outdir, name + suffix +
                                      '_match_plots.pdf')
        with PdfPages(matchplotspath) as pdf:
            for reg in sm.regions:
                plot_match(sm, pdf, reg, wavlim='all', targ_param=targ_param)
        lincombplotspath = os.path.join(outdir, name + suffix +
                                        '_lincomb_plots.pdf')
        with PdfPages(lincombplotspath) as pdf:
            for reg in sm.lincomb_regions:
                plot_lincomb(sm, pdf, reg, wavlim='all', targ_param=targ_param)
        # NOTE(review): this prints the representative-plots path again,
        # not the three files just created -- looks unintended; confirm.
        print("created {}".format(plotspath))

    return sm
def plot_shifts(sm, pdf, order, wavlim='all', singleorder=False):
    """Create shift plots (shifted spectrum, cross-correlation, lags)
    and append them as pages to an open PDF.

    Args:
        sm (specmatch.SpecMatch): SpecMatch object to plot
        pdf: Pdf file object (matplotlib PdfPages)
        order (int): HIRES order to plot
        wavlim: A specific wavelength range to plot spectrum;
            'all' uses the full range of the requested order
        singleorder (bool): Whether to plot lags as a single order
    """
    name = sm.target.name
    if wavlim == 'all':
        # Default to the full wavelength span of this order.
        min_w = sm.target_unshifted.w[order][0]
        max_w = sm.target_unshifted.w[order][-1]
        wavlim = (min_w, max_w)

    # Page 1: shifted spectrum
    fig = plt.figure(figsize=(10, 5))
    sm.plot_shifted_spectrum(wavlim=wavlim)
    plt.title('Shift results for star {0}'.format(name))
    fig.set_tight_layout(True)
    pdf.savefig()
    plt.close()

    # Page 2: cross-correlation -- full view (left) and a zoom around
    # the mean shift (right).
    fig, axL = plt.subplots(ncols=2, figsize=(10, 5))
    plt.sca(axL[0])
    sm.plot_xcorr(order, True)
    plt.title('{0} cross-correlation for order {1:d}'.format(name, order))
    plt.sca(axL[1])
    sm.plot_xcorr(order, True)
    # nanmean: some orders may have failed fits.
    meanshift = np.nanmean(sm.shift_data['fit'])
    dpix = 30
    plt.xlim(meanshift-dpix, meanshift+dpix)
    fig.set_tight_layout(True)
    pdf.savefig()
    plt.close()

    # Page 3: lags (single order or all orders)
    fig = plt.figure(figsize=(8, 6))
    if singleorder:
        sm.plot_shift_lags(order)
        plt.title('{0} lags for order {1:d}'.format(name, order))
    else:
        sm.plot_shift_lags()
        plt.title('{0} lags'.format(name))
    fig.set_tight_layout(True)
    pdf.savefig()
    plt.close()
def plot_shift_data(target_unshifted, target_shifted, reference, shift_data,
                    pdf, order, wavlim='all', singleorder=False):
    """Create shift plots directly from shift data, without a full
    SpecMatch run.

    Builds a throwaway SpecMatch object carrying the given spectra and
    shift data, then delegates to :func:`plot_shifts`.

    Args:
        target_unshifted (HiresSpectrum): Unshifted target spectrum
        target_shifted (Spectrum): Shifted target spectrum
        reference (Spectrum): Reference spectrum
        shift_data (dict): Shift data object
        pdf: Pdf file object
        order (int): HIRES order to plot
        wavlim: A specific wavelength range to plot spectrum
    """
    sm_tmp = specmatch.SpecMatch(target_unshifted)
    sm_tmp.target = target_shifted
    sm_tmp.shift_ref = reference
    sm_tmp.shift_data = shift_data

    plot_shifts(sm_tmp, pdf, order, wavlim, singleorder)
def plot_match(sm, pdf, region=0, wavlim='all', targ_param=None):
    """Create match plots (chi-squared surfaces and best-match spectrum)
    and append them as pages to an open PDF.

    Args:
        sm (specmatch.SpecMatch): SpecMatch object to plot
        pdf: Pdf file object (matplotlib PdfPages)
        region (int or tuple): Match region to plot
        wavlim: A specific wavelength range to plot spectrum
        targ_param: Target parameters (library row); when given, the
            known values are over-plotted as vertical lines
    """
    name = sm.target.name

    # Page 1: chi-squared surfaces
    fig = plt.figure(figsize=(12, 8))
    sm.plot_chi_squared_surface()
    if targ_param is not None:
        # Plot target parameters if available
        axes = fig.axes
        axes[0].axvline(targ_param['Teff'], color='k')
        axes[1].axvline(targ_param['radius'], color='k')
        axes[2].axvline(targ_param['feh'], color='k')
    plt.suptitle('{0} chi-squared surface'.format(name))
    # Leave headroom for the suptitle.
    plt.tight_layout(rect=[0, 0, 1, 0.95])
    pdf.savefig()
    plt.close()

    # Page 2: best match spectrum
    fig = plt.figure(figsize=(10, 4))
    sm.plot_best_match_spectra(region=region, wavlim=wavlim, num_best=1)
    plt.suptitle('{0} best matching spectrum'.format(name))
    plt.tight_layout(rect=[0.05, 0.05, 1, 0.95])
    pdf.savefig()
    plt.close()
def plot_lincomb(sm, pdf, region=0, wavlim='all', targ_param=None):
    """Create lincomb plots (reference locations and linear-combination
    results) and append them as pages to an open PDF.

    Args:
        sm (specmatch.SpecMatch): SpecMatch object to plot
        pdf: Pdf file object (matplotlib PdfPages)
        region (int or tuple): Match region to plot
        wavlim: A specific wavelength range to plot spectrum
        targ_param: Target parameters (library row); when given, the
            known values are over-plotted as red stars
    """
    name = sm.target.name

    # Page 1: reference locations in parameter space
    fig = plt.figure(figsize=(10, 8))
    sm.plot_references(region=region, verbose=True)
    axes = fig.axes
    axes[0].legend(numpoints=1, fontsize='small', loc='best')
    if targ_param is not None:
        # Plot target parameters if available; the four panels pair
        # radius against Teff (first two) and [Fe/H] (last two).
        axes[0].plot(targ_param['Teff'], targ_param['radius'], '*',
                     ms=15, color='red', label='Target')
        axes[1].plot(targ_param['Teff'], targ_param['radius'], '*',
                     ms=15, color='red')
        axes[2].plot(targ_param['feh'], targ_param['radius'], '*',
                     ms=15, color='red')
        axes[3].plot(targ_param['feh'], targ_param['radius'], '*',
                     ms=15, color='red')
    plt.suptitle('{0} references used in linear combination'
                 .format(name))
    plt.tight_layout(rect=[0.05, 0.05, 1, 0.95])
    pdf.savefig()
    plt.close()

    # Page 2: lincomb results
    fig = plt.figure(figsize=(12, 6))
    sm.plot_lincomb(region=region, wavlim=wavlim)
    plt.title('{0} Linear Combination results'.format(name))
    fig.set_tight_layout(True)
    pdf.savefig()
    plt.close()
def match_spectrum(specpath, indir="./", plot_level=0, inlib=False,
                   outdir="./", suffix=""):
    """Match a spectrum given its observation ID.

    Loads an already-shifted spectrum, matches it against the library,
    and writes match results (and optionally plots) under a per-target
    subdirectory of *outdir*.

    Args:
        specpath (str): Path to spectrum or its CPS observation id jXX.XXXX
        indir (str): Directory to look in for target spectrum
        plot_level (int, 0-2): Level of plotting to save
        inlib (str or False): String to search within library for to exclude
            from matching process
        outdir (str): Output file directory
        suffix (str): String to append to output file names

    Returns:
        specmatch.SpecMatch object
    """
    # Check if specpath is a path or an observation ID
    if os.path.exists(specpath):
        targ_path = specpath
        targid = os.path.splitext(os.path.basename(specpath))[0]
    else:
        # Observation ID: look for the shifted rj-chip file in indir.
        targ_path = os.path.join(indir, 'r' + specpath + '_adj' + suffix +
                                 '.fits')
        if not os.path.exists(targ_path):
            raise ValueError(specpath + " does not exist!")
        targid = 'r' + specpath

    # Load shifted spectrum
    target = spectrum.read_fits(targ_path)
    lib = library.read_hdf()
    sm = specmatch.SpecMatch(target, lib)

    # When the target is itself a library star, exclude it from matching
    # and keep its parameters for the plots.
    if inlib:
        name = inlib
        sm.target.name = inlib
        sm.target.attrs['obs'] = targid
        targ_idx = lib.get_index(inlib)
        targ_param, targ_spec = lib[targ_idx]
        sm.match(ignore=targ_idx)
    else:
        name = targid
        targ_param = None
        sm.match()

    # Save results
    outdir = os.path.join(outdir, name)
    if not os.path.exists(outdir):
        # Fix: os.makedirs (was os.mkdir) so missing parent directories
        # are created too -- consistent with specmatch_spectrum().
        os.makedirs(outdir)
    outpath = os.path.join(outdir, name + suffix + '_match.csv')
    sm.match_results.to_csv(outpath)

    # Save SpecMatch object
    outpath = os.path.join(outdir, name + suffix + '_sm.hdf')
    sm.to_hdf(outpath)

    # Generate representative plots
    if plot_level == 1:
        plotspath = os.path.join(outdir, name + suffix + '_match_plots.pdf')
        with PdfPages(plotspath) as pdf:
            region = (5100, 5200)
            wavlim = (5160, 5190)
            plot_match(sm, pdf, region, wavlim, targ_param)

    # Generate full plots
    if plot_level == 2:
        plotspath = os.path.join(outdir, name + suffix + '_match_plots.pdf')
        with PdfPages(plotspath) as pdf:
            for reg in sm.regions:
                plot_match(sm, pdf, reg, wavlim='all', targ_param=targ_param)

    return sm
def lincomb_spectrum(respath, plot_level=0, inlib=False, outdir="./",
                     num_best=5, suffix=""):
    """Match a spectrum using the linear combination approach.

    Can only be used to resume an existing SpecMatch object.

    Args:
        respath (str): Path to existing SpecMatch.hdf file
        plot_level (int, 0-2): Level of plotting to save
        inlib (str or False): String to search within library for to exclude
            from matching process
        outdir (str): Output file directory
        num_best (int): Number of best matches to use at lincomb stage
        suffix (str): String to append to output file names

    Returns:
        specmatch.SpecMatch object
    """
    lib = library.read_hdf()
    sm = specmatch.SpecMatch.read_hdf(respath, lib)
    name = sm.target.name

    # Look up known library parameters when the target is a library star.
    if inlib:
        targ_idx = lib.get_index(inlib)
        targ_param, targ_spec = lib[targ_idx]
    else:
        targ_param = None

    sm.lincomb(num_best=num_best)

    # Print results
    print("SpecMatch Results for {0}".format(name))
    for p in library.Library.STAR_PROPS:
        print("{0}: {1:.2f}".format(p, sm.results[p]))

    outdir = os.path.join(outdir, name)
    if not os.path.exists(outdir):
        # Fix: os.makedirs (was os.mkdir) so missing parent directories
        # are created too -- consistent with specmatch_spectrum().
        os.makedirs(outdir)

    # Save final results
    outpath = os.path.join(outdir, name + suffix + '_results.txt')
    with open(outpath, 'w') as f:
        if inlib:
            # Prepend the known library parameters for comparison.
            f.write('Library Parameters\n')
            f.write('------------------\n')
            f.write('Teff: {0:.0f} +/- {1:.0f} K\n'.format(
                targ_param['Teff'], targ_param['u_Teff']))
            f.write('Radius: {0:.3f} +/- {1:.3f} Rsun\n'.format(
                targ_param['radius'], targ_param['u_radius']))
            f.write('[Fe/H]: {0:.2f} +/- {1:.2f} dex\n'.format(
                targ_param['feh'], targ_param['u_feh']))
            f.write('\n')
        sm.results_to_txt(f, verbose=True)

    # Save full results
    outpath = os.path.join(outdir, name + suffix + '_lincomb_sm.hdf')
    sm.to_hdf(outpath)

    # Plot results
    if plot_level == 1:
        plotspath = os.path.join(outdir, name + suffix + '_lincomb_plots.pdf')
        with PdfPages(plotspath) as pdf:
            region = (5100, 5200)
            wavlim = (5160, 5190)
            plot_lincomb(sm, pdf, region, wavlim, targ_param)

    if plot_level == 2:
        plotspath = os.path.join(outdir, name + suffix + '_lincomb_plots.pdf')
        with PdfPages(plotspath) as pdf:
            for reg in sm.lincomb_regions:
                plot_lincomb(sm, pdf, reg, wavlim='all', targ_param=targ_param)

    return sm
def shift_spectrum(specpath, plot_level=0, indir=None, outdir="./",
                   suffix="_adj", mask=True, no_bootstrap=False,
                   flatten=False):
    """Shift a target spectrum given an observation code.

    Saves the shifted spectrum in a fits file. When *specpath* is an
    observation ID rather than an existing path, all matching chips are
    shifted via ``_multishift_spectrum``.

    Args:
        specpath (str): Path to spectrum or its CPS observation id jXX.XXXX
        plot_level (int, 0-2): Level of plotting to save
        indir (str): Directory to look in for target spectrum
        outdir (str): Directory to store output files
        suffix (str): String to append to output file names
        mask (bool): Use a mask to remove telluric lines
        no_bootstrap (bool): Shift a spectrum without bootstrapping
        flatten (bool): If multiple chips are provided, flatten into a single
            spectrum file.

    Returns:
        shifted, unshifted, shift_data
    """
    # Check if specpath is a path or an observation ID
    if os.path.exists(specpath):
        targ_path = specpath
        targid = os.path.splitext(os.path.basename(specpath))[0]
    else:
        # Observation ID: shift every available chip instead.
        return _multishift_spectrum(specpath, plot_level, indir, outdir,
                                    suffix, mask, no_bootstrap, flatten)

    # if a different directory is provided, copy the file into specmatchemp
    # working directory
    specdir = os.path.join(SPECMATCHDIR, 'spectra')
    shiftedspecdir = os.path.join(SPECMATCHDIR, 'shifted_spectra')
    if indir != specdir and indir is not None:
        copy(targ_path, specdir)

    # load target and references
    if mask:
        maskfile = os.path.join(SPECMATCHDIR, 'hires_telluric_mask.csv')
    else:
        maskfile = None
    targ_spec = spectrum.read_hires_fits(targ_path, maskfile)

    if no_bootstrap:
        # Shift directly onto NSO spectrum
        ref_specs = [spectrum.read_fits(os.path.join(shiftedspecdir,
                     'nso_adj.fits'))]
        shift_data = {}
        print("Shifting directly against NSO spectrum.")
        shifted = shift.shift(targ_spec, ref_specs[0], store=shift_data)
        shift_data['shift_reference'] = 0
    else:
        # Shift spectrum onto boostrapped spectra
        ref_specs = [spectrum.read_fits(os.path.join(shiftedspecdir,
                     r[0] + '_adj.fits')) for r in SHIFT_REFERENCES]
        shift_data = {}
        shifted = shift.bootstrap_shift(targ_spec, ref_specs, store=shift_data)

    # Save shifted spectrum (always into the working directory first).
    outpath = os.path.join(shiftedspecdir, targid + suffix + '.fits')
    shift.save_shift_to_fits(outpath, shifted, targ_spec, shift_data,
                             clobber=True)
    if outdir != shiftedspecdir:
        if not os.path.exists(outdir):
            # NOTE(review): os.mkdir fails if parent dirs are missing;
            # sibling functions use os.makedirs -- consider aligning.
            os.mkdir(outdir)
        copy(outpath, outdir)

    # Generate representative plots
    if plot_level == 1:
        plotfile = os.path.join(outdir, targid + "_shift_plots.pdf")
        print("Saving plots to " + plotfile)
        with PdfPages(plotfile) as pdf:
            # Get reference used
            shift_ref = ref_specs[shift_data['shift_reference']]
            # Plot single order
            plot_shift_data(targ_spec, shifted, shift_ref, shift_data, pdf, 2)
    # Generate individual plots for every order
    elif plot_level == 2:
        plotfile = os.path.join(outdir, targid + "_shift_plots.pdf")
        print("Saving plots to " + plotfile)
        with PdfPages(plotfile) as pdf:
            # Get reference used
            shift_ref = ref_specs[shift_data['shift_reference']]
            num_orders = shift_data['num_orders']
            for i in range(num_orders):
                plot_shift_data(targ_spec, shifted, shift_ref, shift_data,
                                pdf, i, singleorder=True)

    return shifted, targ_spec, shift_data
def _multishift_spectrum(cps_id, plot_level=0, indir=None, outdir="./",
                         suffix="_adj", mask=True, no_bootstrap=False,
                         flatten=False):
    """Shift every available chip of a CPS observation.

    Looks for the bj, rj and ij chip files of observation `cps_id` in
    `indir`, shifts each one that exists with `shift_spectrum`, and either
    returns the per-chip results or flattens them into a single combined
    spectrum on the NSO wavelength scale.

    Args:
        cps_id (str): CPS observation id (without the chip prefix).
        plot_level (int): 0 = no plots, 1 = representative plots,
            2 = plots for every order (passed through to shift_spectrum).
        indir (str): Directory containing the raw chip files.
        outdir (str): Directory to save shifted spectra and plots.
        suffix (str): Suffix appended to the saved spectrum filename.
        mask (bool): Whether to apply the telluric mask while shifting.
        no_bootstrap (bool): If True, shift directly against the NSO
            spectrum instead of the bootstrap references.
        flatten (bool): If True, combine the shifted chips into a single
            spectrum.

    Returns:
        If `flatten` is True: ``(shifted, unshifted, shift_data)`` for the
        combined spectrum. Otherwise three lists holding the per-chip
        shifted spectra, unshifted spectra and shift-data dicts.

    Raises:
        ValueError: If no chip files for `cps_id` are found in `indir`.
    """
    specs_shifted = []
    specs_unshifted = []
    specs_sd = []
    chips = []
    # Shift each chip (bj, rj, ij) that is present on disk; the raw file
    # is named with the single-letter chip prefix, e.g. 'b' + cps_id.
    for chip in ('bj', 'rj', 'ij'):
        chip_path = os.path.join(indir, chip[0] + cps_id + '.fits')
        if not os.path.exists(chip_path):
            continue
        print("Shifting {0} chip...".format(chip))
        shifted, unshifted, sd = shift_spectrum(
            chip_path, plot_level=plot_level, indir=indir, outdir=outdir,
            suffix=suffix, mask=mask, no_bootstrap=no_bootstrap,
            flatten=False)
        specs_shifted.append(shifted)
        specs_unshifted.append(unshifted)
        specs_sd.append(sd)
        chips.append(chip)
    if not chips:
        # Fix: the original message was missing the space before "could".
        raise ValueError("No observations corresponding to " + cps_id +
                         " could be found in " + indir)
    if not flatten:
        return specs_shifted, specs_unshifted, specs_sd
    # Combine all chips into a single file on the NSO wavelength scale.
    print("Flattening {0:d} spectra".format(len(specs_shifted)))
    shiftedspecdir = os.path.join(SPECMATCHDIR, 'shifted_spectra')
    nso = spectrum.read_fits(os.path.join(shiftedspecdir, 'nso_adj.fits'))
    shifted = spectrum.Spectrum.combine_spectra(
        specs_shifted, nso.w, name=cps_id, prefixes=chips)
    unshifted = spectrum.HiresSpectrum.combine_spectra(
        specs_unshifted, name=cps_id, prefixes=chips)
    # Merge the per-chip shift metadata, namespacing keys by chip name.
    shift_data = {}
    for (i, sd) in enumerate(specs_sd):
        for k, v in sd.items():
            shift_data[chips[i] + '/' + k] = v
    # Save flattened spectrum
    outpath = os.path.join(shiftedspecdir, cps_id + suffix + '.fits')
    shifted.to_fits(outpath, clobber=True)
    if outdir != shiftedspecdir:
        if not os.path.exists(outdir):
            os.mkdir(outdir)
        copy(outpath, outdir)
    return shifted, unshifted, shift_data
|
samuelyeewlREPO_NAMEspecmatch-empPATH_START.@specmatch-emp_extracted@specmatch-emp-master@specmatchemp@core.py@.PATH_END.py
|
{
"filename": "model.py",
"repo_name": "IvS-KULeuven/IvSPythonRepository",
"repo_path": "IvSPythonRepository_extracted/IvSPythonRepository-master/sed/model.py",
"type": "Python"
}
|
# -*- coding: utf-8 -*-
"""
Interface to the SED library.
The most basic usage of this module is:
>>> wave,flux = get_table(teff=10000,logg=4.0)
This will retrieve the model SED with the specified B{effective temperature} and
B{logg}, from the standard B{grid}, in standard B{units} and with zero
B{reddening}. All these things can be specified though (see below).
Section 1. Available model grids
================================
Section 1.1 Available grids
---------------------------
- kurucz: The Kurucz model grids, (default setting) reference: Kurucz 1993, yCat, 6039, 0
- metallicity (z): m01 is -0.1 log metal abundance relative to solar (solar abundances from Anders and Grevesse 1989)
- metallicity (z): p01 is +0.1 log metal abundance relative to solar (solar abundances from Anders and Grevesse 1989)
- alpha enhancement (alpha): True means alpha enhanced (+0.4)
- turbulent velocity (vturb): vturb in km/s
- nover= True means no overshoot
- odfnew=True means no overshoot but with better opacities and abundances
- tmap: NLTE grids computed for sdB stars with the Tubingen NLTE Model
Atmosphere package. No further parameters are available. Reference:
Werner et al. 2003,
Section 1.2 Plotting the domains of all spectral grids
------------------------------------------------------
We make a plot of the domains of all spectral grids. Therefore, we first collect
all the grid names
>>> grids = get_gridnames()
Preparation of the plot: set the color cycle of the current axes to the spectral
color cycle.
>>> from cycler import cycler
>>> p = pl.figure(figsize=(10,8))
>>> color_cycle = cycler(color=[pl.cm.Spectral(j) for j in np.linspace(0, 1.0, len(grids))])
>>> p = pl.gca().set_color_cycle(color_cycle)
To plot all the grid points, we run over all grid names (which are strings), and
retrieve their dimensions. The dimensions are just two arrays giving the teff-
and logg-coordinate of each SED in the grid. They can thus be easily plot:
>>> for grid in grids:
... teffs,loggs = get_grid_dimensions(grid=grid)
... p = pl.plot(np.log10(teffs),loggs,'o',ms=7,label=grid)
And we need to set some of the plotting details to make it look nicer.
>>> p = pl.xlim(pl.xlim()[::-1])
>>> p = pl.ylim(pl.ylim()[::-1])
>>> p = pl.xlabel('Effective temperature [K]')
>>> p = pl.ylabel('log( Surface gravity [cm s$^{-1}$]) [dex]')
>>> xticks = [3000,5000,7000,10000,15000,25000,35000,50000,65000]
>>> p = pl.xticks([np.log10(i) for i in xticks],['%d'%(i) for i in xticks])
>>> p = pl.legend(loc='upper left',prop=dict(size='small'))
>>> p = pl.grid()
]include figure]]ivs_sed_model_grid.png]
Section 2. Retrieval of model SEDs
==================================
Subsection 2.1 Default settings
-------------------------------
To get information on the grid that is currently defined, you can type the
following. Note that not all parameters are relevant for all grids, e.g. the
convection theory parameter C{ct} has no influence when the Kurucz grid is
chosen.
>>> print defaults
{'use_scratch': False, 'Rv': 3.1, 'co': 1.05, 'c': 0.5, 'grid': 'kurucz', 'alpha': False, 'odfnew': True, 'ct': 'mlt', 'a': 0.0, 'vturb': 2, 'law': 'fitzpatrick2004', 'm': 1.0, 't': 1.0, 'z': 0.0, 'nover': False, 'He': 97}
or
>>> print os.path.basename(get_file())
kurucz93_z0.0_k2odfnew_sed.fits
You can change the defaults with the function L{set_defaults}:
>>> set_defaults(z=0.5)
>>> print defaults
{'use_scratch': False, 'Rv': 3.1, 'co': 1.05, 'c': 0.5, 'grid': 'kurucz', 'alpha': False, 'odfnew': True, 'ct': 'mlt', 'a': 0.0, 'vturb': 2, 'law': 'fitzpatrick2004', 'm': 1.0, 't': 1.0, 'z': 0.5, 'nover': False, 'He': 97}
And reset the 'default' default values by calling L{set_defaults} without arguments
>>> set_defaults()
>>> print defaults
{'use_scratch': False, 'Rv': 3.1, 'co': 1.05, 'c': 0.5, 'grid': 'kurucz', 'alpha': False, 'odfnew': True, 'ct': 'mlt', 'a': 0.0, 'vturb': 2, 'law': 'fitzpatrick2004', 'm': 1.0, 't': 1.0, 'z': 0.0, 'nover': False, 'He': 97}
Subsection 2.2 Speeding up
--------------------------
When fitting an sed using the builder class, or repeatedly reading model seds,
or integrated photometry, the main bottleneck on the speed will be the disk access
This can be circumvented by using the scratch disk. To do this, call the function
copy2scratch() after setting the default settings as explained above. f.x.:
>>> set_defaults(grid='kurucz', z=0.5)
>>> copy2scratch()
You have to do this every time you change a grid setting. This function creates a
directory named 'your_username' on the scratch disk and works from there. So you
won't disturb other users.
After the fitting process use the function
>>> clean_scratch()
to remove the models that you used from the scratch disk. Be carefull with this,
because it will remove the models without checking if there is another process
using them. So if you have multiple scripts running that are using the same models,
only clean the scratch disk after the last process is finished.
The gain in speed can be up to 70% in single sed fitting, and up to 40% in binary
and multiple sed fitting.
For the sake of the examples, we'll set the defaults back to z=0.0:
>>> set_defaults()
Subsection 2.3 Model SEDs
-------------------------
Be careful when you supply parameters: e.g., not all grids are calculated for
the same range of metallicities. In L{get_table}, only the effective temperature
and logg are 'interpolatable' quantities. You have to set the metallicity to a
grid value. The reddening value can take any value: it is not interpolated but
calculated. You can thus also specify the type of reddening law (see L{reddening}).
>>> wave,flux = get_table(teff=12345,logg=4.321,ebv=0.12345,z=0.5)
but
>>> try:
... wave,flux = get_table(teff=12345,logg=4.321,ebv=0.12345,z=0.6)
... except IOError,msg:
... print msg
File sedtables/modelgrids/kurucz93_z0.6_k2odfnew_sed.fits not found in any of the specified data directories /STER/pieterd/IVSDATA/, /STER/kristofs/IVSdata
Since the Kurucz model atmospheres have not been calculated for the value of
C{z=0.6}.
Instead of changing the defaults of this module with L{set_defaults}, you can
also give extra arguments to L{get_table} to specify the grid you want to use.
The default settings will not change in this case.
>>> wave,flux = get_table(teff=16321,logg=4.321,ebv=0.12345,z=0.3,grid='tlusty')
The default B{units} of the SEDs are angstrom and erg/s/cm2/AA/sr. To change them,
do:
>>> wave,flux = get_table(teff=16321,logg=4.321,wave_units='micron',flux_units='Jy/sr')
To B{remove the steradian} from the units when you know the angular diameter of
your star in milliarcseconds, you can do (we have to convert diameter to surface):
>>> ang_diam = 3.21 # mas
>>> scale = conversions.convert('mas','sr',ang_diam/2.)
>>> wave,flux = get_table(teff=9602,logg=4.1,ebv=0.0,z=0.0,grid='kurucz')
>>> flux *= scale
The example above is representative for the case of Vega. So, if we now calculate
the B{synthetic flux} in the GENEVA.V band, we should end up with the zeropoint
magnitude of this band, which is close to zero:
>>> flam = synthetic_flux(wave,flux,photbands=['GENEVA.V'])
>>> print '%.3f'%(conversions.convert('erg/s/cm2/AA','mag',flam,photband='GENEVA.V')[0])
0.063
Compare this with the calibrated value
>>> print filters.get_info(['GENEVA.V'])['vegamag'][0]
0.061
Section 3. Retrieval of integrated photometry
=============================================
Instead of retrieving a model SED, you can immediately retrieve pre-calculated
integrated photometry. The benefit of this approach is that it is B{much} faster
than retrieving the model SED and then calculating the synthetic flux. Also,
you can supply arbitrary metallicities within the grid boundaries, as interpolation
is done in effective temperature, surface gravity, reddening B{and} metallicity.
Note that also the B{reddening law is fixed} now, you need to recalculate the
tables for different parameters if you need them.
The B{massive speed-up} is accomplished the following way: it may take a few tens
of seconds to retrieve the first pre-integrated SED, because all available
files from the specified grid will be loaded into memory, and a `markerarray'
will be made allowing a binary search in the grid. This makes it easy to retrieve
all models around the specified point in N-dimensional space. Next, a linear
interpolation method is applied to predict the photometric values of the
specified point.
All defaults set for the retrieval of model SEDs are applicable for the integrated
photometry tables as well.
When retrieving integrated photometry, you also get the B{absolute luminosity}
(integration of total SED) as a bonus. This is the absolute luminosity assuming
the star has a radius of 1Rsol. Multiply by Rstar**2 to get the true luminosity.
Because photometric filters cannot trivially be assigned a B{wavelength} to (see
L{filters.eff_wave}), by default, no wavelength information is retrieved. If you
want to retrieve the effective wavelengths of the filters themselves (not taking
into account the model atmospheres), you can give an extra keyword argument
C{wave_units}. If you want to take into account the model atmosphere, use
L{filters.eff_wave}.
>>> photbands = ['GENEVA.U','2MASS.J']
>>> fluxes,Labs = get_itable(teff=16321,logg=4.321,ebv=0.12345,z=0.123,photbands=photbands)
>>> waves,fluxes,Labs = get_itable(teff=16321,logg=4.321,ebv=0.12345,z=0.123,photbands=photbands,wave_units='AA')
Note that the integration only gives you fluxes, and is thus independent from
the zeropoints of the filters (but dependent on the transmission curves). To
get the synthetic magnitudes, you can do
>>> mymags = [conversions.convert('erg/s/cm2/AA','mag',fluxes[i],photband=photbands[i]) for i in range(len(photbands))]
The mags don't mean anything in this case because they have not been corrected
for the distance to the star.
The retrieval of integrated photometry can go much faster if you want to do
it for a whole set of parameters. The L{get_itable_pix} function has a much
more flexible, reliable and fast interpolation scheme. It is possible to
interpolate also over doppler shift and interstellar Rv, as long as the grids
have been computed before. See L{get_itable_pix} for more information.
Subsection 3. Full example
==========================
We build an SED of Vega and compute synthetic magnitudes in the GENEVA and
2MASS bands.
These are the relevant parameters of Vega and photometric passbands
>>> ang_diam = 3.21 # mas
>>> teff = 9602
>>> logg = 4.1
>>> ebv = 0.0
>>> z = 0.0
>>> photbands = ['GENEVA.U','GENEVA.G','2MASS.J','2MASS.H','2MASS.KS']
We can compute (R/d) to scale the synthetic flux as
>>> scale = conversions.convert('mas','sr',ang_diam/2.)
We retrieve the SED
>>> wave,flux = get_table(teff=teff,logg=logg,ebv=ebv,z=z,grid='kurucz')
>>> flux *= scale
Then compute the synthetic fluxes, and compare them with the synthetic fluxes as
retrieved from the pre-calculated tables
>>> fluxes_calc = synthetic_flux(wave,flux,photbands)
>>> wave_int,fluxes_int,Labs = get_itable(teff=teff,logg=logg,ebv=ebv,z=z,photbands=photbands,wave_units='AA')
>>> fluxes_int *= scale
Convert to magnitudes:
>>> m1 = [conversions.convert('erg/s/cm2/AA','mag',fluxes_calc[i],photband=photbands[i]) for i in range(len(photbands))]
>>> m2 = [conversions.convert('erg/s/cm2/AA','mag',fluxes_int[i],photband=photbands[i]) for i in range(len(photbands))]
And make a nice plot
>>> p = pl.figure()
>>> p = pl.loglog(wave,flux,'k-',label='Kurucz model')
>>> p = pl.plot(wave_int,fluxes_calc,'ro',label='Calculated')
>>> p = pl.plot(wave_int,fluxes_int,'bx',ms=10,mew=2,label='Pre-calculated')
>>> p = [pl.annotate('%s: %.3f'%(b,m),(w,f),color='r') for b,m,w,f in zip(photbands,m1,wave_int,fluxes_calc)]
>>> p = [pl.annotate('%s: %.3f'%(b,m),(w-1000,0.8*f),color='b') for b,m,w,f in zip(photbands,m2,wave_int,fluxes_int)]
>>> p = pl.xlabel('Wavelength [Angstrom]')
>>> p = pl.ylabel('Flux [erg/s/cm2/AA]')
]include figure]]ivs_sed_model_example.png]
"""
import re
import os
import glob
import logging
import astropy.io.fits as pf
import numpy as np
try:
from scipy.interpolate import LinearNDInterpolator
from scipy.interpolate import griddata
new_scipy = True
except ImportError:
from Scientific.Functions.Interpolation import InterpolatingFunction
new_scipy = False
from scipy.interpolate import interp1d
from ivs import config
from ivs.units import conversions
from ivs.units import constants
from ivs.aux import loggers
from ivs.aux.decorators import memoized,clear_memoization
import itertools
import functools
from ivs.aux import numpy_ext
from ivs.sed import filters
from ivs.inout import fits
from ivs.sigproc import interpol
from ivs.sed import reddening
import getpass
import shutil
# Module logger; the NullHandler avoids "no handlers could be found"
# warnings when the host application does not configure logging.
logger = logging.getLogger("SED.MODEL")
logger.addHandler(loggers.NullHandler)
# Relative location of the calibrator SEDs.
caldir = os.sep.join(['sedtables','calibrators'])
#-- default values for grids
__defaults__ = dict(grid='kurucz',odfnew=True,z=+0.0,vturb=2,
                    alpha=False,nover=False,            # KURUCZ
                    He=97,                              # WD
                    ct='mlt',                           # NEMO (convection theory)
                    t=1.0,a=0.0,c=0.5,m=1.0,co=1.05,    # MARCS and COMARCS
                    Rv=3.1,law='fitzpatrick2004',       # reddening info for integrated grids
                    use_scratch=False)
# Working copy of the defaults, mutated by set_defaults(); __defaults__
# keeps the pristine values so they can be restored.
defaults = __defaults__.copy()
# Per-component defaults used when fitting multiple stars
# (see set_defaults_multiple).
defaults_multiple = [defaults.copy(),defaults.copy()]
#-- relative location of the grids
basedir = 'sedtables/modelgrids/'
# Absolute path of the per-user scratch directory; set by copy2scratch().
scratchdir = None
#{ Interface to library
def set_defaults(*args, **kwargs):
    """
    Set the default grid parameters of this module.

    Calling this without keyword arguments restores the factory defaults.
    Only keys already present in the module-level ``defaults`` dict are
    taken into account.
    """
    clear_memoization(keys=['ivs.sed.model'])
    # Without explicit settings, fall back to the pristine defaults.
    new_settings = kwargs if kwargs else __defaults__.copy()
    for name, value in new_settings.items():
        if name not in defaults:
            continue
        defaults[name] = value
        logger.info('Set %s to %s'%(name,value))
def set_defaults_multiple(*args):
    """
    Set defaults for multiple stars.

    Each positional argument is a dict of settings for the corresponding
    entry of ``defaults_multiple``; called without arguments, every star
    is reset to the single-star defaults.
    """
    settings = args if args else [defaults] * len(defaults_multiple)
    for star_index, star_settings in enumerate(settings):
        target = defaults_multiple[star_index]
        for name, value in star_settings.items():
            if name in target:
                target[name] = value
                logger.info('Set %s to %s (star %d)'%(name,value,star_index))
def copy2scratch(**kwargs):
    """
    Copy the grids to the scratch directory to speed up the fitting process.

    Files are placed in the directory: /scratch/uname/ where uname is your
    username. This function checks the grids that are set with the functions
    set_defaults() and set_defaults_multiple(). Every time a grid setting is
    changed, this function needs to be called again.

    Don't forget to remove the files from the scratch directory after the
    fitting process is completed with clean_scratch().

    It is possible to give z='*' and Rv='*' as an option; when you do that,
    the grids with all z, Rv values are copied. Don't forget to add that
    option to clean_scratch too!
    """
    global scratchdir
    uname = getpass.getuser()
    # Create the per-user scratch directory on first use.
    if not os.path.isdir('/scratch/%s/'%(uname)):
        os.makedirs('/scratch/%s/'%(uname))
    scratchdir = '/scratch/%s/'%(uname)
    #-- we have defaults for the single and multiple grid
    defaults_ = []
    defaults_.append(defaults)
    defaults_.extend(defaults_multiple)
    #-- now run over the defaults for the single and multiple grid, and
    #   copy the necessary files to the scratch disk
    for default in defaults_:
        # Temporarily disable scratch so get_file() resolves the *source*
        # location of the grid files rather than the scratch copy.
        default['use_scratch'] = False
        #-- apply any caller overrides (e.g. z='*'), but remember the
        #   original values so they can be restored after copying.
        originalDefaults = {}
        for key in kwargs:
            if key in default:
                originalDefaults[key] = default[key]
                default[key] = kwargs[key]
                logger.debug('Using provided value for {0:s}={1:s} when copying to scratch'.format(key,str(kwargs[key])))
        # Copy the raw (non-integrated) grid.
        fname = get_file(integrated=False,**default)
        #-- we could have received a list (multiple files, wildcards) or a
        #   string (single file)
        if isinstance(fname,str):
            fname = [fname]
        for ifname in fname:
            # Skip files that are already present on scratch.
            if not os.path.isfile(scratchdir + os.path.basename(ifname)):
                shutil.copy(ifname,scratchdir)
                logger.info('Copied grid: %s to scratch'%(ifname))
            else:
                logger.info('Using existing grid: %s from scratch'%(os.path.basename(ifname)))
        # Copy the integrated-photometry grid.
        fname = get_file(integrated=True,**default)
        if isinstance(fname,str):
            fname = [fname]
        for ifname in fname:
            if not os.path.isfile(scratchdir + os.path.basename(ifname)):
                shutil.copy(ifname,scratchdir)
                logger.info('Copied grid: %s to scratch'%(ifname))
            else:
                logger.info('Using existing grid: %s from scratch'%(os.path.basename(ifname)))
        # From now on, get_file() should serve files from scratch.
        default['use_scratch'] = True
        # Restore the caller-overridden settings.
        for key in kwargs:
            if key in default:
                default[key] = originalDefaults[key]
def clean_scratch(**kwargs):
    """
    Remove the grids that were copied to the scratch directory by
    copy2scratch().

    Be careful with this function, as it doesn't check if the models are
    still in use. If you are running multiple scripts that use the same
    models, only clean the scratch disk after the last script is finished.

    Keyword arguments (e.g. z='*', Rv='*') temporarily override the stored
    defaults while resolving which files to delete, mirroring the options
    accepted by copy2scratch().
    """
    def _remove_grid_files(fname):
        # get_file() may return a single path (str) or a list of paths
        # (wildcards); normalize and delete whatever exists.
        if isinstance(fname, str):
            fname = [fname]
        for ifname in fname:
            if os.path.isfile(ifname):
                logger.info('Removed file: %s'%(ifname))
                os.remove(ifname)

    defaults_ = []
    defaults_.append(defaults)
    defaults_.extend(defaults_multiple)
    for default in defaults_:
        if default['use_scratch']:
            # Apply caller overrides, remembering originals for restore.
            originalDefaults = {}
            for key in kwargs:
                if key in default:
                    originalDefaults[key] = default[key]
                    default[key] = kwargs[key]
                    logger.debug('Using provided value for {0:s}={1:s} when deleting from scratch'.format(key,str(kwargs[key])))
            # Delete both the raw and the integrated grid from scratch.
            _remove_grid_files(get_file(integrated=False,**default))
            _remove_grid_files(get_file(integrated=True,**default))
            default['use_scratch'] = False
            for key in kwargs:
                if key in default:
                    default[key] = originalDefaults[key]
def defaults2str():
    """
    Convert the defaults to a string, e.g. for saving files.
    """
    # Concatenate "keyvalue" pairs in alphabetical key order.
    parts = []
    for key in sorted(defaults):
        parts.append(str(key) + str(defaults[key]))
    return '_'.join(parts)
def defaults_multiple2str():
    """
    Convert the multiple-star defaults to a string, e.g. for saving files.

    The keys of every star's default dict are concatenated as "keyvalue"
    pairs, in alphabetical order per star.
    """
    # Fixes vs. the original: the keys were sorted twice (redundant), and
    # the loop variable shadowed the module-level ``defaults`` dict.
    return '_'.join([str(key) + str(star_defaults[key])
                     for star_defaults in defaults_multiple
                     for key in sorted(star_defaults.keys())])
def get_gridnames(grid=None):
    """
    Return a list of available grid names.

    If you specify a grid name, you instead get two lists: one with all
    available original, non-integrated grids, and one with the
    pre-calculated photometry.

    @parameter grid: name of the type of grid (optional)
    @type grid: string
    @return: list of grid names
    @rtype: list of str
    """
    if grid is None:
        return ['kurucz','fastwind','cmfgen','sdb_uli','wd_boris','wd_da','wd_db',
                'tlusty','uvblue','atlas12','nemo','tkachenko','marcs','marcs2','tmap',
                ]
    # Integrated grids are distinguished by a leading 'i' in the filename.
    basenames = [os.path.basename(ff) for ff in config.glob(basedir,'*%s*.fits'%(grid))]
    integrated = [bn for bn in basenames if bn.startswith('i')]
    original = [bn for bn in basenames if not bn.startswith('i')]
    return original, integrated
def get_file(integrated=False,**kwargs):
    """
    Retrieve the filename containing the specified SED grid.

    The keyword arguments are specific to the kind of grid you're using.
    Basic keywords are 'grid' for the name of the grid, and 'z' for metallicity.
    For other keywords, see the source code.

    Available grids and example keywords:
        - grid='kurucz93':
            - metallicity (z): m01 is -0.1 log metal abundance relative to solar (solar abundances from Anders and Grevesse 1989)
            - metallicity (z): p01 is +0.1 log metal abundance relative to solar (solar abundances from Anders and Grevesse 1989)
            - alpha enhancement (alpha): True means alpha enhanced (+0.4)
            - turbulent velocity (vturb): vturb in km/s
            - nover= True means no overshoot
            - odfnew=True means no overshoot but with better opacities and abundances
        - grid='tlusty':
            - z: log10(Z/Z0)
        - grid='sdb_uli': metallicity and helium fraction (z, he=98)
        - grid='fastwind': no options
        - grid='wd_boris': no options
        - grid='stars': precomputed stars (vega, betelgeuse...)
        - grid='uvblue'
        - grid='marcs'
        - grid='marcs2'
        - grid='atlas12'
        - grid='tkachenko': metallicity z
        - grid='nemo': convection theory and metallicity (CM=Canuto and Mazzitelli 1991),
          (CGM=Canuto,Goldman,Mazzitelli 1996), (MLT=mixinglengththeory a=0.5)
        - grid='marcsjorissensp': high resolution spectra from 4000 to 25000 A of (online available) MARCS grid computed by A. Jorissen
          with turbospectrum v12.1.1 in late 2012, then converted to the Kurucz wavelength grid (by S. Bloemen and M. Hillen).

    @param integrated: choose integrated version of the grid
    @type integrated: boolean
    @keyword grid: gridname (default Kurucz)
    @type grid: str
    @return: gridfile
    @rtype: str
    """
    #-- possibly you give a filename directly
    grid = kwargs.get('grid',defaults['grid'])
    use_scratch = kwargs.get('use_scratch',defaults['use_scratch'])
    if os.path.isfile(grid):
        logger.debug('Selected %s'%(grid))
        if integrated:
            # Prefix the basename with 'i' unless it already has one.
            basename = os.path.basename(grid)
            return os.path.join(os.path.dirname(grid),basename[0]=='i' and basename or 'i'+basename)
        # Fix: use the module logger instead of the root ``logging`` module.
        logger.debug('Returning grid path: '+grid)
        return grid
    grid = grid.lower()
    #-- general
    z = kwargs.get('z',defaults['z'])
    Rv = kwargs.get('Rv',defaults['Rv'])
    #-- only for Kurucz
    vturb = int(kwargs.get('vturb',defaults['vturb']))
    odfnew = kwargs.get('odfnew',defaults['odfnew'])
    alpha = kwargs.get('alpha',defaults['alpha'])
    nover = kwargs.get('nover',defaults['nover'])
    #-- only for WD
    He = int(kwargs.get('He',defaults['He']))
    #-- only for Marcs and COMarcs
    t = kwargs.get('t',defaults['t'])
    a = kwargs.get('a',defaults['a'])
    c = kwargs.get('c',defaults['c'])
    m = kwargs.get('m',defaults['m'])
    co = kwargs.get('co',defaults['co'])
    #-- only for Nemo
    # Fix: honour defaults['ct'] like every other parameter (the original
    # hard-coded 'mlt', which equals the shipped default, so behaviour is
    # unchanged for an unmodified defaults dict).
    ct = kwargs.get('ct',defaults['ct'])

    def _reddening_postfix():
        # Filename postfix encoding the reddening law and Rv, used by the
        # integrated versions of several grids.
        rv = Rv if isinstance(Rv,str) else '{:.2f}'.format(Rv)
        return '_lawfitzpatrick2004_Rv' + rv

    #-- figure out what grid to use
    if grid=='fastwind':
        basename = 'fastwind_sed.fits'
    elif grid in ['kurucz','kurucz2']:
        if not isinstance(z,str): z = '%.1f'%(z)
        if not isinstance(vturb,str): vturb = '%d'%(vturb)
        postfix = _reddening_postfix() if (grid=='kurucz2' and integrated) else ''
        # NOTE(review): combinations such as alpha=True with odfnew=False
        # leave ``basename`` unset and raise NameError below -- TODO confirm
        # whether those combinations were ever meant to be supported.
        if not alpha and not nover and not odfnew:
            basename = 'kurucz93_z%s_k%s_sed%sls.fits'%(z,vturb,postfix)
        elif alpha and odfnew:
            basename = 'kurucz93_z%s_ak%sodfnew_sed%s.fits'%(z,vturb,postfix)
        elif odfnew:
            basename = 'kurucz93_z%s_k%sodfnew_sed%s.fits'%(z,vturb,postfix)
        elif nover:
            basename = 'kurucz93_z%s_k%snover_sed%s.fits'%(z,vturb,postfix)
    elif grid=='cmfgen':
        basename = 'cmfgen_sed.fits'
    elif grid=='sdb_uli':
        if not isinstance(z,str): z = '%.1f'%(z)
        if not isinstance(He,str): He = '%d'%(He)
        basename = 'SED_int_h%s_z%s.fits'%(He,z)
    elif grid=='wd_boris':
        basename = 'SED_WD_Gaensicke.fits'
    elif grid=='wd_da':
        postfix = _reddening_postfix() if integrated else ''
        basename = 'SED_WD_Koester_DA%s.fits'%(postfix)
    elif grid=='wd_db':
        basename = 'SED_WD_Koester_DB.fits'
    elif grid=='marcs':
        if not isinstance(z,str): z = '%.1f'%(z)
        if not isinstance(t,str): t = '%.1f'%(t)
        if not isinstance(a,str): a = '%.2f'%(a)
        if not isinstance(c,str): c = '%.2f'%(c)
        basename = 'marcsp_z%st%s_a%s_c%s_sed.fits'%(z,t,a,c)
    elif grid=='marcs2':
        basename = 'marcsp2_z0.00t2.0_m.1.0c0.00_sed.fits'
    elif grid == 'marcsana':
        if z == '*':
            basename = 'MARCS_SED_Ana_z*.fits'
        else:
            if isinstance(z,str):
                z = float(z)
            basename = 'MARCS_SED_Ana_z{:+.2f}.fits'.format(z)
    elif grid=='comarcs':
        if not isinstance(z,str): z = '%.2f'%(z)
        if not isinstance(co,str): co = '%.2f'%(co)
        if not isinstance(m,str): m = '%.1f'%(m)
        basename = 'comarcsp_z%sco%sm%sxi2.50_sed.fits'%(z,co,m)
    elif grid=='stars':
        basename = 'kurucz_stars_sed.fits'
    elif grid=='tlusty':
        if not isinstance(z,str): z = '%.2f'%(z)
        basename = 'tlusty_z%s_sed.fits'%(z)
    elif grid=='uvblue':
        if not isinstance(z,str): z = '%.1f'%(z)
        basename = 'uvblue_z%s_k2_sed.fits'%(z)
    elif grid=='atlas12':
        if not isinstance(z,str): z = '%.1f'%(z)
        basename = 'atlas12_z%s_sed.fits'%(z)
    elif grid=='tkachenko':
        if not isinstance(z,str): z = '%.2f'%(z)
        basename = 'tkachenko_z%s.fits'%(z)
    elif grid=='nemo':
        ct = ct.lower()
        if ct=='mlt': ct = ct+'072'
        else: ct = ct+'288'
        basename = 'nemo_%s_z%.2f_v%d.fits'%(ct,z,vturb)
    elif grid=='tmap':
        postfix = _reddening_postfix() if integrated else ''
        basename = 'TMAP2012_lowres%s.fits'%(postfix) #only available for 1 metalicity
    elif grid=='heberb':
        basename = 'Heber2000_B_h909_extended.fits' #only 1 metalicity
    elif grid=='hebersdb':
        basename = 'Heber2000_sdB_h909_extended.fits' #only 1 metalicity
    elif grid=='tmapsdb':
        # grids for sdB star fitting (JorisV)
        postfix = _reddening_postfix() if integrated else ''
        basename = 'TMAP2012_sdB_extended%s.fits'%(postfix)
    elif grid=='kuruczsdb':
        # grids for sdB star fitting (JorisV)
        if not isinstance(z,str): z = '%.1f'%(z)
        postfix = _reddening_postfix() if integrated else ''
        basename = 'kurucz_z%s_sdB%s.fits'%(z,postfix)
    elif grid=='kuruczpagb':
        # grids for post-AGB star fitting (MichelH)
        if not isinstance(z,str): z = '%.1f'%(z)
        postfix = _reddening_postfix() if integrated else ''
        basename = 'kurucz_pAGB_z%s_sed%s.fits'%(z,postfix)
    elif grid=='tmaptest':
        # Grids exclusively for testing purposes
        postfix = _reddening_postfix() if integrated else ''
        basename = 'TMAP2012_SEDtest%s.fits'%(postfix) #only available for 1 metalicity
    elif grid=='kurucztest':
        # Grids exclusively for testing purposes
        if not isinstance(z,str): z = '%.1f'%(z)
        postfix = _reddening_postfix() if integrated else ''
        basename = 'kurucz_SEDtest_z%s%s.fits'%(z,postfix) #only available for 1 metalicity
    elif grid=='marcsjorissensp':
        if not isinstance(z,str): z = '%.2f'%(z)
        if not isinstance(a,str): a = '%.2f'%(a)
        if not isinstance(m,str): m = '%.1f'%(m)
        basename = 'MARCSJorissenSp_m%s_z%s_a%s_sed.fits'%(m,z,a)
    else:
        raise ValueError("Grid {} is not recognized: either give valid descriptive arguments, or give an absolute filepath".format(grid))
    #-- retrieve the absolute path of the file and check if it exists:
    if not '*' in basename:
        if use_scratch:
            if integrated:
                grid = scratchdir+'i'+basename
            else:
                grid = scratchdir+basename
        else:
            if integrated:
                grid = config.get_datafile(basedir,'i'+basename)
            else:
                grid = config.get_datafile(basedir,basename)
    #-- we could also ask for a list of files, when wildcards are given:
    else:
        # Fix: removed a dead ``config.glob(basedir,'i'+basename)`` call
        # that was executed unconditionally and immediately overwritten.
        if use_scratch:
            if integrated:
                grid = glob.glob(scratchdir+'i'+basename)
            else:
                grid = glob.glob(scratchdir+basename)
        else:
            if integrated:
                grid = config.glob(basedir,'i'+basename)
            else:
                grid = config.glob(basedir,basename)
    logger.debug('Returning grid path(s): %s'%(grid))
    return grid
def _blackbody_input(fctn):
    """
    Prepare input and output for blackbody-like functions.

    If the user gives wavelength units and Flambda units, we only need to
    convert everything to SI (and back to the desired units in the end).
    If the user gives frequency units and Fnu units, we only need to convert
    everything to SI (and back to the desired units in the end).
    If the user gives wavelength units and Fnu units, we need to convert
    the wavelengths first to frequency.

    The wrapped function receives ``(x, x_unit_type)`` and the temperature
    in Kelvin, and must return the flux in the current SI-like convention.
    """
    @functools.wraps(fctn)
    def dobb(x,T,**kwargs):
        wave_units = kwargs.get('wave_units','AA')
        flux_units = kwargs.get('flux_units','erg/s/cm2/AA')
        # Keyword arguments for the final output unit conversion.
        to_kwargs = {}
        #-- prepare input
        #-- what kind of units did we receive?
        curr_conv = constants._current_convention
        # X: wavelength/frequency -- convert to the current convention
        x_unit_type = conversions.get_type(wave_units)
        x = conversions.convert(wave_units,curr_conv,x)
        # T: temperature -- accept a (value, unit) tuple and convert to K
        if isinstance(T,tuple):
            T = conversions.convert(T[1],'K',T[0])
        # Y: flux -- normalize the flux units to the SI convention string
        y_unit_type = conversions.change_convention('SI',flux_units)
        #-- if you give Jy vs micron, we need to first convert wavelength to
        #   frequency (and vice versa for Flambda vs frequency). The strings
        #   'kg1 rad-1 s-2' and 'kg1 m-1 s-3' are the SI base-unit signatures
        #   of Fnu and Flambda respectively.
        if y_unit_type=='kg1 rad-1 s-2' and x_unit_type=='length':
            x = conversions.convert(conversions._conventions[curr_conv]['length'],'rad/s',x)
            x_unit_type = 'frequency'
        elif y_unit_type=='kg1 m-1 s-3' and x_unit_type=='frequency':
            x = conversions.convert('rad/s',conversions._conventions[curr_conv]['length'],x)
            x_unit_type = 'length'
        elif not y_unit_type in ['kg1 rad-1 s-2','kg1 m-1 s-3']:
            raise NotImplementedError(flux_units,y_unit_type)
        #-- correct for rad: rad/s -> Hz for the actual blackbody evaluation
        if x_unit_type=='frequency':
            x /= (2*np.pi)
            to_kwargs['freq'] = (x,'Hz')
        else:
            to_kwargs['wave'] = (x,conversions._conventions[curr_conv]['length'])
        #-- run the wrapped blackbody-like function
        I = fctn((x,x_unit_type),T)
        #-- prepare output
        disc_integrated = kwargs.get('disc_integrated',True)
        ang_diam = kwargs.get('ang_diam',None)
        if disc_integrated:
            # Match the normalisation of the Kurucz disc-integrated SEDs.
            I *= np.sqrt(2*np.pi)
            # An angular diameter is only applied when disc_integrated=True
            # (see the blackbody() docstring).
            if ang_diam is not None:
                scale = conversions.convert(ang_diam[1],'sr',ang_diam[0]/2.)
                I *= scale
        # Convert the result back to the units the caller asked for.
        I = conversions.convert(curr_conv,flux_units,I,**to_kwargs)
        return I
    return dobb
@_blackbody_input
def blackbody(x,T,wave_units='AA',flux_units='erg/s/cm2/AA',disc_integrated=True,ang_diam=None):
    """
    Planck black body curve.

    To get the result in the same units as the Kurucz disc-integrated SEDs,
    the intensity is multiplied by sqrt(2*pi) (set C{disc_integrated=True}).
    An angular diameter can only be given if C{disc_integrated} is True; to
    convert the scale parameter back to mas, simply do::

        ang_diam = 2*conversions.convert('sr','mas',scale)

    The decorator L{_blackbody_input} handles all unit bookkeeping: the user
    is free to choose wavelength or frequency units, and can even mix them
    with Fnu- or Flambda-type flux units. By the time this body runs, C{x}
    is a tuple C{(values, 'frequency'|'length')} in the current convention.

    Be careful when, e.g. during fitting, scale contains an error: be sure to
    set the option C{unpack=True} in the L{conversions.convert} function!

    @param x: wavelength or frequency points
    @type x: ndarray
    @param T: temperature, or (temperature, unit)
    @type T: float or tuple (float,str)
    @param wave_units: wavelength units (frequency or length)
    @type wave_units: str (units)
    @param flux_units: flux units (could be in Fnu-units or Flambda-units)
    @type flux_units: str (units)
    @param disc_integrated: if True, result is in the same units as Kurucz
    disc-integrated SEDs
    @type disc_integrated: bool
    @param ang_diam: angular diameter (in mas or rad or something similar)
    @type ang_diam: (value, unit)
    @return: intensity
    @rtype: array
    """
    x, kind = x
    #-- evaluate the Planck law in whichever representation the decorator
    #   settled on (frequency or wavelength)
    if kind == 'frequency':
        prefac = 2.0 * constants.hh / constants.cc**2
        slope = constants.hh / (constants.kB*T)
        intens = prefac * x**3 * 1. / (np.exp(slope*x) - 1.)
    elif kind == 'length':
        prefac = 2.0 * constants.hh * constants.cc**2
        slope = constants.hh*constants.cc / (constants.kB*T)
        intens = prefac / x**5. * 1. / (np.exp(slope/x) - 1.)
    else:
        raise ValueError(kind)
    return intens
@_blackbody_input
def rayleigh_jeans(x,T,wave_units='AA',flux_units='erg/s/cm2/AA',disc_integrated=True,ang_diam=None):
    """
    Rayleigh-Jeans approximation of a black body.

    Valid at long wavelengths.

    For input details, see L{blackbody}.

    @return: intensity
    @rtype: array
    """
    x,x_unit_type = x
    #-- now make the appropriate model
    if x_unit_type=='frequency': # frequency units
        factor = 2.0 * constants.kB*T / constants.cc**2
        I = factor * x**2
    elif x_unit_type=='length': # wavelength units
        factor = 2.0 * constants.cc * constants.kB*T
        I = factor / x**4.
    else:
        #-- BUGFIX: the error branch referenced an undefined name
        #   ('unit_type'), which raised a NameError instead of the intended
        #   ValueError reporting the offending unit type.
        raise ValueError(x_unit_type)
    return I
@_blackbody_input
def wien(x,T,wave_units='AA',flux_units='erg/s/cm2/AA',disc_integrated=True,ang_diam=None):
    """
    Wien approximation of a black body.

    Valid at short wavelengths.

    For input details, see L{blackbody}.

    @return: intensity
    @rtype: array
    """
    x,x_unit_type = x
    #-- now make the appropriate model
    if x_unit_type=='frequency': # frequency units
        factor = 2.0 * constants.hh / constants.cc**2
        expont = constants.hh / (constants.kB*T)
        I = factor * x**3 * 1. * np.exp(-expont*x)
    elif x_unit_type=='length': # wavelength units
        factor = 2.0 * constants.hh * constants.cc**2
        expont = constants.hh*constants.cc / (constants.kB*T)
        I = factor / x**5. * np.exp(-expont/x)
    else:
        #-- BUGFIX: the error branch referenced an undefined name
        #   ('unit_type'), which raised a NameError instead of the intended
        #   ValueError reporting the offending unit type.
        raise ValueError(x_unit_type)
    return I
def get_table_single(teff=None,logg=None,ebv=None,rad=None,star=None,
                     wave_units='AA',flux_units='erg/s/cm2/AA/sr',**kwargs):
    """
    Retrieve the spectral energy distribution of a model atmosphere.

    Wavelengths in A (angstrom).
    Fluxes in Ilambda = ergs/cm2/s/AA/sr, except specified via 'units',
    If you give 'units', and /sr is not included, you are responsible yourself
    for giving an extra keyword with the angular diameter C{ang_diam}, or other
    possibilities offered by the C{units.conversions.convert} function.

    Possibility to redden the fluxes according to the reddening parameter EB_V.

    Extra kwargs can specify the grid type.
    Extra kwargs can specify constraints on the size of the grid to interpolate.
    Extra kwargs can specify reddening law types.
    Extra kwargs can specify information for conversions.

    @param teff: effective temperature
    @type teff: float
    @param logg: logarithmic gravity (cgs)
    @type logg: float
    @param ebv: reddening coefficient
    @type ebv: float
    @param rad: radius scaling factor; when given, fluxes are multiplied by rad**2
    @type rad: float
    @param star: name of a precomputed stellar model extension in the grid file
    @type star: str
    @param wave_units: units to convert the wavelengths to (if not given, A)
    @type wave_units: str
    @param flux_units: units to convert the fluxes to (if not given, erg/s/cm2/AA/sr)
    @type flux_units: str
    @return: wavelength,flux
    @rtype: (ndarray,ndarray)
    """
    #-- get the FITS-file containing the tables
    gridfile = get_file(**kwargs)
    #-- read the file; guarantee the handle is closed again even if the
    #   reddening/conversion steps below raise (the original leaked it)
    ff = pf.open(gridfile)
    try:
        #-- a possible grid is the one where only selected stellar models are
        #   present. In that case, there is no need for interpolation or
        #   other stuff.
        if star is not None:
            wave = ff[star.upper()].data.field('wavelength')
            flux = ff[star.upper()].data.field('flux')
        else:
            teff = float(teff)
            logg = float(logg)
            #-- if we have a grid model, no need for interpolation
            try:
                #-- extension name as in fits files prepared by Steven
                mod_name = "T%05d_logg%01.02f" %(teff,logg)
                mod = ff[mod_name]
                wave = mod.data.field('wavelength')
                flux = mod.data.field('flux')
                logger.debug('Model SED taken directly from file (%s)'%(os.path.basename(gridfile)))
            #-- if the teff/logg is not present, use the interpolation thing
            except KeyError:
                #-- it is possible we first have to set the interpolation
                #   function. This function is memoized, so it will not be
                #   calculated twice.
                wave,teffs,loggs,flux,flux_grid = get_grid_mesh(**kwargs)
                logger.debug('Model SED interpolated from grid %s (%s)'%(os.path.basename(gridfile),kwargs))
                wave = wave + 0.
                flux = flux_grid(np.log10(teff),logg) + 0.
        #-- convert to arrays
        wave = np.array(wave,float)
        flux = np.array(flux,float)
        #-- redden if necessary; drop any 'wave' keyword so it does not clash
        #   with the explicit wave= argument below
        if ebv is not None and ebv>0:
            if 'wave' in list(kwargs.keys()):
                removed = kwargs.pop('wave')
            flux = reddening.redden(flux,wave=wave,ebv=ebv,rtype='flux',**kwargs)
        if flux_units!='erg/s/cm2/AA/sr':
            flux = conversions.convert('erg/s/cm2/AA/sr',flux_units,flux,wave=(wave,'AA'),**kwargs)
        if wave_units!='AA':
            wave = conversions.convert('AA',wave_units,wave,**kwargs)
    finally:
        ff.close()
    #-- take the radius into account when provided
    if rad is not None:
        flux = rad**2 * flux
    return wave,flux
def get_itable_single(teff=None,logg=None,ebv=0,z=0,rad=None,photbands=None,
                      wave_units=None,flux_units='erg/s/cm2/AA/sr',**kwargs):
    """
    Retrieve the spectral energy distribution of a model atmosphere in
    photometric passbands.

    Wavelengths in A (angstrom). If you set 'wavelengths' to None, no effective
    wavelengths will be calculated. Otherwise, the effective wavelength is
    calculated taking the model flux into account.
    Fluxes in Ilambda = ergs/cm2/s/AA/sr, except specified via 'units',
    If you give 'units', and /sr is not included, you are responsible yourself
    for giving an extra keyword with the angular diameter C{ang_diam}, or other
    possibilities offered by the C{units.conversions.convert} function.

    Possibility to redden the fluxes according to the reddening parameter EB_V.

    Extra kwargs can specify the grid type.
    Extra kwargs can specify constraints on the size of the grid to interpolate.
    Extra kwargs can specify reddening law types.
    Extra kwargs can specify information for conversions.

    @param teff: effective temperature
    @type teff: float
    @param logg: logarithmic gravity (cgs)
    @type logg: float
    @param ebv: reddening coefficient
    @type ebv: float
    @param photbands: photometric passbands
    @type photbands: list of photometric passbands
    @param wave_units: units to convert the wavelengths to (if not given, A)
    @type wave_units: str
    @param flux_units: units to convert the fluxes to (if not given, erg/s/cm2/AA/sr)
    @type flux_units: str
    @keyword clear_memory: flag to clear memory from previously loaded SED tables.
    If you set it to False, you can easily get an overloaded memory!
    @type clear_memory: boolean
    @return: (wave,) flux, absolute luminosity
    @rtype: (ndarray,)ndarray,float
    """
    if 'vrad' in kwargs:
        logger.debug('vrad is NOT taken into account when interpolating in get_itable()')
    if 'rv' in kwargs:
        logger.debug('Rv is NOT taken into account when interpolating in get_itable()')
    # TODO: when skip_z is True, the default of z should be changed to the
    # input default
    # skip_z = False
    # if 'z' in kwargs:
    #     skip_z = True
    if photbands is None:
        raise ValueError('no photometric passbands given')
    ebvrange = kwargs.pop('ebvrange',(-np.inf,np.inf))
    zrange = kwargs.pop('zrange',(-np.inf,np.inf))
    clear_memory = kwargs.pop('clear_memory',True)
    #-- retrieve structured information on the grid (memoized)
    markers,(g_teff,g_logg,g_ebv, g_z),gpnts,ext = _get_itable_markers(photbands,ebvrange=ebvrange,zrange=zrange,
                            include_Labs=True,clear_memory=clear_memory,**kwargs)
    #-- if we have a grid model, no need for interpolation. Grid points are
    #   encoded as a single float code built from (z,teff,logg,ebv).
    try:
        input_code = float('%3d%05d%03d%03d'%(int(round((z+5)*100)),int(round(teff)),int(round(logg*100)),int(round(ebv*100))))
        index = markers.searchsorted(input_code)
        output_code = markers[index]
        #-- if not available, go on and interpolate!
        #   we raise a KeyError for symmetry with C{get_table}.
        if not input_code==output_code:
            raise KeyError
        flux = ext[index]
    #-- if the teff/logg is not present, use the interpolation thing
    except KeyError:
        #-- cheat edges in interpolating function
        if not new_scipy:
            teff = teff+1e-2
            logg = logg-1e-6
            ebv = ebv+1e-6
            #-- it is possible we have to interpolate: identify the grid points in
            #   the immediate vicinity of the given fundamental parameters
            i_teff = max(1,g_teff.searchsorted(teff))
            i_logg = max(1,g_logg.searchsorted(logg))
            i_ebv = max(1,g_ebv.searchsorted(ebv))
            i_z = max(1,g_z.searchsorted(z))
            if i_teff==len(g_teff): i_teff -= 1
            if i_logg==len(g_logg): i_logg -= 1
            if i_ebv==len(g_ebv): i_ebv -= 1
            if i_z==len(g_z): i_z -= 1
            #-- prepare fluxes matrix for interpolation, and x,y an z axis
            teffs_subgrid = g_teff[i_teff-1:i_teff+1]
            loggs_subgrid = g_logg[i_logg-1:i_logg+1]
            ebvs_subgrid = g_ebv[i_ebv-1:i_ebv+1]
            zs_subgrid = g_z[i_z-1:i_z+1]
            #-- iterates over df-1 values (df=degrees of freedom): we know that the
            #   grid is ordered via z in the last part (about twice as fast).
            #   Reducing the grid size to 2 increases the speed again with a factor 2.
            #-- if metallicity needs to be interpolated
            if not (z in g_z):
                fluxes = np.zeros((2,2,2,2,len(photbands)+1))
                for i,j,k in itertools.product(range(2),range(2),range(2)):
                    input_code = float('%3d%05d%03d%03d'%(int(round((zs_subgrid[i]+5)*100)),\
                                            int(round(teffs_subgrid[j])),\
                                            int(round(loggs_subgrid[k]*100)),\
                                            int(round(ebvs_subgrid[1]*100))))
                    index = markers.searchsorted(input_code)
                    fluxes[i,j,k] = ext[index-1:index+1]
                myf = InterpolatingFunction([zs_subgrid,np.log10(teffs_subgrid),
                            loggs_subgrid,ebvs_subgrid],np.log10(fluxes),default=-100*np.ones_like(fluxes.shape[1]))
                flux = 10**myf(z,np.log10(teff),logg,ebv) + 0.
            #-- if only teff,logg and ebv need to be interpolated (faster)
            else:
                fluxes = np.zeros((2,2,2,len(photbands)+1))
                for i,j in itertools.product(range(2),range(2)):
                    input_code = float('%3d%05d%03d%03d'%(int(round((z+5)*100)),\
                                            int(round(teffs_subgrid[i])),\
                                            int(round(loggs_subgrid[j]*100)),\
                                            int(round(ebvs_subgrid[1]*100))))
                    index = markers.searchsorted(input_code)
                    fluxes[i,j] = ext[index-1:index+1]
                myf = InterpolatingFunction([np.log10(teffs_subgrid),
                            loggs_subgrid,ebvs_subgrid],np.log10(fluxes),default=-100*np.ones_like(fluxes.shape[1]))
                flux = 10**myf(np.log10(teff),logg,ebv) + 0.
        #-- new scipy version
        else:
            #-- take care of inner edge of grid
            i_teff = max(1,g_teff.searchsorted(teff))
            i_logg = max(1,g_logg.searchsorted(logg))
            i_ebv = max(1,g_ebv.searchsorted(ebv))
            i_z = max(1,g_z.searchsorted(z))
            #-- take care of outer edge of grid
            if i_teff==len(g_teff): i_teff -= 1
            if i_logg==len(g_logg): i_logg -= 1
            if i_ebv==len(g_ebv): i_ebv -= 1
            if i_z==len(g_z): i_z -= 1
            if not (z in g_z):
                #-- prepare fluxes matrix for interpolation, and x,y an z axis
                myflux = np.zeros((16,4+len(photbands)+1))
                mygrid = itertools.product(g_teff[i_teff-1:i_teff+1],g_logg[i_logg-1:i_logg+1],g_z[i_z-1:i_z+1])
                for i,(t,g,zz) in enumerate(mygrid):
                    myflux[2*i,:4] = t,g,g_ebv[i_ebv-1],zz
                    myflux[2*i+1,:4] = t,g,g_ebv[i_ebv],zz
                    input_code = float('%3d%05d%03d%03d'%(int(round((zz+5)*100)),\
                                int(round(t)),int(round(g*100)),\
                                int(round(g_ebv[i_ebv]*100))))
                    index = markers.searchsorted(input_code)
                    myflux[2*i,4:] = ext[index-1]
                    myflux[2*i+1,4:] = ext[index]
                #-- interpolate in log10 of temperature
                myflux[:,0] = np.log10(myflux[:,0])
                flux = 10**griddata(myflux[:,:4],np.log10(myflux[:,4:]),(np.log10(teff),logg,ebv,z))
            else:
                #-- prepare fluxes matrix for interpolation, and x,y axis
                myflux = np.zeros((8,3+len(photbands)+1))
                mygrid = itertools.product(g_teff[i_teff-1:i_teff+1],g_logg[i_logg-1:i_logg+1])
                for i,(t,g) in enumerate(mygrid):
                    myflux[2*i,:3] = t,g,g_ebv[i_ebv-1]
                    myflux[2*i+1,:3] = t,g,g_ebv[i_ebv]
                    input_code = float('%3d%05d%03d%03d'%(int(round((z+5)*100)),\
                                int(round(t)),int(round(g*100)),\
                                int(round(g_ebv[i_ebv]*100))))
                    index = markers.searchsorted(input_code)
                    myflux[2*i,3:] = ext[index-1]
                    myflux[2*i+1,3:] = ext[index]
                #-- interpolate in log10 of temperature
                myflux[:,0] = np.log10(myflux[:,0])
                flux = 10**griddata(myflux[:,:3],np.log10(myflux[:,3:]),(np.log10(teff),logg,ebv))
    except IndexError:
        #-- probably metallicity outside of grid
        raise ValueError('point outside of grid (teff={teff}, logg={logg}, ebv={ebv}, z={z}'.format(**locals()))
    except ValueError:
        #-- you tried to make a code of a negative number
        raise ValueError('point outside of grid (teff={teff}, logg={logg}, ebv={ebv}, z={z}'.format(**locals()))
    if np.any(np.isnan(flux)):
        #-- interpolation returned NaN: requested point not inside the grid
        raise ValueError('point outside of grid (teff={teff}, logg={logg}, ebv={ebv}, z={z}'.format(**locals()))
    if np.any(np.isinf(flux)):
        # NOTE(review): 'fluxes' only exists when the interpolation branch ran;
        # a direct grid hit with infinite flux would raise NameError here -- confirm
        flux = np.zeros(fluxes.shape[-1])
    #-- convert to arrays: remember that last column of the fluxes is actually
    #   absolute luminosity
    flux,Labs = np.array(flux[:-1],float),flux[-1]
    #-- Take radius into account when provided
    if rad is not None:
        flux,Labs = flux*rad**2, Labs*rad**2
    if flux_units!='erg/s/cm2/AA/sr':
        flux = conversions.nconvert('erg/s/cm2/AA/sr',flux_units,flux,photband=photbands,**kwargs)
    if wave_units is not None:
        model = get_table(teff=teff,logg=logg,ebv=ebv,**kwargs)
        wave = filters.eff_wave(photbands,model=model)
        if wave_units !='AA':
            wave = conversions.convert('AA',wave_units,wave,**kwargs)
        return wave,flux,Labs
    else:
        return flux,Labs
def get_itable(photbands=None, wave_units=None, flux_units='erg/s/cm2/AA/sr',
               grids=None, **kwargs):
    """
    Retrieve the integrated spectral energy distribution of a combined model
    atmosphere.

    Parameters for the individual components are passed as keywords with a
    component suffix (C{teff1}, C{logg2}, ...); a keyword without suffix
    (e.g. C{ebv}) applies to every component.

    @param photbands: photometric passbands
    @type photbands: list
    @param wave_units: units to convert the effective wavelengths to (if not
    given, no effective wavelengths are computed)
    @type wave_units: str
    @param flux_units: units to convert the fluxes to (if not given, erg/s/cm2/AA/sr)
    @type flux_units: str
    @param grids: specifications for the grid of each component
    @type grids: list of dict
    @return: (wave,) flux, absolute luminosity
    @rtype: (ndarray,)ndarray,float
    """
    #-- Find the parameters provided and store them separately.
    values, parameters, components = {}, set(), set()
    for key in list(kwargs.keys()):
        if re.search("^(teff|logg|ebv|z|rad)\d?$", key):
            par, comp = re.findall("^(teff|logg|ebv|z|rad)(\d?)$", key)[0]
            values[key] = kwargs.pop(key)
            parameters.add(par)
            components.add(comp)
    #-- If there is only one component, we can directly return the result
    if len(components) == 1:
        kwargs.update(values)
        return get_itable_single(photbands=photbands,wave_units=wave_units,
                             flux_units=flux_units,**kwargs)
    #-- run over all fluxes and sum them; we do not need to multiply with the
    #   radius as the radius is provided as an argument to get_itable_single.
    #   Components are sorted so the pairing with defaults_multiple is
    #   deterministic (sets have no guaranteed iteration order).
    fluxes, Labs = [],[]
    for comp, grid in zip(sorted(components),defaults_multiple):
        #-- work on copies: popping from the module-level default dicts or
        #   updating the shared kwargs would leak state across components/calls
        grid = dict(grid)
        grid.pop('z',0.0)
        grid.pop('Rv',0.0)
        kwargs_ = kwargs.copy()
        kwargs_.update(grid)
        for par in parameters:
            kwargs_[par] = values[par+comp] if par+comp in values else values[par]
        f,L = get_itable_single(photbands=photbands,wave_units=None,**kwargs_)
        fluxes.append(f)
        Labs.append(L)
    fluxes = np.sum(fluxes,axis=0)
    Labs = np.sum(Labs,axis=0)
    if flux_units!='erg/s/cm2/AA/sr':
        fluxes = np.array([conversions.convert('erg/s/cm2/AA/sr',flux_units,fluxes[i],photband=photbands[i]) for i in range(len(fluxes))])
    if wave_units is not None:
        #-- rebuild the combined model SED to compute effective wavelengths
        #   (BUGFIX: the original referenced undefined names teff/logg/ebv and
        #   the nonexistent get_table_multiple here)
        mykwargs = dict(list(values.items()) + list(kwargs.items()))
        model = get_table(grids=grids,**mykwargs)
        wave = filters.eff_wave(photbands,model=model)
        if wave_units !='AA':
            wave = conversions.convert('AA',wave_units,wave)
        return wave,fluxes,Labs
    return fluxes,Labs
def get_itable_single_pix(teff=None,logg=None,ebv=None,z=0,rv=3.1,vrad=0,photbands=None,
                  wave_units=None,flux_units='erg/s/cm2/AA/sr',**kwargs):
    """
    Super fast grid interpolator.
    Possible kwargs are teffrange,loggrange etc.... that are past on to
    L{_get_pix_grid}. You should probably use these options when you want to
    interpolate in many variables; supplying these ranges will make the grid
    smaller and thus decrease memory usage.
    It is possible to fix C{teff}, C{logg}, C{ebv}, C{z}, C{rv} and/or C{vrad}
    to one value, in which case it B{has} to be a point in the grid. If you want
    to retrieve a list of fluxes with the same ebv value that is not in the grid,
    you need to give an array with all equal values. The reason is that the
    script can try to minimize the number of interpolations, by fixing a
    variable on a grid point. The fluxes on the other gridpoints will then not
    be interpolated over. These parameter also have to be listed with the
    additional C{exc_interpolpar} keyword.
    >>> teffs = np.linspace(5000,7000,100)
    >>> loggs = np.linspace(4.0,4.5,100)
    >>> ebvs = np.linspace(0,1,100)
    >>> zs = np.linspace(-0.5,0.5,100)
    >>> rvs = np.linspace(2.2,5.0,100)
    >>> set_defaults(grid='kurucz2')
    >>> flux,labs = get_itable_pix(teffs,loggs,ebvs,zs,rvs,photbands=['JOHNSON.V'])
    >>> names = ['teffs','loggs','ebvs','zs','rvs']
    >>> p = pl.figure()
    >>> for i in range(len(names)):
    ...    p = pl.subplot(2,3,i+1)
    ...    p = pl.plot(locals()[names[i]],flux[0],'k-')
    ...    p = pl.xlabel(names[i])
    Thanks to Steven Bloemen for the core implementation of the interpolation
    algorithm.
    The addition of the exc_interpolpar keyword was done by Michel Hillen (Jan 2016).
    """
    #-- setup some standard values when they are not provided.
    #   NOTE: these lines assume teff is iterable when any default is needed;
    #   scalar-only calls rely on the hasattr() loop below instead.
    ebv = np.array([0 for i in teff]) if ebv is None else ebv
    z = np.array([0.for i in teff]) if z is None else z
    rv = np.array([3.1 for i in teff]) if rv is None else rv
    vrad = np.array([0 for i in teff]) if vrad is None else vrad
    #for var in ['teff','logg','ebv','z','rv','vrad']:
        #if not hasattr(locals()[var],'__iter__'):
            #print var, locals()[var]
            #locals()[var] = np.array([ locals()[var] ])
            #print locals()
    # NOTE(review): vrad is deliberately forced to 0 here, so any vrad passed
    # in is ignored (consistent with get_itable_single's warning) -- confirm
    vrad = 0
    N = 1
    variables = []
    clear_memory = kwargs.pop('clear_memory',False)
    #variables = kwargs.pop('variables',['teff','logg','ebv','z','rv','vrad']) # !!!
    # WARNING: this loop reads the parameters via locals()[var]; renaming any
    # of the teff/logg/ebv/z/rv/vrad arguments would silently break it.
    # Scalars are pinned via '<var>range'; iterables become interpolation axes.
    for var in ['teff','logg','ebv','z','rv','vrad']: # !!!
        if not hasattr(locals()[var],'__iter__'):
            kwargs.setdefault(var+'range',(locals()[var],locals()[var]))
        else:
            N = len(locals()[var])
            variables.append(var)
    #-- retrieve structured information on the grid (memoized)
    axis_values,gridpnts,pixelgrid,cols = _get_pix_grid(photbands,
                include_Labs=True, variables=variables,
                clear_memory=clear_memory,**kwargs) # !!!
    #-- Remove parameters from the grid if it is requested that these should not be interpolated
    #-- (with the exc_interpolpar keyword). This can only work if the requested values of
    #-- these parameters all correspond to a single point in the original grid!
    #-- we check whether this condition is fulfilled
    #-- if not, then the parameter is not excluded from the interpolation
    # #-- and a warning is raised to the log
    # for var in kwargs.get('exc_interpolpar',[]):  # e.g. for Kurucz, var can be anything in ['teff','logg','ebv','z']
    #     # retrieve the unique values in var
    #     var_uniquevalue = np.unique(np.array(locals()[var]))
    #     # if there is more than one unique value in var, then our condition is not fulfilled
    #     if len(var_uniquevalue) > 1:
    #         logger.warning('{} is requested to be excluded from interpolation, although fluxes for more than one value are requested!?'.format(var))
    #     else:
    #         # retrieve the index of var in the 'pixelgrid' and 'cols' arrays of the original grid
    #         print(cols)
    #         print(var)
    #         var_index = np.where(cols == var)[0]
    #         # retrieve the index of the unique value in the original grid
    #         print(var_index)
    #         print(axis_values[var_index])
    #         print(var_uniquevalue[0])
    #         var_uniquevalue_index = np.where(axis_values[var_index] == var_uniquevalue[0])[0]
    #         # if the unique value does not correspond to a grid point of the original grid, then we only raise a warning
    #         if len(var_uniquevalue_index) == 0:
    #             logger.warning('{} can only be excluded from interpolation, as requested, if its values are all equal to an actual grid point!'.format(var))
    #         else:
    #             # remove var from the list of variables in the original grid
    #             trash = axis_values.pop(var_index)
    #             cols = np.delete(cols,[var_index])
    #             # since we do not know the axis of var in advance, we devise a clever way to
    #             # bring it to the first axis by transposing the array
    #             indices = [x for x in range(pixelgrid.ndim)]
    #             indices.remove(var_index)
    #             indices.insert(0,var_index)
    #             pixelgrid = np.transpose(pixelgrid, indices)
    #             # now we select the subgrid corresponding to the requested value of var
    #             pixelgrid = pixelgrid[var_uniquevalue_index[0]]
    #-- prepare input: one row per interpolation variable, N points per row
    #   (again read through locals(); cols is the variable order of the grid)
    values = np.zeros((len(cols),N))
    for i,col in enumerate(cols):
        values[i] = locals()[col]
    #-- the grid stores log10(flux); last row is the absolute luminosity
    pars = 10**interpol.interpolate(values,axis_values,pixelgrid)
    flux,Labs = pars[:-1],pars[-1]
    #-- Take radius into account when provided
    if 'rad' in kwargs:
        flux,Labs = flux*kwargs['rad']**2, Labs*kwargs['rad']**2
    #-- change flux and wavelength units if needed
    if flux_units!='erg/s/cm2/AA/sr':
        flux = conversions.nconvert('erg/s/cm2/AA/sr',flux_units,flux,photband=photbands,**kwargs)
    if wave_units is not None:
        #-- effective wavelengths are computed from a full model SED
        model = get_table(teff=teff,logg=logg,ebv=ebv,**kwargs)
        wave = filters.eff_wave(photbands,model=model)
        if wave_units !='AA':
            wave = conversions.convert('AA',wave_units,wave,**kwargs)
        return wave,flux,Labs
    else:
        return flux,Labs
def get_itable_pix(photbands=None, wave_units=None, flux_units='erg/s/cm2/AA/sr',
               grids=None, **kwargs):
    """
    Super fast grid interpolator for multiple tables, completely based on
    L{get_itable_single_pix}.

    Parameters for the individual components are passed as keywords with a
    component suffix (C{teff1}, C{logg2}, ...); a keyword without suffix
    applies to every component.

    @param photbands: photometric passbands
    @type photbands: list
    @param wave_units: units to convert the effective wavelengths to (if not
    given, no effective wavelengths are computed)
    @type wave_units: str
    @param flux_units: units to convert the fluxes to (if not given, erg/s/cm2/AA/sr)
    @type flux_units: str
    @param grids: specifications for the grid of each component
    @type grids: list of dict
    @return: (wave,) flux, absolute luminosity
    @rtype: (ndarray,)ndarray,float
    """
    #-- Find the parameters provided and store them separately.
    values, parameters, components = {}, set(), set()
    for key in list(kwargs.keys()):
        if re.search("^(teff|logg|ebv|z|rv|vrad|rad)\d?$", key):
            par, comp = re.findall("^(teff|logg|ebv|z|rv|vrad|rad)(\d?)$", key)[0]
            values[key] = kwargs.pop(key)
            parameters.add(par)
            components.add(comp)
    #-- If there is only one component, we can directly return the result
    if len(components) == 1:
        kwargs.update(values)
        return get_itable_single_pix(photbands=photbands,wave_units=wave_units,
                                 flux_units=flux_units,**kwargs)
    #-- run over all fluxes and sum them; we do not need to multiply with the
    #   radius as the radius is provided as an argument to itable_single_pix.
    #   Components are sorted so the pairing with defaults_multiple is
    #   deterministic (sets have no guaranteed iteration order).
    fluxes, Labs = [],[]
    for comp, grid in zip(sorted(components),defaults_multiple):
        #-- work on copies: popping from the module-level default dicts or
        #   updating the shared kwargs would leak state across components/calls
        grid = dict(grid)
        grid.pop('z',0.0)
        grid.pop('Rv',0.0)
        kwargs_ = kwargs.copy()
        kwargs_.update(grid)
        for par in parameters:
            kwargs_[par] = values[par+comp] if par+comp in values else values[par]
        f,L = get_itable_single_pix(photbands=photbands,wave_units=None,**kwargs_)
        fluxes.append(f)
        Labs.append(L)
    fluxes = np.sum(fluxes,axis=0)
    Labs = np.sum(Labs,axis=0)
    if flux_units!='erg/s/cm2/AA/sr':
        fluxes = np.array([conversions.convert('erg/s/cm2/AA/sr',flux_units,fluxes[i],photband=photbands[i]) for i in range(len(fluxes))])
    if wave_units is not None:
        #-- rebuild the combined model SED to compute effective wavelengths
        #   (BUGFIX: the original referenced undefined names teff/logg/ebv and
        #   the nonexistent get_table_multiple here). Only pass parameters
        #   get_table understands (no rv/vrad).
        sedpars = {key:val for key,val in values.items()
                   if re.match("^(teff|logg|ebv|z|rad)\d?$",key)}
        mykwargs = dict(list(sedpars.items()) + list(kwargs.items()))
        model = get_table(grids=grids,**mykwargs)
        wave = filters.eff_wave(photbands,model=model)
        if wave_units !='AA':
            wave = conversions.convert('AA',wave_units,wave)
        return wave,fluxes,Labs
    return fluxes,Labs
#def get_table_multiple(teff=None,logg=None,ebv=None,radius=None,
#wave_units='AA',flux_units='erg/cm2/s/AA/sr',grids=None,full_output=False,**kwargs):
def get_table(wave_units='AA',flux_units='erg/cm2/s/AA/sr',grids=None,full_output=False,**kwargs):
    """
    Retrieve the spectral energy distribution of a combined model atmosphere.

    Parameters for the individual components are passed as keywords with a
    component suffix (C{teff1}, C{logg2}, ...); a keyword without suffix
    applies to every component. The component SEDs are interpolated onto a
    common wavelength grid (in log space) and summed.

    @param wave_units: units to convert the wavelengths to (if not given, A)
    @type wave_units: str
    @param flux_units: units to convert the fluxes to (if not given, erg/s/cm2/AA/sr)
    @type flux_units: str
    @param grids: specifications for the grid of each component
    @type grids: list of dict
    @param full_output: return all individual SEDs stacked instead of their sum
    @type full_output: boolean
    @return: wavelength,flux
    @rtype: (ndarray,ndarray)
    """
    #-- Find the parameters provided and store them separately.
    values, parameters, components = {}, set(), set()
    for key in list(kwargs.keys()):
        if re.search("^(teff|logg|ebv|z|rad)\d?$", key):
            par, comp = re.findall("^(teff|logg|ebv|z|rad)(\d?)$", key)[0]
            values[key] = kwargs.pop(key)
            parameters.add(par)
            components.add(comp)
    #-- If there is only one component we can directly return the result.
    #   NOTE(review): get_table_single has no full_output parameter, so it
    #   ends up in its **kwargs -- confirm downstream functions tolerate it.
    if len(components) == 1:
        kwargs.update(values)
        return get_table_single(wave_units=wave_units, flux_units=flux_units,
                            full_output=full_output,**kwargs)
    #-- Run over all components and collect their SEDs; we do not multiply
    #   with the radius here, as 'rad' is passed through to get_table_single.
    #   Components are sorted so the pairing with defaults_multiple is
    #   deterministic (sets have no guaranteed iteration order).
    waves, fluxes = [],[]
    for comp, grid in zip(sorted(components),defaults_multiple):
        #-- work on a copy: popping from the module-level default dicts would
        #   permanently mutate them across calls
        grid = dict(grid)
        grid.pop('z',0.0)
        grid.pop('Rv',0.0)
        kwargs_ = kwargs.copy()
        kwargs_.update(grid)
        for par in parameters:
            kwargs_[par] = values[par+comp] if par+comp in values else values[par]
        w,f = get_table_single(**kwargs_)
        waves.append(w)
        fluxes.append(f)
    #-- what's the total wavelength range? Merge all wavelength arrays and
    #   remove double points
    waves_ = np.sort(np.hstack(waves))
    waves_ = np.hstack([waves_[0],waves_[1:][np.diff(waves_)>0]])
    #-- cut out the part which is common to every wavelength range
    wstart = max([w[0] for w in waves])
    wend = min([w[-1] for w in waves])
    waves_ = waves_[( (wstart<=waves_) & (waves_<=wend))]
    if full_output:
        fluxes_ = []
    else:
        fluxes_ = 0.
    #-- interpolate onto common grid in log!
    for wave,flux in zip(waves,fluxes):
        intf = interp1d(np.log10(wave),np.log10(flux),kind='linear')
        if full_output:
            fluxes_.append(10**intf(np.log10(waves_)))
        else:
            fluxes_ += 10**intf(np.log10(waves_))
    if flux_units!='erg/cm2/s/AA/sr':
        fluxes_ = conversions.convert('erg/s/cm2/AA/sr',flux_units,fluxes_,wave=(waves_,'AA'),**kwargs)
    if wave_units!='AA':
        waves_ = conversions.convert('AA',wave_units,waves_,**kwargs)
    #-- where the fluxes are zero, the log10 above produced NaN: drop those
    #   wavelength points
    if full_output:
        fluxes_ = np.vstack(fluxes_)
        keep = ~np.isnan(np.sum(fluxes_,axis=0))
        waves_ = waves_[keep]
        fluxes_ = fluxes_[:,keep]
    else:
        keep = ~np.isnan(fluxes_)
        waves_ = waves_[keep]
        fluxes_ = fluxes_[keep]
    return waves_,fluxes_
def get_grid_dimensions(**kwargs):
    """
    Retrieve possible effective temperatures and gravities from a grid.

    E.g. kurucz, sdB, fastwind...

    @rtype: (ndarray,ndarray)
    @return: effective temperatures, gravities
    """
    gridfile = get_file(**kwargs)
    teffs = []
    loggs = []
    #-- a context manager guarantees the FITS file is closed even when a
    #   header lookup raises (the previous version leaked the handle then)
    with pf.open(gridfile) as ff:
        for mod in ff[1:]:
            teffs.append(float(mod.header['TEFF']))
            loggs.append(float(mod.header['LOGG']))
    #-- maybe the fits extensions are not in right order...
    matrix = np.vstack([np.array(teffs),np.array(loggs)]).T
    matrix = numpy_ext.sort_order(matrix,order=[0,1])
    teffs,loggs = matrix.T
    return teffs,loggs
def get_igrid_dimensions(**kwargs):
    """
    Retrieve possible effective temperatures, surface gravities and reddenings
    from an integrated grid.

    E.g. kurucz, sdB, fastwind...

    @rtype: (ndarray,ndarray,ndarray)
    @return: effective temperatures, surface gravities, E(B-V)s
    """
    gridfile = get_file(integrated=True,**kwargs)
    #-- read the grid axes inside a context manager so the FITS file is
    #   closed also when a column is missing (previously leaked on error)
    with pf.open(gridfile) as ff:
        teffs = ff[1].data.field('TEFF')
        loggs = ff[1].data.field('LOGG')
        ebvs = ff[1].data.field('EBV')
    #-- historical workaround for a lonely Kurucz grid point, kept for reference:
    #correct = (teffs==14000) & (loggs==2.0)
    #teffs[correct] = 12000
    return teffs,loggs,ebvs
@memoized
def get_grid_mesh(wave=None,teffrange=None,loggrange=None,**kwargs):
    """
    Return InterpolatingFunction spanning the available grid of atmosphere models.

    WARNING: the grid must be entirely defined on a mesh grid, but it does not
    need to be equidistant.

    It is thus the user's responsibility to know whether the grid is evenly
    spaced in logg and teff (e.g. this is not so for the CMFGEN models).

    You can supply your own wavelength range, since the grid models'
    resolution are not necessarily homogeneous. If not, the first wavelength
    array found in the grid will be used as a template.

    It might take a long a time and cost a lot of memory if you load the entire
    grid. Therefor, you can also set range of temperature and gravity.

    WARNING: 30000,50000 did not work out for FASTWIND, since we miss a model!

    @param wave: wavelength to define the grid on
    @type wave: ndarray
    @param teffrange: starting and ending of the grid in teff
    @type teffrange: tuple of floats
    @param loggrange: starting and ending of the grid in logg
    @type loggrange: tuple of floats
    @return: wavelengths, teffs, loggs and fluxes of grid, and the interpolating
    function
    @rtype: (3x1Darray,3Darray,interp_func)
    """
    #-- get the dimensions of the grid
    teffs,loggs = get_grid_dimensions(**kwargs)
    #-- (teff,logg) come in matched pairs, one pair per model, so cut them
    #   with a single combined mask: the previous independent cuts could
    #   desynchronise the two arrays and even leave them different lengths
    keep = np.ones(len(teffs),bool)
    if teffrange is not None:
        keep = keep & (teffrange[0]<=teffs) & (teffs<=teffrange[1])
    if loggrange is not None:
        keep = keep & (loggrange[0]<=loggs) & (loggs<=loggrange[1])
    teffs,loggs = teffs[keep],loggs[keep]
    #-- ScientificPython interface
    if not new_scipy:
        logger.warning('SCIENTIFIC PYTHON')
        #-- reduce the pairs to the unique, sorted axis values of the mesh
        teffs = list(set(list(teffs)))
        loggs = list(set(list(loggs)))
        teffs = np.sort(teffs)
        loggs = np.sort(loggs)
        if wave is not None:
            flux = np.ones((len(teffs),len(loggs),len(wave)))
        #-- run over teff and logg, and interpolate the models onto the supplied
        #   wavelength range
        gridfile = get_file(**kwargs)
        ff = pf.open(gridfile)
        for i,teff in enumerate(teffs):
            for j,logg in enumerate(loggs):
                try:
                    mod_name = "T%05d_logg%01.02f" %(teff,logg)
                    mod = ff[mod_name]
                    wave_ = mod.data.field('wavelength')
                    flux_ = mod.data.field('flux')
                    #-- if there is no wavelength range given, we assume that
                    #   the whole grid has the same resolution, and the first
                    #   wave-array will be used as a template
                    if wave is None:
                        wave = wave_
                        flux = np.ones((len(teffs),len(loggs),len(wave)))
                except KeyError:
                    #-- this (teff,logg) combination has no model in the file
                    continue
                #-- it could be that we're lucky and the grid is completely
                #   homogeneous. In that case, there is no need for interpolation
                try:
                    flux[i,j,:] = flux_
                except ValueError:
                    #-- shape mismatch: resample the model onto the template
                    flux[i,j,:] = np.interp(wave,wave_,flux_)
        ff.close()
        flux_grid = InterpolatingFunction([np.log10(teffs),loggs],flux)
        logger.info('Constructed SED interpolation grid')
    #-- Scipy interface
    else:
        logger.warning('SCIPY')
        #-- run over teff and logg, and interpolate the models onto the supplied
        #   wavelength range
        gridfile = get_file(**kwargs)
        ff = pf.open(gridfile)
        if wave is not None:
            #-- pre-allocate under the name 'flux': the previous version
            #   bound this array to 'fluxes', so 'flux[i] = ...' below
            #   raised a NameError whenever a wavelength template was given
            flux = np.zeros((len(teffs),len(wave)))
        for i,(teff,logg) in enumerate(zip(teffs,loggs)):
            mod_name = "T%05d_logg%01.02f" %(teff,logg)
            mod = ff[mod_name]
            wave_ = mod.data.field('wavelength')
            flux_ = mod.data.field('flux')
            #-- if there is no wavelength range given, we assume that
            #   the whole grid has the same resolution, and the first
            #   wave-array will be used as a template
            if wave is None:
                wave = wave_
                flux = np.ones((len(teffs),len(wave)))
            try:
                flux[i] = flux_
            except ValueError:
                flux[i] = np.interp(wave,wave_,flux_)
        ff.close()
        flux_grid = LinearNDInterpolator(np.array([np.log10(teffs),loggs]).T,flux)
    return wave,teffs,loggs,flux,flux_grid
#}
#{ Calibration
def list_calibrators(library='calspec'):
    """
    Return the names of all calibrators available in a library.

    @parameter library: name of the library (calspec, ngsl, stelib)
    @type library: str
    @return: list of calibrator names
    @rtype: list of str
    """
    #-- the header keyword holding the target name differs per library
    target_key = dict(calspec='targetid',ngsl='targname',stelib='object')[library]
    cal_files = config.glob(os.path.join(caldir,library),'*.fits')
    #-- read the target name out of each calibration file's primary header
    return [pf.getheader(cal_file)[target_key] for cal_file in cal_files]
def get_calibrator(name='alpha_lyr',version=None,wave_units=None,flux_units=None,library='calspec'):
    """
    Retrieve a calibration SED

    If C{version} is None, get the last version.

    Example usage:

    >>> wave,flux = get_calibrator(name='alpha_lyr')
    >>> wave,flux = get_calibrator(name='alpha_lyr',version='003')

    @param name: calibrator name
    @type name: str
    @param version: version of the calibration file
    @type version: str
    @param wave_units: units of wavelength arrays (default: AA)
    @type wave_units: str (interpretable by C{units.conversions.convert})
    @param flux_units: units of flux arrays (default: erg/s/cm2/AA)
    @type flux_units: str (interpretable by C{units.conversions.convert})
    @return: wavelength and flux arrays of calibrator
    @rtype: (ndarray,ndarray)
    """
    #-- collect calibration files
    files = config.glob(os.path.join(caldir,library),'*.fits')
    targname = dict(calspec='targetid',ngsl='targname',stelib='object')[library]
    calfile = None
    for ff in files:
        #-- check if the name matches with the given one; the context
        #   manager guarantees every file is closed again, also when the
        #   name does not match (the previous version leaked a handle then)
        with pf.open(ff) as fits_file:
            header = fits_file[0].header
            if name in ff or name in header[targname]:
                #-- maybe the target is correct, but the 'model version' is not
                if version is not None and version not in ff:
                    continue
                #-- extract the wavelengths and flux
                calfile = ff
                if library in ['calspec','ngsl']:
                    wave = fits_file[1].data.field('wavelength')
                    flux = fits_file[1].data.field('flux')
                elif library in ['stelib']:
                    wave,flux = fits.read_spectrum(ff)
                else:
                    raise ValueError("Don't know what to do with files from library {}".format(library))
    if calfile is None:
        raise ValueError('Calibrator %s (version=%s) not found'%(name,version))
    #-- convert to the requested units (defaults: erg/s/cm2/AA and AA)
    if flux_units is not None:
        flux = conversions.convert('erg/s/cm2/AA',flux_units,flux,wave=(wave,'AA'))
    if wave_units is not None:
        wave = conversions.convert('AA',wave_units,wave)
    logger.info('Calibrator %s selected'%(calfile))
    return wave,flux
@memoized
def read_calibrator_info(library='ngsl'):
    """
    Return calibrator names together with their spectrum and photometry files.

    Entries whose data files cannot be located on disk are skipped silently.

    @param library: name of the calibration library
    @type library: str
    @return: names, spectrum (FITS) files, photometry files
    @rtype: (list of str, list of str, list of str)
    """
    ident_file = config.get_datafile('sedtables/calibrators','{}.ident'.format(library))
    names = []
    fits_files = []
    phot_files = []
    with open(ident_file,'r') as stream:
        for raw_line in stream:
            fields = raw_line.strip().split(',')
            try:
                spectrum_file = config.get_datafile('sedtables/calibrators',fields[1])
                photometry_file = config.get_datafile('sedtables/calibrators',fields[2])
            #-- it can happen that there is no photfile for a target
            except IOError:
                continue
            names.append(fields[0])
            fits_files.append(spectrum_file)
            phot_files.append(photometry_file)
    return names,fits_files,phot_files
def calibrate():
    """
    Calibrate photometry.

    Not finished!

    ABmag = -2.5 Log F_nu - 48.6 with F_nu in erg/s/cm2/Hz
    Flux computed as 10**(-(meas-mag0)/2.5)*F0
    Magnitude computed as -2.5*log10(Fmeas/F0)
    F0 = 3.6307805477010029e-20 erg/s/cm2/Hz

    STmag = -2.5 Log F_lam - 21.10 with F_lam in erg/s/cm2/AA
    Flux computed as 10**(-(meas-mag0)/2.5)*F0
    Magnitude computed as -2.5*log10(Fmeas/F0)
    F0 = 3.6307805477010028e-09 erg/s/cm2/AA

    Vegamag = -2.5 Log F_lam - C with F_lam in erg/s/cm2/AA
    Flux computed as 10**(-meas/2.5)*F0
    Magnitude computed as -2.5*log10(Fmeas/F0)
    """
    #-- reference fluxes of the ST (per AA) and AB (per Hz) magnitude systems
    F0ST = 3.6307805477010028e-09
    F0AB = 3.6307805477010029e-20
    #-- get calibrator
    wave,flux = get_calibrator(name='alpha_lyr')
    zp = filters.get_info()
    #-- calculate synthetic fluxes of the calibrator in every passband
    syn_flux = synthetic_flux(wave,flux,zp['photband'])
    syn_flux_fnu = synthetic_flux(wave,flux,zp['photband'],units='Fnu')
    # NOTE(review): these two literature conversions are computed but never
    # used below -- presumably leftovers; confirm before removing.
    Flam0_lit = conversions.nconvert(zp['Flam0_units'],'erg/s/cm2/AA',zp['Flam0'],photband=zp['photband'])
    Fnu0_lit = conversions.nconvert(zp['Fnu0_units'],'erg/s/cm2/Hz',zp['Fnu0'],photband=zp['photband'])
    #-- we have Flam0 but not Fnu0: compute Fnu0
    keep = (zp['Flam0_lit']==1) & (zp['Fnu0_lit']==0)
    Fnu0 = conversions.nconvert(zp['Flam0_units'],'erg/s/cm2/Hz',zp['Flam0'],photband=zp['photband'])
    zp['Fnu0'][keep] = Fnu0[keep]
    zp['Fnu0_units'][keep] = 'erg/s/cm2/Hz'
    #-- we have Fnu0 but not Flam0: compute Flam0
    keep = (zp['Flam0_lit']==0) & (zp['Fnu0_lit']==1)
    # NOTE(review): unlike the symmetric Fnu0 branch above, the computed
    # Flam0 is never written back into zp['Flam0'][keep] -- the docstring
    # flags this function as unfinished; confirm before relying on
    # zp['Flam0'] for these rows.
    Flam0 = conversions.nconvert(zp['Fnu0_units'],'erg/s/cm2/AA',zp['Fnu0'],photband=zp['photband'])
    # set everything in correct units for convenience:
    Flam0 = conversions.nconvert(zp['Flam0_units'],'erg/s/cm2/AA',zp['Flam0'])
    Fnu0 = conversions.nconvert(zp['Fnu0_units'],'erg/s/cm2/Hz',zp['Fnu0'])
    #-- as a matter of fact, set Flam0 and Fnu for all the stuff for which we
    #   have no literature values
    keep = (zp['Flam0_lit']==0) & (zp['Fnu0_lit']==0)
    zp['Flam0'][keep] = syn_flux[keep]
    zp['Flam0_units'][keep] = 'erg/s/cm2/AA'
    zp['Fnu0'][keep] = syn_flux_fnu[keep]
    zp['Fnu0_units'][keep] = 'erg/s/cm2/Hz'
    # NOTE(review): this DENIS selection is immediately overwritten by the
    # next assignment without ever being used -- looks like leftover code.
    keep = np.array(['DENIS' in photb and True or False for photb in zp['photband']])
    #-- we have no Flam0, only ZP vegamags
    keep = (zp['vegamag_lit']==1) & (zp['Flam0_lit']==0)
    zp['Flam0'][keep] = syn_flux[keep]
    zp['Flam0_units'][keep] = 'erg/s/cm2/AA'
    #-- we have no Flam0, no ZP vegamas but STmags
    keep = (zp['STmag_lit']==1) & (zp['Flam0_lit']==0)
    m_vega = 2.5*np.log10(F0ST/syn_flux) + zp['STmag']
    zp['vegamag'][keep] = m_vega[keep]
    #-- we have no Fnu0, no ZP vegamas but ABmags
    keep = (zp['ABmag_lit']==1) & (zp['Flam0_lit']==0)
    F0AB_lam = conversions.convert('erg/s/cm2/Hz','erg/s/cm2/AA',F0AB,photband=zp['photband'])
    m_vega = 2.5*np.log10(F0AB_lam/syn_flux) + zp['ABmag']
    zp['vegamag'][keep] = m_vega[keep]
    #-- set the central wavelengths of the bands that have none yet
    set_wave = np.isnan(zp['eff_wave'])
    zp['eff_wave'][set_wave] = filters.eff_wave(zp['photband'][set_wave])
    return zp
#}
#{ Synthetic photometry
def synthetic_flux(wave,flux,photbands,units=None):
    """
    Extract flux measurements from a synthetic SED (Fnu or Flambda).

    The fluxes below 4micron are calculated assuming PHOTON-counting detectors
    (e.g. CCDs).

    Flam = int(P_lam * f_lam * lam, dlam) / int(P_lam * lam, dlam)

    When otherwise specified, we assume ENERGY-counting detectors (e.g. bolometers)

    Flam = int(P_lam * f_lam, dlam) / int(P_lam, dlam)

    Where P_lam is the total system dimensionless sensitivity function, which
    is normalised so that the maximum equals 1. Also, f_lam is the SED of the
    object, in units of energy per time per unit area per wavelength.

    The PHOTON-counting part of this routine has been thoroughly checked with
    respect to johnson UBV, geneva and stromgren filters, and only gives offsets
    with respect to the Kurucz integrated files (.geneva and stuff on his websites). These could be
    due to different normalisation.

    You can also readily integrate in Fnu instead of Flambda by suppling a list
    of strings to 'units'. This should have equal length of photbands, and
    should contain the strings 'flambda' and 'fnu' corresponding to each filter.
    In that case, the above formulas reduce to

    Fnu = int(P_nu * f_nu / nu, dnu) / int(P_nu / nu, dnu)

    and

    Fnu = int(P_nu * f_nu, dnu) / int(P_nu, dnu)

    Small note of caution: P_nu is not equal to P_lam according to
    Maiz-Apellaniz, he states that P_lam = P_nu/lambda. But in the definition
    we use above here, it *is* the same!

    The model fluxes should B{always} be given in Flambda (erg/s/cm2/AA). The
    program will convert them to Fnu where needed.

    The output is a list of numbers, equal in length to the 'photband' inputs.
    The units of the output are erg/s/cm2/AA where Flambda was given, and
    erg/s/cm2/Hz where Fnu was given.

    The difference is only marginal for 'blue' bands. For example, integrating
    2MASS in Flambda or Fnu is only different below the 1.1% level:

    >>> wave,flux = get_table(teff=10000,logg=4.0)
    >>> energys = synthetic_flux(wave,flux,['2MASS.J','2MASS.J'],units=['flambda','fnu'])
    >>> e0_conv = conversions.convert('erg/s/cm2/AA','erg/s/cm2/Hz',energys[0],photband='2MASS.J')
    >>> np.abs(energys[1]-e0_conv)/energys[1]<0.012
    True

    But this is not the case for IRAS.F12:

    >>> energys = synthetic_flux(wave,flux,['IRAS.F12','IRAS.F12'],units=['flambda','fnu'])
    >>> e0_conv = conversions.convert('erg/s/cm2/AA','erg/s/cm2/Hz',energys[0],photband='IRAS.F12')
    >>> np.abs(energys[1]-e0_conv)/energys[1]>0.1
    True

    If you have a spectrum in micron vs Jy and want to calculate the synthetic
    fluxes in Jy, a little bit more work is needed to get everything in the
    right units. In the following example, we first generate a constant flux
    spectrum in micron and Jy. Then, we convert flux to erg/s/cm2/AA using the
    wavelengths (this is no approximation) and convert wavelength to angstrom.
    Next, we compute the synthetic fluxes in the IRAS band in Fnu, and finally
    convert the outcome (in erg/s/cm2/Hz) to Jansky.

    >>> wave,flux = np.linspace(0.1,200,10000),np.ones(10000)
    >>> flam = conversions.convert('Jy','erg/s/cm2/AA',flux,wave=(wave,'micron'))
    >>> lam = conversions.convert('micron','AA',wave)
    >>> energys = synthetic_flux(lam,flam,['IRAS.F12','IRAS.F25','IRAS.F60','IRAS.F100'],units=['Fnu','Fnu','Fnu','Fnu'])
    >>> energys = conversions.convert('erg/s/cm2/Hz','Jy',energys)

    You are responsible yourself for having a response curve covering the
    model fluxes!

    WARNING: OPEN.BOL only works in Flambda for now.

    See e.g. Maiz-Apellaniz, 2006.

    @param wave: model wavelengths (angstrom)
    @type wave: ndarray
    @param flux: model fluxes (erg/s/cm2/AA)
    @type flux: ndarray
    @param photbands: list of photometric passbands
    @type photbands: list of str
    @param units: list containing Flambda or Fnu flag (defaults to all Flambda)
    @type units: list of strings or str
    @return: model fluxes (erg/s/cm2/AA or erg/s/cm2/Hz)
    @rtype: ndarray
    """
    if isinstance(units,str):
        units = [units]*len(photbands)
    energys = np.zeros(len(photbands))
    #-- only keep relevant information on filters:
    filter_info = filters.get_info()
    keep = np.searchsorted(filter_info['photband'],photbands)
    filter_info = filter_info[keep]
    for i,photband in enumerate(photbands):
        waver,transr = filters.get_response(photband)
        #-- make wavelength range a bit bigger, otherwise F25 from IRAS has only
        #   one Kurucz model point in its wavelength range... this is a bit
        #   'ad hoc' but seems to work.
        region = ((waver[0]-0.4*waver[0])<=wave) & (wave<=(2*waver[-1]))
        #-- if we're working in infrared (>4e4A) and the model is not of high
        #   enough resolution (100000 points over wavelength range), interpolate
        #   the model in logscale on to a denser grid (in logscale!)
        if filter_info['eff_wave'][i]>=4e4 and sum(region)<1e5 and sum(region)>1:
            logger.debug('%10s: Interpolating model to integrate over response curve'%(photband))
            #-- the number of samples must be an integer: passing the float
            #   1e5 raises a TypeError on modern numpy
            wave_ = np.logspace(np.log10(wave[region][0]),np.log10(wave[region][-1]),100000)
            flux_ = 10**np.interp(np.log10(wave_),np.log10(wave[region]),np.log10(flux[region]),)
        else:
            wave_ = wave[region]
            flux_ = flux[region]
        if not len(wave_):
            #-- no model coverage at all for this passband
            energys[i] = np.nan
            continue
        #-- perhaps the entire response curve falls in between model points
        #   (happens with narrowband UV filters), or there's very few model
        #   points covering it
        if (np.searchsorted(wave_,waver[-1])-np.searchsorted(wave_,waver[0]))<5:
            wave__ = np.sort(np.hstack([wave_,waver]))
            flux_ = np.interp(wave__,wave_,flux_)
            wave_ = wave__
        #-- interpolate response curve onto model grid
        transr = np.interp(wave_,waver,transr,left=0,right=0)
        #-- integrated flux: different for bolometers and CCDs
        #-- WE WORK IN FLAMBDA
        if units is None or ((units is not None) and (units[i].upper()=='FLAMBDA')):
            if photband=='OPEN.BOL':
                energys[i] = np.trapz(flux_,x=wave_)
            elif filter_info['type'][i]=='BOL':
                energys[i] = np.trapz(flux_*transr,x=wave_)/np.trapz(transr,x=wave_)
            elif filter_info['type'][i]=='CCD':
                energys[i] = np.trapz(flux_*transr*wave_,x=wave_)/np.trapz(transr*wave_,x=wave_)
        #-- we work in FNU
        elif units[i].upper()=='FNU':
            #-- convert wavelengths to frequency, Flambda to Fnu
            freq_ = conversions.convert('AA','Hz',wave_)
            flux_f = conversions.convert('erg/s/cm2/AA','erg/s/cm2/Hz',flux_,wave=(wave_,'AA'))
            #-- sort again!
            sa = np.argsort(freq_)
            transr = transr[sa]
            freq_ = freq_[sa]
            flux_f = flux_f[sa]
            if filter_info['type'][i]=='BOL':
                energys[i] = np.trapz(flux_f*transr,x=freq_)/np.trapz(transr,x=freq_)
            elif filter_info['type'][i]=='CCD':
                #-- integrate over *frequency*: the integrands were re-sorted
                #   by frequency above, so integrating against the (still
                #   wavelength-ordered) wave_ array, as before, paired every
                #   sample with the wrong abscissa; this also matches the
                #   dnu-integral in the docstring and the BOL branch above
                energys[i] = np.trapz(flux_f*transr/freq_,x=freq_)/np.trapz(transr/freq_,x=freq_)
        else:
            raise ValueError('units %s not understood'%(units))
    #-- that's it!
    return energys
def synthetic_color(wave,flux,colors,units=None):
    """
    Construct colors (flux ratios) from a synthetic SED.

    @param wave: model wavelengths (angstrom)
    @type wave: ndarray
    @param flux: model fluxes (erg/s/cm2/AA)
    @type flux: ndarray
    @param colors: list of photometric passbands
    @type colors: list of str
    @param units: list containing Flambda or Fnu flag (defaults to all Flambda)
    @type units: list of strings or str
    @return: flux ratios or colors
    @rtype: ndarray
    """
    unit_list = [None]*len(colors) if units is None else units
    results = []
    for color,unit in zip(colors,unit_list):
        #-- which passbands make up this color, and how do they combine?
        photbands,combine = filters.make_color(color)
        #-- synthesize the individual passband fluxes and combine them
        band_fluxes = synthetic_flux(wave,flux,photbands,units=unit)
        results.append(combine(*list(band_fluxes)))
    return np.array(results)
def luminosity(wave,flux,radius=1.):
    """
    Calculate the bolometric luminosity of a model SED, in solar units.

    Flux should be in cgs per unit wavelength (same unit as wave).
    The wavelength is integrated out, so its unit is of no importance;
    after integration the flux has units erg/s/cm2.

    If you give radius=1 and want to correct afterwards, multiply the
    obtained Labs with radius**2.

    @param wave: model wavelengths
    @type wave: ndarray
    @param flux: model fluxes (Flam)
    @type flux: ndarray
    @param radius: stellar radius in solar units
    @type radius: float
    @return: total bolometric luminosity
    @rtype: float
    """
    #-- integrate the SED over wavelength: erg/s/cm2 at the stellar surface
    integrated = np.trapz(flux,x=wave)
    #-- scale to the full sphere and express in solar luminosities
    Labs = integrated*4*np.pi/constants.Lsol_cgs*(radius*constants.Rsol_cgs)**2
    return Labs
#}
@memoized
def _get_itable_markers(photbands,
                    teffrange=(-np.inf,np.inf),loggrange=(-np.inf,np.inf),
                    ebvrange=(-np.inf,np.inf),zrange=(-np.inf,np.inf),
                    include_Labs=True,clear_memory=True,**kwargs):
    """
    Get a list of markers to more easily retrieve integrated fluxes.

    For every (teff,logg,ebv,z) point of the integrated grid files, a single
    float "marker" encoding the four parameters is built, together with the
    grid points themselves and the integrated fluxes per passband.

    NOTE(review): ``teffrange`` and ``loggrange`` are accepted but never
    applied in the body -- only ``ebvrange`` and ``zrange`` actually cut
    the grid; confirm whether that is intentional.
    """
    if clear_memory:
        clear_memoization(keys=['ivs.sed.model'])
    # Possibility to not fetch all grid files when not needed
    # does not work currently
    # if 'z_skip' in kwargs:
    #     gridfiles = get_file(integrated=True,**kwargs)
    # else:
    gridfiles = get_file(z='*',integrated=True,**kwargs)
    if isinstance(gridfiles,str):
        gridfiles = [gridfiles]
    #-- sort gridfiles per metallicity
    metals_sa = np.argsort([pf.getheader(ff,1)['z'] for ff in gridfiles])
    gridfiles = np.array(gridfiles)[metals_sa]
    flux = []
    gridpnts = []
    grid_z = []
    markers = []
    #-- collect information
    for gridfile in gridfiles:
        ff = pf.open(gridfile)
        ext = ff[1]
        z = ff[1].header['z']
        # NOTE(review): skipping here leaves ``ff`` open (no close on this
        # path) -- harmless for few files, but a handle leak nonetheless.
        if z<zrange[0] or zrange[1]<z:
            continue
        teffs = ext.data.field('teff')
        loggs = ext.data.field('logg')
        ebvs = ext.data.field('ebv')
        keep = (ebvrange[0]<=ebvs) & (ebvs<=ebvrange[1])
        #-- for some reason, the Kurucz grid has a lonely point at Teff=14000,logg=2
        # which messes up our interpolation
        #correct = (teffs==14000) & (loggs==2.0)
        #teffs[correct] = 12000
        teffs,loggs,ebvs = teffs[keep],loggs[keep],ebvs[keep]
        # NOTE(review): these unique-axis arrays are rebound on every pass,
        # so the values returned below come from the *last* metallicity file
        # only (and a NameError occurs if zrange excludes every file) --
        # presumably the axes are identical across files; verify.
        grid_teffs = np.sort(list(set(teffs)))
        grid_loggs = np.sort(list(set(loggs)))
        grid_ebvs = np.sort(list(set(ebvs)))
        grid_z.append(z)
        #-- we construct an array representing the teff-logg-ebv-z content, but
        #   in one number: 5000040031500 means:
        #   T=50000,logg=4.0,E(B-V)=0.31 and Z = 0.00
        #   Note that Z is Z+5 so that we avoid minus signs...
        markers.append(np.zeros(len(teffs)))
        gridpnts.append(np.zeros((len(teffs),4)))
        for i,(it,il,ie) in enumerate(zip(teffs,loggs,ebvs)):
            #snippet to exclude negative logg of marcs:
            if il<0.:
                continue
            markers[-1][i] = float('%3d%05d%03d%03d'%(int(round((z+5)*100)),int(round(it)),int(round(il*100)),int(round(ie*100))))
            gridpnts[-1][i]= it,il,ie,z
        flux.append(_get_flux_from_table(ext,photbands,include_Labs=include_Labs))
        ff.close()
    #-- stack the per-file pieces into single arrays
    flux = np.vstack(flux)
    markers = np.hstack(markers)
    gridpnts = np.vstack(gridpnts)
    grid_z = np.sort(grid_z)
    return np.array(markers),(grid_teffs,grid_loggs,grid_ebvs,grid_z),gridpnts,flux
@memoized
def _get_pix_grid(photbands,
            teffrange=(-np.inf,np.inf),loggrange=(-np.inf,np.inf),
            ebvrange=(-np.inf,np.inf),zrange=(-np.inf,np.inf),
            rvrange=(-np.inf,np.inf),vradrange=(-np.inf,np.inf),
            include_Labs=True,clear_memory=True,
            variables=['teff','logg','ebv','z','rv','vrad'],**kwargs):
    """
    Prepare the pixalted grid.

    In principle, it should be possible to return any number of free parameters
    here. I'm thinking about:

    teff, logg, ebv, z, Rv, vrad.

    Returns the axis values, the grid parameters, the pixelgrid of
    log10(flux) and the names of the variable axes.
    """
    if clear_memory:
        clear_memoization(keys=['ivs.sed.model'])
    # gridfiles = get_file(integrated=False,**kwargs)
    gridfiles = get_file(integrated=True,**kwargs)
    if isinstance(gridfiles,str):
        gridfiles = [gridfiles]
    flux = []
    grid_pars = []
    #snippet to modify variables if Ana's marcs grids are called
    if gridfiles[-1].find('Ana') != -1:
        variables = ['teff','logg','ebv']
    grid_names = np.array(variables)
    #-- collect information from all the grid files
    for gridfile in gridfiles:
        with pf.open(gridfile) as ff:
            #-- make an alias for further reference
            ext = ff[1]
            #-- we already cut the grid here, in order not to take too much memory
            keep = np.ones(len(ext.data),bool)
            for name in variables:
                #-- look up the corresponding '<name>range' argument; this
                #   relies on the parameter names above matching the grid columns
                low,high = locals()[name+'range']
                #-- we need to be carefull for rounding errors: keep points
                #   inside the range plus those equal to an edge up to
                #   floating-point tolerance. np.isclose tests *element-wise*;
                #   the previous np.allclose collapsed the whole column to one
                #   bool, so edge points lost to rounding were silently dropped
                in_range = (low<=ext.data.field(name)) & (ext.data.field(name)<=high)
                on_edge = np.isclose(ext.data.field(name),low) | np.isclose(ext.data.field(name),high)
                keep = keep & (in_range | on_edge)
            partial_grid = np.vstack([ext.data.field(name)[keep] for name in variables])
            if sum(keep):
                grid_pars.append(partial_grid)
                #-- the flux grid:
                flux.append(_get_flux_from_table(ext,photbands,include_Labs=include_Labs)[keep])
    #-- make the entire grid: it consists of fluxes and grid parameters
    flux = np.vstack(flux)
    grid_pars = np.hstack(grid_pars)
    #-- this is also the place to put some stuff in logarithmic scale if
    #   this is needed
    #grid_pars[0] = np.log10(grid_pars[0])
    flux = np.log10(flux)
    #-- don't take axes into account if it has only one value
    keep = np.ones(len(grid_names),bool)
    for i in range(len(grid_names)):
        if np.all(grid_pars[i]==grid_pars[i][0]):
            keep[i] = False
    grid_pars = grid_pars[keep]
    #-- we need to know what variable parameters we have in the grid
    grid_names = grid_names[keep]
    #-- create the pixeltype grid
    axis_values, pixelgrid = interpol.create_pixeltypegrid(grid_pars,flux.T)
    return axis_values,grid_pars.T,pixelgrid,grid_names
def _get_flux_from_table(fits_ext,photbands,index=None,include_Labs=True):
    """
    Retrieve flux and flux ratios from an integrated SED table.

    @param fits_ext: fits extension containing integrated flux
    @type fits_ext: FITS extension
    @param photbands: list of photometric passbands
    @type photbands: list of str
    @param index: slice or index of rows to retrieve
    @type index: slice or integer
    @param include_Labs: also append the absolute luminosity column
    @type include_Labs: bool
    @return: fluxes or flux ratios
    @rtype: ndarray
    """
    if index is None:
        index = slice(None) #-- full range
    fluxes = []
    for photband in photbands:
        try:
            if not filters.is_color(photband):
                fluxes.append(fits_ext.data.field(photband)[index])
            else:
                #-- colors are flux ratios of the constituent passbands
                system,color = photband.split('.')
                if '-' in color:
                    band0,band1 = color.split('-')
                    fluxes.append(fits_ext.data.field('%s.%s'%(system,band0))[index]/fits_ext.data.field('%s.%s'%(system,band1))[index])
                elif color=='M1':
                    fv = fits_ext.data.field('STROMGREN.V')[index]
                    fy = fits_ext.data.field('STROMGREN.Y')[index]
                    fb = fits_ext.data.field('STROMGREN.B')[index]
                    fluxes.append(fv*fy/fb**2)
                elif color=='C1':
                    fu = fits_ext.data.field('STROMGREN.U')[index]
                    fv = fits_ext.data.field('STROMGREN.V')[index]
                    fb = fits_ext.data.field('STROMGREN.B')[index]
                    fluxes.append(fu*fb/fv**2)
        except KeyError:
            logger.warning('Passband %s missing from table'%(photband))
            # NOTE(review): the NaN filler always spans the *full* table,
            # even when ``index`` selects fewer rows -- confirm callers only
            # pass index=None when passbands can be missing.
            fluxes.append(np.nan*np.ones(len(fits_ext.data)))
    #-- possibly include absolute luminosity
    if include_Labs:
        fluxes.append(fits_ext.data.field("Labs")[index])
    #-- transpose: one row per model point, one column per requested quantity
    #   (the previous no-op "if index is not None: fluxes = fluxes" removed)
    fluxes = np.array(fluxes).T
    return fluxes
if __name__=="__main__":
    # Run the doctests embedded in this module's docstrings; pylab is
    # imported because several doctests build diagnostic plots, which are
    # shown once all tests have run.
    import doctest
    import pylab as pl
    doctest.testmod()
    pl.show()
|
IvS-KULeuvenREPO_NAMEIvSPythonRepositoryPATH_START.@IvSPythonRepository_extracted@IvSPythonRepository-master@sed@model.py@.PATH_END.py
|
{
"filename": "_color.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/scattergeo/hoverlabel/font/_color.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class ColorValidator(_plotly_utils.basevalidators.ColorValidator):
    """Validator for the ``scattergeo.hoverlabel.font.color`` property."""

    def __init__(
        self, plotly_name="color", parent_name="scattergeo.hoverlabel.font", **kwargs
    ):
        # Pull the validator options out of kwargs, falling back to the
        # defaults for this property, then forward everything to the base.
        array_ok = kwargs.pop("array_ok", True)
        edit_type = kwargs.pop("edit_type", "none")
        super(ColorValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            array_ok=array_ok,
            edit_type=edit_type,
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@scattergeo@hoverlabel@font@_color.py@.PATH_END.py
|
{
"filename": "test_file_alignment.py",
"repo_name": "h5py/h5py",
"repo_path": "h5py_extracted/h5py-master/h5py/tests/test_file_alignment.py",
"type": "Python"
}
|
import h5py
from .common import TestCase
def is_aligned(dataset, offset=4096):
    """Return True when the dataset's file offset is a multiple of *offset*."""
    return (dataset.id.get_offset() % offset) == 0
def dataset_name(i):
    """Return the zero-padded dataset name for index *i*, e.g. ``data007``."""
    return "data%03d" % i
class TestFileAlignment(TestCase):
    """
    Ensure that setting the file alignment has the desired effect
    in the internal structure.
    """

    def test_no_alignment_set(self):
        path = self.mktemp()
        # A prime dataset size (881 elements) helps randomize the layout;
        # a nice even number might give a pathological case where the data
        # ends up aligned even though we never requested alignment.
        shape = (881,)
        with h5py.File(path, 'w') as handle:
            # Create up to 1000 datasets; at least one should turn out
            # misaligned, so during correct operation this loop is
            # expected to exit early.
            found_misaligned = False
            for index in range(1000):
                dset = handle.create_dataset(
                    dataset_name(index), shape, dtype='uint8')
                # Writing data instantiates the dataset in the file
                dset[...] = index
                if not is_aligned(dset):
                    found_misaligned = True
                    break
            if not found_misaligned:
                raise RuntimeError("Data was all found to be aligned to 4096")

    def test_alignment_set_above_threshold(self):
        # 2022/01/19 hmaarrfk
        # UnitTest (TestCase) doesn't play well with pytest parametrization.
        alignment_threshold = 1000
        alignment_interval = 4096
        shapes = [
            (1033,),  # A prime number above the threshold
            (1000,),  # Exactly equal to the threshold
            (1001,),  # one above the threshold
        ]
        for shape in shapes:
            path = self.mktemp()
            with h5py.File(path, 'w',
                           alignment_threshold=alignment_threshold,
                           alignment_interval=alignment_interval) as handle:
                # Every dataset is at/above the threshold, so all 1000 are
                # expected to be aligned.
                for index in range(1000):
                    dset = handle.create_dataset(
                        dataset_name(index), shape, dtype='uint8')
                    # Writing data instantiates the dataset in the file
                    dset[...] = (index % 256)  # Truncate to uint8
                    assert is_aligned(dset, offset=alignment_interval)

    def test_alignment_set_below_threshold(self):
        # 2022/01/19 hmaarrfk
        # UnitTest (TestCase) doesn't play well with pytest parametrization.
        alignment_threshold = 1000
        alignment_interval = 1024
        shapes = [
            (881,),  # A prime number below the threshold
            (999,),  # Exactly one below the threshold
        ]
        for shape in shapes:
            path = self.mktemp()
            with h5py.File(path, 'w',
                           alignment_threshold=alignment_threshold,
                           alignment_interval=alignment_interval) as handle:
                # Below-threshold datasets should not all be aligned;
                # finding a single misaligned one is enough, so during
                # correct operation this loop exits early.
                found_misaligned = False
                for index in range(1000):
                    dset = handle.create_dataset(
                        dataset_name(index), shape, dtype='uint8')
                    # Writing data instantiates the dataset in the file
                    dset[...] = index
                    if not is_aligned(dset, offset=alignment_interval):
                        found_misaligned = True
                        break
                if not found_misaligned:
                    raise RuntimeError(
                        "Data was all found to be aligned to "
                        f"{alignment_interval}. This is highly unlikely.")
|
h5pyREPO_NAMEh5pyPATH_START.@h5py_extracted@h5py-master@h5py@tests@test_file_alignment.py@.PATH_END.py
|
{
"filename": "FormatConversion-checkpoint.ipynb",
"repo_name": "HaowenZhang/TRINITY",
"repo_path": "TRINITY_extracted/TRINITY-main/obs/Aird_qpdf_no_high/.ipynb_checkpoints/FormatConversion-checkpoint.ipynb",
"type": "Jupyter Notebook"
}
|
```python
#This notebook converts the data provided by James Aird to the format that can be read by our model.
import pandas as pd
import numpy as np
```
```python
# Load James Aird's Eddington-ratio distribution measurements.
# Columns follow the header of pledd_all.dat; flag == 1 presumably marks
# usable measurements -- TODO confirm against the data release notes.
data = np.loadtxt('./pledd_all.dat', comments='#')
z_low, z_high, mass_low, mass_high, logER, p, p_low, p_high, flag = data.transpose()
# Bin-center stellar mass for each row.
mass = 0.5 * (mass_low + mass_high)
# Unique redshift bins. Each z_low is assumed to pair with exactly one
# z_high (np.unique would fail to assign to a scalar slot otherwise).
z_low_uni = np.unique(z_low)
z_high_uni = np.zeros(len(z_low_uni))
for i in range(len(z_low_uni)):
    z_high_uni[i] = np.unique(z_high[np.where(z_low == z_low_uni[i])[0]])
print(z_low_uni)
print(z_high_uni)
print(p_high / p)
print(p / p_low)
for i in range(len(z_low_uni)):
    z_mid = 0.5 * (z_low_uni[i] + z_high_uni[i])
    filename = './Aird_2018_z%.2f.qpdf' % z_mid
    # Write the file header once; data rows are appended below (mode='a+').
    with open(filename, 'w') as f:
        f.write('#zlow: %.1f\n#zhigh: %.1f\n#ref: https://zenodo.org/record/1009605#.W85W6hNKifU\n#type: qpdf_eta\n' % (z_low_uni[i], z_high_uni[i]))
    ind = np.where((z_low == z_low_uni[i]) & (z_high == z_high_uni[i]) & (flag == 1))[0]
    masses = np.unique(mass[ind])
    for m in masses:
        sub_ind = np.where(mass[ind] == m)
        ERs = np.unique((logER[ind])[sub_ind])
        # Pick (up to) four roughly evenly spaced Eddington ratios from the
        # available grid. Renamed loop variable avoids visually shadowing the
        # outer redshift index i.
        ER_selected = np.unique([ERs[int(q * len(ERs) / 4)] for q in range(4)])
        print(ER_selected)
        for j in range(len(ER_selected)):
            ind_select = np.where((z_low == z_low_uni[i]) & (z_high == z_high_uni[i]) & (flag == 1) & (mass == m) & (logER == ER_selected[j]))
            # The factor of 1.65 comes from the fact that the stipulated
            # uncertainties by James represent a 90% credible interval;
            # dividing converts them to ~1-sigma errors.
            # 'rows' (not 'data') so the loaded table above is not clobbered.
            rows = np.array([mass[ind_select], logER[ind_select], np.log10(p[ind_select]), np.log10(p_high[ind_select] / p[ind_select]) / 1.65, np.log10(p[ind_select] / p_low[ind_select]) / 1.65])
            df = pd.DataFrame(data=rows.transpose(), columns=['mass', 'eta', 'prob', 'err_h', 'err_l'])
            df.to_csv(filename, sep=' ', header=False, index=False, mode='a+')
```
[0.1 0.5 1. 1.5 2. 2.5 3. ]
[0.5 1. 1.5 2. 2.5 3. 4. ]
[ 5.84070796 5.70570571 5.62632696 ... 34.71615721 35.26086957
34.57446809]
[29.16129032 29.21052632 29.07407407 ... 80.35087719 82.14285714
83.92857143]
[-3.08 -1.56 -0.04 1.48]
[-3.24 -1.72 -0.12 1.4 ]
[-3.48 -1.88 -0.28 1.32]
[-3.64 -2.04 -0.36 1.32]
[-3.8 -2.12 -0.44 1.24]
[-3.96 -2.28 -0.52 1.24]
[-2.6 -1.24 0.2 1.56]
[-2.76 -1.32 0.12 1.56]
[-3. -1.56 -0.04 1.48]
[-3.16 -1.64 -0.12 1.4 ]
[-3.32 -1.8 -0.2 1.4 ]
[-3.48 -1.88 -0.28 1.32]
[-2.36 -1.08 0.28 1.64]
[-2.6 -1.24 0.2 1.56]
[-2.76 -1.32 0.12 1.56]
[-2.92 -1.48 0.04 1.48]
[-3.08 -1.56 -0.04 1.48]
[-2.2 -0.92 0.36 1.64]
[-2.44 -1.08 0.28 1.64]
[-2.6 -1.24 0.2 1.56]
[-2.76 -1.32 0.12 1.56]
[-2.12 -0.84 0.44 1.72]
[-2.28 -1. 0.36 1.64]
[-2.44 -1.08 0.28 1.64]
[-1.88 -0.68 0.52 1.72]
[-2.12 -0.84 0.44 1.72]
[-2.2 -0.92 0.36 1.64]
[-1.8 -0.6 0.6 1.8]
[-1.96 -0.76 0.52 1.72]
[-2.12 -0.84 0.44 1.72]
```python
```
|
HaowenZhangREPO_NAMETRINITYPATH_START.@TRINITY_extracted@TRINITY-main@obs@Aird_qpdf_no_high@.ipynb_checkpoints@FormatConversion-checkpoint.ipynb@.PATH_END.py
|
{
"filename": "_width.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/scattergl/error_y/_width.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class WidthValidator(_plotly_utils.basevalidators.NumberValidator):
    """Number validator for the ``width`` property of ``scattergl.error_y``."""

    def __init__(self, plotly_name="width", parent_name="scattergl.error_y", **kwargs):
        # Supply the generated defaults only when the caller did not
        # override them; the resulting keyword set passed to the base
        # validator is identical either way.
        kwargs.setdefault("edit_type", "calc")
        kwargs.setdefault("min", 0)
        super(WidthValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@scattergl@error_y@_width.py@.PATH_END.py
|
{
"filename": "plot_Fig1.py",
"repo_name": "igomezv/simplemc_tests",
"repo_path": "simplemc_tests_extracted/simplemc_tests-main/simplemc/plots/plot_Fig1.py",
"type": "Python"
}
|
#!/usr/bin/env python
# Reproduce Figure 1: BAO distance ratios D_a/r_d, H^-1/r_d and D_v/r_d as a
# function of redshift for a massless-neutrino LCDM model, with BOSS
# LOWZ/CMASS and Lyman-alpha measurements overplotted.
from RunBase import *
import pylab

# LCDMCosmology and arange come from the RunBase star import -- TODO confirm.
T = LCDMCosmology(mnu=0)

# Effective redshifts of the plotted measurements.
zLOWZ = 0.32
zCMASS = 0.57
zLyaA = 2.34   # Lyman-alpha auto-correlation
zLyaC = 2.36   # Lyman-alpha cross-correlation

# Redshift grid for the model curves.
zl = arange(0, 3, 0.1)
pylab.figure(figsize=(8, 10))

# Top panel: angular diameter distance over the sound horizon, D_a(z)/r_d.
# Data points are scaled by (1+z) -- presumably converting from a
# comoving-distance convention; verify against the measurement papers.
pylab.subplot(3, 1, 1)
y1 = [T.DaOverrd(z) for z in zl]
pylab.errorbar(zCMASS, 9.519*(1+zCMASS), yerr=0.134 *
               (1+zCMASS), color='red', fmt='-o')
pylab.errorbar(zLyaA, 11.28*(1+zLyaA), yerr=0.65 *
               (1 + zLyaA), color='blue', fmt='-o')
pylab.errorbar(zLyaC, 10.8*(1+zLyaC), yerr=0.4 *
               (1+zLyaC), color='magenta', fmt='-o')
pylab.plot(zl, y1, 'k-')
pylab.ylabel("$D_a(z)/r_d$")

# Middle panel: inverse Hubble rate over the sound horizon, H^-1(z)/r_d.
pylab.subplot(3, 1, 2)
y1 = [T.HIOverrd(z) for z in zl]
pylab.errorbar(zCMASS, 20.75, yerr=0.73, color='red', fmt='-o')
pylab.errorbar(zLyaA, 9.18, yerr=0.28, color='blue', fmt='-o')
pylab.errorbar(zLyaC, 9.0, yerr=0.3, color='magenta', fmt='-o')
pylab.plot(zl, y1, 'k-')
pylab.ylabel("$H^{-1}(z)/r_d$")

# Bottom panel: volume-averaged distance D_v(z)/r_d with the LOWZ point.
pylab.subplot(3, 1, 3)
y1 = [T.DVOverrd(z) for z in zl]
pylab.errorbar(zLOWZ, 8.467, yerr=0.167, color='green', fmt='-o')
pylab.plot(zl, y1, 'k-')
pylab.ylabel("$D_v(z)/r_d$")

# Empty plots act as proxy handles so the legend shows one entry per survey.
pylab.plot([], [], 'g-', label='LOWZ')
pylab.plot([], [], 'r-', label='CMASS')
pylab.plot([], [], 'b-', label='Lyman-$\\alpha$ auto')
pylab.plot([], [], 'magenta', label='Lyman-$\\alpha$ cross')
pylab.legend(loc='lower right')
pylab.xlabel("z")
pylab.savefig("Fig1.pdf")
pylab.show()
|
igomezvREPO_NAMEsimplemc_testsPATH_START.@simplemc_tests_extracted@simplemc_tests-main@simplemc@plots@plot_Fig1.py@.PATH_END.py
|
{
"filename": "_legendwidth.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/scattersmith/_legendwidth.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class LegendwidthValidator(_plotly_utils.basevalidators.NumberValidator):
    """Number validator for the ``legendwidth`` property of ``scattersmith``."""

    def __init__(self, plotly_name="legendwidth", parent_name="scattersmith", **kwargs):
        # Supply the generated defaults only when the caller did not
        # override them; the resulting keyword set passed to the base
        # validator is identical either way.
        kwargs.setdefault("edit_type", "style")
        kwargs.setdefault("min", 0)
        super(LegendwidthValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@scattersmith@_legendwidth.py@.PATH_END.py
|
{
"filename": "_arrowhead.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/layout/scene/annotation/_arrowhead.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class ArrowheadValidator(_plotly_utils.basevalidators.IntegerValidator):
    """Integer validator for ``layout.scene.annotation.arrowhead`` (0-8)."""

    def __init__(
        self, plotly_name="arrowhead", parent_name="layout.scene.annotation", **kwargs
    ):
        # Supply the generated defaults only when the caller did not
        # override them; the resulting keyword set passed to the base
        # validator is identical either way.
        kwargs.setdefault("edit_type", "calc")
        kwargs.setdefault("max", 8)
        kwargs.setdefault("min", 0)
        kwargs.setdefault("role", "style")
        super(ArrowheadValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@layout@scene@annotation@_arrowhead.py@.PATH_END.py
|
{
"filename": "test_radtran_calc_tau_rayleigh.py",
"repo_name": "Jingxuan97/nemesispy",
"repo_path": "nemesispy_extracted/nemesispy-main/nemesispy/test/test_radtran_calc_tau_rayleigh.py",
"type": "Python"
}
|
# Smoke test: verify that the Rayleigh-opacity module and the WASP-43b GCM
# data products import cleanly.
import numpy as np
from nemesispy.radtran.calc_tau_rayleigh import calc_tau_rayleigh
from nemesispy.radtran.forward_model import ForwardModel
# GCM grids and maps for WASP-43b: longitude/latitude grids, pressure
# levels, temperature and abundance maps, and phase-resolved spectra.
from nemesispy.data.gcm.process_gcm import (nlon,nlat,xlon,xlat,npv,pv,\
    tmap,h2omap,comap,co2map,ch4map,hemap,h2map,vmrmap,\
    tmap_mod,h2omap_mod,comap_mod,co2map_mod,ch4map_mod,\
    hemap_mod,h2map_mod,vmrmap_mod,phase_grid,\
    kevin_phase_by_wave,kevin_wave_by_phase,\
    pat_phase_by_wave,pat_wave_by_phase,\
    vmrmap_mod_new,tmap_hot)
from test_data.planet_wasp_43b import planet
# Print the planet parameter record to confirm the imports resolved.
print(planet)
# FM = ForwardModel()
# ForwardModel
|
Jingxuan97REPO_NAMEnemesispyPATH_START.@nemesispy_extracted@nemesispy-main@nemesispy@test@test_radtran_calc_tau_rayleigh.py@.PATH_END.py
|
{
"filename": "test_doc.py",
"repo_name": "mpi4py/mpi4py",
"repo_path": "mpi4py_extracted/mpi4py-master/test/test_doc.py",
"type": "Python"
}
|
from mpi4py import MPI
import mpiunittest as unittest
import sys

# Concrete (implementation-specific) types of the kinds of objects exposed
# by mpi4py; getdocstr() below compares against these to decide whether to
# recurse into an object or record its docstring directly.
ModuleType = type(MPI)
ClassType = type(MPI.Comm)
FunctionType = type(MPI.Init)
StaticMethodType = type(MPI.buffer.allocate)
ClassMethodType = type(MPI.Comm.Get_parent)
MethodDescrType = type(MPI.Comm.Get_rank)
GetSetDescrType = type(MPI.Comm.rank)
def getdocstr(mc, docstrings, namespace=None):
    """Recursively collect docstrings of *mc* and its members.

    Keys of *docstrings* are dotted names rooted at *namespace*; values are
    the (possibly ``None``) docstrings. Objects whose docstring is the
    sentinel ``"<undocumented>"`` and the builtins module are skipped.
    """
    name = getattr(mc, '__name__', None)
    if name is None:
        return
    if name in ('__builtin__', 'builtins'):
        return
    if namespace:
        name = f'{namespace}.{name}'
    kind = type(mc)
    if kind in (ModuleType, ClassType):
        doc = getattr(mc, '__doc__', None)
        if doc == "<undocumented>":
            return
        docstrings[name] = doc
        # Recurse into members; unwrap class/static methods first so the
        # underlying callable is inspected.
        for member in vars(mc).values():
            if isinstance(member, (classmethod, staticmethod)):
                member = member.__get__(mc)
            getdocstr(member, docstrings, name)
    elif kind in (
        FunctionType,
        StaticMethodType,
        ClassMethodType,
        MethodDescrType,
        GetSetDescrType,
    ):
        doc = getattr(mc, '__doc__', None)
        if doc == "<undocumented>":
            return
        if doc is not None:
            # Drop the leading signature line; keep only the description.
            _signature, _, doc = doc.partition('\n')
        docstrings[name] = doc
@unittest.skipIf(hasattr(sys, 'pypy_version_info'), 'pypy')
class TestDoc(unittest.TestCase):
    """Validate presence and formatting of all mpi4py.MPI docstrings."""

    def testDoc(self):
        """Walk the MPI namespace and check every collected docstring."""
        # Names whose docstrings are intentionally exempt from the checks.
        ignore = {'py2f', 'f2py'}
        invalid = False  # at least one malformed docstring was found
        missing = False  # at least one absent or empty docstring was found
        docs = { }
        getdocstr(MPI, docs)
        for k in docs:
            doc = docs[k]
            name = k.split('.')[-1]
            if name in ignore:
                continue
            # Private names may legitimately be undocumented.
            if not doc and name.startswith('_'):
                continue
            if doc is None:
                print (f"'{k}': missing docstring")
                missing = True
                continue
            if not doc.strip():
                print (f"'{k}': empty docstring")
                missing = True
                continue
            # A multi-line docstring layout requires a leading newline and
            # trailing indentation to come as a pair.
            if doc.startswith('\n') and not doc.endswith(' '):
                print (f"'{k}': mismatch start and end whitespace")
                invalid = True
            if not doc.startswith('\n') and doc.endswith(' '):
                print (f"'{k}': mismatch start and end whitespace")
                invalid = True
            if doc.replace(' ', '').endswith('\n\n'):
                print (f"'{k}': docstring ends with too many newlines")
                invalid = True
            doc = doc.strip()
            # Style checks on the stripped text: capitalized start, final
            # period, and a summary line that itself ends with a period.
            if doc[0] == doc[0].lower():
                print (f"'{k}': docstring starts with lowercase")
                invalid = True
            if not doc.endswith('.'):
                print (f"'{k}': docstring does not end with '.'")
                invalid = True
            summary, _, description = doc.partition('\n')
            if not summary.endswith('.'):
                print (f"'{k}': summary line does not end with '.'")
                invalid = True
        self.assertFalse(missing)
        self.assertFalse(invalid)


if __name__ == '__main__':
    unittest.main()
|
mpi4pyREPO_NAMEmpi4pyPATH_START.@mpi4py_extracted@mpi4py-master@test@test_doc.py@.PATH_END.py
|
{
"filename": "dataclasses.py",
"repo_name": "langchain-ai/langchain",
"repo_path": "langchain_extracted/langchain-master/libs/core/langchain_core/pydantic_v1/dataclasses.py",
"type": "Python"
}
|
# Legacy compatibility shim: re-export pydantic v1's dataclasses API.
from langchain_core._api import warn_deprecated

# On pydantic>=2 the v1 API lives under the ``pydantic.v1`` namespace;
# on pydantic<2 it is the top-level ``pydantic.dataclasses`` module.
try:
    from pydantic.v1.dataclasses import *  # noqa: F403
except ImportError:
    from pydantic.dataclasses import *  # type: ignore # noqa: F403

# Warn at import time: this shim is deprecated and scheduled for removal.
warn_deprecated(
    "0.3.0",
    removal="1.0.0",
    alternative="pydantic.v1 or pydantic",
    message=(
        "As of langchain-core 0.3.0, LangChain uses pydantic v2 internally. "
        "The langchain_core.pydantic_v1 module was a "
        "compatibility shim for pydantic v1, and should no longer be used. "
        "Please update the code to import from Pydantic directly.\n\n"
        "For example, replace imports like: "
        "`from langchain_core.pydantic_v1 import BaseModel`\n"
        "with: `from pydantic import BaseModel`\n"
        "or the v1 compatibility namespace if you are working in a code base "
        "that has not been fully upgraded to pydantic 2 yet. "
        "\tfrom pydantic.v1 import BaseModel\n"
    ),
)
|
langchain-aiREPO_NAMElangchainPATH_START.@langchain_extracted@langchain-master@libs@core@langchain_core@pydantic_v1@dataclasses.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "migueldvb/cine",
"repo_path": "cine_extracted/cine-master/cine/tests/__init__.py",
"type": "Python"
}
|
"""
This package contains utilities to run the cine test suite.
"""
|
migueldvbREPO_NAMEcinePATH_START.@cine_extracted@cine-master@cine@tests@__init__.py@.PATH_END.py
|
{
"filename": "README.md",
"repo_name": "andizq/discminer",
"repo_path": "discminer_extracted/discminer-main/README.md",
"type": "Markdown"
}
|
<p align="center">
<img src="https://raw.githubusercontent.com/andizq/andizq.github.io/master/discminer/discminer_logo.jpeg" width="500" height="" ></p>
<h2 align="center">The Channel Map Modelling Code</h2>
<div align="center">
<a href="https://github.com/andizq/discminer/blob/main/LICENSE"><img alt="License" src="https://img.shields.io/badge/license-MIT-FEE440.svg?style=for-the-badge"></a>
<a href="https://github.com/andizq/discminer/pulls"><img alt="Pull request?" src="https://img.shields.io/badge/Become%20a-miner%20%e2%9a%92-00BBF9.svg?style=for-the-badge"></a>
<a href="https://github.com/andizq"><img alt="andizq" src="https://img.shields.io/badge/with%20%e2%99%a1%20by-andizq-ff1414.svg?style=for-the-badge"></a>
<a href="https://github.com/psf/black"><img alt="Code style: black" src="https://img.shields.io/badge/code%20style-black-000000.svg?style=for-the-badge"></a>
</div>
<div align="center">
Welcome to the discminer repository! Looking for quick examples and tutorials? Check out the docs.
<br />
<a href="https://github.com/andizq/discminer/issues/new?assignees=&labels=bug&title=bug%3A+">Report a Bug</a>
·
<a href="https://github.com/andizq/discminer/issues/new?assignees=&labels=enhancement&title=feature%3A+">Request a Feature</a>
·
<a href="https://github.com/andizq/discminer/issues/new?assignees=&labels=question&title=question%3A+">Ask a Question</a>
</div>
- Model channel maps from molecular line emission of discs by fitting intensity **and** rotation velocity
- Study the disc vertical structure by modelling front and back side emission surfaces
- Compute moment maps that accurately capture complex line profile morphologies
- Extract rotation curves, radial and meridional velocities, intensity and line width profiles
- Analyse the disc dynamical structure by modelling Keplerian motion + pressure support + self-gravity at once
- Identify velocity and intensity substructures; study their coherence and degree of localisation
- Non-axisymmetric models are possible; all attributes can be described as a function of $R,\phi,z$ disc coords
<img
src="images/discminer_outline.png"
alt="Discminer workflow and capabilities"
style="display: inline-block; margin: 0 auto; max-width: 500px">
## Mining tools
Discminer offers a wide range of analysis and visualisation tools to fully explore the physical and dynamical structure of your disc.
### cube
- Compute moment maps that accurately capture complex line profile morphologies.
- Output moment maps include **peak intensity**, **line width**, **line slope**, and **centroid velocity**.
- Easily clip, downsample, and convert to brightness temperature units.
- Quickly visualise model versus data channels and interactively extract spectra.
### rail
- Extract azimuthal and radial profiles of intensity, line width and velocity from moment maps.
- Compute rotation curves and decompose disc velocity into its three-dimensional components.
- Reveal large-scale signatures and quantify their pitch angle, width, extent, and coherence degree.
### pick
- Identify small-scale velocity and intensity perturbations, and estimate their localisation degree.
### plottools
- Customise intensity channels and residual maps, and highlight coherent and localised perturbations.
- Use sky or disc projections interchangeably for easier visualisation of features.
- Easily overlay the disc geometry (orientation and vertical structure) on any observable product.
- Overlay 1D profiles or 2D maps from external data to e.g. highlight the presence of dust substructures.
## Installation
```bash
pip install discminer
```
To upgrade the code,
```bash
pip install -U discminer
```
#### Optional dependencies
- [termtables](https://pypi.org/project/termtables)
- [termplotlib](https://pypi.org/project/termplotlib)
- [FilFinder](https://pypi.org/project/fil-finder)
- [schwimmbad](https://pypi.org/project/schwimmbad)
- [ipython](https://ipython.readthedocs.io/en/stable)
## How to use
The package documentation is still under construction, but you can find practical examples demonstrating the main
functionality of the code in the `./template` folder of this repository.
To run the examples on your local machine you can clone this repository and follow the instructions provided in the readme file,
```bash
git clone https://github.com/andizq/discminer.git
cd discminer/template
less README.rst
```
## Citation
If you find `discminer` useful for your research please cite the work of [Izquierdo et al. 2021](https://ui.adsabs.harvard.edu/abs/2021A%26A...650A.179I/abstract),
```latex
@ARTICLE{2021A&A...650A.179I,
author = {{Izquierdo}, A.~F. and {Testi}, L. and {Facchini}, S. and {Rosotti}, G.~P. and {van Dishoeck}, E.~F.},
title = "{The Disc Miner. I. A statistical framework to detect and quantify kinematical perturbations driven by young planets in discs}",
journal = {\aap},
keywords = {planet-disk interactions, planets and satellites: detection, protoplanetary disks, radiative transfer, Astrophysics - Earth and Planetary Astrophysics, Astrophysics - Solar and Stellar Astrophysics},
year = 2021,
month = jun,
volume = {650},
eid = {A179},
pages = {A179},
doi = {10.1051/0004-6361/202140779},
archivePrefix = {arXiv},
eprint = {2104.09596},
primaryClass = {astro-ph.EP},
adsurl = {https://ui.adsabs.harvard.edu/abs/2021A&A...650A.179I},
adsnote = {Provided by the SAO/NASA Astrophysics Data System}
}
```
|
andizqREPO_NAMEdiscminerPATH_START.@discminer_extracted@discminer-main@README.md@.PATH_END.py
|
{
"filename": "_newton_solver.py",
"repo_name": "scikit-learn/scikit-learn",
"repo_path": "scikit-learn_extracted/scikit-learn-main/sklearn/linear_model/_glm/_newton_solver.py",
"type": "Python"
}
|
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
"""
Newton solver for Generalized Linear Models
"""
import warnings
from abc import ABC, abstractmethod
import numpy as np
import scipy.linalg
import scipy.optimize
from ..._loss.loss import HalfSquaredError
from ...exceptions import ConvergenceWarning
from ...utils.optimize import _check_optimize_result
from .._linear_loss import LinearModelLoss
class NewtonSolver(ABC):
    """Newton solver for GLMs.

    This class implements Newton/2nd-order optimization routines for GLMs. Each Newton
    iteration aims at finding the Newton step which is done by the inner solver. With
    Hessian H, gradient g and coefficients coef, one step solves:

        H @ coef_newton = -g

    For our GLM / LinearModelLoss, we have gradient g and Hessian H:

        g = X.T @ loss.gradient + l2_reg_strength * coef
        H = X.T @ diag(loss.hessian) @ X + l2_reg_strength * identity

    Backtracking line search updates coef = coef_old + t * coef_newton for some t in
    (0, 1].

    This is a base class, actual implementations (child classes) may deviate from the
    above pattern and use structure specific tricks.

    Usage pattern:
        - initialize solver: sol = NewtonSolver(...)
        - solve the problem: sol.solve(X, y, sample_weight)

    References
    ----------
    - Jorge Nocedal, Stephen J. Wright. (2006) "Numerical Optimization"
      2nd edition
      https://doi.org/10.1007/978-0-387-40065-5

    - Stephen P. Boyd, Lieven Vandenberghe. (2004) "Convex Optimization."
      Cambridge University Press, 2004.
      https://web.stanford.edu/~boyd/cvxbook/bv_cvxbook.pdf

    Parameters
    ----------
    coef : ndarray of shape (n_dof,), (n_classes, n_dof) or (n_classes * n_dof,)
        Initial coefficients of a linear model.
        If shape (n_classes * n_dof,), the classes of one feature are contiguous,
        i.e. one reconstructs the 2d-array via
        coef.reshape((n_classes, -1), order="F").

    linear_loss : LinearModelLoss
        The loss to be minimized.

    l2_reg_strength : float, default=0.0
        L2 regularization strength.

    tol : float, default=1e-4
        The optimization problem is solved when each of the following condition is
        fulfilled:
        1. maximum |gradient| <= tol
        2. Newton decrement d: 1/2 * d^2 <= tol

    max_iter : int, default=100
        Maximum number of Newton steps allowed.

    n_threads : int, default=1
        Number of OpenMP threads to use for the computation of the Hessian and gradient
        of the loss function.

    Attributes
    ----------
    coef_old : ndarray of shape coef.shape
        Coefficient of previous iteration.

    coef_newton : ndarray of shape coef.shape
        Newton step.

    gradient : ndarray of shape coef.shape
        Gradient of the loss w.r.t. the coefficients.

    gradient_old : ndarray of shape coef.shape
        Gradient of previous iteration.

    loss_value : float
        Value of objective function = loss + penalty.

    loss_value_old : float
        Value of objective function of previous iteration.

    raw_prediction : ndarray of shape (n_samples,) or (n_samples, n_classes)

    converged : bool
        Indicator for convergence of the solver.

    iteration : int
        Number of Newton steps, i.e. calls to inner_solve

    use_fallback_lbfgs_solve : bool
        If set to True, the solver will resort to call LBFGS to finish the optimization
        procedure in case of convergence issues.

    gradient_times_newton : float
        gradient @ coef_newton, set in inner_solve and used by line_search. If the
        Newton step is a descent direction, this is negative.
    """

    def __init__(
        self,
        *,
        coef,
        linear_loss=LinearModelLoss(base_loss=HalfSquaredError(), fit_intercept=True),
        l2_reg_strength=0.0,
        tol=1e-4,
        max_iter=100,
        n_threads=1,
        verbose=0,
    ):
        self.coef = coef
        self.linear_loss = linear_loss
        self.l2_reg_strength = l2_reg_strength
        self.tol = tol
        self.max_iter = max_iter
        self.n_threads = n_threads
        self.verbose = verbose

    def setup(self, X, y, sample_weight):
        """Precomputations

        If None, initializes:
            - self.coef

        Sets:
            - self.raw_prediction
            - self.loss_value
        """
        _, _, self.raw_prediction = self.linear_loss.weight_intercept_raw(self.coef, X)
        self.loss_value = self.linear_loss.loss(
            coef=self.coef,
            X=X,
            y=y,
            sample_weight=sample_weight,
            l2_reg_strength=self.l2_reg_strength,
            n_threads=self.n_threads,
            raw_prediction=self.raw_prediction,
        )

    @abstractmethod
    def update_gradient_hessian(self, X, y, sample_weight):
        """Update gradient and Hessian."""

    @abstractmethod
    def inner_solve(self, X, y, sample_weight):
        """Compute Newton step.

        Sets:
            - self.coef_newton
            - self.gradient_times_newton
        """

    def fallback_lbfgs_solve(self, X, y, sample_weight):
        """Fallback solver in case of emergency.

        If a solver detects convergence problems, it may fall back to this method in
        the hope to exit with success instead of raising an error.

        Sets:
            - self.coef
            - self.converged
        """
        opt_res = scipy.optimize.minimize(
            self.linear_loss.loss_gradient,
            self.coef,
            method="L-BFGS-B",
            jac=True,
            options={
                "maxiter": self.max_iter - self.iteration,
                "maxls": 50,  # default is 20
                "iprint": self.verbose - 1,
                "gtol": self.tol,
                "ftol": 64 * np.finfo(np.float64).eps,
            },
            args=(X, y, sample_weight, self.l2_reg_strength, self.n_threads),
        )
        self.iteration += _check_optimize_result("lbfgs", opt_res)
        self.coef = opt_res.x
        self.converged = opt_res.status == 0

    def line_search(self, X, y, sample_weight):
        """Backtracking line search.

        Sets:
            - self.coef_old
            - self.coef
            - self.loss_value_old
            - self.loss_value
            - self.gradient_old
            - self.gradient
            - self.raw_prediction
        """
        # line search parameters
        beta, sigma = 0.5, 0.00048828125  # 1/2, 1/2**11
        eps = 16 * np.finfo(self.loss_value.dtype).eps
        t = 1  # step size

        # gradient_times_newton = self.gradient @ self.coef_newton
        # was computed in inner_solve.
        armijo_term = sigma * self.gradient_times_newton
        _, _, raw_prediction_newton = self.linear_loss.weight_intercept_raw(
            self.coef_newton, X
        )

        self.coef_old = self.coef
        self.loss_value_old = self.loss_value
        self.gradient_old = self.gradient

        # np.sum(np.abs(self.gradient_old))
        sum_abs_grad_old = -1

        is_verbose = self.verbose >= 2
        if is_verbose:
            print("  Backtracking Line Search")
            print(f"    eps=16 * finfo.eps={eps}")

        for i in range(21):  # until and including t = beta**20 ~ 1e-6
            self.coef = self.coef_old + t * self.coef_newton
            raw = self.raw_prediction + t * raw_prediction_newton
            self.loss_value, self.gradient = self.linear_loss.loss_gradient(
                coef=self.coef,
                X=X,
                y=y,
                sample_weight=sample_weight,
                l2_reg_strength=self.l2_reg_strength,
                n_threads=self.n_threads,
                raw_prediction=raw,
            )
            # Note: If coef_newton is too large, loss_gradient may produce inf values,
            # potentially accompanied by a RuntimeWarning.
            # This case will be captured by the Armijo condition.

            # 1. Check Armijo / sufficient decrease condition.
            # The smaller (more negative) the better.
            loss_improvement = self.loss_value - self.loss_value_old
            check = loss_improvement <= t * armijo_term
            if is_verbose:
                print(
                    f"    line search iteration={i+1}, step size={t}\n"
                    f"      check loss improvement <= armijo term: {loss_improvement} "
                    f"<= {t * armijo_term} {check}"
                )
            if check:
                break
            # 2. Deal with relative loss differences around machine precision.
            tiny_loss = np.abs(self.loss_value_old * eps)
            check = np.abs(loss_improvement) <= tiny_loss
            if is_verbose:
                print(
                    "      check loss |improvement| <= eps * |loss_old|:"
                    f" {np.abs(loss_improvement)} <= {tiny_loss} {check}"
                )
            if check:
                if sum_abs_grad_old < 0:
                    sum_abs_grad_old = scipy.linalg.norm(self.gradient_old, ord=1)
                # 2.1 Check sum of absolute gradients as alternative condition.
                sum_abs_grad = scipy.linalg.norm(self.gradient, ord=1)
                check = sum_abs_grad < sum_abs_grad_old
                if is_verbose:
                    print(
                        "      check sum(|gradient|) < sum(|gradient_old|): "
                        f"{sum_abs_grad} < {sum_abs_grad_old} {check}"
                    )
                if check:
                    break

            t *= beta
        else:
            warnings.warn(
                (
                    f"Line search of Newton solver {self.__class__.__name__} at"
                    f" iteration #{self.iteration} did not converge after 21 line"
                    " search refinement iterations. It will now resort to lbfgs"
                    " instead."
                ),
                ConvergenceWarning,
            )
            if self.verbose:
                print("  Line search did not converge and resorts to lbfgs instead.")
            self.use_fallback_lbfgs_solve = True
            return

        self.raw_prediction = raw
        if is_verbose:
            print(
                f"    line search successful after {i+1} iterations with "
                f"loss={self.loss_value}."
            )

    def check_convergence(self, X, y, sample_weight):
        """Check for convergence.

        Sets self.converged.
        """
        if self.verbose:
            print("  Check Convergence")
        # Note: Checking maximum relative change of coefficient <= tol is a bad
        # convergence criterion because even a large step could have brought us close
        # to the true minimum.
        # coef_step = self.coef - self.coef_old
        # change = np.max(np.abs(coef_step) / np.maximum(1, np.abs(self.coef_old)))
        # check = change <= tol

        # 1. Criterion: maximum |gradient| <= tol
        #    The gradient was already updated in line_search()
        g_max_abs = np.max(np.abs(self.gradient))
        check = g_max_abs <= self.tol
        if self.verbose:
            print(f"    1. max |gradient| {g_max_abs} <= {self.tol} {check}")
        if not check:
            return

        # 2. Criterion: For Newton decrement d, check 1/2 * d^2 <= tol
        #       d = sqrt(grad @ hessian^-1 @ grad)
        #         = sqrt(coef_newton @ hessian @ coef_newton)
        #    See Boyd, Vandenberghe (2004) "Convex Optimization" Chapter 9.5.1.
        d2 = self.coef_newton @ self.hessian @ self.coef_newton
        check = 0.5 * d2 <= self.tol
        if self.verbose:
            print(f"    2. Newton decrement {0.5 * d2} <= {self.tol} {check}")
        if not check:
            return

        if self.verbose:
            loss_value = self.linear_loss.loss(
                coef=self.coef,
                X=X,
                y=y,
                sample_weight=sample_weight,
                l2_reg_strength=self.l2_reg_strength,
                n_threads=self.n_threads,
            )
            print(f"  Solver did converge at loss = {loss_value}.")
        self.converged = True

    def finalize(self, X, y, sample_weight):
        """Finalize the solvers results.

        Some solvers may need this, others not.
        """
        pass

    def solve(self, X, y, sample_weight):
        """Solve the optimization problem.

        This is the main routine.

        Order of calls:
            self.setup()
            while iteration:
                self.update_gradient_hessian()
                self.inner_solve()
                self.line_search()
                self.check_convergence()
            self.finalize()

        Returns
        -------
        coef : ndarray of shape (n_dof,), (n_classes, n_dof) or (n_classes * n_dof,)
            Solution of the optimization problem.
        """
        # setup usually:
        #   - initializes self.coef if needed
        #   - initializes and calculates self.raw_predictions, self.loss_value
        self.setup(X=X, y=y, sample_weight=sample_weight)

        self.iteration = 1
        self.converged = False
        self.use_fallback_lbfgs_solve = False

        while self.iteration <= self.max_iter and not self.converged:
            if self.verbose:
                print(f"Newton iter={self.iteration}")

            self.use_fallback_lbfgs_solve = False  # Fallback solver.

            # 1. Update Hessian and gradient
            self.update_gradient_hessian(X=X, y=y, sample_weight=sample_weight)

            # TODO:
            # if iteration == 1:
            # We might stop early, e.g. we already are close to the optimum,
            # usually detected by zero gradients at this stage.

            # 2. Inner solver
            #    Calculate Newton step/direction
            #    This usually sets self.coef_newton and self.gradient_times_newton.
            self.inner_solve(X=X, y=y, sample_weight=sample_weight)
            if self.use_fallback_lbfgs_solve:
                break

            # 3. Backtracking line search
            #    This usually sets self.coef_old, self.coef, self.loss_value_old
            #    self.loss_value, self.gradient_old, self.gradient,
            #    self.raw_prediction.
            self.line_search(X=X, y=y, sample_weight=sample_weight)
            if self.use_fallback_lbfgs_solve:
                break

            # 4. Check convergence
            #    Sets self.converged.
            self.check_convergence(X=X, y=y, sample_weight=sample_weight)

            # 5. Next iteration
            self.iteration += 1

        if not self.converged:
            if self.use_fallback_lbfgs_solve:
                # Note: The fallback solver circumvents check_convergence and relies on
                # the convergence checks of lbfgs instead. Enough warnings have been
                # raised on the way.
                self.fallback_lbfgs_solve(X=X, y=y, sample_weight=sample_weight)
            else:
                warnings.warn(
                    (
                        f"Newton solver did not converge after {self.iteration - 1} "
                        "iterations."
                    ),
                    ConvergenceWarning,
                )

        self.iteration -= 1
        self.finalize(X=X, y=y, sample_weight=sample_weight)
        return self.coef
class NewtonCholeskySolver(NewtonSolver):
"""Cholesky based Newton solver.
Inner solver for finding the Newton step H w_newton = -g uses Cholesky based linear
solver.
"""
    def setup(self, X, y, sample_weight):
        """Allocate gradient/Hessian buffers and normalize the coef layout.

        For multiclass problems the coefficients are raveled so that 1d
        linear-algebra routines (e.g. scipy.linalg.solve) can be used; also
        precomputes two boolean flags used for case distinctions later on.
        """
        super().setup(X=X, y=y, sample_weight=sample_weight)
        if self.linear_loss.base_loss.is_multiclass:
            # Easier with ravelled arrays, e.g., for scipy.linalg.solve.
            # As with LinearModelLoss, we always are contiguous in n_classes.
            self.coef = self.coef.ravel(order="F")
        # Note that the computation of gradient in LinearModelLoss follows the shape of
        # coef.
        self.gradient = np.empty_like(self.coef)
        # But the hessian is always 2d.
        n = self.coef.size
        self.hessian = np.empty_like(self.coef, shape=(n, n))
        # To help case distinctions.
        self.is_multinomial_with_intercept = (
            self.linear_loss.base_loss.is_multiclass and self.linear_loss.fit_intercept
        )
        self.is_multinomial_no_penalty = (
            self.linear_loss.base_loss.is_multiclass and self.l2_reg_strength == 0
        )
    def update_gradient_hessian(self, X, y, sample_weight):
        """Fill the preallocated gradient and Hessian buffers in-place.

        Also stores the returned flag in ``self.hessian_warning``, which
        signals a problematic (e.g. largely negative) pointwise Hessian and
        is inspected by the inner solver.
        """
        _, _, self.hessian_warning = self.linear_loss.gradient_hessian(
            coef=self.coef,
            X=X,
            y=y,
            sample_weight=sample_weight,
            l2_reg_strength=self.l2_reg_strength,
            n_threads=self.n_threads,
            gradient_out=self.gradient,
            hessian_out=self.hessian,
            raw_prediction=self.raw_prediction,  # this was updated in line_search
        )
def inner_solve(self, X, y, sample_weight):
if self.hessian_warning:
warnings.warn(
(
f"The inner solver of {self.__class__.__name__} detected a "
"pointwise hessian with many negative values at iteration "
f"#{self.iteration}. It will now resort to lbfgs instead."
),
ConvergenceWarning,
)
if self.verbose:
print(
" The inner solver detected a pointwise Hessian with many "
"negative values and resorts to lbfgs instead."
)
self.use_fallback_lbfgs_solve = True
return
# Note: The following case distinction could also be shifted to the
# implementation of HalfMultinomialLoss instead of here within the solver.
if self.is_multinomial_no_penalty:
# The multinomial loss is overparametrized for each unpenalized feature, so
# at least the intercepts. This can be seen by noting that predicted
# probabilities are invariant under shifting all coefficients of a single
# feature j for all classes by the same amount c:
# coef[k, :] -> coef[k, :] + c => proba stays the same
# where we have assumned coef.shape = (n_classes, n_features).
# Therefore, also the loss (-log-likelihood), gradient and hessian stay the
# same, see
# Noah Simon and Jerome Friedman and Trevor Hastie. (2013) "A Blockwise
# Descent Algorithm for Group-penalized Multiresponse and Multinomial
# Regression". https://doi.org/10.48550/arXiv.1311.6529
#
# We choose the standard approach and set all the coefficients of the last
# class to zero, for all features including the intercept.
n_classes = self.linear_loss.base_loss.n_classes
n_dof = self.coef.size // n_classes # degree of freedom per class
n = self.coef.size - n_dof # effective size
self.coef[n_classes - 1 :: n_classes] = 0
self.gradient[n_classes - 1 :: n_classes] = 0
self.hessian[n_classes - 1 :: n_classes, :] = 0
self.hessian[:, n_classes - 1 :: n_classes] = 0
# We also need the reduced variants of gradient and hessian where the
# entries set to zero are removed. For 2 features and 3 classes with
# arbitrary values, "x" means removed:
# gradient = [0, 1, x, 3, 4, x]
#
# hessian = [0, 1, x, 3, 4, x]
# [1, 7, x, 9, 10, x]
# [x, x, x, x, x, x]
# [3, 9, x, 21, 22, x]
# [4, 10, x, 22, 28, x]
# [x, x, x, x, x, x]
# The following slicing triggers copies of gradient and hessian.
gradient = self.gradient.reshape(-1, n_classes)[:, :-1].flatten()
hessian = self.hessian.reshape(n_dof, n_classes, n_dof, n_classes)[
:, :-1, :, :-1
].reshape(n, n)
elif self.is_multinomial_with_intercept:
# Here, only intercepts are unpenalized. We again choose the last class and
# set its intercept to zero.
self.coef[-1] = 0
self.gradient[-1] = 0
self.hessian[-1, :] = 0
self.hessian[:, -1] = 0
gradient, hessian = self.gradient[:-1], self.hessian[:-1, :-1]
else:
gradient, hessian = self.gradient, self.hessian
try:
with warnings.catch_warnings():
warnings.simplefilter("error", scipy.linalg.LinAlgWarning)
self.coef_newton = scipy.linalg.solve(
hessian, -gradient, check_finite=False, assume_a="sym"
)
if self.is_multinomial_no_penalty:
self.coef_newton = np.c_[
self.coef_newton.reshape(n_dof, n_classes - 1), np.zeros(n_dof)
].reshape(-1)
assert self.coef_newton.flags.f_contiguous
elif self.is_multinomial_with_intercept:
self.coef_newton = np.r_[self.coef_newton, 0]
self.gradient_times_newton = self.gradient @ self.coef_newton
if self.gradient_times_newton > 0:
if self.verbose:
print(
" The inner solver found a Newton step that is not a "
"descent direction and resorts to LBFGS steps instead."
)
self.use_fallback_lbfgs_solve = True
return
except (np.linalg.LinAlgError, scipy.linalg.LinAlgWarning) as e:
warnings.warn(
f"The inner solver of {self.__class__.__name__} stumbled upon a "
"singular or very ill-conditioned Hessian matrix at iteration "
f"{self.iteration}. It will now resort to lbfgs instead.\n"
"Further options are to use another solver or to avoid such situation "
"in the first place. Possible remedies are removing collinear features"
" of X or increasing the penalization strengths.\n"
"The original Linear Algebra message was:\n" + str(e),
scipy.linalg.LinAlgWarning,
)
# Possible causes:
# 1. hess_pointwise is negative. But this is already taken care in
# LinearModelLoss.gradient_hessian.
# 2. X is singular or ill-conditioned
# This might be the most probable cause.
#
# There are many possible ways to deal with this situation. Most of them
# add, explicitly or implicitly, a matrix to the hessian to make it
# positive definite, confer to Chapter 3.4 of Nocedal & Wright 2nd ed.
# Instead, we resort to lbfgs.
if self.verbose:
print(
" The inner solver stumbled upon an singular or ill-conditioned "
"Hessian matrix and resorts to LBFGS instead."
)
self.use_fallback_lbfgs_solve = True
return
def finalize(self, X, y, sample_weight):
if self.is_multinomial_no_penalty:
# Our convention is usually the symmetric parametrization where
# sum(coef[classes, features], axis=0) = 0.
# We convert now to this convention. Note that it does not change
# the predicted probabilities.
n_classes = self.linear_loss.base_loss.n_classes
self.coef = self.coef.reshape(n_classes, -1, order="F")
self.coef -= np.mean(self.coef, axis=0)
elif self.is_multinomial_with_intercept:
# Only the intercept needs an update to the symmetric parametrization.
n_classes = self.linear_loss.base_loss.n_classes
self.coef[-n_classes:] -= np.mean(self.coef[-n_classes:])
|
scikit-learnREPO_NAMEscikit-learnPATH_START.@scikit-learn_extracted@scikit-learn-main@sklearn@linear_model@_glm@_newton_solver.py@.PATH_END.py
|
{
"filename": "angle.py",
"repo_name": "GalSim-developers/GalSim",
"repo_path": "GalSim_extracted/GalSim-main/galsim/angle.py",
"type": "Python"
}
|
# Copyright (c) 2012-2023 by the GalSim developers team on GitHub
# https://github.com/GalSim-developers
#
# This file is part of GalSim: The modular galaxy image simulation toolkit.
# https://github.com/GalSim-developers/GalSim
#
# GalSim is free software: redistribution and use in source and binary forms,
# with or without modification, are permitted provided that the following
# conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions, and the disclaimer given in the accompanying LICENSE
# file.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the disclaimer given in the documentation
# and/or other materials provided with the distribution.
#
# This module defines no angle machinery of its own: the angle types and the
# standard unit constants are provided by the external ``coord`` package and
# are simply re-exported here under this namespace. ``__all__`` pins the
# public API to exactly those re-exported names.
__all__ = [ 'AngleUnit', 'Angle', '_Angle',
            'radians', 'hours', 'degrees', 'arcmin', 'arcsec' ]
from coord import AngleUnit, Angle, _Angle
from coord import radians, hours, degrees, arcmin, arcsec
|
GalSim-developersREPO_NAMEGalSimPATH_START.@GalSim_extracted@GalSim-main@galsim@angle.py@.PATH_END.py
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.