metadata
dict | text
stringlengths 0
40.6M
| id
stringlengths 14
255
|
|---|---|---|
{
"filename": "_variantsrc.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/cone/hoverlabel/font/_variantsrc.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class VariantsrcValidator(_plotly_utils.basevalidators.SrcValidator):
    """Validator for the `cone.hoverlabel.font.variantsrc` src property."""

    def __init__(
        self, plotly_name="variantsrc", parent_name="cone.hoverlabel.font", **kwargs
    ):
        # Default edit type is "none" unless the caller overrides it.
        edit_type = kwargs.pop("edit_type", "none")
        super().__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@cone@hoverlabel@font@_variantsrc.py@.PATH_END.py
|
{
"filename": "README-cpp.md",
"repo_name": "cshsgy/ExoCubed",
"repo_path": "ExoCubed_extracted/ExoCubed-main/doc/README-cpp.md",
"type": "Markdown"
}
|
## Modern C++ programming
### From C to C++ coding style
- use the full path name, starting from the `src` folder, for the include guard
- use `snprintf` instead of `sprintf`
- use `rand_r` instead of `rand`
- use `strtok_r` instead of `strtok`
|
cshsgyREPO_NAMEExoCubedPATH_START.@ExoCubed_extracted@ExoCubed-main@doc@README-cpp.md@.PATH_END.py
|
{
"filename": "_title.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/scattermap/marker/colorbar/_title.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class TitleValidator(_plotly_utils.basevalidators.TitleValidator):
    # Validator for the `scattermap.marker.colorbar.title` compound property.
    def __init__(
        self, plotly_name="title", parent_name="scattermap.marker.colorbar", **kwargs
    ):
        super(TitleValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            # Name of the generated data class this validator coerces values to.
            data_class_str=kwargs.pop("data_class_str", "Title"),
            # Auto-generated per-property documentation, passed through unchanged.
            data_docs=kwargs.pop(
                "data_docs",
                """
font
Sets this color bar's title font. Note that the
title's font used to be set by the now
deprecated `titlefont` attribute.
side
Determines the location of color bar's title
with respect to the color bar. Defaults to
"top" when `orientation` if "v" and defaults
to "right" when `orientation` if "h". Note that
the title's location used to be set by the now
deprecated `titleside` attribute.
text
Sets the title of the color bar. Note that
before the existence of `title.text`, the
title's contents used to be defined as the
`title` attribute itself. This behavior has
been deprecated.
""",
            ),
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@scattermap@marker@colorbar@_title.py@.PATH_END.py
|
{
"filename": "A01read_ari.py",
"repo_name": "nu-radio/NuRadioMC",
"repo_path": "NuRadioMC_extracted/NuRadioMC-master/NuRadioMC/examples/04_read_ari_files/A01read_ari.py",
"type": "Python"
}
|
import logging
import numpy as np
import argparse
import matplotlib.pyplot as plt
from NuRadioReco.utilities import units
import NuRadioReco.detector.detector as detector
import NuRadioReco.modules.io.eventReader
from NuRadioReco.framework.parameters import stationParameters as stnp
logging.basicConfig(level=logging.INFO)

if __name__ == "__main__":
    # BUG FIX: argument parsing previously ran at import time (before the
    # __main__ guard), so merely importing this module would exit with an
    # argparse error when no CLI arguments were present. Parsing now happens
    # only when the file is executed as a script.
    parser = argparse.ArgumentParser(description='NuRadioSim file')
    parser.add_argument('inputfilename', type=str,
                        help='path to NuRadioMC simulation result')
    parser.add_argument('detectordescription', type=str,
                        help='path to detectordescription')
    args = parser.parse_args()

    # read in detector positions (this is a dummy detector)
    det = detector.Detector(json_filename=args.detectordescription)

    # initialize modules
    eventReader = NuRadioReco.modules.io.eventReader.eventReader()
    eventReader.begin(args.inputfilename)

    for event in eventReader.run():
        for station in event.get_stations():
            station_id = station.get_id()
            for channel in station.iter_channels():
                channel_id = channel.get_id()
                # get time trace and times of bins
                trace = channel.get_trace()
                times = channel.get_times()
                # or get the frequency spectrum instead
                spectrum = channel.get_frequency_spectrum()
                frequencies = channel.get_frequencies()
                # obtain the position of the channel/antenna from the detector description
                position = det.get_relative_position(station_id, channel_id)
|
nu-radioREPO_NAMENuRadioMCPATH_START.@NuRadioMC_extracted@NuRadioMC-master@NuRadioMC@examples@04_read_ari_files@A01read_ari.py@.PATH_END.py
|
{
"filename": "_core.py",
"repo_name": "jax-ml/jax",
"repo_path": "jax_extracted/jax-main/jax/experimental/key_reuse/_core.py",
"type": "Python"
}
|
# Copyright 2023 The JAX Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
from collections import defaultdict
from collections.abc import Callable, Iterator
from functools import partial, reduce, total_ordering
from typing import Any, NamedTuple
import jax
from jax import lax
from jax import tree_util
from jax.errors import KeyReuseError
from jax.interpreters import batching, mlir
from jax._src import api_util
from jax._src import core
from jax._src import linear_util as lu
from jax._src import pjit
from jax._src import prng
from jax._src import random
from jax._src import source_info_util
from jax._src import traceback_util
from jax._src import util
from jax._src.ad_checkpoint import remat_p
from jax._src.debugging import debug_callback_p
from jax._src.interpreters import partial_eval as pe
from jax._src.util import weakref_lru_cache
from jax.experimental.shard_map import shard_map_p
import numpy as np
# Exclude this file's frames from user-facing filtered tracebacks.
traceback_util.register_exclusion(__file__)

# Message attached as the chained cause pointing at the original use site.
_source_context_message = (
    'PRNG key first used at the above location was subsequently reused'
    ' at the following location:')


def key_reuse_error_with_source_traceback(
    message: str, traceback: source_info_util.Traceback | None) -> KeyReuseError:
  """Build a KeyReuseError whose chained cause carries the first-use location."""
  err = KeyReuseError(message)
  if traceback is not None:
    filtered_tb = traceback_util.filter_traceback(traceback.as_python_traceback())
    if filtered_tb:
      # Wrap the first-use location in a secondary KeyReuseError and chain it
      # so that both locations appear when the error is printed.
      context_err = KeyReuseError(_source_context_message).with_traceback(filtered_tb)
      # Preserve any pre-existing chaining on `err` by moving it onto the
      # context error before installing the new cause.
      context_err.__context__ = err.__context__
      context_err.__cause__ = err.__cause__
      context_err.__suppress_context__ = err.__suppress_context__
      err.__context__ = None
      err.__cause__ = context_err
  return err
# Create Source() and Sink() objects which validate inputs, have
# correct equality semantics, and are hashable & immutable.
@total_ordering
class _SourceSinkBase:
  """Shared implementation of Sink/Source: an argument index plus an optional
  boolean mask selecting which elements are sunk or sourced."""
  idx: int
  mask: bool | np.ndarray

  def __init__(self, idx: int, mask: bool | np.bool_ | np.ndarray = True):
    assert isinstance(idx, int)
    if isinstance(mask, np.ndarray):
      assert mask.dtype == np.dtype('bool')
      # Canonicalize all-true / all-false arrays to plain bools so that
      # equality and hashing treat them identically to scalar masks.
      if np.all(mask):
        mask = True
      elif not np.any(mask):
        mask = False
      elif mask.flags.writeable:
        # Freeze a private copy so the instance is genuinely immutable.
        mask = np.array(mask, copy=True)
        mask.flags.writeable = False
    elif isinstance(mask, np.bool_):
      mask = bool(mask)
    else:
      assert isinstance(mask, bool)
    # Bypass the overridden __setattr__, which forbids mutation.
    super().__setattr__("idx", idx)
    super().__setattr__("mask", mask)

  def __setattr__(self, *args, **kwargs):
    raise ValueError(f"{self.__class__.__name__} is immutable")

  def __eq__(self, other):
    return (self.__class__ == other.__class__
            and self.idx == other.idx
            and np.shape(self.mask) == np.shape(other.mask)
            and np.all(self.mask == other.mask))

  def __lt__(self, other):
    # Ordering (used for sorted reprs): Sinks/Sources sort by class name then
    # index, and always precede Forwards.
    if isinstance(other, Forward):
      return True
    elif isinstance(other, _SourceSinkBase):
      return ((self.__class__.__name__, self.idx)
              < (other.__class__.__name__, other.idx))
    else:
      return NotImplemented

  def __hash__(self):
    if isinstance(self.mask, bool):
      return hash((self.__class__, self.idx, self.mask))
    else:
      # ndarrays are unhashable: hash the shape plus flattened contents.
      mask = np.asarray(self.mask)
      return hash((self.__class__, self.idx, mask.shape,
                   tuple(mask.flatten().tolist())))

  def __repr__(self):
    if self.mask is True:
      return f"{self.__class__.__name__}({self.idx})"
    return f"{self.__class__.__name__}({self.idx}, {self.mask})"
class Sink(_SourceSinkBase):
  """Marks the input at index `idx` as consumed (under `mask`)."""
  pass


class Source(_SourceSinkBase):
  """Marks the output at index `idx` as a freshly-created, unconsumed key."""
  pass
class Forward(NamedTuple):
  """Marks output `out_idx` as aliasing input `in_idx` for consumption tracking."""
  in_idx: int
  out_idx: int

  def __repr__(self):
    return f"Forward({self.in_idx}, {self.out_idx})"
# KeyReuseSignature is essentially a frozen set of Source/Sink/Forward
# objects, with a few convenience methods related to key reuse checking.
class KeyReuseSignature:
  """Frozen collection of Sink/Source/Forward entries describing how a
  primitive or jaxpr consumes, creates, and forwards PRNG keys."""
  _args: frozenset[Source | Sink | Forward]

  def __init__(self, *args):
    self._args = frozenset(args)

  def __repr__(self):
    return f"KeyReuseSignature{tuple(sorted(self._args))}"

  def __eq__(self, other):
    return isinstance(other, KeyReuseSignature) and self._args == other._args

  def __hash__(self):
    return hash(self._args)

  @property
  def sinks(self) -> Iterator[Sink]:
    yield from (s for s in self._args if isinstance(s, Sink))

  @property
  def sources(self) -> Iterator[Source]:
    yield from (s for s in self._args if isinstance(s, Source))

  @property
  def forwards(self) -> Iterator[Forward]:
    yield from (s for s in self._args if isinstance(s, Forward))

  def check_signature(self, *args, funcname="function", context=None):
    """Raise KeyReuseError if any sunk argument is an already-consumed key."""
    for sink in self.sinks:
      key = args[sink.idx]
      if not isinstance(key, prng.PRNGKeyArray):
        continue
      if np.any(key._consumed & sink.mask):
        msg = f"Previously-consumed key passed to {funcname} at index {sink.idx}"
        if context:
          # BUG FIX: this was the plain literal " {context}" (missing the
          # f-prefix), so the context was appended verbatim, never interpolated.
          msg += f" {context}"
        raise key_reuse_error_with_source_traceback(
          msg, key._source_info and key._source_info.traceback)

  def update_consumption(self, args_in, args_out):
    """Propagate consumption flags from inputs to outputs after execution."""
    for sink in self.sinks:
      arg = args_in[sink.idx]
      if isinstance(arg, prng.PRNGKeyArray):
        arg._consumed = arg._consumed | sink.mask
        if np.any(sink.mask):
          arg._source_info = source_info_util.current()
    # Outputs default to consumed unless overridden by a Source/Forward below.
    for arg in args_out:
      if isinstance(arg, prng.PRNGKeyArray):
        arg._consumed = True
    for source in self.sources:
      if isinstance(args_out[source.idx], prng.PRNGKeyArray):
        args_out[source.idx]._consumed = ~np.asarray(source.mask)
    for forward in self.forwards:
      arg_in = args_in[forward.in_idx]
      arg_out = args_out[forward.out_idx]
      if isinstance(arg_in, prng.PRNGKeyArray) and isinstance(arg_out, prng.PRNGKeyArray):
        arg_out._consumed = arg_in._consumed
class DynamicKeyReuseSignature(NamedTuple):
  """Wrapper marking a signature that must be computed per-equation."""
  signature: Callable[[core.JaxprEqn], KeyReuseSignature]


def dynamic_key_reuse_signature(f: Callable[[core.JaxprEqn], KeyReuseSignature]) -> DynamicKeyReuseSignature:
  """Decorator wrapping an eqn -> signature function as a dynamic signature."""
  return DynamicKeyReuseSignature(f)
def key_reuse_signature_from_eqn(eqn: core.JaxprEqn) -> KeyReuseSignature:
  """Look up (or compute) the key reuse signature for a jaxpr equation.

  Falls back to `unknown_signature` (which sinks every key-typed input) when
  the primitive has no registered rule.
  """
  if eqn.primitive in key_reuse_signatures:
    sig = key_reuse_signatures[eqn.primitive]
    if isinstance(sig, KeyReuseSignature):
      return sig
    elif isinstance(sig, DynamicKeyReuseSignature):
      return sig.signature(eqn)
    else:
      # BUG FIX: corrected "sigature" -> "signature" in the error message.
      raise TypeError(
          f"Unrecognized key reuse signature of type {type(sig)}: {sig}")
  else:
    return unknown_signature(eqn)
def key_reuse_signature_from_primitive(prim, *args, **params):
  """Compute the key reuse signature for a bound primitive application."""
  if prim == pjit.pjit_p:
    return jaxpr_type_signature(params['jaxpr'].jaxpr)
  if prim not in key_reuse_signatures:
    # TODO(jakevdp) should we generate an unknown signature here?
    raise RuntimeError(f"Internal: no key reuse rule for primitive {prim}")
  sig = key_reuse_signatures[prim]
  if isinstance(sig, KeyReuseSignature):
    return sig
  elif isinstance(sig, DynamicKeyReuseSignature):
    # Trace the primitive application to a jaxpr so the dynamic rule can
    # inspect the resulting equation.
    jaxpr = jax.make_jaxpr(partial(prim.bind, **params))(*args).jaxpr
    return jaxpr_type_signature(jaxpr)
  else:
    # BUG FIX: corrected "sigature" -> "signature" in the error message.
    raise TypeError(
        f"Unrecognized key reuse signature of type {type(sig)}: {sig}")
# consume_p: identity primitive whose key-reuse rule sinks its input,
# letting users explicitly mark a key as consumed.
consume_p = core.Primitive("consume")
consume_p.def_impl(lambda x: x)
consume_p.def_abstract_eval(lambda x: x)
batching.defvectorized(consume_p)
mlir.register_lowering(
    consume_p,
    mlir.lower_fun(lambda x: x, multiple_results=False))


def consume(key):
  """Consume the key and return a consumed copy."""
  return consume_p.bind(key)
# assert_consumed_value_p: identity primitive that, during key-reuse checking,
# verifies the input key's consumed state matches the `value` parameter.
assert_consumed_value_p = core.Primitive("assert_consumed_value")
assert_consumed_value_p.def_impl(lambda x, *, value: x)
assert_consumed_value_p.def_abstract_eval(lambda x, *, value: x)
batching.defvectorized(assert_consumed_value_p)
mlir.register_lowering(
    assert_consumed_value_p,
    mlir.lower_fun(lambda x, *, value: x, multiple_results=False))


def assert_unconsumed(key):
  """Assert that a key is unconsumed"""
  assert_consumed_value_p.bind(key, value=False)


def assert_consumed(key, value=True):
  """Assert that a key is consumed"""
  assert_consumed_value_p.bind(key, value=value)
def _check_consumed_value(eqn, consumed):
"""Extra check for use with assert_consumed_value_p"""
expected = eqn.params['value']
if not np.all(consumed == expected):
if np.all(expected):
raise AssertionError(f"Expected key to be consumed in {eqn}")
elif not np.any(expected):
raise AssertionError(f"Expected key to not be consumed in {eqn}")
else:
raise AssertionError(f"Expected {expected}, got {consumed} in {eqn}")
# The behavior of most primitives can be described via simple signatures.
key_reuse_signatures: dict[core.Primitive, KeyReuseSignature | DynamicKeyReuseSignature] = {}
key_reuse_signatures[consume_p] = KeyReuseSignature(Sink(0), Forward(0, 0))
key_reuse_signatures[assert_consumed_value_p] = KeyReuseSignature(Forward(0, 0))
key_reuse_signatures[random.random_clone_p] = KeyReuseSignature(Source(0))
key_reuse_signatures[prng.random_bits_p] = KeyReuseSignature(Sink(0))
# TODO(jakevdp): should fold_in sink its input key?
key_reuse_signatures[prng.random_fold_in_p] = KeyReuseSignature(Source(0))
key_reuse_signatures[prng.random_seed_p] = KeyReuseSignature(Source(0))
key_reuse_signatures[prng.random_split_p] = KeyReuseSignature(Sink(0), Source(0))
key_reuse_signatures[random.random_gamma_p] = KeyReuseSignature(Sink(0))
# TODO(jakevdp): broadcast should probably consume the input to avoid implicit duplication
key_reuse_signatures[lax.broadcast_in_dim_p] = KeyReuseSignature(Forward(0, 0))
key_reuse_signatures[lax.copy_p] = KeyReuseSignature(Forward(0, 0))
key_reuse_signatures[lax.convert_element_type_p] = KeyReuseSignature(Forward(0, 0))
key_reuse_signatures[lax.device_put_p] = KeyReuseSignature(Forward(0, 0))
key_reuse_signatures[lax.reshape_p] = KeyReuseSignature(Forward(0, 0))
key_reuse_signatures[lax.squeeze_p] = KeyReuseSignature(Forward(0, 0))
key_reuse_signatures[prng.random_wrap_p] = KeyReuseSignature(Source(0))
# TODO(jakevdp): should unwrap sink its input key?
key_reuse_signatures[prng.random_unwrap_p] = KeyReuseSignature()
key_reuse_signatures[debug_callback_p] = KeyReuseSignature()
key_reuse_signatures[lax.dynamic_slice_p] = KeyReuseSignature(Forward(0, 0))
key_reuse_signatures[lax.dynamic_update_slice_p] = KeyReuseSignature(Sink(1), Forward(0, 0))
key_reuse_signatures[lax.gather_p] = KeyReuseSignature(Forward(0, 0))
key_reuse_signatures[lax.scatter_p] = KeyReuseSignature(Sink(2), Forward(0, 0))
# Equality checks don't consume
key_reuse_signatures[lax.eq_p] = KeyReuseSignature()
key_reuse_signatures[lax.ne_p] = KeyReuseSignature()
# The default signature will Sink all key inputs, and not Source any.
def unknown_signature(eqn):
  """Conservative fallback: sink every key-typed input, source nothing."""
  def is_key(var: core.Atom):
    return hasattr(var.aval, "dtype") and jax.dtypes.issubdtype(var.aval.dtype, jax.dtypes.prng_key)
  key_sinks = [Sink(idx) for idx, var in enumerate(eqn.invars) if is_key(var)]
  return KeyReuseSignature(*key_sinks)
@weakref_lru_cache
def jaxpr_type_signature(jaxpr: core.Jaxpr) -> KeyReuseSignature:
  """Parse the jaxpr to determine key reuse signature"""
  # consumed: per-variable record of which elements have been consumed so far.
  consumed: dict[core.Atom, bool | np.ndarray] = {}
  forwards: dict[core.Atom, core.Atom] = {}  # map forwarded outputs to inputs.

  def resolve_forwards(var: core.Atom) -> core.Atom:
    # Follow forwarding links back to the originating atom; the bounded loop
    # guards against cycles in the forwards map.
    if not forwards:
      return var
    for _ in range(len(forwards) + 1):
      if isinstance(var, core.Literal):
        return var
      if var in forwards:
        var = forwards[var]
      else:
        return var
    raise ValueError("forwarding cycle detected")

  def is_key(var: core.Atom):
    return hasattr(var.aval, "dtype") and jax.dtypes.issubdtype(var.aval.dtype, jax.dtypes.prng_key)

  def sink(var: core.Atom, mask=True):
    # Mark `var` consumed under `mask`; return True if any overlap with a
    # previous consumption is detected (i.e. reuse).
    if not is_key(var):
      return
    var = resolve_forwards(var)
    assert not isinstance(var, core.Literal)
    if np.any(np.logical_and(consumed.get(var, False), mask)):
      return True
    consumed[var] = np.logical_or(consumed.get(var, False), mask)

  def source(var: core.Atom, mask=False):
    # Reset consumption state for a freshly-created key.
    if not is_key(var):
      return
    var = resolve_forwards(var)
    assert not isinstance(var, core.Literal)
    consumed[var] = mask

  def is_consumed(var: core.Atom):
    var = resolve_forwards(var)
    if isinstance(var, core.Literal):
      return False
    return consumed.get(var, False)

  for eqn in jaxpr.eqns:
    traceback = eqn.source_info.traceback
    name_stack = source_info_util.current_name_stack() + eqn.source_info.name_stack
    with source_info_util.user_context(traceback, name_stack=name_stack):
      signature = key_reuse_signature_from_eqn(eqn)
      if eqn.primitive == assert_consumed_value_p:
        # This is a special case that goes beyond normal key reuse logic.
        _check_consumed_value(eqn, is_consumed(eqn.invars[0]))
      for in_idx, out_idx in signature.forwards:
        forwards[eqn.outvars[out_idx]] = eqn.invars[in_idx]
      for snk in signature.sinks:
        if not 0 <= snk.idx < len(eqn.invars):
          raise KeyReuseError(f"In {eqn.primitive}, sink {snk.idx} out of range [0, {len(eqn.invars)}]")
        if sink(eqn.invars[snk.idx], snk.mask):
          raise KeyReuseError(f"In {eqn.primitive}, argument {snk.idx} is already consumed.")
      for var in eqn.outvars:
        if not isinstance(var, core.Literal) and var not in forwards:
          source(var, True)  # consumed unless in a Source.
      for src in signature.sources:
        if not 0 <= src.idx < len(eqn.outvars):
          raise KeyReuseError(f"In {eqn.primitive}, source {src.idx} out of range [0, {len(eqn.outvars)}]")
        source(eqn.outvars[src.idx])

  all_inputs = [*jaxpr.invars, *jaxpr.constvars]
  # Summarize: inputs that end up consumed become Sinks; fresh key outputs
  # become Sources; outputs aliasing inputs become Forwards.
  return KeyReuseSignature(
    *(Sink(i, consumed[v]) for i, v in enumerate(all_inputs)
      if is_key(v) and np.any(consumed.get(v, False))),
    *(Source(i) for i, v in enumerate(jaxpr.outvars)
      if is_key(v) and resolve_forwards(v) not in all_inputs and not consumed.get(v, False)),
    *(Forward(all_inputs.index(resolve_forwards(outvar)), idx_out)  # type: ignore[arg-type]
      for idx_out, outvar in enumerate(jaxpr.outvars)
      if is_key(outvar) and resolve_forwards(outvar) in all_inputs)
  )
def function_type_signature(fun: Callable[..., Any], *args: Any) -> KeyReuseSignature:
  """Trace `fun` on `args` to a jaxpr and compute its key reuse signature."""
  args_flat, in_tree = tree_util.tree_flatten(args)
  in_avals_flat = [core.get_aval(arg) for arg in args_flat]
  wrapped_fun, _ = api_util.flatten_fun_nokwargs(lu.wrap_init(fun), in_tree)
  jaxpr, _, _, () = pe.trace_to_jaxpr_dynamic(wrapped_fun, in_avals_flat)
  return jaxpr_type_signature(jaxpr)


def check_key_reuse_jaxpr(jaxpr: core.Jaxpr) -> None:
  """Check the jaxpr for key reuse."""
  # Signature computation raises KeyReuseError on any reuse it finds.
  jaxpr_type_signature(jaxpr)


def check_key_reuse(fun: Callable[..., Any], /, *args: Any) -> None:
  """Function to statically check key reuse."""
  function_type_signature(fun, *args)
#----------------------------------------------------------------------------------
# key reuse rules for particular primitives:

@dynamic_key_reuse_signature
def _slice_signature(eqn):
  """Key reuse rule for lax.slice_p: sink only the sliced-out elements."""
  in_aval = eqn.invars[0].aval
  if not jax.dtypes.issubdtype(in_aval.dtype, jax.dtypes.prng_key):
    return KeyReuseSignature(Forward(0, 0))
  if any(core.is_symbolic_dim(s) for s in in_aval.shape):
    # Symbolic shapes: no concrete mask can be built, so just forward.
    return KeyReuseSignature(Forward(0, 0))
  start_indices = eqn.params['start_indices']
  limit_indices = eqn.params['limit_indices']
  strides = eqn.params['strides'] or (1,) * len(start_indices)
  idx = tuple(slice(*tup) for tup in util.safe_zip(start_indices, limit_indices, strides))
  # Only the elements actually selected by the slice are consumed.
  sink = np.zeros(in_aval.shape, dtype=bool)
  sink[idx] = True
  return KeyReuseSignature(Sink(0, sink), Source(0))
key_reuse_signatures[lax.slice_p] = _slice_signature
@dynamic_key_reuse_signature
def _concatenate_signature(eqn):
  """Key reuse rule for lax.concatenate_p."""
  # TODO(jakevdp): should this signature be more granular?
  arity = len(eqn.invars)
  if arity == 1:
    # Single-operand concatenation is effectively a pass-through.
    return KeyReuseSignature(Forward(0, 0))
  all_sinks = (Sink(i) for i in range(arity))
  return KeyReuseSignature(*all_sinks, Source(0))
key_reuse_signatures[lax.concatenate_p] = _concatenate_signature
@dynamic_key_reuse_signature
def _pjit_key_type_signature(eqn):
  """Key reuse rule for pjit: derive from the jitted jaxpr."""
  return jaxpr_type_signature(eqn.params['jaxpr'].jaxpr)
key_reuse_signatures[pjit.pjit_p] = _pjit_key_type_signature


@dynamic_key_reuse_signature
def _shard_map_type_signature(eqn):
  """Key reuse rule for shard_map: derive from the mapped jaxpr."""
  return jaxpr_type_signature(eqn.params['jaxpr'])
key_reuse_signatures[shard_map_p] = _shard_map_type_signature
@dynamic_key_reuse_signature
def _cond_key_type_signature(eqn):
  """Key reuse rule for lax.cond: conservatively combine branch signatures."""
  signatures = [jaxpr_type_signature(branch.jaxpr) for branch in eqn.params['branches']]
  sinks = defaultdict(list)
  sources = defaultdict(list)
  for sig in signatures:
    for sink in sig.sinks:
      sinks[sink.idx].append(sink.mask)
    for source in sig.sources:
      sources[source.idx].append(source.mask)
  # Sink an input if ANY branch sinks it (index offset by 1 for the
  # predicate argument); source an output only where EVERY branch does.
  combined_sinks = [Sink(i + 1, reduce(np.logical_or, m)) for i, m in sinks.items()]
  combined_sources = [Source(i, reduce(np.logical_and, m)) for i, m in sources.items()]
  # Forward only when all branches agree on the same forwarding.
  combined_forwards = [Forward(f.in_idx + 1, f.out_idx) for f in
                       set.intersection(*(set(sig.forwards) for sig in signatures))]
  return KeyReuseSignature(*combined_sinks, *combined_sources, *combined_forwards)
key_reuse_signatures[lax.cond_p] = _cond_key_type_signature
@dynamic_key_reuse_signature
def _scan_key_type_signature(eqn):
  """Key reuse rule for lax.scan: the body may not consume constant keys and
  may only consume carry keys that it re-sources on output."""
  jaxpr = eqn.params['jaxpr'].jaxpr
  num_consts = eqn.params['num_consts']
  num_carry = eqn.params['num_carry']
  signature = jaxpr_type_signature(jaxpr)

  # scan body should not consume key in constants
  if any(np.any(s.mask) for s in signature.sinks if s.idx < num_consts):
    raise KeyReuseError("scan body function leads to key reuse when repeatedly executed, "
                        "because key constants are repeatedly consumed:\n"
                        f" {signature=}\n"
                        f" {eqn=}\n"
                        f" {jaxpr=}")

  # scan carry should only consume keys that are sourced on output.
  carry_sinks = {s.idx - num_consts: s.mask for s in signature.sinks
                 if 0 <= s.idx - num_consts < num_carry and np.any(s.mask)}
  carry_sources = {s.idx: s.mask for s in signature.sources
                   if 0 <= s.idx < num_carry and np.any(s.mask)}
  if not set(carry_sinks).issubset(set(carry_sources)):  # TODO(jakevdp): check that masks match
    raise KeyReuseError("scan body function leads to key reuse when repeatedly executed, "
                        "because consumed inputs don't match sourced outputs:\n"
                        f" {signature=}\n"
                        f" {eqn=}\n"
                        f" {jaxpr=}")
  return signature
key_reuse_signatures[jax.lax.scan_p] = _scan_key_type_signature
@dynamic_key_reuse_signature
def _while_key_type_signature(eqn):
  """Key reuse rule for lax.while_loop: neither cond nor body may consume
  constant keys, and carry keys may only be consumed if the body re-sources
  them on output."""
  cond_jaxpr = eqn.params['cond_jaxpr'].jaxpr
  cond_nconsts = eqn.params['cond_nconsts']
  body_jaxpr = eqn.params['body_jaxpr'].jaxpr
  body_nconsts = eqn.params['body_nconsts']
  cond_signature = jaxpr_type_signature(cond_jaxpr)
  body_signature = jaxpr_type_signature(body_jaxpr)

  # Error if there are sinks among consts.
  if any(np.any(s.mask) for s in cond_signature.sinks if s.idx < cond_nconsts):
    raise KeyReuseError("while_loop cond function leads to key reuse when repeatedly executed: "
                        f" {cond_signature=}\n"
                        f" {eqn=}")
  if any(np.any(s.mask) for s in body_signature.sinks if s.idx < body_nconsts):
    raise KeyReuseError("while_loop body function leads to key reuse when repeatedly executed: "
                        f" {body_signature=}\n"
                        f" {eqn=}")

  # carry should only consume keys that are sourced on output.
  body_carry_sinks = {s.idx - body_nconsts: s.mask for s in body_signature.sinks if s.idx >= body_nconsts}
  cond_carry_sinks = {s.idx - cond_nconsts: s.mask for s in cond_signature.sinks if s.idx >= cond_nconsts}
  carry_sources = {s.idx: s.mask for s in body_signature.sources}
  # TODO(jakevdp): check masks at each index?
  if not (cond_carry_sinks.keys() <= carry_sources.keys()):
    raise KeyReuseError("while_loop cond function leads to key reuse when repeatedly executed: "
                        f" {cond_signature=}\n"
                        f" {eqn=}")
  if not (body_carry_sinks.keys() <= carry_sources.keys()):
    raise KeyReuseError("while_loop body function leads to key reuse when repeatedly executed: "
                        f" {body_signature=}\n"
                        f" {eqn=}")
  # A carry key consumed by both cond and body is used twice per iteration.
  if body_carry_sinks.keys() & cond_carry_sinks.keys():
    raise KeyReuseError("while_loop cond and body functions both use the same key: "
                        f" {cond_signature=}\n"
                        f" {body_signature=}\n"
                        f" {eqn=}")
  return body_signature
key_reuse_signatures[jax.lax.while_p] = _while_key_type_signature
@dynamic_key_reuse_signature
def _remat_key_type_signature(eqn):
  """Key reuse rule for remat (checkpoint)."""
  # The assumption here is that the non-differentiated pass contains all relevant
  # key usage, and the differentiated pass
  #  1) will only consume keys that are already consumed in the non-differentiated pass
  #  2) will never create keys
  # Therefore, the differentiated pass is a no-op.
  if eqn.params['differentiated']:
    return KeyReuseSignature()
  return jaxpr_type_signature(eqn.params['jaxpr'])
key_reuse_signatures[remat_p] = _remat_key_type_signature
def call_impl_with_key_reuse_checks(prim: core.Primitive, raw_impl: Callable[..., Any], *args, **kwargs) -> Any:
  """Wrap a primitive's impl rule with eager key reuse checking.

  Validates the arguments against the primitive's signature before calling
  the raw implementation, then updates consumption flags on inputs/outputs.
  """
  if prim not in key_reuse_signatures:
    # TODO(jakevdp): should we use an unknown signature here?
    return raw_impl(*args, **kwargs)
  signature = key_reuse_signature_from_primitive(prim, *args, **kwargs)
  funcname = "jit-compiled function" if prim == pjit.pjit_p else str(prim)
  # pjit closes over consts, which also participate in the signature check.
  consts = kwargs['jaxpr'].consts if prim == pjit.pjit_p else []
  signature.check_signature(*args, *consts, funcname=funcname)
  result = raw_impl(*args, **kwargs)
  signature.update_consumption([*args, *consts], result if prim.multiple_results else [result])
  return result
|
jax-mlREPO_NAMEjaxPATH_START.@jax_extracted@jax-main@jax@experimental@key_reuse@_core.py@.PATH_END.py
|
{
"filename": "mpi.py",
"repo_name": "jonaselgammal/GPry",
"repo_path": "GPry_extracted/GPry-main/gpry/mpi.py",
"type": "Python"
}
|
# Defining some helpers for parallelisation.
import dill
from mpi4py import MPI
import numpy as np
from numpy.random import SeedSequence, default_rng, Generator

# Use dill pickler (can serialize more stuff, e.g. lambdas)
MPI.pickle.__init__(dill.dumps, dill.loads)

# Define some interfaces
comm = MPI.COMM_WORLD
SIZE = comm.Get_size()  # total number of MPI processes
RANK = comm.Get_rank()  # rank of this process (0 = main)
is_main_process = not bool(RANK)
multiple_processes = SIZE > 1
def get_random_state(seed=None):
    """
    Generates seed sequences for processes running in parallel.

    Parameters
    ----------
    seed : int or numpy seed, or numpy.random.Generator, optional (default=None)
        A random seed to initialise a Generator, or a Generator to be used directly.
        If none is provided a random one will be drawn.
    """
    if isinstance(seed, Generator):
        return seed
    if is_main_process:
        # Spawn one statistically independent child seed per MPI process.
        ss = SeedSequence(seed)
        child_seeds = ss.spawn(SIZE)
    # NOTE(review): scatter is an MPI collective, so it is presumably executed
    # by every rank (main scatters child_seeds, others pass None and receive);
    # the original indentation was lost in extraction — verify against upstream.
    ss = comm.scatter(child_seeds if is_main_process else None)
    return default_rng(ss)
def split_number_for_parallel_processes(n, n_proc=SIZE):
    """
    Splits a number of atomic tasks `n` between the parallel processes.

    If `n` is not divisible by the number of processes, processes with lower
    rank are preferred, e.g. 5 tasks for 3 processes are assigned as [2, 2, 1].

    Parameters
    ----------
    n : int
        The number of atomic tasks
    n_proc : int, optional (default=number of MPI comm's)
        The number of processes to divide the tasks between

    Returns
    -------
    An array with the number of tasks corresponding each process.
    """
    # Each process gets the integer share; the first `remainder` ranks get
    # one extra task each (equivalent to the round-robin slot assignment).
    share, remainder = divmod(n, n_proc)
    return share + (np.arange(n_proc) < remainder).astype(int)
def multi_gather_array(arrs):
    """
    Gathers (possibly a list of) arrays from all processes into the main process.

    NB: mpi-gather guarantees rank order is preserved.

    Parameters
    ----------
    arrs : array-like
        The arrays to gather

    Returns
    -------
    The gathered array(s) from all processes
    """
    if not isinstance(arrs, (list, tuple)):
        arrs = [arrs]
    Nobj = len(arrs)
    if multiple_processes:
        # Collective call: every rank sends its list; only rank 0 receives.
        all_arrs = comm.gather(arrs)
        if is_main_process:
            # Concatenate the i-th array of every rank, preserving rank order.
            arrs = [np.concatenate([all_arrs[r][i]
                                    for r in range(SIZE)]) for i in range(Nobj)]
            return arrs
        else:
            # Non-main ranks receive nothing from gather.
            return [None for i in range(Nobj)]
    else:
        return arrs
def sync_processes():
    """
    Makes all processes halt here until all have reached this point.
    """
    # MPI barrier: collective no-op used purely for synchronisation.
    comm.barrier()
def share_attr(instance, attr_name, root=0):
    """Broadcasts ``attr`` of ``instance`` from process of rank ``root``."""
    if not multiple_processes:
        return
    # Collective: every rank overwrites its attribute with root's value;
    # default None keeps ranks without the attribute from raising.
    setattr(instance, attr_name,
            comm.bcast(getattr(instance, attr_name, None), root=root))
|
jonaselgammalREPO_NAMEGPryPATH_START.@GPry_extracted@GPry-main@gpry@mpi.py@.PATH_END.py
|
{
"filename": "kendra.py",
"repo_name": "langchain-ai/langchain",
"repo_path": "langchain_extracted/langchain-master/libs/community/langchain_community/retrievers/kendra.py",
"type": "Python"
}
|
import re
from abc import ABC, abstractmethod
from typing import (
Any,
Callable,
Dict,
List,
Literal,
Optional,
Sequence,
Union,
)
from langchain_core.callbacks import CallbackManagerForRetrieverRun
from langchain_core.documents import Document
from langchain_core.retrievers import BaseRetriever
from pydantic import (
BaseModel,
Field,
model_validator,
validator,
)
from typing_extensions import Annotated
def clean_excerpt(excerpt: str) -> str:
    """Clean an excerpt from Kendra.

    Collapses whitespace runs into single spaces and removes the literal
    "..." truncation markers.

    Args:
        excerpt: The excerpt to clean.

    Returns:
        The cleaned excerpt.
    """
    if not excerpt:
        return excerpt
    collapsed = re.sub(r"\s+", " ", excerpt)
    return collapsed.replace("...", "")
def combined_text(item: "ResultItem") -> str:
    """Combine a ResultItem title and excerpt into a single string.

    Args:
        item: the ResultItem of a Kendra search.

    Returns:
        A combined text of the title and excerpt of the given item.
    """
    parts = []
    title = item.get_title()
    if title:
        parts.append(f"Document Title: {title}\n")
    excerpt = clean_excerpt(item.get_excerpt())
    if excerpt:
        parts.append(f"Document Excerpt: \n{excerpt}\n")
    return "".join(parts)
# Union of the value types a Kendra document attribute may take; dates are
# represented as ISO strings rather than datetime objects.
DocumentAttributeValueType = Union[str, int, List[str], None]
"""Possible types of a DocumentAttributeValue.

Dates are also represented as str.
"""


# Unexpected keyword argument "extra" for "__init_subclass__" of "object"
class Highlight(BaseModel, extra="allow"):  # type: ignore[call-arg]
    """Information that highlights the keywords in the excerpt."""

    BeginOffset: int
    """The zero-based location in the excerpt where the highlight starts."""
    EndOffset: int
    """The zero-based location in the excerpt where the highlight ends."""
    TopAnswer: Optional[bool]
    """Indicates whether the result is the best one."""
    Type: Optional[str]
    """The highlight type: STANDARD or THESAURUS_SYNONYM."""
# Unexpected keyword argument "extra" for "__init_subclass__" of "object"
class TextWithHighLights(BaseModel, extra="allow"):  # type: ignore[call-arg]
    """Text with highlights."""

    Text: str
    """The text."""
    Highlights: Optional[Any]
    """The highlights."""


# Unexpected keyword argument "extra" for "__init_subclass__" of "object"
class AdditionalResultAttributeValue(  # type: ignore[call-arg]
    BaseModel, extra="allow"
):
    """Value of an additional result attribute."""

    TextWithHighlightsValue: TextWithHighLights
    """The text with highlights value."""
# Unexpected keyword argument "extra" for "__init_subclass__" of "object"
class AdditionalResultAttribute(BaseModel, extra="allow"):  # type: ignore[call-arg]
    """Additional result attribute."""

    Key: str
    """The key of the attribute."""
    ValueType: Literal["TEXT_WITH_HIGHLIGHTS_VALUE"]
    """The type of the value."""
    Value: AdditionalResultAttributeValue
    """The value of the attribute."""

    def get_value_text(self) -> str:
        # Only TEXT_WITH_HIGHLIGHTS_VALUE is supported, so unwrap directly.
        return self.Value.TextWithHighlightsValue.Text
# Unexpected keyword argument "extra" for "__init_subclass__" of "object"
class DocumentAttributeValue(BaseModel, extra="allow"):  # type: ignore[call-arg]
    """Value of a document attribute."""

    DateValue: Optional[str]
    """The date expressed as an ISO 8601 string."""
    LongValue: Optional[int]
    """The long value."""
    StringListValue: Optional[List[str]]
    """The string list value."""
    StringValue: Optional[str]
    """The string value."""

    @property
    def value(self) -> DocumentAttributeValueType:
        """The only defined document attribute value or None.

        According to Amazon Kendra, you can only provide one
        value for a document attribute.
        """
        # BUG FIX: compare against None rather than relying on truthiness,
        # so falsy-but-defined values (e.g. LongValue == 0, StringValue == "")
        # are returned instead of being silently dropped.
        if self.DateValue is not None:
            return self.DateValue
        if self.LongValue is not None:
            return self.LongValue
        if self.StringListValue is not None:
            return self.StringListValue
        if self.StringValue is not None:
            return self.StringValue
        return None
# Unexpected keyword argument "extra" for "__init_subclass__" of "object"
class DocumentAttribute(BaseModel, extra="allow"):  # type: ignore[call-arg]
    """Document attribute."""

    Key: str
    """The key of the attribute."""
    Value: DocumentAttributeValue
    """The value of the attribute."""
# Unexpected keyword argument "extra" for "__init_subclass__" of "object"
class ResultItem(BaseModel, ABC, extra="allow"):  # type: ignore[call-arg]
    """Base class of a result item."""

    Id: Optional[str]
    """The ID of the relevant result item."""
    DocumentId: Optional[str]
    """The document ID."""
    DocumentURI: Optional[str]
    """The document URI."""
    DocumentAttributes: Optional[List[DocumentAttribute]] = []
    """The document attributes."""
    ScoreAttributes: Optional[dict]
    """The kendra score confidence"""

    @abstractmethod
    def get_title(self) -> str:
        """Document title."""

    @abstractmethod
    def get_excerpt(self) -> str:
        """Document excerpt or passage original content as retrieved by Kendra."""

    def get_additional_metadata(self) -> dict:
        """Document additional metadata dict.

        This returns any extra metadata except these:
            * result_id
            * document_id
            * source
            * title
            * excerpt
            * document_attributes
        """
        return {}

    def get_document_attributes_dict(self) -> Dict[str, DocumentAttributeValueType]:
        """Document attributes dict."""
        attributes = self.DocumentAttributes or []
        return {attribute.Key: attribute.Value.value for attribute in attributes}

    def get_score_attribute(self) -> str:
        """Document Score Confidence"""
        # Absent score attributes map to the sentinel confidence label.
        if self.ScoreAttributes is None:
            return "NOT_AVAILABLE"
        return self.ScoreAttributes["ScoreConfidence"]

    def to_doc(
        self, page_content_formatter: Callable[["ResultItem"], str] = combined_text
    ) -> Document:
        """Converts this item to a Document."""
        page_content = page_content_formatter(self)
        doc_metadata = self.get_additional_metadata()
        doc_metadata["result_id"] = self.Id
        doc_metadata["document_id"] = self.DocumentId
        doc_metadata["source"] = self.DocumentURI
        doc_metadata["title"] = self.get_title()
        doc_metadata["excerpt"] = self.get_excerpt()
        doc_metadata["document_attributes"] = self.get_document_attributes_dict()
        doc_metadata["score"] = self.get_score_attribute()
        return Document(page_content=page_content, metadata=doc_metadata)
class QueryResultItem(ResultItem):
    """Query API result item."""

    DocumentTitle: TextWithHighLights
    """The document title."""
    FeedbackToken: Optional[str]
    """Identifies a particular result from a particular query."""
    Format: Optional[str]
    """
    If the Type is ANSWER, then format is either:
        * TABLE: a table excerpt is returned in TableExcerpt;
        * TEXT: a text excerpt is returned in DocumentExcerpt.
    """
    Type: Optional[str]
    """Type of result: DOCUMENT or QUESTION_ANSWER or ANSWER"""
    AdditionalAttributes: Optional[List[AdditionalResultAttribute]] = []
    """One or more additional attributes associated with the result."""
    DocumentExcerpt: Optional[TextWithHighLights]
    """Excerpt of the document text."""

    def get_title(self) -> str:
        """Plain text of the document title."""
        return self.DocumentTitle.Text

    def get_attribute_value(self) -> str:
        """Text of the first additional attribute, or "" when absent."""
        attributes = self.AdditionalAttributes
        if not attributes or not attributes[0]:
            return ""
        return attributes[0].get_value_text()

    def get_excerpt(self) -> str:
        """Answer text when present, otherwise the document excerpt."""
        attributes = self.AdditionalAttributes
        if attributes and attributes[0].Key == "AnswerText":
            return self.get_attribute_value()
        if self.DocumentExcerpt:
            return self.DocumentExcerpt.Text
        return ""

    def get_additional_metadata(self) -> dict:
        """Expose the result type alongside the standard metadata."""
        return {"type": self.Type}
class RetrieveResultItem(ResultItem):
    """Retrieve API result item."""

    DocumentTitle: Optional[str]
    """The document title."""
    Content: Optional[str]
    """The content of the item."""

    def get_title(self) -> str:
        """Document title, or "" when missing."""
        return "" if not self.DocumentTitle else self.DocumentTitle

    def get_excerpt(self) -> str:
        """Passage content, or "" when missing."""
        return "" if not self.Content else self.Content
# Unexpected keyword argument "extra" for "__init_subclass__" of "object"
class QueryResult(BaseModel, extra="allow"):  # type: ignore[call-arg]
    """`Amazon Kendra Query API` search result.

    It is composed of:
        * Relevant suggested answers: either a text excerpt or table excerpt.
        * Matching FAQs or questions-answer from your FAQ file.
        * Documents including an excerpt of each document with its title.
    """

    ResultItems: List[QueryResultItem]
    """The result items."""
# Unexpected keyword argument "extra" for "__init_subclass__" of "object"
class RetrieveResult(BaseModel, extra="allow"):  # type: ignore[call-arg]
    """`Amazon Kendra Retrieve API` search result.

    It is composed of:
        * relevant passages or text excerpts given an input query.
    """

    QueryId: str
    """The ID of the query."""
    ResultItems: List[RetrieveResultItem]
    """The result items."""
# Maps Kendra's categorical ScoreConfidence labels onto [0, 1] so results can
# be compared against a numeric `min_score_confidence` threshold.
KENDRA_CONFIDENCE_MAPPING = {
    "NOT_AVAILABLE": 0.0,
    "LOW": 0.25,
    "MEDIUM": 0.50,
    "HIGH": 0.75,
    "VERY_HIGH": 1.0,
}
class AmazonKendraRetriever(BaseRetriever):
    """`Amazon Kendra Index` retriever.

    Args:
        index_id: Kendra index id

        region_name: The aws region e.g., `us-west-2`.
            Fallsback to AWS_DEFAULT_REGION env variable
            or region specified in ~/.aws/config.

        credentials_profile_name: The name of the profile in the ~/.aws/credentials
            or ~/.aws/config files, which has either access keys or role information
            specified. If not specified, the default credential profile or, if on an
            EC2 instance, credentials from IMDS will be used.

        top_k: No of results to return

        attribute_filter: Additional filtering of results based on metadata
            See: https://docs.aws.amazon.com/kendra/latest/APIReference

        document_relevance_override_configurations: Overrides relevance tuning
            configurations of fields/attributes set at the index level
            See: https://docs.aws.amazon.com/kendra/latest/APIReference

        page_content_formatter: generates the Document page_content
            allowing access to all result item attributes. By default, it uses
            the item's title and excerpt.

        client: boto3 client for Kendra

        user_context: Provides information about the user context
            See: https://docs.aws.amazon.com/kendra/latest/APIReference

    Example:
        .. code-block:: python

            retriever = AmazonKendraRetriever(
                index_id="c0806df7-e76b-4bce-9b5c-d5582f6b1a03"
            )
    """

    index_id: str
    region_name: Optional[str] = None
    credentials_profile_name: Optional[str] = None
    top_k: int = 3
    attribute_filter: Optional[Dict] = None
    document_relevance_override_configurations: Optional[List[Dict]] = None
    page_content_formatter: Callable[[ResultItem], str] = combined_text
    client: Any
    user_context: Optional[Dict] = None
    # Results whose mapped score confidence is below this value are dropped;
    # None disables the filter.
    min_score_confidence: Annotated[Optional[float], Field(ge=0.0, le=1.0)]

    @validator("top_k")
    def validate_top_k(cls, value: int) -> int:
        # Reject negative values; 0 is allowed and simply yields no documents.
        if value < 0:
            raise ValueError(f"top_k ({value}) cannot be negative.")
        return value

    @model_validator(mode="before")
    @classmethod
    def create_client(cls, values: Dict[str, Any]) -> Any:
        # NOTE(review): top_k negativity is checked here in addition to
        # validate_top_k — presumably to cover both pydantic validation
        # paths; confirm before removing either check.
        top_k = values.get("top_k")
        if top_k is not None and top_k < 0:
            raise ValueError(f"top_k ({top_k}) cannot be negative.")

        # A caller-supplied boto3 client takes precedence; nothing to build.
        if values.get("client") is not None:
            return values

        try:
            import boto3

            if values.get("credentials_profile_name"):
                session = boto3.Session(profile_name=values["credentials_profile_name"])
            else:
                # use default credentials
                session = boto3.Session()

            client_params = {}
            if values.get("region_name"):
                client_params["region_name"] = values["region_name"]

            values["client"] = session.client("kendra", **client_params)

            return values
        except ImportError:
            raise ImportError(
                "Could not import boto3 python package. "
                "Please install it with `pip install boto3`."
            )
        except Exception as e:
            raise ValueError(
                "Could not load credentials to authenticate with AWS client. "
                "Please check that credentials in the specified "
                "profile name are valid."
            ) from e

    def _kendra_query(self, query: str) -> Sequence[ResultItem]:
        """Call the Kendra Retrieve API, falling back to Query on 0 results."""
        kendra_kwargs = {
            "IndexId": self.index_id,
            # truncate the query to ensure that
            # there is no validation exception from Kendra.
            "QueryText": query.strip()[0:999],
            "PageSize": self.top_k,
        }
        if self.attribute_filter is not None:
            kendra_kwargs["AttributeFilter"] = self.attribute_filter
        if self.document_relevance_override_configurations is not None:
            kendra_kwargs["DocumentRelevanceOverrideConfigurations"] = (
                self.document_relevance_override_configurations
            )
        if self.user_context is not None:
            kendra_kwargs["UserContext"] = self.user_context

        response = self.client.retrieve(**kendra_kwargs)
        r_result = RetrieveResult.parse_obj(response)
        if r_result.ResultItems:
            return r_result.ResultItems

        # Retrieve API returned 0 results, fall back to Query API
        response = self.client.query(**kendra_kwargs)
        q_result = QueryResult.parse_obj(response)
        return q_result.ResultItems

    def _get_top_k_docs(self, result_items: Sequence[ResultItem]) -> List[Document]:
        """Convert at most the first top_k result items to Documents."""
        top_docs = [
            item.to_doc(self.page_content_formatter)
            for item in result_items[: self.top_k]
        ]

        return top_docs

    def _filter_by_score_confidence(self, docs: List[Document]) -> List[Document]:
        """
        Keep only documents whose score confidence maps to a value at or
        above ``min_score_confidence``; no-op when the threshold is unset.
        """
        if not self.min_score_confidence:
            return docs
        filtered_docs = [
            item
            for item in docs
            if (
                item.metadata.get("score") is not None
                and isinstance(item.metadata["score"], str)
                and KENDRA_CONFIDENCE_MAPPING.get(item.metadata["score"], 0.0)
                >= self.min_score_confidence
            )
        ]
        return filtered_docs

    def _get_relevant_documents(
        self,
        query: str,
        *,
        run_manager: CallbackManagerForRetrieverRun,
    ) -> List[Document]:
        """Run search on Kendra index and get top k documents

        Example:
        .. code-block:: python

            docs = retriever.invoke('This is my query')

        """
        result_items = self._kendra_query(query)
        top_k_docs = self._get_top_k_docs(result_items)
        return self._filter_by_score_confidence(top_k_docs)
|
langchain-aiREPO_NAMElangchainPATH_START.@langchain_extracted@langchain-master@libs@community@langchain_community@retrievers@kendra.py@.PATH_END.py
|
{
"filename": "shear.py",
"repo_name": "ggalloni/cobaya",
"repo_path": "cobaya_extracted/cobaya-master/cobaya/likelihoods/des_y1/shear.py",
"type": "Python"
}
|
from cobaya.likelihoods.base_classes import DES
class shear(DES):
    r"""
    Cosmic shear data from the first year of the Dark Energy Survey (DES Y1)
    \cite{Abbott:2017wau}.
    """
    # Bibliography entry consumed by Cobaya's citation machinery.
    bibtex_file = 'des_y1.bibtex'
|
ggalloniREPO_NAMEcobayaPATH_START.@cobaya_extracted@cobaya-master@cobaya@likelihoods@des_y1@shear.py@.PATH_END.py
|
{
"filename": "HP_2011_ds62.py",
"repo_name": "CaymanUnterborn/ExoPlex",
"repo_path": "ExoPlex_extracted/ExoPlex-master/ExoPlex/burnman/minerals/HP_2011_ds62.py",
"type": "Python"
}
|
# This file is part of BurnMan - a thermoelastic and thermodynamic toolkit
# for the Earth and Planetary Sciences
# Copyright (C) 2012 - 2021 by the BurnMan team, released under the GNU
# GPL v2 or later.
"""
HP_2011_ds62
^^^^^^^^^^^^
Endmember minerals from Holland and Powell 2011 and references therein.
Update to dataset version 6.2.
The values in this document are all in S.I. units,
unlike those in the original tc-ds62.txt.
File autogenerated using HPdata_to_burnman.py.
"""
from ..classes.mineral import Mineral
"""
ENDMEMBERS
"""
class fo (Mineral):
    """Forsterite, Mg2SiO4."""

    def __init__(self):
        self.params = {'name': 'fo',
                       'formula': {'Mg': 2.0, 'O': 4.0, 'Si': 1.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -2172590.0,
                       'S_0': 95.1,
                       'V_0': 4.366e-05,
                       'Cp': [233.3, 0.001494, -603800.0, -1869.7],
                       'a_0': 2.85e-05,
                       'K_0': 128500e6,
                       'Kprime_0': 3.84,
                       'Kdprime_0': -3e-11,
                       'n': 7.0,
                       'molar_mass': 0.1406931}
        Mineral.__init__(self)
class fa (Mineral):
    """Fayalite, Fe2SiO4."""

    def __init__(self):
        self.params = {'name': 'fa',
                       'formula': {'Fe': 2.0, 'O': 4.0, 'Si': 1.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -1477720.0,
                       'S_0': 151.0,
                       'V_0': 4.631e-05,
                       'Cp': [201.1, 0.01733, -1960600.0, -900.9],
                       'a_0': 2.82e-05,
                       'K_0': 125600e6,
                       'Kprime_0': 4.68,
                       'Kdprime_0': -3.7e-11,
                       'n': 7.0,
                       'molar_mass': 0.2037731}
        Mineral.__init__(self)
class teph (Mineral):
    """Tephroite, Mn2SiO4."""

    def __init__(self):
        self.params = {'name': 'teph',
                       'formula': {'Mn': 2.0, 'O': 4.0, 'Si': 1.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -1733970.0,
                       'S_0': 155.9,
                       'V_0': 4.899e-05,
                       'Cp': [219.6, 0.0, -1292700.0, -1308.3],
                       'a_0': 2.86e-05,
                       'K_0': 125600e6,
                       'Kprime_0': 4.68,
                       'Kdprime_0': -3.7e-11,
                       'n': 7.0,
                       'molar_mass': 0.2019591}
        Mineral.__init__(self)
class lrn (Mineral):
    """Larnite, Ca2SiO4 (with Landau-type transition modifier)."""

    def __init__(self):
        self.params = {'name': 'lrn',
                       'formula': {'Ca': 2.0, 'O': 4.0, 'Si': 1.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -2306920.0,
                       'S_0': 127.6,
                       'V_0': 5.16e-05,
                       'Cp': [247.5, -0.003206, 0.0, -2051.9],
                       'a_0': 2.9e-05,
                       'K_0': 98500e6,
                       'Kprime_0': 4.07,
                       'Kdprime_0': -4.1e-11,
                       'n': 7.0,
                       'molar_mass': 0.1722391}
        # Landau contribution with critical temperature Tc_0 = 1710 K.
        self.property_modifiers = [['landau_hp', {'P_0': 100000.0,
                                                  'T_0': 298.15,
                                                  'Tc_0': 1710.0,
                                                  'S_D': 10.03,
                                                  'V_D': 5e-07}]]
        Mineral.__init__(self)
class mont (Mineral):
    """Monticellite, CaMgSiO4."""

    def __init__(self):
        self.params = {'name': 'mont',
                       'formula': {'Ca': 1.0, 'Mg': 1.0, 'O': 4.0, 'Si': 1.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -2251260.0,
                       'S_0': 109.5,
                       'V_0': 5.148e-05,
                       'Cp': [250.7, -0.010433, -797200.0, -1996.1],
                       'a_0': 2.87e-05,
                       'K_0': 113400e6,
                       'Kprime_0': 3.87,
                       'Kdprime_0': -3.4e-11,
                       'n': 7.0,
                       'molar_mass': 0.1564661}
        Mineral.__init__(self)
class chum (Mineral):
    """Clinohumite, Mg9Si4O16(OH)2."""

    def __init__(self):
        self.params = {'name': 'chum',
                       'formula': {'H': 2.0, 'Mg': 9.0, 'O': 18.0, 'Si': 4.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -9613540.0,
                       'S_0': 440.5,
                       'V_0': 0.00019801,
                       'Cp': [1071.0, -0.016533, -7899600.0, -7373.9],
                       'a_0': 3.2e-05,
                       'K_0': 119900e6,
                       'Kprime_0': 4.58,
                       'Kdprime_0': -3.8e-11,
                       'n': 33.0,
                       'molar_mass': 0.62109208}
        Mineral.__init__(self)
class chdr (Mineral):
    """Chondrodite, Mg5Si2O8(OH)2."""

    def __init__(self):
        self.params = {'name': 'chdr',
                       'formula': {'H': 2.0, 'Mg': 5.0, 'O': 10.0, 'Si': 2.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -5254890.0,
                       'S_0': 260.0,
                       'V_0': 0.00011084,
                       'Cp': [625.0, -0.001088, -2259900.0, -4910.7],
                       'a_0': 1.82e-05,
                       'K_0': 116100e6,
                       'Kprime_0': 4.8,
                       'Kdprime_0': -4.1e-11,
                       'n': 19.0,
                       'molar_mass': 0.33970588}
        Mineral.__init__(self)
class mwd (Mineral):
    """Mg-wadsleyite, Mg2SiO4."""

    def __init__(self):
        self.params = {'name': 'mwd',
                       'formula': {'Mg': 2.0, 'O': 4.0, 'Si': 1.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -2138520.0,
                       'S_0': 93.9,
                       'V_0': 4.051e-05,
                       'Cp': [208.7, 0.003942, -1709500.0, -1302.8],
                       'a_0': 2.37e-05,
                       'K_0': 172600e6,
                       'Kprime_0': 3.84,
                       'Kdprime_0': -2.2e-11,
                       'n': 7.0,
                       'molar_mass': 0.1406931}
        Mineral.__init__(self)
class fwd (Mineral):
    """Fe-wadsleyite, Fe2SiO4."""

    def __init__(self):
        self.params = {'name': 'fwd',
                       'formula': {'Fe': 2.0, 'O': 4.0, 'Si': 1.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -1467900.0,
                       'S_0': 146.0,
                       'V_0': 4.321e-05,
                       'Cp': [201.1, 0.01733, -1960600.0, -900.9],
                       'a_0': 2.73e-05,
                       'K_0': 169000e6,
                       'Kprime_0': 4.35,
                       'Kdprime_0': -2.6e-11,
                       'n': 7.0,
                       'molar_mass': 0.2037731}
        Mineral.__init__(self)
class mrw (Mineral):
    """Mg-ringwoodite, Mg2SiO4."""

    def __init__(self):
        self.params = {'name': 'mrw',
                       'formula': {'Mg': 2.0, 'O': 4.0, 'Si': 1.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -2127680.0,
                       'S_0': 90.0,
                       'V_0': 3.949e-05,
                       'Cp': [213.3, 0.00269, -1410400.0, -1495.9],
                       'a_0': 2.01e-05,
                       'K_0': 178100e6,
                       'Kprime_0': 4.35,
                       'Kdprime_0': -2.4e-11,
                       'n': 7.0,
                       'molar_mass': 0.1406931}
        Mineral.__init__(self)
class frw (Mineral):
    """Fe-ringwoodite, Fe2SiO4."""

    def __init__(self):
        self.params = {'name': 'frw',
                       'formula': {'Fe': 2.0, 'O': 4.0, 'Si': 1.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -1471760.0,
                       'S_0': 140.0,
                       'V_0': 4.203e-05,
                       'Cp': [166.8, 0.04261, -1705400.0, -541.4],
                       'a_0': 2.22e-05,
                       'K_0': 197700e6,
                       'Kprime_0': 4.92,
                       'Kdprime_0': -2.5e-11,
                       'n': 7.0,
                       'molar_mass': 0.2037731}
        Mineral.__init__(self)
class mpv (Mineral):
    """MgSiO3 perovskite (bridgmanite)."""

    def __init__(self):
        self.params = {'name': 'mpv',
                       'formula': {'Mg': 1.0, 'O': 3.0, 'Si': 1.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -1443030.0,
                       'S_0': 62.6,
                       'V_0': 2.445e-05,
                       'Cp': [149.3, 0.002918, -2983000.0, -799.1],
                       'a_0': 1.87e-05,
                       'K_0': 251000e6,
                       'Kprime_0': 4.14,
                       'Kdprime_0': -1.6e-11,
                       'n': 5.0,
                       'molar_mass': 0.1003887}
        Mineral.__init__(self)
class fpv (Mineral):
    """FeSiO3 perovskite."""

    def __init__(self):
        self.params = {'name': 'fpv',
                       'formula': {'Fe': 1.0, 'O': 3.0, 'Si': 1.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -1084640.0,
                       'S_0': 91.0,
                       'V_0': 2.548e-05,
                       'Cp': [133.2, 0.01083, -3661400.0, -314.7],
                       'a_0': 1.87e-05,
                       'K_0': 281000e6,
                       'Kprime_0': 4.14,
                       'Kdprime_0': -1.6e-11,
                       'n': 5.0,
                       'molar_mass': 0.1319287}
        Mineral.__init__(self)
class apv (Mineral):
    """Al2O3 perovskite."""

    def __init__(self):
        self.params = {'name': 'apv',
                       'formula': {'Al': 2.0, 'O': 3.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -1646630.0,
                       'S_0': 51.8,
                       'V_0': 2.54e-05,
                       'Cp': [139.5, 0.00589, -2460600.0, -589.2],
                       'a_0': 1.8e-05,
                       'K_0': 203000e6,
                       'Kprime_0': 4.0,
                       'Kdprime_0': -2e-11,
                       'n': 5.0,
                       'molar_mass': 0.1019612}
        Mineral.__init__(self)
class cpv (Mineral):
    """CaSiO3 perovskite."""

    def __init__(self):
        self.params = {'name': 'cpv',
                       'formula': {'Ca': 1.0, 'O': 3.0, 'Si': 1.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -1541730.0,
                       'S_0': 73.5,
                       'V_0': 2.745e-05,
                       'Cp': [159.3, 0.0, -967300.0, -1075.4],
                       'a_0': 1.87e-05,
                       'K_0': 236000e6,
                       'Kprime_0': 3.9,
                       'Kdprime_0': -1.6e-11,
                       'n': 5.0,
                       'molar_mass': 0.1161617}
        Mineral.__init__(self)
class mak (Mineral):
    """MgSiO3 akimotoite."""

    def __init__(self):
        self.params = {'name': 'mak',
                       'formula': {'Mg': 1.0, 'O': 3.0, 'Si': 1.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -1490870.0,
                       'S_0': 59.3,
                       'V_0': 2.635e-05,
                       'Cp': [147.8, 0.002015, -2395000.0, -801.8],
                       'a_0': 2.12e-05,
                       'K_0': 211000e6,
                       'Kprime_0': 4.55,
                       'Kdprime_0': -2.2e-11,
                       'n': 5.0,
                       'molar_mass': 0.1003887}
        Mineral.__init__(self)
class fak (Mineral):
    """FeSiO3 akimotoite."""

    def __init__(self):
        self.params = {'name': 'fak',
                       'formula': {'Fe': 1.0, 'O': 3.0, 'Si': 1.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -1142140.0,
                       'S_0': 91.5,
                       'V_0': 2.76e-05,
                       'Cp': [100.3, 0.013328, -4364900.0, 419.8],
                       'a_0': 2.12e-05,
                       'K_0': 218000e6,
                       'Kprime_0': 4.55,
                       'Kdprime_0': -2.2e-11,
                       'n': 5.0,
                       'molar_mass': 0.1319287}
        Mineral.__init__(self)
class maj (Mineral):
    """Majorite, Mg4Si4O12."""

    def __init__(self):
        self.params = {'name': 'maj',
                       'formula': {'Mg': 4.0, 'O': 12.0, 'Si': 4.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -6050400.0,
                       'S_0': 255.2,
                       'V_0': 0.00011457,
                       'Cp': [713.6, -0.000997, -1158200.0, -6622.3],
                       'a_0': 1.83e-05,
                       'K_0': 160000e6,
                       'Kprime_0': 4.56,
                       'Kdprime_0': -2.8e-11,
                       'n': 20.0,
                       'molar_mass': 0.4015548}
        Mineral.__init__(self)
class py (Mineral):
    """Pyrope garnet, Mg3Al2Si3O12."""

    def __init__(self):
        self.params = {'name': 'py',
                       'formula': {'Al': 2.0, 'Mg': 3.0, 'O': 12.0, 'Si': 3.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -6281960.0,
                       'S_0': 269.5,
                       'V_0': 0.00011313,
                       'Cp': [633.5, 0.0, -5196100.0, -4315.2],
                       'a_0': 2.37e-05,
                       'K_0': 174300e6,
                       'Kprime_0': 4.05,
                       'Kdprime_0': -2.3e-11,
                       'n': 20.0,
                       'molar_mass': 0.4031273}
        Mineral.__init__(self)
class alm (Mineral):
    """Almandine garnet, Fe3Al2Si3O12."""

    def __init__(self):
        self.params = {'name': 'alm',
                       'formula': {'Al': 2.0, 'Fe': 3.0, 'O': 12.0, 'Si': 3.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -5260700.0,
                       'S_0': 342.0,
                       'V_0': 0.00011525,
                       'Cp': [677.3, 0.0, -3772700.0, -5044.0],
                       'a_0': 2.12e-05,
                       'K_0': 190000e6,
                       'Kprime_0': 2.98,
                       'Kdprime_0': -1.6e-11,
                       'n': 20.0,
                       'molar_mass': 0.4977473}
        Mineral.__init__(self)
class spss (Mineral):
    """Spessartine garnet, Mn3Al2Si3O12."""

    def __init__(self):
        self.params = {'name': 'spss',
                       'formula': {'Al': 2.0, 'Mn': 3.0, 'O': 12.0, 'Si': 3.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -5693490.0,
                       'S_0': 335.3,
                       'V_0': 0.00011792,
                       'Cp': [646.9, 0.0, -4525800.0, -4452.8],
                       'a_0': 2.27e-05,
                       'K_0': 174000e6,
                       'Kprime_0': 6.68,
                       'Kdprime_0': -3.8e-11,
                       'n': 20.0,
                       'molar_mass': 0.4950263}
        Mineral.__init__(self)
class gr (Mineral):
    """Grossular garnet, Ca3Al2Si3O12."""

    def __init__(self):
        self.params = {'name': 'gr',
                       'formula': {'Al': 2.0, 'Ca': 3.0, 'O': 12.0, 'Si': 3.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -6643010.0,
                       'S_0': 255.0,
                       'V_0': 0.00012535,
                       'Cp': [626.0, 0.0, -5779200.0, -4002.9],
                       'a_0': 2.2e-05,
                       'K_0': 172000e6,
                       'Kprime_0': 5.53,
                       'Kdprime_0': -3.2e-11,
                       'n': 20.0,
                       'molar_mass': 0.4504463}
        Mineral.__init__(self)
class andr (Mineral):
    """Andradite garnet, Ca3Fe2Si3O12."""

    def __init__(self):
        self.params = {'name': 'andr',
                       'formula': {'Ca': 3.0, 'Fe': 2.0, 'O': 12.0, 'Si': 3.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -5769100.0,
                       'S_0': 316.4,
                       'V_0': 0.00013204,
                       'Cp': [638.6, 0.0, -4955100.0, -3989.2],
                       'a_0': 2.86e-05,
                       'K_0': 158800e6,
                       'Kprime_0': 5.68,
                       'Kdprime_0': -3.6e-11,
                       'n': 20.0,
                       'molar_mass': 0.5081733}
        Mineral.__init__(self)
class knor (Mineral):
    """Knorringite garnet, Mg3Cr2Si3O12."""

    def __init__(self):
        self.params = {'name': 'knor',
                       'formula': {'Cr': 2.0, 'Mg': 3.0, 'O': 12.0, 'Si': 3.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -5687710.0,
                       'S_0': 317.0,
                       'V_0': 0.00011738,
                       'Cp': [613.0, 0.003606, -4178000.0, -3729.4],
                       'a_0': 2.37e-05,
                       'K_0': 174300e6,
                       'Kprime_0': 4.05,
                       'Kdprime_0': -2.3e-11,
                       'n': 20.0,
                       'molar_mass': 0.4531565}
        Mineral.__init__(self)
class osma (Mineral):
    """Osumilite endmember, KMg2Al5Si10O30."""

    def __init__(self):
        self.params = {'name': 'osma',
                       'formula': {'Al': 5.0, 'K': 1.0, 'Mg': 2.0, 'O': 30.0, 'Si': 10.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -14896310.0,
                       'S_0': 755.0,
                       'V_0': 0.00037893,
                       'Cp': [1540.7, -0.011359, -10339000.0, -11699.0],
                       'a_0': 4.7e-06,
                       'K_0': 129000e6,
                       'Kprime_0': 4.1,
                       'Kdprime_0': -3.1e-11,
                       'n': 48.0,
                       'molar_mass': 0.9834528}
        Mineral.__init__(self)
class osmm (Mineral):
    """Osumilite endmember, KMg3Al3Si11O30."""

    def __init__(self):
        self.params = {'name': 'osmm',
                       'formula': {'Al': 3.0, 'K': 1.0, 'Mg': 3.0, 'O': 30.0, 'Si': 11.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -14786740.0,
                       'S_0': 740.0,
                       'V_0': 0.0003844,
                       'Cp': [1525.5, -0.010267, -10538000.0, -11337.0],
                       'a_0': 4.7e-06,
                       'K_0': 129000e6,
                       'Kprime_0': 4.1,
                       'Kdprime_0': -3.1e-11,
                       'n': 48.0,
                       'molar_mass': 0.9818803}
        Mineral.__init__(self)
class osfa (Mineral):
    """Fe-osumilite endmember, KFe2Al5Si10O30."""

    def __init__(self):
        self.params = {'name': 'osfa',
                       'formula': {'Al': 5.0, 'Fe': 2.0, 'K': 1.0, 'O': 30.0, 'Si': 10.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -14215490.0,
                       'S_0': 780.0,
                       'V_0': 0.0003845,
                       'Cp': [1558.6, -0.011359, -9476500.0, -11845.0],
                       'a_0': 4.9e-06,
                       'K_0': 129000e6,
                       'Kprime_0': 4.1,
                       'Kdprime_0': -3.1e-11,
                       'n': 48.0,
                       'molar_mass': 1.0465328}
        Mineral.__init__(self)
class vsv (Mineral):
    """Vesuvianite, Ca19Mg2Al11Si18O69(OH)9."""

    def __init__(self):
        self.params = {'name': 'vsv',
                       'formula': {'Al': 11.0, 'Ca': 19.0, 'H': 9.0, 'Mg': 2.0, 'O': 78.0, 'Si': 18.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -42345820.0,
                       'S_0': 1890.0,
                       'V_0': 0.000852,
                       'Cp': [4488.0, -0.057952, -22269300.0, -33478.0],
                       'a_0': 2.75e-05,
                       'K_0': 125500e6,
                       'Kprime_0': 4.8,
                       'Kdprime_0': -3.8e-11,
                       'n': 137.0,
                       'molar_mass': 2.86945216}
        Mineral.__init__(self)
class andalusite (Mineral):
    """Andalusite, Al2SiO5 (dataset name 'and'; renamed to avoid the
    Python keyword)."""

    def __init__(self):
        self.params = {'name': 'and',
                       'formula': {'Al': 2.0, 'O': 5.0, 'Si': 1.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -2588670.0,
                       'S_0': 92.7,
                       'V_0': 5.153e-05,
                       'Cp': [277.3, -0.006588, -1914100.0, -2265.6],
                       'a_0': 1.81e-05,
                       'K_0': 144200e6,
                       'Kprime_0': 6.89,
                       'Kdprime_0': -4.8e-11,
                       'n': 8.0,
                       'molar_mass': 0.1620455}
        Mineral.__init__(self)
class ky (Mineral):
    """Kyanite, Al2SiO5."""

    def __init__(self):
        self.params = {'name': 'ky',
                       'formula': {'Al': 2.0, 'O': 5.0, 'Si': 1.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -2592970.0,
                       'S_0': 83.5,
                       'V_0': 4.414e-05,
                       'Cp': [279.4, -0.007124, -2055600.0, -2289.4],
                       'a_0': 1.92e-05,
                       'K_0': 160100e6,
                       'Kprime_0': 4.05,
                       'Kdprime_0': -2.5e-11,
                       'n': 8.0,
                       'molar_mass': 0.1620455}
        Mineral.__init__(self)
class sill (Mineral):
    """Sillimanite, Al2SiO5 (with Bragg-Williams order-disorder modifier)."""

    def __init__(self):
        self.params = {'name': 'sill',
                       'formula': {'Al': 2.0, 'O': 5.0, 'Si': 1.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -2585790.0,
                       'S_0': 95.4,
                       'V_0': 4.986e-05,
                       'Cp': [280.2, -0.0069, -1375700.0, -2399.4],
                       'a_0': 1.12e-05,
                       'K_0': 164000e6,
                       'Kprime_0': 5.06,
                       'Kdprime_0': -3.1e-11,
                       'n': 8.0,
                       'molar_mass': 0.1620455}
        # Bragg-Williams Al/Si order-disorder contribution.
        self.property_modifiers = [['bragg_williams', {'deltaH': 4750.0,
                                                       'deltaV': 1e-07,
                                                       'Wh': 4750.0,
                                                       'Wv': 1e-07,
                                                       'n': 1.0,
                                                       'factor': 0.25}]]
        Mineral.__init__(self)
class smul (Mineral):
    """Si-mullite, Al2SiO5."""

    def __init__(self):
        self.params = {'name': 'smul',
                       'formula': {'Al': 2.0, 'O': 5.0, 'Si': 1.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -2569210.0,
                       'S_0': 101.5,
                       'V_0': 4.987e-05,
                       'Cp': [280.2, -0.0069, -1375700.0, -2399.4],
                       'a_0': 1.36e-05,
                       'K_0': 174000e6,
                       'Kprime_0': 4.0,
                       'Kdprime_0': -2.3e-11,
                       'n': 8.0,
                       'molar_mass': 0.1620455}
        Mineral.__init__(self)
class amul (Mineral):
    """Al-mullite, Al2.5Si0.5O4.75."""

    def __init__(self):
        self.params = {'name': 'amul',
                       'formula': {'Al': 2.5, 'O': 4.75, 'Si': 0.5},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -2485530.0,
                       'S_0': 113.0,
                       'V_0': 5.083e-05,
                       'Cp': [244.8, 0.000968, -2533300.0, -1641.6],
                       'a_0': 1.36e-05,
                       'K_0': 174000e6,
                       'Kprime_0': 4.0,
                       'Kdprime_0': -2.3e-11,
                       'n': 7.75,
                       'molar_mass': 0.15749365}
        Mineral.__init__(self)
class tpz (Mineral):
    """Hydroxy-topaz, Al2SiO4(OH)2."""

    def __init__(self):
        self.params = {'name': 'tpz',
                       'formula': {'Al': 2.0, 'H': 2.0, 'O': 6.0, 'Si': 1.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -2900710.0,
                       'S_0': 100.5,
                       'V_0': 5.339e-05,
                       'Cp': [387.7, -0.00712, -857200.0, -3744.2],
                       'a_0': 1.57e-05,
                       'K_0': 131500e6,
                       'Kprime_0': 4.06,
                       'Kdprime_0': -3.1e-11,
                       'n': 11.0,
                       'molar_mass': 0.18006078}
        Mineral.__init__(self)
class mst (Mineral):
    """Mg-staurolite, Mg4Al18Si7.5O44(OH)4."""

    def __init__(self):
        self.params = {'name': 'mst',
                       'formula': {'Al': 18.0, 'H': 4.0, 'Mg': 4.0, 'O': 48.0, 'Si': 7.5},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -25123740.0,
                       'S_0': 910.0,
                       'V_0': 0.0004426,
                       'Cp': [2820.5, -0.059366, -13774000.0, -24126.0],
                       'a_0': 1.81e-05,
                       'K_0': 168400e6,
                       'Kprime_0': 4.05,
                       'Kdprime_0': -2.4e-11,
                       'n': 81.5,
                       'molar_mass': 1.56553121}
        Mineral.__init__(self)
class fst (Mineral):
    """Fe-staurolite, Fe4Al18Si7.5O44(OH)4."""

    def __init__(self):
        self.params = {'name': 'fst',
                       'formula': {'Al': 18.0, 'Fe': 4.0, 'H': 4.0, 'O': 48.0, 'Si': 7.5},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -23754630.0,
                       'S_0': 1010.0,
                       'V_0': 0.0004488,
                       'Cp': [2880.0, -0.056595, -10642000.0, -25373.0],
                       'a_0': 1.83e-05,
                       'K_0': 180000e6,
                       'Kprime_0': 4.76,
                       'Kdprime_0': -2.6e-11,
                       'n': 81.5,
                       'molar_mass': 1.69169121}
        Mineral.__init__(self)
class mnst (Mineral):
    """Mn-staurolite, Mn4Al18Si7.5O44(OH)4."""

    def __init__(self):
        self.params = {'name': 'mnst',
                       'formula': {'Al': 18.0, 'H': 4.0, 'Mn': 4.0, 'O': 48.0, 'Si': 7.5},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -24245850.0,
                       'S_0': 1034.0,
                       'V_0': 0.0004546,
                       'Cp': [2873.3, -0.089064, -12688000.0, -24749.0],
                       'a_0': 2.09e-05,
                       'K_0': 180000e6,
                       'Kprime_0': 4.76,
                       'Kdprime_0': -2.6e-11,
                       'n': 81.5,
                       'molar_mass': 1.68806321}
        Mineral.__init__(self)
class mctd (Mineral):
    """Mg-chloritoid, MgAl2SiO5(OH)2."""

    def __init__(self):
        self.params = {'name': 'mctd',
                       'formula': {'Al': 2.0, 'H': 2.0, 'Mg': 1.0, 'O': 7.0, 'Si': 1.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -3549250.0,
                       'S_0': 146.0,
                       'V_0': 6.875e-05,
                       'Cp': [417.4, -0.003771, -2920600.0, -3417.8],
                       'a_0': 2.63e-05,
                       'K_0': 145600e6,
                       'Kprime_0': 4.06,
                       'Kdprime_0': -2.8e-11,
                       'n': 13.0,
                       'molar_mass': 0.22036518}
        Mineral.__init__(self)
class fctd (Mineral):
    """Fe-chloritoid, FeAl2SiO5(OH)2."""

    def __init__(self):
        self.params = {'name': 'fctd',
                       'formula': {'Al': 2.0, 'Fe': 1.0, 'H': 2.0, 'O': 7.0, 'Si': 1.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -3208290.0,
                       'S_0': 167.0,
                       'V_0': 6.98e-05,
                       'Cp': [416.1, -0.003477, -2835900.0, -3360.3],
                       'a_0': 2.8e-05,
                       'K_0': 145600e6,
                       'Kprime_0': 4.06,
                       'Kdprime_0': -2.8e-11,
                       'n': 13.0,
                       'molar_mass': 0.25190518}
        Mineral.__init__(self)
class mnctd (Mineral):
    """Mn-chloritoid, MnAl2SiO5(OH)2."""

    def __init__(self):
        self.params = {'name': 'mnctd',
                       'formula': {'Al': 2.0, 'H': 2.0, 'Mn': 1.0, 'O': 7.0, 'Si': 1.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -3336150.0,
                       'S_0': 166.0,
                       'V_0': 7.175e-05,
                       'Cp': [464.4, -0.012654, -1147200.0, -4341.0],
                       'a_0': 2.6e-05,
                       'K_0': 145600e6,
                       'Kprime_0': 4.06,
                       'Kdprime_0': -2.8e-11,
                       'n': 13.0,
                       'molar_mass': 0.25099818}
        Mineral.__init__(self)
class merw (Mineral):
    """Merwinite, Ca3MgSi2O8."""

    def __init__(self):
        self.params = {'name': 'merw',
                       'formula': {'Ca': 3.0, 'Mg': 1.0, 'O': 8.0, 'Si': 2.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -4545700.0,
                       'S_0': 253.1,
                       'V_0': 9.847e-05,
                       'Cp': [417.5, 0.008117, -2923000.0, -2320.3],
                       'a_0': 3.19e-05,
                       'K_0': 120000e6,
                       'Kprime_0': 4.07,
                       'Kdprime_0': -3.4e-11,
                       'n': 14.0,
                       'molar_mass': 0.3287052}
        Mineral.__init__(self)
class spu (Mineral):
    """Spurrite, Ca5Si2O8(CO3)."""

    def __init__(self):
        self.params = {'name': 'spu',
                       'formula': {'C': 1.0, 'Ca': 5.0, 'O': 11.0, 'Si': 2.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -5846720.0,
                       'S_0': 332.0,
                       'V_0': 0.00014697,
                       'Cp': [614.1, -0.003508, -2493100.0, -4168.0],
                       'a_0': 3.4e-05,
                       'K_0': 95000e6,
                       'Kprime_0': 4.09,
                       'Kdprime_0': -4.3e-11,
                       'n': 19.0,
                       'molar_mass': 0.4445651}
        Mineral.__init__(self)
class zo (Mineral):
    """Zoisite, Ca2Al3Si3O12(OH)."""

    def __init__(self):
        self.params = {'name': 'zo',
                       'formula': {'Al': 3.0, 'Ca': 2.0, 'H': 1.0, 'O': 13.0, 'Si': 3.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -6896290.0,
                       'S_0': 298.0,
                       'V_0': 0.00013575,
                       'Cp': [662.0, 0.010416, -6006400.0, -4260.7],
                       'a_0': 3.12e-05,
                       'K_0': 104400e6,
                       'Kprime_0': 4.0,
                       'Kdprime_0': -3.8e-11,
                       'n': 22.0,
                       'molar_mass': 0.45435714}
        Mineral.__init__(self)
class cz (Mineral):
    """Clinozoisite, Ca2Al3Si3O12(OH)."""

    def __init__(self):
        self.params = {'name': 'cz',
                       'formula': {'Al': 3.0, 'Ca': 2.0, 'H': 1.0, 'O': 13.0, 'Si': 3.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -6895540.0,
                       'S_0': 301.0,
                       'V_0': 0.0001363,
                       'Cp': [630.9, 0.013693, -6645800.0, -3731.1],
                       'a_0': 2.33e-05,
                       'K_0': 119700e6,
                       'Kprime_0': 4.07,
                       'Kdprime_0': -3.4e-11,
                       'n': 22.0,
                       'molar_mass': 0.45435714}
        Mineral.__init__(self)
class ep (Mineral):
    """Epidote, Ca2FeAl2Si3O12(OH)."""

    def __init__(self):
        self.params = {'name': 'ep',
                       'formula': {'Al': 2.0, 'Ca': 2.0, 'Fe': 1.0, 'H': 1.0, 'O': 13.0, 'Si': 3.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -6473830.0,
                       'S_0': 315.0,
                       'V_0': 0.0001392,
                       'Cp': [613.3, 0.02207, -7160000.0, -2987.7],
                       'a_0': 2.34e-05,
                       'K_0': 134000e6,
                       'Kprime_0': 4.0,
                       'Kdprime_0': -3e-11,
                       'n': 22.0,
                       'molar_mass': 0.48322064}
        Mineral.__init__(self)
class fep (Mineral):
    """Fe-epidote, Ca2Fe2AlSi3O12(OH)."""

    def __init__(self):
        self.params = {'name': 'fep',
                       'formula': {'Al': 1.0, 'Ca': 2.0, 'Fe': 2.0, 'H': 1.0, 'O': 13.0, 'Si': 3.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -6028590.0,
                       'S_0': 329.0,
                       'V_0': 0.0001421,
                       'Cp': [584.7, 0.030447, -7674200.0, -2244.3],
                       'a_0': 2.31e-05,
                       'K_0': 151300e6,
                       'Kprime_0': 4.0,
                       'Kdprime_0': -2.6e-11,
                       'n': 22.0,
                       'molar_mass': 0.51208414}
        Mineral.__init__(self)
class pmt (Mineral):
    """Piemontite, Ca2MnAl2Si3O12(OH)."""

    def __init__(self):
        self.params = {'name': 'pmt',
                       'formula': {'Al': 2.0, 'Ca': 2.0, 'H': 1.0, 'Mn': 1.0, 'O': 13.0, 'Si': 3.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -6543030.0,
                       'S_0': 340.0,
                       'V_0': 0.0001382,
                       'Cp': [569.8, 0.02779, -5442900.0, -2812.6],
                       'a_0': 2.38e-05,
                       'K_0': 119700e6,
                       'Kprime_0': 4.07,
                       'Kdprime_0': -3.4e-11,
                       'n': 22.0,
                       'molar_mass': 0.48231364}
        Mineral.__init__(self)
class law (Mineral):
    """Lawsonite, CaAl2Si2O7(OH)2.H2O."""

    def __init__(self):
        self.params = {'name': 'law',
                       'formula': {'Al': 2.0, 'Ca': 1.0, 'H': 4.0, 'O': 10.0, 'Si': 2.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -4868630.0,
                       'S_0': 229.0,
                       'V_0': 0.00010132,
                       'Cp': [687.8, 0.001566, 375900.0, -7179.2],
                       'a_0': 2.65e-05,
                       'K_0': 122900e6,
                       'Kprime_0': 5.45,
                       'Kdprime_0': -4.4e-11,
                       'n': 19.0,
                       'molar_mass': 0.31423776}
        Mineral.__init__(self)
class mpm (Mineral):
    """Mg-pumpellyite, Ca4MgAl5Si6O21(OH)7."""

    def __init__(self):
        self.params = {'name': 'mpm',
                       'formula': {'Al': 5.0, 'Ca': 4.0, 'H': 7.0, 'Mg': 1.0, 'O': 28.0, 'Si': 6.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -14386910.0,
                       'S_0': 629.0,
                       'V_0': 0.0002955,
                       'Cp': [1720.8, -0.024928, -5998700.0, -14620.3],
                       'a_0': 2.48e-05,
                       'K_0': 161500e6,
                       'Kprime_0': 4.05,
                       'Kdprime_0': -2.5e-11,
                       'n': 51.0,
                       'molar_mass': 0.94307628}
        Mineral.__init__(self)
class fpm (Mineral):
    """Fe-pumpellyite, Ca4FeAl5Si6O21(OH)7."""

    def __init__(self):
        self.params = {'name': 'fpm',
                       'formula': {'Al': 5.0, 'Ca': 4.0, 'Fe': 1.0, 'H': 7.0, 'O': 28.0, 'Si': 6.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -14034040.0,
                       'S_0': 657.0,
                       'V_0': 0.0002968,
                       'Cp': [1737.2, -0.024582, -5161100.0, -14963.0],
                       'a_0': 2.49e-05,
                       'K_0': 161500e6,
                       'Kprime_0': 4.05,
                       'Kdprime_0': -2.5e-11,
                       'n': 51.0,
                       'molar_mass': 0.97461628}
        Mineral.__init__(self)
class jgd (Mineral):
    """Julgoldite, Ca4Fe6Si6O21(OH)7."""

    def __init__(self):
        self.params = {'name': 'jgd',
                       'formula': {'Ca': 4.0, 'Fe': 6.0, 'H': 7.0, 'O': 28.0, 'Si': 6.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -11808960.0,
                       'S_0': 830.0,
                       'V_0': 0.0003108,
                       'Cp': [1795.4, -0.037986, -4455700.0, -14888.0],
                       'a_0': 2.49e-05,
                       'K_0': 161500e6,
                       'Kprime_0': 4.05,
                       'Kdprime_0': -2.5e-11,
                       'n': 51.0,
                       'molar_mass': 1.11893378}
        Mineral.__init__(self)
class geh (Mineral):
    """Gehlenite, Ca2Al2SiO7 (with Bragg-Williams order-disorder modifier)."""

    def __init__(self):
        self.params = {'name': 'geh',
                       'formula': {'Al': 2.0, 'Ca': 2.0, 'O': 7.0, 'Si': 1.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -3992240.0,
                       'S_0': 198.5,
                       'V_0': 9.024e-05,
                       'Cp': [405.7, -0.007099, -1188300.0, -3174.4],
                       'a_0': 2.23e-05,
                       'K_0': 108000e6,
                       'Kprime_0': 4.08,
                       'Kdprime_0': -3.8e-11,
                       'n': 12.0,
                       'molar_mass': 0.2742003}
        # Bragg-Williams Al/Si order-disorder contribution.
        self.property_modifiers = [['bragg_williams', {'deltaH': 7510.0,
                                                       'deltaV': 9e-07,
                                                       'Wh': 7500.0,
                                                       'Wv': 9e-07,
                                                       'n': 1.0,
                                                       'factor': 0.8}]]
        Mineral.__init__(self)
class ak (Mineral):
    """Akermanite, Ca2MgSi2O7."""

    def __init__(self):
        self.params = {'name': 'ak',
                       'formula': {'Ca': 2.0, 'Mg': 1.0, 'O': 7.0, 'Si': 2.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -3865620.0,
                       'S_0': 212.5,
                       'V_0': 9.254e-05,
                       'Cp': [385.4, 0.003209, -247500.0, -2889.9],
                       'a_0': 2.57e-05,
                       'K_0': 142000e6,
                       'Kprime_0': 4.06,
                       'Kdprime_0': -2.9e-11,
                       'n': 12.0,
                       'molar_mass': 0.2726278}
        Mineral.__init__(self)
class rnk (Mineral):
    """Rankinite, Ca3Si2O7."""

    def __init__(self):
        self.params = {'name': 'rnk',
                       'formula': {'Ca': 3.0, 'O': 7.0, 'Si': 2.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -3943820.0,
                       'S_0': 210.0,
                       'V_0': 9.651e-05,
                       'Cp': [372.3, -0.002893, -2462400.0, -2181.3],
                       'a_0': 3.28e-05,
                       'K_0': 95000e6,
                       'Kprime_0': 4.09,
                       'Kdprime_0': -4.3e-11,
                       'n': 12.0,
                       'molar_mass': 0.2884008}
        Mineral.__init__(self)
class ty (Mineral):
def __init__(self):
self.params = {'name': 'ty',
'formula': {'C': 2.0, 'Ca': 5.0, 'O': 13.0, 'Si': 2.0},
'equation_of_state': 'hp_tmt',
'H_0': -6368040.0,
'S_0': 390.0,
'V_0': 0.00017039,
'Cp': [741.7, -0.005345, -1434600.0, -5878.5],
'a_0': 3.42e-05,
'K_0': 95000e6,
'Kprime_0': 4.09,
'Kdprime_0': -4.3e-11,
'n': 22.0,
'molar_mass': 0.4885746}
Mineral.__init__(self)
class crd (Mineral):
    """Endmember 'crd' ('hp_tmt' EoS); includes a 'bragg_williams' property modifier."""

    def __init__(self):
        self.params = {'name': 'crd',
                       'formula': {'Al': 4.0, 'Mg': 2.0, 'O': 18.0, 'Si': 5.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -9163430.0,
                       'S_0': 404.1,
                       'V_0': 0.00023322,
                       'Cp': [906.1, 0.0, -7902000.0, -6293.4],
                       'a_0': 6.8e-06,
                       'K_0': 129000e6,
                       'Kprime_0': 4.1,
                       'Kdprime_0': -3.1e-11,
                       'n': 29.0,
                       'molar_mass': 0.5849527}
        self.property_modifiers = [['bragg_williams', {'deltaH': 36710.0,
                                                       'deltaV': 1e-06,
                                                       'Wh': 36700.0,
                                                       'Wv': 1e-06,
                                                       'n': 2.0,
                                                       'factor': 1.5}]]
        Mineral.__init__(self)


class hcrd (Mineral):
    """Endmember 'hcrd' ('hp_tmt' EoS); includes a 'bragg_williams' property modifier."""

    def __init__(self):
        self.params = {'name': 'hcrd',
                       'formula': {'Al': 4.0, 'H': 2.0, 'Mg': 2.0, 'O': 19.0, 'Si': 5.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -9448520.0,
                       'S_0': 483.0,
                       'V_0': 0.00023322,
                       'Cp': [955.3, 0.0, -8352600.0, -6301.2],
                       'a_0': 6.7e-06,
                       'K_0': 129000e6,
                       'Kprime_0': 4.1,
                       'Kdprime_0': -3.1e-11,
                       'n': 32.0,
                       'molar_mass': 0.60296798}
        self.property_modifiers = [['bragg_williams', {'deltaH': 36710.0,
                                                       'deltaV': 1e-06,
                                                       'Wh': 36700.0,
                                                       'Wv': 1e-06,
                                                       'n': 2.0,
                                                       'factor': 1.5}]]
        Mineral.__init__(self)


class fcrd (Mineral):
    """Endmember 'fcrd' ('hp_tmt' EoS); includes a 'bragg_williams' property modifier."""

    def __init__(self):
        self.params = {'name': 'fcrd',
                       'formula': {'Al': 4.0, 'Fe': 2.0, 'O': 18.0, 'Si': 5.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -8444070.0,
                       'S_0': 461.0,
                       'V_0': 0.0002371,
                       'Cp': [924.0, 0.0, -7039400.0, -6439.6],
                       'a_0': 6.7e-06,
                       'K_0': 129000e6,
                       'Kprime_0': 4.1,
                       'Kdprime_0': -3.1e-11,
                       'n': 29.0,
                       'molar_mass': 0.6480327}
        self.property_modifiers = [['bragg_williams', {'deltaH': 36710.0,
                                                       'deltaV': 1e-06,
                                                       'Wh': 36700.0,
                                                       'Wv': 1e-06,
                                                       'n': 2.0,
                                                       'factor': 1.5}]]
        Mineral.__init__(self)


class mncrd (Mineral):
    """Endmember 'mncrd' ('hp_tmt' EoS); includes a 'bragg_williams' property modifier."""

    def __init__(self):
        self.params = {'name': 'mncrd',
                       'formula': {'Al': 4.0, 'Mn': 2.0, 'O': 18.0, 'Si': 5.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -8693590.0,
                       'S_0': 473.0,
                       'V_0': 0.00024027,
                       'Cp': [886.5, 0.0, -8840000.0, -5590.4],
                       'a_0': 6.9e-06,
                       'K_0': 129000e6,
                       'Kprime_0': 4.1,
                       'Kdprime_0': -3.1e-11,
                       'n': 29.0,
                       'molar_mass': 0.6462187}
        self.property_modifiers = [['bragg_williams', {'deltaH': 36710.0,
                                                       'deltaV': 1e-06,
                                                       'Wh': 36700.0,
                                                       'Wv': 1e-06,
                                                       'n': 2.0,
                                                       'factor': 1.5}]]
        Mineral.__init__(self)


class phA (Mineral):
    """Endmember 'phA': tabulated parameters for the 'hp_tmt' equation of state."""

    def __init__(self):
        self.params = {'name': 'phA',
                       'formula': {'H': 6.0, 'Mg': 7.0, 'O': 14.0, 'Si': 2.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -7129620.0,
                       'S_0': 350.5,
                       'V_0': 0.00015422,
                       'Cp': [962.0, -0.011521, -4517800.0, -7724.7],
                       'a_0': 3.55e-05,
                       'K_0': 145000e6,
                       'Kprime_0': 4.06,
                       'Kdprime_0': -2.8e-11,
                       'n': 29.0,
                       'molar_mass': 0.45634524}
        Mineral.__init__(self)
class sph (Mineral):
    """Endmember 'sph' ('hp_tmt' EoS); includes a 'landau_hp' property modifier."""

    def __init__(self):
        self.params = {'name': 'sph',
                       'formula': {'Ca': 1.0, 'O': 5.0, 'Si': 1.0, 'Ti': 1.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -2601660.0,
                       'S_0': 124.0,
                       'V_0': 5.565e-05,
                       'Cp': [227.9, 0.002924, -3539500.0, -894.3],
                       'a_0': 1.58e-05,
                       'K_0': 101700e6,
                       'Kprime_0': 9.85,
                       'Kdprime_0': -9.7e-11,
                       'n': 8.0,
                       'molar_mass': 0.1960275}
        # Landau-type transition correction (Holland-Powell formulation).
        self.property_modifiers = [['landau_hp', {'P_0': 100000.0,
                                                  'T_0': 298.15,
                                                  'Tc_0': 485.0,
                                                  'S_D': 0.4,
                                                  'V_D': 5e-08}]]
        Mineral.__init__(self)


class cstn (Mineral):
    """Endmember 'cstn': tabulated parameters for the 'hp_tmt' equation of state."""

    def __init__(self):
        self.params = {'name': 'cstn',
                       'formula': {'Ca': 1.0, 'O': 5.0, 'Si': 2.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -2496350.0,
                       'S_0': 99.5,
                       'V_0': 4.818e-05,
                       'Cp': [205.6, 0.006034, -5517700.0, -352.6],
                       'a_0': 1.58e-05,
                       'K_0': 178200e6,
                       'Kprime_0': 4.0,
                       'Kdprime_0': -2.2e-11,
                       'n': 8.0,
                       'molar_mass': 0.176246}
        Mineral.__init__(self)


class zrc (Mineral):
    """Endmember 'zrc': tabulated parameters for the 'hp_tmt' equation of state."""

    def __init__(self):
        self.params = {'name': 'zrc',
                       'formula': {'O': 4.0, 'Si': 1.0, 'Zr': 1.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -2035070.0,
                       'S_0': 83.03,
                       'V_0': 3.926e-05,
                       'Cp': [232.0, -0.014405, 0.0, -2238.2],
                       'a_0': 1.25e-05,
                       'K_0': 230100e6,
                       'Kprime_0': 4.04,
                       'Kdprime_0': -1.8e-11,
                       'n': 6.0,
                       'molar_mass': 0.1833071}
        Mineral.__init__(self)


class en (Mineral):
    """Endmember 'en': tabulated parameters for the 'hp_tmt' equation of state."""

    def __init__(self):
        self.params = {'name': 'en',
                       'formula': {'Mg': 2.0, 'O': 6.0, 'Si': 2.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -3090220.0,
                       'S_0': 132.5,
                       'V_0': 6.262e-05,
                       'Cp': [356.2, -0.00299, -596900.0, -3185.3],
                       'a_0': 2.27e-05,
                       'K_0': 105900e6,
                       'Kprime_0': 8.65,
                       'Kdprime_0': -8.2e-11,
                       'n': 10.0,
                       'molar_mass': 0.2007774}
        Mineral.__init__(self)


class pren (Mineral):
    """Endmember 'pren': tabulated parameters for the 'hp_tmt' equation of state."""

    def __init__(self):
        self.params = {'name': 'pren',
                       'formula': {'Mg': 2.0, 'O': 6.0, 'Si': 2.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -3084560.0,
                       'S_0': 137.0,
                       'V_0': 6.476e-05,
                       'Cp': [356.2, -0.00299, -596900.0, -3185.3],
                       'a_0': 2.3e-05,
                       'K_0': 105900e6,
                       'Kprime_0': 8.65,
                       'Kdprime_0': -8.2e-11,
                       'n': 10.0,
                       'molar_mass': 0.2007774}
        Mineral.__init__(self)


class cen (Mineral):
    """Endmember 'cen': tabulated parameters for the 'hp_tmt' equation of state."""

    def __init__(self):
        self.params = {'name': 'cen',
                       'formula': {'Mg': 2.0, 'O': 6.0, 'Si': 2.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -3091110.0,
                       'S_0': 132.0,
                       'V_0': 6.264e-05,
                       'Cp': [306.0, -0.003793, -3041700.0, -1852.1],
                       'a_0': 2.11e-05,
                       'K_0': 105900e6,
                       'Kprime_0': 8.65,
                       'Kdprime_0': -8.2e-11,
                       'n': 10.0,
                       'molar_mass': 0.2007774}
        Mineral.__init__(self)


class hen (Mineral):
    """Endmember 'hen': tabulated parameters for the 'hp_tmt' equation of state."""

    def __init__(self):
        self.params = {'name': 'hen',
                       'formula': {'Mg': 2.0, 'O': 6.0, 'Si': 2.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -3082730.0,
                       'S_0': 131.7,
                       'V_0': 6.099e-05,
                       'Cp': [356.2, -0.00299, -596900.0, -3185.3],
                       'a_0': 2.26e-05,
                       'K_0': 150000e6,
                       'Kprime_0': 5.5,
                       'Kdprime_0': -3.6e-11,
                       'n': 10.0,
                       'molar_mass': 0.2007774}
        Mineral.__init__(self)


class fs (Mineral):
    """Endmember 'fs': tabulated parameters for the 'hp_tmt' equation of state."""

    def __init__(self):
        self.params = {'name': 'fs',
                       'formula': {'Fe': 2.0, 'O': 6.0, 'Si': 2.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -2388710.0,
                       'S_0': 189.9,
                       'V_0': 6.592e-05,
                       'Cp': [398.7, -0.006579, 1290100.0, -4058.0],
                       'a_0': 3.26e-05,
                       'K_0': 101000e6,
                       'Kprime_0': 4.08,
                       'Kdprime_0': -4e-11,
                       'n': 10.0,
                       'molar_mass': 0.2638574}
        Mineral.__init__(self)
class mgts (Mineral):
    """Endmember 'mgts': tabulated parameters for the 'hp_tmt' equation of state."""

    def __init__(self):
        self.params = {'name': 'mgts',
                       'formula': {'Al': 2.0, 'Mg': 1.0, 'O': 6.0, 'Si': 1.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -3196670.0,
                       'S_0': 131.0,
                       'V_0': 6.05e-05,
                       'Cp': [371.4, -0.004082, -398400.0, -3547.1],
                       'a_0': 2.17e-05,
                       'K_0': 102800e6,
                       'Kprime_0': 8.55,
                       'Kdprime_0': -8.3e-11,
                       'n': 10.0,
                       'molar_mass': 0.2023499}
        Mineral.__init__(self)


class di (Mineral):
    """Endmember 'di': tabulated parameters for the 'hp_tmt' equation of state."""

    def __init__(self):
        self.params = {'name': 'di',
                       'formula': {'Ca': 1.0, 'Mg': 1.0, 'O': 6.0, 'Si': 2.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -3201850.0,
                       'S_0': 142.9,
                       'V_0': 6.619e-05,
                       'Cp': [314.5, 4.1e-05, -2745900.0, -2020.1],
                       'a_0': 2.73e-05,
                       'K_0': 119200e6,
                       'Kprime_0': 5.19,
                       'Kdprime_0': -4.4e-11,
                       'n': 10.0,
                       'molar_mass': 0.2165504}
        Mineral.__init__(self)


class hed (Mineral):
    """Endmember 'hed': tabulated parameters for the 'hp_tmt' equation of state."""

    def __init__(self):
        self.params = {'name': 'hed',
                       'formula': {'Ca': 1.0, 'Fe': 1.0, 'O': 6.0, 'Si': 2.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -2842060.0,
                       'S_0': 175.0,
                       'V_0': 6.795e-05,
                       'Cp': [340.2, 0.000812, -1047800.0, -2646.7],
                       'a_0': 2.38e-05,
                       'K_0': 119200e6,
                       'Kprime_0': 3.97,
                       'Kdprime_0': -3.3e-11,
                       'n': 10.0,
                       'molar_mass': 0.2480904}
        Mineral.__init__(self)


class jd (Mineral):
    """Endmember 'jd': tabulated parameters for the 'hp_tmt' equation of state."""

    def __init__(self):
        self.params = {'name': 'jd',
                       'formula': {'Al': 1.0, 'Na': 1.0, 'O': 6.0, 'Si': 2.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -3025270.0,
                       'S_0': 133.5,
                       'V_0': 6.04e-05,
                       'Cp': [319.4, 0.003616, -1173900.0, -2469.5],
                       'a_0': 2.1e-05,
                       'K_0': 128100e6,
                       'Kprime_0': 3.81,
                       'Kdprime_0': -3e-11,
                       'n': 10.0,
                       'molar_mass': 0.2021387}
        Mineral.__init__(self)


class acm (Mineral):
    """Endmember 'acm': tabulated parameters for the 'hp_tmt' equation of state."""

    def __init__(self):
        self.params = {'name': 'acm',
                       'formula': {'Fe': 1.0, 'Na': 1.0, 'O': 6.0, 'Si': 2.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -2583430.0,
                       'S_0': 170.6,
                       'V_0': 6.459e-05,
                       'Cp': [307.1, 0.016758, -1685500.0, -2125.8],
                       'a_0': 2.11e-05,
                       'K_0': 106000e6,
                       'Kprime_0': 4.08,
                       'Kdprime_0': -3.8e-11,
                       'n': 10.0,
                       'molar_mass': 0.2310022}
        Mineral.__init__(self)


class kos (Mineral):
    """Endmember 'kos': tabulated parameters for the 'hp_tmt' equation of state."""

    def __init__(self):
        self.params = {'name': 'kos',
                       'formula': {'Cr': 1.0, 'Na': 1.0, 'O': 6.0, 'Si': 2.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -2746840.0,
                       'S_0': 149.65,
                       'V_0': 6.309e-05,
                       'Cp': [309.2, 0.005419, -664600.0, -2176.6],
                       'a_0': 1.94e-05,
                       'K_0': 130800e6,
                       'Kprime_0': 3.0,
                       'Kdprime_0': -2.3e-11,
                       'n': 10.0,
                       'molar_mass': 0.2271533}
        Mineral.__init__(self)


class cats (Mineral):
    """Endmember 'cats' ('hp_tmt' EoS); includes a 'bragg_williams' property modifier."""

    def __init__(self):
        self.params = {'name': 'cats',
                       'formula': {'Al': 2.0, 'Ca': 1.0, 'O': 6.0, 'Si': 1.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -3310110.0,
                       'S_0': 135.0,
                       'V_0': 6.356e-05,
                       'Cp': [347.6, -0.006974, -1781600.0, -2757.5],
                       'a_0': 2.08e-05,
                       'K_0': 119200e6,
                       'Kprime_0': 5.19,
                       'Kdprime_0': -4.4e-11,
                       'n': 10.0,
                       'molar_mass': 0.2181229}
        self.property_modifiers = [['bragg_williams', {'deltaH': 3800.0,
                                                       'deltaV': 1e-07,
                                                       'Wh': 3800.0,
                                                       'Wv': 1e-07,
                                                       'n': 1.0,
                                                       'factor': 0.25}]]
        Mineral.__init__(self)


class caes (Mineral):
    """Endmember 'caes': tabulated parameters for the 'hp_tmt' equation of state."""

    def __init__(self):
        self.params = {'name': 'caes',
                       'formula': {'Al': 1.0, 'Ca': 0.5, 'O': 6.0, 'Si': 2.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -3002020.0,
                       'S_0': 127.0,
                       'V_0': 6.05e-05,
                       'Cp': [362.0, -0.016944, -175900.0, -3565.7],
                       'a_0': 2.31e-05,
                       'K_0': 119200e6,
                       'Kprime_0': 5.19,
                       'Kdprime_0': -4.4e-11,
                       'n': 9.5,
                       'molar_mass': 0.1991879}
        Mineral.__init__(self)
class rhod (Mineral):
    """Endmember 'rhod': tabulated parameters for the 'hp_tmt' equation of state."""

    def __init__(self):
        self.params = {'name': 'rhod',
                       'formula': {'Mn': 1.0, 'O': 3.0, 'Si': 1.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -1322380.0,
                       'S_0': 100.5,
                       'V_0': 3.494e-05,
                       'Cp': [138.4, 0.004088, -1936000.0, -538.9],
                       'a_0': 2.81e-05,
                       'K_0': 84000e6,
                       'Kprime_0': 4.0,
                       'Kdprime_0': -4.8e-11,
                       'n': 5.0,
                       'molar_mass': 0.1310217}
        Mineral.__init__(self)


class pxmn (Mineral):
    """Endmember 'pxmn': tabulated parameters for the 'hp_tmt' equation of state."""

    def __init__(self):
        self.params = {'name': 'pxmn',
                       'formula': {'Mn': 1.0, 'O': 3.0, 'Si': 1.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -1323160.0,
                       'S_0': 99.3,
                       'V_0': 3.472e-05,
                       'Cp': [138.4, 0.004088, -1936000.0, -538.9],
                       'a_0': 2.8e-05,
                       'K_0': 84000e6,
                       'Kprime_0': 4.0,
                       'Kdprime_0': -4.8e-11,
                       'n': 5.0,
                       'molar_mass': 0.1310217}
        Mineral.__init__(self)


class wo (Mineral):
    """Endmember 'wo': tabulated parameters for the 'hp_tmt' equation of state."""

    def __init__(self):
        self.params = {'name': 'wo',
                       'formula': {'Ca': 1.0, 'O': 3.0, 'Si': 1.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -1633770.0,
                       'S_0': 82.5,
                       'V_0': 3.993e-05,
                       'Cp': [159.3, 0.0, -967300.0, -1075.4],
                       'a_0': 2.54e-05,
                       'K_0': 79500e6,
                       'Kprime_0': 4.1,
                       'Kdprime_0': -5.2e-11,
                       'n': 5.0,
                       'molar_mass': 0.1161617}
        Mineral.__init__(self)


class pswo (Mineral):
    """Endmember 'pswo': tabulated parameters for the 'hp_tmt' equation of state."""

    def __init__(self):
        self.params = {'name': 'pswo',
                       'formula': {'Ca': 1.0, 'O': 3.0, 'Si': 1.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -1627960.0,
                       'S_0': 87.8,
                       'V_0': 4.008e-05,
                       'Cp': [157.8, 0.0, -967300.0, -1075.4],
                       'a_0': 2.85e-05,
                       'K_0': 110000e6,
                       'Kprime_0': 4.08,
                       'Kdprime_0': -3.7e-11,
                       'n': 5.0,
                       'molar_mass': 0.1161617}
        Mineral.__init__(self)


class wal (Mineral):
    """Endmember 'wal': tabulated parameters for the 'hp_tmt' equation of state."""

    def __init__(self):
        self.params = {'name': 'wal',
                       'formula': {'Ca': 1.0, 'O': 3.0, 'Si': 1.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -1625900.0,
                       'S_0': 83.5,
                       'V_0': 3.7633e-05,
                       'Cp': [159.3, 0.0, -967300.0, -1075.4],
                       'a_0': 2.54e-05,
                       'K_0': 79500e6,
                       'Kprime_0': 4.1,
                       'Kdprime_0': -5.2e-11,
                       'n': 5.0,
                       'molar_mass': 0.1161617}
        Mineral.__init__(self)


class tr (Mineral):
    """Endmember 'tr': tabulated parameters for the 'hp_tmt' equation of state."""

    def __init__(self):
        self.params = {'name': 'tr',
                       'formula': {'Ca': 2.0, 'H': 2.0, 'Mg': 5.0, 'O': 24.0, 'Si': 8.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -12304870.0,
                       'S_0': 553.0,
                       'V_0': 0.0002727,
                       'Cp': [1260.2, 0.00383, -11455000.0, -8237.6],
                       'a_0': 2.61e-05,
                       'K_0': 76200e6,
                       'Kprime_0': 4.1,
                       'Kdprime_0': -5.4e-11,
                       'n': 41.0,
                       'molar_mass': 0.81236648}
        Mineral.__init__(self)


class fact (Mineral):
    """Endmember 'fact': tabulated parameters for the 'hp_tmt' equation of state."""

    def __init__(self):
        self.params = {'name': 'fact',
                       'formula': {'Ca': 2.0, 'Fe': 5.0, 'H': 2.0, 'O': 24.0, 'Si': 8.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -10504120.0,
                       'S_0': 710.0,
                       'V_0': 0.0002842,
                       'Cp': [1290.0, 0.029992, -8447500.0, -8947.0],
                       'a_0': 2.88e-05,
                       'K_0': 76000e6,
                       'Kprime_0': 4.1,
                       'Kdprime_0': -5.4e-11,
                       'n': 41.0,
                       'molar_mass': 0.97006648}
        Mineral.__init__(self)


class ts (Mineral):
    """Endmember 'ts': tabulated parameters for the 'hp_tmt' equation of state."""

    def __init__(self):
        self.params = {'name': 'ts',
                       'formula': {'Al': 4.0, 'Ca': 2.0, 'H': 2.0, 'Mg': 3.0, 'O': 24.0, 'Si': 6.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -12555270.0,
                       'S_0': 533.0,
                       'V_0': 0.000268,
                       'Cp': [1244.8, 0.024348, -11965000.0, -8112.1],
                       'a_0': 2.66e-05,
                       'K_0': 76000e6,
                       'Kprime_0': 4.1,
                       'Kdprime_0': -5.4e-11,
                       'n': 41.0,
                       'molar_mass': 0.81551148}
        Mineral.__init__(self)
class parg (Mineral):
    """Endmember 'parg': tabulated parameters for the 'hp_tmt' equation of state."""

    def __init__(self):
        self.params = {'name': 'parg',
                       'formula': {'Al': 3.0, 'Ca': 2.0, 'H': 2.0, 'Mg': 4.0, 'Na': 1.0, 'O': 24.0, 'Si': 6.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -12664730.0,
                       'S_0': 635.0,
                       'V_0': 0.0002719,
                       'Cp': [1280.2, 0.022997, -12359500.0, -8065.8],
                       'a_0': 2.8e-05,
                       'K_0': 91200e6,
                       'Kprime_0': 4.09,
                       'Kdprime_0': -4.5e-11,
                       'n': 42.0,
                       'molar_mass': 0.83582478}
        Mineral.__init__(self)


class gl (Mineral):
    """Endmember 'gl': tabulated parameters for the 'hp_tmt' equation of state."""

    def __init__(self):
        self.params = {'name': 'gl',
                       'formula': {'Al': 2.0, 'H': 2.0, 'Mg': 3.0, 'Na': 2.0, 'O': 24.0, 'Si': 8.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -11960240.0,
                       'S_0': 530.0,
                       'V_0': 0.0002598,
                       'Cp': [1717.5, -0.12107, 7075000.0, -19272.0],
                       'a_0': 1.49e-05,
                       'K_0': 88300e6,
                       'Kprime_0': 4.09,
                       'Kdprime_0': -4.6e-11,
                       'n': 41.0,
                       'molar_mass': 0.78354308}
        Mineral.__init__(self)


class fgl (Mineral):
    """Endmember 'fgl': tabulated parameters for the 'hp_tmt' equation of state."""

    def __init__(self):
        self.params = {'name': 'fgl',
                       'formula': {'Al': 2.0, 'Fe': 3.0, 'H': 2.0, 'Na': 2.0, 'O': 24.0, 'Si': 8.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -10880210.0,
                       'S_0': 624.0,
                       'V_0': 0.0002659,
                       'Cp': [1762.9, -0.118992, 9423700.0, -20207.1],
                       'a_0': 1.83e-05,
                       'K_0': 89000e6,
                       'Kprime_0': 4.09,
                       'Kdprime_0': -4.6e-11,
                       'n': 41.0,
                       'molar_mass': 0.87816308}
        Mineral.__init__(self)


class rieb (Mineral):
    """Endmember 'rieb': tabulated parameters for the 'hp_tmt' equation of state."""

    def __init__(self):
        self.params = {'name': 'rieb',
                       'formula': {'Fe': 5.0, 'H': 2.0, 'Na': 2.0, 'O': 24.0, 'Si': 8.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -10024780.0,
                       'S_0': 695.0,
                       'V_0': 0.0002749,
                       'Cp': [1787.3, -0.124882, 9627100.0, -20275.5],
                       'a_0': 1.81e-05,
                       'K_0': 89000e6,
                       'Kprime_0': 4.09,
                       'Kdprime_0': -4.6e-11,
                       'n': 41.0,
                       'molar_mass': 0.93589008}
        Mineral.__init__(self)


class anth (Mineral):
    """Endmember 'anth': tabulated parameters for the 'hp_tmt' equation of state."""

    def __init__(self):
        self.params = {'name': 'anth',
                       'formula': {'H': 2.0, 'Mg': 7.0, 'O': 24.0, 'Si': 8.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -12066840.0,
                       'S_0': 537.0,
                       'V_0': 0.0002654,
                       'Cp': [1277.3, 0.025825, -9704600.0, -9074.7],
                       'a_0': 2.52e-05,
                       'K_0': 70000e6,
                       'Kprime_0': 4.11,
                       'Kdprime_0': -5.9e-11,
                       'n': 41.0,
                       'molar_mass': 0.78082048}
        Mineral.__init__(self)


class fanth (Mineral):
    """Endmember 'fanth': tabulated parameters for the 'hp_tmt' equation of state."""

    def __init__(self):
        self.params = {'name': 'fanth',
                       'formula': {'Fe': 7.0, 'H': 2.0, 'O': 24.0, 'Si': 8.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -9624520.0,
                       'S_0': 725.0,
                       'V_0': 0.0002787,
                       'Cp': [1383.1, 0.030669, -4224700.0, -11257.6],
                       'a_0': 2.74e-05,
                       'K_0': 70000e6,
                       'Kprime_0': 4.11,
                       'Kdprime_0': -5.9e-11,
                       'n': 41.0,
                       'molar_mass': 1.00160048}
        Mineral.__init__(self)


class cumm (Mineral):
    """Endmember 'cumm': tabulated parameters for the 'hp_tmt' equation of state."""

    def __init__(self):
        self.params = {'name': 'cumm',
                       'formula': {'H': 2.0, 'Mg': 7.0, 'O': 24.0, 'Si': 8.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -12064690.0,
                       'S_0': 538.0,
                       'V_0': 0.0002633,
                       'Cp': [1277.3, 0.025825, -9704600.0, -9074.7],
                       'a_0': 2.52e-05,
                       'K_0': 70000e6,
                       'Kprime_0': 4.11,
                       'Kdprime_0': -5.9e-11,
                       'n': 41.0,
                       'molar_mass': 0.78082048}
        Mineral.__init__(self)


class grun (Mineral):
    """Endmember 'grun': tabulated parameters for the 'hp_tmt' equation of state."""

    def __init__(self):
        self.params = {'name': 'grun',
                       'formula': {'Fe': 7.0, 'H': 2.0, 'O': 24.0, 'Si': 8.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -9607150.0,
                       'S_0': 735.0,
                       'V_0': 0.0002784,
                       'Cp': [1383.1, 0.030669, -4224700.0, -11257.6],
                       'a_0': 2.74e-05,
                       'K_0': 64800e6,
                       'Kprime_0': 4.12,
                       'Kdprime_0': -6.4e-11,
                       'n': 41.0,
                       'molar_mass': 1.00160048}
        Mineral.__init__(self)


class ged (Mineral):
    """Endmember 'ged': tabulated parameters for the 'hp_tmt' equation of state."""

    def __init__(self):
        self.params = {'name': 'ged',
                       'formula': {'Al': 4.0, 'H': 2.0, 'Mg': 5.0, 'O': 24.0, 'Si': 6.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -12329140.0,
                       'S_0': 517.0,
                       'V_0': 0.00025548,
                       'Cp': [1307.7, 0.023642, -9307400.0, -9799.0],
                       'a_0': 2.41e-05,
                       'K_0': 77000e6,
                       'Kprime_0': 4.1,
                       'Kdprime_0': -5.3e-11,
                       'n': 41.0,
                       'molar_mass': 0.78396548}
        Mineral.__init__(self)
class spr4 (Mineral):
    """Endmember 'spr4': tabulated parameters for the 'hp_tmt' equation of state."""

    def __init__(self):
        self.params = {'name': 'spr4',
                       'formula': {'Al': 8.0, 'Mg': 4.0, 'O': 20.0, 'Si': 2.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -11022020.0,
                       'S_0': 425.5,
                       'V_0': 0.000199,
                       'Cp': [1133.1, -0.007596, -8816600.0, -8180.6],
                       'a_0': 2.05e-05,
                       'K_0': 250000e6,
                       'Kprime_0': 4.04,
                       'Kdprime_0': -1.6e-11,
                       'n': 34.0,
                       'molar_mass': 0.689231}
        Mineral.__init__(self)


class spr5 (Mineral):
    """Endmember 'spr5': tabulated parameters for the 'hp_tmt' equation of state."""

    def __init__(self):
        self.params = {'name': 'spr5',
                       'formula': {'Al': 10.0, 'Mg': 3.0, 'O': 20.0, 'Si': 1.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -11135570.0,
                       'S_0': 419.5,
                       'V_0': 0.0001975,
                       'Cp': [1103.4, 0.001015, -10957000.0, -7409.2],
                       'a_0': 2.06e-05,
                       'K_0': 250000e6,
                       'Kprime_0': 4.04,
                       'Kdprime_0': -1.6e-11,
                       'n': 34.0,
                       'molar_mass': 0.6908035}
        Mineral.__init__(self)


class fspr (Mineral):
    """Endmember 'fspr': tabulated parameters for the 'hp_tmt' equation of state."""

    def __init__(self):
        self.params = {'name': 'fspr',
                       'formula': {'Al': 8.0, 'Fe': 4.0, 'O': 20.0, 'Si': 2.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -9659530.0,
                       'S_0': 485.0,
                       'V_0': 0.00019923,
                       'Cp': [1132.9, -0.007348, -10420200.0, -7036.6],
                       'a_0': 1.96e-05,
                       'K_0': 250000e6,
                       'Kprime_0': 4.04,
                       'Kdprime_0': -1.7e-11,
                       'n': 34.0,
                       'molar_mass': 0.815391}
        Mineral.__init__(self)


class mcar (Mineral):
    """Endmember 'mcar': tabulated parameters for the 'hp_tmt' equation of state."""

    def __init__(self):
        self.params = {'name': 'mcar',
                       'formula': {'Al': 2.0, 'H': 4.0, 'Mg': 1.0, 'O': 10.0, 'Si': 2.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -4771050.0,
                       'S_0': 221.5,
                       'V_0': 0.0001059,
                       'Cp': [683.0, -0.014054, 291000.0, -6976.4],
                       'a_0': 2.43e-05,
                       'K_0': 52500e6,
                       'Kprime_0': 4.14,
                       'Kdprime_0': -7.9e-11,
                       'n': 19.0,
                       'molar_mass': 0.29846476}
        Mineral.__init__(self)


class fcar (Mineral):
    """Endmember 'fcar': tabulated parameters for the 'hp_tmt' equation of state."""

    def __init__(self):
        self.params = {'name': 'fcar',
                       'formula': {'Al': 2.0, 'Fe': 1.0, 'H': 4.0, 'O': 10.0, 'Si': 2.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -4411440.0,
                       'S_0': 251.1,
                       'V_0': 0.00010695,
                       'Cp': [686.6, -0.012415, 186000.0, -6884.0],
                       'a_0': 2.21e-05,
                       'K_0': 52500e6,
                       'Kprime_0': 4.14,
                       'Kdprime_0': -7.9e-11,
                       'n': 19.0,
                       'molar_mass': 0.33000476}
        Mineral.__init__(self)


class deer (Mineral):
    """Endmember 'deer': tabulated parameters for the 'hp_tmt' equation of state."""

    def __init__(self):
        self.params = {'name': 'deer',
                       'formula': {'Fe': 18.0, 'H': 10.0, 'O': 50.0, 'Si': 12.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -18341400.0,
                       'S_0': 1650.0,
                       'V_0': 0.0005574,
                       'Cp': [3164.4, -0.027883, -5039100.0, -26721.0],
                       'a_0': 2.75e-05,
                       'K_0': 63000e6,
                       'Kprime_0': 4.12,
                       'Kdprime_0': -6.5e-11,
                       'n': 90.0,
                       'molar_mass': 2.1522854}
        Mineral.__init__(self)


class mu (Mineral):
    """Endmember 'mu': tabulated parameters for the 'hp_tmt' equation of state."""

    def __init__(self):
        self.params = {'name': 'mu',
                       'formula': {'Al': 3.0, 'H': 2.0, 'K': 1.0, 'O': 12.0, 'Si': 3.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -5976510.0,
                       'S_0': 292.0,
                       'V_0': 0.00014083,
                       'Cp': [756.4, -0.01984, -2170000.0, -6979.2],
                       'a_0': 3.07e-05,
                       'K_0': 49000e6,
                       'Kprime_0': 4.15,
                       'Kdprime_0': -8.5e-11,
                       'n': 21.0,
                       'molar_mass': 0.39830798}
        Mineral.__init__(self)


class cel (Mineral):
    """Endmember 'cel': tabulated parameters for the 'hp_tmt' equation of state."""

    def __init__(self):
        self.params = {'name': 'cel',
                       'formula': {'Al': 1.0, 'H': 2.0, 'K': 1.0, 'Mg': 1.0, 'O': 12.0, 'Si': 4.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -5834840.0,
                       'S_0': 290.0,
                       'V_0': 0.00013957,
                       'Cp': [741.2, -0.018748, -2368800.0, -6616.9],
                       'a_0': 3.07e-05,
                       'K_0': 70000e6,
                       'Kprime_0': 4.11,
                       'Kdprime_0': -5.9e-11,
                       'n': 21.0,
                       'molar_mass': 0.39673548}
        Mineral.__init__(self)


class fcel (Mineral):
    """Endmember 'fcel': tabulated parameters for the 'hp_tmt' equation of state."""

    def __init__(self):
        self.params = {'name': 'fcel',
                       'formula': {'Al': 1.0, 'Fe': 1.0, 'H': 2.0, 'K': 1.0, 'O': 12.0, 'Si': 4.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -5468490.0,
                       'S_0': 330.0,
                       'V_0': 0.0001407,
                       'Cp': [756.3, -0.019147, -1586100.0, -6928.7],
                       'a_0': 3.18e-05,
                       'K_0': 70000e6,
                       'Kprime_0': 4.11,
                       'Kdprime_0': -5.9e-11,
                       'n': 21.0,
                       'molar_mass': 0.42827548}
        Mineral.__init__(self)


class pa (Mineral):
    """Endmember 'pa': tabulated parameters for the 'hp_tmt' equation of state."""

    def __init__(self):
        self.params = {'name': 'pa',
                       'formula': {'Al': 3.0, 'H': 2.0, 'Na': 1.0, 'O': 12.0, 'Si': 3.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -5942840.0,
                       'S_0': 277.0,
                       'V_0': 0.00013211,
                       'Cp': [803.0, -0.03158, 217000.0, -8151.0],
                       'a_0': 3.7e-05,
                       'K_0': 51500e6,
                       'Kprime_0': 6.51,
                       'Kdprime_0': -1.26e-10,
                       'n': 21.0,
                       'molar_mass': 0.38219948}
        Mineral.__init__(self)
class ma (Mineral):
    """Endmember 'ma': tabulated parameters for the 'hp_tmt' equation of state."""

    def __init__(self):
        self.params = {'name': 'ma',
                       'formula': {'Al': 4.0, 'Ca': 1.0, 'H': 2.0, 'O': 12.0, 'Si': 2.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -6242070.0,
                       'S_0': 265.0,
                       'V_0': 0.00012964,
                       'Cp': [744.4, -0.0168, -2074400.0, -6783.2],
                       'a_0': 2.33e-05,
                       'K_0': 100000e6,
                       'Kprime_0': 4.08,
                       'Kdprime_0': -4.1e-11,
                       'n': 21.0,
                       'molar_mass': 0.39818368}
        Mineral.__init__(self)


class phl (Mineral):
    """Endmember 'phl': tabulated parameters for the 'hp_tmt' equation of state."""

    def __init__(self):
        self.params = {'name': 'phl',
                       'formula': {'Al': 1.0, 'H': 2.0, 'K': 1.0, 'Mg': 3.0, 'O': 12.0, 'Si': 3.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -6214880.0,
                       'S_0': 326.0,
                       'V_0': 0.00014964,
                       'Cp': [770.3, -0.036939, -2328900.0, -6531.6],
                       'a_0': 3.8e-05,
                       'K_0': 51300e6,
                       'Kprime_0': 7.33,
                       'Kdprime_0': -1.43e-10,
                       'n': 22.0,
                       'molar_mass': 0.41725998}
        Mineral.__init__(self)


class ann (Mineral):
    """Endmember 'ann': tabulated parameters for the 'hp_tmt' equation of state."""

    def __init__(self):
        self.params = {'name': 'ann',
                       'formula': {'Al': 1.0, 'Fe': 3.0, 'H': 2.0, 'K': 1.0, 'O': 12.0, 'Si': 3.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -5143720.0,
                       'S_0': 420.0,
                       'V_0': 0.00015432,
                       'Cp': [815.7, -0.034861, 19800.0, -7466.7],
                       'a_0': 3.8e-05,
                       'K_0': 51300e6,
                       'Kprime_0': 7.33,
                       'Kdprime_0': -1.43e-10,
                       'n': 22.0,
                       'molar_mass': 0.51187998}
        Mineral.__init__(self)


class mnbi (Mineral):
    """Endmember 'mnbi': tabulated parameters for the 'hp_tmt' equation of state."""

    def __init__(self):
        self.params = {'name': 'mnbi',
                       'formula': {'Al': 1.0, 'H': 2.0, 'K': 1.0, 'Mn': 3.0, 'O': 12.0, 'Si': 3.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -5477520.0,
                       'S_0': 433.0,
                       'V_0': 0.00015264,
                       'Cp': [809.9, -0.059213, -1514400.0, -6998.7],
                       'a_0': 3.8e-05,
                       'K_0': 53000e6,
                       'Kprime_0': 7.33,
                       'Kdprime_0': -1.43e-10,
                       'n': 22.0,
                       'molar_mass': 0.50915898}
        Mineral.__init__(self)


class east (Mineral):
    """Endmember 'east': tabulated parameters for the 'hp_tmt' equation of state."""

    def __init__(self):
        self.params = {'name': 'east',
                       'formula': {'Al': 3.0, 'H': 2.0, 'K': 1.0, 'Mg': 2.0, 'O': 12.0, 'Si': 2.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -6330380.0,
                       'S_0': 318.0,
                       'V_0': 0.00014738,
                       'Cp': [785.5, -0.038031, -2130300.0, -6893.7],
                       'a_0': 3.8e-05,
                       'K_0': 53000e6,
                       'Kprime_0': 7.33,
                       'Kdprime_0': -1.43e-10,
                       'n': 22.0,
                       'molar_mass': 0.41883248}
        Mineral.__init__(self)


class naph (Mineral):
    """Endmember 'naph': tabulated parameters for the 'hp_tmt' equation of state."""

    def __init__(self):
        self.params = {'name': 'naph',
                       'formula': {'Al': 1.0, 'H': 2.0, 'Mg': 3.0, 'Na': 1.0, 'O': 12.0, 'Si': 3.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -6172010.0,
                       'S_0': 318.0,
                       'V_0': 0.0001445,
                       'Cp': [773.5, -0.040229, -2597900.0, -6512.6],
                       'a_0': 3.28e-05,
                       'K_0': 51300e6,
                       'Kprime_0': 7.33,
                       'Kdprime_0': -1.43e-10,
                       'n': 22.0,
                       'molar_mass': 0.40115148}
        Mineral.__init__(self)


class clin (Mineral):
    """Endmember 'clin': tabulated parameters for the 'hp_tmt' equation of state."""

    def __init__(self):
        self.params = {'name': 'clin',
                       'formula': {'Al': 2.0, 'H': 8.0, 'Mg': 5.0, 'O': 18.0, 'Si': 3.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -8909160.0,
                       'S_0': 437.0,
                       'V_0': 0.0002114,
                       'Cp': [1170.8, -0.001508, -3825800.0, -10315.0],
                       'a_0': 2.04e-05,
                       'K_0': 87000e6,
                       'Kprime_0': 4.09,
                       'Kdprime_0': -4.7e-11,
                       'n': 36.0,
                       'molar_mass': 0.55579722}
        Mineral.__init__(self)


class ames (Mineral):
    """Endmember 'ames': tabulated parameters for the 'hp_tmt' equation of state."""

    def __init__(self):
        self.params = {'name': 'ames',
                       'formula': {'Al': 4.0, 'H': 8.0, 'Mg': 4.0, 'O': 18.0, 'Si': 2.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -9040460.0,
                       'S_0': 412.0,
                       'V_0': 0.0002071,
                       'Cp': [1186.0, -0.002599, -3627200.0, -10677.0],
                       'a_0': 2e-05,
                       'K_0': 87000e6,
                       'Kprime_0': 4.09,
                       'Kdprime_0': -4.7e-11,
                       'n': 36.0,
                       'molar_mass': 0.55736972}
        Mineral.__init__(self)


class afchl (Mineral):
    """Endmember 'afchl': tabulated parameters for the 'hp_tmt' equation of state."""

    def __init__(self):
        self.params = {'name': 'afchl',
                       'formula': {'H': 8.0, 'Mg': 6.0, 'O': 18.0, 'Si': 4.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -8727860.0,
                       'S_0': 439.0,
                       'V_0': 0.0002157,
                       'Cp': [1155.0, -0.000417, -4024400.0, -9952.9],
                       'a_0': 2.04e-05,
                       'K_0': 87000e6,
                       'Kprime_0': 4.09,
                       'Kdprime_0': -4.7e-11,
                       'n': 36.0,
                       'molar_mass': 0.55422472}
        Mineral.__init__(self)
class daph (Mineral):
    """Endmember 'daph': tabulated parameters for the 'hp_tmt' equation of state."""

    def __init__(self):
        self.params = {'name': 'daph',
                       'formula': {'Al': 2.0, 'Fe': 5.0, 'H': 8.0, 'O': 18.0, 'Si': 3.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -7116910.0,
                       'S_0': 584.0,
                       'V_0': 0.0002162,
                       'Cp': [1192.0, -0.00594, -4826400.0, -9768.3],
                       'a_0': 2.27e-05,
                       'K_0': 87000e6,
                       'Kprime_0': 4.09,
                       'Kdprime_0': -4.7e-11,
                       'n': 36.0,
                       'molar_mass': 0.71349722}
        Mineral.__init__(self)


class mnchl (Mineral):
    """Endmember 'mnchl': tabulated parameters for the 'hp_tmt' equation of state."""

    def __init__(self):
        self.params = {'name': 'mnchl',
                       'formula': {'Al': 2.0, 'H': 8.0, 'Mn': 5.0, 'O': 18.0, 'Si': 3.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -7702320.0,
                       'S_0': 595.0,
                       'V_0': 0.0002259,
                       'Cp': [1136.5, -0.005243, -5548100.0, -8911.5],
                       'a_0': 2.23e-05,
                       'K_0': 87000e6,
                       'Kprime_0': 4.09,
                       'Kdprime_0': -4.7e-11,
                       'n': 36.0,
                       'molar_mass': 0.70896222}
        Mineral.__init__(self)


class sud (Mineral):
    """Endmember 'sud': tabulated parameters for the 'hp_tmt' equation of state."""

    def __init__(self):
        self.params = {'name': 'sud',
                       'formula': {'Al': 4.0, 'H': 8.0, 'Mg': 2.0, 'O': 18.0, 'Si': 3.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -8626540.0,
                       'S_0': 395.0,
                       'V_0': 0.000203,
                       'Cp': [1436.1, -0.048749, -2748500.0, -13764.0],
                       'a_0': 1.99e-05,
                       'K_0': 87000e6,
                       'Kprime_0': 4.09,
                       'Kdprime_0': -4.7e-11,
                       'n': 35.0,
                       'molar_mass': 0.53684522}
        Mineral.__init__(self)


class fsud (Mineral):
    """Endmember 'fsud': tabulated parameters for the 'hp_tmt' equation of state."""

    def __init__(self):
        self.params = {'name': 'fsud',
                       'formula': {'Al': 4.0, 'Fe': 2.0, 'H': 8.0, 'O': 18.0, 'Si': 3.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -7899850.0,
                       'S_0': 456.0,
                       'V_0': 0.000204,
                       'Cp': [1466.3, -0.047365, -1182800.0, -14388.0],
                       'a_0': 2.08e-05,
                       'K_0': 87000e6,
                       'Kprime_0': 4.09,
                       'Kdprime_0': -4.7e-11,
                       'n': 35.0,
                       'molar_mass': 0.59992522}
        Mineral.__init__(self)


class prl (Mineral):
    """Endmember 'prl': tabulated parameters for the 'hp_tmt' equation of state."""

    def __init__(self):
        self.params = {'name': 'prl',
                       'formula': {'Al': 2.0, 'H': 2.0, 'O': 12.0, 'Si': 4.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -5640610.0,
                       'S_0': 239.0,
                       'V_0': 0.00012804,
                       'Cp': [784.5, -0.042948, 1251000.0, -8495.9],
                       'a_0': 4.5e-05,
                       'K_0': 37000e6,
                       'Kprime_0': 10.0,
                       'Kdprime_0': -2.71e-10,
                       'n': 20.0,
                       'molar_mass': 0.36031368}
        Mineral.__init__(self)


class ta (Mineral):
    """Endmember 'ta': tabulated parameters for the 'hp_tmt' equation of state."""

    def __init__(self):
        self.params = {'name': 'ta',
                       'formula': {'H': 2.0, 'Mg': 3.0, 'O': 12.0, 'Si': 4.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -5897170.0,
                       'S_0': 259.0,
                       'V_0': 0.00013665,
                       'Cp': [622.2, 0.0, -6385500.0, -3916.3],
                       'a_0': 1.8e-05,
                       'K_0': 43000e6,
                       'Kprime_0': 6.17,
                       'Kdprime_0': -1.44e-10,
                       'n': 21.0,
                       'molar_mass': 0.37926568}
        Mineral.__init__(self)


class fta (Mineral):
    """Endmember 'fta': tabulated parameters for the 'hp_tmt' equation of state."""

    def __init__(self):
        self.params = {'name': 'fta',
                       'formula': {'Fe': 3.0, 'H': 2.0, 'O': 12.0, 'Si': 4.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -4798540.0,
                       'S_0': 352.0,
                       'V_0': 0.00014225,
                       'Cp': [579.7, 0.039494, -6459300.0, -3088.1],
                       'a_0': 1.8e-05,
                       'K_0': 43000e6,
                       'Kprime_0': 6.17,
                       'Kdprime_0': -1.44e-10,
                       'n': 21.0,
                       'molar_mass': 0.47388568}
        Mineral.__init__(self)


class tats (Mineral):
    """Endmember 'tats': tabulated parameters for the 'hp_tmt' equation of state."""

    def __init__(self):
        self.params = {'name': 'tats',
                       'formula': {'Al': 2.0, 'H': 2.0, 'Mg': 2.0, 'O': 12.0, 'Si': 3.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -6001290.0,
                       'S_0': 259.0,
                       'V_0': 0.0001351,
                       'Cp': [549.5, 0.036324, -8606600.0, -2515.3],
                       'a_0': 1.8e-05,
                       'K_0': 43000e6,
                       'Kprime_0': 6.17,
                       'Kdprime_0': -1.44e-10,
                       'n': 21.0,
                       'molar_mass': 0.38083818}
        Mineral.__init__(self)


class tap (Mineral):
    """Endmember 'tap': tabulated parameters for the 'hp_tmt' equation of state."""

    def __init__(self):
        self.params = {'name': 'tap',
                       'formula': {'Al': 2.0, 'H': 2.0, 'O': 12.0, 'Si': 4.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -5649780.0,
                       'S_0': 235.0,
                       'V_0': 0.0001345,
                       'Cp': [784.5, -0.042948, 1251000.0, -8495.9],
                       'a_0': 4.5e-05,
                       'K_0': 37000e6,
                       'Kprime_0': 10.0,
                       'Kdprime_0': -2.71e-10,
                       'n': 20.0,
                       'molar_mass': 0.36031368}
        Mineral.__init__(self)
class minn (Mineral):
    """Endmember 'minn': tabulated parameters for the 'hp_tmt' equation of state."""

    def __init__(self):
        self.params = {'name': 'minn',
                       'formula': {'Fe': 3.0, 'H': 2.0, 'O': 12.0, 'Si': 4.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -4819310.0,
                       'S_0': 355.0,
                       'V_0': 0.00014851,
                       'Cp': [579.7, 0.039494, -6459300.0, -3088.1],
                       'a_0': 1.8e-05,
                       'K_0': 43000e6,
                       'Kprime_0': 6.17,
                       'Kdprime_0': -1.44e-10,
                       'n': 21.0,
                       'molar_mass': 0.47388568}
        Mineral.__init__(self)


class minm (Mineral):
    """Endmember 'minm': tabulated parameters for the 'hp_tmt' equation of state."""

    def __init__(self):
        self.params = {'name': 'minm',
                       'formula': {'H': 2.0, 'Mg': 3.0, 'O': 12.0, 'Si': 4.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -5866000.0,
                       'S_0': 263.9,
                       'V_0': 0.00014291,
                       'Cp': [622.2, 0.0, -6385500.0, -3916.3],
                       'a_0': 1.8e-05,
                       'K_0': 43000e6,
                       'Kprime_0': 6.17,
                       'Kdprime_0': -1.44e-10,
                       'n': 21.0,
                       'molar_mass': 0.37926568}
        Mineral.__init__(self)


class kao (Mineral):
    """Endmember 'kao': tabulated parameters for the 'hp_tmt' equation of state."""

    def __init__(self):
        self.params = {'name': 'kao',
                       'formula': {'Al': 2.0, 'H': 4.0, 'O': 9.0, 'Si': 2.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -4122000.0,
                       'S_0': 203.7,
                       'V_0': 9.934e-05,
                       'Cp': [436.7, -0.034295, -4055900.0, -2699.1],
                       'a_0': 2.51e-05,
                       'K_0': 64500e6,
                       'Kprime_0': 4.12,
                       'Kdprime_0': -6.4e-11,
                       'n': 17.0,
                       'molar_mass': 0.25816036}
        Mineral.__init__(self)


class pre (Mineral):
    """Endmember 'pre': tabulated parameters for the 'hp_tmt' equation of state."""

    def __init__(self):
        self.params = {'name': 'pre',
                       'formula': {'Al': 2.0, 'Ca': 2.0, 'H': 2.0, 'O': 12.0, 'Si': 3.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -6202170.0,
                       'S_0': 292.8,
                       'V_0': 0.00014026,
                       'Cp': [724.9, -0.013865, -2059000.0, -6323.9],
                       'a_0': 1.58e-05,
                       'K_0': 109300e6,
                       'Kprime_0': 4.01,
                       'Kdprime_0': -3.7e-11,
                       'n': 21.0,
                       'molar_mass': 0.41238418}
        Mineral.__init__(self)


class fpre (Mineral):
    """Endmember 'fpre': tabulated parameters for the 'hp_tmt' equation of state."""

    def __init__(self):
        self.params = {'name': 'fpre',
                       'formula': {'Al': 1.0, 'Ca': 2.0, 'Fe': 1.0, 'H': 2.0, 'O': 12.0, 'Si': 3.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -5766640.0,
                       'S_0': 320.0,
                       'V_0': 0.000148,
                       'Cp': [737.1, -0.01681, -1957300.0, -6358.1],
                       'a_0': 1.58e-05,
                       'K_0': 109300e6,
                       'Kprime_0': 4.01,
                       'Kdprime_0': -3.7e-11,
                       'n': 21.0,
                       'molar_mass': 0.44124768}
        Mineral.__init__(self)


class chr (Mineral):
    """Endmember 'chr': tabulated parameters for the 'hp_tmt' equation of state."""

    def __init__(self):
        self.params = {'name': 'chr',
                       'formula': {'H': 4.0, 'Mg': 3.0, 'O': 9.0, 'Si': 2.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -4361000.0,
                       'S_0': 221.3,
                       'V_0': 0.00010746,
                       'Cp': [624.7, -0.02077, -1721800.0, -5619.4],
                       'a_0': 2.2e-05,
                       'K_0': 62800e6,
                       'Kprime_0': 4.0,
                       'Kdprime_0': -6.4e-11,
                       'n': 18.0,
                       'molar_mass': 0.27711236}
        Mineral.__init__(self)


class liz (Mineral):
    """Endmember 'liz': tabulated parameters for the 'hp_tmt' equation of state."""

    def __init__(self):
        self.params = {'name': 'liz',
                       'formula': {'H': 4.0, 'Mg': 3.0, 'O': 9.0, 'Si': 2.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -4369190.0,
                       'S_0': 212.0,
                       'V_0': 0.00010645,
                       'Cp': [614.7, -0.02077, -1721800.0, -5619.4],
                       'a_0': 2.2e-05,
                       'K_0': 71000e6,
                       'Kprime_0': 3.2,
                       'Kdprime_0': -4.5e-11,
                       'n': 18.0,
                       'molar_mass': 0.27711236}
        Mineral.__init__(self)


class glt (Mineral):
    """Endmember 'glt': tabulated parameters for the 'hp_tmt' equation of state."""

    def __init__(self):
        self.params = {'name': 'glt',
                       'formula': {'Fe': 3.0, 'H': 4.0, 'O': 9.0, 'Si': 2.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -3297620.0,
                       'S_0': 310.0,
                       'V_0': 0.0001198,
                       'Cp': [576.4, 0.002984, -3757000.0, -4166.2],
                       'a_0': 2.28e-05,
                       'K_0': 63000e6,
                       'Kprime_0': 4.0,
                       'Kdprime_0': -6.3e-11,
                       'n': 18.0,
                       'molar_mass': 0.37173236}
        Mineral.__init__(self)


class fstp (Mineral):
    """Endmember 'fstp': tabulated parameters for the 'hp_tmt' equation of state."""

    def __init__(self):
        self.params = {'name': 'fstp',
                       'formula': {'Al': 2.0, 'Fe': 5.0, 'H': 12.5, 'K': 0.5, 'O': 30.5, 'Si': 8.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -12551070.0,
                       'S_0': 930.2,
                       'V_0': 0.00037239,
                       'Cp': [1944.3, -0.012289, -4840200.0, -16635.0],
                       'a_0': 3.68e-05,
                       'K_0': 51300e6,
                       'Kprime_0': 7.33,
                       'Kdprime_0': -1.43e-10,
                       'n': 58.5,
                       'molar_mass': 1.0780021}
        Mineral.__init__(self)


class mstp (Mineral):
    """Endmember 'mstp': tabulated parameters for the 'hp_tmt' equation of state."""

    def __init__(self):
        self.params = {'name': 'mstp',
                       'formula': {'Al': 2.0, 'H': 12.5, 'K': 0.5, 'Mg': 5.0, 'O': 30.5, 'Si': 8.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -14288380.0,
                       'S_0': 847.4,
                       'V_0': 0.00036577,
                       'Cp': [1862.2, -0.014018, -8983100.0, -14923.0],
                       'a_0': 3.71e-05,
                       'K_0': 51300e6,
                       'Kprime_0': 7.33,
                       'Kdprime_0': -1.43e-10,
                       'n': 58.5,
                       'molar_mass': 0.9203021}
        Mineral.__init__(self)
class atg (Mineral):
def __init__(self):
self.params = {'name': 'atg',
'formula': {'H': 62.0, 'Mg': 48.0, 'O': 147.0, 'Si': 34.0},
'equation_of_state': 'hp_tmt',
'H_0': -71404690.0,
'S_0': 3620.0,
'V_0': 0.0017548,
'Cp': [9621.0, -0.091183, -35941600.0, -83034.2],
'a_0': 2.8e-05,
'K_0': 63100e6,
'Kprime_0': 5.92,
'Kdprime_0': -9.4e-11,
'n': 291.0,
'molar_mass': 4.53595108}
Mineral.__init__(self)
class ab (Mineral):
def __init__(self):
self.params = {'name': 'ab',
'formula': {'Al': 1.0, 'Na': 1.0, 'O': 8.0, 'Si': 3.0},
'equation_of_state': 'hp_tmt',
'H_0': -3935480.0,
'S_0': 207.4,
'V_0': 0.00010067,
'Cp': [452.0, -0.013364, -1275900.0, -3953.6],
'a_0': 2.36e-05,
'K_0': 54100e6,
'Kprime_0': 5.91,
'Kdprime_0': -1.09e-10,
'n': 13.0,
'molar_mass': 0.262223}
self.property_modifiers = [['bragg_williams', {'deltaH': 14000.0,
'deltaV': 4.2e-07,
'Wh': 13000.0,
'Wv': 4.2e-07,
'n': 3.0,
'factor': 0.9}]]
Mineral.__init__(self)
class abh (Mineral):
def __init__(self):
self.params = {'name': 'abh',
'formula': {'Al': 1.0, 'Na': 1.0, 'O': 8.0, 'Si': 3.0},
'equation_of_state': 'hp_tmt',
'H_0': -3921480.0,
'S_0': 224.3,
'V_0': 0.00010105,
'Cp': [452.0, -0.013364, -1275900.0, -3953.6],
'a_0': 2.41e-05,
'K_0': 54100e6,
'Kprime_0': 5.91,
'Kdprime_0': -1.09e-10,
'n': 13.0,
'molar_mass': 0.262223}
Mineral.__init__(self)
class mic (Mineral):
def __init__(self):
self.params = {'name': 'mic',
'formula': {'Al': 1.0, 'K': 1.0, 'O': 8.0, 'Si': 3.0},
'equation_of_state': 'hp_tmt',
'H_0': -3975350.0,
'S_0': 214.3,
'V_0': 0.00010871,
'Cp': [448.8, -0.010075, -1007300.0, -3973.1],
'a_0': 1.66e-05,
'K_0': 58300e6,
'Kprime_0': 4.02,
'Kdprime_0': -6.9e-11,
'n': 13.0,
'molar_mass': 0.2783315}
Mineral.__init__(self)
class san (Mineral):
def __init__(self):
self.params = {'name': 'san',
'formula': {'Al': 1.0, 'K': 1.0, 'O': 8.0, 'Si': 3.0},
'equation_of_state': 'hp_tmt',
'H_0': -3966700.0,
'S_0': 214.3,
'V_0': 0.00010871,
'Cp': [448.8, -0.010075, -1007300.0, -3973.1],
'a_0': 1.66e-05,
'K_0': 58300e6,
'Kprime_0': 4.02,
'Kdprime_0': -6.9e-11,
'n': 13.0,
'molar_mass': 0.2783315}
self.property_modifiers = [['bragg_williams', {'deltaH': 8650.0,
'deltaV': 2.4e-07,
'Wh': 8500.0,
'Wv': 2.4e-07,
'n': 3.0,
'factor': 0.8}]]
Mineral.__init__(self)
class an (Mineral):
    """Endmember 'an' data ('hp_tmt' EoS) with a 'bragg_williams' property modifier."""
    def __init__(self):
        self.params = {'name': 'an',
                       'formula': {'Al': 2.0, 'Ca': 1.0, 'O': 8.0, 'Si': 2.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -4232690.0,
                       'S_0': 200.5,
                       'V_0': 0.00010079,
                       'Cp': [370.5, 0.01001, -4339100.0, -1960.6],
                       'a_0': 1.41e-05,
                       'K_0': 86000e6,
                       'Kprime_0': 4.09,
                       'Kdprime_0': -4.8e-11,
                       'n': 13.0,
                       'molar_mass': 0.2782072}
        self.property_modifiers = [['bragg_williams', {'deltaH': 42010.0,
                                                       'deltaV': 1e-06,
                                                       'Wh': 42000.0,
                                                       'Wv': 1e-06,
                                                       'n': 1.0,
                                                       'factor': 2.0}]]
        Mineral.__init__(self)
class kcm (Mineral):
    """Endmember 'kcm' data ('hp_tmt' equation of state)."""
    def __init__(self):
        self.params = {'name': 'kcm',
                       'formula': {'Al': 1.0, 'H': 2.0, 'K': 1.0, 'O': 9.0, 'Si': 3.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -4232640.0,
                       'S_0': 281.5,
                       'V_0': 0.00011438,
                       'Cp': [536.5, -0.01009, -980400.0, -4735.0],
                       'a_0': 3.21e-05,
                       'K_0': 42500e6,
                       'Kprime_0': 2.0,
                       'Kdprime_0': -4.7e-11,
                       'n': 16.0,
                       'molar_mass': 0.29634678}
        Mineral.__init__(self)
class wa (Mineral):
    """Endmember 'wa' data ('hp_tmt' equation of state)."""
    def __init__(self):
        self.params = {'name': 'wa',
                       'formula': {'K': 2.0, 'O': 9.0, 'Si': 4.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -4271890.0,
                       'S_0': 254.0,
                       'V_0': 0.00010844,
                       'Cp': [499.1, 0.0, 0.0, -4350.1],
                       'a_0': 2.66e-05,
                       'K_0': 90000e6,
                       'Kprime_0': 4.0,
                       'Kdprime_0': -4.4e-11,
                       'n': 15.0,
                       'molar_mass': 0.3345332}
        Mineral.__init__(self)
class hol (Mineral):
    """Endmember 'hol' data ('hp_tmt' equation of state)."""
    def __init__(self):
        self.params = {'name': 'hol',
                       'formula': {'Al': 1.0, 'K': 1.0, 'O': 8.0, 'Si': 3.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -3791960.0,
                       'S_0': 166.2,
                       'V_0': 7.128e-05,
                       'Cp': [417.6, -0.003617, -4748100.0, -2819.9],
                       'a_0': 2.8e-05,
                       'K_0': 180000e6,
                       'Kprime_0': 4.0,
                       'Kdprime_0': -2.2e-11,
                       'n': 13.0,
                       'molar_mass': 0.2783315}
        Mineral.__init__(self)
class q (Mineral):
    """Endmember 'q' data ('hp_tmt' EoS) with a 'landau_hp' property modifier."""
    def __init__(self):
        self.params = {'name': 'q',
                       'formula': {'O': 2.0, 'Si': 1.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -910720.0,
                       'S_0': 41.43,
                       'V_0': 2.269e-05,
                       'Cp': [92.9, -0.000642, -714900.0, -716.1],
                       'a_0': 0.0,
                       'K_0': 73000e6,
                       'Kprime_0': 6.0,
                       'Kdprime_0': -8.2e-11,
                       'n': 3.0,
                       'molar_mass': 0.0600843}
        self.property_modifiers = [['landau_hp', {'P_0': 100000.0,
                                                  'T_0': 298.15,
                                                  'Tc_0': 847.0,
                                                  'S_D': 4.95,
                                                  'V_D': 1.188e-06}]]
        Mineral.__init__(self)
class trd (Mineral):
    """Endmember 'trd' data ('hp_tmt' equation of state)."""
    def __init__(self):
        self.params = {'name': 'trd',
                       'formula': {'O': 2.0, 'Si': 1.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -907110.0,
                       'S_0': 44.1,
                       'V_0': 2.8e-05,
                       'Cp': [74.9, 0.0031, -1174000.0, -236.7],
                       'a_0': 0.0,
                       'K_0': 15000e6,
                       'Kprime_0': 4.36,
                       'Kdprime_0': -2.91e-10,
                       'n': 3.0,
                       'molar_mass': 0.0600843}
        Mineral.__init__(self)
class crst (Mineral):
    """Endmember 'crst' data ('hp_tmt' equation of state)."""
    def __init__(self):
        self.params = {'name': 'crst',
                       'formula': {'O': 2.0, 'Si': 1.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -904270.0,
                       'S_0': 50.86,
                       'V_0': 2.745e-05,
                       'Cp': [72.7, 0.001304, -4129000.0, 0.0],
                       'a_0': 0.0,
                       'K_0': 16000e6,
                       'Kprime_0': 4.35,
                       'Kdprime_0': -2.72e-10,
                       'n': 3.0,
                       'molar_mass': 0.0600843}
        Mineral.__init__(self)
class coe (Mineral):
    """Endmember 'coe' data ('hp_tmt' equation of state)."""
    def __init__(self):
        self.params = {'name': 'coe',
                       'formula': {'O': 2.0, 'Si': 1.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -907000.0,
                       'S_0': 39.6,
                       'V_0': 2.064e-05,
                       'Cp': [107.8, -0.003279, -190300.0, -1041.6],
                       'a_0': 1.23e-05,
                       'K_0': 97900e6,
                       'Kprime_0': 4.19,
                       'Kdprime_0': -4.3e-11,
                       'n': 3.0,
                       'molar_mass': 0.0600843}
        Mineral.__init__(self)
class stv (Mineral):
    """Endmember 'stv' data ('hp_tmt' equation of state)."""
    def __init__(self):
        self.params = {'name': 'stv',
                       'formula': {'O': 2.0, 'Si': 1.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -876390.0,
                       'S_0': 24.0,
                       'V_0': 1.401e-05,
                       'Cp': [68.1, 0.00601, -1978200.0, -82.1],
                       'a_0': 1.58e-05,
                       'K_0': 309000e6,
                       'Kprime_0': 4.6,
                       'Kdprime_0': -1.5e-11,
                       'n': 3.0,
                       'molar_mass': 0.0600843}
        Mineral.__init__(self)
class ne (Mineral):
    """Endmember 'ne' data ('hp_tmt' EoS) with a 'landau_hp' property modifier."""
    def __init__(self):
        self.params = {'name': 'ne',
                       'formula': {'Al': 1.0, 'Na': 1.0, 'O': 4.0, 'Si': 1.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -2094560.0,
                       'S_0': 124.4,
                       'V_0': 5.419e-05,
                       'Cp': [272.7, -0.012398, 0.0, -2763.1],
                       'a_0': 4.63e-05,
                       'K_0': 46500e6,
                       'Kprime_0': 4.16,
                       'Kdprime_0': -8.9e-11,
                       'n': 7.0,
                       'molar_mass': 0.1420544}
        self.property_modifiers = [['landau_hp', {'P_0': 100000.0,
                                                  'T_0': 298.15,
                                                  'Tc_0': 467.0,
                                                  'S_D': 10.0,
                                                  'V_D': 8e-07}]]
        Mineral.__init__(self)
class cg (Mineral):
    """Endmember 'cg' data ('hp_tmt' equation of state)."""
    def __init__(self):
        self.params = {'name': 'cg',
                       'formula': {'Al': 1.0, 'Na': 1.0, 'O': 4.0, 'Si': 1.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -2091719.9999999998,
                       'S_0': 118.7,
                       'V_0': 5.603e-05,
                       'Cp': [116.1, 0.086021, -1992700.0, 0.0],
                       'a_0': 4.5e-05,
                       'K_0': 46500e6,
                       'Kprime_0': 4.16,
                       'Kdprime_0': -8.9e-11,
                       'n': 7.0,
                       'molar_mass': 0.1420544}
        Mineral.__init__(self)
class cgh (Mineral):
    """Endmember 'cgh' data ('hp_tmt' equation of state)."""
    def __init__(self):
        self.params = {'name': 'cgh',
                       'formula': {'Al': 1.0, 'Na': 1.0, 'O': 4.0, 'Si': 1.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -2078010.0000000002,
                       'S_0': 135.0,
                       'V_0': 5.67e-05,
                       'Cp': [229.2, 0.011876, 0.0, -1970.7],
                       'a_0': 4.67e-05,
                       'K_0': 46500e6,
                       'Kprime_0': 4.16,
                       'Kdprime_0': -8.9e-11,
                       'n': 7.0,
                       'molar_mass': 0.1420544}
        Mineral.__init__(self)
class sdl (Mineral):
    """Endmember 'sdl' data ('hp_tmt' equation of state)."""
    def __init__(self):
        self.params = {'name': 'sdl',
                       'formula': {'Al': 6.0, 'Cl': 2.0, 'Na': 8.0, 'O': 24.0, 'Si': 6.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -13405530.0,
                       'S_0': 910.0,
                       'V_0': 0.0004213,
                       'Cp': [1532.7, 0.047747, -2972800.0, -12427.0],
                       'a_0': 4.63e-05,
                       'K_0': 46500e6,
                       'Kprime_0': 4.16,
                       'Kdprime_0': -8.9e-11,
                       'n': 46.0,
                       'molar_mass': 0.969212}
        Mineral.__init__(self)
class kls (Mineral):
    """Endmember 'kls' data ('hp_tmt' equation of state)."""
    def __init__(self):
        self.params = {'name': 'kls',
                       'formula': {'Al': 1.0, 'K': 1.0, 'O': 4.0, 'Si': 1.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -2122960.0,
                       'S_0': 136.0,
                       'V_0': 6.052e-05,
                       'Cp': [242.0, -0.004482, -895800.0, -1935.8],
                       'a_0': 3.16e-05,
                       'K_0': 51400e6,
                       'Kprime_0': 2.0,
                       'Kdprime_0': -3.9e-11,
                       'n': 7.0,
                       'molar_mass': 0.1581629}
        Mineral.__init__(self)
class lc (Mineral):
    """Endmember 'lc' data ('hp_tmt' EoS) with a 'bragg_williams' property modifier."""
    def __init__(self):
        self.params = {'name': 'lc',
                       'formula': {'Al': 1.0, 'K': 1.0, 'O': 6.0, 'Si': 2.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -3029270.0,
                       'S_0': 198.5,
                       'V_0': 8.826e-05,
                       'Cp': [369.8, -0.016332, 684700.0, -3683.1],
                       'a_0': 1.85e-05,
                       'K_0': 45000e6,
                       'Kprime_0': 5.7,
                       'Kdprime_0': -1.27e-10,
                       'n': 10.0,
                       'molar_mass': 0.2182472}
        self.property_modifiers = [['bragg_williams', {'deltaH': 11610.0,
                                                       'deltaV': 4e-06,
                                                       'Wh': 11600.0,
                                                       'Wv': 4e-06,
                                                       'n': 2.0,
                                                       'factor': 0.7}]]
        Mineral.__init__(self)
class me (Mineral):
    """Endmember 'me' data ('hp_tmt' equation of state)."""
    def __init__(self):
        self.params = {'name': 'me',
                       'formula': {'Al': 6.0, 'C': 1.0, 'Ca': 4.0, 'O': 27.0, 'Si': 6.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -13841820.0,
                       'S_0': 752.0,
                       'V_0': 0.00033985,
                       'Cp': [1359.0, 0.036442, -8594700.0, -9598.2],
                       'a_0': 1.81e-05,
                       'K_0': 87000e6,
                       'Kprime_0': 4.09,
                       'Kdprime_0': -4.7e-11,
                       'n': 44.0,
                       'molar_mass': 0.9347085}
        Mineral.__init__(self)
class wrk (Mineral):
    """Endmember 'wrk' data ('hp_tmt' equation of state)."""
    def __init__(self):
        self.params = {'name': 'wrk',
                       'formula': {'Al': 2.0, 'Ca': 1.0, 'H': 4.0, 'O': 14.0, 'Si': 4.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -6662450.0,
                       'S_0': 380.0,
                       'V_0': 0.0001904,
                       'Cp': [838.3, -0.02146, -2272000.0, -7292.3],
                       'a_0': 1.49e-05,
                       'K_0': 86000e6,
                       'Kprime_0': 4.09,
                       'Kdprime_0': -4.8e-11,
                       'n': 25.0,
                       'molar_mass': 0.43440636}
        Mineral.__init__(self)
class lmt (Mineral):
    """Endmember 'lmt' data ('hp_tmt' equation of state)."""
    def __init__(self):
        self.params = {'name': 'lmt',
                       'formula': {'Al': 2.0, 'Ca': 1.0, 'H': 8.0, 'O': 16.0, 'Si': 4.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -7262700.0,
                       'S_0': 465.0,
                       'V_0': 0.0002037,
                       'Cp': [1013.4, -0.021413, -2235800.0, -8806.7],
                       'a_0': 1.37e-05,
                       'K_0': 86000e6,
                       'Kprime_0': 4.09,
                       'Kdprime_0': -4.8e-11,
                       'n': 31.0,
                       'molar_mass': 0.47043692}
        Mineral.__init__(self)
class heu (Mineral):
    """Endmember 'heu' data ('hp_tmt' equation of state)."""
    def __init__(self):
        self.params = {'name': 'heu',
                       'formula': {'Al': 2.0, 'Ca': 1.0, 'H': 12.0, 'O': 24.0, 'Si': 7.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -10545220.0,
                       'S_0': 783.0,
                       'V_0': 0.000317,
                       'Cp': [1504.8, -0.033224, -2959300.0, -13297.2],
                       'a_0': 1.57e-05,
                       'K_0': 27400e6,
                       'Kprime_0': 4.0,
                       'Kdprime_0': -1.46e-10,
                       'n': 46.0,
                       'molar_mass': 0.68672038}
        Mineral.__init__(self)
class stlb (Mineral):
    """Endmember 'stlb' data ('hp_tmt' equation of state)."""
    def __init__(self):
        self.params = {'name': 'stlb',
                       'formula': {'Al': 2.0, 'Ca': 1.0, 'H': 14.0, 'O': 25.0, 'Si': 7.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -10896760.0,
                       'S_0': 710.0,
                       'V_0': 0.0003287,
                       'Cp': [1588.4, -0.032043, -3071600.0, -13966.9],
                       'a_0': 1.51e-05,
                       'K_0': 86000e6,
                       'Kprime_0': 4.09,
                       'Kdprime_0': -4.8e-11,
                       'n': 49.0,
                       'molar_mass': 0.70473566}
        Mineral.__init__(self)
class anl (Mineral):
    """Endmember 'anl' data ('hp_tmt' equation of state)."""
    def __init__(self):
        self.params = {'name': 'anl',
                       'formula': {'Al': 1.0, 'H': 2.0, 'Na': 1.0, 'O': 7.0, 'Si': 2.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -3307220.0,
                       'S_0': 232.0,
                       'V_0': 9.74e-05,
                       'Cp': [643.5, -0.016067, 9302300.0, -9179.6],
                       'a_0': 2.76e-05,
                       'K_0': 40000e6,
                       'Kprime_0': 4.18,
                       'Kdprime_0': -1.04e-10,
                       'n': 13.0,
                       'molar_mass': 0.22015398}
        Mineral.__init__(self)
class lime (Mineral):
    """Endmember 'lime' data ('hp_tmt' equation of state)."""
    def __init__(self):
        self.params = {'name': 'lime',
                       'formula': {'Ca': 1.0, 'O': 1.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -634530.0,
                       'S_0': 38.1,
                       'V_0': 1.676e-05,
                       'Cp': [52.4, 0.003673, -750700.0, -51.0],
                       'a_0': 3.41e-05,
                       'K_0': 113000e6,
                       'Kprime_0': 3.87,
                       'Kdprime_0': -3.4e-11,
                       'n': 2.0,
                       'molar_mass': 0.0560774}
        Mineral.__init__(self)
class ru (Mineral):
    """Endmember 'ru' data ('hp_tmt' equation of state)."""
    def __init__(self):
        self.params = {'name': 'ru',
                       'formula': {'O': 2.0, 'Ti': 1.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -944360.0,
                       'S_0': 50.5,
                       'V_0': 1.882e-05,
                       'Cp': [90.4, 0.0029, 0.0, -623.8],
                       'a_0': 2.24e-05,
                       'K_0': 222000e6,
                       'Kprime_0': 4.24,
                       'Kdprime_0': -1.9e-11,
                       'n': 3.0,
                       'molar_mass': 0.0798658}
        Mineral.__init__(self)
class per (Mineral):
    """Endmember 'per' data ('hp_tmt' equation of state)."""
    def __init__(self):
        self.params = {'name': 'per',
                       'formula': {'Mg': 1.0, 'O': 1.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -601530.0,
                       'S_0': 26.5,
                       'V_0': 1.125e-05,
                       'Cp': [60.5, 0.000362, -535800.0, -299.2],
                       'a_0': 3.11e-05,
                       'K_0': 161600e6,
                       'Kprime_0': 3.95,
                       'Kdprime_0': -2.4e-11,
                       'n': 2.0,
                       'molar_mass': 0.0403044}
        Mineral.__init__(self)
class fper (Mineral):
    """Endmember 'fper' data ('hp_tmt' equation of state)."""
    def __init__(self):
        self.params = {'name': 'fper',
                       'formula': {'Fe': 1.0, 'O': 1.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -259870.0,
                       'S_0': 58.6,
                       'V_0': 1.206e-05,
                       'Cp': [44.4, 0.00828, -1214200.0, 185.2],
                       'a_0': 3.22e-05,
                       'K_0': 152000e6,
                       'Kprime_0': 4.9,
                       'Kdprime_0': -3.2e-11,
                       'n': 2.0,
                       'molar_mass': 0.0718444}
        Mineral.__init__(self)
class mang (Mineral):
    """Endmember 'mang' data ('hp_tmt' equation of state)."""
    def __init__(self):
        self.params = {'name': 'mang',
                       'formula': {'Mn': 1.0, 'O': 1.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -385550.0,
                       'S_0': 59.7,
                       'V_0': 1.322e-05,
                       'Cp': [59.8, 0.0036, -31400.0, -282.6],
                       'a_0': 3.69e-05,
                       'K_0': 164500e6,
                       'Kprime_0': 4.46,
                       'Kdprime_0': -2.7e-11,
                       'n': 2.0,
                       'molar_mass': 0.0709374}
        Mineral.__init__(self)
class cor (Mineral):
    """Endmember 'cor' data ('hp_tmt' equation of state)."""
    def __init__(self):
        self.params = {'name': 'cor',
                       'formula': {'Al': 2.0, 'O': 3.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -1675270.0,
                       'S_0': 50.9,
                       'V_0': 2.558e-05,
                       'Cp': [139.5, 0.00589, -2460600.0, -589.2],
                       'a_0': 1.8e-05,
                       'K_0': 254000e6,
                       'Kprime_0': 4.34,
                       'Kdprime_0': -1.7e-11,
                       'n': 5.0,
                       'molar_mass': 0.1019612}
        Mineral.__init__(self)
class mcor (Mineral):
    """Endmember 'mcor' data ('hp_tmt' equation of state)."""
    def __init__(self):
        self.params = {'name': 'mcor',
                       'formula': {'Mg': 1.0, 'O': 3.0, 'Si': 1.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -1474440.0,
                       'S_0': 59.3,
                       'V_0': 2.635e-05,
                       'Cp': [147.8, 0.002015, -2395000.0, -801.8],
                       'a_0': 2.12e-05,
                       'K_0': 211000e6,
                       'Kprime_0': 4.55,
                       'Kdprime_0': -2.2e-11,
                       'n': 5.0,
                       'molar_mass': 0.1003887}
        Mineral.__init__(self)
class hem (Mineral):
    """Endmember 'hem' data ('hp_tmt' EoS) with a 'landau_hp' property modifier."""
    def __init__(self):
        self.params = {'name': 'hem',
                       'formula': {'Fe': 2.0, 'O': 3.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -825610.0,
                       'S_0': 87.4,
                       'V_0': 3.027e-05,
                       'Cp': [163.9, 0.0, -2257200.0, -657.6],
                       'a_0': 2.79e-05,
                       'K_0': 223000e6,
                       'Kprime_0': 4.04,
                       'Kdprime_0': -1.8e-11,
                       'n': 5.0,
                       'molar_mass': 0.1596882}
        self.property_modifiers = [['landau_hp', {'P_0': 100000.0,
                                                  'T_0': 298.15,
                                                  'Tc_0': 955.0,
                                                  'S_D': 15.6,
                                                  'V_D': 0.0}]]
        Mineral.__init__(self)
class esk (Mineral):
    """Endmember 'esk' data ('hp_tmt' equation of state)."""
    def __init__(self):
        self.params = {'name': 'esk',
                       'formula': {'Cr': 2.0, 'O': 3.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -1137320.0,
                       'S_0': 83.0,
                       'V_0': 2.909e-05,
                       'Cp': [119.0, 0.009496, -1442000.0, -3.4],
                       'a_0': 1.59e-05,
                       'K_0': 238000e6,
                       'Kprime_0': 4.0,
                       'Kdprime_0': -1.7e-11,
                       'n': 5.0,
                       'molar_mass': 0.1519904}
        Mineral.__init__(self)
class bix (Mineral):
    """Endmember 'bix' data ('hp_tmt' equation of state)."""
    def __init__(self):
        self.params = {'name': 'bix',
                       'formula': {'Mn': 2.0, 'O': 3.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -959000.0,
                       'S_0': 113.7,
                       'V_0': 3.137e-05,
                       'Cp': [145.1, 0.023534, 721600.0, -1008.4],
                       'a_0': 2.91e-05,
                       'K_0': 223000e6,
                       'Kprime_0': 4.04,
                       'Kdprime_0': -1.8e-11,
                       'n': 5.0,
                       'molar_mass': 0.1578742}
        Mineral.__init__(self)
class NiO (Mineral):
    """Endmember 'NiO' data ('hp_tmt' EoS) with a 'landau_hp' property modifier."""
    def __init__(self):
        self.params = {'name': 'NiO',
                       'formula': {'Ni': 1.0, 'O': 1.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -239470.0,
                       'S_0': 38.0,
                       'V_0': 1.097e-05,
                       'Cp': [47.7, 0.007824, -392500.0, 0.0],
                       'a_0': 3.3e-05,
                       'K_0': 200000e6,
                       'Kprime_0': 3.94,
                       'Kdprime_0': -2e-11,
                       'n': 2.0,
                       'molar_mass': 0.0746928}
        self.property_modifiers = [['landau_hp', {'P_0': 100000.0,
                                                  'T_0': 298.15,
                                                  'Tc_0': 520.0,
                                                  'S_D': 5.7,
                                                  'V_D': 0.0}]]
        Mineral.__init__(self)
class pnt (Mineral):
    """Endmember 'pnt' data ('hp_tmt' equation of state)."""
    def __init__(self):
        self.params = {'name': 'pnt',
                       'formula': {'Mn': 1.0, 'O': 3.0, 'Ti': 1.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -1361950.0,
                       'S_0': 105.5,
                       'V_0': 3.288e-05,
                       'Cp': [143.5, 0.003373, -1940700.0, -407.6],
                       'a_0': 2.4e-05,
                       'K_0': 170000e6,
                       'Kprime_0': 8.3,
                       'Kdprime_0': -4.9e-11,
                       'n': 5.0,
                       'molar_mass': 0.1508032}
        Mineral.__init__(self)
class geik (Mineral):
    """Endmember 'geik' data ('hp_tmt' equation of state)."""
    def __init__(self):
        self.params = {'name': 'geik',
                       'formula': {'Mg': 1.0, 'O': 3.0, 'Ti': 1.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -1568960.0,
                       'S_0': 73.6,
                       'V_0': 3.086e-05,
                       'Cp': [151.0, 0.0, -1890400.0, -652.2],
                       'a_0': 2.15e-05,
                       'K_0': 170000e6,
                       'Kprime_0': 8.3,
                       'Kdprime_0': -4.9e-11,
                       'n': 5.0,
                       'molar_mass': 0.1201702}
        Mineral.__init__(self)
class ilm (Mineral):
    """Endmember 'ilm' data ('hp_tmt' EoS) with a 'landau_hp' property modifier."""
    def __init__(self):
        self.params = {'name': 'ilm',
                       'formula': {'Fe': 1.0, 'O': 3.0, 'Ti': 1.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -1230450.0,
                       'S_0': 109.5,
                       'V_0': 3.169e-05,
                       'Cp': [138.9, 0.005081, -1288800.0, -463.7],
                       'a_0': 2.4e-05,
                       'K_0': 170000e6,
                       'Kprime_0': 8.3,
                       'Kdprime_0': -4.9e-11,
                       'n': 5.0,
                       'molar_mass': 0.1517102}
        self.property_modifiers = [['landau_hp', {'P_0': 100000.0,
                                                  'T_0': 298.15,
                                                  'Tc_0': 1900.0,
                                                  'S_D': 12.0,
                                                  'V_D': 2e-07}]]
        Mineral.__init__(self)
class bdy (Mineral):
    """Endmember 'bdy' data ('hp_tmt' equation of state)."""
    def __init__(self):
        self.params = {'name': 'bdy',
                       'formula': {'O': 2.0, 'Zr': 1.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -1100340.0,
                       'S_0': 50.4,
                       'V_0': 2.115e-05,
                       'Cp': [103.5, -0.004547, -416200.0, -713.6],
                       'a_0': 2e-05,
                       'K_0': 95300e6,
                       'Kprime_0': 3.88,
                       'Kdprime_0': -4.1e-11,
                       'n': 3.0,
                       'molar_mass': 0.1232228}
        Mineral.__init__(self)
class ten (Mineral):
    """Endmember 'ten' data ('hp_tmt' equation of state)."""
    def __init__(self):
        self.params = {'name': 'ten',
                       'formula': {'Cu': 1.0, 'O': 1.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -156100.0,
                       'S_0': 42.6,
                       'V_0': 1.222e-05,
                       'Cp': [31.0, 0.01374, -1258000.0, 369.3],
                       'a_0': 3.57e-05,
                       'K_0': 200000e6,
                       'Kprime_0': 3.94,
                       'Kdprime_0': -2e-11,
                       'n': 2.0,
                       'molar_mass': 0.0795454}
        Mineral.__init__(self)
class cup (Mineral):
    """Endmember 'cup' data ('hp_tmt' equation of state)."""
    def __init__(self):
        self.params = {'name': 'cup',
                       'formula': {'Cu': 2.0, 'O': 1.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -170600.0,
                       'S_0': 92.4,
                       'V_0': 2.344e-05,
                       'Cp': [110.3, 0.0, 0.0, -674.8],
                       'a_0': 3.33e-05,
                       'K_0': 131000e6,
                       'Kprime_0': 5.7,
                       'Kdprime_0': -4.3e-11,
                       'n': 3.0,
                       'molar_mass': 0.1430914}
        Mineral.__init__(self)
class sp (Mineral):
    """Endmember 'sp' data ('hp_tmt' EoS) with a 'bragg_williams' property modifier."""
    def __init__(self):
        self.params = {'name': 'sp',
                       'formula': {'Al': 2.0, 'Mg': 1.0, 'O': 4.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -2301190.0,
                       'S_0': 82.0,
                       'V_0': 3.978e-05,
                       'Cp': [222.9, 0.006127, -1686000.0, -1551.0],
                       'a_0': 1.93e-05,
                       'K_0': 192200e6,
                       'Kprime_0': 4.04,
                       'Kdprime_0': -2.1e-11,
                       'n': 7.0,
                       'molar_mass': 0.1422656}
        self.property_modifiers = [['bragg_williams', {'deltaH': 8000.0,
                                                       'deltaV': 0.0,
                                                       'Wh': 1200.0,
                                                       'Wv': 0.0,
                                                       'n': 2.0,
                                                       'factor': 0.5}]]
        Mineral.__init__(self)
class herc (Mineral):
    """Endmember 'herc' data ('hp_tmt' EoS) with a 'bragg_williams' property modifier."""
    def __init__(self):
        self.params = {'name': 'herc',
                       'formula': {'Al': 2.0, 'Fe': 1.0, 'O': 4.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -1953030.0,
                       'S_0': 113.9,
                       'V_0': 4.075e-05,
                       'Cp': [216.7, 0.005868, -2430200.0, -1178.3],
                       'a_0': 2.06e-05,
                       'K_0': 192200e6,
                       'Kprime_0': 4.04,
                       'Kdprime_0': -2.1e-11,
                       'n': 7.0,
                       'molar_mass': 0.1738056}
        self.property_modifiers = [['bragg_williams', {'deltaH': 18300.0,
                                                       'deltaV': 0.0,
                                                       'Wh': 13600.0,
                                                       'Wv': 0.0,
                                                       'n': 2.0,
                                                       'factor': 1.0}]]
        Mineral.__init__(self)
class mt (Mineral):
    """Endmember 'mt' data ('hp_tmt' EoS) with a 'landau_hp' property modifier."""
    def __init__(self):
        self.params = {'name': 'mt',
                       'formula': {'Fe': 3.0, 'O': 4.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -1114500.0,
                       'S_0': 146.9,
                       'V_0': 4.452e-05,
                       'Cp': [262.5, -0.007205, -1926200.0, -1655.7],
                       'a_0': 3.71e-05,
                       'K_0': 185700e6,
                       'Kprime_0': 4.05,
                       'Kdprime_0': -2.2e-11,
                       'n': 7.0,
                       'molar_mass': 0.2315326}
        self.property_modifiers = [['landau_hp', {'P_0': 100000.0,
                                                  'T_0': 298.15,
                                                  'Tc_0': 848.0,
                                                  'S_D': 35.0,
                                                  'V_D': 0.0}]]
        Mineral.__init__(self)
class mft (Mineral):
    """Endmember 'mft' data ('hp_tmt' EoS) with a 'landau_hp' property modifier."""
    def __init__(self):
        self.params = {'name': 'mft',
                       'formula': {'Fe': 2.0, 'Mg': 1.0, 'O': 4.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -1442290.0,
                       'S_0': 121.0,
                       'V_0': 4.457e-05,
                       'Cp': [270.5, -0.007505, -999200.0, -2022.4],
                       'a_0': 3.63e-05,
                       'K_0': 185700e6,
                       'Kprime_0': 4.05,
                       'Kdprime_0': -2.2e-11,
                       'n': 7.0,
                       'molar_mass': 0.1999926}
        self.property_modifiers = [['landau_hp', {'P_0': 100000.0,
                                                  'T_0': 298.15,
                                                  'Tc_0': 665.0,
                                                  'S_D': 17.0,
                                                  'V_D': 0.0}]]
        Mineral.__init__(self)
class usp (Mineral):
    """Endmember 'usp' data ('hp_tmt' equation of state)."""
    def __init__(self):
        self.params = {'name': 'usp',
                       'formula': {'Fe': 2.0, 'O': 4.0, 'Ti': 1.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -1491120.0,
                       'S_0': 180.0,
                       'V_0': 4.682e-05,
                       'Cp': [-102.6, 0.14252, -9144500.0, 5270.7],
                       'a_0': 3.86e-05,
                       'K_0': 185700e6,
                       'Kprime_0': 4.05,
                       'Kdprime_0': -2.2e-11,
                       'n': 7.0,
                       'molar_mass': 0.2235546}
        Mineral.__init__(self)
class picr (Mineral):
    """Endmember 'picr' data ('hp_tmt' EoS) with a 'bragg_williams' property modifier."""
    def __init__(self):
        self.params = {'name': 'picr',
                       'formula': {'Cr': 2.0, 'Mg': 1.0, 'O': 4.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -1762600.0,
                       'S_0': 118.3,
                       'V_0': 4.356e-05,
                       'Cp': [196.1, 0.005398, -3126000.0, -616.9],
                       'a_0': 1.8e-05,
                       'K_0': 192200e6,
                       'Kprime_0': 4.04,
                       'Kdprime_0': -2.1e-11,
                       'n': 7.0,
                       'molar_mass': 0.1922948}
        self.property_modifiers = [['bragg_williams', {'deltaH': 8000.0,
                                                       'deltaV': 0.0,
                                                       'Wh': 1200.0,
                                                       'Wv': 0.0,
                                                       'n': 2.0,
                                                       'factor': 0.5}]]
        Mineral.__init__(self)
class br (Mineral):
    """Endmember 'br' data ('hp_tmt' equation of state)."""
    def __init__(self):
        self.params = {'name': 'br',
                       'formula': {'H': 2.0, 'Mg': 1.0, 'O': 2.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -925560.0,
                       'S_0': 63.2,
                       'V_0': 2.463e-05,
                       'Cp': [158.4, -0.004076, -1052300.0, -1171.3],
                       'a_0': 6.2e-05,
                       'K_0': 41500e6,
                       'Kprime_0': 6.45,
                       'Kdprime_0': -1.55e-10,
                       'n': 5.0,
                       'molar_mass': 0.05831968}
        Mineral.__init__(self)
class dsp (Mineral):
    """Endmember 'dsp' data ('hp_tmt' equation of state)."""
    def __init__(self):
        self.params = {'name': 'dsp',
                       'formula': {'Al': 1.0, 'H': 1.0, 'O': 2.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -999840.0,
                       'S_0': 34.5,
                       'V_0': 1.786e-05,
                       'Cp': [145.1, 0.008709, 584400.0, -1741.1],
                       'a_0': 3.57e-05,
                       'K_0': 228000e6,
                       'Kprime_0': 4.04,
                       'Kdprime_0': -1.8e-11,
                       'n': 4.0,
                       'molar_mass': 0.05998824}
        Mineral.__init__(self)
class gth (Mineral):
    """Endmember 'gth' data ('hp_tmt' equation of state)."""
    def __init__(self):
        self.params = {'name': 'gth',
                       'formula': {'Fe': 1.0, 'H': 1.0, 'O': 2.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -561770.0,
                       'S_0': 60.3,
                       'V_0': 2.082e-05,
                       'Cp': [139.3, 0.000147, -212700.0, -1077.8],
                       'a_0': 4.35e-05,
                       'K_0': 250000e6,
                       'Kprime_0': 4.03,
                       'Kdprime_0': -1.6e-11,
                       'n': 4.0,
                       'molar_mass': 0.08885174}
        Mineral.__init__(self)
class cc (Mineral):
    """Endmember 'cc' data ('hp_tmt' EoS) with a 'landau_hp' property modifier."""
    def __init__(self):
        self.params = {'name': 'cc',
                       'formula': {'C': 1.0, 'Ca': 1.0, 'O': 3.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -1207760.0,
                       'S_0': 92.5,
                       'V_0': 3.689e-05,
                       'Cp': [140.9, 0.005029, -950700.0, -858.4],
                       'a_0': 2.52e-05,
                       'K_0': 73300e6,
                       'Kprime_0': 4.06,
                       'Kdprime_0': -5.5e-11,
                       'n': 5.0,
                       'molar_mass': 0.1000869}
        self.property_modifiers = [['landau_hp', {'P_0': 100000.0,
                                                  'T_0': 298.15,
                                                  'Tc_0': 1240.0,
                                                  'S_D': 10.0,
                                                  'V_D': 4e-07}]]
        Mineral.__init__(self)
class arag (Mineral):
    """Endmember 'arag' data ('hp_tmt' equation of state)."""
    def __init__(self):
        self.params = {'name': 'arag',
                       'formula': {'C': 1.0, 'Ca': 1.0, 'O': 3.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -1207650.0,
                       'S_0': 89.8,
                       'V_0': 3.415e-05,
                       'Cp': [167.1, 0.010695, 162000.0, -1564.9],
                       'a_0': 6.14e-05,
                       'K_0': 61400e6,
                       'Kprime_0': 5.87,
                       'Kdprime_0': -9.6e-11,
                       'n': 5.0,
                       'molar_mass': 0.1000869}
        Mineral.__init__(self)
class mag (Mineral):
    """Endmember 'mag' data ('hp_tmt' equation of state)."""
    def __init__(self):
        self.params = {'name': 'mag',
                       'formula': {'C': 1.0, 'Mg': 1.0, 'O': 3.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -1110920.0,
                       'S_0': 65.5,
                       'V_0': 2.803e-05,
                       'Cp': [186.4, -0.003772, 0.0, -1886.2],
                       'a_0': 3.38e-05,
                       'K_0': 102800e6,
                       'Kprime_0': 5.41,
                       'Kdprime_0': -5.3e-11,
                       'n': 5.0,
                       'molar_mass': 0.0843139}
        Mineral.__init__(self)
class sid (Mineral):
    """Endmember 'sid' data ('hp_tmt' equation of state)."""
    def __init__(self):
        self.params = {'name': 'sid',
                       'formula': {'C': 1.0, 'Fe': 1.0, 'O': 3.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -762220.0,
                       'S_0': 93.3,
                       'V_0': 2.943e-05,
                       'Cp': [168.4, 0.0, 0.0, -1483.6],
                       'a_0': 4.39e-05,
                       'K_0': 120000e6,
                       'Kprime_0': 4.07,
                       'Kdprime_0': -3.4e-11,
                       'n': 5.0,
                       'molar_mass': 0.1158539}
        Mineral.__init__(self)
class rhc (Mineral):
    """Endmember 'rhc' data ('hp_tmt' equation of state)."""
    def __init__(self):
        self.params = {'name': 'rhc',
                       'formula': {'C': 1.0, 'Mn': 1.0, 'O': 3.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -892280.0,
                       'S_0': 98.0,
                       'V_0': 3.107e-05,
                       'Cp': [169.5, 0.0, 0.0, -1534.3],
                       'a_0': 2.44e-05,
                       'K_0': 95300e6,
                       'Kprime_0': 3.88,
                       'Kdprime_0': -4.1e-11,
                       'n': 5.0,
                       'molar_mass': 0.1149469}
        Mineral.__init__(self)
class dol (Mineral):
    """Endmember 'dol' data ('hp_tmt' EoS) with a 'bragg_williams' property modifier."""
    def __init__(self):
        self.params = {'name': 'dol',
                       'formula': {'C': 2.0, 'Ca': 1.0, 'Mg': 1.0, 'O': 6.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -2326220.0,
                       'S_0': 156.1,
                       'V_0': 6.429e-05,
                       'Cp': [358.9, -0.004905, 0.0, -3456.2],
                       'a_0': 3.28e-05,
                       'K_0': 94300e6,
                       'Kprime_0': 3.74,
                       'Kdprime_0': -4e-11,
                       'n': 10.0,
                       'molar_mass': 0.1844008}
        self.property_modifiers = [['bragg_williams', {'deltaH': 11910.0,
                                                       'deltaV': 1.6e-07,
                                                       'Wh': 11900.0,
                                                       'Wv': 1.6e-07,
                                                       'n': 1.0,
                                                       'factor': 1.0}]]
        Mineral.__init__(self)
class ank (Mineral):
    """Endmember 'ank' data ('hp_tmt' EoS) with a 'bragg_williams' property modifier."""
    def __init__(self):
        self.params = {'name': 'ank',
                       'formula': {'C': 2.0, 'Ca': 1.0, 'Fe': 1.0, 'O': 6.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -1971410.0,
                       'S_0': 188.46,
                       'V_0': 6.606e-05,
                       'Cp': [341.0, -0.001161, 0.0, -3054.8],
                       'a_0': 3.46e-05,
                       'K_0': 91400e6,
                       'Kprime_0': 3.88,
                       'Kdprime_0': -4.3e-11,
                       'n': 10.0,
                       'molar_mass': 0.2159408}
        self.property_modifiers = [['bragg_williams', {'deltaH': 11910.0,
                                                       'deltaV': 1.6e-07,
                                                       'Wh': 11900.0,
                                                       'Wv': 1.6e-07,
                                                       'n': 1.0,
                                                       'factor': 1.0}]]
        Mineral.__init__(self)
class syv (Mineral):
    """Endmember 'syv' data ('hp_tmt' equation of state)."""
    def __init__(self):
        self.params = {'name': 'syv',
                       'formula': {'Cl': 1.0, 'K': 1.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -436500.0,
                       'S_0': 82.6,
                       'V_0': 3.752e-05,
                       'Cp': [46.2, 0.01797, 0.0, 0.0],
                       'a_0': 0.0001109,
                       'K_0': 17000e6,
                       'Kprime_0': 5.0,
                       'Kdprime_0': -2.94e-10,
                       'n': 2.0,
                       'molar_mass': 0.0745513}
        Mineral.__init__(self)
class hlt (Mineral):
    """Endmember 'hlt' data ('hp_tmt' equation of state)."""
    def __init__(self):
        self.params = {'name': 'hlt',
                       'formula': {'Cl': 1.0, 'Na': 1.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -411300.0,
                       'S_0': 72.1,
                       'V_0': 2.702e-05,
                       'Cp': [45.2, 0.01797, 0.0, 0.0],
                       'a_0': 0.0001147,
                       'K_0': 23800e6,
                       'Kprime_0': 5.0,
                       'Kdprime_0': -2.1e-10,
                       'n': 2.0,
                       'molar_mass': 0.0584428}
        Mineral.__init__(self)
class pyr (Mineral):
    """Endmember 'pyr' data ('hp_tmt' equation of state)."""
    def __init__(self):
        self.params = {'name': 'pyr',
                       'formula': {'Fe': 1.0, 'S': 2.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -171640.0,
                       'S_0': 52.9,
                       'V_0': 2.394e-05,
                       'Cp': [37.3, 0.026715, -1817000.0, 649.3],
                       'a_0': 3.1e-05,
                       'K_0': 139500e6,
                       'Kprime_0': 4.09,
                       'Kdprime_0': -2.9e-11,
                       'n': 3.0,
                       'molar_mass': 0.119975}
        Mineral.__init__(self)
class trot (Mineral):
    """Endmember 'trot' data ('hp_tmt' EoS) with a 'landau_hp' property modifier."""
    def __init__(self):
        self.params = {'name': 'trot',
                       'formula': {'Fe': 1.0, 'S': 1.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -99030.0,
                       'S_0': 65.5,
                       'V_0': 1.819e-05,
                       'Cp': [50.2, 0.011052, -940000.0, 0.0],
                       'a_0': 5.68e-05,
                       'K_0': 65800e6,
                       'Kprime_0': 4.17,
                       'Kdprime_0': -6.3e-11,
                       'n': 2.0,
                       'molar_mass': 0.08791}
        self.property_modifiers = [['landau_hp', {'P_0': 100000.0,
                                                  'T_0': 298.15,
                                                  'Tc_0': 598.0,
                                                  'S_D': 12.0,
                                                  'V_D': 4.1e-07}]]
        Mineral.__init__(self)
class tro (Mineral):
    """Endmember 'tro' data ('hp_tmt' EoS) with a 'landau_hp' property modifier."""
    def __init__(self):
        self.params = {'name': 'tro',
                       'formula': {'Fe': 1.0, 'S': 1.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -97760.0,
                       'S_0': 70.8,
                       'V_0': 1.819e-05,
                       'Cp': [50.2, 0.011052, -940000.0, 0.0],
                       'a_0': 5.73e-05,
                       'K_0': 65800e6,
                       'Kprime_0': 4.17,
                       'Kdprime_0': -6.3e-11,
                       'n': 2.0,
                       'molar_mass': 0.08791}
        self.property_modifiers = [['landau_hp', {'P_0': 100000.0,
                                                  'T_0': 298.15,
                                                  'Tc_0': 598.0,
                                                  'S_D': 12.0,
                                                  'V_D': 4.1e-07}]]
        Mineral.__init__(self)
class lot (Mineral):
    """Endmember 'lot' data ('hp_tmt' EoS) with a 'landau_hp' property modifier."""
    def __init__(self):
        self.params = {'name': 'lot',
                       'formula': {'Fe': 1.0, 'S': 1.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -102160.0,
                       'S_0': 60.0,
                       'V_0': 1.818e-05,
                       'Cp': [50.2, 0.011052, -940000.0, 0.0],
                       'a_0': 4.93e-05,
                       'K_0': 65800e6,
                       'Kprime_0': 4.17,
                       'Kdprime_0': -6.3e-11,
                       'n': 2.0,
                       'molar_mass': 0.08791}
        self.property_modifiers = [['landau_hp', {'P_0': 100000.0,
                                                  'T_0': 298.15,
                                                  'Tc_0': 420.0,
                                                  'S_D': 10.0,
                                                  'V_D': 0.0}]]
        Mineral.__init__(self)
class trov (Mineral):
    """Endmember 'trov' data ('hp_tmt' EoS) with a 'landau_hp' property modifier."""
    def __init__(self):
        self.params = {'name': 'trov',
                       'formula': {'Fe': 0.875, 'S': 1.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -96020.0,
                       'S_0': 57.5,
                       'V_0': 1.738e-05,
                       'Cp': [51.1, 0.008307, -669700.0, 0.0],
                       'a_0': 5.94e-05,
                       'K_0': 65800e6,
                       'Kprime_0': 4.17,
                       'Kdprime_0': -6.3e-11,
                       'n': 1.875,
                       'molar_mass': 0.080929375}
        self.property_modifiers = [['landau_hp', {'P_0': 100000.0,
                                                  'T_0': 298.15,
                                                  'Tc_0': 595.0,
                                                  'S_D': 10.0,
                                                  'V_D': 1.6e-07}]]
        Mineral.__init__(self)
class any (Mineral):
    """Endmember 'any' data ('hp_tmt' equation of state).

    NOTE(review): the class name shadows the builtin ``any``; kept because
    the dataset's public naming cannot change without breaking callers.
    """
    def __init__(self):
        self.params = {'name': 'any',
                       'formula': {'Ca': 1.0, 'O': 4.0, 'S': 1.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -1434400.0,
                       'S_0': 106.9,
                       'V_0': 4.594e-05,
                       'Cp': [128.7, 0.048545, -1223000.0, -560.5],
                       'a_0': 4.18e-05,
                       'K_0': 54379999999.99999,
                       'Kprime_0': 4.19,
                       'Kdprime_0': -7.7e-11,
                       'n': 6.0,
                       'molar_mass': 0.1361406}
        Mineral.__init__(self)
class iron (Mineral):
    """Endmember 'iron' data ('hp_tmt' EoS) with a 'landau_hp' property modifier."""
    def __init__(self):
        self.params = {'name': 'iron',
                       'formula': {'Fe': 1.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -0.0,
                       'S_0': 27.09,
                       'V_0': 7.09e-06,
                       'Cp': [46.2, 0.005159, 723100.0, -556.2],
                       'a_0': 3.56e-05,
                       'K_0': 164000e6,
                       'Kprime_0': 5.16,
                       'Kdprime_0': -3.1e-11,
                       'n': 1.0,
                       'molar_mass': 0.055845}
        self.property_modifiers = [['landau_hp', {'P_0': 100000.0,
                                                  'T_0': 298.15,
                                                  'Tc_0': 1042.0,
                                                  'S_D': 8.3,
                                                  'V_D': 0.0}]]
        Mineral.__init__(self)
class Ni (Mineral):
    """Endmember 'Ni' data ('hp_tmt' EoS) with a 'landau_hp' property modifier."""
    def __init__(self):
        self.params = {'name': 'Ni',
                       'formula': {'Ni': 1.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': 0.0,
                       'S_0': 29.87,
                       'V_0': 6.59e-06,
                       'Cp': [49.8, 0.0, 585900.0, -533.9],
                       'a_0': 4.28e-05,
                       'K_0': 190500e6,
                       'Kprime_0': 4.25,
                       'Kdprime_0': -2.2e-11,
                       'n': 1.0,
                       'molar_mass': 0.0586934}
        self.property_modifiers = [['landau_hp', {'P_0': 100000.0,
                                                  'T_0': 298.15,
                                                  'Tc_0': 631.0,
                                                  'S_D': 3.0,
                                                  'V_D': 0.0}]]
        Mineral.__init__(self)
class Cu (Mineral):
    """Endmember 'Cu' data ('hp_tmt' equation of state)."""
    def __init__(self):
        self.params = {'name': 'Cu',
                       'formula': {'Cu': 1.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': -0.0,
                       'S_0': 33.14,
                       'V_0': 7.11e-06,
                       'Cp': [12.4, 0.00922, -379900.0, 233.5],
                       'a_0': 3.58e-05,
                       'K_0': 162500e6,
                       'Kprime_0': 4.24,
                       'Kdprime_0': -2.6e-11,
                       'n': 1.0,
                       'molar_mass': 0.063546}
        Mineral.__init__(self)
class gph (Mineral):
    """Endmember 'gph' data ('hp_tmt' equation of state)."""
    def __init__(self):
        self.params = {'name': 'gph',
                       'formula': {'C': 1.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': 0.0,
                       'S_0': 5.76,
                       'V_0': 5.3e-06,
                       'Cp': [34.3, 0.0, -240700.0, -403.8],
                       'a_0': 1.65e-05,
                       'K_0': 31200e6,
                       'Kprime_0': 3.9,
                       'Kdprime_0': -1.25e-10,
                       'n': 1.0,
                       'molar_mass': 0.0120107}
        Mineral.__init__(self)
class diam (Mineral):
    """Endmember 'diam' data ('hp_tmt' equation of state)."""
    def __init__(self):
        self.params = {'name': 'diam',
                       'formula': {'C': 1.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': 1890.0,
                       'S_0': 2.36,
                       'V_0': 3.42e-06,
                       'Cp': [40.0, 0.0, -28500.0, -580.5],
                       'a_0': 4e-06,
                       'K_0': 446500e6,
                       'Kprime_0': 1.61,
                       'Kdprime_0': -3.6e-12,
                       'n': 1.0,
                       'molar_mass': 0.0120107}
        Mineral.__init__(self)
class S (Mineral):
    """Endmember 'S' data ('hp_tmt' equation of state)."""
    def __init__(self):
        self.params = {'name': 'S',
                       'formula': {'S': 1.0},
                       'equation_of_state': 'hp_tmt',
                       'H_0': 0.0,
                       'S_0': 32.05,
                       'V_0': 1.551e-05,
                       'Cp': [56.6, -0.004557, 638000.0, -681.8],
                       'a_0': 6.4e-05,
                       'K_0': 14500e6,
                       'Kprime_0': 7.0,
                       'Kdprime_0': -4.8e-10,
                       'n': 1.0,
                       'molar_mass': 0.032065}
        Mineral.__init__(self)
class syvL (Mineral):
    """Endmember 'syvL' data ('hp_tmtL' equation of state; carries 'dKdT_0')."""
    def __init__(self):
        self.params = {'name': 'syvL',
                       'formula': {'Cl': 1.0, 'K': 1.0},
                       'equation_of_state': 'hp_tmtL',
                       'H_0': -417410.0,
                       'S_0': 94.5,
                       'V_0': 3.822e-05,
                       'Cp': [66.9, 0.0, 0.0, 0.0],
                       'a_0': 0.000301,
                       'K_0': 5600e6,
                       'Kprime_0': 4.65,
                       'Kdprime_0': -8.3e-10,
                       'dKdT_0': -2e6,
                       'n': 2.0,
                       'molar_mass': 0.0745513}
        Mineral.__init__(self)
class hltL (Mineral):
    """Endmember 'hltL' data ('hp_tmtL' equation of state; carries 'dKdT_0')."""
    def __init__(self):
        self.params = {'name': 'hltL',
                       'formula': {'Cl': 1.0, 'Na': 1.0},
                       'equation_of_state': 'hp_tmtL',
                       'H_0': -392990.0,
                       'S_0': 80.1,
                       'V_0': 2.938e-05,
                       'Cp': [72.0, -0.003223, 0.0, 0.0],
                       'a_0': 0.000295,
                       'K_0': 6400e6,
                       'Kprime_0': 4.61,
                       'Kdprime_0': -7.2e-10,
                       'dKdT_0': -1500000.0,
                       'n': 2.0,
                       'molar_mass': 0.0584428}
        Mineral.__init__(self)
class perL (Mineral):
    """Endmember 'perL' data ('hp_tmtL' equation of state; carries 'dKdT_0')."""
    def __init__(self):
        self.params = {'name': 'perL',
                       'formula': {'Mg': 1.0, 'O': 1.0},
                       'equation_of_state': 'hp_tmtL',
                       'H_0': -654120.0,
                       'S_0': -64.3,
                       'V_0': 8.39e-06,
                       'Cp': [99.0, 0.0, 0.0, 0.0],
                       'a_0': 0.000226,
                       'K_0': 36200e6,
                       'Kprime_0': 10.06,
                       'Kdprime_0': -2.78e-10,
                       'dKdT_0': -4100000.0,
                       'n': 2.0,
                       'molar_mass': 0.0403044}
        Mineral.__init__(self)
class limL (Mineral):
    """Endmember 'limL' data ('hp_tmtL' equation of state; carries 'dKdT_0')."""
    def __init__(self):
        self.params = {'name': 'limL',
                       'formula': {'Ca': 1.0, 'O': 1.0},
                       'equation_of_state': 'hp_tmtL',
                       'H_0': -692280.0,
                       'S_0': -47.5,
                       'V_0': 1.303e-05,
                       'Cp': [99.0, 0.0, 0.0, 0.0],
                       'a_0': 0.000175,
                       'K_0': 36200e6,
                       'Kprime_0': 10.06,
                       'Kdprime_0': -2.78e-10,
                       'dKdT_0': -4100000.0,
                       'n': 2.0,
                       'molar_mass': 0.0560774}
        Mineral.__init__(self)
class corL (Mineral):
    """Endmember 'corL' data ('hp_tmtL' equation of state; carries 'dKdT_0')."""
    def __init__(self):
        self.params = {'name': 'corL',
                       'formula': {'Al': 2.0, 'O': 3.0},
                       'equation_of_state': 'hp_tmtL',
                       'H_0': -1632160.0,
                       'S_0': 14.9,
                       'V_0': 3.369e-05,
                       'Cp': [157.6, 0.0, 0.0, 0.0],
                       'a_0': 7.03e-05,
                       'K_0': 15000e6,
                       'Kprime_0': 6.0,
                       'Kdprime_0': 4e-10,
                       'dKdT_0': -3500000.0000000005,
                       'n': 5.0,
                       'molar_mass': 0.1019612}
        Mineral.__init__(self)
class qL (Mineral):
    """Endmember 'qL' data ('hp_tmtL' equation of state; carries 'dKdT_0')."""
    def __init__(self):
        self.params = {'name': 'qL',
                       'formula': {'O': 2.0, 'Si': 1.0},
                       'equation_of_state': 'hp_tmtL',
                       'H_0': -921070.0,
                       'S_0': 16.3,
                       'V_0': 2.73e-05,
                       'Cp': [82.5, 0.0, 0.0, 0.0],
                       'a_0': 0.0,
                       'K_0': 22000e6,
                       'Kprime_0': 9.46,
                       'Kdprime_0': -4.3e-10,
                       'dKdT_0': -3500000.0000000005,
                       'n': 3.0,
                       'molar_mass': 0.0600843}
        Mineral.__init__(self)
Mineral.__init__(self)
class h2oL (Mineral):
def __init__(self):
self.params = {'name': 'h2oL',
'formula': {'H': 2.0, 'O': 1.0},
'equation_of_state': 'hp_tmtL',
'H_0': -295010.0,
'S_0': 45.5,
'V_0': 1.39e-05,
'Cp': [80.0, 0.0, 0.0, 0.0],
'a_0': 0.000521,
'K_0': 5060e6,
'Kprime_0': 4.0,
'Kdprime_0': -7.9e-10,
'dKdT_0': -370000.0,
'n': 3.0,
'molar_mass': 0.01801528}
Mineral.__init__(self)
class foL (Mineral):
def __init__(self):
self.params = {'name': 'foL',
'formula': {'Mg': 2.0, 'O': 4.0, 'Si': 1.0},
'equation_of_state': 'hp_tmtL',
'H_0': -2237350.0,
'S_0': -62.0,
'V_0': 4.312e-05,
'Cp': [269.4, 0.0, 0.0, 0.0],
'a_0': 9.2e-05,
'K_0': 36200e6,
'Kprime_0': 10.06,
'Kdprime_0': -2.78e-10,
'dKdT_0': -4400000.0,
'n': 7.0,
'molar_mass': 0.1406931}
Mineral.__init__(self)
class faL (Mineral):
    """faL liquid endmember: parameters for the 'hp_tmtL' equation of state."""

    def __init__(self):
        # Thermodynamic constants; units follow the dataset convention.
        self.params = dict(
            name='faL',
            formula={'Fe': 2.0, 'O': 4.0, 'Si': 1.0},
            equation_of_state='hp_tmtL',
            H_0=-1463020.0,
            S_0=96.0,
            V_0=4.677e-05,
            Cp=[243.7, 0.0, 0.0, 0.0],
            a_0=0.0001071,
            K_0=29000e6,
            Kprime_0=10.42,
            Kdprime_0=-3.59e-10,
            dKdT_0=-5500000.0,
            n=7.0,
            molar_mass=0.2037731,
        )
        Mineral.__init__(self)
class woL (Mineral):
    """woL liquid endmember: parameters for the 'hp_tmtL' equation of state."""

    def __init__(self):
        # Thermodynamic constants; units follow the dataset convention.
        self.params = dict(
            name='woL',
            formula={'Ca': 1.0, 'O': 3.0, 'Si': 1.0},
            equation_of_state='hp_tmtL',
            H_0=-1642220.0,
            S_0=22.5,
            V_0=3.965e-05,
            Cp=[167.4, 0.0, 0.0, 0.0],
            a_0=6.69e-05,
            K_0=30500e6,
            Kprime_0=9.38,
            Kdprime_0=-3.08e-10,
            dKdT_0=-2e6,
            n=5.0,
            molar_mass=0.1161617,
        )
        Mineral.__init__(self)
class enL (Mineral):
    """enL liquid endmember: parameters for the 'hp_tmtL' equation of state."""

    def __init__(self):
        # Thermodynamic constants; units follow the dataset convention.
        self.params = dict(
            name='enL',
            formula={'Mg': 2.0, 'O': 6.0, 'Si': 2.0},
            equation_of_state='hp_tmtL',
            H_0=-3096570.0,
            S_0=-4.0,
            V_0=6.984e-05,
            Cp=[353.6, 0.0, 0.0, 0.0],
            a_0=6.81e-05,
            K_0=21800e6,
            Kprime_0=7.2,
            Kdprime_0=-3.3e-10,
            dKdT_0=-2400000.0,
            n=10.0,
            molar_mass=0.2007774,
        )
        Mineral.__init__(self)
class diL (Mineral):
    """diL liquid endmember: parameters for the 'hp_tmtL' equation of state."""

    def __init__(self):
        # Thermodynamic constants; units follow the dataset convention.
        self.params = dict(
            name='diL',
            formula={'Ca': 1.0, 'Mg': 1.0, 'O': 6.0, 'Si': 2.0},
            equation_of_state='hp_tmtL',
            H_0=-3193870.0,
            S_0=42.1,
            V_0=7.288e-05,
            Cp=[334.0, 0.0, 0.0, 0.0],
            a_0=8.51e-05,
            K_0=24900e6,
            Kprime_0=8.04,
            Kdprime_0=-3.23e-10,
            dKdT_0=-3730000.0,
            n=10.0,
            molar_mass=0.2165504,
        )
        Mineral.__init__(self)
class silL (Mineral):
    """silL liquid endmember: parameters for the 'hp_tmtL' equation of state."""

    def __init__(self):
        # Thermodynamic constants; units follow the dataset convention.
        self.params = dict(
            name='silL',
            formula={'Al': 2.0, 'O': 5.0, 'Si': 1.0},
            equation_of_state='hp_tmtL',
            H_0=-2593430.0,
            S_0=10.0,
            V_0=6.051e-05,
            Cp=[253.0, 0.0, 0.0, 0.0],
            a_0=4.08e-05,
            K_0=22000e6,
            Kprime_0=6.36,
            Kdprime_0=-2.89e-10,
            dKdT_0=-2900000.0,
            n=8.0,
            molar_mass=0.1620455,
        )
        Mineral.__init__(self)
class anL (Mineral):
    """anL liquid endmember: parameters for the 'hp_tmtL' equation of state."""

    def __init__(self):
        # Thermodynamic constants; units follow the dataset convention.
        self.params = dict(
            name='anL',
            formula={'Al': 2.0, 'Ca': 1.0, 'O': 8.0, 'Si': 2.0},
            equation_of_state='hp_tmtL',
            H_0=-4277970.0,
            S_0=29.0,
            V_0=0.00010014,
            Cp=[430.0, 0.0, 0.0, 0.0],
            a_0=5.14e-05,
            K_0=21000e6,
            Kprime_0=6.38,
            Kdprime_0=-3.04e-10,
            dKdT_0=-5500000.0,
            n=13.0,
            molar_mass=0.2782072,
        )
        Mineral.__init__(self)
class kspL (Mineral):
    """kspL liquid endmember: parameters for the 'hp_tmtL' equation of state."""

    def __init__(self):
        # Thermodynamic constants; units follow the dataset convention.
        self.params = dict(
            name='kspL',
            formula={'Al': 1.0, 'K': 1.0, 'O': 8.0, 'Si': 3.0},
            equation_of_state='hp_tmtL',
            H_0=-3985190.0,
            S_0=129.2,
            V_0=0.00011431,
            Cp=[368.0, 0.0, 0.0, 0.0],
            a_0=4.93e-05,
            K_0=17300e6,
            Kprime_0=6.84,
            Kdprime_0=-3.93e-10,
            dKdT_0=-899999.9999999999,
            n=13.0,
            molar_mass=0.2783315,
        )
        Mineral.__init__(self)
class abL (Mineral):
    """abL liquid endmember: parameters for the 'hp_tmtL' equation of state."""

    def __init__(self):
        # Thermodynamic constants; units follow the dataset convention.
        self.params = dict(
            name='abL',
            formula={'Al': 1.0, 'Na': 1.0, 'O': 8.0, 'Si': 3.0},
            equation_of_state='hp_tmtL',
            H_0=-3926520.0,
            S_0=149.9,
            V_0=0.00010858,
            Cp=[358.0, 0.0, 0.0, 0.0],
            a_0=3.37e-05,
            K_0=17600e6,
            Kprime_0=14.35,
            Kdprime_0=-8.15e-10,
            dKdT_0=-2600000.0,
            n=13.0,
            molar_mass=0.262223,
        )
        Mineral.__init__(self)
class neL (Mineral):
    """neL liquid endmember: parameters for the 'hp_tmtL' equation of state."""

    def __init__(self):
        # Thermodynamic constants; units follow the dataset convention.
        self.params = dict(
            name='neL',
            formula={'Al': 1.0, 'Na': 1.0, 'O': 4.0, 'Si': 1.0},
            equation_of_state='hp_tmtL',
            H_0=-2116730.0,
            S_0=52.9,
            V_0=5.2e-05,
            Cp=[216.5, 0.0, 0.0, 0.0],
            a_0=0.000137,
            K_0=25000e6,
            Kprime_0=7.37,
            Kdprime_0=-2.95e-10,
            dKdT_0=-800000.0,
            n=7.0,
            molar_mass=0.1420544,
        )
        Mineral.__init__(self)
class lcL (Mineral):
    """lcL liquid endmember: parameters for the 'hp_tmtL' equation of state."""

    def __init__(self):
        # Thermodynamic constants; units follow the dataset convention.
        self.params = dict(
            name='lcL',
            formula={'Al': 1.0, 'K': 1.0, 'O': 6.0, 'Si': 2.0},
            equation_of_state='hp_tmtL',
            H_0=-3068410.0,
            S_0=102.0,
            V_0=8.59e-05,
            Cp=[287.0, 0.0, 0.0, 0.0],
            a_0=6.7e-05,
            K_0=17500e6,
            Kprime_0=7.0,
            Kdprime_0=-3.94e-10,
            dKdT_0=-0.0,
            n=10.0,
            molar_mass=0.2182472,
        )
        Mineral.__init__(self)
def cov():
    """
    A function which loads and returns the variance-covariance matrix of the
    zero-point energies of all the endmembers in the dataset.

    Returns
    -------
    cov : dictionary
        Dictionary keys are:
        - endmember_names: a list of endmember names, and
        - covariance_matrix: a 2D variance-covariance array for the
          endmember zero-point energies of formation
    """
    # Import lazily so the covariance table is only loaded on demand;
    # alias the imported name to avoid shadowing this function.
    from .HP_2011_ds62_cov import cov as _cov_data
    return _cov_data
|
CaymanUnterbornREPO_NAMEExoPlexPATH_START.@ExoPlex_extracted@ExoPlex-master@ExoPlex@burnman@minerals@HP_2011_ds62.py@.PATH_END.py
|
{
"filename": "test_viewers.py",
"repo_name": "spacetelescope/jdaviz",
"repo_path": "jdaviz_extracted/jdaviz-main/jdaviz/configs/imviz/tests/test_viewers.py",
"type": "Python"
}
|
import numpy as np
import pytest
from regions import CirclePixelRegion, PixCoord
from jdaviz.app import Application
from jdaviz.core.config import get_configuration
from jdaviz.configs.imviz.helper import Imviz
from jdaviz.configs.imviz.plugins.viewers import ImvizImageView
from jdaviz.configs.imviz.tests.utils import BaseImviz_WCS_NoWCS
@pytest.mark.parametrize(
    ('desired_name', 'actual_name'),
    [(None, 'imviz-1'),
     ('babylon-5', 'babylon-5')])
def test_create_destroy_viewer(imviz_helper, desired_name, actual_name):
    """Creating then destroying a second viewer keeps app and plugin state consistent."""
    app = imviz_helper.app
    assert app.get_viewer_ids() == ['imviz-0']

    new_viewer = imviz_helper.create_image_viewer(viewer_name=desired_name)
    expected_ids = sorted(['imviz-0', actual_name])

    assert new_viewer.top_visible_data_label == ''
    assert isinstance(new_viewer, ImvizImageView)
    assert new_viewer is app._viewer_store.get(actual_name), list(app._viewer_store.keys())  # noqa
    assert app.get_viewer_ids() == expected_ids

    # Make sure plugins that store viewer_items are updated.
    assert sorted(imviz_helper.plugins['Compass'].viewer.labels) == expected_ids
    plot_options = imviz_helper.plugins['Plot Options']
    plot_options.multiselect = True
    plot_options.viewer = expected_ids

    imviz_helper.destroy_viewer(actual_name)
    assert app.get_viewer_ids() == ['imviz-0']
    assert plot_options.viewer.selected == ['imviz-0']
    assert plot_options.viewer.labels == ['imviz-0']
def test_get_viewer_created(imviz_helper):
    """A viewer created without a reference name is still reachable by its ID."""
    created = imviz_helper.create_image_viewer()
    looked_up = imviz_helper.app.get_viewer('imviz-1')
    assert created is looked_up
def test_destroy_viewer_invalid(imviz_helper):
    """Destroying an unknown viewer is a no-op; the default viewer is protected."""
    default_ids = ['imviz-0']
    assert imviz_helper.app.get_viewer_ids() == default_ids

    imviz_helper.destroy_viewer('foo')
    assert imviz_helper.app.get_viewer_ids() == default_ids

    with pytest.raises(ValueError, match='cannot be destroyed'):
        imviz_helper.destroy_viewer('imviz-0')
    assert imviz_helper.app.get_viewer_ids() == default_ids
def test_destroy_viewer_with_subset(imviz_helper):
    """Regression test for https://github.com/spacetelescope/jdaviz/issues/1614"""
    image = np.ones((10, 10))
    imviz_helper.load_data(image, data_label='my_array')

    # Second viewer showing the same data.
    imviz_helper.create_image_viewer(viewer_name='second')
    imviz_helper.app.add_data_to_viewer('second', 'my_array')

    # Define a circular Subset on the data.
    circle = CirclePixelRegion(center=PixCoord(x=4, y=4), radius=2)
    imviz_helper.plugins['Subset Tools'].import_region(circle)

    # Removing the viewer and then the Subset must not raise.
    imviz_helper.destroy_viewer('second')
    imviz_helper._delete_region('Subset 1')
def test_mastviz_config():
    """Use case from https://github.com/spacetelescope/jdaviz/issues/1037

    Builds a MAST-flavored Imviz configuration and checks that data loads
    into the renamed viewer.
    """
    # create a MAST config dict
    cc = get_configuration('imviz')
    cc['settings']['viewer_spec'] = cc['settings'].get('configuration', 'default')
    cc['settings']['configuration'] = 'mastviz'
    cc['settings']['visible'] = {'menu_bar': False, 'toolbar': False, 'tray': False,
                                 'tab_headers': False}
    # Drop the creator tools if present.  (Was written as conditional-expression
    # side effects `x.remove(t) if x.count(t) else None`; a membership test is
    # the idiomatic equivalent and removes the same single occurrence.)
    for tool in ('g-data-tools', 'g-viewer-creator', 'g-image-viewer-creator'):
        if tool in cc['toolbar']:
            cc['toolbar'].remove(tool)

    app = Application(cc)
    im = Imviz(app)
    im.load_data(np.ones((2, 2)), data_label='my_array')

    assert im.app.get_viewer_ids() == ['mastviz-0']
    assert im.app.data_collection[0].shape == (2, 2)
def test_zoom_center_radius_init(imviz_helper):
    """Regression test for https://github.com/spacetelescope/jdaviz/issues/3217"""
    imviz_helper.load_data(np.ones((10, 10)), data_label='my_array')
    state = imviz_helper.default_viewer._obj.state
    assert state.zoom_center_x > 0
    assert state.zoom_center_y > 0
    assert state.zoom_radius > 0
class TestDeleteData(BaseImviz_WCS_NoWCS):
    """Checks Plot Options state after destroying a viewer with configured layers."""

    def test_plot_options_after_destroy(self):
        self.imviz.create_image_viewer(viewer_name="imviz-1")
        self.imviz.app.add_data_to_viewer('imviz-1', 'no_wcs[SCI,1]')

        plot_options = self.imviz.plugins['Plot Options']
        plot_options.open_in_tray()
        plot_options.viewer = "imviz-1"
        plot_options.stretch_function = "Square Root"

        self.imviz.destroy_viewer("imviz-1")
        # Only the two layers of the remaining default viewer should survive.
        assert len(plot_options.layer.choices) == 2
|
spacetelescopeREPO_NAMEjdavizPATH_START.@jdaviz_extracted@jdaviz-main@jdaviz@configs@imviz@tests@test_viewers.py@.PATH_END.py
|
{
"filename": "MdB.py",
"repo_name": "Morisset/pyCloudy",
"repo_path": "pyCloudy_extracted/pyCloudy-master/pyCloudy/db/MdB.py",
"type": "Python"
}
|
import os
import sys
import subprocess
import numpy as np
from getpass import getpass
import pyCloudy as pc
from pyCloudy.utils.init import LIST_ELEM
from pyCloudy.utils.logging import my_logging
if pc.config.INSTALLED['pandas']:
import pandas as pd
import pandas.io.sql as psql
from io import StringIO
def _sql2numpy(sqltype):
if sqltype == 'float':
return 'f4'
if sqltype in ['double', 'real']:
return 'f8'
if sqltype[:7] == 'tinyint':
return 'i1'
if sqltype[:5] == 'short' or sqltype[:4] == 'int2':
return 'i2'
if sqltype[:4] == 'int(' or sqltype[:4] == 'int4':
return 'i4'
if sqltype[:4] == 'int8' or sqltype[:6] == 'bigint' or sqltype[:4] == 'long':
return 'i8'
if sqltype[:7] == 'varchar':
return 'S{0}'.format(sqltype.split('(')[1].split(')')[0])
if sqltype[:8] == 'datetime':
return 'S20'
return 'S50'
class MdB(object):
    """MySQL access layer for the OVN photoionization-model database.

    Wraps a MySQLdb/PyMySQL connection and provides helpers to execute raw
    SQL (`exec_dB`), build SELECT queries (`select_dB`, `count_dB`) and
    introspect table schemas (`get_fields`, `get_cols`, `get_dtype`).
    """

    # Logger shared by all MdB instances.
    MdBlog_ = my_logging()

    def __init__(self, OVN_dic = None, base_name = 'OVN', tmp_base_name = 'OVN_tmp',
                 user_name = 'OVN_user', user_passwd = 'getenv',
                 host = 'localhost', unix_socket = '/var/mysql/mysql.sock', port = 3306,
                 connect = True, master_table='tab'):
        """
        This is the package to deal with MySQL OVN database.
        You must have MySQL or PyMySQL library installed. The latest is easier to get working, as it comes with its own
        mysql client.
        Latter, we will also use the ODBC connector. Install the connector from MySQl: http://dev.mysql.com/downloads/connector/odbc/
        and then use pyodbc with:
        cnxn = pyodbc.connect('DRIVER={MySQL ODBC 5.2 Driver};SERVER=127.0.0.1;DATABASE=OVN;UID=OVN_user;PWD=oiii5007;SOCKET=/var/mysql/mysql.sock')

        Entries of OVN_dic (when given) override the corresponding keyword
        arguments. user_passwd may be a literal password, 'getenv' (read the
        "<user_name>_pass" environment variable) or 'getit' (interactive prompt).
        """
        self.log_ = self.__class__.MdBlog_
        self.calling = 'MdB'
        # Select whichever MySQL connector pyCloudy detected at import time.
        if pc.config.db_connector == 'MySQL' and pc.config.INSTALLED['MySQL']:
            import MySQLdb as SQLdb
        elif pc.config.db_connector == 'PyMySQL' and pc.config.INSTALLED['PyMySQL']:
            import pymysql as SQLdb
        else:
            self.log_.error('No SQL connector available', calling='MdB')
        self.SQLdb = SQLdb
        if OVN_dic is not None:
            # Any key present in OVN_dic wins over the keyword default.
            if 'base_name' in OVN_dic:
                base_name = OVN_dic['base_name']
            if 'tmp_base_name' in OVN_dic:
                tmp_base_name = OVN_dic['tmp_base_name']
            if 'user_name' in OVN_dic:
                user_name = OVN_dic['user_name']
            if 'user_passwd' in OVN_dic:
                user_passwd = OVN_dic['user_passwd']
            if 'host' in OVN_dic:
                host = OVN_dic['host']
            if 'unix_socket' in OVN_dic:
                unix_socket = OVN_dic['unix_socket']
            if 'port' in OVN_dic:
                port = OVN_dic['port']
            if 'master_table' in OVN_dic:
                master_table = OVN_dic['master_table']
        else:
            OVN_dic = {'base_name': base_name,
                       'tmp_base_name': tmp_base_name,
                       'user_name': user_name,
                       'user_passwd': user_passwd,
                       'host': host,
                       'unix_socket': unix_socket,
                       'port': port,
                       'master_table': master_table}
        self.OVN_dic = OVN_dic
        self.base_name = base_name
        self.tmp_base_name = tmp_base_name
        self.user_name = user_name
        if user_passwd == 'getenv':
            self.user_passwd = os.getenv('{0}_pass'.format(user_name))
        elif user_passwd == 'getit':
            self.user_passwd = getpass()
        else:
            self.user_passwd = user_passwd
        self.port = port
        self.host = host
        self.unix_socket = unix_socket
        self.table = master_table
        self._dB = None
        self._cursor = None
        self._cursor_tuple = None
        self.connected = False
        if connect:
            self.connect_dB()

    def __del__(self):
        # Close the connection when the object is garbage collected.
        if self.connected:
            self.close_dB()

    def connect_dB(self):
        """Open the MySQL connection and create dict- and tuple-style cursors."""
        if self.connected:
            self.log_.warn('Already connected', calling = self.calling)
            return None
        try:
            # Python-3 connectors are not given the unix_socket argument.
            if self.unix_socket is None or sys.version_info[0] >= 3:
                self._dB = self.SQLdb.connect(host = self.host, user = self.user_name, passwd = self.user_passwd,
                                              db = self.base_name, port = self.port)
            else:
                self._dB = self.SQLdb.connect(host = self.host, user = self.user_name, passwd = self.user_passwd,
                                              db = self.base_name, port = self.port, unix_socket = self.unix_socket)
            self.connected = True
            self.log_.message('Connected to {0}'.format(self.host), calling = self.calling)
        except:
            self.log_.warn('Connection to {0} failed'.format(self.host), calling = self.calling)
        try:
            self._cursor = self._dB.cursor(self.SQLdb.cursors.DictCursor)
            self._cursor_tuple = self._dB.cursor(self.SQLdb.cursors.Cursor)
        except:
            self.log_.warn('Cursor to {0} failed'.format(self.host), calling = self.calling)

    def use_dB(self, base_name = None):
        """Issue USE on the main database (or on base_name when given)."""
        if not self.connected:
            # Typo fixed in the error message ('serevr' -> 'server').
            pc.log_.error('Not connected to the server')
            return None
        if base_name is None:
            self._dB.select_db(self.base_name)
        else:
            self._dB.select_db(base_name)

    def use_dB_tmp(self, tmp_base_name = None):
        """Issue USE on the temporary database (or on tmp_base_name when given)."""
        if not self.connected:
            pc.log_.error('Not connected to the server')
            return None
        if tmp_base_name is None:
            self._dB.select_db(self.tmp_base_name)
        else:
            self._dB.select_db(tmp_base_name)

    def show_tables(self):
        """Print the result of SHOW TABLES on the current database."""
        print(self.exec_dB('show tables'))

    def share_dB_cursor(self, m):
        """Reuse the connection and cursors of another MdB instance m."""
        self._dB = m._dB
        self._cursor = m._cursor
        self._cursor_tuple = m._cursor_tuple

    def close_dB(self):
        """Close both cursors and the connection; warn if not connected."""
        if self.connected:
            self._cursor.close()
            self._cursor_tuple.close()
            self._dB.close()
            self.connected = False
            self.log_.message('Disconnected', calling = self.calling)
        else:
            self.log_.warn('Not connected', calling = self.calling)

    def exec_dB(self, command, format_ = 'dict', return_descr=False, commit=False):
        """Execute a raw SQL command and fetch all resulting rows.

        format_ is one of 'dict', 'tuple', 'numpy', 'dict2', 'pandas', 'rec'.
        Returns (result, N), or (result, N, description) when return_descr
        is True; N is the row count reported by the cursor.
        """
        if format_ not in ('dict', 'tuple', 'numpy', 'dict2', 'pandas', 'rec'):
            self.log_.error('format"{0}" not recognized'.format(format_), calling = self.calling)
        if not self.connected:
            self.log_.error('Not connected to a database', calling = self.calling)
            return None
        self.log_.message('Command sent: {0}'.format(command), calling = self.calling)
        if format_ == 'pandas':
            if pc.config.INSTALLED['pandas']:
                # NOTE(review): pandas.io.sql.frame_query was removed from
                # modern pandas; pd.read_sql is the replacement -- confirm
                # against the supported pandas versions.
                res = psql.frame_query(command, con=self._dB)
                return res, len(res)
            else:
                pc.log_.error('pandas is not available, use another format', calling=self.calling)
        # Dict-like outputs need the DictCursor, everything else the tuple cursor.
        if format_[:4] == 'dict' or format_ == 'rec':
            cursor = self._cursor
        else:
            cursor = self._cursor_tuple
        try:
            N = cursor.execute(command)
        except:
            self.log_.error('Error on executing {0}'.format(command), calling = self.calling)
        if commit:
            try:
                self._dB.commit()
            except:
                self.log_.error('Error on commiting {0}'.format(command), calling = self.calling)
        try:
            res = cursor.fetchall()
        except:
            self.log_.error('Error on reading result of {0}'.format(command), calling = self.calling)
        if format_ == 'rec':
            # Repack the list of row-dicts into a numpy record array.
            res = np.rec.fromrecords([list(e.values()) for e in res], names = list(res[0].keys()))
        if return_descr:
            return res, N, cursor.description
        else:
            return res, N

    def select_dB(self, select_ = '*', from_ = None, where_ = None, order_ = None, group_ = None,
                  limit_ = 1, format_ = 'dict', dtype_ = None, commit=False):
        """
        Build and run a SELECT query.

        select_, where_ and from_ may each be a string or a sequence of
        strings (joined with ', ', ' and ' and ', ' respectively).

        Usage:
            dd, n = mdb.select_dB(select_ = 'L_1, L_26, L_21', from_='tab',
                        where_ = 'ref like "DIG12HR_"',
                        limit_ = 100000,
                        format_='numpy')
            loglog(dd['L_26']/dd['L_1'], dd['L_21']/dd['L_1'], 'r+')
        """
        if from_ is None:
            from_ = self.table
        if isinstance(select_, (tuple, list)):
            this_select = ''
            for w in select_:
                this_select += w + ', '
            this_select = this_select[:-2]
        else:
            this_select = select_
        if isinstance(where_, (tuple, list)):
            this_where = ''
            for w in where_:
                this_where += w + ' and '
            this_where = this_where[:-5]
        else:
            this_where = where_
        if isinstance(from_, (tuple, list)):
            this_from = ''
            for w in from_:
                this_from += w + ', '
            this_from = this_from[:-2]
        else:
            this_from = from_
        req = 'SELECT {0} FROM {1} '.format(this_select, this_from)
        if where_ is not None:
            req += 'WHERE ({0}) '.format(this_where)
        if group_ is not None:
            req += 'GROUP BY {0} '.format(group_)
        if order_ is not None:
            req += 'ORDER BY {0} '.format(order_)
        if limit_ is not None:
            req += 'LIMIT {0:d}'.format(limit_)
        if format_ == 'pandas':
            if not pc.config.INSTALLED['pandas']:
                pc.log_.error('pandas not installed', calling='MdB.select_dB')
            res = pd.read_sql(req, con=self._dB)
            N = len(res)
        else:
            res, N = self.exec_dB(req, format_ = format_, commit=commit)
            if N == 0:
                res = None
            elif format_ == 'numpy':
                if dtype_ is None:
                    dtype_ = self.get_dtype(select_ = select_, from_ = from_)
                res = np.fromiter(res, dtype_)
            elif format_ == 'dict2':
                # Transpose the list of row-dicts into a dict of column arrays.
                res2 = {}
                for key in res[0]:
                    res2[key] = np.array([r[key] for r in res])
                res = res2
        return res, N

    def count_dB(self, from_ = None, where_ = None, commit=False):
        """Return the number of rows of from_ matching the optional where_ clause."""
        if from_ is None:
            from_ = self.table
        req = 'SELECT count(*) FROM {0}'.format(from_)
        if where_ is not None:
            req += ' WHERE ({0})'.format(where_)
        res, N = self.exec_dB(req, commit=commit)
        return res[0]['count(*)']

    def get_fields(self, from_ = None):
        """Return the sorted column names of a table (or comma-separated tables)."""
        if from_ is None:
            from_ = self.table
        froms = from_.split(',')
        if len(froms) == 1:
            res, N = self.exec_dB('SHOW COLUMNS FROM {0}'.format(from_))
            fields = [res[i]['Field'] for i in range(len(res))]
            fields.sort()
        else:
            # Recurse over each table and concatenate the results.
            fields = []
            for this_from in froms:
                fields.extend(self.get_fields(from_ = this_from))
        return fields

    def get_cols(self, select_ = '*', from_ = None):
        """Return the SHOW COLUMNS rows describing the selected fields."""
        if from_ is None:
            from_ = self.table
        froms = from_.split(',')
        if len(froms) == 1:
            if select_ == '*':
                res, N = self.exec_dB('SHOW COLUMNS FROM {0}'.format(from_))
            else:
                req = 'SHOW COLUMNS FROM {0} WHERE'.format(from_)
                fields = select_.split(',')
                for field in fields:
                    req += ' FIELD = "{0}" OR'.format(field.strip())
                # Drop the trailing ' OR'.
                req = req[:-3]
                res, N = self.exec_dB(req)
        else:
            res = []
            for this_from in froms:
                res += self.get_cols(select_ = select_, from_ = this_from)
        return res

    def get_dtype(self, select_ = '*', from_ = None):
        """Build the numpy dtype matching the columns of a SELECT statement."""
        if from_ is None:
            from_ = self.table
        dtype_list = []
        if select_ == '*':
            cols = self.get_cols(select_ = select_, from_ = from_)
            for col in cols:
                name = col['Field']
                sqltype = col['Type']
                ntype = _sql2numpy(sqltype)
                if (name, ntype) not in dtype_list:
                    dtype_list.append((name, ntype))
        else:
            fields = select_.split(',')
            for field in fields:
                name = None
                # Honor "expr as alias" by using the alias as the dtype name.
                if "as" in field:
                    name = field.split('as')[1].strip()
                    field = field.split('as')[0].strip()
                col = self.get_cols(select_ = field.strip(), from_ = from_)[0]
                if name is None:
                    name = col['Field']
                sqltype = col['Type']
                ntype = _sql2numpy(sqltype)
                if (name, ntype) not in dtype_list:
                    dtype_list.append((name, ntype))
        return np.dtype(dtype_list)

    def __repr__(self):
        if self.connected:
            return "<MdB connected to {0.base_name}@{0.host}>".format(self)
        else:
            return "<MdB disconnected from {0.base_name}@{0.host}>".format(self)
class MdB_subproc(object):
    """
    Alternative way, when MySQLdb not available. Still in development.

    Talks to the `mysql` command-line client through subprocess instead of a
    Python DB-API driver.
    """

    # Logger shared by all instances.
    MdBlog_ = my_logging()

    def __init__(self, OVN_dic = None, base_name = 'OVN', tmp_base_name = 'OVN_tmp',
                 user_name = 'OVN_user', user_passwd = 'getenv',
                 host = 'localhost', unix_socket = '/var/mysql/mysql.sock', port = 3306,
                 connect = True, master_table=None):
        """Store connection parameters; OVN_dic entries override the keywords."""
        self.log_ = self.__class__.MdBlog_
        self.calling = 'MdB'
        if OVN_dic is not None:
            if 'base_name' in OVN_dic:
                base_name = OVN_dic['base_name']
            if 'tmp_base_name' in OVN_dic:
                tmp_base_name = OVN_dic['tmp_base_name']
            if 'user_name' in OVN_dic:
                user_name = OVN_dic['user_name']
            if 'user_passwd' in OVN_dic:
                user_passwd = OVN_dic['user_passwd']
            if 'host' in OVN_dic:
                host = OVN_dic['host']
            if 'unix_socket' in OVN_dic:
                unix_socket = OVN_dic['unix_socket']
            if 'port' in OVN_dic:
                port = OVN_dic['port']
            if 'master_table' in OVN_dic:
                master_table = OVN_dic['master_table']
        self.base_name = base_name
        self.tmp_base_name = tmp_base_name
        self.user_name = user_name
        if user_passwd == 'getenv':
            self.user_passwd = os.getenv('{0}_pass'.format(user_name))
        elif user_passwd == 'getit':
            self.user_passwd = getpass()
        else:
            self.user_passwd = user_passwd
        self.port = port
        self.host = host
        self.unix_socket = unix_socket
        self.table = master_table
        self._dB = None
        self._cursor = None
        self._cursor_tuple = None
        # There is no persistent connection: each command spawns a client.
        self.connected = True

    def connect_dB(self):
        """No-op: each command spawns its own mysql client process."""
        pass

    def close_dB(self):
        """No-op: nothing persistent to close."""
        pass

    def exec_dB(self, command, outfile=None):
        """Pipe command to the mysql client.

        Returns (stdout, length); stdout is None when outfile is given, in
        which case the client output is written to that file instead.
        """
        if not self.connected:
            self.log_.error('Not connected to a database', calling = self.calling)
            return None
        self.log_.message('Command sent: {0}'.format(command), calling = self.calling)
        if outfile is None:
            stdout = subprocess.PIPE
        else:
            # Bugfix: the Python 2 builtin file() does not exist in Python 3.
            stdout = open(outfile, 'w')
        # universal_newlines=True makes stdin/stdout text streams, so
        # communicate() can be fed the command string under Python 3.
        proc = subprocess.Popen(["mysql",
                                 "--host={0}".format(self.host),
                                 "--user={0}".format(self.user_name),
                                 "--password={0}".format(self.user_passwd),
                                 "--port={0}".format(self.port),
                                 "{0}".format(self.base_name)],
                                stdin=subprocess.PIPE,
                                stdout=stdout,
                                universal_newlines=True)
        out, err = proc.communicate(command)
        if outfile is not None:
            stdout.close()
        try:
            N = len(out)
        except:
            N = None
        return out, N

    def select_dB(self, select_ = '*', from_ = None, where_ = None, order_ = None, group_ = None, limit_ = 1,
                  format_ = 'dict2', dtype_ = None, outfile=None):
        """
        Usage:
            dd, n = mdb.select_dB(select_ = 'L_1, L_26, L_21', from_='tab,'
                        where_ = 'ref like "DIG12HR_"',
                        limit_ = 100000,
                        format_='numpy')
            loglog(dd['L_26']/dd['L_1'], dd['L_21']/dd['L_1'], 'r+')
        """
        if from_ is None:
            from_ = self.table
        req = 'SELECT {0} FROM {1} '.format(select_, from_)
        if where_ is not None:
            req += 'WHERE ({0}) '.format(where_)
        # Bugfix: GROUP BY must precede ORDER BY in SQL (now consistent with
        # MdB.select_dB; the previous order produced invalid statements).
        if group_ is not None:
            req += 'GROUP BY {0} '.format(group_)
        if order_ is not None:
            req += 'ORDER BY {0} '.format(order_)
        if limit_ is not None:
            req += 'LIMIT {0:d}'.format(limit_)
        res_tmp, N = self.exec_dB(req, outfile=outfile)
        if N == 0 or N is None:
            res = None
        else:
            # NOTE(review): res_tmp[0] is the first element of the client's
            # stdout -- confirm the tab-separated output parses as intended.
            res = np.genfromtxt(StringIO(res_tmp[0]), names=True, delimiter='\\t')
            N = len(res)
        """
        if format_ in ('dict', 'dict2'):
            res = {}
            resnp = np.array(res_tmp[1:-1])
            for i, key in enumerate(res_tmp[0]):
                try:
                    res[key] = np.array(resnp[:,i], dtype='float')
                except:
                    res[key] = resnp[:,i]
        """
        # NOTE(review): this only returns a value when outfile is given;
        # possibly the condition was meant to be `is None` -- confirm callers.
        if outfile is not None:
            return res, N
|
MorissetREPO_NAMEpyCloudyPATH_START.@pyCloudy_extracted@pyCloudy-master@pyCloudy@db@MdB.py@.PATH_END.py
|
{
"filename": "_shadow.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/densitymapbox/hoverlabel/font/_shadow.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class ShadowValidator(_plotly_utils.basevalidators.StringValidator):
    """Validator for the densitymapbox.hoverlabel.font.shadow property."""

    def __init__(
        self,
        plotly_name="shadow",
        parent_name="densitymapbox.hoverlabel.font",
        **kwargs,
    ):
        # Fill in defaults only when the caller did not supply them.
        kwargs.setdefault("array_ok", True)
        kwargs.setdefault("edit_type", "none")
        super(ShadowValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@densitymapbox@hoverlabel@font@_shadow.py@.PATH_END.py
|
{
"filename": "_text.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/surface/legendgrouptitle/_text.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class TextValidator(_plotly_utils.basevalidators.StringValidator):
    """Validator for the surface.legendgrouptitle.text property."""

    def __init__(
        self, plotly_name="text", parent_name="surface.legendgrouptitle", **kwargs
    ):
        # Fill in the default only when the caller did not supply it.
        kwargs.setdefault("edit_type", "style")
        super(TextValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@surface@legendgrouptitle@_text.py@.PATH_END.py
|
{
"filename": "test_decorators.py",
"repo_name": "NuSpaceSim/nuSpaceSim",
"repo_path": "nuSpaceSim_extracted/nuSpaceSim-main/test/utils/test_decorators.py",
"type": "Python"
}
|
import numpy as np
# from nuspacesim import results_table
from nuspacesim.utils import decorators
# def test_nss_result_store():
# @decorators.nss_result_store("columnA", "columnB")
# def test_base_f(input1, input2):
# """this is the docstring"""
# return input1 + input2, input1 * input2
#
# iA, iB = np.random.randn(2, 128)
# cA, cB = test_base_f(iA, iB)
#
# assert np.array_equal(cA, iA + iB)
# assert np.array_equal(cB, iA * iB)
#
# sim = results_table.init()
# cA, cB = test_base_f(iA, iB, store=sim)
#
# assert np.array_equal(cA, iA + iB)
# assert np.array_equal(cB, iA * iB)
# assert np.array_equal(sim["columnA"], cA)
# assert np.array_equal(sim["columnB"], cB)
#
# assert test_base_f.__doc__ == "this is the docstring"
#
#
# def test_nss_result_store_scalar():
# @decorators.nss_result_store_scalar(
# ["valueA", "valueB"],
# ["terse comment", "Verbose, pompous, and overly long commentB"],
# )
# def test_base_f(input1, input2):
# """this is the docstring"""
# return float(input1 + input2), int(input1 * input2)
#
# iAB = np.random.randn(2, 1)
# iA, iB = iAB[0, 0], iAB[1, 0]
# vA, vB = test_base_f(iA, iB)
#
# assert vA == iA + iB
# assert vB == int(iA * iB)
#
# sim = results_table.init()
# vA, vB = test_base_f(iA, iB, store=sim)
#
# assert vA == iA + iB
# assert vB == int(iA * iB)
# assert sim.meta["valueA"][0] == vA
# assert sim.meta["valueB"][0] == vB
# assert sim.meta["valueA"][1] == "terse comment"
# assert sim.meta["valueB"][1] == "Verbose, pompous, and overly long commentB"
#
# assert test_base_f.__doc__ == "this is the docstring"
def test_nss_result_plot():
    """nss_result_plot registers the plotter and invokes it only when asked."""
    plot_written = False
    iA, iB = np.random.randn(2, 128)

    def plotter(inputs, results, *args, **kwargs):
        nonlocal plot_written
        plot_written = True
        assert plot_written
        assert len(inputs) == 2
        assert len(results) == 2
        assert len(args) == 0
        assert len(kwargs) == 0
        assert np.array_equal(inputs[0], iA)
        assert np.array_equal(inputs[1], iB)
        assert np.all(np.equal(results[0], 0.0))
        assert np.all(np.equal(results[1], 1.0))

    @decorators.nss_result_plot(plotter)
    def test_base_f(input1, input2):
        """this is the docstring"""
        return np.zeros_like(input1), np.ones_like(input2)

    from nuspacesim.utils.plot_function_registry import registry

    assert plotter.__name__ in registry

    def _check_values(cA, cB):
        assert np.all(np.equal(cA, 0.0))
        assert np.all(np.equal(cB, 1.0))

    # Without a plot argument the plotter must not fire.
    assert not plot_written
    _check_values(*test_base_f(iA, iB))
    assert not plot_written

    # Every accepted form of the plot argument triggers the plotter:
    # a callable, its registered name, or a list of either.
    for plot_arg in (plotter, plotter.__name__, [plotter], [plotter.__name__]):
        plot_written = False
        _check_values(*test_base_f(iA, iB, plot=plot_arg))
        assert plot_written
|
NuSpaceSimREPO_NAMEnuSpaceSimPATH_START.@nuSpaceSim_extracted@nuSpaceSim-main@test@utils@test_decorators.py@.PATH_END.py
|
{
"filename": "update_benchmarks.py",
"repo_name": "andycasey/ges-idr5",
"repo_path": "ges-idr5_extracted/ges-idr5-master/scripts/update_benchmarks.py",
"type": "Python"
}
|
"""
Update the benchmark parameters to include some values -- even if they are
uncertain -- and to include a less-biased value for HD 140283.
"""
from astropy.table import Table
input_path = "../fits-templates/benchmarks/GES_iDR5_FGKMCoolWarm_Benchmarks_AcceptedParams_01082016.fits"
output_path = "../fits-templates/benchmarks/GES_iDR5_FGKM_Benchmarks_ARC_29092016.fits"
overwrite = False
benchmarks = Table.read(input_path)
print("Read in benchmarks from {}".format(input_path))
updated_values = {
"HD140283": {
"TEFF": 5700,
"E_TEFF": 200,
"LOGG": 3.58,
"E_LOGG": 0.11,
"FEH": -2.43,
},
"HD220009": {
"TEFF": 4217,
"E_TEFF": 60,
"LOGG": 1.43,
"E_LOGG": 0.12,
"FEH": -0.75,
}
}
for ges_fld, params in updated_values.items():
match = np.array([each.strip() == ges_fld for each in benchmarks["GES_FLD"]])
for key, value in params.items():
benchmarks[key][match] = value
print("Updated {} = {} for {}".format(key, value, ges_fld))
benchmarks.write(output_path, overwrite=overwrite)
print("Written new file to {}".format(output_path))
|
andycaseyREPO_NAMEges-idr5PATH_START.@ges-idr5_extracted@ges-idr5-master@scripts@update_benchmarks.py@.PATH_END.py
|
{
"filename": "test_serializable.py",
"repo_name": "langchain-ai/langchain",
"repo_path": "langchain_extracted/langchain-master/libs/community/tests/unit_tests/load/test_serializable.py",
"type": "Python"
}
|
import importlib
import inspect
import pkgutil
from types import ModuleType
from langchain_core.load.mapping import SERIALIZABLE_MAPPING
def import_all_modules(package_name: str) -> dict:
    """Walk a package tree and collect every serializable class it exposes.

    Returns a dict mapping ``tuple(cls.lc_id())`` to a tuple of the class's
    module path parts plus its name. Raises ValueError if two distinct
    classes claim the same lc_id.
    """
    root = importlib.import_module(package_name)
    found: dict = {}

    def _scan(module: ModuleType) -> None:
        # Consider dir() results plus anything advertised via __all__.
        names = set(dir(module))
        names.update(getattr(module, "__all__", ()))
        for name in sorted(names):
            attr = getattr(module, name)
            if not inspect.isclass(attr):
                continue
            if not (hasattr(attr, "is_lc_serializable") and isinstance(attr, type)):
                continue
            flag = attr.is_lc_serializable()  # type: ignore
            if not (isinstance(flag, bool) and flag):
                continue
            key = tuple(attr.lc_id())  # type: ignore
            value = tuple(attr.__module__.split(".") + [attr.__name__])
            if found.get(key, value) != value:
                raise ValueError
            found[key] = value

    _scan(root)
    for _, modname, _ in pkgutil.walk_packages(root.__path__, root.__name__ + "."):
        try:
            submodule = importlib.import_module(modname)
        except ModuleNotFoundError:
            # Optional dependencies may be missing; skip unimportable modules.
            continue
        _scan(submodule)
    return found
def test_import_all_modules() -> None:
    """Test import all modules works as expected"""
    all_modules = import_all_modules("langchain")
    chat_model_keys = [
        key
        for key in all_modules
        if len(key) == 4 and tuple(key[:2]) == ("langchain", "chat_models")
    ]
    # This test will need to be updated if new serializable classes are added
    # to community
    expected = [
        ("langchain", "chat_models", "azure_openai", "AzureChatOpenAI"),
        ("langchain", "chat_models", "bedrock", "BedrockChat"),
        ("langchain", "chat_models", "anthropic", "ChatAnthropic"),
        ("langchain", "chat_models", "fireworks", "ChatFireworks"),
        ("langchain", "chat_models", "google_palm", "ChatGooglePalm"),
        ("langchain", "chat_models", "openai", "ChatOpenAI"),
        ("langchain", "chat_models", "vertexai", "ChatVertexAI"),
    ]
    assert sorted(chat_model_keys) == sorted(expected)
def test_serializable_mapping() -> None:
    """Check SERIALIZABLE_MAPPING agrees with the serializable classes
    actually discoverable in ``langchain``, and that every recorded import
    path resolves to a class whose ``lc_id`` matches its mapping key."""
    # Mapping entries that are known not to be discoverable by walking the
    # ``langchain`` package, with the reason for each exclusion.
    to_skip = {
        # This should have had a different namespace, as it was never
        # exported from the langchain module, but we keep for whoever has
        # already serialized it.
        ("langchain", "prompts", "image", "ImagePromptTemplate"): (
            "langchain_core",
            "prompts",
            "image",
            "ImagePromptTemplate",
        ),
        # This is not exported from langchain, only langchain_core
        ("langchain_core", "prompts", "structured", "StructuredPrompt"): (
            "langchain_core",
            "prompts",
            "structured",
            "StructuredPrompt",
        ),
        # This is not exported from langchain, only langchain_core
        ("langchain", "schema", "messages", "RemoveMessage"): (
            "langchain_core",
            "messages",
            "modifier",
            "RemoveMessage",
        ),
        ("langchain", "chat_models", "mistralai", "ChatMistralAI"): (
            "langchain_mistralai",
            "chat_models",
            "ChatMistralAI",
        ),
        ("langchain_groq", "chat_models", "ChatGroq"): (
            "langchain_groq",
            "chat_models",
            "ChatGroq",
        ),
        # TODO(0.3): For now we're skipping the below two tests. Need to fix
        # so that it only runs when langchain-aws, langchain-google-genai
        # are installed.
        ("langchain", "chat_models", "bedrock", "ChatBedrock"): (
            "langchain_aws",
            "chat_models",
            "bedrock",
            "ChatBedrock",
        ),
        ("langchain_google_genai", "chat_models", "ChatGoogleGenerativeAI"): (
            "langchain_google_genai",
            "chat_models",
            "ChatGoogleGenerativeAI",
        ),
    }
    serializable_modules = import_all_modules("langchain")
    # Every mapping entry must either be discoverable or explicitly skipped.
    missing = set(SERIALIZABLE_MAPPING).difference(
        set(serializable_modules).union(to_skip)
    )
    assert missing == set()
    # Conversely, nothing discoverable may be absent from the mapping.
    extra = set(serializable_modules).difference(SERIALIZABLE_MAPPING)
    assert extra == set()
    for k, import_path in serializable_modules.items():
        import_dir, import_obj = import_path[:-1], import_path[-1]
        # Import module
        mod = importlib.import_module(".".join(import_dir))
        # Import class
        cls = getattr(mod, import_obj)
        # The class must serialize under exactly the id it was found by.
        assert list(k) == cls.lc_id()
|
langchain-aiREPO_NAMElangchainPATH_START.@langchain_extracted@langchain-master@libs@community@tests@unit_tests@load@test_serializable.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "RadioAstronomySoftwareGroup/pyuvdata",
"repo_path": "pyuvdata_extracted/pyuvdata-main/src/pyuvdata/data/__init__.py",
"type": "Python"
}
|
"""Init file for data directory."""
DATA_PATH = __path__[0]
|
RadioAstronomySoftwareGroupREPO_NAMEpyuvdataPATH_START.@pyuvdata_extracted@pyuvdata-main@src@pyuvdata@data@__init__.py@.PATH_END.py
|
{
"filename": "deprecated_module.py",
"repo_name": "tensorflow/tensorflow",
"repo_path": "tensorflow_extracted/tensorflow-master/tensorflow/python/util/deprecated_module.py",
"type": "Python"
}
|
# Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A deprecated module.
For testing `deprecation.deprecate_moved_module`.
"""
from tensorflow.python.util import deprecated_module_new
from tensorflow.python.util import deprecation

# Delegate module attribute lookups to `deprecated_module_new` via a
# module-level __getattr__, warning that this module moved as of TF 2.9.
__getattr__ = deprecation.deprecate_moved_module(
    __name__, deprecated_module_new, "2.9")
|
tensorflowREPO_NAMEtensorflowPATH_START.@tensorflow_extracted@tensorflow-master@tensorflow@python@util@deprecated_module.py@.PATH_END.py
|
{
"filename": "corner_modified.py",
"repo_name": "t-brandt/orvara",
"repo_path": "orvara_extracted/orvara-master/orvara/corner_modified.py",
"type": "Python"
}
|
# -*- coding: utf-8 -*-
from __future__ import print_function, absolute_import
import logging
import numpy as np
import matplotlib.pyplot as pl
from matplotlib.ticker import MaxNLocator, NullLocator
from matplotlib.colors import LinearSegmentedColormap, colorConverter
from matplotlib.ticker import ScalarFormatter
try:
from scipy.ndimage import gaussian_filter
except ImportError:
gaussian_filter = None
__all__ = ["corner", "hist2d", "quantile"]
def corner(xs, bins=20, range=None, weights=None, color="k", hist_bin_factor=1,
           smooth=None, smooth1d=None,
           labels=None, label_kwargs=None,
           show_titles=False, title_fmt=".2f", title_kwargs=None,
           truths=None, truth_color="#4682b4",
           scale_hist=False, quantiles=None, verbose=False, fig=None,
           max_n_ticks=5, top_ticks=False, use_math_text=False, reverse=False,
           hist_kwargs=None, **hist2d_kwargs):
    """
    Make a *sick* corner plot showing the projections of a data set in a
    multi-dimensional space. kwargs are passed to hist2d() or used for
    `matplotlib` styling.

    Parameters
    ----------
    xs : array_like[nsamples, ndim]
        The samples. This should be a 1- or 2-dimensional array. For a 1-D
        array this results in a simple histogram. For a 2-D array, the zeroth
        axis is the list of samples and the next axis are the dimensions of
        the space.
    bins : int or array_like[ndim,]
        The number of bins to use in histograms, either as a fixed value for
        all dimensions, or as a list of integers for each dimension.
    weights : array_like[nsamples,]
        The weight of each sample. If `None` (default), samples are given
        equal weight.
    color : str
        A ``matplotlib`` style color for all histograms.
    hist_bin_factor : float or array_like[ndim,]
        This is a factor (or list of factors, one for each dimension) that
        will multiply the bin specifications when making the 1-D histograms.
        This is generally used to increase the number of bins in the 1-D plots
        to provide more resolution.
    smooth, smooth1d : float
        The standard deviation for Gaussian kernel passed to
        `scipy.ndimage.gaussian_filter` to smooth the 2-D and 1-D histograms
        respectively. If `None` (default), no smoothing is applied.
    labels : iterable (ndim,)
        A list of names for the dimensions. If a ``xs`` is a
        ``pandas.DataFrame``, labels will default to column names.
    label_kwargs : dict
        Any extra keyword arguments to send to the `set_xlabel` and
        `set_ylabel` methods.
    show_titles : bool
        Displays a title above each 1-D histogram showing the 0.5 quantile
        with the upper and lower errors supplied by the quantiles argument.
    title_fmt : string
        The format string for the quantiles given in titles. If you explicitly
        set ``show_titles=True`` and ``title_fmt=None``, the labels will be
        shown as the titles. (default: ``.2f``)
    title_kwargs : dict
        Any extra keyword arguments to send to the `set_title` command.
    range : iterable (ndim,)
        A list where each element is either a length 2 tuple containing
        lower and upper bounds or a float in range (0., 1.)
        giving the fraction of samples to include in bounds, e.g.,
        [(0.,10.), (1.,5), 0.999, etc.].
        If a fraction, the bounds are chosen to be equal-tailed.
    truths : iterable (ndim,)
        A list of reference values to indicate on the plots. Individual
        values can be omitted by using ``None``.
    truth_color : str
        A ``matplotlib`` style color for the ``truths`` makers.
    scale_hist : bool
        Should the 1-D histograms be scaled in such a way that the zero line
        is visible?
    quantiles : iterable
        A list of fractional quantiles to show on the 1-D histograms as
        vertical dashed lines.
    verbose : bool
        If true, print the values of the computed quantiles.
    plot_contours : bool
        Draw contours for dense regions of the plot.
    use_math_text : bool
        If true, then axis tick labels for very large or small exponents will
        be displayed as powers of 10 rather than using `e`.
    reverse : bool
        If true, plot the corner plot starting in the upper-right corner
        instead of the usual bottom-left corner
    max_n_ticks: int
        Maximum number of ticks to try to use
    top_ticks : bool
        If true, label the top ticks of each axis
    fig : matplotlib.Figure
        Overplot onto the provided figure object.
    hist_kwargs : dict
        Any extra keyword arguments to send to the 1-D histogram plots.
    **hist2d_kwargs
        Any remaining keyword arguments are sent to `corner.hist2d` to generate
        the 2-D histogram plots.
    """
    if quantiles is None:
        quantiles = []
    if title_kwargs is None:
        title_kwargs = dict()
    if label_kwargs is None:
        label_kwargs = dict()

    # Try filling in labels from pandas.DataFrame columns.
    if labels is None:
        try:
            labels = xs.columns
        except AttributeError:
            pass

    # Deal with 1D sample lists.
    xs = np.atleast_1d(xs)
    if len(xs.shape) == 1:
        xs = np.atleast_2d(xs)
    else:
        assert len(xs.shape) == 2, "The input sample array must be 1- or 2-D."
        xs = xs.T
    assert xs.shape[0] <= xs.shape[1], "I don't believe that you want more " \
                                       "dimensions than samples!"

    # Parse the weight array.
    if weights is not None:
        weights = np.asarray(weights)
        if weights.ndim != 1:
            raise ValueError("Weights must be 1-D")
        if xs.shape[1] != weights.shape[0]:
            raise ValueError("Lengths of weights must match number of samples")

    # Parse the parameter ranges.
    if range is None:
        if "extents" in hist2d_kwargs:
            # FIX: logging.warn is deprecated; logging.warning is canonical.
            logging.warning("Deprecated keyword argument 'extents'. "
                            "Use 'range' instead.")
            range = hist2d_kwargs.pop("extents")
        else:
            range = [[x.min(), x.max()] for x in xs]

            # Check for parameters that never change.
            m = np.array([e[0] == e[1] for e in range], dtype=bool)
            if np.any(m):
                raise ValueError(("It looks like the parameter(s) in "
                                  "column(s) {0} have no dynamic range. "
                                  "Please provide a `range` argument.")
                                 .format(", ".join(map(
                                     "{0}".format, np.arange(len(m))[m]))))
    else:
        # If any of the extents are percentiles, convert them to ranges.
        # Also make sure it's a normal list.
        range = list(range)
        for i, _ in enumerate(range):
            try:
                emin, emax = range[i]
            except TypeError:
                q = [0.5 - 0.5*range[i], 0.5 + 0.5*range[i]]
                range[i] = quantile(xs[i], q, weights=weights)

    if len(range) != xs.shape[0]:
        raise ValueError("Dimension mismatch between samples and range")

    # Parse the bin specifications.
    try:
        bins = [int(bins) for _ in range]
    except TypeError:
        if len(bins) != len(range):
            raise ValueError("Dimension mismatch between bins and range")
    try:
        hist_bin_factor = [float(hist_bin_factor) for _ in range]
    except TypeError:
        if len(hist_bin_factor) != len(range):
            raise ValueError("Dimension mismatch between hist_bin_factor and "
                             "range")

    # Some magic numbers for pretty axis layout.
    K = len(xs)
    factor = 2.0           # size of one side of one panel
    if reverse:
        lbdim = 0.2 * factor   # size of left/bottom margin
        trdim = 0.5 * factor   # size of top/right margin
    else:
        lbdim = 0.5 * factor   # size of left/bottom margin
        trdim = 0.2 * factor   # size of top/right margin
    whspace = 0.05         # w/hspace size
    plotdim = factor * K + factor * (K - 1.) * whspace
    dim = lbdim + plotdim + trdim

    # Create a new figure if one wasn't provided.
    if fig is None:
        fig, axes = pl.subplots(K, K, figsize=(dim, dim))
    else:
        try:
            axes = np.array(fig.axes).reshape((K, K))
        except ValueError:
            # FIX: was a bare ``except:`` — only the reshape's shape-mismatch
            # ValueError is expected here; anything else should propagate.
            raise ValueError("Provided figure has {0} axes, but data has "
                             "dimensions K={1}".format(len(fig.axes), K))

    # Format the figure.
    lb = lbdim / dim
    tr = (lbdim + plotdim) / dim
    fig.subplots_adjust(left=lb, bottom=lb, right=tr, top=tr,
                        wspace=whspace, hspace=whspace)

    # Set up the default histogram keywords.
    if hist_kwargs is None:
        hist_kwargs = dict()
    hist_kwargs["color"] = hist_kwargs.get("color", color)
    if smooth1d is None:
        hist_kwargs["histtype"] = hist_kwargs.get("histtype", "step")

    for i, x in enumerate(xs):
        # Deal with masked arrays.
        if hasattr(x, "compressed"):
            x = x.compressed()

        if np.shape(xs)[0] == 1:
            ax = axes
        else:
            if reverse:
                ax = axes[K-i-1, K-i-1]
            else:
                ax = axes[i, i]

        # Plot the histograms.
        if smooth1d is None:
            bins_1d = int(max(1, np.round(hist_bin_factor[i] * bins[i])))
            n, _, _ = ax.hist(x, bins=bins_1d, weights=weights,
                              range=np.sort(range[i]), **hist_kwargs)
        else:
            if gaussian_filter is None:
                raise ImportError("Please install scipy for smoothing")
            n, b = np.histogram(x, bins=bins[i], weights=weights,
                                range=np.sort(range[i]))
            n = gaussian_filter(n, smooth1d)
            x0 = np.array(list(zip(b[:-1], b[1:]))).flatten()
            y0 = np.array(list(zip(n, n))).flatten()
            ax.plot(x0, y0, **hist_kwargs)

        if truths is not None and truths[i] is not None:
            ax.axvline(truths[i], color=truth_color)

        # Plot quantiles if wanted.
        if len(quantiles) > 0:
            qvalues = quantile(x, quantiles, weights=weights)
            for q in qvalues:
                ax.axvline(q, ls="dashed", color=color)

            if verbose:
                print("Quantiles:")
                print([item for item in zip(quantiles, qvalues)])

        if show_titles:
            title = None
            if title_fmt is not None:
                # Compute the quantiles for the title. This might redo
                # unneeded computation but who cares.
                q_16, q_50, q_84 = quantile(x, [0.16, 0.5, 0.84],
                                            weights=weights)
                q_m, q_p = q_50-q_16, q_84-q_50

                # Format the quantile display.
                # fmt = "{{0:{0}}}".format(title_fmt).format
                # modified to keep 2 significant figures in the errors
                # FIX: np.float was removed in NumPy 1.24; the builtin float
                # is the documented replacement and is numerically identical.
                idecimal_m = np.floor(np.log10(float('%.1g' % (q_m))))
                idecimal_p = np.floor(np.log10(float('%.1g' % (q_p))))
                if idecimal_m < 2:
                    fmt_m_e = "{{0:{0}}}".format(".%df" % (-idecimal_m + 1)).format
                else:
                    fmt_m_e = "{{0:{0}}}".format(".0f").format
                if idecimal_p < 2:
                    fmt_p_e = "{{0:{0}}}".format(".%df" % (-idecimal_p + 1)).format
                else:
                    fmt_p_e = "{{0:{0}}}".format(".0f").format
                min_decimals = min(idecimal_m, idecimal_p)
                if min_decimals < 2:
                    fmt = "{{0:{0}}}".format(".%df" % (-min_decimals + 1)).format
                else:
                    fmt = "{{0:{0}}}".format(".0f").format
                title = r"${{{0}}}_{{-{1}}}^{{+{2}}}$"
                title = title.format(fmt(q_50), fmt_m_e(q_m), fmt_p_e(q_p))

                # Add in the column name if it's given.
                if labels is not None:
                    title = "{0} = {1}".format(labels[i], title)

            elif labels is not None:
                title = "{0}".format(labels[i])

            if title is not None:
                if reverse:
                    ax.set_xlabel(title, **title_kwargs)
                else:
                    ax.set_title(title, **title_kwargs)

        # Set up the axes.
        ax.set_xlim(range[i])
        if scale_hist:
            maxn = np.max(n)
            ax.set_ylim(-0.1 * maxn, 1.1 * maxn)
        else:
            ax.set_ylim(0, 1.1 * np.max(n))
        ax.set_yticklabels([])
        if max_n_ticks == 0:
            ax.xaxis.set_major_locator(NullLocator())
            ax.yaxis.set_major_locator(NullLocator())
        else:
            ax.xaxis.set_major_locator(MaxNLocator(max_n_ticks, prune="lower"))
            ax.yaxis.set_major_locator(NullLocator())

        if i < K - 1:
            if top_ticks:
                ax.xaxis.set_ticks_position("top")
                [l.set_rotation(45) for l in ax.get_xticklabels()]
            else:
                ax.set_xticklabels([])
        else:
            if reverse:
                ax.xaxis.tick_top()
            [l.set_rotation(45) for l in ax.get_xticklabels()]
            if labels is not None:
                if reverse:
                    ax.set_title(labels[i], y=1.25, **label_kwargs)
                else:
                    ax.set_xlabel(labels[i], **label_kwargs)

            # use MathText for axes ticks
            ax.xaxis.set_major_formatter(
                ScalarFormatter(useMathText=use_math_text))

        for j, y in enumerate(xs):
            if np.shape(xs)[0] == 1:
                ax = axes
            else:
                if reverse:
                    ax = axes[K-i-1, K-j-1]
                else:
                    ax = axes[i, j]
            if j > i:
                ax.set_frame_on(False)
                ax.set_xticks([])
                ax.set_yticks([])
                continue
            elif j == i:
                continue

            # Deal with masked arrays.
            if hasattr(y, "compressed"):
                y = y.compressed()

            hist2d(y, x, ax=ax, range=[range[j], range[i]], weights=weights,
                   color=color, smooth=smooth, bins=[bins[j], bins[i]],
                   **hist2d_kwargs)

            if truths is not None:
                if truths[i] is not None and truths[j] is not None:
                    ax.plot(truths[j], truths[i], "s", color=truth_color)
                if truths[j] is not None:
                    ax.axvline(truths[j], color=truth_color)
                if truths[i] is not None:
                    ax.axhline(truths[i], color=truth_color)

            if max_n_ticks == 0:
                ax.xaxis.set_major_locator(NullLocator())
                ax.yaxis.set_major_locator(NullLocator())
            else:
                ax.xaxis.set_major_locator(MaxNLocator(max_n_ticks,
                                                       prune="lower"))
                ax.yaxis.set_major_locator(MaxNLocator(max_n_ticks,
                                                       prune="lower"))

            if i < K - 1:
                ax.set_xticklabels([])
            else:
                if reverse:
                    ax.xaxis.tick_top()
                [l.set_rotation(45) for l in ax.get_xticklabels()]
                if labels is not None:
                    ax.set_xlabel(labels[j], **label_kwargs)
                    if reverse:
                        ax.xaxis.set_label_coords(0.5, 1.4)
                    else:
                        ax.xaxis.set_label_coords(0.5, -0.3)

                # use MathText for axes ticks
                ax.xaxis.set_major_formatter(
                    ScalarFormatter(useMathText=use_math_text))

            if j > 0:
                ax.set_yticklabels([])
            else:
                if reverse:
                    ax.yaxis.tick_right()
                [l.set_rotation(45) for l in ax.get_yticklabels()]
                if labels is not None:
                    if reverse:
                        ax.set_ylabel(labels[i], rotation=-90, **label_kwargs)
                        ax.yaxis.set_label_coords(1.3, 0.5)
                    else:
                        ax.set_ylabel(labels[i], **label_kwargs)
                        ax.yaxis.set_label_coords(-0.3, 0.5)

                # use MathText for axes ticks
                ax.yaxis.set_major_formatter(
                    ScalarFormatter(useMathText=use_math_text))

    return fig
def quantile(x, q, weights=None):
    """Compute sample quantiles, optionally weighting each sample.

    With ``weights=None`` this simply delegates to ``numpy.percentile``
    (with ``q`` scaled to per-cent). Otherwise the quantiles are read off
    a weighted empirical CDF by linear interpolation.

    Parameters
    ----------
    x : array_like[nsamples,]
        The samples.
    q : array_like[nquantiles,]
        Quantiles to compute, each in ``[0, 1]``.
    weights : Optional[array_like[nsamples,]]
        Per-sample weights; must have the same length as ``x``.

    Returns
    -------
    array_like[nquantiles,]
        The sample quantiles computed at ``q``.

    Raises
    ------
    ValueError
        If any quantile is outside ``[0, 1]``, or if ``weights`` and ``x``
        have mismatched lengths.
    """
    x = np.atleast_1d(x)
    q = np.atleast_1d(q)

    if np.any(q < 0.0) or np.any(q > 1.0):
        raise ValueError("Quantiles must be between 0 and 1")

    # Unweighted case: numpy does the work directly.
    if weights is None:
        return np.percentile(x, list(100.0 * q))

    weights = np.atleast_1d(weights)
    if len(x) != len(weights):
        raise ValueError("Dimension mismatch: len(weights) != len(x)")

    order = np.argsort(x)
    sorted_weights = weights[order]
    # Build the weighted CDF, normalized to end at 1 and anchored at 0.
    cdf = np.cumsum(sorted_weights)[:-1]
    cdf /= cdf[-1]
    cdf = np.append(0, cdf)
    return np.interp(q, cdf, x[order]).tolist()
def hist2d(x, y, bins=20, range=None, weights=None, levels=None, smooth=None,
           ax=None, color=None, quiet=False,
           plot_datapoints=True, plot_density=True,
           plot_contours=True, no_fill_contours=False, fill_contours=False,
           contour_kwargs=None, contourf_kwargs=None, data_kwargs=None,
           pcolor_kwargs=None, **kwargs):
    """
    Plot a 2-D histogram of samples.

    Parameters
    ----------
    x : array_like[nsamples,]
        The samples.
    y : array_like[nsamples,]
        The samples.
    quiet : bool
        If true, suppress warnings for small datasets.
    levels : array_like
        The contour levels to draw.
    ax : matplotlib.Axes
        A axes instance on which to add the 2-D histogram.
    plot_datapoints : bool
        Draw the individual data points.
    plot_density : bool
        Draw the density colormap.
    plot_contours : bool
        Draw the contours.
    no_fill_contours : bool
        Add no filling at all to the contours (unlike setting
        ``fill_contours=False``, which still adds a white fill at the densest
        points).
    fill_contours : bool
        Fill the contours.
    contour_kwargs : dict
        Any additional keyword arguments to pass to the `contour` method.
    contourf_kwargs : dict
        Any additional keyword arguments to pass to the `contourf` method.
    data_kwargs : dict
        Any additional keyword arguments to pass to the `plot` method when
        adding the individual data points.
    pcolor_kwargs : dict
        Any additional keyword arguments to pass to the `pcolor` method when
        adding the density colormap.
    """
    if ax is None:
        ax = pl.gca()

    # Set the default range based on the data range if not provided.
    if range is None:
        if "extent" in kwargs:
            # FIX: logging.warn is deprecated; logging.warning is canonical.
            logging.warning("Deprecated keyword argument 'extent'. "
                            "Use 'range' instead.")
            range = kwargs["extent"]
        else:
            range = [[x.min(), x.max()], [y.min(), y.max()]]

    # Set up the default plotting arguments.
    if color is None:
        color = "k"

    # Choose the default "sigma" contour levels.
    if levels is None:
        levels = 1.0 - np.exp(-0.5 * np.arange(0.5, 2.1, 0.5) ** 2)

    # This is the color map for the density plot, over-plotted to indicate the
    # density of the points near the center.
    density_cmap = LinearSegmentedColormap.from_list(
        "density_cmap", [color, (1, 1, 1, 0)])

    # This color map is used to hide the points at the high density areas.
    white_cmap = LinearSegmentedColormap.from_list(
        "white_cmap", [(1, 1, 1), (1, 1, 1)], N=2)

    # This "color map" is the list of colors for the contour levels if the
    # contours are filled.
    rgba_color = colorConverter.to_rgba(color)
    contour_cmap = [list(rgba_color) for l in levels] + [rgba_color]
    for i, l in enumerate(levels):
        contour_cmap[i][-1] *= float(i) / (len(levels)+1)

    # We'll make the 2D histogram to directly estimate the density.
    try:
        H, X, Y = np.histogram2d(x.flatten(), y.flatten(), bins=bins,
                                 range=list(map(np.sort, range)),
                                 weights=weights)
    except ValueError:
        raise ValueError("It looks like at least one of your sample columns "
                         "have no dynamic range. You could try using the "
                         "'range' argument.")

    if smooth is not None:
        if gaussian_filter is None:
            raise ImportError("Please install scipy for smoothing")
        H = gaussian_filter(H, smooth)

    if plot_contours or plot_density:
        # Compute the density levels.
        Hflat = H.flatten()
        inds = np.argsort(Hflat)[::-1]
        Hflat = Hflat[inds]
        sm = np.cumsum(Hflat)
        sm /= sm[-1]
        V = np.empty(len(levels))
        for i, v0 in enumerate(levels):
            try:
                V[i] = Hflat[sm <= v0][-1]
            except IndexError:
                # FIX: was a bare ``except:``; only the empty-selection
                # IndexError is expected — fall back to the densest bin.
                V[i] = Hflat[0]
        V.sort()
        m = np.diff(V) == 0
        if np.any(m) and not quiet:
            logging.warning("Too few points to create valid contours")
        while np.any(m):
            V[np.where(m)[0][0]] *= 1.0 - 1e-4
            m = np.diff(V) == 0
        V.sort()

        # Compute the bin centers.
        X1, Y1 = 0.5 * (X[1:] + X[:-1]), 0.5 * (Y[1:] + Y[:-1])

        # Extend the array for the sake of the contours at the plot edges.
        H2 = H.min() + np.zeros((H.shape[0] + 4, H.shape[1] + 4))
        H2[2:-2, 2:-2] = H
        H2[2:-2, 1] = H[:, 0]
        H2[2:-2, -2] = H[:, -1]
        H2[1, 2:-2] = H[0]
        H2[-2, 2:-2] = H[-1]
        H2[1, 1] = H[0, 0]
        H2[1, -2] = H[0, -1]
        H2[-2, 1] = H[-1, 0]
        H2[-2, -2] = H[-1, -1]
        X2 = np.concatenate([
            X1[0] + np.array([-2, -1]) * np.diff(X1[:2]),
            X1,
            X1[-1] + np.array([1, 2]) * np.diff(X1[-2:]),
        ])
        Y2 = np.concatenate([
            Y1[0] + np.array([-2, -1]) * np.diff(Y1[:2]),
            Y1,
            Y1[-1] + np.array([1, 2]) * np.diff(Y1[-2:]),
        ])

    if plot_datapoints:
        if data_kwargs is None:
            data_kwargs = dict()
        data_kwargs["color"] = data_kwargs.get("color", color)
        data_kwargs["ms"] = data_kwargs.get("ms", 2.0)
        data_kwargs["mec"] = data_kwargs.get("mec", "none")
        data_kwargs["alpha"] = data_kwargs.get("alpha", 0.1)
        ax.plot(x, y, "o", zorder=-1, rasterized=True, **data_kwargs)

    # Plot the base fill to hide the densest data points.
    if (plot_contours or plot_density) and not no_fill_contours:
        ax.contourf(X2, Y2, H2.T, [V.min(), H.max()],
                    cmap=white_cmap, antialiased=False)

    if plot_contours and fill_contours:
        if contourf_kwargs is None:
            contourf_kwargs = dict()
        contourf_kwargs["colors"] = contourf_kwargs.get("colors", contour_cmap)
        contourf_kwargs["antialiased"] = contourf_kwargs.get("antialiased",
                                                             False)
        ax.contourf(X2, Y2, H2.T, np.concatenate([[0], V, [H.max()*(1+1e-4)]]),
                    **contourf_kwargs)

    # Plot the density map. This can't be plotted at the same time as the
    # contour fills.
    elif plot_density:
        if pcolor_kwargs is None:
            pcolor_kwargs = dict()
        ax.pcolor(X, Y, H.max() - H.T, cmap=density_cmap, **pcolor_kwargs)

    # Plot the contour edge colors.
    if plot_contours:
        if contour_kwargs is None:
            contour_kwargs = dict()
        contour_kwargs["colors"] = contour_kwargs.get("colors", color)
        ax.contour(X2, Y2, H2.T, V, **contour_kwargs)

    ax.set_xlim(range[0])
    ax.set_ylim(range[1])
|
t-brandtREPO_NAMEorvaraPATH_START.@orvara_extracted@orvara-master@orvara@corner_modified.py@.PATH_END.py
|
{
"filename": "_tick0.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/layout/yaxis/minor/_tick0.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class Tick0Validator(_plotly_utils.basevalidators.AnyValidator):
    """Validator for the ``layout.yaxis.minor.tick0`` property."""

    def __init__(self, plotly_name="tick0", parent_name="layout.yaxis.minor", **kwargs):
        # Pop the edit metadata first so explicit caller values override the
        # defaults, then forward everything else to the base validator.
        edit_type = kwargs.pop("edit_type", "ticks")
        implied_edits = kwargs.pop("implied_edits", {"tickmode": "linear"})
        super(Tick0Validator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            implied_edits=implied_edits,
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@layout@yaxis@minor@_tick0.py@.PATH_END.py
|
{
"filename": "chktrcmp.py",
"repo_name": "Caltech-IPAC/Montage",
"repo_path": "Montage_extracted/Montage-main/lib/src/freetype-2.5.4/src/tools/chktrcmp.py",
"type": "Python"
}
|
#!/usr/bin/env python3
#
# Check trace components in FreeType 2 source.
# Author: suzuki toshiya, 2009, 2013
#
# This code is explicitly into the public domain.
#
# FIX: ported from Python 2 to Python 3 — print() function, raw-string
# regex literals, `is not None` comparisons, and sorted() over dict views
# (py2's dict.keys() returned a list that could be sorted in place).

import sys
import os
import re

SRC_FILE_LIST = []
USED_COMPONENT = {}   # trace_XXX name -> list of "file:line" use sites
KNOWN_COMPONENT = {}  # trace_XXX name -> "file:line" of its FT_TRACE_DEF

SRC_FILE_DIRS = ["src"]
TRACE_DEF_FILES = ["include/internal/fttrace.h"]

# --------------------------------------------------------------
# Parse command line options
#

for i in range(1, len(sys.argv)):
    if sys.argv[i].startswith("--help"):
        print("Usage: %s [option]" % sys.argv[0])
        print("Search used-but-defined and defined-but-not-used trace_XXX macros")
        print("")
        print("  --help:")
        print("    Show this help")
        print("")
        print("  --src-dirs=dir1:dir2:...")
        print("    Specify the directories of C source files to be checked")
        print("    Default is %s" % ":".join(SRC_FILE_DIRS))
        print("")
        print("  --def-files=file1:file2:...")
        print("    Specify the header files including FT_TRACE_DEF()")
        print("    Default is %s" % ":".join(TRACE_DEF_FILES))
        print("")
        exit(0)
    if sys.argv[i].startswith("--src-dirs="):
        SRC_FILE_DIRS = sys.argv[i].replace("--src-dirs=", "", 1).split(":")
    elif sys.argv[i].startswith("--def-files="):
        TRACE_DEF_FILES = sys.argv[i].replace("--def-files=", "", 1).split(":")

# --------------------------------------------------------------
# Scan C source and header files using trace macros.
#

c_pathname_pat = re.compile(r'^.*\.[ch]$', re.IGNORECASE)
trace_use_pat = re.compile(r'^[ \t]*#define[ \t]+FT_COMPONENT[ \t]+trace_')

for d in SRC_FILE_DIRS:
    for (p, dlst, flst) in os.walk(d):
        for f in flst:
            if c_pathname_pat.match(f) is not None:
                src_pathname = os.path.join(p, f)
                line_num = 0
                for src_line in open(src_pathname, 'r'):
                    line_num = line_num + 1
                    src_line = src_line.strip()
                    if trace_use_pat.match(src_line) is not None:
                        # The component name is whatever follows "trace_".
                        component_name = trace_use_pat.sub('', src_line)
                        if component_name in USED_COMPONENT:
                            USED_COMPONENT[component_name].append(
                                "%s:%d" % (src_pathname, line_num))
                        else:
                            USED_COMPONENT[component_name] = \
                                ["%s:%d" % (src_pathname, line_num)]

# --------------------------------------------------------------
# Scan header file(s) defining trace macros.
#

trace_def_pat_opn = re.compile(r'^.*FT_TRACE_DEF[ \t]*\([ \t]*')
trace_def_pat_cls = re.compile(r'[ \t\)].*$')

for f in TRACE_DEF_FILES:
    line_num = 0
    for hdr_line in open(f, 'r'):
        line_num = line_num + 1
        hdr_line = hdr_line.strip()
        if trace_def_pat_opn.match(hdr_line) is not None:
            # Strip everything before the macro argument, then everything
            # after the component name itself.
            component_name = trace_def_pat_opn.sub('', hdr_line)
            component_name = trace_def_pat_cls.sub('', component_name)
            if component_name in KNOWN_COMPONENT:
                print("trace component %s is defined twice, "
                      "see %s and fttrace.h:%d" %
                      (component_name, KNOWN_COMPONENT[component_name],
                       line_num))
            else:
                KNOWN_COMPONENT[component_name] = "%s:%d" % \
                    (os.path.basename(f), line_num)

# --------------------------------------------------------------
# Compare the used and defined trace macros.
#

print("# Trace component used in the implementations but not defined in fttrace.h.")
for c in sorted(USED_COMPONENT):
    if c not in KNOWN_COMPONENT:
        print("Trace component %s (used in %s) is not defined." %
              (c, ", ".join(USED_COMPONENT[c])))

print("# Trace component is defined but not used in the implementations.")
for c in sorted(KNOWN_COMPONENT):
    if c not in USED_COMPONENT:
        # "any" is a catch-all component, deliberately never referenced.
        if c != "any":
            print("Trace component %s (defined in %s) is not used." %
                  (c, KNOWN_COMPONENT[c]))
|
Caltech-IPACREPO_NAMEMontagePATH_START.@Montage_extracted@Montage-main@lib@src@freetype-2.5.4@src@tools@chktrcmp.py@.PATH_END.py
|
{
"filename": "process_ps_pt.py",
"repo_name": "FRBs/zdm",
"repo_path": "zdm_extracted/zdm-main/papers/GWFRB/process_ps_pt.py",
"type": "Python"
}
|
"""
This script processes the output from "calc_PS_all_GW.py"
Copy over the p-values for that script to the right-most column
of "summary_2hr.txt". If you wish to perform calculations for
a different time window, do that too.
The idea is to calculate the effective extra trials from considering
other GW events.
"""
import numpy as np
def main():
    """Run the trials calculation for each summary file / time window pair."""
    windows_days = [2., 26.]  # coincidence windows in hours
    for infile, window in zip(['summary_2hr.txt'], windows_days):
        process(infile, window / 24., verbose=False)
def process(infile, trange, verbose=False):
    """Compute chance-coincidence statistics for one summary file.

    Parameters
    ----------
    infile : str
        Whitespace-separated table; column 0 holds event names and
        column 3 the spatial p-values.
    trange : float
        Coincidence time window in days.
    verbose : bool
        If True, print a per-event probability breakdown.
    """
    table = np.loadtxt(infile, dtype='str')
    pvals = table[:, 3].astype('float')
    names = table[:, 0]

    # Time-averaged FRB rate over the 85-day overlap of CHIME catalog 1
    # and O3 (138 FRBs / 85 days). The GW190425-local rate (1.93 / day)
    # is kept below for reference.
    # rate = 1.93 # units: days
    rate = 1.62  # 85 days, 138 FRBs

    # Exclude the event under test from the trials calculation.
    pvals[np.where(names == 'GW190425')[0]] = 0.

    expected = rate * trange
    extra = np.sum(pvals) * expected
    print(infile, "Total expected number is ", extra)
    print("Extra trials are ", extra / 0.135 + 1)

    # Temporal term: chance of at least one FRB inside the window.
    p_none_T = np.exp(-rate * trange)
    p_some_T = 1. - p_none_T

    # Spatial term: per-event chance of a positional match.
    p_none_S = np.exp(-pvals)
    p_some_S = 1. - p_none_S

    # Joint per-event probability, then the chance of at least one
    # coincidence across all events.
    p_some = p_some_S * p_some_T
    p_none = 1. - p_some
    p_none_total = np.prod(p_none)
    p_some_total = 1. - p_none_total
    print(infile, "Chance of detecting another event is ", p_some_total)

    if verbose:
        for k, pval in enumerate(pvals):
            print(names[k], " P_S = ", pval, " p_none = ", p_none_S[k],
                  " total p some = ", p_some[k])
main()
|
FRBsREPO_NAMEzdmPATH_START.@zdm_extracted@zdm-main@papers@GWFRB@process_ps_pt.py@.PATH_END.py
|
{
"filename": "dust.py",
"repo_name": "SAMI-Galaxy-Survey/sami",
"repo_path": "sami_extracted/sami-master/dr/dust.py",
"type": "Python"
}
|
"""
Functions for defining the effect of Milky Way dust on SAMI observations.
For a variety of published maps (currently 2) this module will download the
maps and use them to measure the E(B-V) value at the position of a SAMI
observation. Then for different parameterisations of the dust attenuation
law it will calculate the transmission curve for the observation. The
results are saved in the FITS file, but no correction is made to the data
itself.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import numpy as np
from math import pi, sqrt, sin, cos
from matplotlib import pyplot as plt
try:
import urllib.request as urllib2
except ImportError:
import urllib2
import astropy.io.fits as pf
try :
import healpy as hp
HEALPY_AVAILABLE = True
except ImportError:
HEALPY_AVAILABLE = False
# print('Requires healpy -- developed with py27-healpy @1.8.1_0')
# print('If you use Macports to manage your python installation,')
# print('type "sudo port install py27-healpy".')
from astropy import coordinates as co
from astropy import units as u
# Cache of already-loaded healpix dust maps, keyed by short map name
# ('planck', 'sfd98'); populated lazily by load_map().
MAPS = {}

# Per-map metadata: local cache path, healpix FIELD index within the FITS
# file, download URL, and the FITS header keyword / display name used when
# recording the E(B-V) value derived from that map.
MAPS_FILES = {
    'planck': {
        'filename': 'dust/HFI_CompMap_ThermalDustModel_2048_R1.20.fits',
        'field': 2,
        'url': 'http://pla.esac.esa.int/pla/aio/product-action?MAP.MAP_ID=HFI_CompMap_ThermalDustModel_2048_R1.20.fits',
        'header_key': 'EBVPLNCK',
        'comment_name': 'Planck v1.20'},
    'sfd98': {
        'filename': 'dust/lambda_sfd_ebv.fits',
        'field': 0,
        'url': 'http://lambda.gsfc.nasa.gov/data/foregrounds/SFD/lambda_sfd_ebv.fits',
        'header_key': 'EBVSFD98',
        'comment_name': 'SFD98'},
    # 'schlafly': {
    #     'filename': 'dust/ps1-ebv-4.5kpc.fits',
    #     'field': 0,
    #     'url': 'http://lambda.gsfc.nasa.gov/data/foregrounds/EBV/ps1-ebv-4.5kpc.fits',
    #     'header_key': 'EBVSCHLA',
    #     'comment_name': 'Schlafly et al 2014'},
}
def load_map(name, force_reload=False):
    """Load the named dust map into the MAPS cache.

    Returns True on success (or cache hit), False if the map file could
    not be read (e.g. it was never downloaded).
    """
    # Serve straight from the in-memory cache unless a reload is forced.
    if name in MAPS and not force_reload:
        return True
    info = MAPS_FILES[name]
    try:
        MAPS[name] = hp.read_map(info['filename'], field=info['field'])
    except IOError:
        return False
    return True
def download_map(name, overwrite=False):
    """Download a single dust map.

    Parameters
    ----------
    name : str
        Key into ``MAPS_FILES`` identifying the map to fetch.
    overwrite : bool
        If True, re-download even when the file already exists on disk.
    """
    map_info = MAPS_FILES[name]
    if os.path.exists(map_info['filename']) and not overwrite:
        # FIX: the original printed the literal '{}' placeholder — the
        # .format(name) call was missing.
        print('{} map already downloaded; returning'.format(name))
        return
    dirname = os.path.dirname(map_info['filename'])
    if not os.path.exists(dirname):
        os.makedirs(dirname)
    response = urllib2.urlopen(map_info['url'])
    try:
        # FIX: FITS payloads are binary; text mode ('w') raises TypeError on
        # Python 3 and could corrupt the data — write in 'wb' mode.
        with open(map_info['filename'], 'wb') as f_out:
            f_out.write(response.read())
    finally:
        # Release the network connection even if the write fails.
        response.close()
def download_all_maps(overwrite=False):
    """Fetch every dust map listed in MAPS_FILES (see download_map)."""
    for map_name in MAPS_FILES:
        download_map(map_name, overwrite=overwrite)
# planck_dust_map_filename = 'HFI_CompMap_ThermalDustModel_2048_R1.20.fits'
# if not os.path.exists( planck_dust_map_filename ):
# print('WARNING: Cannot find Planck dust map with file name:')
# print(' '*8, planck_dust_map_filename)
# print()
# print('This can be downloaded via the Planck Explanatory Supplement wiki:')
# print(' '*8, 'http://wiki.cosmos.esa.int/planckpla')
# print('(Look under Mission Products > CMB and astrophysical component maps,')
# print('and be sure to download the higher resolution Nside=2048 version.')
# print('Note that this is a ~1.6 Gb download!)')
# print()
# else :
# print('reading Planck dust map from file:')
# print(' '*8, planck_dust_map_filename)
# planckDustMap = hp.read_map( planck_dust_map_filename, field=2 )
# hp.mollview( planckDustMap, min=0., max=1., fig=0,
# title='Planck dust map: %s'
# % planck_dust_map_filename.split('/')[-1] )
# print(); print()
# schlegel_dust_map_filename = 'lambda_sfd_ebv.fits'
# if not os.path.exists( schlegel_dust_map_filename ):
# print('WARNING: Cannot find Schlegel et al. dust map with file name:')
# print(' '*8, schlegel_dust_map_filename)
# print()
# print("This can be downloaded via NASA-Goddard's LAMBDA data archive:")
# print(' '*8, 'http://lambda.gsfc.nasa.gov/product/foreground/f_products.cfm')
# print('(Look under data > foreground > products > Reddening (E(B-V)) Map,')
# print('and be sure to download the healpix version.')
# print()
# else :
# print('reading Schlegel et al. dust map from file:')
# print(' '*8, schlegel_dust_map_filename)
# schlegelDustMap = hp.read_map( schlegel_dust_map_filename, field=0 )
# hp.mollview( planckDustMap, min=0., max=1., fig=1,
# title='Schlegel, Finkbinder & Davis (1998) dust map: %s'
# % schlegel_dust_map_filename.split('/')[-1] )
# print(); print()
def healpixAngularCoords( ra, dec ):
    """Convert equatorial (RA, Dec) in degrees to healpix (theta, phi).

    theta is the Galactic colatitude (pi/2 - b) and phi the Galactic
    longitude l, both in radians, as used by healpy.
    """
    galactic = co.SkyCoord( ra*u.deg, dec*u.deg ).galactic
    colatitude = pi/2. - galactic.b.rad
    longitude = galactic.l.rad
    return colatitude, longitude
# def Planck_EBV( theta, phi ):
# return hp.get_interp_val( planckDustMap, theta, phi )
# def Schlegel_EBV( theta, phi ):
# return hp.get_interp_val( schlegelDustMap, theta, phi )
def EBV(name, theta, phi):
    """
    Return E(B-V) for given map at given location.

    Valid names are 'planck', 'sfd98' or 'schlafly'.  `theta`/`phi` are
    healpix angular coordinates (see healpixAngularCoords).  Returns
    None if the requested map cannot be loaded.
    """
    # load_map caches, so repeated calls for the same map are cheap.
    if not load_map(name):
        return None
    return hp.get_interp_val(MAPS[name], theta, phi)
# def foregroundCorrection( ra, dec, wavelength ):
# print('Looking up MW dust redding at (RA, Dec) = (%10.6f, %+10.6f).')
# theta, phi = healpixAngularCoords( ra, dec )
# EBV1 = Schlegel_EBV( theta, phi )
# print('E(B-V) from Schlegel dust map is: %.4f' % EBV1)
# EBV2 = Planck_EBV( theta, phi )
# print('E(B-V) from Planck dust map is: %.4f' % EBV2)
# correction = MilkyWayDustCorrection( wavelength, EBV2, dustlaw='CCM89' )
# # this is the multiplicative scaling to correct for foreground dust
# return correction, EBV1, EBV2
def dustCorrectSAMICube( path, overwrite=False ):
    """Record Milky Way dust information in a SAMI FITS cube, in place.

    Looks up E(B-V) at the cube's catalogue position in each map listed
    in MAPS_FILES, writes the values into the header of a 'DUST'
    extension, and stores the per-wavelength flux correction curve
    (Planck E(B-V), default CCM89 law) as that extension's data.  An
    existing 'DUST' HDU is only replaced when `overwrite` is True.
    """
    if not HEALPY_AVAILABLE:
        print('healpy not installed; cannot process dust data.')
        return
    hdulist = pf.open(path, 'update')
    try:
        hdu = hdulist['DUST']
    except KeyError:
        # HDU does not exist; make it
        hdu = pf.ImageHDU()
        hdu.name = 'DUST'
        hdulist.append(hdu)
    else:
        # HDU does exist. Do we want to overwrite it?
        if not overwrite:
            # Don't overwrite; get out of here!
            hdulist.close()
            return
    print('Recording dust data for ' + os.path.basename(path))
    header = hdulist[0].header
    ra, dec = header[ 'CATARA' ], header[ 'CATADEC' ]
    # Reconstruct the wavelength axis (in the cube's native units) from
    # the axis-3 WCS keywords.
    wl = header[ 'CRVAL3' ] + ( header[ 'CDELT3' ] *
                                (1 + np.arange( header[ 'NAXIS3' ] )
                                 - header[ 'CRPIX3' ] ))
    theta, phi = healpixAngularCoords( ra, dec )
    for name, map_info in MAPS_FILES.items():
        ebv = EBV(name, theta, phi)
        if ebv is not None:
            # We were able to find the dust map
            hdu.header[map_info['header_key']] = (
                ebv, 'MW reddening E(B-V) from {}'.format(
                    map_info['comment_name']))
            # Only the Planck value is used for the stored curve.
            if name == 'planck':
                correction = MilkyWayDustCorrection(wl, ebv)
                hdu.data = correction
        else:
            # We were not able to find the dust map
            print('Warning: {} dust map not available'.format(
                map_info['comment_name']))
            if name == 'planck':
                print('No dust curve recorded')
    hdulist.flush()
    hdulist.close()
    return
def MilkyWayDustCorrection( wavelength, EBV, dustlaw='CCM89' ):
    """Multiplicative correction for foreground Milky Way dust.

    Implements the Cardelli, Clayton & Mathis (1989) extinction curve
    (or O'Donnell 1994 optical coefficients), assuming
    R_V = A_V/E(B-V) = 3.1 — CCM89's diffuse-ISM value, and also
    Calzetti (2001)'s value for star formers.

    Parameters
    ----------
    wavelength : float or ndarray
        Wavelength(s) in Angstroms.
    EBV : float
        Line-of-sight Milky Way reddening E(B-V).
    dustlaw : str
        'CCM89' or 'OD94'.

    Returns
    -------
    float or ndarray
        Factor(s) by which to multiply observed flux to undo the
        foreground extinction.  Returns 1. (after printing a warning)
        for an unrecognised `dustlaw`.  Wavelengths outside the IR and
        optical validity ranges get no correction (factor 1).
    """
    Rv = 3.1
    # Everything is parameterised in x = 1/lambda with lambda in microns;
    # <wavelength> arrives in Angstroms (1 um = 1e4 A).
    x = 1./(wavelength/1e4)
    ir_regime = ( ( 0.3 <= x ) & ( x <= 1.1 ) )
    optical_regime = ( ( 1.1 < x ) & ( x <= 3.3 ) )
    # IR branch of a(x), b(x) in A(lam)/A_V = a(x) + b(x)/R_V.
    a = np.where( ir_regime, +0.574 * x**1.61, 0. )
    b = np.where( ir_regime, -0.527 * x**1.61, 0. )
    y = x - 1.82
    # Optical-branch polynomial coefficients, highest power first
    # (np.polyval convention: p[0]*X**(N-1) + ... + p[-1]).
    polynomials = {
        'CCM89': ( ( +0.32999, -0.77530, +0.01979, +0.72085,
                     -0.02427, -0.50447, +0.17699, 1. ),
                   ( -2.09002, +5.30260, -0.62251, -5.38434,
                     +1.07233, +2.28305, +1.41338, 0. ) ),
        'OD94':  ( ( -0.505, +1.647, -0.827, -1.718,
                     +1.137, +0.701, -0.609, +0.104, 1. ),
                   ( +3.347, -10.805, +5.491, +11.102,
                     -7.985, -3.989, +2.908, +1.952, 0. ) ),
    }
    if dustlaw not in polynomials:
        print('Do not recognise the given dust law:', dustlaw)
        print('Recognised options are:')
        print('--- CCM89 (Cardelli, Clayton & Mathis, 1989, ApJ 345, 245)')
        print("--- OD94 (O'Donnell, 1994, ApJ 422, 1580")
        print('No dust correction will be performed.')
        return 1.
    acoeffs, bcoeffs = polynomials[dustlaw]
    a = np.where( optical_regime, np.polyval( acoeffs, y ), a )
    b = np.where( optical_regime, np.polyval( bcoeffs, y ), b )
    # a + b/Rv is A(lam) normalised to A_V = 1; multiplying by Rv
    # renormalises to E(B-V) = 1, since Rv = A_V/E(B-V).
    attenuation = (a + b / Rv) * Rv
    # Fraction of light transmitted through the foreground screen...
    transmission = 10**( -0.4 * EBV * attenuation )
    # ...whose reciprocal is the multiplicative flux correction.
    return 1./transmission
def gamaTest( ):
    """Scatter-plot E(B-V) from the local dust maps against GAMA values.

    Requires the `atpy` package and the GAMA DR2 InputCatA.fits table.
    Black points use the Planck map; red points use SFD98.
    """
    import atpy
    print('requires InputCatA.fits; download from GAMA DR2 webpages.')
    gama = atpy.Table( '/Users/ent/data/gama/dr2/InputCatA.fits' )
    plt.figure( 5 ) ; plt.clf()
    plt.xlabel( 'EBV from GAMA catalogues' )
    plt.ylabel( 'EBV from this code' )
    plt.title( 'red = Schlegel+98 dust map; black = Planck dust map' )
    for i, ( ra, dec ) in enumerate( zip( gama.RA, gama.DEC ) ):
        # Bug fix: the original called thetaPhiFromRaDec / Planck_EBV /
        # Schlegel_EBV, none of which exist any more (they are commented
        # out above) — use the live healpixAngularCoords/EBV API instead.
        theta, phi = healpixAngularCoords( ra, dec )
        dust = EBV( 'planck', theta, phi )
        dust2 = EBV( 'sfd98', theta, phi )
        # GAMA tabulates A_r; divide by R_v = 3.1 to compare as E(B-V).
        plt.scatter( gama.EXTINCTION_R[ i ]/3.1, dust, 1, 'k', edgecolors='none' )
        plt.scatter( gama.EXTINCTION_R[ i ]/3.1, dust2, 1, 'r', edgecolors='none' )
        # Refresh the figure every 1000 objects.
        if i % 1000 == 999 :
            plt.draw()
|
SAMI-Galaxy-SurveyREPO_NAMEsamiPATH_START.@sami_extracted@sami-master@dr@dust.py@.PATH_END.py
|
{
"filename": "visualisation.md",
"repo_name": "PaulHancock/Robbie",
"repo_path": "Robbie_extracted/Robbie-main/docs/source/visualisation.md",
"type": "Markdown"
}
|
(visualisation)=
## Visualisation
### Running locally with Docker
To start the Docker container containing the Bokeh server, run the following script in the main Nextflow directory:
``` bash
./run_robbie_viewer.sh
```
This will run the viewer using the images output from Robbie within the default ``results`` directory. If your output directory is different to the default, you can add either the relative or absolute path as an optional argument:
``` bash
./run_robbie_viewer.sh -p path_to_dir
```
When plotting large images, it is recommended to also specify an RA and DEC position, as well as a size in coordinate units, to cutout a portion of the image for plotting. For example, if we want to plot an image with centre position of RA 335°, DEC -15° and size of 5°:
``` bash
./run_robbie_viewer.sh -p path_to_dir -c 335,-15,5
```
### Running on a cluster with Singularity
The Robbie Viewer is available on Pawsey as a part of the SHPC (Singularity Recipe HPC). To install it, we will load the SHPC module, install the viewer as a module and then load it:
``` bash
module load shpc/0.0.53
shpc install cjproud/robbie_viewer
module load cjproud/robbie_viewer/latest/module
```
Now, the viewer will be available on our path and we can run it as normal:
``` bash
./run_robbie_viewer.sh -p path_to_dir -c RA,DEC,PAD
```
### Visualising transients
Visualising different transient candidates can be done in multiple ways. For example, the transient candidate can be selected using the table, sky plot or variables plot as shown below:

### Visualising transient Epochs
Cycling through Epochs for each transient candidate is just as easy; for example, you can use either the Epoch slider or select each Epoch in the Peak Flux vs. Epoch graph:

### Transient candidate selection
Bokeh has multiple ways to interact with the data shown in the plots and table. To select multiple transient candidates, one option is to hold ``shift`` and click on the table entries. Once we zoom out, we can see all the selected transients on each plot:

The Box select tool can also be used to select transient candidates. After drawing the bounding box for selection, the transient candidates are highlighted in the other plots as well as the table below:

|
PaulHancockREPO_NAMERobbiePATH_START.@Robbie_extracted@Robbie-main@docs@source@visualisation.md@.PATH_END.py
|
{
"filename": "datasets.py",
"repo_name": "enthought/mayavi",
"repo_path": "mayavi_extracted/mayavi-master/mayavi/tests/datasets.py",
"type": "Python"
}
|
# Author: Suyog Dutt Jain <suyog.jain@aero.iitb.ac.in>
# Prabhu Ramachandran <prabhu_r@users.sf.net>
# Copyright (c) 2008, Enthought, Inc.
# License: BSD Style.
# Standard library imports.
import numpy
from numpy import linspace, cos, sin, pi, empty, sqrt,array,arange,random
# Enthought library imports
from tvtk.api import tvtk
def generate_annulus(r=None, theta=None, z=None):
    """ Generate points for structured grid for a cylindrical annular
        volume.  This method is useful for generating a unstructured
        cylindrical mesh for VTK (and perhaps other tools).

        Parameters
        ----------
        r : array : The radial values of the grid points.
                    It defaults to linspace(1.0, 2.0, 11).

        theta : array : The angular values of the x axis for the grid
                        points. It defaults to linspace(0,2*pi,11).

        z: array : The values along the z axis of the grid points.
                   It defaults to linspace(0.0, 1.0, 11).

        Return
        ------
        points : array
            Nx3 array of points that make up the volume of the annulus.
            They are organized in planes starting with the first value
            of z and with the inside "ring" of the plane as the first
            set of points.  The default point array will be 1331x3.
    """
    # Default values for the annular grid.
    if r is None: r = linspace(1.0, 2.0, 11)
    if theta is None: theta = linspace(0, 2*pi, 11)
    if z is None: z = linspace(0.0, 1.0, 11)

    # Find the x values and y values for each plane: every (r, theta)
    # combination, flattened with r varying slowest.
    x_plane = (cos(theta)*r[:,None]).ravel()
    y_plane = (sin(theta)*r[:,None]).ravel()

    # Allocate an array for all the points.  We'll have len(x_plane)
    # points on each plane, and we have a plane for each z value, so
    # we need len(x_plane)*len(z) points.
    points = empty([len(x_plane)*len(z), 3])

    # Loop through the points for each plane and fill them with the
    # correct x,y,z values.
    start = 0
    for z_plane in z:
        end = start + len(x_plane)
        # slice out a plane of the output points and fill it
        # with the x,y, and z values for this plane.  The x,y
        # values are the same for every plane.  The z value
        # is set to the current z.
        plane_points = points[start:end]
        plane_points[:,0] = x_plane
        plane_points[:,1] = y_plane
        plane_points[:,2] = z_plane
        start = end
    return points
def single_type_ug():
    """Simple example showing how to create an unstructured grid
    consisting of cells of a single type: three tetrahedra laid out
    side by side along x.  Returns the tvtk grid.
    """
    tet_points = array([[0,0,0], [1,0,0], [0,1,0], [0,0,1],  # tets
                        [1,0,0], [2,0,0], [1,1,0], [1,0,1],
                        [2,0,0], [3,0,0], [2,1,0], [2,0,1],
                        ], 'f')
    # Each tetrahedron uses four consecutive points:
    # [[0,1,2,3], [4,5,6,7], [8,9,10,11]].
    connectivity = array([[4*n + k for k in range(4)] for n in range(3)])
    grid = tvtk.UnstructuredGrid(points=tet_points)
    grid.set_cells(tvtk.Tetra().cell_type, connectivity)
    return grid
def mixed_type_ug():
    """A slightly more complex example of how to generate an
    unstructured grid with different cell types.  Returns a created
    unstructured grid.
    """
    points = array([[0,0,0], [1,0,0], [0,1,0], [0,0,1], # tetra
                    [2,0,0], [3,0,0], [3,1,0], [2,1,0],
                    [2,0,1], [3,0,1], [3,1,1], [2,1,1], # Hex
                    ], 'f')
    # shift the points so we can show both.
    points[:,1] += 2.0
    # The cells
    # (VTK flat connectivity format: point count followed by point ids,
    # repeated per cell: 4 points for the tetra, 8 for the hexahedron.)
    cells = array([4, 0, 1, 2, 3, # tetra
                   8, 4, 5, 6, 7, 8, 9, 10, 11 # hex
                   ])
    # The offsets for the cells, i.e. the indices where the cells
    # start.
    offset = array([0, 5])
    tetra_type = tvtk.Tetra().cell_type # VTK_TETRA == 10
    hex_type = tvtk.Hexahedron().cell_type # VTK_HEXAHEDRON == 12
    cell_types = array([tetra_type, hex_type])
    # Create the array of cells unambiguously.
    cell_array = tvtk.CellArray()
    cell_array.set_cells(2, cells)
    # Now create the UG.
    ug = tvtk.UnstructuredGrid(points=points)
    # Now just set the cell types and reuse the ug locations and cells.
    ug.set_cells(cell_types, offset, cell_array)
    return ug
def generateStructuredGrid():
    """Generates Structured Grid"""
    # (radial, angular, axial) sample counts for the annular mesh.
    dims = (32, 32, 12)
    # NOTE(review): dimensions are passed as (dims[1], dims[0], dims[2]),
    # matching the point ordering produced by generate_annulus — confirm
    # against the tvtk StructuredGrid docs before changing.
    sgrid = tvtk.StructuredGrid(dimensions=(dims[1], dims[0], dims[2]))
    r = linspace(1, 10, dims[0])
    theta = linspace(0, 2*numpy.pi, dims[1])
    z = linspace(0, 5, dims[2])
    pts = generate_annulus(r, theta, z)
    sgrid.points = pts
    # Scalar attribute: distance of each point from the origin.
    s = sqrt(pts[:,0]**2 + pts[:,1]**2 + pts[:,2]**2)
    sgrid.point_data.scalars = numpy.ravel(s.copy())
    sgrid.point_data.scalars.name = 'scalars'
    return sgrid
def generateUnstructuredGrid_single():
    """Build the tetrahedra-only unstructured grid and attach a
    'temperature' scalar and a random 'velocity' vector per point."""
    grid = single_type_ug()
    # One scalar per point (12 points in the tet grid).
    grid.point_data.scalars = arange(0, 120, 10, 'd')
    grid.point_data.scalars.name = 'temperature'
    # One random 3-vector per point.
    grid.point_data.vectors = random.randn(12, 3)
    grid.point_data.vectors.name = 'velocity'
    return grid
def generateUnstructuredGrid_mixed():
    """Build the mixed-cell (tetra + hex) unstructured grid and attach a
    'temperature' scalar and a random 'velocity' vector per point."""
    grid = mixed_type_ug()
    # One scalar per point (12 points in the mixed grid).
    grid.point_data.scalars = arange(0, 120, 10, 'd')
    grid.point_data.scalars.name = 'temperature'
    # One random 3-vector per point.
    grid.point_data.vectors = random.randn(12, 3)
    grid.point_data.vectors.name = 'velocity'
    return grid
|
enthoughtREPO_NAMEmayaviPATH_START.@mayavi_extracted@mayavi-master@mayavi@tests@datasets.py@.PATH_END.py
|
{
"filename": "frierson_debug.py",
"repo_name": "ExeClim/Isca",
"repo_path": "Isca_extracted/Isca-master/exp/debug/frierson_debug.py",
"type": "Python"
}
|
import os
import numpy as np
from isca import IscaCodeBase, DiagTable, Experiment, Namelist, GFDL_BASE
#To run Isca with the Intel debugger, three changes should be made, as are in the below example.
# 1. Set NCORES=1
# 2. set the 'debug=True' argument in the 'cb.compile'
# 3. Add the 'run_idb=True' option to the first instance of exp.run
# Single core: required for the interactive Intel debugger workflow
# described in the header comments above.
NCORES = 1
# Directory containing this script (not otherwise used below).
base_dir = os.path.dirname(os.path.realpath(__file__))
# a CodeBase can be a directory on the computer,
# useful for iterative development
cb = IscaCodeBase.from_directory(GFDL_BASE)
# or it can point to a specific git repo and commit id.
# This method should ensure future, independent, reproducibility of results.
# cb = DryCodeBase.from_repo(repo='https://github.com/isca/isca', commit='isca1.1')
# compilation depends on computer specific settings. The $GFDL_ENV
# environment variable is used to determine which `$GFDL_BASE/src/extra/env` file
# is used to load the correct compilers. The env file is always loaded from
# $GFDL_BASE and not the checked out git repo.
# debug=True builds with debugging symbols so idb can attach (change #2
# from the header notes).
cb.compile(debug=True)  # compile the source code to working directory $GFDL_WORK/codebase
# create an Experiment object to handle the configuration of model parameters
# and output diagnostics
exp = Experiment('frierson_debug_test_experiment', codebase=cb)
#Tell model how to write diagnostics
diag = DiagTable()
# One output file with 30-day (monthly, given the thirty_day calendar) averages.
diag.add_file('atmos_monthly', 30, 'days', time_units='days')
#Tell model which diagnostics to write
diag.add_field('dynamics', 'ps', time_avg=True)
diag.add_field('dynamics', 'bk')
diag.add_field('dynamics', 'pk')
diag.add_field('atmosphere', 'precipitation', time_avg=True)
diag.add_field('mixed_layer', 't_surf', time_avg=True)
diag.add_field('dynamics', 'sphum', time_avg=True)
diag.add_field('dynamics', 'ucomp', time_avg=True)
diag.add_field('dynamics', 'vcomp', time_avg=True)
diag.add_field('dynamics', 'temp', time_avg=True)
diag.add_field('dynamics', 'vor', time_avg=True)
diag.add_field('dynamics', 'div', time_avg=True)
exp.diag_table = diag
#Empty the run directory ready to run
exp.clear_rundir()
#Define values for the 'core' namelist
exp.namelist = namelist = Namelist({
    # Integration length, timestep and calendar.
    'main_nml':{
        'days'   : 30,
        'hours'  : 0,
        'minutes': 0,
        'seconds': 0,
        'dt_atmos':720,
        'current_date' : [1,1,1,0,0,0],
        'calendar' : 'thirty_day'
    },
    # Overall physics configuration for the idealised moist model.
    'idealized_moist_phys_nml': {
        'do_damping': True,
        'turb':True,
        'mixed_layer_bc':True,
        'do_virtual' :False,
        'do_simple': True,
        'roughness_mom':3.21e-05,
        'roughness_heat':3.21e-05,
        'roughness_moist':3.21e-05,
        'two_stream_gray': True, #Use grey radiation
        'convection_scheme': 'SIMPLE_BETTS_MILLER', #Use the simple Betts Miller convection scheme from Frierson
    },
    'vert_turb_driver_nml': {
        'do_mellor_yamada': False,     # default: True
        'do_diffusivity': True,        # default: False
        'do_simple': True,             # default: False
        'constant_gust': 0.0,          # default: 1.0
        'use_tau': False
    },
    'diffusivity_nml': {
        'do_entrain':False,
        'do_simple': True,
    },
    'surface_flux_nml': {
        'use_virtual_temp': False,
        'do_simple': True,
        'old_dtaudv': True
    },
    'atmosphere_nml': {
        'idealized_moist_model': True
    },
    #Use a large mixed-layer depth, and the Albedo of the CTRL case in Jucker & Gerber, 2017
    'mixed_layer_nml': {
        'tconst' : 285.,
        'prescribe_initial_dist':True,
        'evaporation':True,
        'depth': 2.5,                          #Depth of mixed layer used
        'albedo_value': 0.31,                  #Albedo value used
    },
    'qe_moist_convection_nml': {
        'rhbm':0.7,
        'Tmin':160.,
        'Tmax':350.
    },
    'betts_miller_nml': {
        'rhbm': .7,
        'do_simp': False,
        'do_shallower': True,
    },
    'lscale_cond_nml': {
        'do_simple':True,
        'do_evap':True
    },
    'sat_vapor_pres_nml': {
        'do_simple':True
    },
    'damping_driver_nml': {
        'do_rayleigh': True,
        'trayfric': -0.25,              # neg. value: time in *days*
        'sponge_pbottom':  5000.,           #Bottom of the model's sponge down to 50hPa (units are Pa)
        'do_conserve_energy': True,
    },
    'two_stream_gray_rad_nml': {
        'rad_scheme': 'frierson',            #Select radiation scheme to use, which in this case is Frierson
        'do_seasonal': False,                #do_seasonal=false uses the p2 insolation profile from Frierson 2006. do_seasonal=True uses the GFDL astronomy module to calculate seasonally-varying insolation.
        'atm_abs': 0.2,                      # default: 0.0
    },
    # FMS Framework configuration
    'diag_manager_nml': {
        'mix_snapshot_average_fields': False  # time avg fields are labelled with time in middle of window
    },
    'fms_nml': {
        'domains_stack_size': 600000                        # default: 0
    },
    'fms_io_nml': {
        'threading_write': 'single',                         # default: multi
        'fileset_write': 'single',                           # default: multi
    },
    'spectral_dynamics_nml': {
        'damping_order': 4,
        'water_correction_limit': 200.e2,
        'reference_sea_level_press':1.0e5,
        'num_levels':25,               #How many model pressure levels to use
        'valid_range_t':[100.,800.],
        'initial_sphum':[2.e-6],
        'vert_coord_option':'input',   #Use the vertical levels from Frierson 2006
        'surf_res':0.5,
        'scale_heights' : 11.0,
        'exponent':7.0,
        'robert_coeff':0.03
    },
    # Hybrid sigma-pressure coefficients for the 25 model levels
    # (26 interfaces), taken from Frierson 2006.
    'vert_coordinate_nml': {
        'bk': [0.000000, 0.0117665, 0.0196679, 0.0315244, 0.0485411, 0.0719344, 0.1027829, 0.1418581, 0.1894648, 0.2453219, 0.3085103, 0.3775033, 0.4502789, 0.5244989, 0.5977253, 0.6676441, 0.7322627, 0.7900587, 0.8400683, 0.8819111, 0.9157609, 0.9422770, 0.9625127, 0.9778177, 0.9897489, 1.0000000],
        'pk': [0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000],
    }
})
#Lets do a run!
if __name__=="__main__":
    # First run starts from rest and attaches the Intel debugger
    # (run_idb=True), per the setup notes at the top of this file.
    exp.run(1, use_restart=False, num_cores=NCORES, run_idb=True)
    # The header says run_idb should be added to the *first* exp.run only;
    # the original also passed run_idb=True here, launching the debugger
    # for every one of the 119 continuation runs.
    for i in range(2,121):
        exp.run(i, num_cores=NCORES)
|
ExeClimREPO_NAMEIscaPATH_START.@Isca_extracted@Isca-master@exp@debug@frierson_debug.py@.PATH_END.py
|
{
"filename": "multimodal_prompts.ipynb",
"repo_name": "langchain-ai/langchain",
"repo_path": "langchain_extracted/langchain-master/docs/docs/how_to/multimodal_prompts.ipynb",
"type": "Jupyter Notebook"
}
|
# How to use multimodal prompts
Here we demonstrate how to use prompt templates to format [multimodal](/docs/concepts/multimodality/) inputs to models.
In this example we will ask a [model](/docs/concepts/chat_models/#multimodality) to describe an image.
```python
import base64
import httpx
image_url = "https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg"
image_data = base64.b64encode(httpx.get(image_url).content).decode("utf-8")
```
```python
from langchain_core.prompts import ChatPromptTemplate
from langchain_openai import ChatOpenAI
model = ChatOpenAI(model="gpt-4o")
```
```python
prompt = ChatPromptTemplate.from_messages(
[
("system", "Describe the image provided"),
(
"user",
[
{
"type": "image_url",
"image_url": {"url": "data:image/jpeg;base64,{image_data}"},
}
],
),
]
)
```
```python
chain = prompt | model
```
```python
response = chain.invoke({"image_data": image_data})
print(response.content)
```
The image depicts a sunny day with a beautiful blue sky filled with scattered white clouds. The sky has varying shades of blue, ranging from a deeper hue near the horizon to a lighter, almost pale blue higher up. The white clouds are fluffy and scattered across the expanse of the sky, creating a peaceful and serene atmosphere. The lighting and cloud patterns suggest pleasant weather conditions, likely during the daytime hours on a mild, sunny day in an outdoor natural setting.
We can also pass in multiple images.
```python
prompt = ChatPromptTemplate.from_messages(
[
("system", "compare the two pictures provided"),
(
"user",
[
{
"type": "image_url",
"image_url": {"url": "data:image/jpeg;base64,{image_data1}"},
},
{
"type": "image_url",
"image_url": {"url": "data:image/jpeg;base64,{image_data2}"},
},
],
),
]
)
```
```python
chain = prompt | model
```
```python
response = chain.invoke({"image_data1": image_data, "image_data2": image_data})
print(response.content)
```
The two images provided are identical. Both images feature a wooden boardwalk path extending through a lush green field under a bright blue sky with some clouds. The perspective, colors, and elements in both images are exactly the same.
```python
```
|
langchain-aiREPO_NAMElangchainPATH_START.@langchain_extracted@langchain-master@docs@docs@how_to@multimodal_prompts.ipynb@.PATH_END.py
|
{
"filename": "layer.py",
"repo_name": "huggingface/peft",
"repo_path": "peft_extracted/peft-main/src/peft/tuners/ln_tuning/layer.py",
"type": "Python"
}
|
# Copyright 2024-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from copy import deepcopy
from typing import List, Optional
import torch
import torch.nn as nn
from peft.tuners.tuners_utils import BaseTunerLayer, check_adapters_to_merge
class LNTuningLayer(nn.Module, BaseTunerLayer):
    """
    Selects a layer from the model.

    Wraps a (LayerNorm-like) base layer; each adapter is a full deep copy
    of that layer, stored in `ln_tuning_layers`.  "Merging" an adapter
    simply swaps the adapter module into the base-layer slot (and back on
    unmerge), so at most one adapter can be merged/active at a time.
    """

    # Names of the attributes that hold trainable adapter modules
    # (consumed by BaseTunerLayer machinery).
    adapter_layer_names = ("ln_tuning_layers",)

    def __init__(self, base_layer: nn.Module, adapter_name: str):
        super().__init__()
        self.base_layer = base_layer
        self.ln_tuning_layers = nn.ModuleDict({})
        # The initial adapter starts as an exact copy of the base layer.
        self.update_layer(self.base_layer, adapter_name)
        self._active_adapter = adapter_name
        self.merged_adapters = []

    def update_layer(self, layer: nn.Module, adapter_name: str):
        """Register `adapter_name` as a deep copy of `layer`."""
        self.ln_tuning_layers[adapter_name] = deepcopy(layer)

    def enable_adapters(self, enabled: bool) -> None:
        """Toggle the enabling and disabling of adapters

        Takes care of setting the requires_grad flag for the adapter weights.

        Args:
            enabled (bool): True to enable adapters, False to disable adapters
        """
        if enabled:
            self.set_adapter(self.active_adapters)
            self._disable_adapters = False
        else:
            if self.merged:
                self.unmerge()
            # disable grads on all adapter layers
            for layer_name in self.adapter_layer_names:
                layer = getattr(self, layer_name)
                layer.requires_grad_(False)
            self._disable_adapters = True

    def merge(self, adapter_names: Optional[List[str]] = None):
        """Swap one adapter into the base-layer slot.

        Raises:
            ValueError: if more than one adapter name is requested.
        """
        adapter_names = check_adapters_to_merge(self, adapter_names)
        if not adapter_names:
            # no adapter to merge
            return
        if len(adapter_names) > 1:
            raise ValueError(
                f"Trying to merge {len(adapter_names)} adapters, but LN "
                f"tuning does not allow merging more than one adapter at a time"
            )

        merged_adapters = set(self.merged_adapters)
        if merged_adapters:
            warnings.warn(f"Already merged with {merged_adapters}. Unmerging first.")
            self.unmerge()

        # Merging = swapping: the adapter becomes the base layer and the
        # original base layer is parked in the adapter's dict slot.
        self.base_layer, self.ln_tuning_layers[adapter_names[0]] = (
            self.ln_tuning_layers[adapter_names[0]],
            self.base_layer,
        )
        self.merged_adapters.append(adapter_names[0])

    def unmerge(self):
        """Undo the swap performed by merge(), restoring the base layer."""
        if not self.merged:
            warnings.warn("Already unmerged. Nothing to do.")
            return
        # popping one element is sufficient because LN
        # tuning does not allow merging more than one adapter at a time.
        merged_name = self.merged_adapters.pop()
        self.base_layer, self.ln_tuning_layers[merged_name] = (
            self.ln_tuning_layers[merged_name],
            self.base_layer,
        )

    def forward(self, x: torch.Tensor, *args, **kwargs) -> torch.Tensor:
        """Run the base layer (adapters disabled/merged) or the single
        active adapter layer.

        Raises:
            ValueError: if more than one adapter is active at once.
        """
        if self.disable_adapters:
            if self.merged:
                self.unmerge()
            result = self.base_layer(x, *args, **kwargs)
        elif self.merged:
            # The adapter already sits in base_layer after merge().
            result = self.base_layer(x, *args, **kwargs)
        else:
            if len(self.active_adapters) != 1:
                raise ValueError(
                    f"Trying to run forward with {len(self.active_adapters)} active "
                    f"adapters, but LN tuning does not allow inference with more than one adapter at a time"
                )
            active_adapter = self.active_adapters[0]
            result = self.ln_tuning_layers[active_adapter](x, *args, **kwargs)

        return result

    def __repr__(self) -> str:
        rep = super().__repr__()
        return "ln_tuning." + rep
|
huggingfaceREPO_NAMEpeftPATH_START.@peft_extracted@peft-main@src@peft@tuners@ln_tuning@layer.py@.PATH_END.py
|
{
"filename": "conf.py",
"repo_name": "stammler/dustpy",
"repo_path": "dustpy_extracted/dustpy-master/docs_source/source/conf.py",
"type": "Python"
}
|
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html

# -- Path setup --------------------------------------------------------------

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
from pathlib import Path
import sys

# sys.path entries must be plain strings — the import machinery ignores
# non-string items, so inserting a bare Path object would silently fail to
# make the package importable for autodoc.  Hence the str() wrapper.
sys.path.insert(0, str(Path(__file__).parents[1].absolute()))


# -- Project information -----------------------------------------------------

project = 'dustpy'
copyright = '2020-2023, Sebastian Stammler & Tilman Birnstiel'
author = 'Sebastian Stammler & Tilman Birnstiel'


# -- General configuration ---------------------------------------------------

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.viewcode',
    'sphinx.ext.napoleon',
    'nbsphinx',
    'sphinx_automodapi.automodapi',
]

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = [
    '_build',
    '**.ipynb_checkpoints',
]


# -- Options for HTML output -------------------------------------------------

# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'

# -- Logo --------------------------------------------------------------------

html_static_path = ['_static']
html_logo = "_static/logo.png"
html_theme_options = {
    'logo_only': True,
    'display_version': False,
}
|
stammlerREPO_NAMEdustpyPATH_START.@dustpy_extracted@dustpy-master@docs_source@source@conf.py@.PATH_END.py
|
{
"filename": "breebs.md",
"repo_name": "langchain-ai/langchain",
"repo_path": "langchain_extracted/langchain-master/docs/docs/integrations/providers/breebs.md",
"type": "Markdown"
}
|
# Breebs (Open Knowledge)
>[Breebs](https://www.breebs.com/) is an open collaborative knowledge platform.
>Anybody can create a `Breeb`, a knowledge capsule based on PDFs stored on a Google Drive folder.
>A `Breeb` can be used by any LLM/chatbot to improve its expertise, reduce hallucinations and give access to sources.
>Behind the scenes, `Breebs` implements several `Retrieval Augmented Generation (RAG)` models
> to seamlessly provide useful context at each iteration.
## Retriever
```python
from langchain.retrievers import BreebsRetriever
```
[See a usage example (Retrieval & ConversationalRetrievalChain)](/docs/integrations/retrievers/breebs)
|
langchain-aiREPO_NAMElangchainPATH_START.@langchain_extracted@langchain-master@docs@docs@integrations@providers@breebs.md@.PATH_END.py
|
{
"filename": "arraysetops.py",
"repo_name": "waynebhayes/SpArcFiRe",
"repo_path": "SpArcFiRe_extracted/SpArcFiRe-master/scripts/SpArcFiRe-pyvenv/lib/python2.7/site-packages/numpy/lib/arraysetops.py",
"type": "Python"
}
|
"""
Set operations for arrays based on sorting.
:Contains:
unique,
isin,
ediff1d,
intersect1d,
setxor1d,
in1d,
union1d,
setdiff1d
:Notes:
For floating point arrays, inaccurate results may appear due to usual round-off
and floating point comparison issues.
Speed could be gained in some operations by an implementation of
sort(), that can provide directly the permutation vectors, avoiding
thus calls to argsort().
To do: Optionally return indices analogously to unique for all functions.
:Author: Robert Cimrman
"""
from __future__ import division, absolute_import, print_function
import numpy as np
__all__ = [
'ediff1d', 'intersect1d', 'setxor1d', 'union1d', 'setdiff1d', 'unique',
'in1d', 'isin'
]
def ediff1d(ary, to_end=None, to_begin=None):
    """
    The differences between consecutive elements of an array.

    Parameters
    ----------
    ary : array_like
        Flattened before the differences are taken.
    to_end : array_like, optional
        Value(s) appended after the differences.
    to_begin : array_like, optional
        Value(s) prepended before the differences.

    Returns
    -------
    ediff1d : ndarray
        Loosely ``ary.flat[1:] - ary.flat[:-1]``, with any `to_begin`
        and `to_end` values attached.  The result is always 1-D and has
        the flattened input's dtype.

    See Also
    --------
    diff, gradient

    Examples
    --------
    >>> np.ediff1d(np.array([1, 2, 4, 7, 0]))
    array([ 1,  2,  3, -7])
    """
    # Work on a flattened (sub)class-preserving 1-D view/copy.
    flat = np.asanyarray(ary).ravel()

    # Fast path: nothing to pad, the subtraction alone is the answer.
    if to_begin is None and to_end is None:
        return flat[1:] - flat[:-1]

    # Normalise the optional pads to 1-D arrays and record their lengths.
    if to_begin is None:
        n_begin = 0
    else:
        to_begin = np.asanyarray(to_begin).ravel()
        n_begin = len(to_begin)

    if to_end is None:
        n_end = 0
    else:
        to_end = np.asanyarray(to_end).ravel()
        n_end = len(to_end)

    # Allocate the output once (input dtype), wrap it for subclasses,
    # then fill the three segments in place.
    n_diff = max(len(flat) - 1, 0)
    out = flat.__array_wrap__(np.empty(n_diff + n_begin + n_end, dtype=flat.dtype))
    if n_begin > 0:
        out[:n_begin] = to_begin
    if n_end > 0:
        out[n_begin + n_diff:] = to_end
    # Write the differences directly into the middle segment.
    np.subtract(flat[1:], flat[:-1], out[n_begin:n_begin + n_diff])
    return out
def unique(ar, return_index=False, return_inverse=False,
           return_counts=False, axis=None):
    """
    Find the unique elements of an array.

    Returns the sorted unique elements of an array. There are three optional
    outputs in addition to the unique elements: the indices of the input array
    that give the unique values, the indices of the unique array that
    reconstruct the input array, and the number of times each unique value
    comes up in the input array.

    Parameters
    ----------
    ar : array_like
        Input array. Unless `axis` is specified, this will be flattened if it
        is not already 1-D.
    return_index : bool, optional
        If True, also return the indices of `ar` (along the specified axis,
        if provided, or in the flattened array) that result in the unique
        array.
    return_inverse : bool, optional
        If True, also return the indices of the unique array (for the
        specified axis, if provided) that can be used to reconstruct `ar`.
    return_counts : bool, optional
        If True, also return the number of times each unique item appears
        in `ar`.

        .. versionadded:: 1.9.0
    axis : int or None, optional
        The axis to operate on. If None, `ar` will be flattened beforehand.
        Otherwise, duplicate items will be removed along the provided axis,
        with all the other axes belonging to the each of the unique elements.
        Object arrays or structured arrays that contain objects are not
        supported if the `axis` kwarg is used.

        .. versionadded:: 1.13.0

    Returns
    -------
    unique : ndarray
        The sorted unique values.
    unique_indices : ndarray, optional
        The indices of the first occurrences of the unique values in the
        original array. Only provided if `return_index` is True.
    unique_inverse : ndarray, optional
        The indices to reconstruct the original array from the
        unique array. Only provided if `return_inverse` is True.
    unique_counts : ndarray, optional
        The number of times each of the unique values comes up in the
        original array. Only provided if `return_counts` is True.

        .. versionadded:: 1.9.0

    See Also
    --------
    numpy.lib.arraysetops : Module with a number of other functions for
                            performing set operations on arrays.

    Examples
    --------
    >>> np.unique([1, 1, 2, 2, 3, 3])
    array([1, 2, 3])
    >>> a = np.array([[1, 1], [2, 3]])
    >>> np.unique(a)
    array([1, 2, 3])

    Return the unique rows of a 2D array

    >>> a = np.array([[1, 0, 0], [1, 0, 0], [2, 3, 4]])
    >>> np.unique(a, axis=0)
    array([[1, 0, 0], [2, 3, 4]])

    Return the indices of the original array that give the unique values:

    >>> a = np.array(['a', 'b', 'b', 'c', 'a'])
    >>> u, indices = np.unique(a, return_index=True)
    >>> u
    array(['a', 'b', 'c'],
           dtype='|S1')
    >>> indices
    array([0, 1, 3])
    >>> a[indices]
    array(['a', 'b', 'c'],
           dtype='|S1')

    Reconstruct the input array from the unique values:

    >>> a = np.array([1, 2, 6, 4, 2, 3, 2])
    >>> u, indices = np.unique(a, return_inverse=True)
    >>> u
    array([1, 2, 3, 4, 6])
    >>> indices
    array([0, 1, 4, 3, 1, 2, 1])
    >>> u[indices]
    array([1, 2, 6, 4, 2, 3, 2])
    """
    ar = np.asanyarray(ar)
    if axis is None:
        # Common case: flatten and defer all the work to the 1-D helper.
        return _unique1d(ar, return_index, return_inverse, return_counts)
    if not (-ar.ndim <= axis < ar.ndim):
        raise ValueError('Invalid axis kwarg specified for unique')
    # Bring the requested axis to the front so that every "row" of the
    # reshaped 2-D array below is one candidate element.
    ar = np.swapaxes(ar, axis, 0)
    orig_shape, orig_dtype = ar.shape, ar.dtype
    # Must reshape to a contiguous 2D array for this to work...
    ar = ar.reshape(orig_shape[0], -1)
    ar = np.ascontiguousarray(ar)
    if ar.dtype.char in (np.typecodes['AllInteger'] +
                         np.typecodes['Datetime'] + 'S'):
        # Optimization: Creating a view of your data with a np.void data type of
        # size the number of bytes in a full row. Handles any type where items
        # have a unique binary representation, i.e. 0 is only 0, not +0 and -0.
        dtype = np.dtype((np.void, ar.dtype.itemsize * ar.shape[1]))
    else:
        # Fallback: a structured dtype with one field per column keeps proper
        # per-element comparison semantics (e.g. floats, where +0.0 and -0.0
        # compare equal but have different bit patterns).
        dtype = [('f{i}'.format(i=i), ar.dtype) for i in range(ar.shape[1])]
    try:
        consolidated = ar.view(dtype)
    except TypeError:
        # There's no good way to do this for object arrays, etc...
        msg = 'The axis argument to unique is not supported for dtype {dt}'
        raise TypeError(msg.format(dt=ar.dtype))
    def reshape_uniq(uniq):
        # Undo the view/reshape/swapaxes transformations on a 1-D array of
        # unique "rows" so the result has the caller's original layout.
        uniq = uniq.view(orig_dtype)
        uniq = uniq.reshape(-1, *orig_shape[1:])
        uniq = np.swapaxes(uniq, 0, axis)
        return uniq
    output = _unique1d(consolidated, return_index,
                       return_inverse, return_counts)
    if not (return_index or return_inverse or return_counts):
        return reshape_uniq(output)
    else:
        # Only the unique values need reshaping; the optional index, inverse
        # and count arrays are already in their final 1-D form.
        uniq = reshape_uniq(output[0])
        return (uniq,) + output[1:]
def _unique1d(ar, return_index=False, return_inverse=False,
              return_counts=False):
    """
    Find the unique elements of an array, ignoring shape.
    """
    ar = np.asanyarray(ar).flatten()

    need_indices = return_index or return_inverse
    any_extras = need_indices or return_counts

    # Trivial case: empty input yields empty outputs of the right dtypes.
    if ar.size == 0:
        if not any_extras:
            return ar
        out = (ar,)
        if return_index:
            out += (np.empty(0, np.intp),)
        if return_inverse:
            out += (np.empty(0, np.intp),)
        if return_counts:
            out += (np.empty(0, np.intp),)
        return out

    if need_indices:
        # A stable sort is required for return_index so that the *first*
        # occurrence of each value wins; otherwise any sort will do.
        order = ar.argsort(kind='mergesort' if return_index else 'quicksort')
        sorted_ar = ar[order]
    else:
        # No permutation needed: sort the (flattened) copy in place.
        ar.sort()
        sorted_ar = ar

    # True wherever a new (unique) value starts in the sorted data.
    is_first = np.concatenate(([True], sorted_ar[1:] != sorted_ar[:-1]))

    if not any_extras:
        return sorted_ar[is_first]

    out = (sorted_ar[is_first],)
    if return_index:
        out += (order[is_first],)
    if return_inverse:
        # Label each sorted element with its unique-value index, then
        # scatter those labels back to the original positions.
        labels = np.cumsum(is_first) - 1
        inverse = np.empty(ar.shape, dtype=np.intp)
        inverse[order] = labels
        out += (inverse,)
    if return_counts:
        # Run lengths between consecutive "first occurrence" boundaries.
        boundaries = np.concatenate(np.nonzero(is_first) + ([ar.size],))
        out += (np.diff(boundaries),)
    return out
def intersect1d(ar1, ar2, assume_unique=False):
    """
    Find the intersection of two arrays.

    Return the sorted, unique values that are in both of the input arrays.

    Parameters
    ----------
    ar1, ar2 : array_like
        Input arrays.
    assume_unique : bool
        If True, both input arrays are assumed to already contain only
        unique values, which can speed up the calculation. Default is False.

    Returns
    -------
    intersect1d : ndarray
        Sorted 1D array of common and unique elements.

    See Also
    --------
    numpy.lib.arraysetops : Module with a number of other functions for
                            performing set operations on arrays.

    Examples
    --------
    >>> np.intersect1d([1, 3, 4, 3], [3, 1, 2, 1])
    array([1, 3])

    To intersect more than two arrays, use functools.reduce:

    >>> from functools import reduce
    >>> reduce(np.intersect1d, ([1, 3, 4, 3], [3, 1, 2, 1], [6, 3, 4, 2]))
    array([3])
    """
    if not assume_unique:
        # Deduplicate each input first so a value common to both arrays
        # appears exactly twice in the merged array below.
        ar1 = unique(ar1)
        ar2 = unique(ar2)
    merged = np.concatenate((ar1, ar2))
    merged.sort()
    # After sorting, adjacent equal entries must come one from each input,
    # so they mark exactly the intersection.
    duplicated = merged[1:] == merged[:-1]
    return merged[:-1][duplicated]
def setxor1d(ar1, ar2, assume_unique=False):
    """
    Find the set exclusive-or of two arrays.

    Return the sorted, unique values that are in only one (not both) of the
    input arrays.

    Parameters
    ----------
    ar1, ar2 : array_like
        Input arrays.
    assume_unique : bool
        If True, both input arrays are assumed to already contain only
        unique values, which can speed up the calculation. Default is False.

    Returns
    -------
    setxor1d : ndarray
        Sorted 1D array of unique values that are in only one of the input
        arrays.

    Examples
    --------
    >>> a = np.array([1, 2, 3, 2, 4])
    >>> b = np.array([2, 3, 5, 7, 5])
    >>> np.setxor1d(a,b)
    array([1, 4, 5, 7])
    """
    if not assume_unique:
        ar1 = unique(ar1)
        ar2 = unique(ar2)
    merged = np.concatenate((ar1, ar2))
    if merged.size == 0:
        return merged
    merged.sort()
    # A value belongs to the xor iff it appears exactly once in the merged
    # array, i.e. it differs from both of its sorted neighbours.  The
    # sentinel True values handle the first and last positions.
    edges = np.concatenate(([True], merged[1:] != merged[:-1], [True]))
    return merged[edges[1:] & edges[:-1]]
def in1d(ar1, ar2, assume_unique=False, invert=False):
    """
    Test whether each element of a 1-D array is also present in a second
    array.

    Returns a boolean array the same length as `ar1` that is True where an
    element of `ar1` is in `ar2` and False otherwise.

    We recommend using :func:`isin` instead of `in1d` for new code.

    Parameters
    ----------
    ar1 : (M,) array_like
        Input array.
    ar2 : array_like
        The values against which to test each value of `ar1`.
    assume_unique : bool, optional
        If True, the input arrays are both assumed to be unique, which
        can speed up the calculation. Default is False.
    invert : bool, optional
        If True, the values in the returned array are inverted (that is,
        False where an element of `ar1` is in `ar2` and True otherwise).
        Default is False. ``np.in1d(a, b, invert=True)`` is equivalent
        to (but is faster than) ``np.invert(in1d(a, b))``.

        .. versionadded:: 1.8.0

    Returns
    -------
    in1d : (M,) ndarray, bool
        The values `ar1[in1d]` are in `ar2`.

    See Also
    --------
    isin : Version of this function that preserves the shape of ar1.
    numpy.lib.arraysetops : Module with a number of other functions for
                            performing set operations on arrays.

    Notes
    -----
    `in1d` can be considered as an element-wise function version of the
    python keyword `in`, for 1-D sequences. ``in1d(a, b)`` is roughly
    equivalent to ``np.array([item in b for item in a])``.  However, this
    idea fails if `ar2` is a set, or similar (non-sequence) container, since
    ``asarray(ar2)`` then produces an object array rather than an array of
    the contained values.

    .. versionadded:: 1.4.0

    Examples
    --------
    >>> test = np.array([0, 1, 2, 5, 0])
    >>> states = [0, 2]
    >>> mask = np.in1d(test, states)
    >>> mask
    array([ True, False,  True, False,  True])
    >>> test[mask]
    array([0, 2, 0])
    >>> mask = np.in1d(test, states, invert=True)
    >>> mask
    array([False,  True, False,  True, False])
    >>> test[mask]
    array([1, 5])
    """
    # Ravel both arrays; only ar1's shape semantics matter to the caller.
    ar1 = np.asarray(ar1).ravel()
    ar2 = np.asarray(ar2).ravel()

    # Object dtypes may not sort reliably, so they always take the loop path.
    contains_object = ar1.dtype.hasobject or ar2.dtype.hasobject

    # Brute-force comparison loop, used when ar2 is small (empirically
    # faster than sorting) or when object elements are involved.
    if len(ar2) < 10 * len(ar1) ** 0.145 or contains_object:
        if invert:
            mask = np.ones(len(ar1), dtype=bool)
            for value in ar2:
                mask &= (ar1 != value)
        else:
            mask = np.zeros(len(ar1), dtype=bool)
            for value in ar2:
                mask |= (ar1 == value)
        return mask

    # Sorting-based approach.
    if not assume_unique:
        ar1, rev_idx = np.unique(ar1, return_inverse=True)
        ar2 = np.unique(ar2)

    combined = np.concatenate((ar1, ar2))
    # Stable sort so that, among equal values, the element coming from ar1
    # always precedes the matching element from ar2.
    order = combined.argsort(kind='mergesort')
    sorted_combined = combined[order]
    # An ar1 element is a member of ar2 exactly when it is followed by an
    # equal value in the sorted merge (the trailing sentinel covers the
    # final position, which can never have a successor).
    if invert:
        adjacent = sorted_combined[1:] != sorted_combined[:-1]
    else:
        adjacent = sorted_combined[1:] == sorted_combined[:-1]
    flag = np.concatenate((adjacent, [invert]))
    result = np.empty(combined.shape, dtype=bool)
    result[order] = flag
    if assume_unique:
        return result[:len(ar1)]
    return result[rev_idx]
def isin(element, test_elements, assume_unique=False, invert=False):
    """
    Calculates `element in test_elements`, broadcasting over `element` only.

    Returns a boolean array of the same shape as `element` that is True
    where an element of `element` is in `test_elements` and False otherwise.

    Parameters
    ----------
    element : array_like
        Input array.
    test_elements : array_like
        The values against which to test each value of `element`.  This
        argument is flattened if it is an array or array_like.  See notes
        for behavior with non-array-like parameters.
    assume_unique : bool, optional
        If True, the input arrays are both assumed to be unique, which
        can speed up the calculation. Default is False.
    invert : bool, optional
        If True, the values in the returned array are inverted, as if
        calculating `element not in test_elements`. Default is False.
        ``np.isin(a, b, invert=True)`` is equivalent to (but faster
        than) ``np.invert(np.isin(a, b))``.

    Returns
    -------
    isin : ndarray, bool
        Has the same shape as `element`. The values `element[isin]`
        are in `test_elements`.

    See Also
    --------
    in1d : Flattened version of this function.
    numpy.lib.arraysetops : Module with a number of other functions for
                            performing set operations on arrays.

    Notes
    -----
    `isin` is an element-wise function version of the python keyword `in`.
    ``isin(a, b)`` is roughly equivalent to
    ``np.array([item in b for item in a])`` if `a` and `b` are 1-D
    sequences.

    If `test_elements` is a set (or other non-sequence collection) it will
    be converted to an object array with one element rather than an array
    of the contained values, because that is how the `array` constructor
    treats non-sequence collections.  Converting the set to a list usually
    gives the desired behavior.

    .. versionadded:: 1.13.0

    Examples
    --------
    >>> element = 2*np.arange(4).reshape((2, 2))
    >>> test_elements = [1, 2, 4, 8]
    >>> np.isin(element, test_elements)
    array([[ False,  True],
           [ True,  False]])
    """
    element = np.asarray(element)
    # Delegate to the flat implementation, then restore the input's shape.
    flat_mask = in1d(element, test_elements,
                     assume_unique=assume_unique, invert=invert)
    return flat_mask.reshape(element.shape)
def union1d(ar1, ar2):
    """
    Find the union of two arrays.

    Return the unique, sorted array of values that are in either of the two
    input arrays.

    Parameters
    ----------
    ar1, ar2 : array_like
        Input arrays. They are flattened if they are not already 1D.

    Returns
    -------
    union1d : ndarray
        Unique, sorted union of the input arrays.

    See Also
    --------
    numpy.lib.arraysetops : Module with a number of other functions for
                            performing set operations on arrays.

    Examples
    --------
    >>> np.union1d([-1, 0, 1], [-2, 0, 2])
    array([-2, -1,  0,  1,  2])

    To find the union of more than two arrays, use functools.reduce:

    >>> from functools import reduce
    >>> reduce(np.union1d, ([1, 3, 4, 3], [3, 1, 2, 1], [6, 3, 4, 2]))
    array([1, 2, 3, 4, 6])
    """
    # Concatenating then deduplicating gives the sorted union directly.
    combined = np.concatenate((ar1, ar2))
    return unique(combined)
def setdiff1d(ar1, ar2, assume_unique=False):
    """
    Find the set difference of two arrays.

    Return the sorted, unique values in `ar1` that are not in `ar2`.

    Parameters
    ----------
    ar1 : array_like
        Input array.
    ar2 : array_like
        Input comparison array.
    assume_unique : bool
        If True, the input arrays are both assumed to be unique, which
        can speed up the calculation. Default is False.

    Returns
    -------
    setdiff1d : ndarray
        Sorted 1D array of values in `ar1` that are not in `ar2`.

    See Also
    --------
    numpy.lib.arraysetops : Module with a number of other functions for
                            performing set operations on arrays.

    Examples
    --------
    >>> a = np.array([1, 2, 3, 2, 4, 1])
    >>> b = np.array([3, 4, 5, 6])
    >>> np.setdiff1d(a, b)
    array([1, 2])
    """
    if not assume_unique:
        # Deduplicate (and sort) both inputs so the membership test below
        # may safely assume unique values.
        ar1 = unique(ar1)
        ar2 = unique(ar2)
    else:
        ar1 = np.asarray(ar1).ravel()
    # Keep the elements of ar1 that do NOT occur in ar2.
    keep = in1d(ar1, ar2, assume_unique=True, invert=True)
    return ar1[keep]
|
waynebhayesREPO_NAMESpArcFiRePATH_START.@SpArcFiRe_extracted@SpArcFiRe-master@scripts@SpArcFiRe-pyvenv@lib@python2.7@site-packages@numpy@lib@arraysetops.py@.PATH_END.py
|
{
"filename": "test_resampled.py",
"repo_name": "mj-will/nessai",
"repo_path": "nessai_extracted/nessai-main/tests/test_flows/test_distributions/test_resampled.py",
"type": "Python"
}
|
from unittest.mock import MagicMock, create_autospec
from nessai.flows.distributions import ResampledGaussian
def test_finalise():
    """Check that finalise forwards its arguments, exactly once, to the
    normalisation-constant estimator."""
    mock_dist = create_autospec(ResampledGaussian)
    mock_dist.estimate_normalisation_constant = MagicMock()

    ResampledGaussian.finalise(mock_dist, n_samples=100, n_batches=10)

    mock_dist.estimate_normalisation_constant.assert_called_once_with(
        n_samples=100,
        n_batches=10,
    )
|
mj-willREPO_NAMEnessaiPATH_START.@nessai_extracted@nessai-main@tests@test_flows@test_distributions@test_resampled.py@.PATH_END.py
|
{
"filename": "paper.md",
"repo_name": "kevin218/Eureka",
"repo_path": "Eureka_extracted/Eureka-main/joss/paper.md",
"type": "Markdown"
}
|
---
title: '`Eureka!`: An End-to-End Pipeline for JWST Time-Series Observations'
tags:
- Python
- JWST
- HST
- astronomy
- exoplanets
- spectroscopy
- photometry
authors:
- name: Taylor J. Bell
orcid: 0000-0003-4177-2149
affiliation: 1
- name: Eva-Maria Ahrer
orcid: 0000-0003-0973-8426
affiliation: 2
- name: Jonathan Brande
orcid: 0000-0002-2072-6541
affiliation: 3
- name: Aarynn L. Carter
orcid: 0000-0001-5365-4815
affiliation: 4
- name: Adina D. Feinstein
orcid: 0000-0002-9464-8101
affiliation: 5
- name: Giannina {Guzman Caloca}
orcid: 0000-0001-6340-8220
affiliation: 6
- name: Megan Mansfield
orcid: 0000-0003-4241-7413
affiliation: "7, 8"
- name: Sebastian Zieba
orcid: 0000-0003-0562-6750
affiliation: 9
- name: Caroline Piaulet
orcid: 0000-0002-2875-917X
affiliation: 10
- name: Björn Benneke
orcid: 0000-0001-5578-1498
affiliation: 10
- name: Joseph Filippazzo
orcid: 0000-0002-0201-8306
affiliation: 11
- name: Erin M. May
orcid: 0000-0002-2739-1465
affiliation: 12
- name: Pierre-Alexis Roy
orcid: 0000-0001-6809-3520
affiliation: 10
- name: Laura Kreidberg
orcid: 0000-0003-0514-1147
affiliation: 9
- name: Kevin B. Stevenson
orcid: 0000-0002-7352-7941
affiliation: 12
affiliations:
- name: BAER Institute, NASA Ames Research Center, Moffet Field, CA 94035, USA
index: 1
- name: Department of Physics, University of Warwick, Gibbet Hill Road, CV4 7AL Coventry, UK
index: 2
- name: Department of Physics and Astronomy, University of Kansas, 1082 Malott, 1251 Wescoe Hall Dr., Lawrence, KS 66045, USA
index: 3
- name: Department of Astronomy and Astrophysics, University of California, Santa Cruz, 1156 High Street, Santa Cruz, CA 95064, USA
index: 4
- name: Department of Astronomy & Astrophysics, University of Chicago, 5640 S. Ellis Avenue, Chicago, IL 60637, USA
index: 5
- name: Department of Astronomy, University of Maryland, College Park, MD USA
index: 6
- name: Steward Observatory, University of Arizona, Tucson, AZ 85719, USA
index: 7
- name: NHFP Sagan Fellow
index: 8
- name: Max-Planck-Institut für Astronomie, Königstuhl 17, D-69117 Heidelberg, Germany
index: 9
- name: Department of Physics and Institute for Research on Exoplanets, Université de Montréal, Montreal, QC, Canada
index: 10
- name: Space Telescope Science Institute, 3700 San Martin Drive, Baltimore, MD 21218, USA
index: 11
- name: Johns Hopkins APL, 11100 Johns Hopkins Road, Laurel, MD 20723, USA
index: 12
date: 30 May 2022
bibliography: paper.bib
---
# Summary
`Eureka!` is a data reduction and analysis pipeline for exoplanet time-series observations, with a particular focus on James Webb Space Telescope [JWST, @JWST:2006] data. JWST was launched on December 25, 2021 and over the next 1-2 decades will pursue four main science themes: Early Universe, Galaxies Over Time, Star Lifecycle, and Other Worlds. Our focus is on providing the astronomy community with an open source tool for the reduction and analysis of time-series observations of exoplanets in pursuit of the fourth of these themes, Other Worlds. The goal of `Eureka!` is to provide an end-to-end pipeline that starts with raw, uncalibrated FITS files and ultimately yields precise exoplanet transmission and/or emission spectra. The pipeline has a modular structure with six stages, and each stage uses a "Eureka! Control File" (ECF; these files use the .ecf file extension) to allow for easy control of the pipeline's behavior. Stage 5 also uses a "Eureka! Parameter File" (EPF; these files use the .epf file extension) to control the fitted parameters. We provide template ECFs for the MIRI [@MIRI:2015], NIRCam [@NIRCam:2004], NIRISS [@NIRISS:2017], and NIRSpec [@NIRSpec:2007] instruments on JWST and the WFC3 instrument [@WFC3:2008] on the Hubble Space Telescope [HST, @HST:1986]. These templates give users a good starting point for their analyses, but `Eureka!` is not intended to be used as a black box tool, and users should expect to fine-tune some settings for each observation in order to achieve optimal results. At each stage, the pipeline creates intermediate figures and outputs that allow users to compare `Eureka!`'s performance using different parameter settings or to compare `Eureka!` with an independent pipeline. The ECF used to run each stage is also copied into the output folder from each stage to enhance reproducibility. 
Finally, while `Eureka!` has been optimized for exoplanet observations (especially the latter stages of the code), much of the core functionality could also be repurposed for JWST time-series observations in other research domains thanks to `Eureka!`'s modularity.
# Outline of `Eureka!`'s Stages
`Eureka!` is broken down into six stages, which are as follows (also summarized in \autoref{fig:overview}):
- Stage 1: An optional step that calibrates raw data (converts ramps to slopes for JWST observations). This step can be skipped within `Eureka!` if you would rather use the Stage 1 outputs from the `jwst` pipeline [@jwst:2022].
- Stage 2: An optional step that further calibrates Stage 1 data (performs flat-fielding, unit conversion, etc. for JWST observations). This step can be skipped within `Eureka!` if you would rather use the Stage 2 outputs from the `jwst` pipeline.
- Stage 3: Using Stage 2 outputs, performs background subtraction and optimal spectral extraction. For spectroscopic observations, this stage generates a time series of 1D spectra. For photometric observations, this stage generates a single light curve of flux versus time.
- Stage 4: Using Stage 3 outputs, generates spectroscopic light curves by binning the time series of 1D spectra along the wavelength axis. Optionally removes drift/jitter along the dispersion direction and/or sigma clips outliers.
- Stage 5: Fits the light curves with noise and astrophysical models using different optimization or sampling algorithms.
- Stage 6: Displays the planet spectrum in figure and table form using results from the Stage 5 fits.
# Differences From the `jwst` Pipeline
`Eureka!`'s Stage 1 offers a few alternative, experimental ramp fitting methods compared to the `jwst` pipeline, but mostly acts as a wrapper to allow you to call the `jwst` pipeline in the same format as `Eureka!`. Similarly, `Eureka!`'s Stage 2 acts solely as a wrapper for the `jwst` pipeline. Meanwhile, `Eureka!`'s Stages 3 through 6 completely depart from the `jwst` pipeline and offer specialized background subtraction, source extraction, wavelength binning, sigma clipping, fitting, and plotting routines with heritage from past space-based exoplanet science.
{width=100%}
# Statement of Need
The calibration, reduction, and fitting of exoplanet time-series observations is a challenging problem with many tunable parameters across many stages, many of which will significantly impact the final results. Typically, the default calibration pipeline from astronomical observatories is insufficiently tailored for exoplanet time-series observations as the pipeline is more optimized for other science use cases. As such, it is common practice to develop a custom data analysis pipeline that starts from the original, uncalibrated images. Historically, data analysis pipelines have often been proprietary, so each new user of an instrument or telescope has had to develop their own pipeline. Also, clearly specifying the analysis procedure can be challenging, especially with proprietary code, which erodes reproducibility. `Eureka!` seeks to be a next-generation data analysis pipeline for next-generation observations from JWST with open-source and well-documented code for easier adoption; modular code for easier customization while maintaining a consistent framework; and easy-to-use but powerful inputs and outputs for increased automation, increased reproducibility, and more thorough intercomparisons. By also allowing for analyses of HST observations within the same framework, users will be able to combine new and old observations to develop a more complete understanding of individual targets or even entire populations.
# Documentation
Documentation for `Eureka!` is available at [https://eurekadocs.readthedocs.io/en/latest/](https://eurekadocs.readthedocs.io/en/latest/).
# Similar Tools
We will now discuss the broader data reduction and fitting ecosystem in which `Eureka!` lives. Several similar open-source tools are discussed below to provide additional context, but this is not meant to be a comprehensive list.
As mentioned above, `Eureka!` makes use of the first two stages of [`jwst`](https://github.com/spacetelescope/jwst) [@jwst:2022] while offering significantly different extraction routines and novel spectral binning and fitting routines beyond what is contained in `jwst`. `Eureka!` bears similarities to the [`POET`](https://github.com/kevin218/POET) [@Stevenson:2012; @Cubillos:2013] and [`WFC3`](https://github.com/kevin218/WFC3) [@Stevenson:2014a] pipelines, developed for Spitzer/IRAC and HST/WFC3 observations respectively; in fact, much of the code from those pipelines has been incorporated into `Eureka!`. `Eureka!` is near feature parity with `WFC3`, but the Spitzer-specific parts of the `POET` pipeline have not been incorporated into `Eureka!`. The [`SPCA`](https://github.com/lisadang27/SPCA) [@Dang:2018; @Bell:2021] pipeline developed for the reduction and fitting of Spitzer/IRAC observations also bears some similarity to this pipeline, and some snippets of that pipeline have also been incorporated into `Eureka!`. The [`tshirt`](https://github.com/eas342/tshirt) [@tshirt:2022] package also offers spectral and photometric extraction routines that work for HST and JWST data. [`PACMAN`](https://github.com/sebastian-zieba/PACMAN) [@Kreidberg:2014; @pacman:2022] is another open-source end-to-end pipeline developed for HST/WFC3 observations. The [`exoplanet`](https://github.com/exoplanet-dev/exoplanet) [@exoplanet:2021] and [`juliet`](https://github.com/nespinoza/juliet) [@juliet:2019] packages offer some similar capabilities as the observation fitting parts of `Eureka!`.
# Acknowledgements
`Eureka!` allows for some variations upon the STScI's [`jwst`](https://github.com/spacetelescope/jwst) pipeline [@jwst:2022] for Stages 1 and 2, but presently these stages mostly act as wrappers around the `jwst` pipeline. This allows `Eureka!` to run the `jwst` pipeline in the same manner as `Eureka!`'s latter stages. `Eureka!` then uses its own custom code for additional calibration steps, spectral or photometric extraction, and light curve fitting. Several parts of the spectroscopy-focused code in Stages 3 and 4 of `Eureka!` were inspired by, or were initially written for, the [`WFC3`](https://github.com/kevin218/WFC3) [@Stevenson:2014a] pipeline. Other parts of the spectroscopy code and several parts of the photometry focused code in Stage 3 were inspired by, or were initially written for, the [`POET`](https://github.com/kevin218/POET) pipeline [@Stevenson:2012; @Cubillos:2013]. Some of the Stage 5 code comes from @Kreidberg:2014 and [`PACMAN`](https://github.com/sebastian-zieba/PACMAN) [@pacman:2022]. Small pieces of the [`SPCA`](https://github.com/lisadang27/SPCA) [@Dang:2018; @Bell:2021] and [`Bell_EBM`](https://github.com/taylorbell57/Bell_EBM) [@Bell:2018] repositories have also been reused.
ALC is supported by a grant from STScI (_JWST_-ERS-01386) under NASA contract NAS5-03127. ADF acknowledges support by the National Science Foundation Graduate Research Fellowship Program under Grant No. (DGE-1746045). CP acknowledges financial support by the Fonds de Recherche Québécois—Nature et Technologie (FRQNT; Québec), the Technologies for Exo-Planetary Science (TEPS) Trainee Program and the Natural Sciences and Engineering Research Council (NSERC) Vanier Scholarship. JB acknowledges support from the NASA Interdisciplinary Consortia for Astrobiology Research (ICAR). KBS is supported by _JWST_-ERS-01366. MM acknowledges support through the NASA Hubble Fellowship grant HST-HF2-51485.001-A awarded by STScI, which is operated by the Association of Universities for Research in Astronomy, Inc., for NASA, under contract NAS5-26555. We also thank Ivelina Momcheva for useful discussions. Support for this work was provided in part by NASA through a grant from the Space Telescope Science Institute, which is operated by the Association of Universities for Research in Astronomy, Inc., under NASA contract NAS 5-03127. In addition, we would like to thank the Transiting Exoplanet Community Early Release Science program for organizing meetings that contributed to the writing of `Eureka!`.
# References
|
kevin218REPO_NAMEEurekaPATH_START.@Eureka_extracted@Eureka-main@joss@paper.md@.PATH_END.py
|
{
"filename": "utils.py",
"repo_name": "joshspeagle/dynesty",
"repo_path": "dynesty_extracted/dynesty-master/py/dynesty/utils.py",
"type": "Python"
}
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
A collection of useful functions.
"""
import sys
import warnings
import math
import copy
import time
import os
import shutil
from collections import namedtuple
from functools import partial
import pickle as pickle_module
# To allow replacing of the pickler
import numpy as np
from scipy.special import logsumexp
from ._version import __version__ as DYNESTY_VERSION
try:
import tqdm
except ImportError:
tqdm = None
try:
import h5py
except ImportError:
h5py = None
__all__ = [
"unitcheck", "resample_equal", "mean_and_cov", "quantile", "jitter_run",
"resample_run", "reweight_run", "unravel_run", "merge_runs", "kld_error",
"get_enlarge_bootstrap", "LoglOutput", "LogLikelihood", "RunRecord",
"DelayTimer"
]
SQRTEPS = math.sqrt(float(np.finfo(np.float64).eps))
IteratorResult = namedtuple('IteratorResult', [
'worst', 'ustar', 'vstar', 'loglstar', 'logvol', 'logwt', 'logz',
'logzvar', 'h', 'nc', 'worst_it', 'boundidx', 'bounditer', 'eff',
'delta_logz', 'blob'
])
IteratorResultShort = namedtuple('IteratorResultShort', [
'worst', 'ustar', 'vstar', 'loglstar', 'nc', 'worst_it', 'boundidx',
'bounditer', 'eff'
])
_LOWL_VAL = -1e300
class LoglOutput:
    """
    Wrapper around the return value of the likelihood function.

    It preserves the (optional) blob returned alongside the log-likelihood
    value while still behaving like a plain float in comparisons.
    """

    def __init__(self, v, blob_flag):
        """
        Initialize the object.

        Parameters
        ----------
        v : float or tuple
            If `blob_flag` is True, a tuple of the log-likelihood value
            and the blob; otherwise just the log-likelihood value.
        blob_flag : bool
            Whether `v` carries a blob.
        """
        if blob_flag:
            self.val = v[0]
            self.blob = v[1]
        else:
            self.val = v

    def __float__(self):
        """Return the plain log-likelihood value as a float."""
        return float(self.val)

    # All comparison operators delegate to the underlying .val attribute
    # (via float()), so instances compare like plain numbers.

    def __lt__(self, other):
        """Less-than comparison on the log-likelihood value."""
        return float(self) < float(other)

    def __gt__(self, other):
        """Greater-than comparison on the log-likelihood value."""
        return float(self) > float(other)

    def __le__(self, other):
        """Less-or-equal comparison on the log-likelihood value."""
        return float(self) <= float(other)

    def __ge__(self, other):
        """Greater-or-equal comparison on the log-likelihood value."""
        return float(self) >= float(other)

    def __eq__(self, other):
        """Equality comparison on the log-likelihood value."""
        return float(self) == float(other)
class LogLikelihood:
    """
    Class that calls the likelihood function (using a pool if provided)
    Also if requested it saves the history of evaluations
    """

    def __init__(self,
                 loglikelihood,
                 ndim,
                 pool=None,
                 save=False,
                 history_filename=None,
                 blob=False):
        """ Initialize the object.

        Parameters
        ----------
        loglikelihood: function
        ndim: int
            Dimensionality
        pool: Pool (optional)
            Any kind of pool capable of performing map()
        save: bool
            if True the function evaluations will be saved in the hdf5 file
        history_filename: string
            The filename where the history will go
        blob: boolean
            if True we expect the logl output to be a tuple of logl value and
            a blob, otherwise it'll be logl value only
        """
        self.loglikelihood = loglikelihood
        self.pool = pool
        # In-memory buffers of evaluated points and logl values; they are
        # flushed to disk once more than save_every entries accumulate.
        self.history_pars = []
        self.history_logl = []
        self.save_every = 10000
        self.save = save
        self.history_filename = history_filename
        self.ndim = ndim
        # Latch set after a failed disk write so we warn once and then
        # stop attempting further saves.
        self.failed_save = False
        self.blob = blob
        if save:
            self.history_init()

    def map(self, pars):
        """
        Evaluate the likelihood function on the list of vectors
        The pool is used if it was provided when the object was created

        Returns
        -------
        ret: The list of LoglOutput objects
        """
        if self.pool is None:
            ret = list([
                LoglOutput(_, self.blob) for _ in map(self.loglikelihood, pars)
            ])
        else:
            ret = [
                LoglOutput(_, self.blob)
                for _ in self.pool.map(self.loglikelihood, pars)
            ]
        if self.save:
            # Only the scalar logl values are recorded, not the blobs.
            self.history_append([_.val for _ in ret], pars)
        return ret

    def __call__(self, x):
        """
        Evaluate the likelihood f-n once
        """
        ret = LoglOutput(self.loglikelihood(x), self.blob)
        if self.save:
            self.history_append([ret.val], [x])
        return ret

    def history_append(self, logls, pars):
        """
        Append to the internal history the list of loglikelihood values
        And points
        """
        self.history_logl.extend(logls)
        self.history_pars.extend(pars)
        # Flush the buffers to disk once they grow past the threshold.
        if len(self.history_logl) > self.save_every:
            self.history_save()

    def history_init(self):
        """ Initialize the hdf5 storage of evaluations """
        if h5py is None:
            raise RuntimeError(
                'h5py module is required for saving history of calls')
        # Counts how many rows have already been written to the file.
        self.history_counter = 0
        try:
            # maxshape=(None, ...) makes the datasets resizable so that
            # history_save() can grow them as evaluations accumulate.
            with h5py.File(self.history_filename, mode='w') as fp:
                fp.create_dataset('param', (self.save_every, self.ndim),
                                  maxshape=(None, self.ndim))
                fp.create_dataset('logl', (self.save_every, ),
                                  maxshape=(None, ))
        except OSError:
            print('Failed to initialize history file')
            raise

    def history_save(self):
        """
        Save the actual history from an internal buffer into the file
        """
        if self.failed_save or not self.save:
            # if failed to save before, do not try again
            # also quickly return if saving is not needed
            return
        try:
            with h5py.File(self.history_filename, mode='a') as fp:
                # pylint: disable=no-member
                # Grow both datasets by the number of buffered entries and
                # write the new values at the end.
                nadd = len(self.history_logl)
                fp['param'].resize(self.history_counter + nadd, axis=0)
                fp['logl'].resize(self.history_counter + nadd, axis=0)
                fp['param'][-nadd:, :] = np.array(self.history_pars)
                fp['logl'][-nadd:] = np.array(self.history_logl)
                # Reset the in-memory buffers after a successful flush.
                self.history_pars = []
                self.history_logl = []
                self.history_counter += nadd
        except OSError:
            warnings.warn(
                'Failed to save history of evaluations. Will not try again.')
            self.failed_save = True

    def __getstate__(self):
        """Get state information for pickling."""
        state = self.__dict__.copy()
        # The pool is generally not picklable, so drop it; a restored
        # object will evaluate likelihoods serially.
        if 'pool' in state:
            del state['pool']
        return state
class RunRecord:
    """
    Dictionary-like accumulator holding the outputs of a nested sampling
    run: a collection of per-iteration (and, for dynamic runs, per-batch)
    lists of quantities.
    """

    def __init__(self, dynamic=False):
        """
        If dynamic is true, we initialize the class for
        a dynamic nested run.
        """
        keys = [
            'id',  # live point labels
            'u',  # unit cube samples
            'v',  # transformed variable samples
            'logl',  # loglikelihoods of samples
            'logvol',  # expected ln(volume)
            'logwt',  # ln(weights)
            'logz',  # cumulative ln(evidence)
            'logzvar',  # cumulative error on ln(evidence)
            'h',  # cumulative information
            'nc',  # number of calls at each iteration
            'boundidx',  # index of bound dead point was drawn from
            'it',  # iteration the live (now dead) point was proposed
            'bounditer',  # active bound at a specific iteration
            'n',  # number of live points interior to dead point
            'scale',  # scale factor at each iteration
            'blob'  # blobs output by the log-likelihood
        ]
        if dynamic:
            # These are special since their length is == the number of
            # batches, not the number of iterations.
            keys += [
                'batch',  # live point batch ID
                'batch_nlive',  # number of live points added in batch
                'batch_bounds'  # loglikelihood bounds used in batch
            ]
        self.D = {key: [] for key in keys}

    def append(self, newD):
        """
        Append new information to the RunRecord in the form of a dictionary,
        i.e. run.append(dict(batch=3, niter=44))
        """
        for key, value in newD.items():
            self.D[key].append(value)

    def __getitem__(self, k):
        return self.D[k]

    def __setitem__(self, k, v):
        self.D[k] = v

    def keys(self):
        """Return the names of the stored quantities."""
        return self.D.keys()
class DelayTimer:
    """ Utility class that allows us to detect that a certain
    amount of time has passed."""

    def __init__(self, delay):
        """ Initialise the timer with a delay of `delay` seconds.

        Parameters
        ----------
        delay: float
            The number of seconds in the timer
        """
        self.delay = delay
        self.last_time = time.time()

    def is_time(self):
        """
        Returns true if more than self.delay seconds have passed
        since the initialization or the last successful is_time() call.

        Returns
        -------
        ret: bool
            True if the specified amount of time has passed since the
            initialization or the last successful is_time() call
        """
        now = time.time()
        if now - self.last_time <= self.delay:
            return False
        # Successful check: restart the countdown from now.
        self.last_time = now
        return True
# Container for the status strings built by get_print_fn_args(): the current
# iteration plus three progressively longer renderings of the same status
# line (which one gets printed depends on the available terminal width).
PrintFnArgs = namedtuple('PrintFnArgs',
                         ['niter', 'short_str', 'mid_str', 'long_str'])
def print_fn(results,
             niter,
             ncall,
             add_live_it=None,
             dlogz=None,
             stop_val=None,
             nbatch=None,
             logl_min=-np.inf,
             logl_max=np.inf,
             pbar=None):
    """
    The default function used to print out results in real time.

    Parameters
    ----------
    results : tuple
        Collection of variables output from the current state of the sampler.
        Currently includes: particle index, unit cube position, parameter
        position, ln(likelihood), ln(volume), ln(weight), ln(evidence),
        Var[ln(evidence)], information, number of (current) function calls,
        iteration when the point was originally proposed, index of the
        bounding object originally proposed from, index of the bounding
        object active at a given iteration, cumulative efficiency, and
        estimated remaining ln(evidence).
    niter : int
        The current iteration of the sampler.
    ncall : int
        The total number of function calls at the current iteration.
    add_live_it : int, optional
        If the last set of live points are being added explicitly, this
        quantity tracks the sorted index of the current live point being
        added.
    dlogz : float, optional
        The evidence stopping criterion. If not provided, the provided
        stopping value will be used instead.
    stop_val : float, optional
        The current stopping criterion (for dynamic nested sampling). Used
        if the `dlogz` value is not specified.
    nbatch : int, optional
        The current batch (for dynamic nested sampling).
    logl_min : float, optional
        The minimum log-likelihood used when starting sampling. Default is
        `-np.inf`.
    logl_max : float, optional
        The maximum log-likelihood used when stopping sampling. Default is
        `np.inf`.
    pbar : optional
        A tqdm progress bar; when provided, status goes through it instead
        of plain stderr printing.
    """
    # Both printing backends take the same keyword arguments.
    opts = dict(add_live_it=add_live_it,
                dlogz=dlogz,
                stop_val=stop_val,
                nbatch=nbatch,
                logl_min=logl_min,
                logl_max=logl_max)
    if pbar is None:
        print_fn_fallback(results, niter, ncall, **opts)
    else:
        print_fn_tqdm(pbar, results, niter, ncall, **opts)
def get_print_fn_args(results,
                      niter,
                      ncall,
                      add_live_it=None,
                      dlogz=None,
                      stop_val=None,
                      nbatch=None,
                      logl_min=-np.inf,
                      logl_max=np.inf):
    """
    Assemble the status strings displayed during sampling.

    Builds three renderings of the current sampler state (short, mid and
    long) so the caller can pick whichever fits the terminal width, and
    returns them together with the iteration number as a PrintFnArgs
    namedtuple.
    """
    # Extract results at the current iteration.
    loglstar = results.loglstar
    logz = results.logz
    logzvar = results.logzvar
    delta_logz = results.delta_logz
    bounditer = results.bounditer
    nc = results.nc
    eff = results.eff

    # Adjusting outputs for printing.
    # Clamp extreme values so the status line stays readable.
    if delta_logz > 1e6:
        delta_logz = np.inf
    if logzvar >= 0. and logzvar <= 1e6:
        logzerr = np.sqrt(logzvar)
    else:
        logzerr = np.nan
    if logz <= -1e6:
        logz = -np.inf
    if loglstar <= -1e6:
        loglstar = -np.inf

    # Constructing output.
    long_str = []
    # long_str.append("iter: {:d}".format(niter))
    if add_live_it is not None:
        long_str.append("+{:d}".format(add_live_it))
    # The short string only keeps a subset of the long string's fields.
    short_str = list(long_str)
    if nbatch is not None:
        long_str.append("batch: {:d}".format(nbatch))
    long_str.append("bound: {:d}".format(bounditer))
    long_str.append("nc: {:d}".format(nc))
    long_str.append("ncall: {:d}".format(ncall))
    long_str.append("eff(%): {:6.3f}".format(eff))
    short_str.append(long_str[-1])
    long_str.append("loglstar: {:6.3f} < {:6.3f} < {:6.3f}".format(
        logl_min, loglstar, logl_max))
    short_str.append("logl*: {:6.1f}<{:6.1f}<{:6.1f}".format(
        logl_min, loglstar, logl_max))
    long_str.append("logz: {:6.3f} +/- {:6.3f}".format(logz, logzerr))
    short_str.append("logz: {:6.1f}+/-{:.1f}".format(logz, logzerr))
    # The mid string additionally reports the stopping criterion.
    mid_str = list(short_str)
    if dlogz is not None:
        long_str.append("dlogz: {:6.3f} > {:6.3f}".format(delta_logz, dlogz))
        mid_str.append("dlogz: {:6.1f}>{:6.1f}".format(delta_logz, dlogz))
    else:
        long_str.append("stop: {:6.3f}".format(stop_val))
        mid_str.append("stop: {:6.3f}".format(stop_val))

    return PrintFnArgs(niter=niter,
                       short_str=short_str,
                       mid_str=mid_str,
                       long_str=long_str)
def print_fn_tqdm(pbar,
                  results,
                  niter,
                  ncall,
                  add_live_it=None,
                  dlogz=None,
                  stop_val=None,
                  nbatch=None,
                  logl_min=-np.inf,
                  logl_max=np.inf):
    """
    Status printing backend that reports through a tqdm progress bar.
    """
    status = get_print_fn_args(results,
                               niter,
                               ncall,
                               add_live_it=add_live_it,
                               dlogz=dlogz,
                               stop_val=stop_val,
                               nbatch=nbatch,
                               logl_min=logl_min,
                               logl_max=logl_max)
    # Show the detailed status as the bar postfix; defer the redraw to the
    # update() call below.
    pbar.set_postfix_str(" | ".join(status.long_str), refresh=False)
    pbar.update(status.niter - pbar.n)
def print_fn_fallback(results,
                      niter,
                      ncall,
                      add_live_it=None,
                      dlogz=None,
                      stop_val=None,
                      nbatch=None,
                      logl_min=-np.inf,
                      logl_max=np.inf):
    """
    This is a function that does the status printing using just
    standard printing into the console
    """
    fn_args = get_print_fn_args(results,
                                niter,
                                ncall,
                                add_live_it=add_live_it,
                                dlogz=dlogz,
                                stop_val=stop_val,
                                nbatch=nbatch,
                                logl_min=logl_min,
                                logl_max=logl_max)
    niter, short_str, mid_str, long_str = (fn_args.niter, fn_args.short_str,
                                           fn_args.mid_str, fn_args.long_str)
    long_str = ["iter: {:d}".format(niter)] + long_str

    # Printing.
    long_str = ' | '.join(long_str)
    mid_str = ' | '.join(mid_str)
    short_str = '|'.join(short_str)
    # Pick the most detailed rendering that fits the terminal; when stderr
    # is not a tty assume a generous 200-column width.
    if sys.stderr.isatty() and hasattr(shutil, 'get_terminal_size'):
        columns = shutil.get_terminal_size(fallback=(80, 25))[0]
    else:
        columns = 200
    # Overwrite the current line ("\r") and pad with spaces so leftovers
    # from a previous, longer status line are erased.
    if columns > len(long_str):
        sys.stderr.write("\r" + long_str + ' ' * (columns - len(long_str) - 2))
    elif columns > len(mid_str):
        sys.stderr.write("\r" + mid_str + ' ' * (columns - len(mid_str) - 2))
    else:
        sys.stderr.write("\r" + short_str + ' ' *
                         (columns - len(short_str) - 2))
    sys.stderr.flush()
# List of results attributes as
# Name, type, description, shape (if array)
_RESULTS_STRUCTURE = [
('logl', 'array[float]', 'Log likelihood', 'niter'),
('samples_it', 'array[int]',
"the sampling iteration when the sample was proposed "
"(e.g., iteration 570)", 'niter'),
('samples_id', 'array[int]',
'The unique ID of the sample XXX (within nlive points)', None),
('samples_n', 'array[int]',
'The number of live points at the point when the sample was proposed',
'niter'),
('samples_u', 'array[float]', '''The coordinates of live points in the
unit cube coordinate system''', 'niter,ndim'),
('samples_v', 'array[float]', '''The coordinates of live points''',
'niter,ndim'),
('samples', 'array',
'''the location (in original coordinates). Identical to samples_v''',
'niter,ndim'), ('niter', 'int', 'number of iterations', None),
('ncall', 'int', 'Total number likelihood calls', None),
('logz', 'array', 'Array of cumulative log(Z) integrals', 'niter'),
('logzerr', 'array', 'Array of uncertainty of log(Z)', 'niter'),
('logwt', 'array', 'Array of log-posterior weights', 'niter'),
('eff', 'float', 'Sampling efficiency', None),
('nlive', 'int', 'Number of live points for a static run', None),
('logvol', 'array[float]', 'Logvolumes of dead points', 'niter'),
('information', 'array[float]', 'Information Integral H', 'niter'),
('bound', 'array[object]',
"the set of bounding objects used to condition proposals for the "
"base run", 'nbound'),
('bound_iter', 'array[int]',
"index of the bound being used for an iteration that generated the point",
'niter'),
('samples_bound', 'array[int]',
"The index of the bound that the corresponding sample was drawn from",
'niter'),
('samples_batch', 'array[int]',
"Tracks the batch during which the samples were proposed", 'niter'),
('batch_bounds', 'array[tuple]',
"The log-likelihood bounds used to run a batch.", 'nbatch'),
('batch_nlive', 'array[int]',
"The number of live points used for given batch", 'nbatch'),
('scale', 'array[float]', "Scalar scale applied for proposals", 'niter'),
('blob', 'array[]',
'The auxiliary blobs computed by the log-likelihood function', 'niter')
]
class Results:
    """
    Contains the full output of a run along with a set of helper
    functions for summarizing the output.

    The object is meant to be unchangeable record of the static or
    dynamic nested run.

    Results attributes (name, type, description, array size):
    """

    # Set of key names that a Results object may hold; taken from the
    # first element of each _RESULTS_STRUCTURE entry.
    _ALLOWED = set([_[0] for _ in _RESULTS_STRUCTURE])

    def __init__(self, key_values):
        """
        Initialize the results using the list of key value pairs
        or a dictionary
        Results([('logl', [1, 2, 3]), ('samples_it',[1,2,3])])
        Results(dict(logl=[1, 2, 3], samples_it=[1,2,3]))
        """
        # _initialized must stay False while attributes are being set,
        # because __setattr__ rejects writes once the object is complete.
        self._keys = []
        self._initialized = False
        if isinstance(key_values, dict):
            key_values_list = key_values.items()
        else:
            key_values_list = key_values
        for k, v in key_values_list:
            assert k not in self._keys  # ensure no duplicates
            assert k in Results._ALLOWED, k
            self._keys.append(k)
            # copy so that the stored values are decoupled from the
            # caller's data
            setattr(self, k, copy.copy(v))
        required_keys = ['samples_u', 'samples_id', 'logl', 'samples']
        # TODO I need to add here logz, logzerr
        # but that requires ensuring that merge_runs always computes logz
        for k in required_keys:
            if k not in self._keys:
                raise ValueError('Key %s must be provided' % k)
        # The presence of 'nlive' (static) vs 'samples_n' (dynamic)
        # determines which kind of run this record describes.
        if 'nlive' in self._keys:
            self._dynamic = False
        elif 'samples_n' in self._keys:
            self._dynamic = True
        else:
            raise ValueError(
                'Trying to construct results object without nlive '
                'or samples_n information')
        self._initialized = True

    def __copy__(self):
        # this will be a deep copy
        return Results(self.asdict().items())

    def copy(self):
        '''
        return a copy of the object
        all numpy arrays will be copied too
        '''
        return self.__copy__()

    def __setattr__(self, name, value):
        # Only private attributes (leading underscore) may be set after
        # initialization; the public record is immutable.
        if name[0] != '_' and self._initialized:
            raise RuntimeError("Cannot set attributes directly")
        super().__setattr__(name, value)

    def __getitem__(self, name):
        if name in self._keys:
            return getattr(self, name)
        else:
            raise KeyError(name)

    def __repr__(self):
        # Right-align the key names so values line up in one column.
        m = max(list(map(len, list(self._keys)))) + 1
        return '\n'.join(
            [k.rjust(m) + ': ' + repr(getattr(self, k)) for k in self._keys])

    def __contains__(self, key):
        return key in self._keys

    def keys(self):
        """ Return the list of attributes/keys stored in Results """
        return self._keys

    def items(self):
        """
        Return the list of items in the results object as list of key,value pairs
        """
        return ((k, getattr(self, k)) for k in self._keys)

    def asdict(self):
        """
        Return contents of the Results object as dictionary
        """
        # importantly here we copy attribute values
        return dict((k, copy.copy(getattr(self, k))) for k in self._keys)

    def isdynamic(self):
        """ Return true if the results was constructed using dynamic
        nested sampling run with (potentially) variable number of
        live-points"""
        return self._dynamic

    def importance_weights(self):
        """
        Return the importance weights for the each sample.
        """
        # Subtract the final logz before exponentiating to avoid overflow,
        # then normalize to sum to one.
        logwt = self['logwt'] - self['logz'][-1]
        wt = np.exp(logwt)
        wt = wt / wt.sum()
        return wt

    def samples_equal(self, rstate=None):
        """
        Return the equally weighted samples in random order.
        """
        if rstate is None:
            rstate = get_random_generator()
        return resample_equal(self['samples'],
                              self.importance_weights(),
                              rstate=rstate)

    def summary(self):
        """Return a formatted string giving a quick summary
        of the results."""
        # Dynamic runs have no single 'nlive', so the summary omits it.
        if self._dynamic:
            res = ("niter: {:d}\n"
                   "ncall: {:d}\n"
                   "eff(%): {:6.3f}\n"
                   "logz: {:6.3f} +/- {:6.3f}".format(self['niter'],
                                                      np.sum(self['ncall']),
                                                      self['eff'],
                                                      self['logz'][-1],
                                                      self['logzerr'][-1]))
        else:
            res = ("nlive: {:d}\n"
                   "niter: {:d}\n"
                   "ncall: {:d}\n"
                   "eff(%): {:6.3f}\n"
                   "logz: {:6.3f} +/- {:6.3f}".format(self['nlive'],
                                                      self['niter'],
                                                      np.sum(self['ncall']),
                                                      self['eff'],
                                                      self['logz'][-1],
                                                      self['logzerr'][-1]))

        print('Summary\n=======\n' + res)
# Append the attribute table to the class docstring so help(Results)
# lists every allowed key with its type, description and shape.
Results.__doc__ += '\n\n' + str('\n'.join(
    ['| ' + str(_) for _ in _RESULTS_STRUCTURE])) + '\n'
def results_substitute(results, kw_dict):
    """ This is an utility method that takes a Result object and
    substitutes certain keys in it. It returns a copy object!
    """
    # Take the replacement from kw_dict when present, otherwise keep the
    # original value.
    updated = [(key, kw_dict.get(key, value))
               for key, value in results.items()]
    return Results(updated)
def get_enlarge_bootstrap(sample, enlarge, bootstrap):
    """
    Determine the (enlarge, bootstrap) pair to use for a given run.
    """
    # we should make it dimension dependent I think...
    default_enlarge = 1.25
    default_unif_bootstrap = 5
    have_enlarge = enlarge is not None
    have_bootstrap = bootstrap is not None
    if have_enlarge and not have_bootstrap:
        # Only enlargement was requested: no bootstrapping at all.
        assert enlarge >= 1
        return enlarge, 0
    if have_bootstrap and not have_enlarge:
        # Only bootstrapping was requested (zero is allowed so the caller
        # can force bootstrapping off).
        assert (bootstrap > 1) or (bootstrap == 0)
        return 1, bootstrap
    if not have_enlarge and not have_bootstrap:
        # Auto-mode: bootstrap for the uniform sampler, plain enlargement
        # otherwise.
        if sample == 'unif':
            return 1, default_unif_bootstrap
        return default_enlarge, 0
    # Both were specified; that is only consistent when one of them is
    # effectively a no-op.
    if bootstrap == 0 or enlarge == 1:
        return enlarge, bootstrap
    raise ValueError('Enlarge and bootstrap together do not make '
                     'sense unless bootstrap=0 or enlarge = 1')
def get_nonbounded(ndim, periodic, reflective):
    """
    Return a boolean mask for dimensions that are either
    periodic or reflective. The mask is True for normal dimensions
    and False for periodic/reflective ones; if no periodic/reflective
    dimensions were specified, None is returned instead.

    Raises
    ------
    ValueError
        If a dimension is listed as both periodic and reflective, or if
        an index is out of range (>= ndim).
    """
    if periodic is not None and reflective is not None:
        # BUGFIX: the old check compared the intersection array with 0,
        # which missed the overlap when index 0 appeared in both lists and
        # raised a truthiness error for 2+ overlapping indices. Test the
        # intersection size explicitly instead.
        if len(np.intersect1d(periodic, reflective)) > 0:
            raise ValueError("You have specified a parameter as both "
                             "periodic and reflective.")

    if periodic is not None or reflective is not None:
        nonbounded = np.ones(ndim, dtype=bool)
        if periodic is not None:
            if np.max(periodic) >= ndim:
                raise ValueError(
                    'Incorrect periodic variable index (larger than ndim')
            nonbounded[periodic] = False
        if reflective is not None:
            if np.max(reflective) >= ndim:
                raise ValueError(
                    'Incorrect reflective variable index (larger than ndim')
            nonbounded[reflective] = False
    else:
        nonbounded = None
    return nonbounded
def get_print_func(print_func, print_progress):
    """
    Select the progress-printing callback; returns (pbar, print_func)
    where pbar is the tqdm bar (or None) feeding the callback.
    """
    pbar = None
    if print_func is not None:
        # The caller supplied their own printer; use it as-is.
        return pbar, print_func
    if print_progress and tqdm is not None:
        # Progress requested and tqdm available: route through a bar.
        pbar = tqdm.tqdm()
        print_func = partial(print_fn, pbar=pbar)
    else:
        print_func = print_fn
    return pbar, print_func
def get_random_generator(seed=None):
    """
    Return a numpy random Generator (seeded with `seed` if provided).
    An already-constructed Generator is passed through unchanged.
    """
    if isinstance(seed, np.random.Generator):
        return seed
    bitgen = np.random.PCG64(seed)
    return np.random.Generator(bitgen)
def get_seed_sequence(rstate, nitems):
    """
    Return a list of `nitems` seeds for initializing random generators.
    This is useful when distributing work across a pool.
    """
    # Draw fresh entropy from the parent generator, then spawn
    # independent child sequences from it.
    entropy = rstate.integers(0, 2**63 - 1, size=4)
    return np.random.SeedSequence(entropy).spawn(nitems)
def get_neff_from_logwt(logwt):
    """
    Compute the number of effective samples from an array of unnormalized
    log-weights using the Kish Effective Sample Size (ESS) formula,
    N = (sum w_i)^2 / sum(w_i^2).

    Parameters
    ----------
    logwt: numpy array
        Array of unnormalized weights

    Returns
    -------
    int
        The effective number of samples
    """
    # The ESS is invariant under rescaling of the weights, so subtract the
    # maximum log-weight before exponentiating to avoid overflow.
    w = np.exp(logwt - logwt.max())
    return w.sum()**2 / (w**2).sum()
def unitcheck(u, nonbounded=None):
    """Check whether `u` is inside the unit cube. Given a masked array
    `nonbounded`, also allows periodic boundaries conditions to exceed
    the unit cube."""
    if nonbounded is None:
        # No periodic boundary conditions provided: strict unit-cube test.
        return u.min() > 0 and u.max() < 1
    # Split dimensions into regular ones (mask True) and
    # periodic/reflective ones (mask False).
    regular = u[nonbounded]
    # pylint: disable=invalid-unary-operand-type
    wrapped = u[~nonbounded]
    if not (regular.min() > 0 and regular.max() < 1):
        return False
    # Periodic dimensions may spill over into [-0.5, 1.5].
    return wrapped.min() > -0.5 and wrapped.max() < 1.5
def apply_reflect(u):
    """
    Iteratively reflect a number until it is contained in [0, 1].

    This is for priors with a reflective boundary condition, all numbers in
    the set `u = 2n +/- x` should be mapped to x.
    For the `+` case we just take `u % 1`; for the `-` case we take
    `1 - (u % 1)`. E.g., -0.9, 1.1, and 2.9 should all map to 0.9.

    Parameters
    ----------
    u: array-like
        The array of points to map to the unit cube

    Returns
    -------
    u: array-like
        The input array, modified in place.
    """
    # Values lying in an "even" period fold directly; values in an "odd"
    # period are mirrored.
    even = np.mod(u, 2) < 1
    odd = ~even
    u[even] = np.mod(u[even], 1)
    u[odd] = 1 - np.mod(u[odd], 1)
    return u
def mean_and_cov(samples, weights):
    """
    Compute the weighted mean and covariance of the samples.

    Parameters
    ----------
    samples : `~numpy.ndarray` with shape (nsamples, ndim)
        2-D array containing data samples. This ordering is equivalent to
        using `rowvar=False` in `~numpy.cov`.
    weights : `~numpy.ndarray` with shape (nsamples,)
        1-D array of sample weights.

    Returns
    -------
    mean : `~numpy.ndarray` with shape (ndim,)
        Weighted sample mean vector.
    cov : `~numpy.ndarray` with shape (ndim, ndim)
        Weighted sample covariance matrix.

    Notes
    -----
    Implements the formulae found `here <https://goo.gl/emWFLR>`_.
    """
    # Weighted mean.
    mean = np.average(samples, weights=weights, axis=0)

    # Weighted covariance with the standard frequency-weight bias
    # correction wsum / (wsum**2 - sum(w**2)); the einsum accumulates
    # sum_i w_i * dx_ij * dx_ik.
    dx = samples - mean
    wsum = np.sum(weights)
    wsqsum = np.sum(weights**2)
    cov = wsum / (wsum**2 - wsqsum) * np.einsum('i,ij,ik', weights, dx, dx)

    return mean, cov
def resample_equal(samples, weights, rstate=None):
    """
    Resample a set of points from the weighted set of inputs
    such that they all have equal weight. The points are also
    randomly shuffled.

    Each input sample appears in the output array either
    `floor(weights[i] * nsamples)` or `ceil(weights[i] * nsamples)` times,
    with `floor` or `ceil` randomly selected (weighted by proximity).

    Parameters
    ----------
    samples : `~numpy.ndarray` with shape (nsamples,)
        Set of unequally weighted samples.
    weights : `~numpy.ndarray` with shape (nsamples,)
        Corresponding weight of each sample.
    rstate : `~numpy.random.Generator`, optional
        `~numpy.random.Generator` instance.

    Returns
    -------
    equal_weight_samples : `~numpy.ndarray` with shape (nsamples,)
        New set of samples with equal weights in random order.

    Notes
    -----
    Implements the systematic resampling method described in `Hol, Schon,
    and Gustafsson (2006) <doi:10.1109/NSSPW.2006.4378824>`_.
    """
    if rstate is None:
        rstate = get_random_generator()

    cumulative_sum = np.cumsum(weights)
    if abs(cumulative_sum[-1] - 1.) > SQRTEPS:
        # same tol as in numpy's random.choice.
        # Guarantee that the weights will sum to 1.
        warnings.warn("Weights do not sum to 1 and have been renormalized.")
        cumulative_sum /= cumulative_sum[-1]
        # this ensures that the last element is strictly == 1

    # Systematic resampling: N equally spaced positions sharing a single
    # random offset.
    nsamples = len(weights)
    positions = (rstate.random() + np.arange(nsamples)) / nsamples

    # March the positions through the CDF, recording for each position the
    # first bin whose cumulative weight exceeds it.
    idx = np.zeros(nsamples, dtype=int)
    pos_i, bin_j = 0, 0
    while pos_i < nsamples:
        if positions[pos_i] < cumulative_sum[bin_j]:
            idx[pos_i] = bin_j
            pos_i += 1
        else:
            bin_j += 1
    return rstate.permutation(samples[idx])
def quantile(x, q, weights=None):
    """
    Compute (weighted) quantiles from an input set of samples.

    Parameters
    ----------
    x : `~numpy.ndarray` with shape (nsamps,)
        Input samples.
    q : `~numpy.ndarray` with shape (nquantiles,)
        The list of quantiles to compute from `[0., 1.]`.
    weights : `~numpy.ndarray` with shape (nsamps,), optional
        The associated weight from each sample.

    Returns
    -------
    quantiles : `~numpy.ndarray` with shape (nquantiles,)
        The weighted sample quantiles computed at `q`.
    """
    # Initial check.
    x = np.atleast_1d(x)
    q = np.atleast_1d(q)

    # Quantile check.
    if np.any(q < 0.0) or np.any(q > 1.0):
        raise ValueError("Quantiles must be between 0. and 1.")

    if weights is None:
        # No weights: defer to numpy's percentile routine.
        return np.percentile(x, list(100.0 * q))

    # Weighted quantiles via interpolation of the empirical CDF.
    weights = np.atleast_1d(weights)
    if len(x) != len(weights):
        raise ValueError("Dimension mismatch: len(weights) != len(x).")
    order = np.argsort(x)  # sort samples
    sorted_weights = weights[order]  # sort weights accordingly
    cdf = np.cumsum(sorted_weights)[:-1]  # compute CDF
    cdf /= cdf[-1]  # normalize CDF
    cdf = np.append(0, cdf)  # ensure proper span
    return np.interp(q, cdf, x[order]).tolist()
def _get_nsamps_samples_n(res):
""" Helper function for calculating the number of samples
Parameters
----------
res : :class:`~dynesty.results.Results` instance
The :class:`~dynesty.results.Results` instance taken from a previous
nested sampling run.
Returns
-------
nsamps: int
The total number of samples/iterations
samples_n: array
Number of live points at a given iteration
"""
if res.isdynamic():
# Check if the number of live points explicitly changes.
samples_n = res.samples_n
nsamps = len(samples_n)
else:
# If the number of live points is constant, compute `samples_n`.
niter = res.niter
nlive = res.nlive
nsamps = len(res.logvol)
if nsamps == niter:
samples_n = np.ones(niter, dtype=int) * nlive
elif nsamps == (niter + nlive):
samples_n = np.minimum(np.arange(nsamps, 0, -1), nlive)
else:
raise ValueError("Final number of samples differs from number of "
"iterations and number of live points.")
return nsamps, samples_n
def _find_decrease(samples_n):
    """
    Find all instances where the number of live points is either constant
    or increasing.
    Return the mask,
    the values of nlive when nlives starts to decrease
    The ranges of decreasing nlives
    v=[3,2,1,13,13,12,23,22];
    > print(dynesty.utils._find_decrease(v))
    (array([ True, False, False,  True,  True, False,  True, False]),
    [3, 13, 23],
    [[0, 3], [4, 6], (6, 8)])
    """
    nsamps = len(samples_n)
    nlive_flag = np.zeros(nsamps, dtype=bool)
    nlive_start, bounds = [], []
    # nlive_flag is True at iterations where nlive decreased relative to
    # the previous iteration (the first element can never be a decrease).
    nlive_flag[1:] = np.diff(samples_n) < 0

    # For all the portions that are decreasing, find out where they start,
    # where they end, and how many live points are present at that given
    # iteration.
    ids = np.nonzero(nlive_flag)[0]
    if len(ids) > 0:
        # boundl marks the index just before the current decreasing
        # stretch; last tracks its most recent member.
        boundl = ids[0] - 1
        last = ids[0]
        nlive_start.append(samples_n[boundl])
        for curi in ids[1:]:
            if curi == last + 1:
                last += 1
                # we are in the interval of continuously decreasing values
                continue
            else:
                # we need to close the last interval
                bounds.append([boundl, last + 1])
                nlive_start.append(samples_n[curi - 1])
                last = curi
                boundl = curi - 1
        # we need to close the last interval
        bounds.append((boundl, last + 1))
        nlive_start = np.array(nlive_start)
    # Returned mask is True where nlive is constant or increasing.
    return ~nlive_flag, nlive_start, bounds
def jitter_run(res, rstate=None, approx=False):
    """
    Probes **statistical uncertainties** on a nested sampling run by
    explicitly generating a *realization* of the prior volume associated
    with each sample (dead point). Companion function to :meth:`resample_run`.

    Parameters
    ----------
    res : :class:`~dynesty.results.Results` instance
        The :class:`~dynesty.results.Results` instance taken from a previous
        nested sampling run.
    rstate : `~numpy.random.Generator`, optional
        `~numpy.random.Generator` instance.
    approx : bool, optional
        Whether to approximate all sets of uniform order statistics by their
        associated marginals (from the Beta distribution). Default is `False`.

    Returns
    -------
    new_res : :class:`~dynesty.results.Results` instance
        A new :class:`~dynesty.results.Results` instance with corresponding
        weights based on our "jittered" prior volume realizations.
    """
    if rstate is None:
        rstate = get_random_generator()

    # Initialize evolution of live points over the course of the run.
    nsamps, samples_n = _get_nsamps_samples_n(res)
    logl = res.logl

    # Simulate the prior volume shrinkage associated with our set of "dead"
    # points. At each iteration, if the number of live points is constant or
    # increasing, our prior volume compresses by the maximum value of a set
    # of `K_i` uniformly distributed random numbers (i.e. as `Beta(K_i, 1)`).
    # If instead the number of live points is decreasing, that means we're
    # instead sampling down a set of uniform random variables
    # (i.e. uniform order statistics).
    if approx:
        # Treat every iteration with the Beta marginal approximation.
        nlive_flag = np.ones(nsamps, dtype=bool)
        nlive_start, bounds = [], []
    else:
        nlive_flag, nlive_start, bounds = _find_decrease(samples_n)

    # The maximum out of a set of `K_i` uniformly distributed random variables
    # has a marginal distribution of `Beta(K_i, 1)`.
    t_arr = np.zeros(nsamps)
    t_arr[nlive_flag] = rstate.beta(a=samples_n[nlive_flag], b=1)

    # If we instead are sampling the set of uniform order statistics,
    # we note that the jth largest value is marginally distributed as
    # `Beta(j, K_i-j+1)`. The full joint distribution is::
    #
    #     X_(j) / X_N = (Y_1 + ... + Y_j) / (Y_1 + ... + Y_{K+1})
    #
    # where X_(j) is the prior volume of the live point with the `j`-th
    # *best* likelihood (i.e. prior volume shrinks as likelihood increases)
    # and the `Y_i`'s are i.i.d. exponentially distributed random variables.
    nunif = len(nlive_start)
    for i in range(nunif):
        nstart = nlive_start[i]
        bound = bounds[i]
        sn = samples_n[bound[0]:bound[1]]
        # Realize the joint order statistics as normalized cumulative sums
        # of exponentials, then convert to per-step compression ratios.
        y_arr = rstate.exponential(scale=1.0, size=nstart + 1)
        ycsum = y_arr.cumsum()
        ycsum /= ycsum[-1]
        uorder = ycsum[np.append(nstart, sn - 1)]
        rorder = uorder[1:] / uorder[:-1]
        t_arr[bound[0]:bound[1]] = rorder

    # These are the "compression factors" at each iteration. Let's turn
    # these into associated ln(volumes).
    logvol = np.log(t_arr).cumsum()

    (saved_logwt, saved_logz, saved_logzvar,
     saved_h) = compute_integrals(logl=logl, logvol=logvol)

    # Overwrite items with our new estimates.
    substitute = {
        'logvol': logvol,
        'logwt': saved_logwt,
        'logz': saved_logz,
        'logzerr': np.sqrt(np.maximum(saved_logzvar, 0)),
        'h': saved_h
    }

    new_res = results_substitute(res, substitute)
    return new_res
def compute_integrals(logl=None, logvol=None, reweight=None):
    """
    Compute weights, logzs and variances using quadratic estimator.
    Returns logwt, logz, logzvar, h

    Parameters:
    -----------
    logl: array
        array of log likelihoods
    logvol: array
        array of log volumes
    reweight: array (or None)
        (optional) reweighting array to reweight posterior
    """
    # pylint: disable=invalid-unary-operand-type
    # Unfortunately pylint doesn't get the asserts
    assert logl is not None
    assert logvol is not None
    # Pad with a effectively-zero likelihood at X=1 so the trapezoid rule
    # has a left edge for the first sample.
    loglstar_pad = np.concatenate([[-1.e300], logl])

    # we want log(exp(logvol_i)-exp(logvol_(i+1)))
    # assuming that logvol0 = 0
    # log(exp(LV_{i})-exp(LV_{i+1})) =
    # = LV{i} + log(1-exp(LV_{i+1}-LV{i}))
    # = LV_{i+1} - (LV_{i+1} -LV_i) + log(1-exp(LV_{i+1}-LV{i}))
    dlogvol = np.diff(logvol, prepend=0)
    logdvol = logvol - dlogvol + np.log1p(-np.exp(dlogvol))
    # logdvol is log(delta(volumes)) i.e. log (X_i-X_{i-1})

    logdvol2 = logdvol + math.log(0.5)
    # These are log(1/2(X_(i+1)-X_i))

    dlogvol = -np.diff(logvol, prepend=0)
    # this are delta(log(volumes)) of the run

    # These are log((L_i+L_{i_1})*(X_i+1-X_i)/2)
    saved_logwt = np.logaddexp(loglstar_pad[1:], loglstar_pad[:-1]) + logdvol2
    if reweight is not None:
        saved_logwt = saved_logwt + reweight
    saved_logz = np.logaddexp.accumulate(saved_logwt)
    # This implements eqn 16 of Speagle2020

    logzmax = saved_logz[-1]
    # we'll need that to just normalize likelihoods to avoid overflows

    # H is defined as
    # H = 1/z int( L * ln(L) dX,X=0..1) - ln(z)
    # incomplete H can be defined as
    # H = int( L/Z * ln(L) dX,X=0..x) - z_x/Z * ln(Z)
    h_part1 = np.cumsum(
        (np.exp(loglstar_pad[1:] - logzmax + logdvol2) * loglstar_pad[1:] +
         np.exp(loglstar_pad[:-1] - logzmax + logdvol2) * loglstar_pad[:-1]))
    # here we divide the likelihood by zmax to avoid to overflow
    saved_h = h_part1 - logzmax * np.exp(saved_logz - logzmax)
    # changes in h in each step
    dh = np.diff(saved_h, prepend=0)

    # I'm applying abs() here to avoid nans down the line
    # because partial H integrals could be negative
    saved_logzvar = np.abs(np.cumsum(dh * dlogvol))
    return saved_logwt, saved_logz, saved_logzvar, saved_h
def progress_integration(loglstar, loglstar_new, logz, logzvar, logvol,
                         dlogvol, h):
    """
    This is the calculation of weights and logz/var estimates one step at
    the time.
    Importantly the calculation of H is somewhat different from
    compute_integrals as incomplete integrals of H() require knowing Z.

    Return logwt, logz, logzvar, h
    """
    # ln of the half-width of the volume shell, 1/2 (X_i - X_{i+1}).
    logdvol = logsumexp(a=[logvol + dlogvol, logvol], b=[0.5, -0.5])
    # Trapezoid weight: ln(1/2 (L_i + L_{i+1}) (X_i - X_{i+1})).
    logwt = np.logaddexp(loglstar_new, loglstar) + logdvol
    # Updated ln(evidence).
    logz_new = np.logaddexp(logz, logwt)
    # Contribution of this step to the information integral; likelihoods
    # are normalized by the running evidence to avoid overflow.
    lzterm = (math.exp(loglstar - logz_new + logdvol) * loglstar +
              math.exp(loglstar_new - logz_new + logdvol) * loglstar_new)
    h_new = lzterm + math.exp(logz - logz_new) * (h + logz) - logz_new
    # Accumulate var[ln(evidence)] from the change in information.
    logzvar_new = logzvar + (h_new - h) * dlogvol
    return logwt, logz_new, logzvar_new, h_new
def resample_run(res, rstate=None, return_idx=False):
    """
    Probes **sampling uncertainties** on a nested sampling run using bootstrap
    resampling techniques to generate a *realization* of the (expected) prior
    volume(s) associated with each sample (dead point). This effectively
    splits a nested sampling run with `K` particles (live points) into a
    series of `K` "strands" (i.e. runs with a single live point) which are then
    bootstrapped to construct a new "resampled" run. Companion function to
    :meth:`jitter_run`.

    Parameters
    ----------
    res : :class:`~dynesty.results.Results` instance
        The :class:`~dynesty.results.Results` instance taken from a previous
        nested sampling run.

    rstate : `~numpy.random.Generator`, optional
        `~numpy.random.Generator` instance.

    return_idx : bool, optional
        Whether to return the list of resampled indices used to construct
        the new run. Default is `False`.

    Returns
    -------
    new_res : :class:`~dynesty.results.Results` instance
        A new :class:`~dynesty.results.Results` instance with corresponding
        samples and weights based on our "bootstrapped" samples and
        (expected) prior volumes.
    """
    if rstate is None:
        rstate = get_random_generator()
    # Check whether the final set of live points were added to the
    # run.
    nsamps = len(res.ncall)
    if res.isdynamic():
        # Check if the number of live points explicitly changes.
        samples_n = res.samples_n
        samples_batch = res.samples_batch
        batch_bounds = res.batch_bounds
        added_final_live = True
    else:
        # If the number of live points is constant, compute `samples_n` and
        # set up the `added_final_live` flag.
        nlive = res.nlive
        niter = res.niter
        if nsamps == niter:
            samples_n = np.ones(niter, dtype=int) * nlive
            added_final_live = False
        elif nsamps == (niter + nlive):
            # The final `nlive` live points were recycled one by one at the
            # end of the run, so the live-point count ramps down to 1.
            samples_n = np.minimum(np.arange(nsamps, 0, -1), nlive)
            added_final_live = True
        else:
            raise ValueError("Final number of samples differs from number of "
                             "iterations and number of live points.")
        samples_batch = np.zeros(len(samples_n), dtype=int)
        batch_bounds = np.array([(-np.inf, np.inf)])
    # Lower log-likelihood bound of each batch.
    batch_llmin = batch_bounds[:, 0]
    # Identify unique particles that make up each strand.
    ids = np.unique(res.samples_id)
    # Split the set of strands into two groups: a "baseline" group that
    # contains points initially sampled from the prior, which gives information
    # on the evidence, and an "add-on" group, which gives additional
    # information conditioned on our baseline strands.
    base_ids = []
    addon_ids = []
    for i in ids:
        sbatch = samples_batch[res.samples_id == i]
        if np.any(batch_llmin[sbatch] == -np.inf):
            base_ids.append(i)
        else:
            addon_ids.append(i)
    nbase, nadd = len(base_ids), len(addon_ids)
    base_ids, addon_ids = np.array(base_ids), np.array(addon_ids)
    # Resample strands (with replacement) within each group so the bootstrap
    # realization preserves the baseline/add-on structure.
    if nbase > 0 and nadd > 0:
        live_idx = np.append(base_ids[rstate.integers(0, nbase, size=nbase)],
                             addon_ids[rstate.integers(0, nadd, size=nadd)])
    elif nbase > 0:
        live_idx = base_ids[rstate.integers(0, nbase, size=nbase)]
    elif nadd > 0:
        raise ValueError("The provided `Results` does not include any points "
                         "initially sampled from the prior!")
    else:
        raise ValueError("The provided `Results` does not appear to have "
                         "any particles!")
    # Find corresponding indices within the original run.
    samp_idx = np.arange(len(res.ncall))
    samp_idx = np.concatenate(
        [samp_idx[res.samples_id == idx] for idx in live_idx])
    # Derive new sample size.
    nsamps = len(samp_idx)
    # Sort the loglikelihoods (there will be duplicates).
    logls = res.logl[samp_idx]
    idx_sort = np.argsort(logls)
    samp_idx = samp_idx[idx_sort]
    logl = res.logl[samp_idx]
    if added_final_live:
        # Compute the effective number of live points for each sample.
        samp_n = np.zeros(nsamps, dtype=int)
        uidxs, uidxs_n = np.unique(live_idx, return_counts=True)
        for uidx, uidx_n in zip(uidxs, uidxs_n):
            sel = (res.samples_id == uidx)  # selection flag
            sbatch = samples_batch[sel][0]  # corresponding batch ID
            lower = batch_llmin[sbatch]  # lower bound
            upper = max(res.logl[sel])  # upper bound
            # Add number of live points between endpoints equal to number of
            # times the strand has been resampled.
            samp_n[(logl > lower) & (logl < upper)] += uidx_n
            # At the endpoint, divide up the final set of points into `uidx_n`
            # (roughly) equal chunks and have live points decrease across them.
            endsel = (logl == upper)
            endsel_n = np.count_nonzero(endsel)
            chunk = endsel_n / uidx_n  # define our chunk
            counters = np.array(np.arange(endsel_n) / chunk, dtype=int)
            nlive_end = counters[::-1] + 1  # decreasing number of live points
            samp_n[endsel] += nlive_end  # add live point sequence
    else:
        # If we didn't add the final set of live points, the run has a constant
        # number of live points and can simply be re-ordered.
        samp_n = samples_n[samp_idx]
    # Assign log(volume) to samples: each iteration shrinks the prior volume
    # by the expected factor n / (n + 1).
    logvol = np.cumsum(np.log(samp_n / (samp_n + 1.)))
    saved_logwt, saved_logz, saved_logzvar, saved_h = compute_integrals(
        logl=logl, logvol=logvol)
    # Compute sampling efficiency.
    eff = 100. * len(res.ncall[samp_idx]) / sum(res.ncall[samp_idx])
    # Copy results.
    # Overwrite items with our new estimates.
    new_res_dict = dict(niter=len(res.ncall[samp_idx]),
                        ncall=res.ncall[samp_idx],
                        eff=eff,
                        blob=res.blob[samp_idx],
                        samples=res.samples[samp_idx],
                        samples_id=res.samples_id[samp_idx],
                        samples_it=res.samples_it[samp_idx],
                        samples_u=res.samples_u[samp_idx],
                        samples_n=samp_n,
                        logwt=np.asarray(saved_logwt),
                        logl=logl,
                        logvol=logvol,
                        logz=np.asarray(saved_logz),
                        logzerr=np.sqrt(
                            np.maximum(np.asarray(saved_logzvar), 0)),
                        information=np.asarray(saved_h))
    new_res = Results(new_res_dict)
    if return_idx:
        return new_res, samp_idx
    else:
        return new_res
def reweight_run(res, logp_new, logp_old=None):
    """
    Reweight a given run based on a new target distribution.

    Parameters
    ----------
    res : :class:`~dynesty.results.Results` instance
        The :class:`~dynesty.results.Results` instance taken from a previous
        nested sampling run.

    logp_new : `~numpy.ndarray` with shape (nsamps,)
        New target distribution evaluated at the location of the samples.

    logp_old : `~numpy.ndarray` with shape (nsamps,)
        Old target distribution evaluated at the location of the samples.
        If not provided, the `logl` values from `res` will be used.

    Returns
    -------
    new_res : :class:`~dynesty.results.Results` instance
        A new :class:`~dynesty.results.Results` instance with corresponding
        weights based on our reweighted samples.
    """
    # Fall back on the run's own log-likelihoods for the old target.
    if logp_old is None:
        logp_old = res['logl']
    # ln(importance weight) between the new and old targets.
    log_reweight = logp_new - logp_old
    logvol = res['logvol']
    logl = res['logl']
    # Recompute the evidence-related integrals with the reweighting applied.
    new_logwt, new_logz, new_logzvar, new_h = compute_integrals(
        logl=logl, logvol=logvol, reweight=log_reweight)
    # Swap the reweighted estimates into a fresh Results instance.
    return results_substitute(
        res, {
            'logvol': logvol,
            'logwt': new_logwt,
            'logz': new_logz,
            'logzerr': np.sqrt(np.maximum(new_logzvar, 0)),
            'h': new_h
        })
def unravel_run(res, print_progress=True):
    """
    Unravels a run with `K` live points into `K` "strands" (a nested sampling
    run with only 1 live point). **WARNING: the ancillary quantities provided
    with each unraveled "strand" are only valid if the point was initialized
    from the prior.**

    Parameters
    ----------
    res : :class:`~dynesty.results.Results` instance
        The :class:`~dynesty.results.Results` instance taken from a previous
        nested sampling run.

    print_progress : bool, optional
        Whether to output the current progress to `~sys.stderr`.
        Default is `True`.

    Returns
    -------
    new_res : list of :class:`~dynesty.results.Results` instances
        A list of new :class:`~dynesty.results.Results` instances
        for each individual strand.
    """
    idxs = res.samples_id  # label for each live/dead point
    # Check if we added in the last set of dead points.
    added_live = True
    try:
        if len(idxs) != (res.niter + res.nlive):
            added_live = False
    except AttributeError:
        # Dynamic runs carry no single `nlive` attribute; keep the default.
        pass
    # Warn if the likelihood has plateaus (duplicate logl values): the
    # single-live-point volume assignment below is inaccurate in that case.
    # BUGFIX: the check was inverted (`== 0`), warning on every run that
    # had *no* plateaus and staying silent on runs that did.
    if (np.diff(res.logl) == 0).sum() > 0:
        warnings.warn('The likelihood seem to have plateaus. '
                      'The unraveling such runs may be inaccurate')
    # Recreate the nested sampling run for each strand.
    new_res = []
    nstrands = len(np.unique(idxs))
    for counter, idx in enumerate(np.unique(idxs)):
        # Select strand `idx`.
        strand = (idxs == idx)
        nsamps = sum(strand)
        logl = res.logl[strand]
        # Assign log(volume) to samples. With K=1 live point, the expected
        # shrinking in `logvol` at each iteration is `-log(2)` (i.e.
        # shrinking by 1/2). If the final set of live points were added,
        # the expected value of the final live point is a uniform
        # sample and so has an expected value of half the volume
        # of the final dead point.
        if added_live:
            niter = nsamps - 1
            logvol_dead = -math.log(2) * (1. + np.arange(niter))
            if niter > 0:
                logvol_live = logvol_dead[-1] + math.log(0.5)
                logvol = np.append(logvol_dead, logvol_live)
            else:  # point always live
                logvol = np.array([math.log(0.5)])
        else:
            niter = nsamps
            logvol = -math.log(2) * (1. + np.arange(niter))
        saved_logwt, saved_logz, saved_logzvar, saved_h = compute_integrals(
            logl=logl, logvol=logvol)
        # Compute sampling efficiency.
        eff = 100. * nsamps / sum(res.ncall[strand])
        # Save results.
        rdict = dict(nlive=1,
                     niter=niter,
                     ncall=res.ncall[strand],
                     eff=eff,
                     samples=res.samples[strand],
                     samples_id=res.samples_id[strand],
                     samples_it=res.samples_it[strand],
                     samples_u=res.samples_u[strand],
                     blob=res.blob[strand],
                     logwt=saved_logwt,
                     logl=logl,
                     logvol=logvol,
                     logz=saved_logz,
                     logzerr=np.sqrt(saved_logzvar),
                     information=saved_h)
        # Add on batch information (if available).
        try:
            rdict['samples_batch'] = res.samples_batch[strand]
            rdict['batch_bounds'] = res.batch_bounds
        except AttributeError:
            pass
        # Append to list of strands.
        new_res.append(Results(rdict))
        # Print progress.
        if print_progress:
            sys.stderr.write(f'\rStrand: {counter+1}/{nstrands} ')
    return new_res
def merge_runs(res_list, print_progress=True):
    """
    Merges a set of runs with differing (possibly variable) numbers of
    live points into one run.

    Parameters
    ----------
    res_list : list of :class:`~dynesty.results.Results` instances
        A list of :class:`~dynesty.results.Results` instances returned from
        previous runs.

    print_progress : bool, optional
        Whether to output the current progress to `~sys.stderr`.
        Default is `True`.

    Returns
    -------
    combined_res : :class:`~dynesty.results.Results` instance
        The :class:`~dynesty.results.Results` instance for the combined run.
    """
    ntot = len(res_list)
    counter = 0
    # Establish our set of baseline runs (which include points sampled from
    # the full prior, i.e. batch 0, and so carry evidence information) and
    # "add-on" runs (batches restricted to a likelihood bound).
    rlist_base = []
    rlist_add = []
    for r in res_list:
        try:
            if np.any(r.samples_batch == 0):
                rlist_base.append(r)
            else:
                rlist_add.append(r)
        except AttributeError:
            # Static runs carry no batch info and count as baseline.
            rlist_base.append(r)
    nbase, nadd = len(rlist_base), len(rlist_add)
    if nbase == 1 and nadd == 1:
        # With exactly one of each, merge the pair directly as baseline runs.
        rlist_base = res_list
        rlist_add = []
    # Merge baseline runs while there are > 2 remaining results
    # (each pass merges adjacent pairs, halving the list).
    if len(rlist_base) > 1:
        while len(rlist_base) > 2:
            rlist_new = []
            nruns = len(rlist_base)
            i = 0
            while i < nruns:
                try:
                    # Ignore posterior quantities while merging the runs.
                    r1, r2 = rlist_base[i], rlist_base[i + 1]
                    res = _merge_two(r1, r2, compute_aux=False)
                    rlist_new.append(res)
                except IndexError:
                    # Append the odd run to the new list.
                    rlist_new.append(rlist_base[i])
                i += 2
                counter += 1
                # Print progress.
                if print_progress:
                    sys.stderr.write(f'\rMerge: {counter}/{ntot} ')
            # Overwrite baseline set of results with merged results.
            rlist_base = copy.copy(rlist_new)
        # Compute posterior quantities after merging the final baseline runs.
        res = _merge_two(rlist_base[0], rlist_base[1], compute_aux=True)
    else:
        res = rlist_base[0]
    # Iteratively merge any remaining "add-on" results; posterior quantities
    # are only computed on the very last merge.
    nruns = len(rlist_add)
    for i, r in enumerate(rlist_add):
        if i < nruns - 1:
            res = _merge_two(res, r, compute_aux=False)
        else:
            res = _merge_two(res, r, compute_aux=True)
        counter += 1
        # Print progress.
        if print_progress:
            sys.stderr.write(f'\rMerge: {counter}/{ntot} ')
    # Collapse back to a static-run layout if the live-point count is constant.
    res = check_result_static(res)
    return res
def check_result_static(res):
    """If the run was from a dynamic run but had constant
    number of live points, return a new Results object with
    nlive parameter, so we could use it as static run
    """
    samples_n = _get_nsamps_samples_n(res)[1]
    nlive = max(samples_n)
    niter = res.niter
    # A run looks "standard" either when the live-point count is constant
    # throughout, or when it is constant with the final set of live points
    # recycled (ramping down to 1 at the end).
    constant_nlive = samples_n.size == niter and np.all(samples_n == nlive)
    ramp = np.minimum(np.arange(niter, 0, -1), nlive)
    recycled_nlive = samples_n.size == niter and np.all(samples_n == ramp)
    if constant_nlive or recycled_nlive:
        # Slightly modify the format to keep with previous (static) usage.
        resdict = res.asdict()
        resdict['nlive'] = nlive
        resdict['niter'] = niter - nlive
        res = Results(resdict)
    return res
def kld_error(res,
              error='jitter',
              rstate=None,
              return_new=False,
              approx=False):
    """
    Computes the `Kullback-Leibler (KL) divergence
    <https://en.wikipedia.org/wiki/Kullback-Leibler_divergence>`_ *from* the
    discrete probability distribution defined by `res` *to* the discrete
    probability distribution defined by a **realization** of `res`.

    Parameters
    ----------
    res : :class:`~dynesty.results.Results` instance
        :class:`~dynesty.results.Results` instance for the distribution we
        are computing the KL divergence *from*.

    error : {`'jitter'`, `'resample'`}, optional
        The error method employed, corresponding to :meth:`jitter_run` or
        :meth:`resample_run`. Default is `'jitter'`.

    rstate : `~numpy.random.Generator`, optional
        `~numpy.random.Generator` instance.

    return_new : bool, optional
        Whether to return the realization of the run used to compute the
        KL divergence. Default is `False`.

    approx : bool, optional
        Whether to approximate all sets of uniform order statistics by their
        associated marginals (from the Beta distribution). Default is `False`.

    Returns
    -------
    kld : `~numpy.ndarray` with shape (nsamps,)
        The cumulative KL divergence defined *from* `res` *to* a
        random realization of `res`.

    new_res : :class:`~dynesty.results.Results` instance, optional
        The :class:`~dynesty.results.Results` instance corresponding to
        the random realization we computed the KL divergence *to*.
    """
    # Define our original importance weights.
    logp2 = res.logwt - res.logz[-1]
    # Compute a random realization of our run.
    if error == 'jitter':
        new_res = jitter_run(res, rstate=rstate, approx=approx)
    elif error == 'resample':
        new_res, samp_idx = resample_run(res, rstate=rstate, return_idx=True)
        logp2 = logp2[samp_idx]  # re-order our original results to match
    else:
        # BUGFIX: this message was a plain string, so `{error}` was never
        # interpolated; it must be an f-string.
        raise ValueError(f"Input `'error'` option '{error}' is not valid.")
    # Define our new importance weights.
    logp1 = new_res['logwt'] - new_res['logz'][-1]
    # Compute the KL divergence.
    kld = np.cumsum(np.exp(logp1) * (logp1 - logp2))
    if return_new:
        return kld, new_res
    else:
        return kld
def _prepare_for_merge(res):
    """
    Internal helper used to prepare a run for merging with another run.

    Given a results object, return the per-iteration number of live points
    together with a dictionary of the run's basic per-sample quantities.
    """
    # Collect the per-sample arrays of the run.
    info = {
        'id': res.samples_id,
        'u': res.samples_u,
        'v': res.samples,
        'logl': res.logl,
        'nc': res.ncall,
        'it': res.samples_it,
        'blob': res.blob
    }
    nsamps = len(info['id'])
    # Reconstruct the number of live points at every iteration.
    if res.isdynamic():
        nlive_arr = res.samples_n
    else:
        niter, nlive = res.niter, res.nlive
        if nsamps == niter:
            nlive_arr = np.ones(niter, dtype=int) * nlive
        elif nsamps == (niter + nlive):
            # The final set of live points was recycled one by one at the
            # end of the run, so the live-point count ramps down to 1.
            nlive_arr = np.minimum(np.arange(nsamps, 0, -1), nlive)
        else:
            raise ValueError("Final number of samples differs from number of "
                             "iterations and number of live points in `res1`.")
    # Batch information (if available). We also check for the existence of
    # 'batch_bounds' because unravel_run produces 'static' single-live-point
    # runs that may still carry bounds.
    if res.isdynamic() or 'batch_bounds' in res.keys():
        info['batch'] = res.samples_batch
        info['bounds'] = res.batch_bounds
    else:
        info['batch'] = np.zeros(nsamps, dtype=int)
        info['bounds'] = np.array([(-np.inf, np.inf)])
    return nlive_arr, info
def _merge_two(res1, res2, compute_aux=False):
    """
    Internal method used to merges two runs with differing (possibly variable)
    numbers of live points into one run.

    Parameters
    ----------
    res1 : :class:`~dynesty.results.Results` instance
        The "base" nested sampling run.

    res2 : :class:`~dynesty.results.Results` instance
        The "new" nested sampling run.

    compute_aux : bool, optional
        Whether to compute auxiliary quantities (evidences, etc.) associated
        with a given run. **WARNING: these are only valid if `res1` or `res2`
        was initialized from the prior *and* their sampling bounds overlap.**
        Default is `False`.

    Returns
    -------
    res : :class:`~dynesty.results.Results` instances
        :class:`~dynesty.results.Results` instance from the newly combined
        nested sampling run.
    """
    base_nlive, base_info = _prepare_for_merge(res1)
    new_nlive, new_info = _prepare_for_merge(res2)
    base_nsamples = len(base_info['id'])
    new_nsamples = len(new_info['id'])
    # Initialize our new combined run.
    combined_info = dict()
    for curk in [
            'id', 'u', 'v', 'logl', 'logvol', 'logwt', 'logz', 'logzvar', 'h',
            'nc', 'it', 'n', 'batch', 'blob'
    ]:
        combined_info[curk] = []
    # These are merged batch bounds
    combined_bounds = np.unique(np.concatenate(
        (base_info['bounds'], new_info['bounds'])),
                                axis=0)
    # Here we try to find where the new bounds are in the combined bounds
    new_bound_map = {}
    base_bound_map = {}
    for i in range(len(new_info['bounds'])):
        new_bound_map[i] = np.where(
            np.all(new_info['bounds'][i] == combined_bounds, axis=1))[0][0]
    for i in range(len(base_info['bounds'])):
        base_bound_map[i] = np.where(
            np.all(base_info['bounds'][i] == combined_bounds, axis=1))[0][0]
    # Lowest log-likelihood bound reached by each run; a run only becomes
    # "active" (contributes live points) above the other run's lower edge.
    base_lowedge = np.min(base_info['bounds'][base_info['batch']])
    new_lowedge = np.min(new_info['bounds'][new_info['batch']])
    # Iteratively walk through both set of samples to simulate
    # a combined run.
    combined_nsamples = base_nsamples + new_nsamples
    # Start our counters at the beginning of each set of dead points.
    base_idx, new_idx = 0, 0
    for i in range(combined_nsamples):
        # Attempt to step along our samples. If we're out of samples,
        # set values to defaults.
        if base_idx < base_nsamples:
            base_cur_logl = base_info['logl'][base_idx]
            base_cur_nlive = base_nlive[base_idx]
        else:
            base_cur_logl = np.inf
            base_cur_nlive = 0
            # TODO this is potentially incorrect
            # It is not clear what nlive should be when we
            # are past the end of one of the run
        if new_idx < new_nsamples:
            new_cur_logl = new_info['logl'][new_idx]
            new_cur_nlive = new_nlive[new_idx]
        else:
            new_cur_logl = np.inf
            new_cur_nlive = 0
        if base_cur_logl > new_lowedge and new_cur_logl > base_lowedge:
            # If our samples from the both runs are past the each others'
            # lower log-likelihood bound, both runs are now "active".
            cur_nlive = base_cur_nlive + new_cur_nlive
        elif base_cur_logl <= new_lowedge:
            # If instead our collection of dead points from the "base" run
            # are below the bound, just use those.
            cur_nlive = base_cur_nlive
        else:
            # Our collection of dead points from the "new" run
            # are below the bound, so just use those.
            cur_nlive = new_cur_nlive
        # Increment our position along depending on
        # which dead point (saved or new) is worse.
        if base_cur_logl <= new_cur_logl:
            add_idx = base_idx
            from_run = base_info
            from_map = base_bound_map
            base_idx += 1
        else:
            add_idx = new_idx
            from_run = new_info
            from_map = new_bound_map
            new_idx += 1
        # Remap the contributing run's batch ID into the combined bounds.
        combined_info['batch'].append(from_map[from_run['batch'][add_idx]])
        for curk in ['id', 'u', 'v', 'logl', 'nc', 'it', 'blob']:
            combined_info[curk].append(from_run[curk][add_idx])
        combined_info['n'].append(cur_nlive)
    # Assign expected ln(volume)s, treating likelihood plateaus (runs of
    # identical logl values) with linear rather than exponential shrinkage.
    plateau_mode = False
    plateau_counter = 0
    plateau_logdvol = 0
    logvol = 0.
    logl_array = np.array(combined_info['logl'])
    nlive_array = np.array(combined_info['n'])
    for i, (curl, nlive) in enumerate(zip(logl_array, nlive_array)):
        # Save the number of live points and expected ln(volume).
        if not plateau_mode:
            plateau_mask = (logl_array[i:] == curl)
            nplateau = plateau_mask.sum()
            if nplateau > 1:
                # the number of live points should not change throughout
                # the plateau
                # assert np.ptp(nlive_array[i:][plateau_mask]) == 0
                # TODO currently I disabled this check
                plateau_counter = nplateau
                plateau_logdvol = logvol + np.log(1. / (nlive + 1))
                plateau_mode = True
        if not plateau_mode:
            logvol -= math.log((nlive + 1.) / nlive)
        else:
            logvol = logvol + np.log1p(-np.exp(plateau_logdvol - logvol))
        combined_info['logvol'].append(logvol)
        if plateau_mode:
            plateau_counter -= 1
            if plateau_counter == 0:
                plateau_mode = False
    # Compute sampling efficiency.
    eff = 100. * combined_nsamples / sum(combined_info['nc'])
    # Save results.
    r = dict(niter=combined_nsamples,
             ncall=np.asarray(combined_info['nc']),
             eff=eff,
             samples=np.asarray(combined_info['v']),
             logl=np.asarray(combined_info['logl']),
             logvol=np.asarray(combined_info['logvol']),
             batch_bounds=np.asarray(combined_bounds),
             blob=np.asarray(combined_info['blob']))
    for curk in ['id', 'it', 'n', 'u', 'batch']:
        r['samples_' + curk] = np.asarray(combined_info[curk])
    # Compute the posterior quantities of interest if desired.
    if compute_aux:
        (r['logwt'], r['logz'], combined_logzvar,
         r['information']) = compute_integrals(logvol=r['logvol'],
                                               logl=r['logl'])
        r['logzerr'] = np.sqrt(np.maximum(combined_logzvar, 0))
        # Compute batch information.
        combined_id = np.asarray(combined_info['id'])
        batch_nlive = [
            len(np.unique(combined_id[combined_info['batch'] == i]))
            for i in np.unique(combined_info['batch'])
        ]
        # Add to our results.
        r['batch_nlive'] = np.array(batch_nlive, dtype=int)
    # Combine to form final results object.
    res = Results(r)
    return res
def _kld_error(args):
    """Internal `pool.map`-friendly wrapper for :meth:`kld_error`
    used by :meth:`stopping_function`."""
    # Unpack the zipped arguments and rebuild the RNG from its seed
    # (generator objects themselves are not sent through the pool).
    results, error, approx, rseed = args
    rng = get_random_generator(rseed)
    return kld_error(results, error, rstate=rng, return_new=True,
                     approx=approx)
def old_stopping_function(results,
                          args=None,
                          rstate=None,
                          M=None,
                          return_vals=False):
    """
    The old stopping function utilized by :class:`DynamicSampler`.
    Zipped parameters are passed to the function via :data:`args`.
    Assigns the run a stopping value based on a weighted average of the
    stopping values for the posterior and evidence::

        stop = pfrac * stop_post + (1.- pfrac) * stop_evid

    The evidence stopping value is based on the estimated evidence error
    (i.e. standard deviation) relative to a given threshold::

        stop_evid = evid_std / evid_thresh

    The posterior stopping value is based on the fractional error (i.e.
    standard deviation / mean) in the Kullback-Leibler (KL) divergence
    relative to a given threshold::

        stop_post = (kld_std / kld_mean) / post_thresh

    Estimates of the mean and standard deviation are computed using `n_mc`
    realizations of the input using a provided `'error'` keyword (either
    `'jitter'` or `'resample'`).

    Returns the boolean `stop <= 1`. If `True`, the :class:`DynamicSampler`
    will stop adding new samples to our results.

    Parameters
    ----------
    results : :class:`Results` instance
        :class:`Results` instance.

    args : dictionary of keyword arguments, optional
        Arguments used to set the stopping values. Default values are
        `pfrac = 1.0`, `evid_thresh = 0.1`, `post_thresh = 0.02`,
        `n_mc = 128`, `error = 'jitter'`, and `approx = True`.

    rstate : `~numpy.random.Generator`, optional
        `~numpy.random.Generator` instance.

    M : `map` function, optional
        An alias to a `map`-like function. This allows users to pass
        functions from pools (e.g., `pool.map`) to compute realizations in
        parallel. By default the standard `map` function is used.

    return_vals : bool, optional
        Whether to return the stopping value (and its components). Default
        is `False`.

    Returns
    -------
    stop_flag : bool
        Boolean flag indicating whether we have passed the desired stopping
        criteria.

    stop_vals : tuple of shape (3,), optional
        The individual stopping values `(stop_post, stop_evid, stop)` used
        to determine the stopping criteria.
    """
    # Emit the deprecation notice (at most once per call site).
    with warnings.catch_warnings():
        warnings.filterwarnings("once")
        warnings.warn(
            "This an old stopping function that will "
            "be removed in future releases", DeprecationWarning)
    # Initialize values.
    if args is None:
        args = {}
    if M is None:
        M = map
    # Initialize hyperparameters, validating each against its constraints.
    pfrac = args.get('pfrac', 1.0)
    if not 0. <= pfrac <= 1.:
        raise ValueError(
            f"The provided `pfrac` {pfrac} is not between 0. and 1.")
    evid_thresh = args.get('evid_thresh', 0.1)
    if pfrac < 1. and evid_thresh < 0.:
        raise ValueError(
            f"The provided `evid_thresh` {evid_thresh} is not non-negative "
            f"even though `pfrac` is {pfrac}.")
    post_thresh = args.get('post_thresh', 0.02)
    if pfrac > 0. and post_thresh < 0.:
        raise ValueError(
            f"The provided `post_thresh` {post_thresh} is not non-negative "
            f"even though `pfrac` is {pfrac}.")
    n_mc = args.get('n_mc', 128)
    if n_mc <= 1:
        raise ValueError(f"The number of realizations {n_mc} must be greater "
                         "than 1.")
    if n_mc < 20:
        warnings.warn("Using a small number of realizations might result in "
                      "excessively noisy stopping value estimates.")
    error = args.get('error', 'jitter')
    if error not in {'jitter', 'resample'}:
        raise ValueError(f"The chosen `'error'` option {error} is not valid.")
    approx = args.get('approx', True)
    # Compute realizations of ln(evidence) and the KL divergence.
    # NOTE: `args` is rebound below from the kwargs dict to the zipped
    # per-realization argument tuples consumed by `_kld_error`.
    rlist = [results for i in range(n_mc)]
    error_list = [error for i in range(n_mc)]
    approx_list = [approx for i in range(n_mc)]
    seeds = get_seed_sequence(rstate, n_mc)
    args = zip(rlist, error_list, approx_list, seeds)
    outputs = list(M(_kld_error, args))
    kld_arr, lnz_arr = np.array([(kld[-1], res.logz[-1])
                                 for kld, res in outputs]).T
    # Evidence stopping value.
    lnz_std = np.std(lnz_arr)
    stop_evid = lnz_std / evid_thresh
    # Posterior stopping value.
    kld_mean, kld_std = np.mean(kld_arr), np.std(kld_arr)
    stop_post = (kld_std / kld_mean) / post_thresh
    # Effective stopping value.
    stop = pfrac * stop_post + (1. - pfrac) * stop_evid
    if return_vals:
        return stop <= 1., (stop_post, stop_evid, stop)
    else:
        return stop <= 1.
def restore_sampler(fname, pool=None):
    """
    Restore the dynamic sampler from a file.
    It is assumed that the file was created using .save() method
    of DynamicNestedSampler or as a result of checkpointing during
    run_nested()

    Parameters
    ----------
    fname: string
        Filename of the save file.

    pool: object(optional)
        The multiprocessing pool-like object that supports map()
        calls that will be used in the restored object.

    Returns
    -------
    Static or dynamic nested sampling object
    """
    with open(fname, 'rb') as fp:
        res = pickle_module.load(fp)
    sampler = res['sampler']
    save_ver = res['version']
    dynesty_format_version = 1
    file_format_version = res['format_version']
    # Refuse to load incompatible on-disk layouts.
    if file_format_version != dynesty_format_version:
        raise RuntimeError('Incorrect format version')
    # A dynesty version mismatch is only a warning: unpickling may still work.
    if save_ver != DYNESTY_VERSION:
        warnings.warn(
            f'The dynesty version in the checkpoint file ({save_ver})'
            f'does not match the current dynesty version'
            f'({DYNESTY_VERSION}). That is *NOT* guaranteed to work')
    # Pools cannot be pickled, so the map function must be re-attached here.
    if pool is not None:
        mapper = pool.map
    else:
        mapper = map
    if hasattr(sampler, 'sampler'):
        # This is the case of the dynamic sampler
        # this is better be written as isinstanceof()
        # but I couldn't do it due to circular imports
        # TODO
        # Here we are dealing with the special case of dynamic sampler
        # where it has internal samplers that also need their pool configured
        # this is the initial sampler
        samplers = [sampler, sampler.sampler]
        if sampler.batch_sampler is not None:
            samplers.append(sampler.batch_sampler)
    else:
        samplers = [sampler]
    # Re-wire the map function and pool on every (sub-)sampler.
    for cursamp in samplers:
        cursamp.M = mapper
        cursamp.pool = pool
        cursamp.loglikelihood.pool = pool
    return sampler
def save_sampler(sampler, fname):
    """
    Save the state of the dynamic sampler in a file

    Parameters
    ----------
    sampler: object
        Dynamic or Static nested sampler

    fname: string
        Filename of the save file.
    """
    format_version = 1
    # this is an internal version of the format we are
    # using. Increase this if incompatible changes are being made
    D = {
        'sampler': sampler,
        'version': DYNESTY_VERSION,
        'format_version': format_version
    }
    # Write to a temporary file first, then move it into place, so an
    # interrupted save never corrupts a previously saved checkpoint.
    tmp_fname = fname + '.tmp'
    try:
        with open(tmp_fname, 'wb') as fp:
            pickle_module.dump(D, fp)
        try:
            os.rename(tmp_fname, fname)
        except FileExistsError:
            # this can happen in Windows, See #450
            shutil.move(tmp_fname, fname)
    except:  # noqa
        # Clean up the partial temporary file on any failure (including
        # KeyboardInterrupt), then re-raise the original exception.
        try:
            os.unlink(tmp_fname)
        except:  # noqa
            pass
        raise
|
joshspeagleREPO_NAMEdynestyPATH_START.@dynesty_extracted@dynesty-master@py@dynesty@utils.py@.PATH_END.py
|
{
"filename": "_ticksuffix.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/scattercarpet/marker/colorbar/_ticksuffix.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class TicksuffixValidator(_plotly_utils.basevalidators.StringValidator):
    """Validator for the `scattercarpet.marker.colorbar.ticksuffix` property."""

    def __init__(
        self,
        plotly_name="ticksuffix",
        parent_name="scattercarpet.marker.colorbar",
        **kwargs
    ):
        # Supply plotly's default edit/role metadata unless the caller
        # has overridden them, then defer to the generic string validator.
        kwargs.setdefault("edit_type", "colorbars")
        kwargs.setdefault("role", "style")
        super(TicksuffixValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@scattercarpet@marker@colorbar@_ticksuffix.py@.PATH_END.py
|
{
"filename": "sigma_so.py",
"repo_name": "nickhand/pyRSD",
"repo_path": "pyRSD_extracted/pyRSD-master/pyRSD/rsd/power/gal/derivatives/sigma_so.py",
"type": "Python"
}
|
from . import PgalDerivative
import numpy
from pyRSD.rsd.tools import k_AP, mu_AP
class dPgal_dsigma_so(PgalDerivative):
    """
    The partial derivative of :func:`GalaxySpectrum.power` with respect to
    ``sigma_so``
    """
    param = 'sigma_so'

    @staticmethod
    def eval(m, pars, k, mu):
        # Without the SO correction the model does not depend on
        # `sigma_so`, so the derivative vanishes identically.
        if not m.use_so_correction:
            return numpy.zeros(len(k))

        # Map (k, mu) to their Alcock-Paczynski-distorted counterparts.
        kprime = k_AP(k, mu, m.alpha_perp, m.alpha_par)
        muprime = mu_AP(mu, m.alpha_perp, m.alpha_par)

        # FOG damping kernels for sigma_c and sigma_so, plus the
        # derivative of the sigma_so kernel with respect to its sigma.
        G = m.FOG(kprime, muprime, m.sigma_c)
        G2 = m.FOG(kprime, muprime, m.sigma_so)
        Gprime = m.FOG.derivative_sigma(kprime, muprime, m.sigma_so)

        # Temporarily evaluate the model with the SO correction off;
        # `m.preserve` restores the modified attributes on exit.
        with m.preserve(use_so_correction=False):

            # Pcc with no FOG kernels
            m.sigma_c = 0.
            Pcc = m.Pgal_cc(k, mu)

            # derivative of the SO correction terms
            term1 = 2*m.f_so*(1-m.f_so) * G * Pcc
            term2 = 2*m.f_so**2 * G2 * Pcc
            term3 = 2*G*m.f_so*m.fcB*m.NcBs / (m.alpha_perp**2 * m.alpha_par)

            toret = (term1 + term2 + term3) * Gprime

        return (1-m.fs)**2 * toret
|
nickhandREPO_NAMEpyRSDPATH_START.@pyRSD_extracted@pyRSD-master@pyRSD@rsd@power@gal@derivatives@sigma_so.py@.PATH_END.py
|
{
"filename": "PCA_features.ipynb",
"repo_name": "snad-space/zwad",
"repo_path": "zwad_extracted/zwad-master/notebooks/PCA_features.ipynb",
"type": "Jupyter Notebook"
}
|
```python
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
import os
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
import sys
from collections import OrderedDict
import warnings
warnings.filterwarnings("ignore")
from zwad.utils import latex_feature_names
```
# $\LaTeX$ feature and field names
```python
# LaTeX-formatted feature names, loaded from the repo's lookup table
feature_names = latex_feature_names('../data/latex_feature_names.csv')
# LaTeX display labels for the three survey fields analysed below
field_names = {
    'm31': r'\textsc{M\,31}',
    'deep': r'\textsc{Deep}',
    'disk': r'\textsc{Disk}',
}
```
# M31 PRINCIPAL COMPONENT ANALYSIS
```python
# M31
# Object IDs, memory-mapped so the catalogue is not read fully into RAM
m31_oid = np.memmap('../data/oid_m31.dat', mode='r', dtype=np.uint64)
# Feature names: whitespace-separated tokens; use "with" so the file handle is closed
with open('../data/feature_m31.name') as name_file:
    m31_names = name_file.read().split()
# Feature matrix: one row per object ID, one float32 column per feature
m31_x = np.memmap('../data/feature_m31.dat', mode='r', dtype=np.float32, shape=(m31_oid.size, len(m31_names)))
```
```python
# Wrap the memory-mapped feature matrix in a DataFrame indexed by object ID
m31 = pd.DataFrame(m31_x, index=m31_oid, columns=m31_names)
# Display the table (57546 objects x 42 features)
m31
```
<div>
<style scoped>
.dataframe tbody tr th:only-of-type {
vertical-align: middle;
}
.dataframe tbody tr th {
vertical-align: top;
}
.dataframe thead th {
text-align: right;
}
</style>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>amplitude</th>
<th>beyond_1_std</th>
<th>beyond_2_std</th>
<th>cusum</th>
<th>eta</th>
<th>eta_e</th>
<th>inter_percentile_range_25</th>
<th>inter_percentile_range_10</th>
<th>kurtosis</th>
<th>linear_fit_slope</th>
<th>...</th>
<th>periodogram_cusum</th>
<th>periodogram_eta</th>
<th>periodogram_inter_percentile_range_25</th>
<th>periodogram_standard_deviation</th>
<th>periodogram_percent_amplitude</th>
<th>chi2</th>
<th>skew</th>
<th>standard_deviation</th>
<th>stetson_K</th>
<th>weighted_mean</th>
</tr>
</thead>
<tbody>
<tr>
<th>695211400017839</th>
<td>0.699500</td>
<td>0.227941</td>
<td>0.036765</td>
<td>0.128972</td>
<td>1.576097</td>
<td>4.729054e+09</td>
<td>0.212000</td>
<td>0.450199</td>
<td>3.944156</td>
<td>-0.001640</td>
<td>...</td>
<td>0.148243</td>
<td>0.038084</td>
<td>0.959692</td>
<td>1.045485</td>
<td>9.733143</td>
<td>1.391858</td>
<td>-1.312442</td>
<td>0.202145</td>
<td>0.664184</td>
<td>20.516939</td>
</tr>
<tr>
<th>695211400043887</th>
<td>0.443000</td>
<td>0.288889</td>
<td>0.044444</td>
<td>0.179944</td>
<td>1.524735</td>
<td>3.644123e+09</td>
<td>0.204000</td>
<td>0.400000</td>
<td>0.133404</td>
<td>-0.000005</td>
<td>...</td>
<td>0.156987</td>
<td>0.032495</td>
<td>0.875076</td>
<td>0.984689</td>
<td>10.104938</td>
<td>0.548229</td>
<td>-0.357512</td>
<td>0.163288</td>
<td>0.792986</td>
<td>20.698317</td>
</tr>
<tr>
<th>695211400043454</th>
<td>0.589499</td>
<td>0.280000</td>
<td>0.032000</td>
<td>0.191169</td>
<td>1.652675</td>
<td>2.317022e+09</td>
<td>0.204500</td>
<td>0.484001</td>
<td>1.439840</td>
<td>0.000048</td>
<td>...</td>
<td>0.144973</td>
<td>0.031337</td>
<td>0.856762</td>
<td>0.939969</td>
<td>7.261847</td>
<td>0.791332</td>
<td>-0.746378</td>
<td>0.190502</td>
<td>0.728758</td>
<td>20.749649</td>
</tr>
<tr>
<th>695211400042791</th>
<td>0.604000</td>
<td>0.261745</td>
<td>0.053691</td>
<td>0.158801</td>
<td>1.574722</td>
<td>1.996893e+09</td>
<td>0.203499</td>
<td>0.433001</td>
<td>1.735631</td>
<td>0.000804</td>
<td>...</td>
<td>0.159723</td>
<td>0.033665</td>
<td>0.761747</td>
<td>0.886971</td>
<td>8.016976</td>
<td>0.915853</td>
<td>-0.816090</td>
<td>0.178804</td>
<td>0.737000</td>
<td>20.493862</td>
</tr>
<tr>
<th>695211400016239</th>
<td>0.825500</td>
<td>0.196203</td>
<td>0.025316</td>
<td>0.085341</td>
<td>1.951849</td>
<td>2.571876e+09</td>
<td>0.155001</td>
<td>0.323599</td>
<td>18.212532</td>
<td>-0.002264</td>
<td>...</td>
<td>0.176922</td>
<td>0.049399</td>
<td>0.618860</td>
<td>0.638475</td>
<td>5.355614</td>
<td>1.734685</td>
<td>-2.598536</td>
<td>0.162091</td>
<td>0.504324</td>
<td>20.329548</td>
</tr>
<tr>
<th>...</th>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
</tr>
<tr>
<th>695211200027762</th>
<td>0.137500</td>
<td>0.300000</td>
<td>0.032000</td>
<td>0.076794</td>
<td>1.583493</td>
<td>6.722693e+09</td>
<td>0.053001</td>
<td>0.105000</td>
<td>0.686157</td>
<td>-0.000049</td>
<td>...</td>
<td>0.172669</td>
<td>0.028054</td>
<td>0.722513</td>
<td>0.826720</td>
<td>9.868394</td>
<td>1.089929</td>
<td>-0.198571</td>
<td>0.041880</td>
<td>0.782368</td>
<td>18.673153</td>
</tr>
<tr>
<th>695211200001880</th>
<td>0.049500</td>
<td>0.283465</td>
<td>0.055118</td>
<td>0.151784</td>
<td>1.451552</td>
<td>2.788117e+09</td>
<td>0.018000</td>
<td>0.037000</td>
<td>0.999901</td>
<td>-0.000003</td>
<td>...</td>
<td>0.165232</td>
<td>0.017666</td>
<td>0.749936</td>
<td>0.936336</td>
<td>12.354101</td>
<td>1.967448</td>
<td>0.421042</td>
<td>0.015429</td>
<td>0.763788</td>
<td>14.979458</td>
</tr>
<tr>
<th>695211200027621</th>
<td>0.073500</td>
<td>0.274590</td>
<td>0.049180</td>
<td>0.083840</td>
<td>1.638867</td>
<td>3.939941e+09</td>
<td>0.033501</td>
<td>0.057198</td>
<td>0.447904</td>
<td>-0.000009</td>
<td>...</td>
<td>0.165867</td>
<td>0.023609</td>
<td>0.749228</td>
<td>0.822715</td>
<td>9.576206</td>
<td>1.469046</td>
<td>-0.069856</td>
<td>0.024325</td>
<td>0.793072</td>
<td>17.515738</td>
</tr>
<tr>
<th>695211200002462</th>
<td>0.044000</td>
<td>0.311024</td>
<td>0.074803</td>
<td>0.152871</td>
<td>1.594203</td>
<td>2.863704e+09</td>
<td>0.020000</td>
<td>0.041000</td>
<td>0.126569</td>
<td>0.000034</td>
<td>...</td>
<td>0.174980</td>
<td>0.017528</td>
<td>0.722896</td>
<td>0.866926</td>
<td>11.782808</td>
<td>1.851398</td>
<td>0.099805</td>
<td>0.016328</td>
<td>0.791217</td>
<td>15.804447</td>
</tr>
<tr>
<th>695211200070946</th>
<td>0.195500</td>
<td>0.228346</td>
<td>0.055118</td>
<td>0.080264</td>
<td>2.166719</td>
<td>3.407947e+09</td>
<td>0.051250</td>
<td>0.126400</td>
<td>2.358748</td>
<td>-0.000295</td>
<td>...</td>
<td>0.157390</td>
<td>0.064581</td>
<td>0.539544</td>
<td>0.521232</td>
<td>3.815655</td>
<td>1.502394</td>
<td>-0.244482</td>
<td>0.055769</td>
<td>0.713932</td>
<td>18.852880</td>
</tr>
</tbody>
</table>
<p>57546 rows × 42 columns</p>
</div>
```python
def applypca(dataset):
    """Standardize *dataset* and run a full PCA.

    Parameters
    ----------
    dataset : array-like of shape (n_samples, n_features)
        Feature matrix to decompose.

    Returns
    -------
    pca : sklearn.decomposition.PCA
        The fitted PCA object (all components kept).
    x_new : ndarray of shape (n_samples, n_features)
        Data projected onto all principal components.
    score : ndarray of shape (n_samples, 2)
        Projection onto the first two components only.
    coeff : ndarray of shape (n_features, 2)
        Loadings (feature weights) of the first two components.
    """
    # Scale to zero mean / unit variance so no single feature dominates the PCA;
    # fit_transform does fit + transform in one pass
    X = StandardScaler().fit_transform(dataset)

    pca = PCA()
    x_new = pca.fit_transform(X)

    # Convenience views of the first two components for 2-D biplots
    score = x_new[:, 0:2]
    coeff = np.transpose(pca.components_[0:2, :])
    return pca, x_new, score, coeff
```
```python
def pcaplot(score, coeff, datacolor, labels=None):
    """Draw a PCA biplot: a scatter of scores plus one loading arrow per feature.

    Parameters
    ----------
    score : ndarray of shape (n_samples, 2)
        Projections onto the first two principal components.
    coeff : ndarray of shape (n_features, 2)
        Loadings of the first two components.
    datacolor : color spec
        Color passed to the scatter points.
    labels : sequence of str, optional
        Feature names used to annotate the arrows.  Defaults to the columns
        of the notebook-global ``m31`` DataFrame (the original behavior).
    """
    if labels is None:
        # Hoisted out of the loop: the original rebuilt this list per arrow
        # and silently depended on the global m31 DataFrame.
        labels = list(m31.columns.values)

    xs = score[:, 0]
    ys = score[:, 1]
    n = coeff.shape[0]
    # Normalize scores into a unit box so they overlay the loading arrows
    scalex = 1.0 / (xs.max() - xs.min())
    scaley = 1.0 / (ys.max() - ys.min())
    plt.scatter(xs * scalex, ys * scaley, c=datacolor, s=6, alpha=0.6)

    # One red arrow plus a text label per feature (loading vector)
    for i in range(n):
        plt.arrow(0, 0, coeff[i, 0], coeff[i, 1], head_width=.003, color='r', alpha=0.5)
        plt.text(coeff[i, 0] * 1.12, coeff[i, 1] * 1.12, labels[i],
                 color='darkblue', ha='center', va='center', fontsize=6)
```
```python
# Fit the PCA on the M31 feature table and draw the loading-arrow biplot
fig, ax = plt.subplots(figsize=(14, 14))
pca, x_new, score, coeff = applypca(m31)
# Zoom the axes so the loading arrows are readable
plt.xlim(-0.2,0.4)
plt.ylim(-0.4,0.4)
plt.xlabel("PC{}".format(1))
plt.ylabel("PC{}".format(2))
plt.title("M31")
#Call the function. Use only the 2 PCs.
# White points make the data invisible, leaving only the loading arrows
pcaplot(x_new[:,0:2], np.transpose(pca.components_[0:2, :]), 'white') #just show components, not data
#plt.savefig('../figs/pca/m31_pca_importance.png', dpi=400, bbox_inches='tight')
```

```python
# Same biplot as above, but now the data points are drawn in orange
fig, ax = plt.subplots(figsize=(14, 14))
plt.xlabel("PC{}".format(1))
plt.ylabel("PC{}".format(2))
plt.title("M31")
pcaplot(x_new[:,0:2], np.transpose(pca.components_[0:2, :]), 'darkorange')
#plt.savefig('../figs/pca/m31_pca_overlay_data.png', dpi=400, bbox_inches='tight')
```

### Find variance of 42 principal components...
```python
m31_pca_var_ratio = pca.explained_variance_ratio_
m31_pca_var_ratio
```
array([2.9282624e-01, 1.7764686e-01, 8.1576906e-02, 5.9318133e-02,
4.5673586e-02, 3.8711138e-02, 3.5529505e-02, 2.6652120e-02,
2.4116229e-02, 2.3249835e-02, 2.3104051e-02, 2.2038981e-02,
1.8754052e-02, 1.8128498e-02, 1.7137550e-02, 1.3575229e-02,
1.2608401e-02, 1.1598360e-02, 8.5148998e-03, 7.3589208e-03,
7.0195072e-03, 5.7306495e-03, 4.8937360e-03, 4.4126213e-03,
3.9169118e-03, 3.7042154e-03, 3.3586489e-03, 3.1325680e-03,
1.8620116e-03, 9.9342316e-04, 6.8924855e-04, 6.3981506e-04,
4.1580538e-04, 3.5827671e-04, 2.7329530e-04, 1.9533768e-04,
1.0103920e-04, 8.3455627e-05, 5.7678477e-05, 4.0850806e-05,
1.2683306e-06, 1.7554333e-07], dtype=float32)
```python
np.cumsum(m31_pca_var_ratio)
```
array([0.29282624, 0.4704731 , 0.55205 , 0.6113681 , 0.6570417 ,
0.69575286, 0.73128235, 0.75793445, 0.78205067, 0.8053005 ,
0.82840455, 0.85044354, 0.8691976 , 0.8873261 , 0.90446365,
0.9180389 , 0.9306473 , 0.94224566, 0.95076054, 0.95811945,
0.965139 , 0.9708696 , 0.9757633 , 0.980176 , 0.9840929 ,
0.9877971 , 0.99115574, 0.9942883 , 0.9961503 , 0.99714375,
0.997833 , 0.9984728 , 0.9988886 , 0.9992469 , 0.9995202 ,
0.9997155 , 0.99981654, 0.9999 , 0.9999577 , 0.9999985 ,
0.99999976, 0.99999994], dtype=float32)
### PC1 explains 29.3% of the variance and PC2 another 17.8%; keeping only these two components therefore captures 47.1% of the total variance — a substantial fraction for a 42-feature space.
### Now let's find most important features mathematically
```python
print(abs( pca.components_ )) #has shape (42, 42) [n_components, n_features].
```
[[2.70220488e-01 8.88836384e-03 1.59023702e-03 ... 2.81599462e-01
4.89639118e-02 1.92176148e-01]
[6.01112588e-05 7.43303746e-02 4.20616493e-02 ... 2.44465694e-02
8.47878009e-02 9.15185809e-02]
[7.63525069e-02 3.22336465e-01 4.04378176e-02 ... 6.62775291e-03
2.72183478e-01 2.48516172e-01]
...
[1.02007076e-01 9.12577845e-03 8.88883695e-03 ... 8.48711014e-01
1.10804383e-02 2.53667757e-02]
[1.14453919e-02 1.21567631e-04 1.12139387e-04 ... 4.61369157e-02
3.50912195e-03 7.00042009e-01]
[2.52862024e-04 3.97829608e-05 9.75814328e-06 ... 1.19953230e-03
1.20662153e-04 2.63015926e-03]]
```python
# Label rows PCA1..PCAn, one per fitted component (42 here); sized from
# pca.components_ instead of the original hard-coded range(1, 43)
pca_num = ['PCA{}'.format(i) for i in range(1, len(pca.components_) + 1)]
# Component loadings as a table: rows = components, columns = features
m31_pca_comp = pd.DataFrame(pca.components_, index=pca_num, columns=m31_names)
#m31_pca_comp.to_csv('../data/m31_pca_feature_importance.csv')
m31_pca_comp
```
<div>
<style scoped>
.dataframe tbody tr th:only-of-type {
vertical-align: middle;
}
.dataframe tbody tr th {
vertical-align: top;
}
.dataframe thead th {
text-align: right;
}
</style>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>amplitude</th>
<th>beyond_1_std</th>
<th>beyond_2_std</th>
<th>cusum</th>
<th>eta</th>
<th>eta_e</th>
<th>inter_percentile_range_25</th>
<th>inter_percentile_range_10</th>
<th>kurtosis</th>
<th>linear_fit_slope</th>
<th>...</th>
<th>periodogram_cusum</th>
<th>periodogram_eta</th>
<th>periodogram_inter_percentile_range_25</th>
<th>periodogram_standard_deviation</th>
<th>periodogram_percent_amplitude</th>
<th>chi2</th>
<th>skew</th>
<th>standard_deviation</th>
<th>stetson_K</th>
<th>weighted_mean</th>
</tr>
</thead>
<tbody>
<tr>
<th>PCA1</th>
<td>0.270220</td>
<td>0.008888</td>
<td>-0.001590</td>
<td>0.091567</td>
<td>-0.078800</td>
<td>-0.102478</td>
<td>0.273897</td>
<td>0.278250</td>
<td>-0.017195</td>
<td>-0.073426</td>
<td>...</td>
<td>0.007447</td>
<td>0.041709</td>
<td>0.114543</td>
<td>0.077620</td>
<td>-0.031790</td>
<td>0.104492</td>
<td>-0.115696</td>
<td>0.281599</td>
<td>-0.048964</td>
<td>0.192176</td>
</tr>
<tr>
<th>PCA2</th>
<td>-0.000060</td>
<td>0.074330</td>
<td>-0.042062</td>
<td>0.239880</td>
<td>-0.267781</td>
<td>-0.113904</td>
<td>0.040137</td>
<td>0.031887</td>
<td>-0.044017</td>
<td>-0.019775</td>
<td>...</td>
<td>-0.007698</td>
<td>-0.121068</td>
<td>0.099130</td>
<td>0.296348</td>
<td>0.346211</td>
<td>0.037850</td>
<td>0.010693</td>
<td>0.024447</td>
<td>0.084788</td>
<td>-0.091519</td>
</tr>
<tr>
<th>PCA3</th>
<td>0.076353</td>
<td>-0.322336</td>
<td>0.040438</td>
<td>0.009092</td>
<td>-0.037840</td>
<td>-0.032995</td>
<td>-0.062708</td>
<td>-0.036987</td>
<td>0.241820</td>
<td>-0.264892</td>
<td>...</td>
<td>0.114642</td>
<td>0.069852</td>
<td>0.019305</td>
<td>-0.040777</td>
<td>-0.022052</td>
<td>0.347273</td>
<td>-0.039459</td>
<td>0.006628</td>
<td>-0.272183</td>
<td>-0.248516</td>
</tr>
<tr>
<th>PCA4</th>
<td>-0.053699</td>
<td>0.324534</td>
<td>-0.126995</td>
<td>-0.106357</td>
<td>0.157180</td>
<td>0.160718</td>
<td>0.081438</td>
<td>0.040464</td>
<td>-0.211928</td>
<td>0.006079</td>
<td>...</td>
<td>-0.101264</td>
<td>-0.080606</td>
<td>-0.130911</td>
<td>-0.094907</td>
<td>-0.037241</td>
<td>0.331369</td>
<td>0.112065</td>
<td>0.015037</td>
<td>0.393288</td>
<td>-0.160194</td>
</tr>
<tr>
<th>PCA5</th>
<td>0.114042</td>
<td>-0.190602</td>
<td>0.001752</td>
<td>-0.235740</td>
<td>0.231478</td>
<td>0.235326</td>
<td>0.023916</td>
<td>0.031950</td>
<td>0.191424</td>
<td>0.212244</td>
<td>...</td>
<td>-0.269133</td>
<td>-0.074913</td>
<td>-0.373341</td>
<td>-0.177580</td>
<td>0.104772</td>
<td>0.055025</td>
<td>0.035645</td>
<td>0.049430</td>
<td>-0.195764</td>
<td>0.144635</td>
</tr>
<tr>
<th>PCA6</th>
<td>0.026341</td>
<td>-0.110977</td>
<td>-0.380974</td>
<td>0.061750</td>
<td>-0.084515</td>
<td>-0.185655</td>
<td>0.021447</td>
<td>-0.019344</td>
<td>0.415640</td>
<td>0.390316</td>
<td>...</td>
<td>0.168372</td>
<td>-0.146539</td>
<td>0.108881</td>
<td>0.090557</td>
<td>-0.021491</td>
<td>0.050218</td>
<td>0.201816</td>
<td>-0.003943</td>
<td>-0.148721</td>
<td>-0.095493</td>
</tr>
<tr>
<th>PCA7</th>
<td>-0.051217</td>
<td>0.002529</td>
<td>0.427992</td>
<td>0.075854</td>
<td>-0.062889</td>
<td>-0.065425</td>
<td>-0.029443</td>
<td>0.021045</td>
<td>-0.256978</td>
<td>0.410174</td>
<td>...</td>
<td>0.016073</td>
<td>-0.047112</td>
<td>0.096075</td>
<td>0.077805</td>
<td>-0.040946</td>
<td>0.165902</td>
<td>-0.023760</td>
<td>0.001783</td>
<td>0.104607</td>
<td>-0.063377</td>
</tr>
<tr>
<th>PCA8</th>
<td>0.003550</td>
<td>0.014802</td>
<td>0.115546</td>
<td>-0.039661</td>
<td>-0.031105</td>
<td>0.108727</td>
<td>0.030318</td>
<td>0.040312</td>
<td>0.071309</td>
<td>-0.090170</td>
<td>...</td>
<td>0.502621</td>
<td>-0.613671</td>
<td>-0.243585</td>
<td>-0.024858</td>
<td>-0.057870</td>
<td>-0.073713</td>
<td>0.209216</td>
<td>0.026488</td>
<td>0.060294</td>
<td>-0.044928</td>
</tr>
<tr>
<th>PCA9</th>
<td>-0.028840</td>
<td>-0.021959</td>
<td>0.135525</td>
<td>-0.028702</td>
<td>0.015588</td>
<td>0.000430</td>
<td>0.027211</td>
<td>0.045393</td>
<td>0.205811</td>
<td>-0.114746</td>
<td>...</td>
<td>-0.179314</td>
<td>-0.141795</td>
<td>0.168532</td>
<td>0.081884</td>
<td>0.016256</td>
<td>-0.056148</td>
<td>0.584243</td>
<td>0.023129</td>
<td>0.162121</td>
<td>-0.002490</td>
</tr>
<tr>
<th>PCA10</th>
<td>-0.017144</td>
<td>-0.007181</td>
<td>0.059180</td>
<td>0.247308</td>
<td>0.010283</td>
<td>0.297033</td>
<td>0.017831</td>
<td>0.020512</td>
<td>0.142356</td>
<td>0.015373</td>
<td>...</td>
<td>-0.034610</td>
<td>0.258497</td>
<td>-0.001064</td>
<td>-0.058553</td>
<td>0.000339</td>
<td>-0.039041</td>
<td>0.411627</td>
<td>0.011375</td>
<td>0.148134</td>
<td>0.021789</td>
</tr>
<tr>
<th>PCA11</th>
<td>-0.006082</td>
<td>-0.016930</td>
<td>0.005043</td>
<td>0.103452</td>
<td>0.030787</td>
<td>0.061127</td>
<td>-0.001099</td>
<td>-0.000229</td>
<td>0.059149</td>
<td>0.021374</td>
<td>...</td>
<td>-0.182390</td>
<td>0.142952</td>
<td>-0.062868</td>
<td>-0.060928</td>
<td>-0.014665</td>
<td>-0.011196</td>
<td>0.138483</td>
<td>-0.000538</td>
<td>0.025316</td>
<td>-0.002313</td>
</tr>
<tr>
<th>PCA12</th>
<td>0.008284</td>
<td>0.024062</td>
<td>-0.040978</td>
<td>0.212392</td>
<td>-0.049717</td>
<td>0.397640</td>
<td>-0.011968</td>
<td>-0.015300</td>
<td>-0.048794</td>
<td>0.064301</td>
<td>...</td>
<td>0.445002</td>
<td>0.130321</td>
<td>-0.174808</td>
<td>-0.053693</td>
<td>-0.000027</td>
<td>-0.028024</td>
<td>-0.208133</td>
<td>-0.008686</td>
<td>-0.035838</td>
<td>-0.002320</td>
</tr>
<tr>
<th>PCA13</th>
<td>0.018835</td>
<td>-0.038147</td>
<td>0.181475</td>
<td>-0.160019</td>
<td>-0.086105</td>
<td>0.191264</td>
<td>0.036277</td>
<td>0.018443</td>
<td>0.028899</td>
<td>0.048251</td>
<td>...</td>
<td>0.310594</td>
<td>0.202507</td>
<td>0.393780</td>
<td>0.131034</td>
<td>0.150888</td>
<td>-0.021253</td>
<td>0.058375</td>
<td>0.024820</td>
<td>0.062161</td>
<td>0.067774</td>
</tr>
<tr>
<th>PCA14</th>
<td>-0.009937</td>
<td>0.297037</td>
<td>-0.616853</td>
<td>-0.016749</td>
<td>-0.022819</td>
<td>0.207697</td>
<td>-0.023166</td>
<td>0.059798</td>
<td>-0.005286</td>
<td>-0.011615</td>
<td>...</td>
<td>-0.045846</td>
<td>-0.037919</td>
<td>0.152997</td>
<td>0.076117</td>
<td>0.020576</td>
<td>-0.023153</td>
<td>-0.069608</td>
<td>-0.002399</td>
<td>0.008877</td>
<td>-0.035844</td>
</tr>
<tr>
<th>PCA15</th>
<td>0.006424</td>
<td>-0.147293</td>
<td>0.110634</td>
<td>0.010100</td>
<td>-0.048418</td>
<td>0.482027</td>
<td>-0.023940</td>
<td>-0.052777</td>
<td>0.003825</td>
<td>-0.010163</td>
<td>...</td>
<td>-0.374629</td>
<td>-0.352021</td>
<td>0.330514</td>
<td>0.225444</td>
<td>-0.036848</td>
<td>-0.041329</td>
<td>-0.224358</td>
<td>-0.023113</td>
<td>-0.145211</td>
<td>-0.137219</td>
</tr>
<tr>
<th>PCA16</th>
<td>-0.002063</td>
<td>-0.168585</td>
<td>0.205472</td>
<td>-0.010080</td>
<td>-0.013063</td>
<td>0.015048</td>
<td>-0.037268</td>
<td>-0.004535</td>
<td>0.026070</td>
<td>0.000587</td>
<td>...</td>
<td>0.017070</td>
<td>-0.012075</td>
<td>0.008174</td>
<td>0.025528</td>
<td>0.002738</td>
<td>0.093954</td>
<td>-0.001176</td>
<td>-0.014270</td>
<td>0.031753</td>
<td>0.081927</td>
</tr>
<tr>
<th>PCA17</th>
<td>-0.076026</td>
<td>0.022042</td>
<td>-0.102762</td>
<td>0.008285</td>
<td>-0.072648</td>
<td>0.143629</td>
<td>-0.124735</td>
<td>-0.125751</td>
<td>0.070077</td>
<td>0.006334</td>
<td>...</td>
<td>0.077311</td>
<td>-0.031217</td>
<td>0.088107</td>
<td>0.115966</td>
<td>0.024177</td>
<td>0.412227</td>
<td>0.063386</td>
<td>-0.107849</td>
<td>0.028850</td>
<td>0.367692</td>
</tr>
<tr>
<th>PCA18</th>
<td>0.019450</td>
<td>0.125767</td>
<td>0.055768</td>
<td>-0.057001</td>
<td>0.131326</td>
<td>-0.145015</td>
<td>-0.006288</td>
<td>0.000064</td>
<td>-0.016017</td>
<td>0.026961</td>
<td>...</td>
<td>-0.055694</td>
<td>-0.471672</td>
<td>0.172574</td>
<td>-0.184457</td>
<td>0.049056</td>
<td>0.044585</td>
<td>-0.183040</td>
<td>-0.001878</td>
<td>-0.009696</td>
<td>0.091864</td>
</tr>
<tr>
<th>PCA19</th>
<td>-0.021691</td>
<td>0.252819</td>
<td>0.164661</td>
<td>0.289167</td>
<td>-0.264943</td>
<td>0.032503</td>
<td>-0.000042</td>
<td>0.017567</td>
<td>0.221052</td>
<td>0.044605</td>
<td>...</td>
<td>-0.094664</td>
<td>0.060087</td>
<td>-0.135603</td>
<td>-0.210687</td>
<td>-0.055187</td>
<td>0.090900</td>
<td>-0.115150</td>
<td>0.011546</td>
<td>-0.034678</td>
<td>-0.261440</td>
</tr>
<tr>
<th>PCA20</th>
<td>-0.113954</td>
<td>0.451467</td>
<td>0.258380</td>
<td>-0.200684</td>
<td>0.147970</td>
<td>0.050106</td>
<td>0.012605</td>
<td>0.023315</td>
<td>0.546956</td>
<td>-0.004417</td>
<td>...</td>
<td>0.113969</td>
<td>0.089791</td>
<td>0.013249</td>
<td>0.115141</td>
<td>0.044122</td>
<td>-0.044960</td>
<td>-0.271406</td>
<td>-0.020855</td>
<td>0.026399</td>
<td>0.068173</td>
</tr>
<tr>
<th>PCA21</th>
<td>-0.132111</td>
<td>-0.100866</td>
<td>-0.036165</td>
<td>0.384744</td>
<td>-0.301621</td>
<td>0.115073</td>
<td>-0.050716</td>
<td>-0.055408</td>
<td>0.029672</td>
<td>0.048477</td>
<td>...</td>
<td>-0.105299</td>
<td>-0.112471</td>
<td>-0.047063</td>
<td>-0.105964</td>
<td>-0.084927</td>
<td>-0.087080</td>
<td>-0.076907</td>
<td>-0.067666</td>
<td>0.032604</td>
<td>0.206070</td>
</tr>
<tr>
<th>PCA22</th>
<td>-0.072769</td>
<td>-0.167861</td>
<td>-0.109828</td>
<td>-0.429811</td>
<td>-0.110501</td>
<td>0.031351</td>
<td>-0.044331</td>
<td>-0.041303</td>
<td>-0.215577</td>
<td>0.059558</td>
<td>...</td>
<td>0.197363</td>
<td>0.115134</td>
<td>0.224721</td>
<td>-0.164092</td>
<td>-0.099317</td>
<td>-0.025526</td>
<td>0.045114</td>
<td>-0.036308</td>
<td>0.014429</td>
<td>-0.108872</td>
</tr>
<tr>
<th>PCA23</th>
<td>-0.031919</td>
<td>0.110320</td>
<td>0.044184</td>
<td>-0.485270</td>
<td>-0.596947</td>
<td>0.038451</td>
<td>0.019849</td>
<td>0.009443</td>
<td>0.093185</td>
<td>0.062610</td>
<td>...</td>
<td>-0.179474</td>
<td>0.058827</td>
<td>-0.206518</td>
<td>0.063224</td>
<td>0.080718</td>
<td>-0.008347</td>
<td>0.028814</td>
<td>-0.005538</td>
<td>-0.050749</td>
<td>-0.029160</td>
</tr>
<tr>
<th>PCA24</th>
<td>-0.143272</td>
<td>-0.200358</td>
<td>-0.035632</td>
<td>0.051767</td>
<td>0.427991</td>
<td>0.102910</td>
<td>0.042646</td>
<td>0.019356</td>
<td>0.029437</td>
<td>0.178316</td>
<td>...</td>
<td>0.024236</td>
<td>0.067691</td>
<td>-0.078611</td>
<td>0.236175</td>
<td>0.272655</td>
<td>-0.014250</td>
<td>-0.095658</td>
<td>-0.006384</td>
<td>0.040224</td>
<td>-0.150950</td>
</tr>
<tr>
<th>PCA25</th>
<td>-0.079101</td>
<td>-0.393636</td>
<td>-0.111903</td>
<td>-0.080859</td>
<td>-0.228273</td>
<td>0.104471</td>
<td>0.117075</td>
<td>0.084462</td>
<td>0.018100</td>
<td>-0.016135</td>
<td>...</td>
<td>0.010286</td>
<td>-0.062485</td>
<td>-0.276416</td>
<td>0.042982</td>
<td>0.117514</td>
<td>-0.002596</td>
<td>-0.195490</td>
<td>0.057333</td>
<td>0.396090</td>
<td>0.032353</td>
</tr>
<tr>
<th>PCA26</th>
<td>-0.147943</td>
<td>-0.174059</td>
<td>-0.073523</td>
<td>0.022716</td>
<td>0.020764</td>
<td>-0.286847</td>
<td>0.040330</td>
<td>0.029379</td>
<td>0.237851</td>
<td>0.357615</td>
<td>...</td>
<td>-0.054097</td>
<td>0.019570</td>
<td>0.113826</td>
<td>-0.083088</td>
<td>-0.147995</td>
<td>-0.024992</td>
<td>-0.196585</td>
<td>-0.024434</td>
<td>0.302781</td>
<td>0.011027</td>
</tr>
<tr>
<th>PCA27</th>
<td>-0.063652</td>
<td>-0.125765</td>
<td>-0.031184</td>
<td>0.002883</td>
<td>0.041537</td>
<td>-0.191658</td>
<td>-0.104296</td>
<td>-0.064417</td>
<td>0.171948</td>
<td>-0.545979</td>
<td>...</td>
<td>-0.041545</td>
<td>0.005139</td>
<td>-0.019393</td>
<td>0.052668</td>
<td>0.041642</td>
<td>0.033409</td>
<td>-0.147929</td>
<td>-0.042142</td>
<td>0.329892</td>
<td>-0.016509</td>
</tr>
<tr>
<th>PCA28</th>
<td>0.019649</td>
<td>-0.118430</td>
<td>-0.016836</td>
<td>-0.056286</td>
<td>0.036205</td>
<td>0.280475</td>
<td>0.090570</td>
<td>0.085198</td>
<td>0.204044</td>
<td>-0.082148</td>
<td>...</td>
<td>-0.001044</td>
<td>0.016710</td>
<td>0.277037</td>
<td>-0.161615</td>
<td>-0.233274</td>
<td>0.000281</td>
<td>-0.099283</td>
<td>0.070756</td>
<td>0.239413</td>
<td>-0.033304</td>
</tr>
<tr>
<th>PCA29</th>
<td>0.396350</td>
<td>0.030105</td>
<td>0.030327</td>
<td>-0.026910</td>
<td>-0.009337</td>
<td>0.013676</td>
<td>-0.270833</td>
<td>-0.087515</td>
<td>0.059989</td>
<td>0.224066</td>
<td>...</td>
<td>0.001610</td>
<td>0.002205</td>
<td>-0.016246</td>
<td>0.008069</td>
<td>0.053924</td>
<td>-0.044538</td>
<td>-0.042132</td>
<td>0.041337</td>
<td>0.415570</td>
<td>-0.040189</td>
</tr>
<tr>
<th>PCA30</th>
<td>0.045246</td>
<td>-0.000243</td>
<td>0.004132</td>
<td>-0.049068</td>
<td>0.050486</td>
<td>-0.010981</td>
<td>0.024495</td>
<td>-0.013838</td>
<td>-0.019266</td>
<td>0.003379</td>
<td>...</td>
<td>-0.004649</td>
<td>0.028906</td>
<td>-0.180932</td>
<td>0.499746</td>
<td>-0.212222</td>
<td>0.001782</td>
<td>-0.006446</td>
<td>-0.015904</td>
<td>0.014635</td>
<td>-0.022131</td>
</tr>
<tr>
<th>PCA31</th>
<td>0.045071</td>
<td>-0.023275</td>
<td>0.013500</td>
<td>0.015520</td>
<td>-0.021314</td>
<td>0.014414</td>
<td>0.114919</td>
<td>0.003003</td>
<td>0.018975</td>
<td>-0.012011</td>
<td>...</td>
<td>0.016421</td>
<td>-0.019003</td>
<td>0.165508</td>
<td>-0.474281</td>
<td>0.258007</td>
<td>0.006679</td>
<td>-0.009194</td>
<td>-0.023719</td>
<td>0.022497</td>
<td>-0.042029</td>
</tr>
<tr>
<th>PCA32</th>
<td>0.184480</td>
<td>-0.031177</td>
<td>0.029257</td>
<td>0.013773</td>
<td>0.000364</td>
<td>0.008244</td>
<td>0.276494</td>
<td>-0.069245</td>
<td>-0.002848</td>
<td>0.002829</td>
<td>...</td>
<td>-0.002638</td>
<td>0.022685</td>
<td>-0.052815</td>
<td>0.148671</td>
<td>-0.095870</td>
<td>0.007208</td>
<td>-0.004994</td>
<td>-0.101860</td>
<td>0.081385</td>
<td>-0.063637</td>
</tr>
<tr>
<th>PCA33</th>
<td>0.073517</td>
<td>0.003765</td>
<td>-0.035177</td>
<td>-0.002728</td>
<td>-0.001940</td>
<td>0.023234</td>
<td>-0.305427</td>
<td>0.278102</td>
<td>-0.002170</td>
<td>-0.009059</td>
<td>...</td>
<td>-0.003742</td>
<td>0.002045</td>
<td>-0.018843</td>
<td>0.028351</td>
<td>-0.016651</td>
<td>-0.011891</td>
<td>0.012557</td>
<td>0.200013</td>
<td>-0.071876</td>
<td>-0.033196</td>
</tr>
<tr>
<th>PCA34</th>
<td>-0.121765</td>
<td>-0.001490</td>
<td>0.008137</td>
<td>-0.002484</td>
<td>-0.004495</td>
<td>-0.002865</td>
<td>0.049063</td>
<td>-0.051814</td>
<td>0.001155</td>
<td>-0.003701</td>
<td>...</td>
<td>0.003707</td>
<td>-0.006753</td>
<td>0.034195</td>
<td>-0.135694</td>
<td>0.113160</td>
<td>0.005188</td>
<td>0.000195</td>
<td>-0.025334</td>
<td>0.002410</td>
<td>0.005882</td>
</tr>
<tr>
<th>PCA35</th>
<td>0.753832</td>
<td>0.010344</td>
<td>-0.016719</td>
<td>0.010014</td>
<td>-0.006715</td>
<td>0.004617</td>
<td>-0.055937</td>
<td>-0.127061</td>
<td>0.034480</td>
<td>-0.009555</td>
<td>...</td>
<td>0.001059</td>
<td>0.005538</td>
<td>0.011193</td>
<td>-0.032691</td>
<td>0.033876</td>
<td>-0.029427</td>
<td>-0.031208</td>
<td>-0.038283</td>
<td>0.033433</td>
<td>0.010560</td>
</tr>
<tr>
<th>PCA36</th>
<td>0.073157</td>
<td>-0.046039</td>
<td>0.016571</td>
<td>0.000881</td>
<td>-0.001905</td>
<td>0.001237</td>
<td>-0.045801</td>
<td>0.453050</td>
<td>-0.004169</td>
<td>-0.014338</td>
<td>...</td>
<td>0.001934</td>
<td>-0.000481</td>
<td>0.005292</td>
<td>-0.005100</td>
<td>-0.006035</td>
<td>0.003981</td>
<td>-0.010492</td>
<td>-0.004612</td>
<td>0.013520</td>
<td>0.000746</td>
</tr>
<tr>
<th>PCA37</th>
<td>0.062844</td>
<td>0.022944</td>
<td>-0.008563</td>
<td>-0.002003</td>
<td>-0.001176</td>
<td>-0.000044</td>
<td>0.666319</td>
<td>-0.393629</td>
<td>0.004910</td>
<td>-0.005330</td>
<td>...</td>
<td>-0.000462</td>
<td>-0.000818</td>
<td>0.000832</td>
<td>0.004627</td>
<td>-0.010293</td>
<td>-0.022867</td>
<td>-0.000841</td>
<td>-0.002788</td>
<td>-0.008353</td>
<td>0.009097</td>
</tr>
<tr>
<th>PCA38</th>
<td>0.013212</td>
<td>-0.004686</td>
<td>-0.001257</td>
<td>-0.003572</td>
<td>-0.003939</td>
<td>-0.000905</td>
<td>-0.341410</td>
<td>-0.479425</td>
<td>0.004594</td>
<td>-0.016428</td>
<td>...</td>
<td>-0.001923</td>
<td>-0.002038</td>
<td>-0.000327</td>
<td>0.003273</td>
<td>-0.002177</td>
<td>-0.168079</td>
<td>-0.005841</td>
<td>-0.304942</td>
<td>0.026061</td>
<td>0.035230</td>
</tr>
<tr>
<th>PCA39</th>
<td>0.000220</td>
<td>0.003075</td>
<td>-0.003562</td>
<td>0.002892</td>
<td>0.000815</td>
<td>-0.001467</td>
<td>-0.082884</td>
<td>-0.194335</td>
<td>0.000257</td>
<td>0.035141</td>
<td>...</td>
<td>0.000068</td>
<td>0.001291</td>
<td>-0.000211</td>
<td>0.003236</td>
<td>-0.006481</td>
<td>0.672618</td>
<td>0.000055</td>
<td>0.123536</td>
<td>-0.006704</td>
<td>-0.030725</td>
</tr>
<tr>
<th>PCA40</th>
<td>-0.102007</td>
<td>0.009126</td>
<td>-0.008889</td>
<td>-0.001568</td>
<td>0.001657</td>
<td>0.002474</td>
<td>-0.126929</td>
<td>-0.357335</td>
<td>0.000664</td>
<td>-0.000589</td>
<td>...</td>
<td>-0.000994</td>
<td>-0.002376</td>
<td>-0.000246</td>
<td>-0.000389</td>
<td>-0.005592</td>
<td>-0.154675</td>
<td>-0.001793</td>
<td>0.848711</td>
<td>0.011080</td>
<td>-0.025367</td>
</tr>
<tr>
<th>PCA41</th>
<td>0.011445</td>
<td>-0.000122</td>
<td>-0.000112</td>
<td>0.001012</td>
<td>-0.000683</td>
<td>0.000132</td>
<td>-0.000668</td>
<td>-0.004297</td>
<td>-0.000297</td>
<td>0.002977</td>
<td>...</td>
<td>0.000197</td>
<td>0.000209</td>
<td>-0.000037</td>
<td>0.000420</td>
<td>0.002994</td>
<td>-0.030379</td>
<td>0.001795</td>
<td>-0.046137</td>
<td>-0.003509</td>
<td>-0.700042</td>
</tr>
<tr>
<th>PCA42</th>
<td>0.000253</td>
<td>-0.000040</td>
<td>0.000010</td>
<td>-0.000048</td>
<td>0.000277</td>
<td>0.000022</td>
<td>0.000364</td>
<td>0.000133</td>
<td>-0.000495</td>
<td>0.000068</td>
<td>...</td>
<td>0.001349</td>
<td>-0.000243</td>
<td>-0.006791</td>
<td>0.001204</td>
<td>-0.706974</td>
<td>-0.000630</td>
<td>-0.000003</td>
<td>-0.001200</td>
<td>-0.000121</td>
<td>-0.002630</td>
</tr>
</tbody>
</table>
<p>42 rows × 42 columns</p>
</div>
```python
# Grouped bar chart comparing the absolute feature loadings of the first two
# principal components for the M31 field. `m31_names` and `m31_pca_comp` are
# defined in earlier notebook cells — TODO confirm they hold the 42 feature
# names and the (n_components x n_features) loading matrix respectively.
labels = m31_names
# Absolute value: only the magnitude of a loading indicates importance.
pca1 = abs(np.array(m31_pca_comp.loc[['PCA1']])[0])
pca2 = abs(np.array(m31_pca_comp.loc[['PCA2']])[0])
x = np.arange(len(labels))  # the label locations
width = 0.35  # the width of each bar (two bars per feature)
fig, ax = plt.subplots(figsize=(14, 8))
# Offset the two series by half a bar width so they sit side by side.
rects1 = ax.bar(x - width/2, pca1, width, label='PCA 1 (29.3% of var)', color='g')
rects2 = ax.bar(x + width/2, pca2, width, label='PCA 2 (17.8% of var)', color='r')
# Add some text for labels, title and custom x-axis tick labels, etc.
ax.set_ylabel('Absolute value of Importance')
ax.set_title('Importance of features by principal component for M31')
ax.set_xticks(x)
ax.set_xticklabels(labels, rotation='vertical')
ax.legend(loc='upper left')
def autolabel(rects):
    """Attach a text label above each bar in *rects*, displaying its height."""
    for rect in rects:
        height = rect.get_height()
        ax.annotate('{0:.2f}'.format(height),
                    xy=(rect.get_x() + rect.get_width() / 2, height),
                    xytext=(0, 3),  # 3 points vertical offset
                    textcoords="offset points",
                    ha='center', va='bottom')
autolabel(rects1)
autolabel(rects2)
fig.tight_layout()
#plt.savefig('../figs/pca/m31_pca_barchart.png', dpi=400, bbox_inches='tight')
```

```python
```
```python
```
```python
# Display the loadings of the first (most important) principal component.
m31_pca_comp.loc[['PCA1']] #most important principal component (remember 29.3% variance)
```
<div>
<style scoped>
.dataframe tbody tr th:only-of-type {
vertical-align: middle;
}
.dataframe tbody tr th {
vertical-align: top;
}
.dataframe thead th {
text-align: right;
}
</style>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>amplitude</th>
<th>beyond_1_std</th>
<th>beyond_2_std</th>
<th>cusum</th>
<th>eta</th>
<th>eta_e</th>
<th>inter_percentile_range_25</th>
<th>inter_percentile_range_10</th>
<th>kurtosis</th>
<th>linear_fit_slope</th>
<th>...</th>
<th>periodogram_cusum</th>
<th>periodogram_eta</th>
<th>periodogram_inter_percentile_range_25</th>
<th>periodogram_standard_deviation</th>
<th>periodogram_percent_amplitude</th>
<th>chi2</th>
<th>skew</th>
<th>standard_deviation</th>
<th>stetson_K</th>
<th>weighted_mean</th>
</tr>
</thead>
<tbody>
<tr>
<th>PCA1</th>
<td>0.27022</td>
<td>0.008888</td>
<td>-0.00159</td>
<td>0.091567</td>
<td>-0.0788</td>
<td>-0.102478</td>
<td>0.273897</td>
<td>0.27825</td>
<td>-0.017195</td>
<td>-0.073426</td>
<td>...</td>
<td>0.007447</td>
<td>0.041709</td>
<td>0.114543</td>
<td>0.07762</td>
<td>-0.03179</td>
<td>0.104492</td>
<td>-0.115696</td>
<td>0.281599</td>
<td>-0.048964</td>
<td>0.192176</td>
</tr>
</tbody>
</table>
<p>1 rows × 42 columns</p>
</div>
```python
# Print PC1 loadings sorted ascending by their (signed) value.
# NOTE(review): this sorts by the signed loading, not by absolute magnitude,
# so large negative loadings appear first even though they are "important" —
# confirm whether sorting by abs() was intended, as in the bar chart above.
ordered_m31_pca1 = OrderedDict(sorted(m31_pca_comp.loc[['PCA1']].items(), key=lambda item:np.max(item[1])))
for key, value in ordered_m31_pca1.items():
    print(key, value)
```
period_s_to_n_2 PCA1 -0.130181
Name: period_s_to_n_2, dtype: float32
median_buffer_range_percentage_5 PCA1 -0.130151
Name: median_buffer_range_percentage_5, dtype: float32
period_s_to_n_1 PCA1 -0.11707
Name: period_s_to_n_1, dtype: float32
skew PCA1 -0.115696
Name: skew, dtype: float32
eta_e PCA1 -0.102478
Name: eta_e, dtype: float32
period_s_to_n_0 PCA1 -0.097057
Name: period_s_to_n_0, dtype: float32
eta PCA1 -0.0788
Name: eta, dtype: float32
linear_fit_slope PCA1 -0.073426
Name: linear_fit_slope, dtype: float32
linear_trend PCA1 -0.050854
Name: linear_trend, dtype: float32
stetson_K PCA1 -0.048964
Name: stetson_K, dtype: float32
periodogram_percent_amplitude PCA1 -0.03179
Name: periodogram_percent_amplitude, dtype: float32
periodogram_amplitude PCA1 -0.030639
Name: periodogram_amplitude, dtype: float32
kurtosis PCA1 -0.017195
Name: kurtosis, dtype: float32
periodogram_beyond_2_std PCA1 -0.007211
Name: periodogram_beyond_2_std, dtype: float32
magnitude_percentage_ratio_40_5 PCA1 -0.002592
Name: magnitude_percentage_ratio_40_5, dtype: float32
beyond_2_std PCA1 -0.00159
Name: beyond_2_std, dtype: float32
periodogram_beyond_1_std PCA1 -0.001515
Name: periodogram_beyond_1_std, dtype: float32
magnitude_percentage_ratio_20_10 PCA1 0.000656
Name: magnitude_percentage_ratio_20_10, dtype: float32
periodogram_cusum PCA1 0.007447
Name: periodogram_cusum, dtype: float32
beyond_1_std PCA1 0.008888
Name: beyond_1_std, dtype: float32
periodogram_eta PCA1 0.041709
Name: periodogram_eta, dtype: float32
period_2 PCA1 0.043743
Name: period_2, dtype: float32
period_1 PCA1 0.048892
Name: period_1, dtype: float32
period_0 PCA1 0.077319
Name: period_0, dtype: float32
periodogram_standard_deviation PCA1 0.07762
Name: periodogram_standard_deviation, dtype: float32
cusum PCA1 0.091567
Name: cusum, dtype: float32
linear_fit_reduced_chi2 PCA1 0.102381
Name: linear_fit_reduced_chi2, dtype: float32
chi2 PCA1 0.104492
Name: chi2, dtype: float32
periodogram_inter_percentile_range_25 PCA1 0.114543
Name: periodogram_inter_percentile_range_25, dtype: float32
weighted_mean PCA1 0.192176
Name: weighted_mean, dtype: float32
mean PCA1 0.198221
Name: mean, dtype: float32
linear_fit_slope_sigma PCA1 0.200557
Name: linear_fit_slope_sigma, dtype: float32
maximum_slope PCA1 0.219379
Name: maximum_slope, dtype: float32
percent_amplitude PCA1 0.259667
Name: percent_amplitude, dtype: float32
amplitude PCA1 0.27022
Name: amplitude, dtype: float32
percent_difference_magnitude_percentile_20 PCA1 0.273894
Name: percent_difference_magnitude_percentile_20, dtype: float32
inter_percentile_range_25 PCA1 0.273897
Name: inter_percentile_range_25, dtype: float32
median_absolute_deviation PCA1 0.273974
Name: median_absolute_deviation, dtype: float32
linear_trend_sigma PCA1 0.274282
Name: linear_trend_sigma, dtype: float32
percent_difference_magnitude_percentile_5 PCA1 0.27758
Name: percent_difference_magnitude_percentile_5, dtype: float32
inter_percentile_range_10 PCA1 0.27825
Name: inter_percentile_range_10, dtype: float32
standard_deviation PCA1 0.281599
Name: standard_deviation, dtype: float32
# ABOVE IS ORDERED LEAST IMPORTANT - MOST IMPORTANT FEATURES IN 1ST PRINCIPAL COMPONENT (29.3% var)
### Thus, by looking at the PC1 (First Principal Component) which is the first row in pca_feature_importance.csv...
### we can conclude that standard_deviation, inter_percentile_range_10, percent_difference_magnitude_percentile_5, linear_trend_sigma, and median_absolute_deviation are the most important.
# FOR SECOND PRINCIPAL COMPONENT (17.8% var), we find...
```python
# Sort the PC2 loadings ascending by their signed value and print each
# feature with its loading.
pca2_loadings = sorted(m31_pca_comp.loc[['PCA2']].items(),
                       key=lambda item: np.max(item[1]))
ordered_m31_pca2 = OrderedDict(pca2_loadings)
for feature, loading in ordered_m31_pca2.items():
    print(feature, loading)
```
periodogram_beyond_2_std PCA2 -0.300604
Name: periodogram_beyond_2_std, dtype: float32
periodogram_beyond_1_std PCA2 -0.279435
Name: periodogram_beyond_1_std, dtype: float32
eta PCA2 -0.267781
Name: eta, dtype: float32
periodogram_eta PCA2 -0.121068
Name: periodogram_eta, dtype: float32
eta_e PCA2 -0.113904
Name: eta_e, dtype: float32
weighted_mean PCA2 -0.091519
Name: weighted_mean, dtype: float32
mean PCA2 -0.089205
Name: mean, dtype: float32
linear_fit_slope_sigma PCA2 -0.04543
Name: linear_fit_slope_sigma, dtype: float32
maximum_slope PCA2 -0.044441
Name: maximum_slope, dtype: float32
kurtosis PCA2 -0.044017
Name: kurtosis, dtype: float32
beyond_2_std PCA2 -0.042062
Name: beyond_2_std, dtype: float32
median_buffer_range_percentage_5 PCA2 -0.038928
Name: median_buffer_range_percentage_5, dtype: float32
linear_fit_slope PCA2 -0.019775
Name: linear_fit_slope, dtype: float32
linear_trend PCA2 -0.017034
Name: linear_trend, dtype: float32
periodogram_cusum PCA2 -0.007698
Name: periodogram_cusum, dtype: float32
amplitude PCA2 -0.00006
Name: amplitude, dtype: float32
percent_amplitude PCA2 0.003665
Name: percent_amplitude, dtype: float32
skew PCA2 0.010693
Name: skew, dtype: float32
linear_trend_sigma PCA2 0.016899
Name: linear_trend_sigma, dtype: float32
standard_deviation PCA2 0.024447
Name: standard_deviation, dtype: float32
period_2 PCA2 0.031177
Name: period_2, dtype: float32
percent_difference_magnitude_percentile_5 PCA2 0.031585
Name: percent_difference_magnitude_percentile_5, dtype: float32
inter_percentile_range_10 PCA2 0.031887
Name: inter_percentile_range_10, dtype: float32
median_absolute_deviation PCA2 0.035938
Name: median_absolute_deviation, dtype: float32
linear_fit_reduced_chi2 PCA2 0.036438
Name: linear_fit_reduced_chi2, dtype: float32
chi2 PCA2 0.03785
Name: chi2, dtype: float32
magnitude_percentage_ratio_20_10 PCA2 0.039366
Name: magnitude_percentage_ratio_20_10, dtype: float32
inter_percentile_range_25 PCA2 0.040137
Name: inter_percentile_range_25, dtype: float32
percent_difference_magnitude_percentile_20 PCA2 0.045495
Name: percent_difference_magnitude_percentile_20, dtype: float32
magnitude_percentage_ratio_40_5 PCA2 0.055345
Name: magnitude_percentage_ratio_40_5, dtype: float32
period_1 PCA2 0.063745
Name: period_1, dtype: float32
beyond_1_std PCA2 0.07433
Name: beyond_1_std, dtype: float32
stetson_K PCA2 0.084788
Name: stetson_K, dtype: float32
periodogram_inter_percentile_range_25 PCA2 0.09913
Name: periodogram_inter_percentile_range_25, dtype: float32
period_0 PCA2 0.138276
Name: period_0, dtype: float32
cusum PCA2 0.23988
Name: cusum, dtype: float32
period_s_to_n_2 PCA2 0.278525
Name: period_s_to_n_2, dtype: float32
period_s_to_n_1 PCA2 0.290998
Name: period_s_to_n_1, dtype: float32
periodogram_standard_deviation PCA2 0.296348
Name: periodogram_standard_deviation, dtype: float32
period_s_to_n_0 PCA2 0.310306
Name: period_s_to_n_0, dtype: float32
periodogram_percent_amplitude PCA2 0.346211
Name: periodogram_percent_amplitude, dtype: float32
periodogram_amplitude PCA2 0.346471
Name: periodogram_amplitude, dtype: float32
### Thus, by looking at the PC2 (2nd Principal Component) which is the second row in pca_feature_importance.csv...
### we can conclude that periodogram_amplitude, periodogram_percent_amplitude, period_s_to_n_0, periodogram_standard_deviation, period_s_to_n_1 are the most important.
### Third Principal Component (PC3)
```python
# Sort the PC3 loadings ascending by their signed value and print them.
# (Variable name `ordered_m31_pca1` is reused from the earlier PC1 cell.)
ordered_m31_pca1 = OrderedDict(
    sorted(m31_pca_comp.loc[['PCA3']].items(), key=lambda item: np.max(item[1]))
)
for feature, loading in ordered_m31_pca1.items():
    print(feature, loading)
```
beyond_1_std PCA3 -0.322336
Name: beyond_1_std, dtype: float32
median_buffer_range_percentage_5 PCA3 -0.283504
Name: median_buffer_range_percentage_5, dtype: float32
stetson_K PCA3 -0.272183
Name: stetson_K, dtype: float32
linear_fit_slope PCA3 -0.264892
Name: linear_fit_slope, dtype: float32
linear_fit_slope_sigma PCA3 -0.262888
Name: linear_fit_slope_sigma, dtype: float32
weighted_mean PCA3 -0.248516
Name: weighted_mean, dtype: float32
mean PCA3 -0.240167
Name: mean, dtype: float32
linear_trend PCA3 -0.224903
Name: linear_trend, dtype: float32
magnitude_percentage_ratio_40_5 PCA3 -0.177742
Name: magnitude_percentage_ratio_40_5, dtype: float32
magnitude_percentage_ratio_20_10 PCA3 -0.152462
Name: magnitude_percentage_ratio_20_10, dtype: float32
median_absolute_deviation PCA3 -0.069924
Name: median_absolute_deviation, dtype: float32
inter_percentile_range_25 PCA3 -0.062708
Name: inter_percentile_range_25, dtype: float32
maximum_slope PCA3 -0.048859
Name: maximum_slope, dtype: float32
periodogram_standard_deviation PCA3 -0.040777
Name: periodogram_standard_deviation, dtype: float32
skew PCA3 -0.039459
Name: skew, dtype: float32
eta PCA3 -0.03784
Name: eta, dtype: float32
inter_percentile_range_10 PCA3 -0.036987
Name: inter_percentile_range_10, dtype: float32
eta_e PCA3 -0.032995
Name: eta_e, dtype: float32
linear_trend_sigma PCA3 -0.029143
Name: linear_trend_sigma, dtype: float32
percent_difference_magnitude_percentile_20 PCA3 -0.026696
Name: percent_difference_magnitude_percentile_20, dtype: float32
periodogram_percent_amplitude PCA3 -0.022052
Name: periodogram_percent_amplitude, dtype: float32
periodogram_amplitude PCA3 -0.021868
Name: periodogram_amplitude, dtype: float32
standard_deviation PCA3 0.006628
Name: standard_deviation, dtype: float32
cusum PCA3 0.009092
Name: cusum, dtype: float32
period_s_to_n_0 PCA3 0.01896
Name: period_s_to_n_0, dtype: float32
periodogram_inter_percentile_range_25 PCA3 0.019305
Name: periodogram_inter_percentile_range_25, dtype: float32
percent_difference_magnitude_percentile_5 PCA3 0.025502
Name: percent_difference_magnitude_percentile_5, dtype: float32
period_2 PCA3 0.030678
Name: period_2, dtype: float32
period_s_to_n_1 PCA3 0.032094
Name: period_s_to_n_1, dtype: float32
period_s_to_n_2 PCA3 0.03776
Name: period_s_to_n_2, dtype: float32
beyond_2_std PCA3 0.040438
Name: beyond_2_std, dtype: float32
period_1 PCA3 0.046037
Name: period_1, dtype: float32
period_0 PCA3 0.046159
Name: period_0, dtype: float32
periodogram_beyond_2_std PCA3 0.059001
Name: periodogram_beyond_2_std, dtype: float32
periodogram_eta PCA3 0.069852
Name: periodogram_eta, dtype: float32
amplitude PCA3 0.076353
Name: amplitude, dtype: float32
periodogram_beyond_1_std PCA3 0.076935
Name: periodogram_beyond_1_std, dtype: float32
periodogram_cusum PCA3 0.114642
Name: periodogram_cusum, dtype: float32
percent_amplitude PCA3 0.128699
Name: percent_amplitude, dtype: float32
kurtosis PCA3 0.24182
Name: kurtosis, dtype: float32
linear_fit_reduced_chi2 PCA3 0.34202
Name: linear_fit_reduced_chi2, dtype: float32
chi2 PCA3 0.347273
Name: chi2, dtype: float32
# DISK
```python
# DISK field: memory-map the object IDs and feature matrix, and read the
# feature-name list. The feature matrix is (n_objects x n_features) float32.
disk_oid = np.memmap('../data/oid_disk.dat', mode='r', dtype=np.uint64)
# Use a context manager so the name file is closed promptly (the original
# left the file handle open).
with open('../data/feature_disk.name') as name_file:
    disk_names = name_file.read().split()
disk_x = np.memmap('../data/feature_disk.dat', mode='r', dtype=np.float32,
                   shape=(disk_oid.size, len(disk_names)))
```
```python
# Wrap the memory-mapped feature matrix in a DataFrame indexed by object ID,
# with one column per extracted light-curve feature; display it.
disk = pd.DataFrame(disk_x, index=disk_oid, columns=disk_names)
disk
```
<div>
<style scoped>
.dataframe tbody tr th:only-of-type {
vertical-align: middle;
}
.dataframe tbody tr th {
vertical-align: top;
}
.dataframe thead th {
text-align: right;
}
</style>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>amplitude</th>
<th>beyond_1_std</th>
<th>beyond_2_std</th>
<th>cusum</th>
<th>eta</th>
<th>eta_e</th>
<th>inter_percentile_range_25</th>
<th>inter_percentile_range_10</th>
<th>kurtosis</th>
<th>linear_fit_slope</th>
<th>...</th>
<th>periodogram_cusum</th>
<th>periodogram_eta</th>
<th>periodogram_inter_percentile_range_25</th>
<th>periodogram_standard_deviation</th>
<th>periodogram_percent_amplitude</th>
<th>chi2</th>
<th>skew</th>
<th>standard_deviation</th>
<th>stetson_K</th>
<th>weighted_mean</th>
</tr>
</thead>
<tbody>
<tr>
<th>807216300041250</th>
<td>0.370500</td>
<td>0.294828</td>
<td>0.055172</td>
<td>0.059051</td>
<td>2.035710</td>
<td>4.324502e+11</td>
<td>0.131500</td>
<td>0.256500</td>
<td>0.560421</td>
<td>-0.000141</td>
<td>...</td>
<td>0.193735</td>
<td>0.019562</td>
<td>0.656306</td>
<td>0.772382</td>
<td>8.776011</td>
<td>1.653118</td>
<td>0.239919</td>
<td>0.103299</td>
<td>0.778457</td>
<td>19.047564</td>
</tr>
<tr>
<th>807216300004675</th>
<td>0.366500</td>
<td>0.303030</td>
<td>0.044657</td>
<td>0.047631</td>
<td>2.088702</td>
<td>4.230982e+11</td>
<td>0.150501</td>
<td>0.286200</td>
<td>0.176171</td>
<td>-0.000165</td>
<td>...</td>
<td>0.155171</td>
<td>0.023204</td>
<td>0.648063</td>
<td>0.742648</td>
<td>8.712999</td>
<td>1.459253</td>
<td>0.251311</td>
<td>0.114242</td>
<td>0.803487</td>
<td>19.250351</td>
</tr>
<tr>
<th>807216300006368</th>
<td>0.352500</td>
<td>0.316770</td>
<td>0.052795</td>
<td>0.039514</td>
<td>2.060871</td>
<td>4.241239e+11</td>
<td>0.144501</td>
<td>0.266199</td>
<td>0.288710</td>
<td>-0.000204</td>
<td>...</td>
<td>0.152846</td>
<td>0.022364</td>
<td>0.679952</td>
<td>0.643210</td>
<td>6.947167</td>
<td>1.555116</td>
<td>0.241078</td>
<td>0.110432</td>
<td>0.790517</td>
<td>19.168779</td>
</tr>
<tr>
<th>807216300007936</th>
<td>0.076500</td>
<td>0.256975</td>
<td>0.042584</td>
<td>0.166819</td>
<td>1.589441</td>
<td>2.227283e+11</td>
<td>0.018000</td>
<td>0.035400</td>
<td>4.314334</td>
<td>-0.000122</td>
<td>...</td>
<td>0.128832</td>
<td>0.027912</td>
<td>0.707155</td>
<td>1.412706</td>
<td>54.132122</td>
<td>2.081866</td>
<td>0.952919</td>
<td>0.015871</td>
<td>0.731919</td>
<td>15.379960</td>
</tr>
<tr>
<th>807216300005656</th>
<td>0.272000</td>
<td>0.307810</td>
<td>0.041348</td>
<td>0.103563</td>
<td>1.808017</td>
<td>3.770301e+11</td>
<td>0.118250</td>
<td>0.230200</td>
<td>0.278995</td>
<td>-0.000120</td>
<td>...</td>
<td>0.170926</td>
<td>0.016058</td>
<td>0.734269</td>
<td>1.178830</td>
<td>20.001444</td>
<td>1.367357</td>
<td>0.503628</td>
<td>0.090566</td>
<td>0.794674</td>
<td>18.977293</td>
</tr>
<tr>
<th>...</th>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
</tr>
<tr>
<th>807201400043063</th>
<td>0.134999</td>
<td>0.261194</td>
<td>0.052239</td>
<td>0.074626</td>
<td>1.939203</td>
<td>3.478263e+11</td>
<td>0.038000</td>
<td>0.090500</td>
<td>1.905449</td>
<td>0.000004</td>
<td>...</td>
<td>0.204491</td>
<td>0.024706</td>
<td>0.661476</td>
<td>0.982081</td>
<td>14.566538</td>
<td>8.745687</td>
<td>-0.273337</td>
<td>0.037209</td>
<td>0.726424</td>
<td>16.256903</td>
</tr>
<tr>
<th>807201300057959</th>
<td>0.354500</td>
<td>0.315789</td>
<td>0.044892</td>
<td>0.068742</td>
<td>2.002020</td>
<td>3.240108e+11</td>
<td>0.130999</td>
<td>0.260000</td>
<td>0.215598</td>
<td>-0.000283</td>
<td>...</td>
<td>0.159282</td>
<td>0.024428</td>
<td>0.763629</td>
<td>0.731055</td>
<td>7.406523</td>
<td>1.563799</td>
<td>-0.021221</td>
<td>0.100870</td>
<td>0.791641</td>
<td>19.259731</td>
</tr>
<tr>
<th>807201300060502</th>
<td>0.355000</td>
<td>0.294753</td>
<td>0.050926</td>
<td>0.045662</td>
<td>1.968179</td>
<td>3.700885e+11</td>
<td>0.129000</td>
<td>0.260000</td>
<td>0.810251</td>
<td>0.000067</td>
<td>...</td>
<td>0.154592</td>
<td>0.041831</td>
<td>0.691424</td>
<td>0.706343</td>
<td>6.929576</td>
<td>1.593399</td>
<td>-0.093009</td>
<td>0.102527</td>
<td>0.761554</td>
<td>19.280340</td>
</tr>
<tr>
<th>807201400031737</th>
<td>0.217500</td>
<td>0.282511</td>
<td>0.043348</td>
<td>0.043773</td>
<td>1.984369</td>
<td>4.224671e+11</td>
<td>0.075249</td>
<td>0.151600</td>
<td>0.663024</td>
<td>-0.000127</td>
<td>...</td>
<td>0.169853</td>
<td>0.021363</td>
<td>0.733726</td>
<td>0.743227</td>
<td>7.366379</td>
<td>1.184451</td>
<td>0.327773</td>
<td>0.059975</td>
<td>0.778571</td>
<td>18.675014</td>
</tr>
<tr>
<th>807201400053684</th>
<td>0.263000</td>
<td>0.288462</td>
<td>0.044872</td>
<td>0.049965</td>
<td>1.924481</td>
<td>3.133998e+11</td>
<td>0.093000</td>
<td>0.197201</td>
<td>0.428822</td>
<td>-0.000145</td>
<td>...</td>
<td>0.192112</td>
<td>0.017298</td>
<td>0.741327</td>
<td>0.870961</td>
<td>11.738986</td>
<td>1.249155</td>
<td>0.291988</td>
<td>0.075488</td>
<td>0.789339</td>
<td>18.929688</td>
</tr>
</tbody>
</table>
<p>1790565 rows × 42 columns</p>
</div>
```python
# Fit PCA on the DISK field and draw the loading vectors of the first two
# components. `applypca` and `pcaplot` are helper functions defined in an
# earlier notebook cell — presumably applypca standardizes and fits
# sklearn PCA; verify against their definitions.
fig, ax = plt.subplots(figsize=(14, 14))
pca, x_new, score, coeff = applypca(disk) #apply DISK field
plt.xlim(-0.2,0.4)
plt.ylim(-0.4,0.4)
plt.xlabel("PC{}".format(1))
plt.ylabel("PC{}".format(2))
plt.title("DISK")
# Call the helper with only the first 2 PCs; 'white' hides the data points
# so only the component (loading) arrows are visible.
pcaplot(x_new[:,0:2], np.transpose(pca.components_[0:2, :]), 'white') #just show components, not data
#plt.savefig('../figs/pca/disk_pca_importance.png', dpi=400, bbox_inches='tight')
```

```python
# Same biplot as above but with the projected data points drawn in orange
# underneath the loading arrows. Reuses `pca` and `x_new` from the previous
# cell, so that cell must have been run first.
fig, ax = plt.subplots(figsize=(14, 14))
plt.xlabel("PC{}".format(1))
plt.ylabel("PC{}".format(2))
plt.title("DISK")
pcaplot(x_new[:,0:2], np.transpose(pca.components_[0:2, :]), 'darkorange')
#plt.savefig('../figs/pca/disk_pca_overlay_data.png', dpi=400, bbox_inches='tight')
```

```python
# Fraction of total variance explained by each principal component
# (descending); displayed below.
disk_pca_var_ratio = pca.explained_variance_ratio_
disk_pca_var_ratio
```
array([3.30243754e-01, 1.71544391e-01, 7.60493006e-02, 5.18341272e-02,
4.42003375e-02, 4.33271063e-02, 3.48041088e-02, 2.97722478e-02,
2.46489335e-02, 2.32871078e-02, 2.18472874e-02, 2.08234802e-02,
1.88368236e-02, 1.46515528e-02, 1.33906995e-02, 1.31082127e-02,
1.26431818e-02, 1.02635468e-02, 8.50527563e-03, 6.67313759e-03,
5.67093795e-03, 4.52080435e-03, 3.95706905e-03, 3.42818513e-03,
2.97480215e-03, 2.03388978e-03, 1.93743825e-03, 1.18908135e-03,
1.03840753e-03, 9.96852284e-04, 4.92882867e-04, 4.01221758e-04,
2.62222107e-04, 2.00544302e-04, 1.63934052e-04, 1.09356880e-04,
8.25847751e-05, 3.84569844e-05, 2.94954493e-05, 1.71215415e-05,
9.11665012e-08, 7.85825298e-09])
```python
# Cumulative explained variance — shows how many components are needed to
# reach a given fraction of the total variance.
np.cumsum(disk_pca_var_ratio)
```
array([0.33024375, 0.50178814, 0.57783745, 0.62967157, 0.67387191,
0.71719902, 0.75200313, 0.78177537, 0.80642431, 0.82971141,
0.8515587 , 0.87238218, 0.89121901, 0.90587056, 0.91926126,
0.93236947, 0.94501265, 0.9552762 , 0.96378147, 0.97045461,
0.97612555, 0.98064635, 0.98460342, 0.98803161, 0.99100641,
0.9930403 , 0.99497774, 0.99616682, 0.99720523, 0.99820208,
0.99869496, 0.99909618, 0.99935841, 0.99955895, 0.99972289,
0.99983224, 0.99991483, 0.99995328, 0.99998278, 0.9999999 ,
0.99999999, 1. ])
### PC1 explains 33.0% and PC2 17.2% of variance. Together, if we keep PC1 and PC2 only, they explain 50.2%.
```python
print(abs( pca.components_ ))  # loading magnitudes; shape (n_components, n_features) = (42, 42)
```
[[2.40640506e-01 6.76635504e-02 5.11589646e-02 ... 2.48953044e-01
6.96978644e-02 2.42666930e-01]
[1.22834064e-01 3.38873416e-02 3.64881754e-02 ... 1.30213022e-01
2.24334709e-02 1.57322909e-03]
[1.10667907e-01 4.40623730e-01 2.15234458e-02 ... 4.65435274e-02
4.64297146e-01 8.49625915e-02]
...
[2.60194950e-02 4.48444812e-03 2.85762688e-03 ... 7.90706158e-01
1.65004432e-02 1.25783104e-02]
[7.65001052e-04 4.08623135e-04 1.88930717e-04 ... 1.41352564e-02
3.61874700e-04 7.04173505e-01]
[6.62038583e-05 1.84123455e-05 5.32308968e-06 ... 2.67438591e-04
1.95056200e-05 4.15108353e-03]]
```python
# Label rows PCA1..PCA42 and store the component loadings as a DataFrame:
# one row per principal component, one column per input feature.
# (List comprehension replaces the original append loop — same result.)
pca_num = ['PCA{}'.format(i) for i in range(1, 43)]
disk_pca_comp = pd.DataFrame(pca.components_, index=pca_num, columns=disk_names)
#disk_pca_comp.to_csv('../data/disk_pca_feature_importance.csv')
disk_pca_comp
```
<div>
<style scoped>
.dataframe tbody tr th:only-of-type {
vertical-align: middle;
}
.dataframe tbody tr th {
vertical-align: top;
}
.dataframe thead th {
text-align: right;
}
</style>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>amplitude</th>
<th>beyond_1_std</th>
<th>beyond_2_std</th>
<th>cusum</th>
<th>eta</th>
<th>eta_e</th>
<th>inter_percentile_range_25</th>
<th>inter_percentile_range_10</th>
<th>kurtosis</th>
<th>linear_fit_slope</th>
<th>...</th>
<th>periodogram_cusum</th>
<th>periodogram_eta</th>
<th>periodogram_inter_percentile_range_25</th>
<th>periodogram_standard_deviation</th>
<th>periodogram_percent_amplitude</th>
<th>chi2</th>
<th>skew</th>
<th>standard_deviation</th>
<th>stetson_K</th>
<th>weighted_mean</th>
</tr>
</thead>
<tbody>
<tr>
<th>PCA1</th>
<td>0.240641</td>
<td>0.067664</td>
<td>0.051159</td>
<td>-0.115029</td>
<td>0.137302</td>
<td>0.075372</td>
<td>0.248003</td>
<td>0.249652</td>
<td>-0.031399</td>
<td>-0.132705</td>
<td>...</td>
<td>0.014084</td>
<td>-0.079665</td>
<td>-0.053486</td>
<td>-0.075832</td>
<td>-0.099372</td>
<td>0.004554</td>
<td>0.029942</td>
<td>0.248953</td>
<td>0.069698</td>
<td>0.242667</td>
</tr>
<tr>
<th>PCA2</th>
<td>0.122834</td>
<td>-0.033887</td>
<td>-0.036488</td>
<td>0.294892</td>
<td>-0.252656</td>
<td>-0.244697</td>
<td>0.128756</td>
<td>0.127219</td>
<td>0.015257</td>
<td>-0.060617</td>
<td>...</td>
<td>-0.084805</td>
<td>-0.101684</td>
<td>-0.070917</td>
<td>0.276726</td>
<td>0.297989</td>
<td>0.077459</td>
<td>-0.002049</td>
<td>0.130213</td>
<td>-0.022433</td>
<td>-0.001573</td>
</tr>
<tr>
<th>PCA3</th>
<td>0.110668</td>
<td>-0.440624</td>
<td>0.021523</td>
<td>-0.032994</td>
<td>-0.048265</td>
<td>-0.196781</td>
<td>0.005286</td>
<td>0.028598</td>
<td>0.308892</td>
<td>-0.033628</td>
<td>...</td>
<td>0.096934</td>
<td>0.203207</td>
<td>0.209245</td>
<td>-0.185627</td>
<td>-0.143603</td>
<td>0.064536</td>
<td>-0.091605</td>
<td>0.046544</td>
<td>-0.464297</td>
<td>-0.084963</td>
</tr>
<tr>
<th>PCA4</th>
<td>0.000494</td>
<td>-0.002268</td>
<td>-0.073268</td>
<td>-0.059571</td>
<td>0.005476</td>
<td>0.065601</td>
<td>-0.014987</td>
<td>-0.004225</td>
<td>0.065093</td>
<td>0.161278</td>
<td>...</td>
<td>0.008447</td>
<td>0.011321</td>
<td>-0.067701</td>
<td>0.077782</td>
<td>0.047622</td>
<td>0.551772</td>
<td>-0.047553</td>
<td>0.005017</td>
<td>-0.010306</td>
<td>-0.022813</td>
</tr>
<tr>
<th>PCA5</th>
<td>-0.057242</td>
<td>0.174753</td>
<td>-0.039261</td>
<td>0.049704</td>
<td>-0.034902</td>
<td>-0.115325</td>
<td>0.014649</td>
<td>0.009976</td>
<td>-0.164154</td>
<td>-0.448059</td>
<td>...</td>
<td>0.129398</td>
<td>0.199709</td>
<td>0.219406</td>
<td>-0.145979</td>
<td>-0.097172</td>
<td>0.138034</td>
<td>0.091447</td>
<td>-0.000318</td>
<td>0.203978</td>
<td>-0.099504</td>
</tr>
<tr>
<th>PCA6</th>
<td>-0.052052</td>
<td>0.026644</td>
<td>0.520149</td>
<td>-0.001366</td>
<td>0.024414</td>
<td>0.144256</td>
<td>-0.027831</td>
<td>0.011104</td>
<td>-0.391716</td>
<td>0.052947</td>
<td>...</td>
<td>0.011393</td>
<td>-0.206131</td>
<td>0.026194</td>
<td>-0.117807</td>
<td>-0.069435</td>
<td>0.138908</td>
<td>0.213843</td>
<td>0.005031</td>
<td>0.145054</td>
<td>0.024405</td>
</tr>
<tr>
<th>PCA7</th>
<td>0.000641</td>
<td>-0.010174</td>
<td>-0.042834</td>
<td>-0.073012</td>
<td>0.296996</td>
<td>0.234968</td>
<td>-0.022913</td>
<td>-0.018516</td>
<td>0.111184</td>
<td>-0.061087</td>
<td>...</td>
<td>-0.570779</td>
<td>0.250415</td>
<td>-0.491595</td>
<td>-0.123302</td>
<td>-0.011683</td>
<td>0.041699</td>
<td>-0.034945</td>
<td>-0.016387</td>
<td>-0.047450</td>
<td>0.089390</td>
</tr>
<tr>
<th>PCA8</th>
<td>-0.058006</td>
<td>-0.143728</td>
<td>0.120051</td>
<td>0.010825</td>
<td>-0.008915</td>
<td>0.009628</td>
<td>-0.077229</td>
<td>-0.050950</td>
<td>0.102136</td>
<td>-0.416295</td>
<td>...</td>
<td>-0.088800</td>
<td>-0.079826</td>
<td>-0.226489</td>
<td>0.201895</td>
<td>0.123057</td>
<td>-0.014219</td>
<td>0.110834</td>
<td>-0.051049</td>
<td>-0.112473</td>
<td>-0.050104</td>
</tr>
<tr>
<th>PCA9</th>
<td>0.002833</td>
<td>0.056718</td>
<td>-0.051131</td>
<td>-0.011987</td>
<td>0.080884</td>
<td>0.054448</td>
<td>-0.001808</td>
<td>-0.005495</td>
<td>0.012896</td>
<td>-0.037137</td>
<td>...</td>
<td>-0.058358</td>
<td>-0.131013</td>
<td>0.103630</td>
<td>-0.177863</td>
<td>-0.149497</td>
<td>0.041703</td>
<td>-0.237385</td>
<td>-0.005505</td>
<td>-0.008996</td>
<td>0.013492</td>
</tr>
<tr>
<th>PCA10</th>
<td>-0.007350</td>
<td>0.079928</td>
<td>-0.052378</td>
<td>-0.006122</td>
<td>0.080461</td>
<td>0.119500</td>
<td>-0.023525</td>
<td>-0.022095</td>
<td>0.070454</td>
<td>-0.104252</td>
<td>...</td>
<td>0.032126</td>
<td>-0.311276</td>
<td>0.119936</td>
<td>-0.170156</td>
<td>-0.154859</td>
<td>0.047062</td>
<td>-0.461035</td>
<td>-0.022598</td>
<td>-0.047959</td>
<td>0.038239</td>
</tr>
<tr>
<th>PCA11</th>
<td>0.036876</td>
<td>-0.115330</td>
<td>-0.258293</td>
<td>-0.071032</td>
<td>0.084707</td>
<td>0.018674</td>
<td>-0.001190</td>
<td>-0.012292</td>
<td>0.132360</td>
<td>0.045014</td>
<td>...</td>
<td>0.007227</td>
<td>-0.092285</td>
<td>0.064317</td>
<td>-0.141875</td>
<td>-0.113569</td>
<td>0.018588</td>
<td>0.765386</td>
<td>-0.001335</td>
<td>0.002876</td>
<td>-0.011816</td>
</tr>
<tr>
<th>PCA12</th>
<td>0.004734</td>
<td>-0.052915</td>
<td>-0.126789</td>
<td>-0.009220</td>
<td>0.082592</td>
<td>0.166069</td>
<td>-0.034086</td>
<td>-0.032807</td>
<td>0.190759</td>
<td>-0.077221</td>
<td>...</td>
<td>0.178812</td>
<td>-0.339969</td>
<td>-0.075946</td>
<td>-0.022355</td>
<td>-0.040953</td>
<td>0.019562</td>
<td>0.172845</td>
<td>-0.026163</td>
<td>-0.058344</td>
<td>0.046325</td>
</tr>
<tr>
<th>PCA13</th>
<td>-0.005587</td>
<td>0.001179</td>
<td>-0.055742</td>
<td>0.047197</td>
<td>0.065374</td>
<td>0.257101</td>
<td>-0.037338</td>
<td>-0.030673</td>
<td>0.222847</td>
<td>-0.052574</td>
<td>...</td>
<td>0.461221</td>
<td>-0.310133</td>
<td>-0.238545</td>
<td>0.072345</td>
<td>0.085410</td>
<td>-0.003109</td>
<td>-0.048246</td>
<td>-0.027078</td>
<td>-0.051360</td>
<td>0.103942</td>
</tr>
<tr>
<th>PCA14</th>
<td>0.001132</td>
<td>0.277850</td>
<td>-0.693217</td>
<td>0.024322</td>
<td>-0.045599</td>
<td>-0.005664</td>
<td>-0.017913</td>
<td>0.021937</td>
<td>-0.133113</td>
<td>0.002663</td>
<td>...</td>
<td>-0.101236</td>
<td>-0.004210</td>
<td>0.102162</td>
<td>0.002587</td>
<td>-0.010594</td>
<td>0.034099</td>
<td>-0.024186</td>
<td>-0.008129</td>
<td>0.078710</td>
<td>0.024371</td>
</tr>
<tr>
<th>PCA15</th>
<td>-0.005029</td>
<td>-0.026142</td>
<td>0.064898</td>
<td>0.080879</td>
<td>-0.179285</td>
<td>0.000770</td>
<td>-0.025082</td>
<td>-0.028483</td>
<td>0.141100</td>
<td>-0.040496</td>
<td>...</td>
<td>-0.534688</td>
<td>-0.332057</td>
<td>0.343991</td>
<td>0.028365</td>
<td>-0.045333</td>
<td>0.114883</td>
<td>0.071142</td>
<td>-0.020697</td>
<td>-0.030744</td>
<td>0.045714</td>
</tr>
<tr>
<th>PCA16</th>
<td>0.004344</td>
<td>-0.018409</td>
<td>-0.010291</td>
<td>0.035501</td>
<td>-0.066132</td>
<td>0.012670</td>
<td>0.013340</td>
<td>0.002374</td>
<td>0.013115</td>
<td>0.033414</td>
<td>...</td>
<td>-0.138509</td>
<td>-0.094483</td>
<td>0.081697</td>
<td>0.015386</td>
<td>-0.016675</td>
<td>-0.390903</td>
<td>0.009329</td>
<td>0.005140</td>
<td>-0.017881</td>
<td>-0.001378</td>
</tr>
<tr>
<th>PCA17</th>
<td>-0.000206</td>
<td>0.146968</td>
<td>-0.183784</td>
<td>0.021856</td>
<td>0.033902</td>
<td>-0.046982</td>
<td>0.017500</td>
<td>0.000717</td>
<td>-0.099167</td>
<td>0.010701</td>
<td>...</td>
<td>0.135309</td>
<td>0.106541</td>
<td>-0.187159</td>
<td>0.032833</td>
<td>-0.006432</td>
<td>-0.004206</td>
<td>-0.004230</td>
<td>0.003690</td>
<td>-0.040634</td>
<td>-0.057638</td>
</tr>
<tr>
<th>PCA18</th>
<td>0.044209</td>
<td>-0.207805</td>
<td>-0.059411</td>
<td>0.083855</td>
<td>0.105117</td>
<td>-0.186948</td>
<td>0.039229</td>
<td>0.029028</td>
<td>-0.320286</td>
<td>0.001877</td>
<td>...</td>
<td>0.025783</td>
<td>-0.046318</td>
<td>-0.373210</td>
<td>-0.078978</td>
<td>-0.218598</td>
<td>-0.033364</td>
<td>-0.034271</td>
<td>0.029890</td>
<td>-0.052757</td>
<td>-0.296635</td>
</tr>
<tr>
<th>PCA19</th>
<td>-0.094198</td>
<td>0.408071</td>
<td>0.230123</td>
<td>0.154498</td>
<td>0.011647</td>
<td>-0.190583</td>
<td>0.041828</td>
<td>0.052840</td>
<td>0.616092</td>
<td>0.056925</td>
<td>...</td>
<td>0.076108</td>
<td>0.211938</td>
<td>-0.100282</td>
<td>-0.056432</td>
<td>-0.132161</td>
<td>0.023336</td>
<td>0.061519</td>
<td>0.017087</td>
<td>0.239751</td>
<td>-0.047652</td>
</tr>
<tr>
<th>PCA20</th>
<td>0.049538</td>
<td>-0.138698</td>
<td>0.013414</td>
<td>0.207638</td>
<td>-0.060274</td>
<td>0.091472</td>
<td>-0.092790</td>
<td>-0.069706</td>
<td>-0.165134</td>
<td>0.081504</td>
<td>...</td>
<td>0.141829</td>
<td>0.419570</td>
<td>0.085213</td>
<td>0.035433</td>
<td>-0.054516</td>
<td>0.163407</td>
<td>0.037662</td>
<td>-0.046447</td>
<td>-0.086362</td>
<td>0.422359</td>
</tr>
<tr>
<th>PCA21</th>
<td>-0.042559</td>
<td>-0.034867</td>
<td>-0.028720</td>
<td>-0.043837</td>
<td>0.015518</td>
<td>-0.212229</td>
<td>-0.037542</td>
<td>0.028482</td>
<td>-0.004819</td>
<td>-0.014371</td>
<td>...</td>
<td>-0.030465</td>
<td>-0.162114</td>
<td>-0.102707</td>
<td>-0.043345</td>
<td>0.010423</td>
<td>0.638374</td>
<td>-0.013597</td>
<td>0.024006</td>
<td>0.002166</td>
<td>-0.031681</td>
</tr>
<tr>
<th>PCA22</th>
<td>-0.228958</td>
<td>-0.188267</td>
<td>-0.089563</td>
<td>-0.129594</td>
<td>-0.088180</td>
<td>-0.508942</td>
<td>0.015613</td>
<td>-0.019329</td>
<td>-0.028387</td>
<td>-0.020021</td>
<td>...</td>
<td>-0.018221</td>
<td>-0.186664</td>
<td>-0.209884</td>
<td>-0.060606</td>
<td>-0.085880</td>
<td>-0.152705</td>
<td>-0.024798</td>
<td>-0.032951</td>
<td>0.047459</td>
<td>0.312649</td>
</tr>
<tr>
<th>PCA23</th>
<td>0.056933</td>
<td>0.172141</td>
<td>0.070585</td>
<td>-0.857342</td>
<td>-0.030632</td>
<td>-0.145804</td>
<td>-0.017874</td>
<td>-0.003445</td>
<td>-0.001970</td>
<td>0.019748</td>
<td>...</td>
<td>0.039765</td>
<td>0.060237</td>
<td>0.062243</td>
<td>0.118373</td>
<td>0.104205</td>
<td>0.003214</td>
<td>0.000058</td>
<td>-0.002035</td>
<td>-0.089250</td>
<td>0.001817</td>
</tr>
<tr>
<th>PCA24</th>
<td>0.180182</td>
<td>0.517640</td>
<td>0.129440</td>
<td>0.122561</td>
<td>-0.234052</td>
<td>-0.208333</td>
<td>-0.103658</td>
<td>-0.086518</td>
<td>-0.100011</td>
<td>0.117439</td>
<td>...</td>
<td>-0.044966</td>
<td>-0.131316</td>
<td>-0.210032</td>
<td>-0.087689</td>
<td>-0.120031</td>
<td>-0.028670</td>
<td>0.102360</td>
<td>-0.051700</td>
<td>-0.462913</td>
<td>0.055770</td>
</tr>
<tr>
<th>PCA25</th>
<td>-0.030678</td>
<td>0.190202</td>
<td>0.046785</td>
<td>0.137412</td>
<td>0.674942</td>
<td>-0.195940</td>
<td>-0.050142</td>
<td>-0.047605</td>
<td>-0.088240</td>
<td>-0.134557</td>
<td>...</td>
<td>0.020941</td>
<td>-0.016050</td>
<td>0.241149</td>
<td>0.062308</td>
<td>0.175701</td>
<td>-0.026249</td>
<td>0.060122</td>
<td>-0.054781</td>
<td>-0.329852</td>
<td>0.007348</td>
</tr>
<tr>
<th>PCA26</th>
<td>0.066198</td>
<td>-0.077247</td>
<td>0.005815</td>
<td>0.069202</td>
<td>0.340387</td>
<td>-0.153164</td>
<td>0.014470</td>
<td>0.034856</td>
<td>0.021025</td>
<td>0.616209</td>
<td>...</td>
<td>-0.014440</td>
<td>-0.115640</td>
<td>0.071750</td>
<td>-0.003320</td>
<td>0.060235</td>
<td>-0.031329</td>
<td>-0.029519</td>
<td>0.048406</td>
<td>0.138691</td>
<td>-0.037908</td>
</tr>
<tr>
<th>PCA27</th>
<td>0.239095</td>
<td>-0.124817</td>
<td>0.023166</td>
<td>0.000236</td>
<td>0.111053</td>
<td>-0.310641</td>
<td>-0.210334</td>
<td>-0.162801</td>
<td>0.062868</td>
<td>-0.103001</td>
<td>...</td>
<td>-0.004703</td>
<td>-0.045091</td>
<td>-0.030409</td>
<td>-0.026564</td>
<td>0.013595</td>
<td>-0.035718</td>
<td>-0.075394</td>
<td>-0.124010</td>
<td>0.506254</td>
<td>0.047413</td>
</tr>
<tr>
<th>PCA28</th>
<td>-0.080207</td>
<td>-0.009065</td>
<td>-0.032127</td>
<td>-0.046323</td>
<td>-0.250914</td>
<td>0.175689</td>
<td>-0.099333</td>
<td>-0.071586</td>
<td>0.006977</td>
<td>0.310165</td>
<td>...</td>
<td>0.009154</td>
<td>0.041356</td>
<td>-0.033900</td>
<td>-0.079612</td>
<td>0.037934</td>
<td>-0.020325</td>
<td>0.006913</td>
<td>-0.061666</td>
<td>-0.014156</td>
<td>-0.086544</td>
</tr>
<tr>
<th>PCA29</th>
<td>0.156670</td>
<td>-0.013321</td>
<td>0.002684</td>
<td>-0.018627</td>
<td>-0.128636</td>
<td>0.097118</td>
<td>-0.027244</td>
<td>-0.022227</td>
<td>-0.052704</td>
<td>-0.067800</td>
<td>...</td>
<td>0.091168</td>
<td>-0.025258</td>
<td>-0.079039</td>
<td>0.006274</td>
<td>-0.020304</td>
<td>-0.020875</td>
<td>-0.005247</td>
<td>-0.008123</td>
<td>0.036221</td>
<td>-0.079521</td>
</tr>
<tr>
<th>PCA30</th>
<td>0.146298</td>
<td>-0.012426</td>
<td>0.011742</td>
<td>-0.027359</td>
<td>0.008988</td>
<td>0.194287</td>
<td>-0.012322</td>
<td>-0.008942</td>
<td>0.061038</td>
<td>-0.092601</td>
<td>...</td>
<td>-0.089658</td>
<td>0.046456</td>
<td>0.075578</td>
<td>-0.042959</td>
<td>0.045592</td>
<td>-0.029651</td>
<td>-0.014095</td>
<td>-0.005212</td>
<td>0.064274</td>
<td>-0.084055</td>
</tr>
<tr>
<th>PCA31</th>
<td>0.022710</td>
<td>0.001593</td>
<td>-0.002430</td>
<td>0.032269</td>
<td>-0.062053</td>
<td>-0.048181</td>
<td>0.010745</td>
<td>0.006545</td>
<td>-0.009663</td>
<td>-0.030150</td>
<td>...</td>
<td>0.034425</td>
<td>-0.000842</td>
<td>-0.042136</td>
<td>-0.644836</td>
<td>0.296190</td>
<td>-0.020525</td>
<td>0.006889</td>
<td>0.003259</td>
<td>-0.010279</td>
<td>0.013968</td>
</tr>
<tr>
<th>PCA32</th>
<td>-0.017751</td>
<td>-0.000618</td>
<td>0.000108</td>
<td>0.003238</td>
<td>0.083049</td>
<td>0.009161</td>
<td>-0.020272</td>
<td>0.000947</td>
<td>-0.003769</td>
<td>0.011980</td>
<td>...</td>
<td>-0.006540</td>
<td>0.010821</td>
<td>0.032532</td>
<td>0.456761</td>
<td>-0.213179</td>
<td>0.015308</td>
<td>-0.000528</td>
<td>0.005205</td>
<td>-0.002792</td>
<td>-0.007243</td>
</tr>
<tr>
<th>PCA33</th>
<td>-0.002868</td>
<td>0.026244</td>
<td>-0.025867</td>
<td>0.002622</td>
<td>-0.002577</td>
<td>0.011764</td>
<td>-0.467914</td>
<td>0.211628</td>
<td>0.007334</td>
<td>-0.032221</td>
<td>...</td>
<td>-0.000845</td>
<td>0.009329</td>
<td>0.003253</td>
<td>-0.021642</td>
<td>0.005216</td>
<td>-0.072613</td>
<td>0.001546</td>
<td>0.296408</td>
<td>-0.013935</td>
<td>-0.002919</td>
</tr>
<tr>
<th>PCA34</th>
<td>-0.026443</td>
<td>-0.001591</td>
<td>0.002463</td>
<td>0.000753</td>
<td>0.028572</td>
<td>-0.000819</td>
<td>0.047129</td>
<td>-0.028442</td>
<td>-0.001298</td>
<td>0.002292</td>
<td>...</td>
<td>-0.002267</td>
<td>0.000218</td>
<td>0.009405</td>
<td>0.130491</td>
<td>-0.060913</td>
<td>0.005582</td>
<td>-0.001590</td>
<td>-0.024588</td>
<td>0.003002</td>
<td>-0.001730</td>
</tr>
<tr>
<th>PCA35</th>
<td>-0.812310</td>
<td>0.004598</td>
<td>0.003996</td>
<td>0.000629</td>
<td>0.003962</td>
<td>0.009683</td>
<td>0.054059</td>
<td>0.076250</td>
<td>-0.020401</td>
<td>0.007678</td>
<td>...</td>
<td>-0.000222</td>
<td>0.004939</td>
<td>0.008694</td>
<td>-0.044460</td>
<td>0.018280</td>
<td>-0.004009</td>
<td>-0.000156</td>
<td>0.036358</td>
<td>-0.026127</td>
<td>-0.001438</td>
</tr>
<tr>
<th>PCA36</th>
<td>0.012100</td>
<td>-0.001681</td>
<td>0.002681</td>
<td>0.008802</td>
<td>0.015643</td>
<td>-0.028994</td>
<td>-0.041793</td>
<td>0.343373</td>
<td>-0.003455</td>
<td>0.007963</td>
<td>...</td>
<td>0.000256</td>
<td>-0.005317</td>
<td>-0.003570</td>
<td>0.034445</td>
<td>-0.024594</td>
<td>-0.040196</td>
<td>0.001193</td>
<td>0.035619</td>
<td>-0.016471</td>
<td>-0.000891</td>
</tr>
<tr>
<th>PCA37</th>
<td>0.151902</td>
<td>-0.009286</td>
<td>0.006200</td>
<td>0.000140</td>
<td>-0.003337</td>
<td>-0.000200</td>
<td>0.088191</td>
<td>0.168948</td>
<td>0.003019</td>
<td>0.018931</td>
<td>...</td>
<td>-0.000296</td>
<td>-0.001071</td>
<td>0.001100</td>
<td>-0.038186</td>
<td>0.014096</td>
<td>0.028753</td>
<td>-0.000849</td>
<td>-0.091708</td>
<td>0.008815</td>
<td>0.004999</td>
</tr>
<tr>
<th>PCA38</th>
<td>0.008729</td>
<td>0.017715</td>
<td>-0.006295</td>
<td>0.001414</td>
<td>0.007425</td>
<td>-0.008788</td>
<td>0.294994</td>
<td>-0.720212</td>
<td>0.004241</td>
<td>-0.005715</td>
<td>...</td>
<td>-0.000063</td>
<td>0.003126</td>
<td>0.001594</td>
<td>-0.001645</td>
<td>-0.002430</td>
<td>0.005842</td>
<td>-0.000237</td>
<td>-0.092075</td>
<td>-0.003790</td>
<td>0.008037</td>
</tr>
<tr>
<th>PCA39</th>
<td>-0.020767</td>
<td>-0.010868</td>
<td>-0.000046</td>
<td>0.000628</td>
<td>0.000815</td>
<td>-0.001051</td>
<td>-0.659468</td>
<td>-0.093424</td>
<td>0.000015</td>
<td>-0.002566</td>
<td>...</td>
<td>0.000504</td>
<td>0.002696</td>
<td>-0.000307</td>
<td>0.010625</td>
<td>-0.004643</td>
<td>0.005401</td>
<td>-0.000643</td>
<td>-0.384429</td>
<td>0.005039</td>
<td>0.010506</td>
</tr>
<tr>
<th>PCA40</th>
<td>-0.026019</td>
<td>0.004484</td>
<td>-0.002858</td>
<td>-0.000138</td>
<td>0.006447</td>
<td>-0.003653</td>
<td>-0.266905</td>
<td>-0.367819</td>
<td>0.002672</td>
<td>-0.002477</td>
<td>...</td>
<td>-0.000297</td>
<td>0.001469</td>
<td>0.001341</td>
<td>0.002253</td>
<td>-0.000654</td>
<td>-0.002980</td>
<td>-0.004527</td>
<td>0.790706</td>
<td>0.016500</td>
<td>-0.012578</td>
</tr>
<tr>
<th>PCA41</th>
<td>0.000765</td>
<td>-0.000409</td>
<td>-0.000189</td>
<td>-0.000188</td>
<td>-0.000057</td>
<td>-0.000034</td>
<td>-0.000419</td>
<td>-0.004671</td>
<td>-0.000078</td>
<td>0.000691</td>
<td>...</td>
<td>-0.000018</td>
<td>-0.000138</td>
<td>0.000059</td>
<td>-0.000053</td>
<td>0.004317</td>
<td>0.000259</td>
<td>0.000353</td>
<td>-0.014135</td>
<td>-0.000362</td>
<td>-0.704174</td>
</tr>
<tr>
<th>PCA42</th>
<td>0.000066</td>
<td>0.000018</td>
<td>0.000005</td>
<td>-0.000117</td>
<td>-0.000280</td>
<td>0.000046</td>
<td>0.000045</td>
<td>0.000016</td>
<td>0.000103</td>
<td>-0.000028</td>
<td>...</td>
<td>-0.000228</td>
<td>0.000127</td>
<td>0.000923</td>
<td>-0.000547</td>
<td>0.707478</td>
<td>-0.000005</td>
<td>-0.000006</td>
<td>-0.000267</td>
<td>0.000020</td>
<td>0.004151</td>
</tr>
</tbody>
</table>
<p>42 rows × 42 columns</p>
</div>
```python
# Grouped bar chart comparing the |loading| of every DISK feature on the
# first two principal components.
labels = disk_names
# Absolute loadings: the sign of a PCA component is arbitrary, so only the
# magnitude matters for "importance".
pca1 = abs(np.array(disk_pca_comp.loc[['PCA1']])[0])
pca2 = abs(np.array(disk_pca_comp.loc[['PCA2']])[0])
x = np.arange(len(labels))  # the label locations
width = 0.35  # the width of the bars
fig, ax = plt.subplots(figsize=(14, 8))
# NOTE(review): the variance percentages in the legend are hard-coded —
# confirm they match pca.explained_variance_ratio_ for this run.
rects1 = ax.bar(x - width/2, pca1, width, label='PCA 1 (33.0% of var)', color='g')
rects2 = ax.bar(x + width/2, pca2, width, label='PCA 2 (17.2% of var)', color='r')
# Add some text for labels, title and custom x-axis tick labels, etc.
ax.set_ylabel('Absolute value of Importance')
ax.set_title('Importance of features by principal component for DISK')
ax.set_xticks(x)
ax.set_xticklabels(labels, rotation='vertical')
ax.legend(loc='upper left')
def autolabel(rects):
    """Attach a text label above each bar in *rects*, displaying its height."""
    for rect in rects:
        height = rect.get_height()
        # Centre the value horizontally on the bar, 3 points above its top.
        ax.annotate('{0:.2f}'.format(height),
                    xy=(rect.get_x() + rect.get_width() / 2, height),
                    xytext=(0, 3),  # 3 points vertical offset
                    textcoords="offset points",
                    ha='center', va='bottom')
autolabel(rects1)
autolabel(rects2)
fig.tight_layout()
#plt.savefig('../figs/pca/disk_pca_barchart.png', dpi=400, bbox_inches='tight')
```

## DISK PCA1
```python
disk_pca_comp.loc[['PCA1']]  # loadings of the top principal component (explains 33.0% of the variance)
```
<div>
<style scoped>
.dataframe tbody tr th:only-of-type {
vertical-align: middle;
}
.dataframe tbody tr th {
vertical-align: top;
}
.dataframe thead th {
text-align: right;
}
</style>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>amplitude</th>
<th>beyond_1_std</th>
<th>beyond_2_std</th>
<th>cusum</th>
<th>eta</th>
<th>eta_e</th>
<th>inter_percentile_range_25</th>
<th>inter_percentile_range_10</th>
<th>kurtosis</th>
<th>linear_fit_slope</th>
<th>...</th>
<th>periodogram_cusum</th>
<th>periodogram_eta</th>
<th>periodogram_inter_percentile_range_25</th>
<th>periodogram_standard_deviation</th>
<th>periodogram_percent_amplitude</th>
<th>chi2</th>
<th>skew</th>
<th>standard_deviation</th>
<th>stetson_K</th>
<th>weighted_mean</th>
</tr>
</thead>
<tbody>
<tr>
<th>PCA1</th>
<td>0.240641</td>
<td>0.067664</td>
<td>0.051159</td>
<td>-0.115029</td>
<td>0.137302</td>
<td>0.075372</td>
<td>0.248003</td>
<td>0.249652</td>
<td>-0.031399</td>
<td>-0.132705</td>
<td>...</td>
<td>0.014084</td>
<td>-0.079665</td>
<td>-0.053486</td>
<td>-0.075832</td>
<td>-0.099372</td>
<td>0.004554</td>
<td>0.029942</td>
<td>0.248953</td>
<td>0.069698</td>
<td>0.242667</td>
</tr>
</tbody>
</table>
<p>1 rows × 42 columns</p>
</div>
```python
# Rank the DISK features by their PCA1 loading (ascending) and list them.
pca1_loadings = disk_pca_comp.loc[['PCA1']].items()
ordered_disk_pca1 = OrderedDict(sorted(pca1_loadings, key=lambda kv: np.max(kv[1])))
for feature in ordered_disk_pca1:
    print(feature, ordered_disk_pca1[feature])
```
period_s_to_n_2 PCA1 -0.153468
Name: period_s_to_n_2, dtype: float32
period_s_to_n_1 PCA1 -0.153266
Name: period_s_to_n_1, dtype: float32
period_s_to_n_0 PCA1 -0.151558
Name: period_s_to_n_0, dtype: float32
linear_fit_slope PCA1 -0.132705
Name: linear_fit_slope, dtype: float32
cusum PCA1 -0.115029
Name: cusum, dtype: float32
periodogram_amplitude PCA1 -0.099479
Name: periodogram_amplitude, dtype: float32
periodogram_percent_amplitude PCA1 -0.099372
Name: periodogram_percent_amplitude, dtype: float32
linear_trend PCA1 -0.090612
Name: linear_trend, dtype: float32
periodogram_eta PCA1 -0.079665
Name: periodogram_eta, dtype: float32
periodogram_standard_deviation PCA1 -0.075832
Name: periodogram_standard_deviation, dtype: float32
period_0 PCA1 -0.065251
Name: period_0, dtype: float32
periodogram_inter_percentile_range_25 PCA1 -0.053486
Name: periodogram_inter_percentile_range_25, dtype: float32
period_1 PCA1 -0.040894
Name: period_1, dtype: float32
period_2 PCA1 -0.037593
Name: period_2, dtype: float32
kurtosis PCA1 -0.031399
Name: kurtosis, dtype: float32
magnitude_percentage_ratio_40_5 PCA1 -0.026648
Name: magnitude_percentage_ratio_40_5, dtype: float32
magnitude_percentage_ratio_20_10 PCA1 -0.020617
Name: magnitude_percentage_ratio_20_10, dtype: float32
median_buffer_range_percentage_5 PCA1 -0.014832
Name: median_buffer_range_percentage_5, dtype: float32
chi2 PCA1 0.004554
Name: chi2, dtype: float32
linear_fit_reduced_chi2 PCA1 0.006584
Name: linear_fit_reduced_chi2, dtype: float32
periodogram_cusum PCA1 0.014084
Name: periodogram_cusum, dtype: float32
skew PCA1 0.029942
Name: skew, dtype: float32
beyond_2_std PCA1 0.051159
Name: beyond_2_std, dtype: float32
beyond_1_std PCA1 0.067664
Name: beyond_1_std, dtype: float32
stetson_K PCA1 0.069698
Name: stetson_K, dtype: float32
eta_e PCA1 0.075372
Name: eta_e, dtype: float32
periodogram_beyond_2_std PCA1 0.112541
Name: periodogram_beyond_2_std, dtype: float32
periodogram_beyond_1_std PCA1 0.11353
Name: periodogram_beyond_1_std, dtype: float32
eta PCA1 0.137302
Name: eta, dtype: float32
percent_amplitude PCA1 0.226214
Name: percent_amplitude, dtype: float32
amplitude PCA1 0.240641
Name: amplitude, dtype: float32
weighted_mean PCA1 0.242667
Name: weighted_mean, dtype: float32
mean PCA1 0.243224
Name: mean, dtype: float32
percent_difference_magnitude_percentile_20 PCA1 0.246865
Name: percent_difference_magnitude_percentile_20, dtype: float32
percent_difference_magnitude_percentile_5 PCA1 0.247874
Name: percent_difference_magnitude_percentile_5, dtype: float32
inter_percentile_range_25 PCA1 0.248003
Name: inter_percentile_range_25, dtype: float32
maximum_slope PCA1 0.248175
Name: maximum_slope, dtype: float32
linear_trend_sigma PCA1 0.248537
Name: linear_trend_sigma, dtype: float32
standard_deviation PCA1 0.248953
Name: standard_deviation, dtype: float32
median_absolute_deviation PCA1 0.249465
Name: median_absolute_deviation, dtype: float32
linear_fit_slope_sigma PCA1 0.249618
Name: linear_fit_slope_sigma, dtype: float32
inter_percentile_range_10 PCA1 0.249652
Name: inter_percentile_range_10, dtype: float32
### most important features for PCA1: inter_percentile_range_10, linear_fit_slope_sigma, median_absolute_deviation, standard_deviation, linear_trend_sigma
## DISK PCA2
```python
# Rank the DISK features by their PCA2 loading (ascending) and list them.
pca2_loadings = disk_pca_comp.loc[['PCA2']].items()
ordered_disk_pca2 = OrderedDict(sorted(pca2_loadings, key=lambda kv: np.max(kv[1])))
for feature in ordered_disk_pca2:
    print(feature, ordered_disk_pca2[feature])
```
periodogram_beyond_1_std PCA2 -0.273865
Name: periodogram_beyond_1_std, dtype: float32
periodogram_beyond_2_std PCA2 -0.273664
Name: periodogram_beyond_2_std, dtype: float32
eta PCA2 -0.252656
Name: eta, dtype: float32
eta_e PCA2 -0.244697
Name: eta_e, dtype: float32
periodogram_eta PCA2 -0.101684
Name: periodogram_eta, dtype: float32
periodogram_cusum PCA2 -0.084805
Name: periodogram_cusum, dtype: float32
periodogram_inter_percentile_range_25 PCA2 -0.070917
Name: periodogram_inter_percentile_range_25, dtype: float32
median_buffer_range_percentage_5 PCA2 -0.06979
Name: median_buffer_range_percentage_5, dtype: float32
linear_fit_slope PCA2 -0.060617
Name: linear_fit_slope, dtype: float32
linear_trend PCA2 -0.052556
Name: linear_trend, dtype: float32
beyond_2_std PCA2 -0.036488
Name: beyond_2_std, dtype: float32
beyond_1_std PCA2 -0.033887
Name: beyond_1_std, dtype: float32
stetson_K PCA2 -0.022433
Name: stetson_K, dtype: float32
skew PCA2 -0.002049
Name: skew, dtype: float32
weighted_mean PCA2 -0.001573
Name: weighted_mean, dtype: float32
mean PCA2 -0.000263
Name: mean, dtype: float32
magnitude_percentage_ratio_20_10 PCA2 0.002208
Name: magnitude_percentage_ratio_20_10, dtype: float32
kurtosis PCA2 0.015257
Name: kurtosis, dtype: float32
period_2 PCA2 0.025377
Name: period_2, dtype: float32
period_1 PCA2 0.025451
Name: period_1, dtype: float32
magnitude_percentage_ratio_40_5 PCA2 0.032996
Name: magnitude_percentage_ratio_40_5, dtype: float32
period_0 PCA2 0.051506
Name: period_0, dtype: float32
chi2 PCA2 0.077459
Name: chi2, dtype: float32
linear_fit_slope_sigma PCA2 0.078176
Name: linear_fit_slope_sigma, dtype: float32
maximum_slope PCA2 0.089273
Name: maximum_slope, dtype: float32
linear_fit_reduced_chi2 PCA2 0.096925
Name: linear_fit_reduced_chi2, dtype: float32
median_absolute_deviation PCA2 0.122776
Name: median_absolute_deviation, dtype: float32
amplitude PCA2 0.122834
Name: amplitude, dtype: float32
inter_percentile_range_10 PCA2 0.127219
Name: inter_percentile_range_10, dtype: float32
percent_amplitude PCA2 0.128006
Name: percent_amplitude, dtype: float32
linear_trend_sigma PCA2 0.128698
Name: linear_trend_sigma, dtype: float32
inter_percentile_range_25 PCA2 0.128756
Name: inter_percentile_range_25, dtype: float32
standard_deviation PCA2 0.130213
Name: standard_deviation, dtype: float32
percent_difference_magnitude_percentile_5 PCA2 0.131169
Name: percent_difference_magnitude_percentile_5, dtype: float32
percent_difference_magnitude_percentile_20 PCA2 0.133391
Name: percent_difference_magnitude_percentile_20, dtype: float32
period_s_to_n_0 PCA2 0.225299
Name: period_s_to_n_0, dtype: float32
period_s_to_n_1 PCA2 0.235406
Name: period_s_to_n_1, dtype: float32
period_s_to_n_2 PCA2 0.244242
Name: period_s_to_n_2, dtype: float32
periodogram_standard_deviation PCA2 0.276726
Name: periodogram_standard_deviation, dtype: float32
cusum PCA2 0.294892
Name: cusum, dtype: float32
periodogram_percent_amplitude PCA2 0.297989
Name: periodogram_percent_amplitude, dtype: float32
periodogram_amplitude PCA2 0.297989
Name: periodogram_amplitude, dtype: float32
### most important features for PCA2: periodogram_amplitude, periodogram_percent_amplitude, cusum, periodogram_standard_deviation, period_s_to_n_2
## DISK PCA3
```python
# Rank the DISK features by their PCA3 loading (ascending) and list them.
# Fixed: the original reused the name `ordered_disk_pca1` here, silently
# clobbering the PCA1 ranking computed in the earlier cell; this cell ranks PCA3.
ordered_disk_pca3 = OrderedDict(sorted(disk_pca_comp.loc[['PCA3']].items(), key=lambda item: np.max(item[1])))
for key, value in ordered_disk_pca3.items():
    print(key, value)
```
stetson_K PCA3 -0.464297
Name: stetson_K, dtype: float32
beyond_1_std PCA3 -0.440624
Name: beyond_1_std, dtype: float32
magnitude_percentage_ratio_40_5 PCA3 -0.280382
Name: magnitude_percentage_ratio_40_5, dtype: float32
magnitude_percentage_ratio_20_10 PCA3 -0.255634
Name: magnitude_percentage_ratio_20_10, dtype: float32
eta_e PCA3 -0.196781
Name: eta_e, dtype: float32
periodogram_standard_deviation PCA3 -0.185627
Name: periodogram_standard_deviation, dtype: float32
periodogram_percent_amplitude PCA3 -0.143603
Name: periodogram_percent_amplitude, dtype: float32
periodogram_amplitude PCA3 -0.143333
Name: periodogram_amplitude, dtype: float32
skew PCA3 -0.091605
Name: skew, dtype: float32
weighted_mean PCA3 -0.084963
Name: weighted_mean, dtype: float32
mean PCA3 -0.083782
Name: mean, dtype: float32
median_buffer_range_percentage_5 PCA3 -0.061623
Name: median_buffer_range_percentage_5, dtype: float32
eta PCA3 -0.048265
Name: eta, dtype: float32
linear_trend PCA3 -0.04322
Name: linear_trend, dtype: float32
linear_fit_slope PCA3 -0.033628
Name: linear_fit_slope, dtype: float32
cusum PCA3 -0.032994
Name: cusum, dtype: float32
inter_percentile_range_25 PCA3 0.005286
Name: inter_percentile_range_25, dtype: float32
percent_difference_magnitude_percentile_20 PCA3 0.006329
Name: percent_difference_magnitude_percentile_20, dtype: float32
median_absolute_deviation PCA3 0.008163
Name: median_absolute_deviation, dtype: float32
beyond_2_std PCA3 0.021523
Name: beyond_2_std, dtype: float32
linear_fit_slope_sigma PCA3 0.025726
Name: linear_fit_slope_sigma, dtype: float32
inter_percentile_range_10 PCA3 0.028598
Name: inter_percentile_range_10, dtype: float32
percent_difference_magnitude_percentile_5 PCA3 0.041166
Name: percent_difference_magnitude_percentile_5, dtype: float32
linear_trend_sigma PCA3 0.045981
Name: linear_trend_sigma, dtype: float32
standard_deviation PCA3 0.046544
Name: standard_deviation, dtype: float32
linear_fit_reduced_chi2 PCA3 0.055942
Name: linear_fit_reduced_chi2, dtype: float32
period_1 PCA3 0.056291
Name: period_1, dtype: float32
chi2 PCA3 0.064536
Name: chi2, dtype: float32
maximum_slope PCA3 0.064921
Name: maximum_slope, dtype: float32
period_2 PCA3 0.064985
Name: period_2, dtype: float32
periodogram_beyond_2_std PCA3 0.072285
Name: periodogram_beyond_2_std, dtype: float32
period_s_to_n_2 PCA3 0.076699
Name: period_s_to_n_2, dtype: float32
period_s_to_n_1 PCA3 0.091518
Name: period_s_to_n_1, dtype: float32
periodogram_beyond_1_std PCA3 0.092281
Name: periodogram_beyond_1_std, dtype: float32
period_0 PCA3 0.095636
Name: period_0, dtype: float32
periodogram_cusum PCA3 0.096934
Name: periodogram_cusum, dtype: float32
period_s_to_n_0 PCA3 0.107897
Name: period_s_to_n_0, dtype: float32
amplitude PCA3 0.110668
Name: amplitude, dtype: float32
percent_amplitude PCA3 0.145778
Name: percent_amplitude, dtype: float32
periodogram_eta PCA3 0.203207
Name: periodogram_eta, dtype: float32
periodogram_inter_percentile_range_25 PCA3 0.209245
Name: periodogram_inter_percentile_range_25, dtype: float32
kurtosis PCA3 0.308892
Name: kurtosis, dtype: float32
```python
```
# DEEP
```python
# DEEP field: memory-map the object IDs and the feature matrix from disk
# (read-only; avoids loading the full arrays into RAM).
deep_oid = np.memmap('../data/oid_deep.dat', mode='r', dtype=np.uint64)
# One feature name per column of the feature matrix.
deep_names = open('../data/feature_deep.name').read().split()
# One row per object, one column per named feature (float32).
deep_x = np.memmap('../data/feature_deep.dat', mode='r', dtype=np.float32, shape=(deep_oid.size, len(deep_names)))
```
```python
# Wrap the memory-mapped DEEP feature matrix in a DataFrame indexed by object ID.
deep = pd.DataFrame(deep_x, index=deep_oid, columns=deep_names)
deep
```
<div>
<style scoped>
.dataframe tbody tr th:only-of-type {
vertical-align: middle;
}
.dataframe tbody tr th {
vertical-align: top;
}
.dataframe thead th {
text-align: right;
}
</style>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>amplitude</th>
<th>beyond_1_std</th>
<th>beyond_2_std</th>
<th>cusum</th>
<th>eta</th>
<th>eta_e</th>
<th>inter_percentile_range_25</th>
<th>inter_percentile_range_10</th>
<th>kurtosis</th>
<th>linear_fit_slope</th>
<th>...</th>
<th>periodogram_cusum</th>
<th>periodogram_eta</th>
<th>periodogram_inter_percentile_range_25</th>
<th>periodogram_standard_deviation</th>
<th>periodogram_percent_amplitude</th>
<th>chi2</th>
<th>skew</th>
<th>standard_deviation</th>
<th>stetson_K</th>
<th>weighted_mean</th>
</tr>
</thead>
<tbody>
<tr>
<th>795216100016711</th>
<td>0.540999</td>
<td>0.384615</td>
<td>0.038462</td>
<td>0.132028</td>
<td>1.397621</td>
<td>5.995161e+08</td>
<td>0.384998</td>
<td>0.626499</td>
<td>-0.370707</td>
<td>0.000028</td>
<td>...</td>
<td>0.158873</td>
<td>0.046190</td>
<td>0.988132</td>
<td>1.153393</td>
<td>12.302343</td>
<td>2.094434</td>
<td>-0.561686</td>
<td>0.243355</td>
<td>0.819022</td>
<td>20.531246</td>
</tr>
<tr>
<th>795216200000160</th>
<td>0.555500</td>
<td>0.309091</td>
<td>0.045455</td>
<td>0.150656</td>
<td>1.558752</td>
<td>2.858594e+09</td>
<td>0.257999</td>
<td>0.555500</td>
<td>0.195134</td>
<td>-0.000409</td>
<td>...</td>
<td>0.177428</td>
<td>0.015411</td>
<td>0.810837</td>
<td>0.988667</td>
<td>8.658415</td>
<td>1.084765</td>
<td>-0.186345</td>
<td>0.204943</td>
<td>0.773804</td>
<td>20.622486</td>
</tr>
<tr>
<th>795216200000423</th>
<td>0.682500</td>
<td>0.263566</td>
<td>0.062016</td>
<td>0.135337</td>
<td>1.415388</td>
<td>1.171590e+09</td>
<td>0.249001</td>
<td>0.525002</td>
<td>2.017293</td>
<td>-0.000368</td>
<td>...</td>
<td>0.137916</td>
<td>0.049931</td>
<td>0.924430</td>
<td>1.028984</td>
<td>6.990223</td>
<td>1.522744</td>
<td>-0.782636</td>
<td>0.212639</td>
<td>0.718653</td>
<td>20.564688</td>
</tr>
<tr>
<th>795216200021594</th>
<td>0.471500</td>
<td>0.324561</td>
<td>0.035088</td>
<td>0.120368</td>
<td>1.609693</td>
<td>1.157334e+09</td>
<td>0.191000</td>
<td>0.423700</td>
<td>0.339782</td>
<td>0.000081</td>
<td>...</td>
<td>0.154165</td>
<td>0.064802</td>
<td>1.049195</td>
<td>1.079942</td>
<td>10.374885</td>
<td>1.000001</td>
<td>-0.109285</td>
<td>0.165479</td>
<td>0.763212</td>
<td>20.291206</td>
</tr>
<tr>
<th>795216100016924</th>
<td>0.707000</td>
<td>0.330275</td>
<td>0.027523</td>
<td>0.222071</td>
<td>1.255422</td>
<td>1.167658e+09</td>
<td>0.350498</td>
<td>0.581999</td>
<td>0.391907</td>
<td>-0.000741</td>
<td>...</td>
<td>0.189391</td>
<td>0.012856</td>
<td>0.738287</td>
<td>1.210104</td>
<td>9.334059</td>
<td>1.520369</td>
<td>-0.238332</td>
<td>0.242174</td>
<td>0.775877</td>
<td>20.811619</td>
</tr>
<tr>
<th>...</th>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
</tr>
<tr>
<th>795202400008499</th>
<td>0.091500</td>
<td>0.288390</td>
<td>0.033708</td>
<td>0.260133</td>
<td>1.086497</td>
<td>8.582294e+09</td>
<td>0.023000</td>
<td>0.047600</td>
<td>4.213814</td>
<td>0.000180</td>
<td>...</td>
<td>0.165052</td>
<td>0.029675</td>
<td>0.666062</td>
<td>1.225024</td>
<td>46.324841</td>
<td>3.317819</td>
<td>-0.502687</td>
<td>0.020036</td>
<td>0.753349</td>
<td>14.698151</td>
</tr>
<tr>
<th>795202400035369</th>
<td>0.509000</td>
<td>0.274336</td>
<td>0.079646</td>
<td>0.154595</td>
<td>1.399596</td>
<td>3.628485e+09</td>
<td>0.209000</td>
<td>0.486799</td>
<td>0.559496</td>
<td>0.000914</td>
<td>...</td>
<td>0.191887</td>
<td>0.031261</td>
<td>0.762117</td>
<td>0.960851</td>
<td>12.348410</td>
<td>3.105192</td>
<td>-0.516846</td>
<td>0.189333</td>
<td>0.753971</td>
<td>20.020897</td>
</tr>
<tr>
<th>795202400008473</th>
<td>0.248000</td>
<td>0.225806</td>
<td>0.046595</td>
<td>0.153000</td>
<td>1.614792</td>
<td>5.882888e+09</td>
<td>0.056499</td>
<td>0.110001</td>
<td>5.453317</td>
<td>0.000202</td>
<td>...</td>
<td>0.122866</td>
<td>0.031222</td>
<td>0.750759</td>
<td>0.913361</td>
<td>16.291544</td>
<td>1.183683</td>
<td>0.588551</td>
<td>0.051008</td>
<td>0.729488</td>
<td>18.820463</td>
</tr>
<tr>
<th>795202400026124</th>
<td>0.506001</td>
<td>0.285124</td>
<td>0.053719</td>
<td>0.077786</td>
<td>1.764071</td>
<td>1.560587e+10</td>
<td>0.168001</td>
<td>0.353199</td>
<td>1.486499</td>
<td>0.000263</td>
<td>...</td>
<td>0.157890</td>
<td>0.029492</td>
<td>0.697904</td>
<td>0.765475</td>
<td>10.299134</td>
<td>1.628370</td>
<td>-0.241796</td>
<td>0.145307</td>
<td>0.733241</td>
<td>20.069075</td>
</tr>
<tr>
<th>795202400015860</th>
<td>0.158999</td>
<td>0.284698</td>
<td>0.049822</td>
<td>0.108138</td>
<td>1.959518</td>
<td>1.425223e+10</td>
<td>0.067249</td>
<td>0.124401</td>
<td>0.671093</td>
<td>0.000180</td>
<td>...</td>
<td>0.177229</td>
<td>0.031867</td>
<td>0.700854</td>
<td>0.812803</td>
<td>11.200914</td>
<td>1.078436</td>
<td>-0.171548</td>
<td>0.050611</td>
<td>0.776411</td>
<td>18.901840</td>
</tr>
</tbody>
</table>
<p>406611 rows × 42 columns</p>
</div>
```python
fig, ax = plt.subplots(figsize=(14, 14))
# Fit PCA to the DEEP features; applypca is defined earlier in this notebook —
# presumably it standardises the data before fitting (TODO confirm).
pca, x_new, score, coeff = applypca(deep) #apply Deep field
plt.xlim(-0.2,0.4)
plt.ylim(-0.4,0.4)
plt.xlabel("PC{}".format(1))
plt.ylabel("PC{}".format(2))
plt.title("DEEP")
#Call the function. Use only the 2 PCs.
# Loading arrows only ('white' hides the data points behind them).
pcaplot(x_new[:,0:2], np.transpose(pca.components_[0:2, :]), 'white') #just show components, not data
#plt.savefig('../figs/pca/deep_pca_importance.png', dpi=400, bbox_inches='tight')
```

```python
fig, ax = plt.subplots(figsize=(14, 14))
plt.xlabel("PC{}".format(1))
plt.ylabel("PC{}".format(2))
plt.title("DEEP")
# Same biplot as above, but with the projected data points drawn in orange
# (reuses `pca` and `x_new` from the previous cell).
pcaplot(x_new[:,0:2], np.transpose(pca.components_[0:2, :]), 'darkorange')
#plt.savefig('../figs/pca/deep_pca_overlay_data.png', dpi=400, bbox_inches='tight')
```

```python
# Fraction of total variance explained by each DEEP principal component.
deep_pca_var_ratio = pca.explained_variance_ratio_
deep_pca_var_ratio
```
array([3.30839083e-01, 1.65770292e-01, 6.68013957e-02, 5.67478651e-02,
4.40056176e-02, 4.11021357e-02, 3.46333497e-02, 2.47985825e-02,
2.33381880e-02, 2.17114871e-02, 2.11133758e-02, 1.99657858e-02,
1.92385017e-02, 1.88338924e-02, 1.52617986e-02, 1.48779401e-02,
1.32618745e-02, 1.07423864e-02, 9.15490527e-03, 7.64960475e-03,
6.25245674e-03, 5.59673682e-03, 5.07861806e-03, 4.85461658e-03,
4.26060876e-03, 3.84531208e-03, 3.11307751e-03, 2.53717334e-03,
1.58270998e-03, 6.46678733e-04, 5.19820489e-04, 4.32550518e-04,
4.03452494e-04, 3.10351281e-04, 2.51909685e-04, 1.83818112e-04,
1.38031121e-04, 5.94990636e-05, 5.33841947e-05, 2.99102749e-05,
1.10973491e-06, 1.12428792e-07])
```python
# Cumulative explained variance — shows how many components are needed to
# reach a given coverage of the total variance.
np.cumsum(deep_pca_var_ratio)
```
array([0.33083908, 0.49660938, 0.56341077, 0.62015864, 0.66416425,
0.70526639, 0.73989974, 0.76469832, 0.78803651, 0.809748 ,
0.83086137, 0.85082716, 0.87006566, 0.88889955, 0.90416135,
0.91903929, 0.93230117, 0.94304355, 0.95219846, 0.95984806,
0.96610052, 0.97169726, 0.97677587, 0.98163049, 0.9858911 ,
0.98973641, 0.99284949, 0.99538666, 0.99696937, 0.99761605,
0.99813587, 0.99856842, 0.99897187, 0.99928223, 0.99953414,
0.99971795, 0.99985598, 0.99991548, 0.99996887, 0.99999878,
0.99999989, 1. ])
### PC1 explains 33.1% and PC2 16.6% of variance. Together, if we keep PC1 and PC2 only, they explain 49.7% of variance
```python
print(abs( pca.components_ )) # |loadings|; shape (42, 42) = [n_components, n_features]
```
[[2.5027907e-01 4.8895895e-02 3.6697686e-03 ... 2.6399526e-01
2.0056916e-02 2.2212499e-01]
[2.7381703e-02 1.1195697e-02 1.2580261e-02 ... 3.5933591e-02
1.9924736e-03 4.7486845e-02]
[1.2206027e-01 3.7620080e-01 3.3774674e-02 ... 3.4099955e-02
3.7678537e-01 8.6400993e-02]
...
[7.8610204e-02 7.1074963e-03 9.4995610e-03 ... 7.9523170e-01
1.3340116e-02 4.0870424e-02]
[6.7367093e-03 3.5857444e-04 8.4200059e-05 ... 5.3456873e-02
5.6267977e-03 7.0098525e-01]
[1.3206521e-04 3.3836768e-05 4.5038905e-07 ... 2.3014843e-04
1.3013138e-04 2.1422207e-03]]
```python
# Row labels PCA1..PCA42 — one per principal component (42 features in total).
pca_num = ['PCA{}'.format(i) for i in range(1, 43)]
# Fixed: the original labelled the DEEP component matrix with `disk_names`
# (a copy-paste slip from the DISK section); the columns here are the DEEP
# feature names used to build `deep` above.
deep_pca_comp = pd.DataFrame(pca.components_, index=pca_num, columns=deep_names)
#deep_pca_comp.to_csv('../data/deep_pca_feature_importance.csv')
deep_pca_comp
```
<div>
<style scoped>
.dataframe tbody tr th:only-of-type {
vertical-align: middle;
}
.dataframe tbody tr th {
vertical-align: top;
}
.dataframe thead th {
text-align: right;
}
</style>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>amplitude</th>
<th>beyond_1_std</th>
<th>beyond_2_std</th>
<th>cusum</th>
<th>eta</th>
<th>eta_e</th>
<th>inter_percentile_range_25</th>
<th>inter_percentile_range_10</th>
<th>kurtosis</th>
<th>linear_fit_slope</th>
<th>...</th>
<th>periodogram_cusum</th>
<th>periodogram_eta</th>
<th>periodogram_inter_percentile_range_25</th>
<th>periodogram_standard_deviation</th>
<th>periodogram_percent_amplitude</th>
<th>chi2</th>
<th>skew</th>
<th>standard_deviation</th>
<th>stetson_K</th>
<th>weighted_mean</th>
</tr>
</thead>
<tbody>
<tr>
<th>PCA1</th>
<td>0.250279</td>
<td>0.048896</td>
<td>-3.669769e-03</td>
<td>0.085912</td>
<td>-0.101881</td>
<td>-0.189234</td>
<td>0.261276</td>
<td>0.262704</td>
<td>-0.022062</td>
<td>-0.054601</td>
<td>...</td>
<td>0.046677</td>
<td>0.173186</td>
<td>0.152227</td>
<td>0.095246</td>
<td>-0.064890</td>
<td>0.041467</td>
<td>-0.113004</td>
<td>0.263995</td>
<td>-0.020057</td>
<td>0.222125</td>
</tr>
<tr>
<th>PCA2</th>
<td>0.027382</td>
<td>0.011196</td>
<td>-1.258026e-02</td>
<td>0.261779</td>
<td>-0.271932</td>
<td>-0.103574</td>
<td>0.035435</td>
<td>0.036006</td>
<td>-0.020417</td>
<td>0.071858</td>
<td>...</td>
<td>-0.006077</td>
<td>-0.040501</td>
<td>0.035786</td>
<td>0.287994</td>
<td>0.342611</td>
<td>0.058168</td>
<td>-0.052625</td>
<td>0.035934</td>
<td>-0.001992</td>
<td>-0.047487</td>
</tr>
<tr>
<th>PCA3</th>
<td>0.122060</td>
<td>-0.376201</td>
<td>3.377467e-02</td>
<td>-0.037247</td>
<td>0.019887</td>
<td>0.039662</td>
<td>-0.014930</td>
<td>0.005635</td>
<td>0.308662</td>
<td>0.080546</td>
<td>...</td>
<td>0.105780</td>
<td>-0.087479</td>
<td>-0.078182</td>
<td>-0.082969</td>
<td>-0.021108</td>
<td>0.383842</td>
<td>-0.089703</td>
<td>0.034100</td>
<td>-0.376785</td>
<td>-0.086401</td>
</tr>
<tr>
<th>PCA4</th>
<td>-0.042274</td>
<td>0.303286</td>
<td>-1.538903e-01</td>
<td>-0.088381</td>
<td>0.121655</td>
<td>0.090487</td>
<td>0.065495</td>
<td>0.029510</td>
<td>-0.117118</td>
<td>-0.153463</td>
<td>...</td>
<td>-0.125996</td>
<td>-0.019933</td>
<td>-0.053059</td>
<td>-0.023941</td>
<td>0.038872</td>
<td>0.406520</td>
<td>0.113223</td>
<td>0.013447</td>
<td>0.365421</td>
<td>-0.093342</td>
</tr>
<tr>
<th>PCA5</th>
<td>-0.110140</td>
<td>-0.063028</td>
<td>-1.674090e-01</td>
<td>0.265454</td>
<td>-0.192903</td>
<td>-0.132160</td>
<td>-0.048810</td>
<td>-0.056193</td>
<td>0.167754</td>
<td>-0.167759</td>
<td>...</td>
<td>0.137101</td>
<td>-0.020719</td>
<td>0.362520</td>
<td>0.217307</td>
<td>-0.098622</td>
<td>0.057046</td>
<td>0.031138</td>
<td>-0.054714</td>
<td>-0.053785</td>
<td>-0.257235</td>
</tr>
<tr>
<th>PCA6</th>
<td>0.054127</td>
<td>-0.146556</td>
<td>-9.024492e-02</td>
<td>-0.152121</td>
<td>0.077747</td>
<td>0.023958</td>
<td>-0.021559</td>
<td>-0.019421</td>
<td>0.226393</td>
<td>-0.595149</td>
<td>...</td>
<td>-0.212131</td>
<td>0.038284</td>
<td>-0.110976</td>
<td>-0.006140</td>
<td>0.104398</td>
<td>-0.024831</td>
<td>-0.023798</td>
<td>-0.003740</td>
<td>-0.207311</td>
<td>0.031491</td>
</tr>
<tr>
<th>PCA7</th>
<td>-0.077636</td>
<td>0.008747</td>
<td>5.413842e-01</td>
<td>0.007617</td>
<td>-0.008544</td>
<td>0.017616</td>
<td>-0.026538</td>
<td>0.029141</td>
<td>-0.375440</td>
<td>-0.142187</td>
<td>...</td>
<td>-0.171153</td>
<td>-0.017399</td>
<td>0.097517</td>
<td>0.073942</td>
<td>-0.022919</td>
<td>0.112751</td>
<td>0.047101</td>
<td>0.009219</td>
<td>0.159885</td>
<td>-0.054061</td>
</tr>
<tr>
<th>PCA8</th>
<td>0.000089</td>
<td>0.029287</td>
<td>4.127665e-02</td>
<td>0.045063</td>
<td>-0.023573</td>
<td>0.046430</td>
<td>0.008440</td>
<td>0.006012</td>
<td>-0.051306</td>
<td>-0.085055</td>
<td>...</td>
<td>0.238073</td>
<td>-0.067362</td>
<td>-0.167565</td>
<td>-0.089774</td>
<td>-0.044443</td>
<td>0.007548</td>
<td>-0.044045</td>
<td>0.005379</td>
<td>0.036097</td>
<td>0.003867</td>
</tr>
<tr>
<th>PCA9</th>
<td>-0.019879</td>
<td>0.090935</td>
<td>1.105561e-01</td>
<td>0.062842</td>
<td>-0.140756</td>
<td>0.013909</td>
<td>0.007983</td>
<td>0.007724</td>
<td>-0.066282</td>
<td>-0.210813</td>
<td>...</td>
<td>0.766920</td>
<td>-0.109741</td>
<td>-0.317285</td>
<td>-0.108635</td>
<td>-0.043330</td>
<td>0.024120</td>
<td>-0.093441</td>
<td>0.002532</td>
<td>0.089133</td>
<td>-0.017030</td>
</tr>
<tr>
<th>PCA10</th>
<td>-0.002294</td>
<td>-0.004299</td>
<td>1.630130e-02</td>
<td>0.017776</td>
<td>0.002147</td>
<td>-0.016709</td>
<td>0.003156</td>
<td>0.004390</td>
<td>0.029274</td>
<td>-0.005224</td>
<td>...</td>
<td>0.023792</td>
<td>0.040977</td>
<td>0.015290</td>
<td>-0.014618</td>
<td>-0.000064</td>
<td>-0.008849</td>
<td>0.070714</td>
<td>0.002683</td>
<td>0.015541</td>
<td>0.000217</td>
</tr>
<tr>
<th>PCA11</th>
<td>0.006750</td>
<td>-0.067736</td>
<td>6.506315e-02</td>
<td>0.030078</td>
<td>0.008604</td>
<td>-0.048010</td>
<td>0.057251</td>
<td>0.063566</td>
<td>0.310883</td>
<td>0.022350</td>
<td>...</td>
<td>0.132799</td>
<td>0.147638</td>
<td>-0.001585</td>
<td>-0.038973</td>
<td>0.038239</td>
<td>-0.085235</td>
<td>0.835852</td>
<td>0.045742</td>
<td>0.153659</td>
<td>-0.024092</td>
</tr>
<tr>
<th>PCA12</th>
<td>-0.041433</td>
<td>0.106581</td>
<td>-5.055264e-03</td>
<td>-0.193734</td>
<td>-0.118410</td>
<td>-0.365454</td>
<td>-0.042416</td>
<td>-0.025455</td>
<td>0.025639</td>
<td>0.022772</td>
<td>...</td>
<td>0.208139</td>
<td>0.429642</td>
<td>0.191607</td>
<td>0.114276</td>
<td>0.129528</td>
<td>0.134858</td>
<td>-0.071169</td>
<td>-0.035387</td>
<td>0.038727</td>
<td>-0.037006</td>
</tr>
<tr>
<th>PCA13</th>
<td>-0.002024</td>
<td>-0.255688</td>
<td>6.045628e-01</td>
<td>0.060051</td>
<td>0.022852</td>
<td>-0.088160</td>
<td>-0.004197</td>
<td>-0.067338</td>
<td>0.013474</td>
<td>-0.074983</td>
<td>...</td>
<td>-0.039458</td>
<td>0.250514</td>
<td>0.086164</td>
<td>-0.091776</td>
<td>0.006233</td>
<td>0.050459</td>
<td>-0.100628</td>
<td>-0.015350</td>
<td>-0.002737</td>
<td>0.022257</td>
</tr>
<tr>
<th>PCA14</th>
<td>-0.008484</td>
<td>0.229596</td>
<td>-2.814992e-01</td>
<td>0.138552</td>
<td>0.100212</td>
<td>-0.071815</td>
<td>-0.030869</td>
<td>0.007931</td>
<td>-0.068367</td>
<td>-0.101989</td>
<td>...</td>
<td>-0.058596</td>
<td>0.366878</td>
<td>-0.006932</td>
<td>-0.254734</td>
<td>-0.029344</td>
<td>0.022011</td>
<td>-0.194677</td>
<td>-0.018926</td>
<td>0.016612</td>
<td>0.013532</td>
</tr>
<tr>
<th>PCA15</th>
<td>-0.020050</td>
<td>0.052996</td>
<td>1.297933e-01</td>
<td>0.035455</td>
<td>0.003057</td>
<td>0.185170</td>
<td>-0.051977</td>
<td>-0.017213</td>
<td>0.243429</td>
<td>-0.018558</td>
<td>...</td>
<td>0.003615</td>
<td>-0.171301</td>
<td>0.105201</td>
<td>0.105607</td>
<td>0.038868</td>
<td>0.266612</td>
<td>0.008578</td>
<td>-0.032974</td>
<td>0.159564</td>
<td>0.219806</td>
</tr>
<tr>
<th>PCA16</th>
<td>-0.020092</td>
<td>0.250272</td>
<td>-9.450806e-02</td>
<td>0.010606</td>
<td>0.003813</td>
<td>0.094470</td>
<td>-0.012569</td>
<td>-0.033855</td>
<td>0.193535</td>
<td>0.002145</td>
<td>...</td>
<td>-0.010778</td>
<td>-0.106197</td>
<td>0.048766</td>
<td>0.053569</td>
<td>0.018418</td>
<td>0.139857</td>
<td>0.012962</td>
<td>-0.023871</td>
<td>0.046698</td>
<td>0.100624</td>
</tr>
<tr>
<th>PCA17</th>
<td>-0.024224</td>
<td>-0.008345</td>
<td>1.496303e-02</td>
<td>0.073849</td>
<td>0.095791</td>
<td>-0.413481</td>
<td>-0.011627</td>
<td>-0.010355</td>
<td>0.068402</td>
<td>0.078832</td>
<td>...</td>
<td>-0.173874</td>
<td>0.208188</td>
<td>-0.499673</td>
<td>-0.205440</td>
<td>-0.142725</td>
<td>0.148337</td>
<td>0.095356</td>
<td>-0.010431</td>
<td>0.005697</td>
<td>-0.006260</td>
</tr>
<tr>
<th>PCA18</th>
<td>0.070507</td>
<td>-0.349030</td>
<td>-2.876777e-01</td>
<td>-0.006966</td>
<td>-0.070426</td>
<td>0.100597</td>
<td>-0.037634</td>
<td>-0.055219</td>
<td>-0.573772</td>
<td>-0.010053</td>
<td>...</td>
<td>0.050781</td>
<td>0.199386</td>
<td>0.057219</td>
<td>0.050940</td>
<td>-0.025065</td>
<td>0.161126</td>
<td>0.280011</td>
<td>-0.025839</td>
<td>-0.169652</td>
<td>-0.034670</td>
</tr>
<tr>
<th>PCA19</th>
<td>-0.069118</td>
<td>0.160785</td>
<td>1.009238e-01</td>
<td>0.101246</td>
<td>-0.147913</td>
<td>0.281794</td>
<td>-0.002862</td>
<td>0.001156</td>
<td>0.246549</td>
<td>0.056756</td>
<td>...</td>
<td>-0.045887</td>
<td>0.414707</td>
<td>-0.006581</td>
<td>0.000227</td>
<td>-0.118741</td>
<td>-0.039426</td>
<td>-0.167874</td>
<td>-0.015136</td>
<td>0.022229</td>
<td>-0.290349</td>
</tr>
<tr>
<th>PCA20</th>
<td>0.067080</td>
<td>0.034290</td>
<td>1.170901e-02</td>
<td>0.379022</td>
<td>-0.285628</td>
<td>-0.104090</td>
<td>0.066356</td>
<td>0.061984</td>
<td>-0.067809</td>
<td>-0.098182</td>
<td>...</td>
<td>-0.251536</td>
<td>-0.355941</td>
<td>-0.019683</td>
<td>-0.273166</td>
<td>-0.132032</td>
<td>0.027933</td>
<td>0.047234</td>
<td>0.061984</td>
<td>0.000324</td>
<td>-0.223641</td>
</tr>
<tr>
<th>PCA21</th>
<td>-0.013797</td>
<td>-0.121420</td>
<td>-8.873282e-02</td>
<td>0.072135</td>
<td>-0.485580</td>
<td>0.004113</td>
<td>-0.070574</td>
<td>-0.073427</td>
<td>0.044544</td>
<td>-0.159048</td>
<td>...</td>
<td>-0.056702</td>
<td>0.076148</td>
<td>0.155632</td>
<td>-0.132680</td>
<td>-0.272370</td>
<td>0.046170</td>
<td>0.036043</td>
<td>-0.069011</td>
<td>0.174402</td>
<td>0.331609</td>
</tr>
<tr>
<th>PCA22</th>
<td>-0.144112</td>
<td>-0.406916</td>
<td>-1.531998e-01</td>
<td>0.469528</td>
<td>0.273190</td>
<td>0.266348</td>
<td>-0.008680</td>
<td>-0.019240</td>
<td>0.003717</td>
<td>0.024946</td>
<td>...</td>
<td>0.066925</td>
<td>0.159126</td>
<td>-0.040581</td>
<td>-0.030922</td>
<td>0.082395</td>
<td>0.021574</td>
<td>-0.132195</td>
<td>-0.019829</td>
<td>0.300732</td>
<td>0.096412</td>
</tr>
<tr>
<th>PCA23</th>
<td>-0.031933</td>
<td>-0.044014</td>
<td>-6.975994e-03</td>
<td>0.201239</td>
<td>-0.224303</td>
<td>-0.115881</td>
<td>-0.040453</td>
<td>-0.039006</td>
<td>0.120779</td>
<td>-0.169474</td>
<td>...</td>
<td>-0.214390</td>
<td>0.055439</td>
<td>-0.363676</td>
<td>0.238386</td>
<td>0.180012</td>
<td>-0.032325</td>
<td>-0.029966</td>
<td>-0.038303</td>
<td>0.099877</td>
<td>0.061933</td>
</tr>
<tr>
<th>PCA24</th>
<td>-0.008502</td>
<td>-0.388359</td>
<td>-1.417370e-01</td>
<td>-0.359827</td>
<td>0.045901</td>
<td>-0.240315</td>
<td>0.089195</td>
<td>0.064587</td>
<td>0.059390</td>
<td>-0.241240</td>
<td>...</td>
<td>0.012393</td>
<td>-0.137171</td>
<td>0.050579</td>
<td>0.014797</td>
<td>-0.009503</td>
<td>-0.055039</td>
<td>-0.183252</td>
<td>0.055458</td>
<td>0.502950</td>
<td>-0.174900</td>
</tr>
<tr>
<th>PCA25</th>
<td>0.162484</td>
<td>0.194560</td>
<td>8.989872e-02</td>
<td>0.191090</td>
<td>0.298832</td>
<td>0.095124</td>
<td>-0.048376</td>
<td>-0.033756</td>
<td>-0.066193</td>
<td>-0.539421</td>
<td>...</td>
<td>0.081807</td>
<td>0.081254</td>
<td>0.105976</td>
<td>0.065050</td>
<td>-0.002133</td>
<td>0.021419</td>
<td>0.129325</td>
<td>0.004536</td>
<td>-0.121443</td>
<td>-0.029590</td>
</tr>
<tr>
<th>PCA26</th>
<td>0.050482</td>
<td>-0.039288</td>
<td>6.067143e-03</td>
<td>-0.295308</td>
<td>-0.408925</td>
<td>0.530456</td>
<td>0.083876</td>
<td>0.080704</td>
<td>-0.008262</td>
<td>-0.057421</td>
<td>...</td>
<td>-0.073495</td>
<td>0.212181</td>
<td>-0.199266</td>
<td>-0.059606</td>
<td>0.072338</td>
<td>-0.001333</td>
<td>0.020227</td>
<td>0.084195</td>
<td>0.035392</td>
<td>-0.070626</td>
</tr>
<tr>
<th>PCA27</th>
<td>-0.143790</td>
<td>0.066836</td>
<td>-5.550228e-03</td>
<td>-0.140129</td>
<td>-0.265287</td>
<td>-0.155063</td>
<td>-0.131969</td>
<td>-0.111932</td>
<td>-0.145775</td>
<td>-0.127567</td>
<td>...</td>
<td>0.014325</td>
<td>-0.087952</td>
<td>-0.109696</td>
<td>-0.093985</td>
<td>0.163087</td>
<td>0.047859</td>
<td>0.052533</td>
<td>-0.090528</td>
<td>-0.159245</td>
<td>0.050455</td>
</tr>
<tr>
<th>PCA28</th>
<td>-0.184175</td>
<td>0.025287</td>
<td>-7.269748e-03</td>
<td>-0.221399</td>
<td>0.027261</td>
<td>0.018522</td>
<td>-0.021471</td>
<td>-0.021558</td>
<td>0.115777</td>
<td>-0.096967</td>
<td>...</td>
<td>-0.033823</td>
<td>-0.022963</td>
<td>0.215350</td>
<td>-0.014516</td>
<td>-0.174633</td>
<td>0.070683</td>
<td>-0.002511</td>
<td>-0.037768</td>
<td>-0.096326</td>
<td>0.013140</td>
</tr>
<tr>
<th>PCA29</th>
<td>0.392090</td>
<td>0.014420</td>
<td>3.903848e-02</td>
<td>-0.052422</td>
<td>-0.037282</td>
<td>-0.017822</td>
<td>-0.245224</td>
<td>-0.152928</td>
<td>0.033214</td>
<td>0.167798</td>
<td>...</td>
<td>-0.002098</td>
<td>-0.003420</td>
<td>0.020999</td>
<td>-0.007980</td>
<td>-0.017737</td>
<td>-0.018861</td>
<td>-0.004604</td>
<td>-0.061183</td>
<td>0.353588</td>
<td>-0.072989</td>
</tr>
<tr>
<th>PCA30</th>
<td>0.010751</td>
<td>-0.006782</td>
<td>-3.820986e-03</td>
<td>-0.031554</td>
<td>0.044405</td>
<td>0.020346</td>
<td>0.019051</td>
<td>0.001385</td>
<td>-0.047079</td>
<td>0.011962</td>
<td>...</td>
<td>-0.025081</td>
<td>0.004076</td>
<td>-0.279075</td>
<td>0.624871</td>
<td>-0.258110</td>
<td>0.115212</td>
<td>0.003738</td>
<td>0.000942</td>
<td>-0.018034</td>
<td>-0.017373</td>
</tr>
<tr>
<th>PCA31</th>
<td>0.061644</td>
<td>-0.014004</td>
<td>1.151111e-02</td>
<td>0.015984</td>
<td>0.005680</td>
<td>0.009675</td>
<td>0.144268</td>
<td>0.027726</td>
<td>0.010317</td>
<td>0.072563</td>
<td>...</td>
<td>0.006578</td>
<td>-0.017383</td>
<td>0.056169</td>
<td>-0.123724</td>
<td>0.071544</td>
<td>0.569134</td>
<td>-0.010169</td>
<td>-0.017230</td>
<td>0.010663</td>
<td>-0.046416</td>
</tr>
<tr>
<th>PCA32</th>
<td>-0.125905</td>
<td>0.016732</td>
<td>-1.803771e-02</td>
<td>-0.018923</td>
<td>0.010988</td>
<td>-0.024878</td>
<td>-0.234393</td>
<td>0.084336</td>
<td>-0.008116</td>
<td>0.043284</td>
<td>...</td>
<td>-0.004039</td>
<td>0.015101</td>
<td>-0.107920</td>
<td>0.215464</td>
<td>-0.116780</td>
<td>0.277131</td>
<td>0.005707</td>
<td>0.117861</td>
<td>-0.013633</td>
<td>0.024765</td>
</tr>
<tr>
<th>PCA33</th>
<td>-0.060590</td>
<td>0.010712</td>
<td>-1.664007e-02</td>
<td>-0.008444</td>
<td>-0.010151</td>
<td>-0.003043</td>
<td>-0.240835</td>
<td>0.106785</td>
<td>0.023706</td>
<td>0.025503</td>
<td>...</td>
<td>0.001556</td>
<td>-0.010727</td>
<td>0.099691</td>
<td>-0.240740</td>
<td>0.117156</td>
<td>0.210922</td>
<td>-0.001053</td>
<td>0.124155</td>
<td>-0.005602</td>
<td>0.006423</td>
</tr>
<tr>
<th>PCA34</th>
<td>-0.327383</td>
<td>-0.001396</td>
<td>3.165001e-02</td>
<td>-0.020203</td>
<td>-0.014972</td>
<td>-0.030815</td>
<td>0.310018</td>
<td>-0.204406</td>
<td>-0.008490</td>
<td>0.043695</td>
<td>...</td>
<td>-0.001906</td>
<td>0.003352</td>
<td>0.000121</td>
<td>-0.027307</td>
<td>0.014598</td>
<td>0.122756</td>
<td>0.014376</td>
<td>-0.171155</td>
<td>0.012346</td>
<td>0.032744</td>
</tr>
<tr>
<th>PCA35</th>
<td>0.694058</td>
<td>0.016601</td>
<td>-5.430207e-03</td>
<td>-0.007712</td>
<td>-0.008768</td>
<td>-0.001979</td>
<td>0.013609</td>
<td>-0.134449</td>
<td>0.016573</td>
<td>0.008967</td>
<td>...</td>
<td>-0.000506</td>
<td>0.009424</td>
<td>-0.004460</td>
<td>0.009245</td>
<td>0.001150</td>
<td>0.077037</td>
<td>-0.026302</td>
<td>-0.071232</td>
<td>-0.001201</td>
<td>0.005764</td>
</tr>
<tr>
<th>PCA36</th>
<td>0.018288</td>
<td>-0.000850</td>
<td>4.859413e-04</td>
<td>0.002095</td>
<td>-0.009190</td>
<td>0.002877</td>
<td>-0.002151</td>
<td>0.003621</td>
<td>0.001990</td>
<td>-0.000943</td>
<td>...</td>
<td>0.000553</td>
<td>-0.004722</td>
<td>0.014774</td>
<td>-0.052898</td>
<td>0.035517</td>
<td>0.001307</td>
<td>-0.001713</td>
<td>0.003690</td>
<td>0.000934</td>
<td>0.003432</td>
</tr>
<tr>
<th>PCA37</th>
<td>-0.024784</td>
<td>-0.043949</td>
<td>1.949339e-02</td>
<td>-0.001486</td>
<td>-0.001579</td>
<td>0.000938</td>
<td>-0.123411</td>
<td>0.486287</td>
<td>-0.008163</td>
<td>0.003641</td>
<td>...</td>
<td>-0.000636</td>
<td>0.001020</td>
<td>-0.002304</td>
<td>0.000942</td>
<td>-0.000269</td>
<td>0.006849</td>
<td>-0.002408</td>
<td>-0.003545</td>
<td>0.005044</td>
<td>0.000910</td>
</tr>
<tr>
<th>PCA38</th>
<td>0.040445</td>
<td>0.013457</td>
<td>-2.772940e-03</td>
<td>-0.000128</td>
<td>-0.000205</td>
<td>0.001813</td>
<td>0.705399</td>
<td>-0.233784</td>
<td>0.001530</td>
<td>0.001866</td>
<td>...</td>
<td>-0.000057</td>
<td>0.001345</td>
<td>-0.000540</td>
<td>0.002547</td>
<td>-0.004879</td>
<td>0.019647</td>
<td>0.001214</td>
<td>0.024371</td>
<td>-0.010283</td>
<td>0.003957</td>
</tr>
<tr>
<th>PCA39</th>
<td>-0.021968</td>
<td>0.005154</td>
<td>-2.709841e-04</td>
<td>0.000908</td>
<td>0.002052</td>
<td>-0.002857</td>
<td>0.198485</td>
<td>0.487546</td>
<td>-0.004749</td>
<td>-0.000613</td>
<td>...</td>
<td>-0.000390</td>
<td>-0.004674</td>
<td>0.000734</td>
<td>-0.008558</td>
<td>0.007108</td>
<td>-0.029229</td>
<td>0.006227</td>
<td>0.429043</td>
<td>-0.014920</td>
<td>-0.017799</td>
</tr>
<tr>
<th>PCA40</th>
<td>-0.078610</td>
<td>0.007107</td>
<td>-9.499561e-03</td>
<td>0.000061</td>
<td>0.002094</td>
<td>0.002062</td>
<td>-0.150645</td>
<td>-0.492837</td>
<td>0.002604</td>
<td>0.007693</td>
<td>...</td>
<td>-0.000938</td>
<td>-0.001546</td>
<td>0.002574</td>
<td>-0.005807</td>
<td>0.000166</td>
<td>-0.004752</td>
<td>0.000407</td>
<td>0.795232</td>
<td>0.013340</td>
<td>-0.040870</td>
</tr>
<tr>
<th>PCA41</th>
<td>0.006737</td>
<td>0.000359</td>
<td>-8.420006e-05</td>
<td>0.000107</td>
<td>-0.000837</td>
<td>0.000184</td>
<td>0.006225</td>
<td>0.013952</td>
<td>-0.001282</td>
<td>0.006402</td>
<td>...</td>
<td>-0.000116</td>
<td>0.000151</td>
<td>-0.000071</td>
<td>-0.000211</td>
<td>-0.001795</td>
<td>-0.003551</td>
<td>0.001750</td>
<td>-0.053457</td>
<td>-0.005627</td>
<td>-0.700985</td>
</tr>
<tr>
<th>PCA42</th>
<td>-0.000132</td>
<td>0.000034</td>
<td>-4.503891e-07</td>
<td>-0.000070</td>
<td>0.000415</td>
<td>-0.000199</td>
<td>0.000168</td>
<td>0.001198</td>
<td>-0.000434</td>
<td>-0.000152</td>
<td>...</td>
<td>0.000680</td>
<td>-0.000347</td>
<td>-0.005634</td>
<td>0.001479</td>
<td>-0.707924</td>
<td>-0.000182</td>
<td>0.000029</td>
<td>-0.000230</td>
<td>-0.000130</td>
<td>0.002142</td>
</tr>
</tbody>
</table>
<p>42 rows × 42 columns</p>
</div>
```python
# Bar chart of absolute loadings for the two leading DEEP principal components.
labels = deep_names
# First row of .loc[['PCA1']] is the loading vector for that component.
pca1 = abs(np.array(deep_pca_comp.loc[['PCA1']])[0])
pca2 = abs(np.array(deep_pca_comp.loc[['PCA2']])[0])

x = np.arange(len(labels))  # the label locations
width = 0.35  # the width of the bars

fig, ax = plt.subplots(figsize=(14, 8))
rects1 = ax.bar(x - width/2, pca1, width, label='PCA 1 (33.1% of var)', color='g')
rects2 = ax.bar(x + width/2, pca2, width, label='PCA 2 (16.6% of var)', color='r')

# Add some text for labels, title and custom x-axis tick labels, etc.
ax.set_ylabel('Absolute value of Importance')
ax.set_title('Importance of features by principal component for DEEP')
ax.set_xticks(x)
ax.set_xticklabels(labels, rotation='vertical')
ax.legend(loc='upper left')

def autolabel(rects):
    """Attach a text label above each bar in *rects*, displaying its height."""
    for rect in rects:
        height = rect.get_height()
        # f-string replaces the original str.format call; output is identical.
        ax.annotate(f'{height:.2f}',
                    xy=(rect.get_x() + rect.get_width() / 2, height),
                    xytext=(0, 3),  # 3 points vertical offset
                    textcoords="offset points",
                    ha='center', va='bottom')

autolabel(rects1)
autolabel(rects2)

fig.tight_layout()
#plt.savefig('../figs/pca/deep_pca_barchart.png', dpi=400, bbox_inches='tight')
```

## DEEP PCA1
```python
deep_pca_comp.loc[['PCA1']] #most important principal component (remember 33.1% variance)
```
<div>
<style scoped>
.dataframe tbody tr th:only-of-type {
vertical-align: middle;
}
.dataframe tbody tr th {
vertical-align: top;
}
.dataframe thead th {
text-align: right;
}
</style>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>amplitude</th>
<th>beyond_1_std</th>
<th>beyond_2_std</th>
<th>cusum</th>
<th>eta</th>
<th>eta_e</th>
<th>inter_percentile_range_25</th>
<th>inter_percentile_range_10</th>
<th>kurtosis</th>
<th>linear_fit_slope</th>
<th>...</th>
<th>periodogram_cusum</th>
<th>periodogram_eta</th>
<th>periodogram_inter_percentile_range_25</th>
<th>periodogram_standard_deviation</th>
<th>periodogram_percent_amplitude</th>
<th>chi2</th>
<th>skew</th>
<th>standard_deviation</th>
<th>stetson_K</th>
<th>weighted_mean</th>
</tr>
</thead>
<tbody>
<tr>
<th>PCA1</th>
<td>0.250279</td>
<td>0.048896</td>
<td>-0.00367</td>
<td>0.085912</td>
<td>-0.101881</td>
<td>-0.189234</td>
<td>0.261276</td>
<td>0.262704</td>
<td>-0.022062</td>
<td>-0.054601</td>
<td>...</td>
<td>0.046677</td>
<td>0.173186</td>
<td>0.152227</td>
<td>0.095246</td>
<td>-0.06489</td>
<td>0.041467</td>
<td>-0.113004</td>
<td>0.263995</td>
<td>-0.020057</td>
<td>0.222125</td>
</tr>
</tbody>
</table>
<p>1 rows × 42 columns</p>
</div>
```python
# Rank features by their PCA1 loading (ascending) and print each.
# Each .items() entry is (feature_name, length-1 Series); np.max extracts the scalar.
pca1_row = deep_pca_comp.loc[['PCA1']]
ordered_deep_pca1 = OrderedDict(
    sorted(pca1_row.items(), key=lambda kv: np.max(kv[1]))
)
for feature, loading in ordered_deep_pca1.items():
    print(feature, loading)
```
eta_e PCA1 -0.189234
Name: eta_e, dtype: float32
period_s_to_n_2 PCA1 -0.1426
Name: period_s_to_n_2, dtype: float32
period_s_to_n_1 PCA1 -0.134771
Name: period_s_to_n_1, dtype: float32
period_s_to_n_0 PCA1 -0.117934
Name: period_s_to_n_0, dtype: float32
skew PCA1 -0.113004
Name: skew, dtype: float32
eta PCA1 -0.101881
Name: eta, dtype: float32
linear_trend PCA1 -0.066346
Name: linear_trend, dtype: float32
median_buffer_range_percentage_5 PCA1 -0.066244
Name: median_buffer_range_percentage_5, dtype: float32
periodogram_percent_amplitude PCA1 -0.06489
Name: periodogram_percent_amplitude, dtype: float32
periodogram_amplitude PCA1 -0.063805
Name: periodogram_amplitude, dtype: float32
linear_fit_slope PCA1 -0.054601
Name: linear_fit_slope, dtype: float32
kurtosis PCA1 -0.022062
Name: kurtosis, dtype: float32
stetson_K PCA1 -0.020057
Name: stetson_K, dtype: float32
beyond_2_std PCA1 -0.00367
Name: beyond_2_std, dtype: float32
periodogram_beyond_2_std PCA1 -0.003453
Name: periodogram_beyond_2_std, dtype: float32
periodogram_beyond_1_std PCA1 -0.003409
Name: periodogram_beyond_1_std, dtype: float32
period_1 PCA1 0.00005
Name: period_1, dtype: float32
magnitude_percentage_ratio_20_10 PCA1 0.005766
Name: magnitude_percentage_ratio_20_10, dtype: float32
period_0 PCA1 0.00679
Name: period_0, dtype: float32
magnitude_percentage_ratio_40_5 PCA1 0.00788
Name: magnitude_percentage_ratio_40_5, dtype: float32
period_2 PCA1 0.009434
Name: period_2, dtype: float32
chi2 PCA1 0.041467
Name: chi2, dtype: float32
linear_fit_reduced_chi2 PCA1 0.043726
Name: linear_fit_reduced_chi2, dtype: float32
periodogram_cusum PCA1 0.046677
Name: periodogram_cusum, dtype: float32
beyond_1_std PCA1 0.048896
Name: beyond_1_std, dtype: float32
cusum PCA1 0.085912
Name: cusum, dtype: float32
periodogram_standard_deviation PCA1 0.095246
Name: periodogram_standard_deviation, dtype: float32
periodogram_inter_percentile_range_25 PCA1 0.152227
Name: periodogram_inter_percentile_range_25, dtype: float32
periodogram_eta PCA1 0.173186
Name: periodogram_eta, dtype: float32
maximum_slope PCA1 0.185373
Name: maximum_slope, dtype: float32
weighted_mean PCA1 0.222125
Name: weighted_mean, dtype: float32
mean PCA1 0.224195
Name: mean, dtype: float32
percent_amplitude PCA1 0.240974
Name: percent_amplitude, dtype: float32
linear_fit_slope_sigma PCA1 0.242441
Name: linear_fit_slope_sigma, dtype: float32
amplitude PCA1 0.250279
Name: amplitude, dtype: float32
linear_trend_sigma PCA1 0.255248
Name: linear_trend_sigma, dtype: float32
percent_difference_magnitude_percentile_20 PCA1 0.260222
Name: percent_difference_magnitude_percentile_20, dtype: float32
percent_difference_magnitude_percentile_5 PCA1 0.260746
Name: percent_difference_magnitude_percentile_5, dtype: float32
inter_percentile_range_25 PCA1 0.261276
Name: inter_percentile_range_25, dtype: float32
median_absolute_deviation PCA1 0.261389
Name: median_absolute_deviation, dtype: float32
inter_percentile_range_10 PCA1 0.262704
Name: inter_percentile_range_10, dtype: float32
standard_deviation PCA1 0.263995
Name: standard_deviation, dtype: float32
### most important features for PCA1: standard_deviation, inter_percentile_range_10, median_absolute_deviation, inter_percentile_range_25, percent_difference_magnitude_percentile_5
## DEEP PCA2
```python
deep_pca_comp.loc[['PCA2']] #second most important principal component (remember 16.6% variance)
```
<div>
<style scoped>
.dataframe tbody tr th:only-of-type {
vertical-align: middle;
}
.dataframe tbody tr th {
vertical-align: top;
}
.dataframe thead th {
text-align: right;
}
</style>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>amplitude</th>
<th>beyond_1_std</th>
<th>beyond_2_std</th>
<th>cusum</th>
<th>eta</th>
<th>eta_e</th>
<th>inter_percentile_range_25</th>
<th>inter_percentile_range_10</th>
<th>kurtosis</th>
<th>linear_fit_slope</th>
<th>...</th>
<th>periodogram_cusum</th>
<th>periodogram_eta</th>
<th>periodogram_inter_percentile_range_25</th>
<th>periodogram_standard_deviation</th>
<th>periodogram_percent_amplitude</th>
<th>chi2</th>
<th>skew</th>
<th>standard_deviation</th>
<th>stetson_K</th>
<th>weighted_mean</th>
</tr>
</thead>
<tbody>
<tr>
<th>PCA2</th>
<td>0.027382</td>
<td>0.011196</td>
<td>-0.01258</td>
<td>0.261779</td>
<td>-0.271932</td>
<td>-0.103574</td>
<td>0.035435</td>
<td>0.036006</td>
<td>-0.020417</td>
<td>0.071858</td>
<td>...</td>
<td>-0.006077</td>
<td>-0.040501</td>
<td>0.035786</td>
<td>0.287994</td>
<td>0.342611</td>
<td>0.058168</td>
<td>-0.052625</td>
<td>0.035934</td>
<td>-0.001992</td>
<td>-0.047487</td>
</tr>
</tbody>
</table>
<p>1 rows × 42 columns</p>
</div>
```python
# Rank features by their PCA2 loading (ascending) and print each.
pca2_row = deep_pca_comp.loc[['PCA2']]
ordered_deep_pca2 = OrderedDict(
    sorted(pca2_row.items(), key=lambda kv: np.max(kv[1]))
)
for feature, loading in ordered_deep_pca2.items():
    print(feature, loading)
```
periodogram_beyond_2_std PCA2 -0.314096
Name: periodogram_beyond_2_std, dtype: float32
periodogram_beyond_1_std PCA2 -0.309769
Name: periodogram_beyond_1_std, dtype: float32
eta PCA2 -0.271932
Name: eta, dtype: float32
eta_e PCA2 -0.103574
Name: eta_e, dtype: float32
skew PCA2 -0.052625
Name: skew, dtype: float32
weighted_mean PCA2 -0.047487
Name: weighted_mean, dtype: float32
mean PCA2 -0.046229
Name: mean, dtype: float32
periodogram_eta PCA2 -0.040501
Name: periodogram_eta, dtype: float32
median_buffer_range_percentage_5 PCA2 -0.02662
Name: median_buffer_range_percentage_5, dtype: float32
maximum_slope PCA2 -0.02192
Name: maximum_slope, dtype: float32
kurtosis PCA2 -0.020417
Name: kurtosis, dtype: float32
beyond_2_std PCA2 -0.01258
Name: beyond_2_std, dtype: float32
periodogram_cusum PCA2 -0.006077
Name: periodogram_cusum, dtype: float32
linear_fit_slope_sigma PCA2 -0.005884
Name: linear_fit_slope_sigma, dtype: float32
stetson_K PCA2 -0.001992
Name: stetson_K, dtype: float32
magnitude_percentage_ratio_20_10 PCA2 0.003011
Name: magnitude_percentage_ratio_20_10, dtype: float32
magnitude_percentage_ratio_40_5 PCA2 0.005714
Name: magnitude_percentage_ratio_40_5, dtype: float32
beyond_1_std PCA2 0.011196
Name: beyond_1_std, dtype: float32
amplitude PCA2 0.027382
Name: amplitude, dtype: float32
linear_trend_sigma PCA2 0.028092
Name: linear_trend_sigma, dtype: float32
percent_amplitude PCA2 0.031064
Name: percent_amplitude, dtype: float32
median_absolute_deviation PCA2 0.034026
Name: median_absolute_deviation, dtype: float32
inter_percentile_range_25 PCA2 0.035435
Name: inter_percentile_range_25, dtype: float32
periodogram_inter_percentile_range_25 PCA2 0.035786
Name: periodogram_inter_percentile_range_25, dtype: float32
standard_deviation PCA2 0.035934
Name: standard_deviation, dtype: float32
inter_percentile_range_10 PCA2 0.036006
Name: inter_percentile_range_10, dtype: float32
percent_difference_magnitude_percentile_20 PCA2 0.040881
Name: percent_difference_magnitude_percentile_20, dtype: float32
percent_difference_magnitude_percentile_5 PCA2 0.041275
Name: percent_difference_magnitude_percentile_5, dtype: float32
chi2 PCA2 0.058168
Name: chi2, dtype: float32
linear_fit_reduced_chi2 PCA2 0.059515
Name: linear_fit_reduced_chi2, dtype: float32
linear_fit_slope PCA2 0.071858
Name: linear_fit_slope, dtype: float32
linear_trend PCA2 0.084486
Name: linear_trend, dtype: float32
period_2 PCA2 0.088921
Name: period_2, dtype: float32
period_1 PCA2 0.128117
Name: period_1, dtype: float32
period_0 PCA2 0.138406
Name: period_0, dtype: float32
cusum PCA2 0.261779
Name: cusum, dtype: float32
period_s_to_n_2 PCA2 0.27074
Name: period_s_to_n_2, dtype: float32
period_s_to_n_1 PCA2 0.283358
Name: period_s_to_n_1, dtype: float32
periodogram_standard_deviation PCA2 0.287994
Name: periodogram_standard_deviation, dtype: float32
period_s_to_n_0 PCA2 0.308055
Name: period_s_to_n_0, dtype: float32
periodogram_percent_amplitude PCA2 0.342611
Name: periodogram_percent_amplitude, dtype: float32
periodogram_amplitude PCA2 0.343072
Name: periodogram_amplitude, dtype: float32
### most important features for PCA2: periodogram_amplitude, periodogram_percent_amplitude, period_s_to_n_0, periodogram_standard_deviation, period_s_to_n_2
### PC3
```python
# Rank features by their PCA3 loading (ascending) and print each.
# Fixed misleading name: original cell reused `ordered_deep_pca1` for PCA3 data.
ordered_deep_pca3 = OrderedDict(sorted(deep_pca_comp.loc[['PCA3']].items(), key=lambda item: np.max(item[1])))
for key, value in ordered_deep_pca3.items():
    print(key, value)
```
stetson_K PCA3 -0.376785
Name: stetson_K, dtype: float32
beyond_1_std PCA3 -0.376201
Name: beyond_1_std, dtype: float32
median_buffer_range_percentage_5 PCA3 -0.28917
Name: median_buffer_range_percentage_5, dtype: float32
magnitude_percentage_ratio_40_5 PCA3 -0.189515
Name: magnitude_percentage_ratio_40_5, dtype: float32
magnitude_percentage_ratio_20_10 PCA3 -0.173857
Name: magnitude_percentage_ratio_20_10, dtype: float32
linear_fit_slope_sigma PCA3 -0.128996
Name: linear_fit_slope_sigma, dtype: float32
skew PCA3 -0.089703
Name: skew, dtype: float32
periodogram_eta PCA3 -0.087479
Name: periodogram_eta, dtype: float32
weighted_mean PCA3 -0.086401
Name: weighted_mean, dtype: float32
periodogram_standard_deviation PCA3 -0.082969
Name: periodogram_standard_deviation, dtype: float32
mean PCA3 -0.079947
Name: mean, dtype: float32
periodogram_inter_percentile_range_25 PCA3 -0.078182
Name: periodogram_inter_percentile_range_25, dtype: float32
cusum PCA3 -0.037247
Name: cusum, dtype: float32
periodogram_amplitude PCA3 -0.02161
Name: periodogram_amplitude, dtype: float32
periodogram_percent_amplitude PCA3 -0.021108
Name: periodogram_percent_amplitude, dtype: float32
median_absolute_deviation PCA3 -0.019486
Name: median_absolute_deviation, dtype: float32
inter_percentile_range_25 PCA3 -0.01493
Name: inter_percentile_range_25, dtype: float32
period_0 PCA3 -0.003208
Name: period_0, dtype: float32
period_2 PCA3 -0.00196
Name: period_2, dtype: float32
linear_trend_sigma PCA3 -0.001241
Name: linear_trend_sigma, dtype: float32
period_1 PCA3 0.000016
Name: period_1, dtype: float32
percent_difference_magnitude_percentile_20 PCA3 0.004515
Name: percent_difference_magnitude_percentile_20, dtype: float32
inter_percentile_range_10 PCA3 0.005635
Name: inter_percentile_range_10, dtype: float32
period_s_to_n_0 PCA3 0.01039
Name: period_s_to_n_0, dtype: float32
eta PCA3 0.019887
Name: eta, dtype: float32
period_s_to_n_1 PCA3 0.024902
Name: period_s_to_n_1, dtype: float32
period_s_to_n_2 PCA3 0.032738
Name: period_s_to_n_2, dtype: float32
beyond_2_std PCA3 0.033775
Name: beyond_2_std, dtype: float32
standard_deviation PCA3 0.0341
Name: standard_deviation, dtype: float32
eta_e PCA3 0.039662
Name: eta_e, dtype: float32
percent_difference_magnitude_percentile_5 PCA3 0.03967
Name: percent_difference_magnitude_percentile_5, dtype: float32
maximum_slope PCA3 0.045322
Name: maximum_slope, dtype: float32
periodogram_beyond_2_std PCA3 0.055088
Name: periodogram_beyond_2_std, dtype: float32
linear_fit_slope PCA3 0.080546
Name: linear_fit_slope, dtype: float32
periodogram_beyond_1_std PCA3 0.090533
Name: periodogram_beyond_1_std, dtype: float32
periodogram_cusum PCA3 0.10578
Name: periodogram_cusum, dtype: float32
amplitude PCA3 0.12206
Name: amplitude, dtype: float32
percent_amplitude PCA3 0.167942
Name: percent_amplitude, dtype: float32
linear_trend PCA3 0.172458
Name: linear_trend, dtype: float32
kurtosis PCA3 0.308662
Name: kurtosis, dtype: float32
chi2 PCA3 0.383842
Name: chi2, dtype: float32
linear_fit_reduced_chi2 PCA3 0.387974
Name: linear_fit_reduced_chi2, dtype: float32
```python
```
# TL;DR SUMMARY
> ## M31
### PC1 explains 29.3% and PC2 17.8%. Together, they explain 47.1% of variance.
#### PCA1 MOST IMPORTANT: standard_deviation, inter_percentile_range_10, percent_difference_magnitude_percentile_5, linear_trend_sigma, median_absolute_deviation, are the most important.
#### PCA2 MOST IMPORTANT: periodogram_amplitude, periodogram_percent_amplitude, period_s_to_n_0, periodogram_standard_deviation, period_s_to_n_1
> ## DISK
### PC1 explains 33.0% and PC2 17.2%. Together, they explain 50.2% of variance.
#### PCA1 MOST IMPORTANT: inter_percentile_range_10, linear_fit_slope_sigma, median_absolute_deviation, standard_deviation, linear_trend_sigma
#### PCA2 MOST IMPORTANT: periodogram_amplitude, periodogram_percent_amplitude, cusum, periodogram_standard_deviation, period_s_to_n_2
> ## DEEP
### PC1 explains 33.1% and PC2 16.6%. Together, they explain 49.7% of variance.
#### PCA1 MOST IMPORTANT: standard_deviation, inter_percentile_range_10, median_absolute_deviation, inter_percentile_range_25, percent_difference_magnitude_percentile_5
#### PCA2 MOST IMPORTANT: periodogram_amplitude, periodogram_percent_amplitude, period_s_to_n_0, periodogram_standard_deviation, period_s_to_n_2
# THE SAME MOST IMPORTANT FEATURES ACROSS FIELDS ARE
> PCA1:
i) standard_deviation (3 fields),
ii) inter_percentile_range_10 (3 fields),
iii) median_absolute_deviation (3 fields),
iv) percent_difference_magnitude_percentile_5 (2 fields),
v) linear_trend_sigma (2 fields)
> PCA2:
i) periodogram_amplitude (3 fields),
ii) periodogram_percent_amplitude (3 fields),
iii) periodogram_standard_deviation (3 fields)
iv) period_s_to_n_0, (2 fields),
v) period_s_to_n_2 (2 fields)
```python
# Combined features important plot for paper
# NOTE(review): gridspec is imported but never referenced below — safe to drop.
import matplotlib.gridspec as gridspec
# TeX text rendering: requires a working LaTeX installation on the machine.
plt.rcParams.update({'text.usetex': True})
plt.rcParams.update({'font.size': 18})
labels = m31_names
# Absolute loadings of PC1/PC2 for each field (first row of each .loc selection).
m31_pca1 = abs(np.array(m31_pca_comp.loc[['PCA1']])[0])
m31_pca2 = abs(np.array(m31_pca_comp.loc[['PCA2']])[0])
disk_pca1 = abs(np.array(disk_pca_comp.loc[['PCA1']])[0])
disk_pca2 = abs(np.array(disk_pca_comp.loc[['PCA2']])[0])
deep_pca1 = abs(np.array(deep_pca_comp.loc[['PCA1']])[0])
deep_pca2 = abs(np.array(deep_pca_comp.loc[['PCA2']])[0])
x = np.arange(len(labels))  # the label locations
width = 0.35  # the width of the bars
# Three stacked panels (M31 / DEEP / DISK) sharing both axes with zero spacing.
fig, (ax, ax2, ax3) = plt.subplots(3, 1, sharex=True, sharey=True, figsize=(14, 14),
                                   gridspec_kw = {'wspace':0, 'hspace':0})
# Per panel: PC1 bars (black) shifted left, PC2 bars (magenta) shifted right.
m31_rects1 = ax.bar(x - width/2, m31_pca1, width, label=field_names['m31'] + ' PCA 1 (29.3\% of var)', color='k')
m31_rects2 = ax.bar(x + width/2, m31_pca2, width, label=field_names['m31'] + ' PCA 2 (17.8\% of var)', color='m')
deep_rects1 = ax2.bar(x - width/2, deep_pca1, width, label=field_names['deep'] + ' PCA 1 (33.1\% of var)', color='k')
deep_rects2 = ax2.bar(x + width/2, deep_pca2, width, label=field_names['deep'] + ' PCA 2 (16.6\% of var)', color='m')
disk_rects1 = ax3.bar(x - width/2, disk_pca1, width, label=field_names['disk'] + ' PCA 1 (33.0\% of var)', color='k')
disk_rects2 = ax3.bar(x + width/2, disk_pca2, width, label=field_names['disk'] + ' PCA 2 (17.2\% of var)', color='m')
# Shared y-limits/ticks set once on the top axis (sharey propagates them).
ax.set_ylim([0, 0.42])
ax.set_yticks([0.0, 0.1, 0.2, 0.3])
# Add some text for labels, title and custom x-axis tick labels, etc.
ax2.set_ylabel('Contribution to PC', fontsize=20)
ax3.set_xticks(x)
# Human-readable feature labels on the bottom panel only (shared x-axis).
ax3.set_xticklabels([feature_names[f] for f in m31_names], rotation=90)
ax.legend(loc='upper left', fontsize=14)
ax2.legend(loc='upper left', fontsize=14)
ax3.legend(loc='upper left', fontsize=14)
fig.tight_layout()
plt.savefig('../figs/pca/all_fields_PCA_importance_barchart.pdf', bbox_inches='tight')
```

```python
```
|
snad-spaceREPO_NAMEzwadPATH_START.@zwad_extracted@zwad-master@notebooks@PCA_features.ipynb@.PATH_END.py
|
{
"filename": "ex-2.16.py",
"repo_name": "mpi4py/mpi4py",
"repo_path": "mpi4py_extracted/mpi4py-master/demo/mpi-ref-v1/ex-2.16.py",
"type": "Python"
}
|
## mpiexec -n 4 python ex-2.16.py

# Jacobi iteration demo (mpi4py version of MPI reference example 2.16):
# the grid is split column-wise across ranks, halo columns are exchanged
# with Sendrecv, and MPI.PROC_NULL stands in for the missing neighbor at
# the domain edges so no boundary special-casing is needed.
# --------------------------------------------------------------------

from mpi4py import MPI
try:
    import numpy
except ImportError:
    # numpy is required for the array arithmetic below; bail out quietly
    raise SystemExit

# --------------------------------------------------------------------

n = 5 * MPI.COMM_WORLD.Get_size()

# compute number of processes and myrank
p = MPI.COMM_WORLD.Get_size()
myrank = MPI.COMM_WORLD.Get_rank()

# compute size of local block; the first (n - p*m) ranks take one extra
# column so the n columns are spread as evenly as possible
m = n//p
if myrank < (n - p * m):
    m = m + 1

# compute neighbors; MPI.PROC_NULL turns the edge transfers into no-ops
if myrank == 0:
    left = MPI.PROC_NULL
else:
    left = myrank - 1
if myrank == p - 1:
    right = MPI.PROC_NULL
else:
    right = myrank + 1

# allocate local arrays: A carries one halo row/column on each side,
# B holds only the updated interior. Fortran order makes the column
# slices exchanged below contiguous in memory.
A = numpy.empty((n+2, m+2), dtype='d', order='f')
B = numpy.empty((n, m), dtype='d', order='f')

# interior starts at 1, boundary values fixed at 0
A.fill(1)
A[0, :] = A[-1, :] = 0
A[:, 0] = A[:, -1] = 0

# main loop
converged = False
while not converged:
    # compute, B = 0.25 * ( N + S + E + W ), written in place into B
    N, S = A[:-2, 1:-1], A[2:, 1:-1]
    E, W = A[1:-1, :-2], A[1:-1, 2:]
    numpy.add(N, S, B)
    numpy.add(E, B, B)
    numpy.add(W, B, B)
    B *= 0.25
    A[1:-1, 1:-1] = B
    # communicate: swap halo columns with both neighbors; the paired
    # Sendrecv calls are deadlock-free by construction
    tag = 0
    MPI.COMM_WORLD.Sendrecv([B[:, -1], MPI.DOUBLE], right, tag,
                            [A[:, 0], MPI.DOUBLE], left, tag)
    MPI.COMM_WORLD.Sendrecv((B[:, 0], MPI.DOUBLE), left, tag,
                            (A[:, -1], MPI.DOUBLE), right, tag)
    # convergence: every rank must agree, hence the logical-AND reduction
    myconv = numpy.allclose(B, 0)
    loc_conv = numpy.asarray(myconv, dtype='i')
    glb_conv = numpy.asarray(0, dtype='i')
    MPI.COMM_WORLD.Allreduce([loc_conv, MPI.INT],
                             [glb_conv, MPI.INT],
                             op=MPI.LAND)
    converged = bool(glb_conv)

# --------------------------------------------------------------------
|
mpi4pyREPO_NAMEmpi4pyPATH_START.@mpi4py_extracted@mpi4py-master@demo@mpi-ref-v1@ex-2.16.py@.PATH_END.py
|
{
"filename": "Untitled2.ipynb",
"repo_name": "FloorBroekgaarden/Double-Compact-Object-Mergers",
"repo_path": "Double-Compact-Object-Mergers_extracted/Double-Compact-Object-Mergers-main/plottingCode/Detectable_Distributions_GWTC-3/Untitled2.ipynb",
"type": "Jupyter Notebook"
}
|
```python
# Standard scientific stack
import numpy as np
import matplotlib.pyplot as plt
import sys
import h5py as h5
import numpy as np  # NOTE(review): duplicate of the numpy import above
from scipy import stats
import scipy

#Quick fudge to make import from ../Scripts work
sys.path.append('../../Scripts')

#Custom imports
# for color map
import seaborn as sns
# for reading datafiles
import pandas as pd
# import script that has formation channel classification functions:
from PostProcessingScripts import *
import ClassCOMPAS as CC ###

from IPython.core.display import display, HTML
# Use the full browser width for this notebook's cells
display(HTML("<style>.container { width:100% !important; }</style>"))
```
<style>.container { width:100% !important; }</style>
```python
# nModels=17
# BPSnameslist = list(string.ascii_uppercase)[0:nModels]
# physicalNamesBPSmodelsWithEnter = [r'\textbf{fiducial}',\
# r'$\beta=0.25$', r'$\beta=0.5$', r'$\beta=0.75$',r'\textbf{unstable}' + '\n'+ r'\textbf{case BB}',\
# r'$\alpha_{\rm{CE}}=0.5$', r'$\alpha_{\rm{CE}}=2$', r'\textbf{optimistic}' +'\n' + r'\textbf{CE}',\
# r'\textbf{rapid SN}', r'$\rm{max} \ m_{\rm{NS}}$' +'\n' + r'$2.0\,\rm{M}_{\odot}$', r'$\rm{max} \ m_{\rm{NS}}$' +'\n' + r'$3.0\,\rm{M}_{\odot}$',\
# r'\textbf{no PISN}', r'\textbf{SN} '+ r'$\sigma_{\rm{rms}}^{\rm{1D}}$' +'\n' + r'$100\,\rm{km}\,\rm{s}^{-1}$',r'\textbf{SN} '+ r'$\sigma_{\rm{rms}}^{\rm{1D}}$' +'\n' + r'$30\,\rm{km}\,\rm{s}^{-1}$',\
# r'\textbf{SN} '+ r'$v_{\rm{k,BH}}$' +'\n' + r'$0\,\rm{km}\,\rm{s}^{-1}$' , r'$\rm{f}_{\rm{WR}} = 0.1$', r'$\rm{f}_{\rm{WR}} = 5$']
# alphabetPhysicalNameDictWithEnter = {BPSnameslist[i]: physicalNamesBPSmodelsWithEnter[i] for i in range(len(BPSnameslist))}
def plot_kdes_single(axe=None, axe_ind=None, xparam='M1', BPSmodelNames=['A'], MSSFRmodelNames=['000'], DCOtype='BHNS', xvarrange=None, \
                     whichWeight='det', path_dir='/Volumes/Andromeda/DATA/AllDCO_bugfix/'):
    """Plot weighted KDEs of one double-compact-object property for the given
    binary population synthesis (BPS) models, drawing one curve per
    metallicity-specific star formation rate density (MSSFR) variation.

    Parameters
    ----------
    axe : matplotlib axis, or a 2D array of axes when ``axe_ind`` is given.
    axe_ind : column index into the axes array (None -> ``axe`` used directly).
    xparam : which quantity to plot: 'Mass1', 'Mass2', 'q', 'qLVK',
        'ChirpMass', 'Mtot', 'logZ', 'Z', 'tc' or 'tdelay'.
    BPSmodelNames : list of single-letter BPS model identifiers.
    MSSFRmodelNames : list of 3-digit MSSFR variation codes (e.g. '000').
    DCOtype : 'BHNS', 'BBH' or 'BNS'; selects the COMPAS output file.
    xvarrange : (low, high) x-range on which the KDE is evaluated.
    whichWeight : 'det' for detection-weighted rates, 'z0' for intrinsic rates.
    path_dir : directory holding the per-model COMPAS HDF5 output.

    NOTE(review): relies on module-level helpers/lookup tables imported from
    PostProcessingScripts (colorDirDict, alphabetDirDict,
    obtainM1BHandM2BHassymetric, chirpmass, MirroredKDE).
    """
    # constants
    Zsolar=0.0142
    lw = 2
    nSFRD = 28 # number of SFRD models
    # line styles used to distinguish the highlighted MSSFR variations
    MSSFRlinestyles = ['-' , '--' , ':' , '-.']
    DCOname_dict = {'BHNS':'BHNS', 'BBH':'BHBH', 'BNS':'NSNS'}
    DCOname = DCOname_dict[DCOtype]
    # column indices of the quantities in a legacy tabular layout; kept for
    # reference only — the code below reads named HDF5 datasets instead
    xvar_dict = {'Mtot':4,'Mass1':0, 'Mass2':1, 'q':6, 'ChirpMass':5, 'logZ':8, 'Z':7, 'tc':2, 'log10(tc)':3, 'tdelay':9, 'log10(tdelay)':10 }

    for ind_m, BPSmodelName in enumerate(BPSmodelNames):
        color_m = colorDirDict[BPSmodelName]
        # pick the subplot for this model when a grid of axes was passed
        if axe_ind!=None:
            axe = axe[ind_m,axe_ind]
        else:
            axe=axe
        if ind_m==0:
            print('now at m=', BPSmodelName)
        # data for KDE / PDF:
        path_ = path_dir + alphabetDirDict[BPSmodelName] +'/'
        path = path_ + 'COMPASCompactOutput_'+ DCOtype +'_' + BPSmodelName + '.h5'
        print(path)
        fdata = h5.File(path)  # NOTE(review): no file mode given; h5py >= 3 requires an explicit 'r'

        # build the requested x-variable from the COMPAS output
        if xparam in ['Mass1', 'Mass2', 'q', 'ChirpMass', 'Mtot','qLVK']:
            # obtain BH and NS masses
            xvar1 = fdata['doubleCompactObjects']['M1'][...].squeeze()
            xvar2 = fdata['doubleCompactObjects']['M2'][...].squeeze()
            M1, M2 = obtainM1BHandM2BHassymetric(m1=xvar1, m2=xvar2) # M1 will be the most massive, M2 the least massive compact object.
            del xvar1
            del xvar2
            if xparam =='Mtot':
                xvar=M1+M2
            elif xparam=='Mass1':
                xvar = M1
            elif xparam=='Mass2':
                xvar = M2
            elif xparam =='q':
                # mass ratio with the heavier object in the numerator (>= 1)
                xvar = M1/M2
            elif xparam =='qLVK':
                # LVK convention: lighter over heavier, in (0, 1]
                xvar = M2/M1
            elif xparam=='ChirpMass':
                xvar = chirpmass(M1,M2)
            del M1
            del M2
        elif xparam=='logZ':
            ZZ = fdata['doubleCompactObjects']['Metallicity1'][...].squeeze()
            xvar = np.log10((ZZ))
            del ZZ
        elif xparam=='Z':
            ZZ = fdata['doubleCompactObjects']['Metallicity1'][...].squeeze()
            xvar = ((ZZ))
            del ZZ
        elif xparam=='tc':
            tc = fdata['doubleCompactObjects']['tc'][...].squeeze()
            xvar = tc / 1000. # in Gyr.
            del tc
        elif xparam=='tdelay':
            # delay time = formation time + coalescence time
            tc = fdata['doubleCompactObjects']['tc'][...].squeeze()
            tform = fdata['doubleCompactObjects']['tform'][...].squeeze()
            xvar = (tc/1000) + (tform / 1000) # in Gyr
            del tc
            del tform
        else:
            print('KeyError')

        ymax=0
        ii=0
        print(MSSFRmodelNames)
        # one KDE curve per MSSFR variation
        for ind_mssfr, mssfr in enumerate(MSSFRmodelNames):
            print('\t now at mssfr %s'%mssfr)
            # read in MSSFR weights:
            if whichWeight=='det':
                # get detected weights
                fparam_key = 'weights_detected'
            elif whichWeight=='z0':
                # get intrinsic weights
                fparam_key = 'weights_intrinsic'
            weightheader = 'w_' + mssfr
            w = fdata[fparam_key][weightheader][...].squeeze()

            # highlight the SFRD if they are one of the following 3
            if mssfr in ['000', '231', '312']:
                MSSFRalpha=1
                if mssfr=='000':
                    MSSFRzorder = 57
                    MSSFRlinestyle = MSSFRlinestyles[0]
                elif mssfr=='231':
                    MSSFRzorder=55
                    MSSFRlinestyle = MSSFRlinestyles[1]
                elif mssfr=='312':
                    MSSFRzorder=54
                    MSSFRlinestyle = MSSFRlinestyles[2]
            else:
                # all remaining variations: semi-transparent solid lines
                MSSFRalpha=0.5
                MSSFRlinestyle = '-'
                MSSFRzorder=51

            # make the KDE, by calculating KDE values for the following x points
            xx_boot = np.linspace(xvarrange[0],xvarrange[1],100)
            # set bandwidth (dimensionless) based on the no of samples in the data
            lenXvar = len(xvar)
            # bw = 0.4 # TEMP
            if lenXvar<=100000:
                bw=0.1
            elif lenXvar>2*10**6:
                bw = 0.06
            elif (lenXvar<=2*10**6) & (lenXvar>10**6):
                bw = 0.06
            elif (lenXvar<=10**6) & (lenXvar>5*10**5):
                bw = 0.07
            elif (lenXvar<=5*10**5) & (lenXvar>10**5):
                bw = 0.08
            else:
                print('error!! bw not defined')
            # for metallicity we want a separate, larger KDE bandwidth, since
            # here the resolution is limited by our number of Zi grid points
            if xparam=='logZ':
                bw = 0.1
            if (mssfr=='000')&(axe_ind==0):
                print('Model ', BPSmodelName, 'with %s DCOs is given a kde bandwidth of '%len(xvar), 'bw=', bw)
            # mirrored (boundary-corrected) KDE of the weighted samples
            KDE = MirroredKDE(xvar, lower_bound=min(xvar), upper_bound=max(xvarrange), weights=w, bw_method=bw)
            yy_boot = KDE(xx_boot)
            del KDE
            axe.plot(xx_boot, yy_boot, alpha=MSSFRalpha, color=color_m, zorder=MSSFRzorder, label=None,\
                     linewidth=lw+4, linestyle=MSSFRlinestyle )
            # (removed a commented-out experiment that overplotted an observed
            # q-distribution power law for comparison)
            # sanity check: the KDE should integrate to ~1 over xvarrange
            delta_x = (xx_boot[1:]-xx_boot[0:-1])[0]
            print(np.sum(yy_boot*delta_x))
        fdata.close()
        # axe.set_ylim(0.001, ylim_max)
        del xvar
        del w
    return
```
```python
def make_up_axis(axe=None, axe_ind=0, BPSmodelNames=['A'], xlim=[1,3.], ylim=[0.1,10], nameX='xname', nameY='yname', logY=False, logX=False):
    """Apply scales, limits and tick visibility to one column of a grid of axes.

    NOTE(review): despite the ``axe`` parameter, this function actually reads
    the module-level ``ax`` (the full axes grid) and ``BPSnameslist`` (all
    model letters); it only works inside this notebook once those globals
    exist — verify before reusing elsewhere.
    Only the panel of the last model in ``BPSmodelNames`` keeps its tick
    labels; panels with ``nameX``/``nameY`` set to None additionally get the
    corresponding ticks removed.
    """
    for ind_bps, bps_name in enumerate(BPSnameslist):
        # select the panel for this model in the requested column
        axe = ax[ind_bps, axe_ind]
        # if ind_bps
        if logY==True:
            axe.set_yscale('log')
        if logX==True:
            axe.set_xscale('log')
        if xlim:
            axe.set_xlim(xlim[0], xlim[1])
        if ylim:
            axe.set_ylim(ylim[0], ylim[1])
        # hide ticks everywhere except the last requested panel
        if bps_name != BPSmodelNames[-1]:
            axe.set_xticklabels( () )
            axe.set_xticks([])
            axe.set_yticks([])
            axe.set_yticklabels( () )
        if (nameX==None):
            axe.set_xticklabels( () )
            axe.set_xticks([])
        elif (nameY==None):
            axe.set_yticks([])
            axe.set_yticklabels( () )
    # if (nameX!=None) & (nameY==None):
    #     layoutAxesNoYlabel()
    return
def make_up_axis_single_ax(axe=None, xlim=(1, 3.0), ylim=(0.1, 10), logY=False, logX=False):
    """Apply axis scales and limits to a single matplotlib axis, in place.

    Parameters
    ----------
    axe : matplotlib axis (or any object exposing set_xscale/set_yscale/
        set_xlim/set_ylim) to be modified.
    xlim, ylim : 2-sequences (lower, upper); pass a falsy value (e.g. None)
        to leave the corresponding limits untouched.
    logY, logX : if truthy, switch the corresponding axis to a log scale.

    Returns
    -------
    None; ``axe`` is mutated.
    """
    # Fixes vs. the original: idiomatic truth tests instead of `== True`
    # (PEP 8 / E712), immutable tuple defaults instead of mutable lists,
    # and the dead commented-out code removed.
    # Scales are applied before limits so the limits are interpreted on the
    # final (possibly logarithmic) scale.
    if logY:
        axe.set_yscale('log')
    if logX:
        axe.set_xscale('log')
    if xlim:
        axe.set_xlim(xlim[0], xlim[1])
    if ylim:
        axe.set_ylim(ylim[0], ylim[1])
    return
```
```python
```
```python
# # #Plot - overview
# xlim_low = 0.
# xlim_high = 1.
# for BPSmodelName in ['K']: #= 'G'
# ncols, nrows= 1,1
# fig, ax= plt.subplots(ncols=ncols,nrows=nrows,figsize=(16.0, 5.5),
# gridspec_kw={"width_ratios":1.5*np.ones(ncols), "height_ratios":1*np.ones(nrows)})
# # fig, ax = plt.figure(figsize = (16.0, 2 * 5.5))
# lw=4
# # add location of the COMPAS hdf5 files:
# path_dir = '/Volumes/Andromeda/DATA/AllDCO_bugfix/'
# TYPE,DCOtype = 'BBH', 'BBH'
# # GLOBAL SETTINGS
# whichWeight='z0' # which weighting we want (intrinisic or detecter)
# fs_t = 22
# # FIRST COLUMN WITH CHIRP MASS ###
# xparam, axe_ind, xlabelname ='qLVK', None, r' $q^{-1}$'
# # plot percentiles:
# # # SFRDlist = MSSFRnameslist
# plot_kdes_single(axe=ax, axe_ind=axe_ind, xparam=xparam, BPSmodelNames=[BPSmodelName], MSSFRmodelNames=[MSSFRnameslist[0]], DCOtype=DCOtype, xvarrange=[0, 1], \
# whichWeight=whichWeight, path_dir=path_dir)
# l_ = r'$\textbf{Model %s:} %s$'%(BPSmodelName, alphabetPhysicalNameDict[BPSmodelName])
# # 'Model %s'%(BPSmodelName)
# ax.plot([-1,-1], [-1,-1], c=colorDirDict[BPSmodelName], lw=5, label=l_)
# plt.legend(fontsize=20, frameon=False, loc='upper left')
# # ax.text(0+0.009, 1-0.009, 'model %s%s \n %s'%(BPSmodelName,mssfr,alphabetPhysicalNameDict[BPSmodelName]),\
# # rotation = 0, fontsize = fs+8, color = 'k', ha = 'left', va='top', transform=axe.transAxes, zorder=100)#, weight = 'bold')
# fontsize=24
# for tick in ax.xaxis.get_major_ticks():
# tick.label1.set_fontsize(fontsize)
# tick.label1.set_fontweight('bold')
# for tick in ax.yaxis.get_major_ticks():
# tick.label1.set_fontsize(fontsize)
# tick.label1.set_fontweight('bold')
# ax.set_yscale('log')
# # plt.xscale('log')
# ax.set_xlim(0, 1)
# # ax.xaxis.set_major_formatter(FormatStrFormatter("%.00f"))
# # ax.xaxis.set_minor_formatter(FormatStrFormatter("%.00f"))
# ax.set_ylim(1e-3, 1E1 )
# ax.grid(ls=':',color='grey')
# ax.set_xlabel('${q}^{-1} $',fontsize=fontsize+2, labelpad=2)
# ax.set_ylabel('$\, \mathrm{d}\mathcal{R}_{\mathrm{m}}^{0}/\mathrm{d} {q}^{-1} $ ',fontsize=fontsize+2)
# plt.savefig('./qf_' + BPSmodelName + '.png', dpi = 600, bbox_inches = 'tight')
```
```python
```
```python
```
```python
```
```python
```
```python
# Plot overview: per BPS model, the intrinsic (z=0) rate density of the BBH
# mass ratio in the LVK convention (lighter/heavier, <= 1), one figure per
# model, compared against the observed power-law distribution band.
xlim_low = 0.
xlim_high = 1.
for BPSmodelName in [ 'N', 'O', 'P', 'Q', 'R', 'S', 'T']: #= 'G'
    print('now at model %s'%BPSmodelName)
    ncols, nrows= 1,1
    fig, ax= plt.subplots(ncols=ncols,nrows=nrows,figsize=(16.0, 5.5),
                       gridspec_kw={"width_ratios":1.5*np.ones(ncols), "height_ratios":1*np.ones(nrows)})

    # Observed q distribution: power law q**beta with beta = 1.1 (+1.8 / -1.3),
    # each curve normalised to unit integral over (0, 1]
    xx=np.linspace(1E-4,1, 10000)
    lw=4
    beta =1.1
    yy = xx**(beta)
    norm = np.trapz(yy,xx)
    ax.plot(xx,yy/norm, c='k')
    yy_low = xx**(1.1+1.8)
    norm_low = np.trapz(yy_low,xx)
    yy_high = xx**(1.1-1.3)
    norm_high = np.trapz(yy_high,xx)
    # shaded band spanning the uncertainty on beta
    ax.fill_between(xx, y1=yy_high/norm_high, y2=yy_low/norm_low, color='lightgray', zorder=0 )
    ax.fill_between(xx, y1=yy_low/norm_low, y2=yy/norm, color='lightgray', zorder=0, label = r'$\textbf{Observed Distribution}$' )

    # add location of the COMPAS hdf5 files:
    path_dir = '/Volumes/Andromeda/DATA/AllDCO_bugfix/'
    TYPE,DCOtype = 'BBH', 'BBH'

    # GLOBAL SETTINGS
    whichWeight='z0' # which weighting we want ('z0' = intrinsic, 'det' = detected)
    fs_t = 22

    # plot the mass ratio (LVK convention) for this model, one KDE per
    # MSSFR variation
    xparam, axe_ind, xlabelname ='qLVK', None, r' $q^{-1}$'
    plot_kdes_single(axe=ax, axe_ind=axe_ind, xparam=xparam, BPSmodelNames=[BPSmodelName], MSSFRmodelNames=MSSFRnameslist, DCOtype=DCOtype, xvarrange=[0, 1], \
                      whichWeight=whichWeight, path_dir=path_dir)

    # off-screen line drawn only to get a legend entry in the model's color
    l_ = r'$\textbf{Model %s: }$ %s'%(BPSmodelName, alphabetPhysicalNameDict[BPSmodelName])
    ax.plot([-1,-1], [-1,-1], c=colorDirDict[BPSmodelName], lw=5, label=l_)
    plt.legend(fontsize=20, frameon=False, loc='upper left', ncol=2)

    # enlarge and embolden tick labels
    fontsize=24
    for tick in ax.xaxis.get_major_ticks():
        tick.label1.set_fontsize(fontsize)
        tick.label1.set_fontweight('bold')
    for tick in ax.yaxis.get_major_ticks():
        tick.label1.set_fontsize(fontsize)
        tick.label1.set_fontweight('bold')

    ax.set_yscale('log')
    ax.set_xlim(0.1, 1)
    ax.set_ylim(1e-3, 1E1 )
    ax.grid(ls=':',color='grey')
    ax.set_xlabel('${q}_{\mathrm{f}}^{-1} $',fontsize=fontsize+2, labelpad=2)
    ax.set_ylabel('$\, \mathrm{d}\mathcal{R}_{\mathrm{m}}^{0}/\mathrm{d} {q}_{\mathrm{f}}^{-1} $ ',fontsize=fontsize+2)
    plt.savefig('./q_' + BPSmodelName + '.png', dpi = 600, bbox_inches = 'tight')
```
now at model F
now at m= F
/Volumes/Andromeda/DATA/AllDCO_bugfix/unstableCaseBB/COMPASCompactOutput_BBH_F.h5
['000', '111', '211', '311', '112', '212', '312', '113', '213', '313', '121', '221', '321', '122', '222', '322', '123', '223', '323', '131', '231', '331', '132', '232', '332', '133', '233', '333']
now at mssfr 000
0.9752003356134308
now at mssfr 111
0.9872681128081398
now at mssfr 211
0.9881141296724132
now at mssfr 311
0.98771627825202
now at mssfr 112
0.9821439757405833
now at mssfr 212
0.9879549135745226
now at mssfr 312
0.9825023093986961
now at mssfr 113
0.9736594108623746
now at mssfr 213
0.9789036917558621
now at mssfr 313
0.9741745830346829
now at mssfr 121
0.9893520050555159
now at mssfr 221
0.989552391060108
now at mssfr 321
0.9896317561915967
now at mssfr 122
0.9874553204637768
now at mssfr 222
0.9894145699426015
now at mssfr 322
0.9878680170691704
now at mssfr 123
0.9836580236580336
now at mssfr 223
0.9865691277575376
now at mssfr 323
0.9841783766760708
now at mssfr 131
0.9890071137718545
now at mssfr 231
0.9892127497785951
now at mssfr 331
0.9893050987251795
now at mssfr 132
0.9878947135760437
now at mssfr 232
0.9897710495689757
now at mssfr 332
0.9882557212915078
now at mssfr 133
0.9837766933552585
now at mssfr 233
0.9868588828573474
now at mssfr 333
0.9842409493701365
now at model G
now at m= G
/Volumes/Andromeda/DATA/AllDCO_bugfix/alpha0_1/COMPASCompactOutput_BBH_G.h5
['000', '111', '211', '311', '112', '212', '312', '113', '213', '313', '121', '221', '321', '122', '222', '322', '123', '223', '323', '131', '231', '331', '132', '232', '332', '133', '233', '333']
now at mssfr 000
0.9972600204652355
now at mssfr 111
0.9973645058317732
now at mssfr 211
0.9974294696483605
now at mssfr 311
0.9974790184996369
now at mssfr 112
0.9973590299506443
now at mssfr 212
0.9978537175104425
now at mssfr 312
0.997447907271893
now at mssfr 113
0.9959479707060201
now at mssfr 213
0.9969805906440297
now at mssfr 313
0.9961372243584019
now at mssfr 121
0.997503846731361
now at mssfr 221
0.9974141249195458
now at mssfr 321
0.9975577150140622
now at mssfr 122
0.9977361645278158
now at mssfr 222
0.9978023155069472
now at mssfr 322
0.997803627933041
now at mssfr 123
0.9975446432887104
now at mssfr 223
0.9977979093757059
now at mssfr 323
0.997648243467896
now at mssfr 131
0.9974729372796209
now at mssfr 231
0.9973927087454333
now at mssfr 331
0.9975305654070185
now at mssfr 132
0.9979293386172029
now at mssfr 232
0.9979309664387181
now at mssfr 332
0.9979715244724248
now at mssfr 133
0.997771876524701
now at mssfr 233
0.9979758417346505
now at mssfr 333
0.9978433168406978
now at model H
now at m= H
/Volumes/Andromeda/DATA/AllDCO_bugfix/alpha0_5/COMPASCompactOutput_BBH_H.h5
['000', '111', '211', '311', '112', '212', '312', '113', '213', '313', '121', '221', '321', '122', '222', '322', '123', '223', '323', '131', '231', '331', '132', '232', '332', '133', '233', '333']
now at mssfr 000
0.9974345050272793
now at mssfr 111
0.9962691302501347
now at mssfr 211
0.9960455225476782
now at mssfr 311
0.9963699435460338
now at mssfr 112
0.9969837859114447
now at mssfr 212
0.996889782576077
now at mssfr 312
0.9970938044809964
now at mssfr 113
0.9969750720571806
now at mssfr 213
0.9972176724337729
now at mssfr 313
0.9970333026881112
now at mssfr 121
0.9959094139753216
now at mssfr 221
0.9957720196134159
now at mssfr 321
0.9959486945627494
now at mssfr 122
0.996510288264154
now at mssfr 222
0.9963595800019923
now at mssfr 322
0.9965831444851201
now at mssfr 123
0.9967968668719015
now at mssfr 223
0.996575267444794
now at mssfr 323
0.9968789646086305
now at mssfr 131
0.9958719349069568
now at mssfr 231
0.99573885136997
now at mssfr 331
0.995915435341944
now at mssfr 132
0.9966469959472497
now at mssfr 232
0.9964759538345483
now at mssfr 332
0.9967007515909743
now at mssfr 133
0.9969602120633236
now at mssfr 233
0.9968491815358896
now at mssfr 333
0.9970156609461662
now at model I
now at m= I
/Volumes/Andromeda/DATA/AllDCO_bugfix/alpha2_0/COMPASCompactOutput_BBH_I.h5
['000', '111', '211', '311', '112', '212', '312', '113', '213', '313', '121', '221', '321', '122', '222', '322', '123', '223', '323', '131', '231', '331', '132', '232', '332', '133', '233', '333']
now at mssfr 000
0.9940646596744032
now at mssfr 111
0.9961265548919543
now at mssfr 211
0.9957058919194264
now at mssfr 311
0.9961893857872092
now at mssfr 112
0.9953516928215421
now at mssfr 212
0.9947169456343966
now at mssfr 312
0.9954562927891516
now at mssfr 113
0.9945616867050961
now at mssfr 213
0.993222075894612
now at mssfr 313
0.9947321106971351
now at mssfr 121
0.9955430894173204
now at mssfr 221
0.995105807365865
now at mssfr 321
0.9955269281906873
now at mssfr 122
0.9953813374580968
now at mssfr 222
0.994768356108398
now at mssfr 322
0.9954349229346765
now at mssfr 123
0.9956613383302056
now at mssfr 223
0.995139764561068
now at mssfr 323
0.9957358131140598
now at mssfr 131
0.995445198724082
now at mssfr 231
0.9950386193166206
now at mssfr 331
0.9954383434706168
now at mssfr 132
0.9952898498973387
now at mssfr 232
0.9947582078514632
now at mssfr 332
0.9953446244175754
now at mssfr 133
0.9956655905431819
now at mssfr 233
0.995277688598294
now at mssfr 333
0.9957323080081582
now at model J
now at m= J
/Volumes/Andromeda/DATA/AllDCO_bugfix/alpha10/COMPASCompactOutput_BBH_J.h5
['000', '111', '211', '311', '112', '212', '312', '113', '213', '313', '121', '221', '321', '122', '222', '322', '123', '223', '323', '131', '231', '331', '132', '232', '332', '133', '233', '333']
now at mssfr 000
0.9973821243758866
now at mssfr 111
0.9978555989110082
now at mssfr 211
0.9970440780750569
now at mssfr 311
0.997863436324126
now at mssfr 112
0.9975248286443376
now at mssfr 212
0.996923916801899
now at mssfr 312
0.9975892503294346
now at mssfr 113
0.9975609668950953
now at mssfr 213
0.9972827430872554
now at mssfr 313
0.9975282219439713
now at mssfr 121
0.9970252466568661
now at mssfr 221
0.9961405268933513
now at mssfr 321
0.9969735577084197
now at mssfr 122
0.9966512412748814
now at mssfr 222
0.9956047507416098
now at mssfr 322
0.9967010664876199
now at mssfr 123
0.9972789191218172
now at mssfr 223
0.996291714855129
now at mssfr 323
0.9973298039824398
now at mssfr 131
0.9969343670195903
now at mssfr 231
0.9961037658533415
now at mssfr 331
0.9968924502052037
now at mssfr 132
0.9966943521829306
now at mssfr 232
0.9957734770768839
now at mssfr 332
0.9967348064598525
now at mssfr 133
0.9974205428695393
now at mssfr 233
0.9967942381077105
now at mssfr 333
0.9974406555543992
now at model K
now at m= K
/Volumes/Andromeda/DATA/AllDCO_bugfix/fiducial/COMPASCompactOutput_BBH_K.h5
['000', '111', '211', '311', '112', '212', '312', '113', '213', '313', '121', '221', '321', '122', '222', '322', '123', '223', '323', '131', '231', '331', '132', '232', '332', '133', '233', '333']
now at mssfr 000
0.9756086257500153
now at mssfr 111
0.9870054229198999
now at mssfr 211
0.9879808847259101
now at mssfr 311
0.9874602759018093
now at mssfr 112
0.9823503842120584
now at mssfr 212
0.9885172190846081
now at mssfr 312
0.9828508921521508
now at mssfr 113
0.973245002691199
now at mssfr 213
0.9791532371584242
now at mssfr 313
0.9739901350525447
now at mssfr 121
0.9892082891908376
now at mssfr 221
0.9896130301167697
now at mssfr 321
0.9894935069410513
now at mssfr 122
0.9874746019403327
now at mssfr 222
0.989911664229898
now at mssfr 322
0.9879391165204384
now at mssfr 123
0.9833366016383036
now at mssfr 223
0.9867279988945815
now at mssfr 323
0.983951393776251
now at mssfr 131
0.9889132886626695
now at mssfr 231
0.9893099064583188
now at mssfr 331
0.9892140334152474
now at mssfr 132
0.9879219568819783
now at mssfr 232
0.9902138592859769
now at mssfr 332
0.9883354516903949
now at mssfr 133
0.9834406734431842
now at mssfr 233
0.9868846217087505
now at mssfr 333
0.9840103982392796
now at model L
now at m= L
/Volumes/Andromeda/DATA/AllDCO_bugfix/rapid/COMPASCompactOutput_BBH_L.h5
['000', '111', '211', '311', '112', '212', '312', '113', '213', '313', '121', '221', '321', '122', '222', '322', '123', '223', '323', '131', '231', '331', '132', '232', '332', '133', '233', '333']
now at mssfr 000
0.9928356228751785
now at mssfr 111
0.9942269768819665
now at mssfr 211
0.9947547665225932
now at mssfr 311
0.994411492218191
now at mssfr 112
0.991935481031159
now at mssfr 212
0.9943851166117579
now at mssfr 312
0.9920425328161393
now at mssfr 113
0.9902823554908123
now at mssfr 213
0.9917412147025838
now at mssfr 313
0.9904399965413835
now at mssfr 121
0.994669549181407
now at mssfr 221
0.994945128069842
now at mssfr 321
0.9947703387826716
now at mssfr 122
0.9941571783862402
now at mssfr 222
0.9951181154998918
now at mssfr 322
0.9943036623057201
now at mssfr 123
0.9937169061873663
now at mssfr 223
0.9947628516714838
now at mssfr 323
0.99386721106926
now at mssfr 131
0.994625910915855
now at mssfr 231
0.9949172822585355
now at mssfr 331
0.9947361425526999
now at mssfr 132
0.9944936017706749
now at mssfr 232
0.995250539894427
now at mssfr 332
0.9946000946109137
now at mssfr 133
0.9940724533904407
now at mssfr 233
0.9949512193402289
now at mssfr 333
0.9941736404257271
now at model M
now at m= M
/Volumes/Andromeda/DATA/AllDCO_bugfix/maxNSmass2_0/COMPASCompactOutput_BBH_M.h5
['000', '111', '211', '311', '112', '212', '312', '113', '213', '313', '121', '221', '321', '122', '222', '322', '123', '223', '323', '131', '231', '331', '132', '232', '332', '133', '233', '333']
now at mssfr 000
0.9955634793466852
now at mssfr 111
0.9948032246881586
now at mssfr 211
0.9945568505594392
now at mssfr 311
0.994876134370861
now at mssfr 112
0.9951854754827011
now at mssfr 212
0.9950202230716574
now at mssfr 312
0.9952937012576362
now at mssfr 113
0.9953186759239183
now at mssfr 213
0.9955834102903665
now at mssfr 313
0.9953340051497981
now at mssfr 121
0.994290270888403
now at mssfr 221
0.9939283249036696
now at mssfr 321
0.9942906143784859
now at mssfr 122
0.994608343714123
now at mssfr 222
0.9942363220382819
now at mssfr 322
0.9946966341188682
now at mssfr 123
0.99496629679843
now at mssfr 223
0.9946626496992975
now at mssfr 323
0.9950596965954767
now at mssfr 131
0.9941059687572679
now at mssfr 231
0.9937831110550471
now at mssfr 331
0.9941179662416976
now at mssfr 132
0.9945492264658259
now at mssfr 232
0.9943245020744277
now at mssfr 332
0.9946430846574555
now at mssfr 133
0.9949494896918978
now at mssfr 233
0.9949302090786226
now at mssfr 333
0.9950383667193056
now at model N
now at m= N
/Volumes/Andromeda/DATA/AllDCO_bugfix/maxNSmass3_0/COMPASCompactOutput_BBH_N.h5
['000', '111', '211', '311', '112', '212', '312', '113', '213', '313', '121', '221', '321', '122', '222', '322', '123', '223', '323', '131', '231', '331', '132', '232', '332', '133', '233', '333']
now at mssfr 000
0.9967519619623656
now at mssfr 111
0.9952189637458619
now at mssfr 211
0.9952327828235381
now at mssfr 311
0.9953477630800046
now at mssfr 112
0.9960708690881727
now at mssfr 212
---------------------------------------------------------------------------
KeyError Traceback (most recent call last)
<ipython-input-5-e3db6216d07f> in <module>
47 # # SFRDlist = MSSFRnameslist
48 plot_kdes_single(axe=ax, axe_ind=axe_ind, xparam=xparam, BPSmodelNames=[BPSmodelName], MSSFRmodelNames=MSSFRnameslist, DCOtype=DCOtype, xvarrange=[0, 1], \
---> 49 whichWeight=whichWeight, path_dir=path_dir)
50
51
<ipython-input-2-ddfdce4bbaed> in plot_kdes_single(axe, axe_ind, xparam, BPSmodelNames, MSSFRmodelNames, DCOtype, xvarrange, whichWeight, path_dir)
141
142 weightheader = 'w_' + mssfr
--> 143 w = fdata[fparam_key][weightheader][...].squeeze()
144
145 # highlight the SFRD if they are one of the following 3
h5py/_objects.pyx in h5py._objects.with_phil.wrapper()
h5py/_objects.pyx in h5py._objects.with_phil.wrapper()
/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages/h5py/_hl/group.py in __getitem__(self, name)
260 raise ValueError("Invalid HDF5 object reference")
261 else:
--> 262 oid = h5o.open(self.id, self._e(name), lapl=self._lapl)
263
264 otype = h5i.get_type(oid)
h5py/_objects.pyx in h5py._objects.with_phil.wrapper()
h5py/_objects.pyx in h5py._objects.with_phil.wrapper()
h5py/h5o.pyx in h5py.h5o.open()
KeyError: "Unable to open object (file read failed: time = Fri Nov 19 21:27:22 2021\n, filename = '/Volumes/Andromeda/DATA/AllDCO_bugfix/maxNSmass3_0/COMPASCompactOutput_BBH_N.h5', file descriptor = 58, errno = 5, error message = 'Input/output error', buf = 0x7f9376cad360, total read size = 512, bytes this sub-read = 512, bytes actually read = 18446744073709551615, offset = 17967031032)"









```python
```
```python
```
```python
```
|
FloorBroekgaardenREPO_NAMEDouble-Compact-Object-MergersPATH_START.@Double-Compact-Object-Mergers_extracted@Double-Compact-Object-Mergers-main@plottingCode@Detectable_Distributions_GWTC-3@Untitled2.ipynb@.PATH_END.py
|
{
"filename": "test_cwrappers.py",
"repo_name": "AWehrhahn/PyReduce",
"repo_path": "PyReduce_extracted/PyReduce-master/test/test_cwrappers.py",
"type": "Python"
}
|
# -*- coding: utf-8 -*-
import numpy as np
import pytest
from pyreduce.cwrappers import slitfunc, slitfunc_curved
def test_slitfunc_vert():
    """The vertical slit-function wrapper accepts valid input and rejects bad input."""
    img = np.full((100, 100), 1, dtype=float)
    ycen = np.full(100, 0, dtype=float)
    lambda_sp = 0
    lambda_sf = 0.1
    osample = 1

    # The nominal call must run without raising.
    slitfunc(img, ycen, lambda_sp, lambda_sf, osample)

    # Malformed arguments must be rejected by the Python wrapper before the
    # data is handed to the C code. Each entry: (expected exception, args).
    bad_calls = [
        # img: None and non-numeric inputs
        (AssertionError, (None, ycen, lambda_sp, lambda_sf, osample)),
        (ValueError, ("bla", ycen, lambda_sp, lambda_sf, osample)),
        # ycen: None and non-numeric inputs
        (AssertionError, (img, None, lambda_sp, lambda_sf, osample)),
        (ValueError, (img, "blub", lambda_sp, lambda_sf, osample)),
        # lambda_sp: None and non-numeric inputs
        (TypeError, (img, ycen, None, lambda_sf, osample)),
        (ValueError, (img, ycen, "bla", lambda_sf, osample)),
        # lambda_sf: None and non-numeric inputs
        (TypeError, (img, ycen, lambda_sp, None, osample)),
        (ValueError, (img, ycen, lambda_sp, "bla", osample)),
        # osample: None and non-numeric inputs
        (TypeError, (img, ycen, lambda_sp, lambda_sf, None)),
        (ValueError, (img, ycen, lambda_sp, lambda_sf, "bla")),
        # ycen length must match the image width
        (AssertionError, (img, np.full(50, 1, dtype=float), lambda_sp, lambda_sf, osample)),
        # osample must be positive, smoothing parameters non-negative
        (AssertionError, (img, ycen, lambda_sp, lambda_sf, 0)),
        (AssertionError, (img, ycen, lambda_sp, -1, osample)),
        (AssertionError, (img, ycen, -1, lambda_sf, osample)),
    ]
    for expected, args in bad_calls:
        with pytest.raises(expected):
            slitfunc(*args)
def test_slitfunc_curved():
    """Smoke-test the curved slit-decomposition wrapper.

    Mirrors ``test_slitfunc_vert`` but for ``slitfunc_curved``, which takes
    additional per-column ``tilt`` and ``shear`` arrays and a ``yrange``.
    Each malformed input must be rejected with the expected exception type
    before the call reaches the C code.
    """
    img = np.full((100, 100), 1)
    ycen = np.full(100, 50)
    tilt = np.full(100, 0)
    shear = np.full(100, 0)
    lambda_sp = 0  # spectrum smoothing parameter
    lambda_sf = 0.1  # slit-function smoothing parameter
    osample = 1  # oversampling factor
    yrange = (49, 50)  # rows below/above ycen included in the swath
    # Run it once the way it is supposed to
    slitfunc_curved(img, ycen, tilt, shear, lambda_sp, lambda_sf, osample, yrange)
    # Scalar tilt/shear must also be accepted (broadcast to all columns).
    slitfunc_curved(img, ycen, 1, 0.01, lambda_sp, lambda_sf, osample, yrange)
    # Then try different incompatible inputs, which have to be caught before going to the C code
    with pytest.raises(AssertionError):
        slitfunc_curved(None, ycen, tilt, shear, lambda_sp, lambda_sf, osample, yrange)
    with pytest.raises(ValueError):
        slitfunc_curved("bla", ycen, tilt, shear, lambda_sp, lambda_sf, osample, yrange)
    with pytest.raises(AssertionError):
        slitfunc_curved(img, None, tilt, shear, lambda_sp, lambda_sf, osample, yrange)
    with pytest.raises(ValueError):
        slitfunc_curved(img, "blub", tilt, shear, lambda_sp, lambda_sf, osample, yrange)
    with pytest.raises(AssertionError):
        slitfunc_curved(img, ycen, tilt, None, lambda_sp, lambda_sf, osample, yrange)
    with pytest.raises(ValueError):
        slitfunc_curved(img, ycen, tilt, "boo", lambda_sp, lambda_sf, osample, yrange)
    with pytest.raises(TypeError):
        slitfunc_curved(img, ycen, tilt, shear, None, lambda_sf, osample, yrange)
    with pytest.raises(ValueError):
        slitfunc_curved(img, ycen, tilt, shear, "bla", lambda_sf, osample, yrange)
    with pytest.raises(TypeError):
        slitfunc_curved(img, ycen, tilt, shear, lambda_sp, None, osample, yrange)
    with pytest.raises(ValueError):
        slitfunc_curved(img, ycen, tilt, shear, lambda_sp, "bla", osample, yrange)
    with pytest.raises(TypeError):
        slitfunc_curved(img, ycen, tilt, shear, lambda_sp, lambda_sf, None, yrange)
    with pytest.raises(ValueError):
        slitfunc_curved(img, ycen, tilt, shear, lambda_sp, lambda_sf, "bla", yrange)
    # Then try different sizes for img and ycen
    with pytest.raises(AssertionError):
        ycen_bad = np.full(50, 0, dtype=float)
        slitfunc_curved(
            img, ycen_bad, tilt, shear, lambda_sp, lambda_sf, osample, yrange
        )
    with pytest.raises(AssertionError):
        tilt_bad = np.full(50, 0, dtype=float)
        slitfunc_curved(
            img, ycen, tilt_bad, shear, lambda_sp, lambda_sf, osample, yrange
        )
    with pytest.raises(AssertionError):
        shear_bad = np.full(50, 0, dtype=float)
        slitfunc_curved(
            img, ycen, tilt, shear_bad, lambda_sp, lambda_sf, osample, yrange
        )
    # Out-of-range parameter values must also be rejected.
    with pytest.raises(AssertionError):
        slitfunc_curved(img, ycen, tilt, shear, lambda_sp, lambda_sf, 0, yrange)
    with pytest.raises(AssertionError):
        slitfunc_curved(img, ycen, tilt, shear, lambda_sp, -1, osample, yrange)
    with pytest.raises(AssertionError):
        slitfunc_curved(img, ycen, tilt, shear, -1, lambda_sf, osample, yrange)
|
AWehrhahnREPO_NAMEPyReducePATH_START.@PyReduce_extracted@PyReduce-master@test@test_cwrappers.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "saopicc/killMS",
"repo_path": "killMS_extracted/killMS-master/killMS/Gridder/__init__.py",
"type": "Python"
}
|
#!/usr/bin/env python
"""
killMS, a package for calibration in radio interferometry.
Copyright (C) 2013-2017 Cyril Tasse, l'Observatoire de Paris,
SKA South Africa, Rhodes University
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
|
saopiccREPO_NAMEkillMSPATH_START.@killMS_extracted@killMS-master@killMS@Gridder@__init__.py@.PATH_END.py
|
{
"filename": "ternary_ops_test.py",
"repo_name": "tensorflow/tensorflow",
"repo_path": "tensorflow_extracted/tensorflow-master/tensorflow/compiler/tests/ternary_ops_test.py",
"type": "Python"
}
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test cases for ternary operators."""
from absl.testing import parameterized
import numpy as np
import scipy.special as sps
from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import googletest
class TernaryOpsTest(xla_test.XLATestCase, parameterized.TestCase):
  """Tests XLA compilation of ternary (three-operand) TensorFlow operators."""

  def _testTernary(self, op, a, b, c, expected, rtol=1e-3, atol=1e-6):
    """Run ``op`` on placeholders fed with a, b, c and compare to expected.

    Args:
      op: callable taking three tensors and returning one tensor.
      a: numpy array fed to the first placeholder.
      b: numpy array fed to the second placeholder.
      c: numpy array fed to the third placeholder.
      expected: numpy array compared against the op result with allclose.
      rtol: relative tolerance for the comparison.
      atol: absolute tolerance for the comparison.

    Returns:
      The concrete numpy result of running the op.
    """
    with self.session() as session:
      with self.test_scope():
        pa = array_ops.placeholder(dtypes.as_dtype(a.dtype), a.shape, name="a")
        pb = array_ops.placeholder(dtypes.as_dtype(b.dtype), b.shape, name="b")
        pc = array_ops.placeholder(dtypes.as_dtype(c.dtype), c.shape, name="c")
        output = op(pa, pb, pc)
      result = session.run(output, {pa: a, pb: b, pc: c})
      self.assertAllClose(result, expected, rtol=rtol, atol=atol)
      return result

  @parameterized.parameters(
      {'start': 1, 'end': 2, 'num': 1},
      {'start': 1, 'end': 4, 'num': 3},
      {'start': 0, 'end': 41, 'num': 42})
  @test_util.disable_mlir_bridge(
      'TODO(b/156174708): Dynamic result types not supported')
  def testLinspace(self, start, end, num):
    """linspace must match numpy and hit the exact endpoints."""
    expected = np.linspace(start, end, num, dtype=np.float32)
    result = self._testTernary(
        math_ops.linspace,
        np.float32(start),
        np.float32(end),
        np.int32(num),
        expected)
    # According to linspace spec, start has to be the first element and end has
    # to be last element.
    self.assertEqual(result[-1], expected[-1])
    self.assertEqual(result[0], expected[0])

  def testRange(self):
    """range(start, limit, delta) for single- and multi-element outputs."""
    self._testTernary(
        math_ops.range,
        np.int32(1),
        np.int32(2),
        np.int32(1),
        expected=np.array([1], dtype=np.int32))
    self._testTernary(
        math_ops.range,
        np.int32(1),
        np.int32(7),
        np.int32(2),
        expected=np.array([1, 3, 5], dtype=np.int32))

  def testSelect(self):
    """where (v1) with scalar and per-row/element boolean conditions."""
    for dtype in self.numeric_types:
      self._testTernary(
          array_ops.where,
          np.array(False),
          np.array(2, dtype=dtype),
          np.array(7, dtype=dtype),
          expected=np.array(7, dtype=dtype))
      self._testTernary(
          array_ops.where,
          np.array(True),
          np.array([1, 2, 3, 4], dtype=dtype),
          np.array([5, 6, 7, 8], dtype=dtype),
          expected=np.array([1, 2, 3, 4], dtype=dtype))
      self._testTernary(
          array_ops.where,
          np.array(False),
          np.array([[1, 2], [3, 4], [5, 6]], dtype=dtype),
          np.array([[7, 8], [9, 10], [11, 12]], dtype=dtype),
          expected=np.array([[7, 8], [9, 10], [11, 12]], dtype=dtype))
      self._testTernary(
          array_ops.where,
          np.array([0, 1, 1, 0], dtype=np.bool_),
          np.array([1, 2, 3, 4], dtype=dtype),
          np.array([5, 6, 7, 8], dtype=dtype),
          expected=np.array([5, 2, 3, 8], dtype=dtype))
      self._testTernary(
          array_ops.where,
          np.array([0, 1, 0], dtype=np.bool_),
          np.array([[1, 2], [3, 4], [5, 6]], dtype=dtype),
          np.array([[7, 8], [9, 10], [11, 12]], dtype=dtype),
          expected=np.array([[7, 8], [3, 4], [11, 12]], dtype=dtype))

  def testSelectV2(self):
    """where_v2, including the broadcasting cases v1 does not support."""
    for dtype in self.numeric_types:
      self._testTernary(
          array_ops.where_v2,
          np.array(False),
          np.array(2, dtype=dtype),
          np.array(7, dtype=dtype),
          expected=np.array(7, dtype=dtype))
      self._testTernary(
          array_ops.where_v2,
          np.array(True),
          np.array([1, 2, 3, 4], dtype=dtype),
          np.array([5, 6, 7, 8], dtype=dtype),
          expected=np.array([1, 2, 3, 4], dtype=dtype))
      self._testTernary(
          array_ops.where_v2,
          np.array(False),
          np.array([[1, 2], [3, 4], [5, 6]], dtype=dtype),
          np.array([[7, 8], [9, 10], [11, 12]], dtype=dtype),
          expected=np.array([[7, 8], [9, 10], [11, 12]], dtype=dtype))
      self._testTernary(
          array_ops.where_v2,
          np.array([0, 1, 1, 0], dtype=np.bool_),
          np.array([1, 2, 3, 4], dtype=dtype),
          np.array([5, 6, 7, 8], dtype=dtype),
          expected=np.array([5, 2, 3, 8], dtype=dtype))
      # Broadcast the condition
      self._testTernary(
          array_ops.where_v2,
          np.array([0, 1], dtype=np.bool_),
          np.array([[1, 2], [3, 4], [5, 6]], dtype=dtype),
          np.array([[7, 8], [9, 10], [11, 12]], dtype=dtype),
          expected=np.array([[7, 2], [9, 4], [11, 6]], dtype=dtype))
      # Broadcast the then branch to the else
      self._testTernary(
          array_ops.where_v2,
          np.array([[0, 1], [1, 0], [1, 1]], dtype=np.bool_),
          np.array([[1, 2]], dtype=dtype),
          np.array([[7, 8], [9, 10], [11, 12]], dtype=dtype),
          expected=np.array([[7, 2], [1, 10], [1, 2]], dtype=dtype))
      # Broadcast the else branch to the then
      self._testTernary(
          array_ops.where_v2,
          np.array([[1, 0], [0, 1], [0, 0]], dtype=np.bool_),
          np.array([[7, 8], [9, 10], [11, 12]], dtype=dtype),
          np.array([[1, 2]], dtype=dtype),
          expected=np.array([[7, 2], [1, 10], [1, 2]], dtype=dtype))
      # Broadcast the then/else branches to the condition
      self._testTernary(
          array_ops.where_v2,
          np.array([[1, 0], [0, 1], [1, 1]], dtype=np.bool_),
          np.array(7, dtype=dtype),
          np.array(8, dtype=dtype),
          expected=np.array([[7, 8], [8, 7], [7, 7]], dtype=dtype))
      self._testTernary(
          array_ops.where_v2,
          np.array([[1, 0], [0, 1], [0, 0]], dtype=np.bool_),
          np.array(7, dtype=dtype),
          np.array([8, 9], dtype=dtype),
          expected=np.array([[7, 9], [8, 7], [8, 9]], dtype=dtype))

  def testSlice(self):
    """slice on empty and non-empty 2-D inputs."""
    for dtype in self.numeric_types:
      self._testTernary(
          array_ops.slice,
          np.array([[], [], []], dtype=dtype),
          np.array([1, 0], dtype=np.int32),
          np.array([2, 0], dtype=np.int32),
          expected=np.array([[], []], dtype=dtype),
      )
      self._testTernary(
          array_ops.slice,
          np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=dtype),
          np.array([0, 1], dtype=np.int32),
          np.array([2, 1], dtype=np.int32),
          expected=np.array([[2], [5]], dtype=dtype),
      )

  def testClipByValue(self):
    """clip_by_value with scalar and tensor bounds (both broadcast cases)."""
    for dtype in (
        self.numeric_types - self.complex_types - self.unsigned_int_types
    ):
      test_cases = [
          (np.array([2, 4, 5], dtype=dtype), dtype(7)),  #
          (dtype(1), np.array([2, 4, 5], dtype=dtype)),  #
          (
              np.array([-2, 7, 7], dtype=dtype),
              np.array([-2, 9, 8], dtype=dtype),
          ),
      ]
      x = np.array([-2, 10, 6], dtype=dtype)
      for lower, upper in test_cases:
        self._testTernary(
            gen_math_ops._clip_by_value,
            x,
            lower,
            upper,
            expected=np.minimum(np.maximum(x, lower), upper))

  def testBetaincSanity(self):
    """Sanity-check betainc against SciPy on hand-picked identity cases."""
    # This operation is only supported for float32 and float64.
    for dtype in self.numeric_types & {np.float32, np.float64}:
      # Sanity check a few identities:
      # - betainc(a, b, 0) == 0
      # - betainc(a, b, 1) == 1
      # - betainc(a, 1, x) == x ** a
      # Compare against the implementation in SciPy.
      a = np.array([.3, .4, .2, .2], dtype=dtype)
      b = np.array([1., 1., .4, .4], dtype=dtype)
      x = np.array([.3, .4, .0, .1], dtype=dtype)
      expected = sps.betainc(a, b, x)
      self._testTernary(
          math_ops.betainc, a, b, x, expected, rtol=5e-6, atol=6e-6)

  @parameterized.parameters(
      {
          'sigma': 1e15,
          'rtol': 1e-6,
          'atol': 1e-4
      },
      {
          'sigma': 30,
          'rtol': 1e-6,
          'atol': 2e-3
      },
      {
          'sigma': 1e-8,
          'rtol': 5e-4,
          'atol': 3e-4
      },
      {
          'sigma': 1e-16,
          'rtol': 1e-6,
          'atol': 2e-4
      },
  )
  def testBetainc(self, sigma, rtol, atol):
    """Compare betainc against SciPy on random inputs at several scales."""
    # This operation is only supported for float32 and float64.
    for dtype in self.numeric_types & {np.float32, np.float64}:
      # Randomly generate a, b, x in the numerical domain of betainc.
      # Compare against the implementation in SciPy.
      a = np.abs(np.random.randn(10, 10) * sigma).astype(dtype)  # in (0, infty)
      b = np.abs(np.random.randn(10, 10) * sigma).astype(dtype)  # in (0, infty)
      x = np.random.rand(10, 10).astype(dtype)  # in (0, 1)
      expected = sps.betainc(a, b, x, dtype=dtype)
      self._testTernary(
          math_ops.betainc, a, b, x, expected, rtol=rtol, atol=atol)
if __name__ == "__main__":
googletest.main()
|
tensorflowREPO_NAMEtensorflowPATH_START.@tensorflow_extracted@tensorflow-master@tensorflow@compiler@tests@ternary_ops_test.py@.PATH_END.py
|
{
"filename": "fit_gaussian_process.py",
"repo_name": "daniel-muthukrishna/astrorapid",
"repo_path": "astrorapid_extracted/astrorapid-master/astrorapid/fit_gaussian_process.py",
"type": "Python"
}
|
import os
import pickle
import numpy as np
import matplotlib.pyplot as plt
import multiprocessing as mp
import celerite
from celerite import terms
from scipy.optimize import minimize
def combined_neg_log_like(params, fluxes, gp_lcs, passbands):
    """Negative total GP log-likelihood summed over all passbands.

    The same kernel parameter vector ``params`` is pushed into every
    per-passband GP (a side effect on the objects in ``gp_lcs``) before its
    log-likelihood of that band's fluxes is evaluated, so minimising this
    function fits a single shared kernel across all bands.
    """
    total = 0.0
    for band in passbands:
        gp = gp_lcs[band]
        gp.set_parameter_vector(params)
        total += gp.log_likelihood(fluxes[band])
    return -total
def fit_gaussian_process_one_argument(args):
    """Unpack one task tuple and forward it to :func:`fit_gaussian_process`.

    Exists so fits can be dispatched through ``multiprocessing.Pool.map``,
    which hands each worker exactly one positional argument.
    """
    lightcurve, object_id, bands, do_plot, do_extrapolate = args
    return fit_gaussian_process(lightcurve, object_id, bands, do_plot, do_extrapolate)
def fit_gaussian_process(lc, objid, passbands, plot, extrapolate, bad_loglike_thresh=-2000):
    """Fit a Matern-3/2 celerite Gaussian process to each passband of a light curve.

    One shared kernel parameter vector is optimised jointly over all
    passbands via :func:`combined_neg_log_like`. Fits that raise, produce
    non-finite predictions, or have a per-band log-likelihood below
    ``bad_loglike_thresh`` cause the whole object to be dropped.

    Parameters
    ----------
    lc : table-like with 'passband', 'time', 'flux', 'fluxErr' columns
        Multiband light curve (assumed astropy-table-like: columns expose
        ``.data`` — TODO confirm against callers).
    objid : str or int
        Identifier, used only for log messages.
    passbands : sequence of str
        Passband labels to fit, e.g. ('g', 'r').
    plot : bool
        If True, plot the data and GP prediction for each band.
    extrapolate : bool
        If True, evaluate/check the GP on the extended grid
        [min(time, -70), max(time, 80)] instead of the observed range.
    bad_loglike_thresh : float
        Reject fits whose per-band log-likelihood is below this value.

    Returns
    -------
    tuple or None
        ``(gp_lc, objid)`` with ``gp_lc`` mapping passband -> fitted
        ``celerite.GP``, or ``None`` on any failure.
    """
    print(f"Fitting GP to {objid}")
    gp_lc = {}
    if plot:
        plt.figure()
    # Single shared Matern-3/2 kernel; its parameters are optimised jointly below.
    kernel = terms.Matern32Term(log_sigma=5., log_rho=3.)
    times, fluxes, fluxerrs = {}, {}, {}
    for pbidx, pb in enumerate(passbands):
        pbmask = lc['passband'] == pb
        # Sort each band by time before conditioning the GP on it.
        sortedidx = np.argsort(lc[pbmask]['time'].data)
        times[pb] = lc[pbmask]['time'].data[sortedidx]
        fluxes[pb] = lc[pbmask]['flux'].data[sortedidx]
        fluxerrs[pb] = lc[pbmask]['fluxErr'].data[sortedidx]
        try:
            gp_lc[pb] = celerite.GP(kernel)
            gp_lc[pb].compute(times[pb], fluxerrs[pb])
        except Exception as e:
            # celerite raises e.g. on non-finite or non-sorted inputs; drop object.
            print("Failed object", objid, e)
            return
        # print("Initial log likelihood: {0}".format(gp_lc[pb].log_likelihood(fluxes[pb])))
        initial_params = gp_lc[pb].get_parameter_vector()  # This should be the same across passbands
        bounds = gp_lc[pb].get_parameter_bounds()
    # Optimise parameters
    try:
        r = minimize(combined_neg_log_like, initial_params, method="L-BFGS-B", bounds=bounds,
                     args=(fluxes, gp_lc, passbands))
        # print(r)
    except Exception as e:
        print("Failed object", objid, e)
        return
    for pbidx, pb in enumerate(passbands):
        # Push the optimised shared parameters into every per-band GP.
        gp_lc[pb].set_parameter_vector(r.x)
        time = times[pb]
        flux = fluxes[pb]
        fluxerr = fluxerrs[pb]
        # print("Final log likelihood: {0}".format(gp_lc[pb].log_likelihood(flux)))
        # Remove objects with bad fits
        if extrapolate is True:
            x = np.linspace(min(min(time), -70), max(max(time), 80), 5000)
        else:
            x = np.linspace(min(time), max(time), 5000)
        pred_mean, pred_var = gp_lc[pb].predict(flux, x, return_var=True)
        if np.any(~np.isfinite(pred_mean)) or gp_lc[pb].log_likelihood(flux) < bad_loglike_thresh:
            print("Bad fit for object", objid)
            return
        # Plot GP fit
        if plot:
            # Predict with GP
            if extrapolate:
                x = np.linspace(min(min(time), -70), max(max(time), 80), 5000)
            else:
                x = np.linspace(min(time), max(time), 5000)
            pred_mean, pred_var = gp_lc[pb].predict(flux, x, return_var=True)
            pred_std = np.sqrt(pred_var)
            color = {'g': 'tab:green', 'r': "tab:red", 'i': "tab:purple", 'z': "tab:brown"}
            # plt.plot(time, flux, "k", lw=1.5, alpha=0.3)
            plt.errorbar(time, flux, yerr=fluxerr, fmt=".", capsize=0, color=color[pb])
            plt.plot(x, pred_mean, color=color[pb], label=pb)
            plt.fill_between(x, pred_mean + pred_std, pred_mean - pred_std, color=color[pb], alpha=0.3,
                             edgecolor="none")
    if plot:
        plt.xlabel("Days since trigger", fontsize=15)
        plt.ylabel("Flux", fontsize=15)
        plt.xticks(fontsize=15)
        plt.yticks(fontsize=15)
        plt.legend(fontsize=15)
        plt.show()
        # if extrapolate:
        #     plt.savefig(f'/Users/danmuth/PycharmProjects/transomaly/plots/gp_fits/extrapolated/gp_{objid}.pdf')
        # else:
        #     plt.savefig(f'/Users/danmuth/PycharmProjects/transomaly/plots/gp_fits/gp_{objid}.pdf')
        plt.close()
    return gp_lc, objid
def save_gps(light_curves, save_dir='data/saved_light_curves/', class_num=None, passbands=('g', 'r'), plot=False,
             nprocesses=1, redo=False, extrapolate=False):
    """Fit (or load cached) Gaussian processes for a set of light curves.

    Results are pickled per class to ``save_dir``; a cached pickle is reused
    unless ``redo`` is True. With ``nprocesses > 1`` the fits are farmed out
    via ``multiprocessing``.

    Don't plot in parallel.

    Parameters
    ----------
    light_curves : dict
        Mapping objid -> light curve table (as accepted by
        :func:`fit_gaussian_process`).
    save_dir : str
        Directory for the cache pickle (must already exist).
    class_num : int or None
        Class label used in the cache filename.
    passbands, plot, extrapolate :
        Passed through to :func:`fit_gaussian_process`.
    nprocesses : int
        Number of worker processes; 1 runs serially (required for plotting).
    redo : bool
        If True, ignore any cached pickle and refit.

    Returns
    -------
    dict
        Mapping objid -> dict of per-passband fitted GPs; failed fits are
        silently omitted.
    """
    if extrapolate:
        save_gp_filepath = os.path.join(save_dir, f"gp_classnum_{class_num}_extrapolate.pickle")
    else:
        save_gp_filepath = os.path.join(save_dir, f"gp_classnum_{class_num}.pickle")
    if os.path.exists(save_gp_filepath) and not redo:
        with open(save_gp_filepath, "rb") as fp:  # Unpickling
            saved_gp_fits = pickle.load(fp)
    else:
        args_list = []
        for objid, lc in light_curves.items():
            args_list.append((lc, objid, passbands, plot, extrapolate))
        saved_gp_fits = {}
        if nprocesses == 1:
            # Serial path: safe to plot.
            for args in args_list:
                out = fit_gaussian_process_one_argument(args)
                if out is not None:
                    gp_lc, objid = out
                    saved_gp_fits[objid] = gp_lc
        else:
            pool = mp.Pool(nprocesses)
            results = pool.map_async(fit_gaussian_process_one_argument, args_list)
            pool.close()
            pool.join()
            outputs = results.get()
            print('combining results...')
            # Workers return None for failed objects; keep only successes.
            for i, output in enumerate(outputs):
                print(i, len(outputs))
                if output is not None:
                    gp_lc, objid = output
                    saved_gp_fits[objid] = gp_lc
        with open(save_gp_filepath, "wb") as fp:  # Pickling
            pickle.dump(saved_gp_fits, fp)
    return saved_gp_fits
|
daniel-muthukrishnaREPO_NAMEastrorapidPATH_START.@astrorapid_extracted@astrorapid-master@astrorapid@fit_gaussian_process.py@.PATH_END.py
|
{
"filename": "exceptions.py",
"repo_name": "cosmo-ethz/CosmoHammer",
"repo_path": "CosmoHammer_extracted/CosmoHammer-master/cosmoHammer/exceptions.py",
"type": "Python"
}
|
#Created on Nov 11, 2013
#author: jakeret
class LikelihoodComputationException(Exception):
    """Signals a failure while computing a likelihood.

    Common base class for likelihood-related errors; carries no state and,
    like the original, accepts no constructor arguments.
    """

    def __init__(self):
        """Create the exception without attaching any state."""
        pass


class InvalidLikelihoodException(LikelihoodComputationException):
    """Signals an invalid likelihood value, e.g. ``-loglike >= 0.0``."""

    def __init__(self, params=None):
        # Keep the parameter vector that produced the invalid likelihood so
        # handlers can inspect or log it.
        self.params = params
|
cosmo-ethzREPO_NAMECosmoHammerPATH_START.@CosmoHammer_extracted@CosmoHammer-master@cosmoHammer@exceptions.py@.PATH_END.py
|
{
"filename": "_ids.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/heatmap/_ids.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class IdsValidator(_plotly_utils.basevalidators.DataArrayValidator):
    """Validator for the ``ids`` data array of heatmap traces."""

    def __init__(self, plotly_name="ids", parent_name="heatmap", **kwargs):
        # Pull the edit type out of kwargs first so a caller-supplied value
        # overrides the "calc" default.
        edit_type = kwargs.pop("edit_type", "calc")
        super().__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@heatmap@_ids.py@.PATH_END.py
|
{
"filename": "plasticc_make_predictions.py",
"repo_name": "LSSTDESC/snmachine",
"repo_path": "snmachine_extracted/snmachine-main/snmachine/utils/plasticc_make_predictions.py",
"type": "Python"
}
|
LSSTDESCREPO_NAMEsnmachinePATH_START.@snmachine_extracted@snmachine-main@snmachine@utils@plasticc_make_predictions.py@.PATH_END.py
|
|
{
"filename": "slsqp.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/scipy/py3/scipy/optimize/slsqp.py",
"type": "Python"
}
|
# This file is not meant for public use and will be removed in SciPy v2.0.0.
# Use the `scipy.optimize` namespace for importing the functions
# included below.
import warnings
from . import _slsqp_py
__all__ = [ # noqa: F822
'OptimizeResult',
'append',
'approx_derivative',
'approx_jacobian',
'array',
'asfarray',
'atleast_1d',
'concatenate',
'exp',
'finfo',
'fmin_slsqp',
'inf',
'isfinite',
'linalg',
'old_bound_to_new',
'slsqp',
'sqrt',
'vstack',
'zeros',
]
def __dir__():
    """Module-level ``__dir__`` hook (PEP 562): expose only the public names."""
    return __all__
def __getattr__(name):
    """Module-level ``__getattr__`` hook (PEP 562) for the deprecated namespace.

    Unknown names raise AttributeError; known public names emit a
    DeprecationWarning and resolve to the private ``_slsqp_py`` module.
    """
    if name not in __all__:
        raise AttributeError(
            "scipy.optimize.slsqp is deprecated and has no attribute "
            f"{name}. Try looking in scipy.optimize instead.")
    # stacklevel=2 points the warning at the caller's attribute access.
    warnings.warn(f"Please use `{name}` from the `scipy.optimize` namespace, "
                  "the `scipy.optimize.slsqp` namespace is deprecated.",
                  category=DeprecationWarning, stacklevel=2)
    return getattr(_slsqp_py, name)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@scipy@py3@scipy@optimize@slsqp.py@.PATH_END.py
|
{
"filename": "classic.py",
"repo_name": "eggplantbren/DNest4",
"repo_path": "DNest4_extracted/DNest4-master/python/dnest4/classic.py",
"type": "Python"
}
|
import copy
import numpy as np
import numpy.random as rng
from .loading import *
def logsumexp(values):
    """Compute log(sum(exp(values))) without overflow.

    Uses the standard log-sum-exp trick: shift by the maximum element before
    exponentiating so the result stays finite even for very large inputs.
    """
    shift = np.max(values)
    return shift + np.log(np.sum(np.exp(values - shift)))
def logdiffexp(x1, x2):
    """Compute log(exp(x1) - exp(x2)) for x1 >= x2 without overflow.

    Factors exp(x1) out of the difference so only exp(x2 - x1) <= 1 is ever
    evaluated; equivalent to the shift-by-largest formulation.
    """
    return x1 + np.log(1.0 - np.exp(x2 - x1))
def postprocess(temperature=1., numResampleLogX=1, plot=True, loaded=[],
                cut=0., save=True, zoom_in=True, compression_bias_min=1.,
                verbose=True, compression_scatter=0., moreSamples=1.,
                compression_assert=None, single_precision=False, rng_seed=None):
    """Turn DNest4 output files into evidence estimates and posterior samples.

    Reads ``levels.txt``, ``sample_info.txt`` and ``sample.txt`` (unless the
    first two are supplied via ``loaded``), assigns each saved particle a
    prior mass log(X) by (optionally Monte-Carlo perturbed) level
    compressions, and importance-reweights the particles to the posterior.

    Parameters
    ----------
    temperature : float
        Annealing temperature applied to the log-likelihoods.
    numResampleLogX : int
        Number of Monte-Carlo assignments of log(X); > 1 also gives error bars.
    plot : bool
        Show diagnostic matplotlib figures.
    loaded : sequence
        Optional pre-loaded ``(levels, sample_info)`` arrays. Only read, never
        mutated, so the mutable default is safe.
    cut : float
        Fraction of the initial samples to discard as burn-in.
    save : bool
        Write ``weights.txt``, ``log_prior_weights.txt`` and
        ``posterior_sample.txt``.
    zoom_in : bool
        Restrict the log(L) plot's y-range to the interesting region.
    compression_bias_min, compression_scatter : float
        Bias/scatter applied when Monte-Carlo perturbing the compressions.
    moreSamples : float
        Multiplier on the effective sample size for the resampled posterior.
    compression_assert : float or None
        If given, overwrite the measured compressions with this fixed value.
    single_precision : bool
        Load/store the posterior samples as float32.
    rng_seed : int or None
        Seed for numpy's global RNG (reproducible resampling).

    Returns
    -------
    list
        ``[logz_estimate, H_estimate, logx_samples]``.
    """
    if rng_seed is not None:
        rng.seed(rng_seed)
    if len(loaded) == 0:
        levels_orig = np.atleast_2d(my_loadtxt("levels.txt"))
        sample_info = np.atleast_2d(my_loadtxt("sample_info.txt"))
    else:
        levels_orig, sample_info = loaded[0], loaded[1]
    # Remove regularisation from levels_orig if we asked for it
    if compression_assert is not None:
        levels_orig[1:, 0] = -np.cumsum(compression_assert*np.ones(levels_orig.shape[0] - 1))
    cut = int(cut*sample_info.shape[0])
    sample_info = sample_info[cut:, :]
    if plot:
        import matplotlib.pyplot as plt
        plt.figure(1)
        plt.plot(sample_info[:, 0], "k")
        plt.xlabel("Iteration")
        plt.ylabel("Level")
        plt.figure(2)
        plt.subplot(2, 1, 1)
        plt.plot(np.diff(levels_orig[:, 0]), "k")
        plt.ylabel("Compression")
        plt.xlabel("Level")
        xlim = plt.gca().get_xlim()
        plt.axhline(-1., color='g')
        plt.axhline(-np.log(10.), color='g', linestyle="--")
        plt.ylim(top=0.05)
        plt.subplot(2, 1, 2)
        good = np.nonzero(levels_orig[:, 4] > 0)[0]
        plt.plot(levels_orig[good, 3]/levels_orig[good, 4], "ko-")
        plt.xlim(xlim)
        plt.ylim([0., 1.])
        plt.xlabel("Level")
        plt.ylabel("MH Acceptance")
    # Convert to lists of tuples
    logl_levels = [(levels_orig[i, 1], levels_orig[i, 2]) for i in range(0, levels_orig.shape[0])]  # logl, tiebreaker
    logl_samples = [(sample_info[i, 1], sample_info[i, 2], i) for i in range(0, sample_info.shape[0])]  # logl, tiebreaker, id
    logx_samples = np.zeros((sample_info.shape[0], numResampleLogX))
    logp_samples = np.zeros((sample_info.shape[0], numResampleLogX))
    logP_samples = np.zeros((sample_info.shape[0], numResampleLogX))
    P_samples = np.zeros((sample_info.shape[0], numResampleLogX))
    logz_estimates = np.zeros((numResampleLogX, 1))
    H_estimates = np.zeros((numResampleLogX, 1))
    # Find sandwiching level for each sample (highest level whose (logl,
    # tiebreaker) the sample still exceeds).
    sandwich = sample_info[:, 0].copy().astype('int')
    for i in range(0, sample_info.shape[0]):
        while sandwich[i] < levels_orig.shape[0]-1 and logl_samples[i] > logl_levels[sandwich[i] + 1]:
            sandwich[i] += 1
    for z in range(0, numResampleLogX):
        # Make a monte carlo perturbation of the level compressions
        levels = levels_orig.copy()
        compressions = -np.diff(levels[:, 0])
        compressions *= compression_bias_min + (1. - compression_bias_min)*np.random.rand()
        compressions *= np.exp(compression_scatter*np.random.randn(compressions.size))
        levels[1:, 0] = -compressions
        levels[:, 0] = np.cumsum(levels[:, 0])
        # For each level
        for i in range(0, levels.shape[0]):
            # Find the samples sandwiched by this level
            which = np.nonzero(sandwich == i)[0]
            logl_samples_thisLevel = []  # (logl, tieBreaker, ID)
            for j in range(0, len(which)):
                logl_samples_thisLevel.append(copy.deepcopy(logl_samples[which[j]]))
            logl_samples_thisLevel = sorted(logl_samples_thisLevel)
            N = len(logl_samples_thisLevel)
            # Generate intermediate logx values
            logx_max = levels[i, 0]
            if i == levels.shape[0]-1:
                logx_min = -1E300
            else:
                logx_min = levels[i+1, 0]
            Umin = np.exp(logx_min - logx_max)
            if N == 0 or numResampleLogX > 1:
                U = Umin + (1. - Umin)*np.random.rand(len(which))
            else:
                U = Umin + (1. - Umin)*np.linspace(1./(N+1), 1. - 1./(N+1), N)
            logx_samples_thisLevel = np.sort(logx_max + np.log(U))[::-1]
            for j in range(0, which.size):
                logx_samples[logl_samples_thisLevel[j][2]][z] = logx_samples_thisLevel[j]
                # Trapezoidal-style prior mass: half the gap between the
                # neighbouring log(X) values.
                if j != which.size - 1:
                    left = logx_samples_thisLevel[j+1]
                elif i == levels.shape[0]-1:
                    left = -1E300
                else:
                    left = levels[i+1][0]
                if j != 0:
                    right = logx_samples_thisLevel[j-1]
                else:
                    right = levels[i][0]
                logp_samples[logl_samples_thisLevel[j][2]][z] = np.log(0.5) + logdiffexp(right, left)
        logl = sample_info[:, 1]/temperature
        logp_samples[:, z] = logp_samples[:, z] - logsumexp(logp_samples[:, z])
        logP_samples[:, z] = logp_samples[:, z] + logl
        logz_estimates[z] = logsumexp(logP_samples[:, z])
        logP_samples[:, z] -= logz_estimates[z]
        P_samples[:, z] = np.exp(logP_samples[:, z])
        H_estimates[z] = -logz_estimates[z] + np.sum(P_samples[:, z]*logl)
        if plot:
            plt.figure(3)
            plt.subplot(2, 1, 1)
            plt.plot(logx_samples[:, z], sample_info[:, 1], 'k.', label='Samples')
            plt.plot(levels[1:, 0], levels[1:, 1], 'g.', label='Levels')
            plt.legend(numpoints=1, loc='lower left')
            plt.ylabel('log(L)')
            plt.title(str(z+1) + "/" + str(numResampleLogX) + ", log(Z) = " + str(logz_estimates[z][0]))
            # Use all plotted logl values to set ylim
            combined_logl = np.hstack([sample_info[:, 1], levels[1:, 1]])
            combined_logl = np.sort(combined_logl)
            lower = combined_logl[int(0.1*combined_logl.size)]
            upper = combined_logl[-1]
            diff = upper - lower
            lower -= 0.05*diff
            upper += 0.05*diff
            if zoom_in:
                plt.ylim([lower, upper])
            xlim = plt.gca().get_xlim()
        if plot:
            plt.subplot(2, 1, 2)
            plt.plot(logx_samples[:, z], P_samples[:, z], 'k.')
            plt.ylabel('Posterior Weights')
            plt.xlabel('log(X)')
            plt.xlim(xlim)
    # Log prior weights, averaged over the log(X) resamples.
    # BUGFIX: the original assigned the scalar to the *name* each iteration
    # (logp_samples_averaged = ...) instead of the i-th element, discarding
    # the array.
    logp_samples_averaged = np.empty(len(P_samples))
    for i in range(len(P_samples)):
        logp_samples_averaged[i] = logsumexp(logp_samples[i, :]) \
                                   - np.log(logp_samples.shape[1])
    # NOTE(review): logp_samples_averaged is currently unused; the save below
    # still writes the full logp_samples matrix — presumably the average was
    # intended. Left unchanged to preserve the output file format.
    P_samples = np.mean(P_samples, 1)
    P_samples = P_samples/np.sum(P_samples)
    logz_estimate = np.mean(logz_estimates)
    logz_error = np.std(logz_estimates)
    H_estimate = np.mean(H_estimates)
    H_error = np.std(H_estimates)
    ESS = np.exp(-np.sum(P_samples*np.log(P_samples+1E-300)))
    errorbar1 = ""
    errorbar2 = ""
    if numResampleLogX > 1:
        errorbar1 += " +- " + str(logz_error)
        errorbar2 += " +- " + str(H_error)
    if verbose:
        print("log(Z) = " + str(logz_estimate) + errorbar1)
        print("Information = " + str(H_estimate) + errorbar2 + " nats.")
        print("Effective sample size = " + str(ESS))
    # Resample to uniform weight
    N = int(moreSamples*ESS)
    w = P_samples
    w = w/np.max(w)
    rows = np.empty(N, dtype="int64")
    for i in range(0, N):
        # Rejection sampling: accept row `which` with probability w[which].
        while True:
            which = np.random.randint(sample_info.shape[0])
            if np.random.rand() <= w[which]:
                break
        rows[i] = which + cut
    # Get header row (use a context manager so the file is always closed)
    with open("sample.txt", "r") as f:
        line = f.readline()
    if line[0] == "#":
        header = line[1:]
    else:
        header = ""
    sample = loadtxt_rows("sample.txt", set(rows), single_precision)
    posterior_sample = None
    if single_precision:
        posterior_sample = np.empty((N, sample["ncol"]), dtype="float32")
    else:
        posterior_sample = np.empty((N, sample["ncol"]))
    for i in range(0, N):
        posterior_sample[i, :] = sample[rows[i]]
    if save:
        np.savetxt("log_prior_weights.txt", logp_samples)
        np.savetxt('weights.txt', w)
        if single_precision:
            np.savetxt("posterior_sample.txt", posterior_sample, fmt="%.7e",
                       header=header)
        else:
            np.savetxt("posterior_sample.txt", posterior_sample,
                       header=header)
    if plot:
        plt.show()
    return [logz_estimate, H_estimate, logx_samples]
def postprocess_abc(temperature=1., numResampleLogX=1, plot=True, loaded=[],
                    cut=0., save=True, zoom_in=True, compression_bias_min=1., verbose=True,
                    compression_scatter=0., moreSamples=1., compression_assert=None,
                    single_precision=False, threshold_fraction=0.8, rng_seed=None):
    """ABC variant of :func:`postprocess`.

    Identical to ``postprocess`` except that particles whose assigned log(X)
    lies above ``threshold_fraction * min(levels logX)`` receive zero
    posterior weight (their log prior weight is set to -1E300), implementing
    an Approximate Bayesian Computation style acceptance threshold.

    Parameters mirror :func:`postprocess`, plus:

    threshold_fraction : float
        Fraction of the deepest level's log(X) used as the ABC acceptance
        threshold.

    Returns ``[logz_estimate, H_estimate, logx_samples]``.
    """
    if rng_seed is not None:
        rng.seed(rng_seed)
    if len(loaded) == 0:
        levels_orig = np.atleast_2d(my_loadtxt("levels.txt"))
        sample_info = np.atleast_2d(my_loadtxt("sample_info.txt"))
    else:
        levels_orig, sample_info = loaded[0], loaded[1]
    # Remove regularisation from levels_orig if we asked for it
    if compression_assert is not None:
        levels_orig[1:, 0] = -np.cumsum(compression_assert*np.ones(levels_orig.shape[0] - 1))
    cut = int(cut*sample_info.shape[0])
    sample_info = sample_info[cut:, :]
    if plot:
        import matplotlib.pyplot as plt
        plt.figure(1)
        plt.plot(sample_info[:, 0], "k")
        plt.xlabel("Iteration")
        plt.ylabel("Level")
        plt.figure(2)
        plt.subplot(2, 1, 1)
        plt.plot(np.diff(levels_orig[:, 0]), "k")
        plt.ylabel("Compression")
        plt.xlabel("Level")
        xlim = plt.gca().get_xlim()
        plt.axhline(-1., color='g')
        plt.axhline(-np.log(10.), color='g', linestyle="--")
        plt.ylim(top=0.05)
        plt.subplot(2, 1, 2)
        good = np.nonzero(levels_orig[:, 4] > 0)[0]
        plt.plot(levels_orig[good, 3]/levels_orig[good, 4], "ko-")
        plt.xlim(xlim)
        plt.ylim([0., 1.])
        plt.xlabel("Level")
        plt.ylabel("MH Acceptance")
    # Convert to lists of tuples
    logl_levels = [(levels_orig[i, 1], levels_orig[i, 2]) for i in range(0, levels_orig.shape[0])]  # logl, tiebreaker
    logl_samples = [(sample_info[i, 1], sample_info[i, 2], i) for i in range(0, sample_info.shape[0])]  # logl, tiebreaker, id
    logx_samples = np.zeros((sample_info.shape[0], numResampleLogX))
    logp_samples = np.zeros((sample_info.shape[0], numResampleLogX))
    logP_samples = np.zeros((sample_info.shape[0], numResampleLogX))
    P_samples = np.zeros((sample_info.shape[0], numResampleLogX))
    logz_estimates = np.zeros((numResampleLogX, 1))
    H_estimates = np.zeros((numResampleLogX, 1))
    # Find sandwiching level for each sample
    sandwich = sample_info[:, 0].copy().astype('int')
    for i in range(0, sample_info.shape[0]):
        while sandwich[i] < levels_orig.shape[0]-1 and logl_samples[i] > logl_levels[sandwich[i] + 1]:
            sandwich[i] += 1
    for z in range(0, numResampleLogX):
        # Make a monte carlo perturbation of the level compressions
        levels = levels_orig.copy()
        compressions = -np.diff(levels[:, 0])
        compressions *= compression_bias_min + (1. - compression_bias_min)*np.random.rand()
        compressions *= np.exp(compression_scatter*np.random.randn(compressions.size))
        levels[1:, 0] = -compressions
        levels[:, 0] = np.cumsum(levels[:, 0])
        # For each level
        for i in range(0, levels.shape[0]):
            # Find the samples sandwiched by this level
            which = np.nonzero(sandwich == i)[0]
            logl_samples_thisLevel = []  # (logl, tieBreaker, ID)
            for j in range(0, len(which)):
                logl_samples_thisLevel.append(copy.deepcopy(logl_samples[which[j]]))
            logl_samples_thisLevel = sorted(logl_samples_thisLevel)
            N = len(logl_samples_thisLevel)
            # Generate intermediate logx values
            logx_max = levels[i, 0]
            if i == levels.shape[0]-1:
                logx_min = -1E300
            else:
                logx_min = levels[i+1, 0]
            Umin = np.exp(logx_min - logx_max)
            if N == 0 or numResampleLogX > 1:
                U = Umin + (1. - Umin)*np.random.rand(len(which))
            else:
                U = Umin + (1. - Umin)*np.linspace(1./(N+1), 1. - 1./(N+1), N)
            logx_samples_thisLevel = np.sort(logx_max + np.log(U))[::-1]
            for j in range(0, which.size):
                logx_samples[logl_samples_thisLevel[j][2]][z] = logx_samples_thisLevel[j]
                if j != which.size - 1:
                    left = logx_samples_thisLevel[j+1]
                elif i == levels.shape[0]-1:
                    left = -1E300
                else:
                    left = levels[i+1][0]
                if j != 0:
                    right = logx_samples_thisLevel[j-1]
                else:
                    right = levels[i][0]
                logp_samples[logl_samples_thisLevel[j][2]][z] = np.log(0.5) + logdiffexp(right, left)
        logl = sample_info[:, 1]/temperature
        logp_samples[:, z] = logp_samples[:, z] - logsumexp(logp_samples[:, z])
        # Define the threshold for ABC, in terms of log(X)
        threshold = threshold_fraction*levels[:, 0].min()
        # Particles below threshold get no posterior weight
        logp_samples[logx_samples > threshold] = -1E300
        logP_samples[:, z] = logp_samples[:, z] + logl
        logz_estimates[z] = logsumexp(logP_samples[:, z])
        logP_samples[:, z] -= logz_estimates[z]
        P_samples[:, z] = np.exp(logP_samples[:, z])
        H_estimates[z] = -logz_estimates[z] + np.sum(P_samples[:, z]*logl)
        if plot:
            plt.figure(3)
            plt.subplot(2, 1, 1)
            plt.plot(logx_samples[:, z], sample_info[:, 1], 'k.', label='Samples')
            plt.plot(levels[1:, 0], levels[1:, 1], 'g.', label='Levels')
            plt.legend(numpoints=1, loc='lower left')
            plt.ylabel('log(L)')
            plt.title(str(z+1) + "/" + str(numResampleLogX) + ", log(Z) = " + str(logz_estimates[z][0]))
            # Use all plotted logl values to set ylim
            combined_logl = np.hstack([sample_info[:, 1], levels[1:, 1]])
            combined_logl = np.sort(combined_logl)
            lower = combined_logl[int(0.1*combined_logl.size)]
            upper = combined_logl[-1]
            diff = upper - lower
            lower -= 0.05*diff
            upper += 0.05*diff
            if zoom_in:
                plt.ylim([lower, upper])
            xlim = plt.gca().get_xlim()
        if plot:
            plt.subplot(2, 1, 2)
            plt.plot(logx_samples[:, z], P_samples[:, z], 'k.')
            plt.ylabel('Posterior Weights')
            plt.xlabel('log(X)')
            plt.xlim(xlim)
    P_samples = np.mean(P_samples, 1)
    P_samples = P_samples/np.sum(P_samples)
    logz_estimate = np.mean(logz_estimates)
    logz_error = np.std(logz_estimates)
    H_estimate = np.mean(H_estimates)
    H_error = np.std(H_estimates)
    ESS = np.exp(-np.sum(P_samples*np.log(P_samples+1E-300)))
    errorbar1 = ""
    errorbar2 = ""
    if numResampleLogX > 1:
        errorbar1 += " +- " + str(logz_error)
        errorbar2 += " +- " + str(H_error)
    if verbose:
        print("log(Z) = " + str(logz_estimate) + errorbar1)
        print("Information = " + str(H_estimate) + errorbar2 + " nats.")
        print("Effective sample size = " + str(ESS))
    # Resample to uniform weight
    N = int(moreSamples*ESS)
    w = P_samples
    w = w/np.max(w)
    rows = np.empty(N, dtype="int64")
    for i in range(0, N):
        # Rejection sampling: accept row `which` with probability w[which].
        while True:
            which = np.random.randint(sample_info.shape[0])
            if np.random.rand() <= w[which]:
                break
        rows[i] = which + cut
    sample = loadtxt_rows("sample.txt", set(rows), single_precision)
    posterior_sample = None
    if single_precision:
        posterior_sample = np.empty((N, sample["ncol"]), dtype="float32")
    else:
        posterior_sample = np.empty((N, sample["ncol"]))
    for i in range(0, N):
        posterior_sample[i, :] = sample[rows[i]]
    if save:
        np.savetxt('weights.txt', w)
        if single_precision:
            np.savetxt("posterior_sample.txt", posterior_sample, fmt="%.7e")
        else:
            np.savetxt("posterior_sample.txt", posterior_sample)
    if plot:
        plt.show()
    return [logz_estimate, H_estimate, logx_samples]
def diffusion_plot():
    """
    Plot a nice per-particle diffusion plot: one level-vs-iteration
    trace for each particle ID found in sample_info.txt.
    """
    import matplotlib.pyplot as plt
    sample_info = np.atleast_2d(my_loadtxt('sample_info.txt'))
    # Column 3 holds the particle ID, column 0 the level index.
    particle_id = sample_info[:, 3].astype('int')
    level = sample_info[:, 0].astype('int')
    iteration = np.arange(1, sample_info.shape[0] + 1)
    # Draw one trace per particle.
    for pid in range(particle_id.max() + 1):
        mask = np.nonzero(particle_id == pid)[0]
        plt.plot(iteration[mask], level[mask])
    plt.xlabel('Iteration')
    plt.ylabel('Level')
    plt.show()
def levels_plot():
    """
    Plot the differences between the logl values of the levels
    (log10 of the log-likelihood gap between successive levels).
    """
    import matplotlib.pyplot as plt
    levels = my_loadtxt('levels.txt')
    log10_gaps = np.log10(np.diff(levels[:, 1]))
    plt.plot(log10_gaps, "ko-")
    plt.ylim([-1, 6])
    # Guide lines: a gap of 1 nat (solid) and the 0.75-nat target (dashed).
    plt.axhline(0., color='g', linewidth=2)
    plt.axhline(np.log10(0.75), color='g', linestyle='--')
    plt.xlabel('Level')
    plt.ylabel('$\\log_{10}$(Delta log likelihood)')
    plt.show()
|
eggplantbrenREPO_NAMEDNest4PATH_START.@DNest4_extracted@DNest4-master@python@dnest4@classic.py@.PATH_END.py
|
{
"filename": "_end.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/surface/contours/y/_end.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class EndValidator(_plotly_utils.basevalidators.NumberValidator):
    """Validator for the ``surface.contours.y.end`` number property."""

    def __init__(self, plotly_name="end", parent_name="surface.contours.y", **kwargs):
        # Default the edit type to "calc" unless the caller overrides it.
        kwargs.setdefault("edit_type", "calc")
        super(EndValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@surface@contours@y@_end.py@.PATH_END.py
|
{
"filename": "writer.py",
"repo_name": "waynebhayes/SpArcFiRe",
"repo_path": "SpArcFiRe_extracted/SpArcFiRe-master/scripts/SpArcFiRe-pyvenv/lib/python2.7/site-packages/astropy/utils/xml/writer.py",
"type": "Python"
}
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Contains a class that makes it simple to stream out well-formed and
nicely-indented XML.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from ...extern import six
# STDLIB
import contextlib
import textwrap
# Optional dependency: ``bleach`` enables the 'bleach_clean' XML cleaning
# method. Record its availability instead of failing at import time.
try:
    import bleach
except ImportError:
    HAS_BLEACH = False
else:
    HAS_BLEACH = True
try:
    from . import _iterparser
except ImportError:
    # Pure-Python fallbacks when the compiled _iterparser extension is not
    # available. NOTE: the previous text had the XML entities un-escaped by
    # a broken extraction step (e.g. ``s.replace("&", "&")``), which made the
    # escapes no-ops and left a syntax error in the quote replacement; the
    # proper entity references are restored here.
    def xml_escape_cdata(s):
        """
        Escapes &, < and > in an XML CDATA string.
        """
        # '&' must be replaced first so the '&' introduced by the other
        # entity references is not double-escaped.
        s = s.replace("&", "&amp;")
        s = s.replace("<", "&lt;")
        s = s.replace(">", "&gt;")
        return s

    def xml_escape(s):
        """
        Escapes &, ', ", < and > in an XML attribute value.
        """
        s = s.replace("&", "&amp;")
        s = s.replace("'", "&apos;")
        s = s.replace("\"", "&quot;")
        s = s.replace("<", "&lt;")
        s = s.replace(">", "&gt;")
        return s
else:
    # Use the fast C implementations when available.
    xml_escape_cdata = _iterparser.escape_xml_cdata
    xml_escape = _iterparser.escape_xml
class XMLWriter:
    """
    A class to write well-formed and nicely indented XML.

    Use like this::

        w = XMLWriter(fh)
        with w.tag('html'):
            with w.tag('body'):
                w.data('This is the content')

    Which produces::

        <html>
         <body>
          This is the content
         </body>
        </html>
    """

    def __init__(self, file):
        """
        Parameters
        ----------
        file : writable file-like object.
        """
        self.write = file.write
        if hasattr(file, "flush"):
            self.flush = file.flush
        self._open = 0  # true if start tag is open
        self._tags = []  # stack of currently open tag names
        self._data = []  # buffered character data for the current element
        # One space of indent per nesting level; supports up to 64 levels.
        self._indentation = " " * 64
        # Escaping hooks; swappable via ``xml_cleaning_method``.
        self.xml_escape_cdata = xml_escape_cdata
        self.xml_escape = xml_escape

    def _flush(self, indent=True, wrap=False):
        """
        Flush internal buffers: close a pending start tag and emit any
        buffered character data (escaped, and optionally text-wrapped).
        """
        if self._open:
            if indent:
                self.write(">\n")
            else:
                self.write(">")
            self._open = 0
        if self._data:
            data = ''.join(self._data)
            if wrap:
                # ``indent`` is rebound here as the indentation *string*
                # used by textwrap; the boolean flag is not needed below.
                indent = self.get_indentation_spaces(1)
                data = textwrap.fill(
                    data,
                    initial_indent=indent,
                    subsequent_indent=indent)
                self.write('\n')
                self.write(self.xml_escape_cdata(data))
                self.write('\n')
                self.write(self.get_indentation_spaces())
            else:
                self.write(self.xml_escape_cdata(data))
            self._data = []

    def start(self, tag, attrib={}, **extra):
        """
        Opens a new element.  Attributes can be given as keyword
        arguments, or as a string/string dictionary. The method
        returns an opaque identifier that can be passed to the
        :meth:`close` method, to close all open elements up to and
        including this one.

        Parameters
        ----------
        tag : str
            The element name

        attrib : dict of str -> str
            Attribute dictionary.  Alternatively, attributes can
            be given as keyword arguments.

        Returns
        -------
        id : int
            Returns an element identifier.
        """
        self._flush()
        # This is just busy work -- we know our tag names are clean
        # tag = xml_escape_cdata(tag)
        self._data = []
        self._tags.append(tag)
        # The tag was just pushed, so indent at one level *less* than the
        # new stack depth.
        self.write(self.get_indentation_spaces(-1))
        self.write("<{}".format(tag))
        if attrib or extra:
            attrib = attrib.copy()
            attrib.update(extra)
            # Sort for deterministic output.
            attrib = list(six.iteritems(attrib))
            attrib.sort()
            for k, v in attrib:
                if v is not None:
                    # This is just busy work -- we know our keys are clean
                    # k = xml_escape_cdata(k)
                    v = self.xml_escape(v)
                    self.write(" {}=\"{}\"".format(k, v))
        self._open = 1
        return len(self._tags)

    @contextlib.contextmanager
    def xml_cleaning_method(self, method='escape_xml', **clean_kwargs):
        """Context manager to control how XML data tags are cleaned (escaped) to
        remove potentially unsafe characters or constructs.

        The default (``method='escape_xml'``) applies brute-force escaping of
        certain key XML characters like ``<``, ``>``, and ``&`` to ensure that
        the output is not valid XML.

        In order to explicitly allow certain XML tags (e.g. link reference or
        emphasis tags), use ``method='bleach_clean'``.  This sanitizes the data
        string using the ``clean`` function of the
        `bleach <http://bleach.readthedocs.io/en/latest/clean.html>`_ package.
        Any additional keyword arguments will be passed directly to the
        ``clean`` function.

        Finally, use ``method='none'`` to disable any sanitization. This should
        be used sparingly.

        Example::

          w = writer.XMLWriter(ListWriter(lines))
          with w.xml_cleaning_method('bleach_clean'):
              w.start('td')
              w.data('<a href="http://google.com">google.com</a>')
              w.end()

        Parameters
        ----------
        method : str
            Cleaning method.  Allowed values are "escape_xml",
            "bleach_clean", and "none".

        **clean_kwargs : keyword args
            Additional keyword args that are passed to the
            bleach.clean() function.
        """
        current_xml_escape_cdata = self.xml_escape_cdata
        if method == 'bleach_clean':
            if HAS_BLEACH:
                if clean_kwargs is None:  # defensive; **kwargs is always a dict
                    clean_kwargs = {}
                self.xml_escape_cdata = lambda x: bleach.clean(x, **clean_kwargs)
            else:
                raise ValueError('bleach package is required when HTML escaping is disabled.\n'
                                 'Use "pip install bleach".')
        elif method == "none":
            self.xml_escape_cdata = lambda x: x
        elif method != 'escape_xml':
            raise ValueError('allowed values of method are "escape_xml", "bleach_clean", and "none"')
        try:
            yield
        finally:
            # Restore the previous escape function even if the managed body
            # raised, so a failed block cannot leave the writer unescaped.
            self.xml_escape_cdata = current_xml_escape_cdata

    @contextlib.contextmanager
    def tag(self, tag, attrib={}, **extra):
        """
        A convenience method for creating wrapper elements using the
        ``with`` statement.

        Examples
        --------
        >>> with writer.tag('foo'):  # doctest: +SKIP
        ...     writer.element('bar')
        ...     # </foo> is implicitly closed here
        ...

        Parameters are the same as to `start`.
        """
        self.start(tag, attrib, **extra)
        yield
        self.end(tag)

    def comment(self, comment):
        """
        Adds a comment to the output stream.

        Parameters
        ----------
        comment : str
            Comment text, as a Unicode string.
        """
        self._flush()
        self.write(self.get_indentation_spaces())
        self.write("<!-- {} -->\n".format(self.xml_escape_cdata(comment)))

    def data(self, text):
        """
        Adds character data to the output stream.

        Parameters
        ----------
        text : str
            Character data, as a Unicode string.
        """
        self._data.append(text)

    def end(self, tag=None, indent=True, wrap=False):
        """
        Closes the current element (opened by the most recent call to
        `start`).

        Parameters
        ----------
        tag : str
            Element name.  If given, the tag must match the start tag.
            If omitted, the current element is closed.

        Raises
        ------
        ValueError
            If there is no open element, or ``tag`` does not match the
            most recently opened one.
        """
        if tag:
            if not self._tags:
                raise ValueError("unbalanced end({})".format(tag))
            if tag != self._tags[-1]:
                raise ValueError("expected end({}), got {}".format(
                    self._tags[-1], tag))
        else:
            if not self._tags:
                raise ValueError("unbalanced end()")
            tag = self._tags.pop()
        if self._data:
            self._flush(indent, wrap)
        elif self._open:
            # Start tag still open and no data: emit a self-closing element.
            self._open = 0
            self.write("/>\n")
            return
        if indent:
            self.write(self.get_indentation_spaces())
        self.write("</{}>\n".format(tag))

    def close(self, id):
        """
        Closes open elements, up to (and including) the element identified
        by the given identifier.

        Parameters
        ----------
        id : int
            Element identifier, as returned by the `start` method.
        """
        while len(self._tags) > id:
            self.end()

    def element(self, tag, text=None, wrap=False, attrib={}, **extra):
        """
        Adds an entire element.  This is the same as calling `start`,
        `data`, and `end` in sequence. The ``text`` argument
        can be omitted.
        """
        self.start(tag, attrib, **extra)
        if text:
            self.data(text)
        self.end(indent=False, wrap=wrap)

    def flush(self):
        pass  # replaced by the constructor if the file supports flush()

    def get_indentation(self):
        """
        Returns the number of indentation levels the file is currently
        in.
        """
        return len(self._tags)

    def get_indentation_spaces(self, offset=0):
        """
        Returns a string of spaces that matches the current
        indentation level.
        """
        return self._indentation[:len(self._tags) + offset]

    @staticmethod
    def object_attrs(obj, attrs):
        """
        Converts an object with a bunch of attributes on an object
        into a dictionary for use by the `XMLWriter`.

        Parameters
        ----------
        obj : object
            Any Python object

        attrs : sequence of str
            Attribute names to pull from the object

        Returns
        -------
        attrs : dict
            Maps attribute names to the values retrieved from
            ``obj.attr``.  If any of the attributes is `None`, it will
            not appear in the output dictionary.
        """
        d = {}
        for attr in attrs:
            if getattr(obj, attr) is not None:
                # XML convention: underscores become hyphens in attribute names.
                d[attr.replace('_', '-')] = six.text_type(getattr(obj, attr))
        return d
|
waynebhayesREPO_NAMESpArcFiRePATH_START.@SpArcFiRe_extracted@SpArcFiRe-master@scripts@SpArcFiRe-pyvenv@lib@python2.7@site-packages@astropy@utils@xml@writer.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "koustavchandra/gwforge",
"repo_path": "gwforge_extracted/gwforge-main/GWForge/inject/__init__.py",
"type": "Python"
}
|
koustavchandraREPO_NAMEgwforgePATH_START.@gwforge_extracted@gwforge-main@GWForge@inject@__init__.py@.PATH_END.py
|
|
{
"filename": "__init__.py",
"repo_name": "pynucastro/pynucastro",
"repo_path": "pynucastro_extracted/pynucastro-main/pynucastro/networks/__init__.py",
"type": "Python"
}
|
"""The pynucastro modules that support the creation of networks.

There are several main submodules here:

:mod:`rate_collection <pynucastro.networks.rate_collection>`: this is
simply a collection of rates that knows about the links connecting
nuclei.  This is used as the base for the different classes that write
code to output networks for integration.

:mod:`python_network <pynucastro.networks.python_network>`: the
support routines to generate a full, integrable network in python.

:mod:`base_cxx_network <pynucastro.networks.base_cxx_network>`:
the support routines to generate a standalone integrable network in
pure C++.

:mod:`simple_cxx_network <pynucastro.networks.simple_cxx_network>`:
the support routines to generate a simple pure C++ network for
interfacing with simulation codes.

:mod:`amrexastro_cxx_network <pynucastro.networks.amrexastro_cxx_network>`:
the support routines to generate a C++ network that can be incorporated
into the AMReX-Astro Microphysics routines supported by astrophysical
hydrodynamics codes.

"""
#__all__ = ["python_network", "rate_collection", "sympy_network_support"]
from .amrexastro_cxx_network import AmrexAstroCxxNetwork
from .base_cxx_network import BaseCxxNetwork
from .fortran_network import FortranNetwork
from .nse_network import NSENetwork
from .numpy_network import NumpyNetwork
from .python_network import PythonNetwork
from .rate_collection import (Composition, Explorer, RateCollection,
                              RateDuplicationError)
from .simple_cxx_network import SimpleCxxNetwork
from .sympy_network_support import SympyRates

# Alias: keeps the former StarKiller-era name pointing at the AMReX-Astro
# network class (presumably for backwards compatibility — confirm with
# project history before removing).
StarKillerCxxNetwork = AmrexAstroCxxNetwork
|
pynucastroREPO_NAMEpynucastroPATH_START.@pynucastro_extracted@pynucastro-main@pynucastro@networks@__init__.py@.PATH_END.py
|
{
"filename": "conf.py",
"repo_name": "icecube/skyllh",
"repo_path": "skyllh_extracted/skyllh-master/doc/sphinx/conf.py",
"type": "Python"
}
|
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
from datetime import (
date,
)
import os
import sys
# Make the skyllh package importable for autodoc/apidoc.
sys.path.insert(0, os.path.abspath('../../'))
# -- Project information -----------------------------------------------------
year = date.today().year
project = u'SkyLLH'
copyright = u'%s, The IceCube Collaboration, T. Kontrimas, M. Wolf' % year
author = u'The IceCube Collaboration'
# The short X.Y version
version = u'24.1.0'
# The full version, including alpha/beta/rc tags
release = u''
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.autosummary',
    'sphinx.ext.napoleon',
    'sphinx.ext.todo',
    'sphinx.ext.mathjax',
    'sphinx.ext.intersphinx',
    'nbsphinx',
    'sphinxcontrib.apidoc',
    'sphinx_multiversion',
    'sphinx_rtd_theme'
]
# Extensions configuration.
# sphinxcontrib-apidoc: auto-generate the API reference from the skyllh
# package into the 'reference' directory ('-d 0' limits the TOC depth).
apidoc_module_dir = '../../skyllh'
apidoc_output_dir = 'reference'
apidoc_module_first = True
apidoc_toc_file = False
apidoc_extra_args = ['-d', '0']
# sphinx-multiversion: build docs for every branch/remote except HEAD and
# the gh-pages deployment branch.
smv_branch_whitelist = r'^(?!HEAD|gh-pages).*$'
smv_remote_whitelist = r'^.*$'
smv_outputdir_format = '{ref.name}/html'
autoclass_content = 'both'
autodoc_default_options = {
    'members': True,
    'member-order': 'bysource',
    'special-members': True,
    'undoc-members': True,
    'exclude-members': ','.join([
        '__abstractmethods__',
        '__dict__',
        '__hash__',
        '__init__',
        '__module__',
        '__str__',
        '__weakref__',
    ]),
    'show-inheritance': True,
}
autosummary_generate = True
napoleon_use_rtype = False
# Cross-reference numpy/scipy objects in their online documentation.
intersphinx_mapping = {'numpy': ('https://numpy.org/doc/stable/', None),
                       'scipy': ('https://docs.scipy.org/doc/scipy/',
                                 None)}
# Never execute notebooks during the docs build; render them as-is.
nbsphinx_execute = 'never'
todo_include_todos = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = 'en'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = [
    u'_build', 'Thumbs.db', '.DS_Store', '**.ipynb_checkpoints', '_assets']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = None
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further.  For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ['html_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself.  Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
# The versions.html sidebar is provided by sphinx-multiversion.
html_sidebars = {
    '**': [
        'versions.html',
    ],
}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'SkyLLHdoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #
    'papersize': 'a4paper',
    # The font size ('10pt', '11pt' or '12pt').
    #
    # 'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    #
    # 'preamble': '',
    # Latex figure (float) alignment
    #
    # 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'SkyLLH.tex', u'SkyLLH Documentation',
     u'The IceCube Collaboration', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'skyllh', u'SkyLLH Documentation',
     [author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'SkyLLH', u'SkyLLH Documentation',
     author, 'SkyLLH', 'One line description of project.',
     'Miscellaneous'),
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
icecubeREPO_NAMEskyllhPATH_START.@skyllh_extracted@skyllh-master@doc@sphinx@conf.py@.PATH_END.py
|
{
"filename": "c_backend_spherical.py",
"repo_name": "astro-informatics/s2fft",
"repo_path": "s2fft_extracted/s2fft-main/s2fft/transforms/c_backend_spherical.py",
"type": "Python"
}
|
import healpy
import jax.numpy as jnp
import numpy as np
# C backend functions for which to provide JAX frontend.
import pyssht
from jax import core, custom_vjp
from jax.interpreters import ad
from s2fft.sampling import reindex
from s2fft.utils import quadrature_jax
@custom_vjp
def ssht_inverse(
    flm: jnp.ndarray,
    L: int,
    spin: int = 0,
    reality: bool = False,
    ssht_sampling: int = 0,
    _ssht_backend: int = 1,
) -> jnp.ndarray:
    r"""
    Compute the inverse spin-spherical harmonic transform (SSHT JAX).

    Wraps the pyssht (C / ducc0) inverse transform of McEwen & Wiaux 2011 [1]
    with a JAX-differentiable frontend (custom VJP). CPU only, which is a
    limitation of the underlying SSHT C package.

    Args:
        flm (jnp.ndarray): Spherical harmonic coefficients.

        L (int): Harmonic band-limit.

        spin (int, optional): Harmonic spin. Defaults to 0.

        reality (bool, optional): Whether the signal on the sphere is real,
            in which case conjugate symmetry is exploited to reduce
            computational costs. Defaults to False.

        ssht_sampling (int, optional): Sampling scheme, one of
            {"mw" = 0, "mwss" = 1, "dh" = 2, "gl" = 3}. Defaults to "mw" = 0.

        _ssht_backend (int, optional, experimental): 0 selects the core SSHT
            recursions, 1 the accelerated ducc0 experimental backend. Use
            with caution.

    Returns:
        jnp.ndarray: Signal on the sphere.

    Note:
        [1] McEwen, Jason D. and Yves Wiaux. “A Novel Sampling Theorem on the Sphere.”
        IEEE Transactions on Signal Processing 59 (2011): 5876-5887.
    """
    method = ["MW", "MWSS", "DH", "GL"][ssht_sampling]
    backend = "SSHT" if _ssht_backend == 0 else "ducc0"
    # pyssht expects 1D (flattened) coefficients and plain numpy arrays.
    flm_1d = np.array(reindex.flm_2d_to_1d_fast(flm, L))
    f = pyssht.inverse(
        flm_1d, L, spin, Method=method, Reality=reality, backend=backend
    )
    return jnp.array(f)
def _ssht_inverse_fwd(
    flm: jnp.ndarray,
    L: int,
    spin: int = 0,
    reality: bool = False,
    ssht_sampling: int = 0,
    _ssht_backend: int = 1,
):
    """Forward pass for the custom VJP of :func:`ssht_inverse`.

    Saves only the static transform parameters as residuals; the primal
    output is computed by the wrapped transform itself.
    """
    residuals = ([], L, spin, reality, ssht_sampling, _ssht_backend)
    primal = ssht_inverse(flm, L, spin, reality, ssht_sampling, _ssht_backend)
    return primal, residuals
def _ssht_inverse_bwd(res, f):
    """Private function which implements the backward pass for inverse jax_ssht.

    Maps a cotangent signal ``f`` on the sphere to a cotangent of the input
    harmonic coefficients (the vector-Jacobian product of ``ssht_inverse``).
    Returns one cotangent per primal argument; the non-differentiable static
    arguments get ``None``.
    """
    _, L, spin, reality, ssht_sampling, _ssht_backend = res
    sampling_str = ["MW", "MWSS", "DH", "GL"]
    _backend = "SSHT" if _ssht_backend == 0 else "ducc0"
    if ssht_sampling < 2:  # MW or MWSS sampling
        # pyssht provides the adjoint of the inverse transform directly;
        # conjugating before and after converts the adjoint into the VJP
        # for complex inputs.
        flm = jnp.array(
            np.conj(
                pyssht.inverse_adjoint(
                    np.conj(np.array(f)),
                    L,
                    spin,
                    Method=sampling_str[ssht_sampling],
                    Reality=reality,
                    backend=_backend,
                )
            )
        )
    else:  # DH or GL sampling
        # No adjoint available: undo the quadrature weighting on the
        # cotangent, then use the forward transform as the adjoint.
        quad_weights = quadrature_jax.quad_weights_transform(
            L, sampling_str[ssht_sampling].lower()
        )
        f = jnp.einsum("tp,t->tp", f, 1 / quad_weights, optimize=True)
        flm = jnp.array(
            np.conj(
                pyssht.forward(
                    np.conj(np.array(f)),
                    L,
                    spin,
                    Method=sampling_str[ssht_sampling],
                    Reality=reality,
                    backend=_backend,
                )
            )
        )
    flm_out = reindex.flm_1d_to_2d_fast(flm, L)
    if reality:
        # Real signals: only m >= 0 coefficients are independent. Fold the
        # negative-m cotangents onto positive m via conjugate symmetry
        # (with the (-1)^m phase), then zero the negative-m half.
        m_conj = (-1) ** (jnp.arange(1, L) % 2)
        flm_out = flm_out.at[..., L:].add(
            jnp.flip(m_conj * jnp.conj(flm_out[..., : L - 1]), axis=-1)
        )
        flm_out = flm_out.at[..., : L - 1].set(0)
    return flm_out, None, None, None, None, None
@custom_vjp
def ssht_forward(
    f: jnp.ndarray,
    L: int,
    spin: int = 0,
    reality: bool = False,
    ssht_sampling: int = 0,
    _ssht_backend: int = 1,
) -> jnp.ndarray:
    r"""
    Compute the forward spin-spherical harmonic transform (SSHT JAX).

    Wraps the pyssht (C / ducc0) forward transform of McEwen & Wiaux 2011 [1]
    with a JAX-differentiable frontend (custom VJP). CPU only, which is a
    limitation of the underlying SSHT C package.

    Args:
        f (jnp.ndarray): Signal on the sphere.

        L (int): Harmonic band-limit.

        spin (int, optional): Harmonic spin. Defaults to 0.

        reality (bool, optional): Whether the signal on the sphere is real,
            in which case conjugate symmetry is exploited to reduce
            computational costs. Defaults to False.

        ssht_sampling (int, optional): Sampling scheme, one of
            {"mw" = 0, "mwss" = 1, "dh" = 2, "gl" = 3}. Defaults to "mw" = 0.

        _ssht_backend (int, optional, experimental): 0 selects the core SSHT
            recursions, 1 the accelerated ducc0 experimental backend. Use
            with caution.

    Returns:
        jnp.ndarray: Harmonic coefficients of signal f.

    Note:
        [1] McEwen, Jason D. and Yves Wiaux. “A Novel Sampling Theorem on the Sphere.”
        IEEE Transactions on Signal Processing 59 (2011): 5876-5887.
    """
    method = ["MW", "MWSS", "DH", "GL"][ssht_sampling]
    backend = "SSHT" if _ssht_backend == 0 else "ducc0"
    # pyssht returns 1D (flattened) coefficients; convert to 2D indexing.
    flm_1d = pyssht.forward(
        np.array(f), L, spin, Method=method, Reality=reality, backend=backend
    )
    return reindex.flm_1d_to_2d_fast(jnp.array(flm_1d), L)
def _ssht_forward_fwd(
    f: jnp.ndarray,
    L: int,
    spin: int = 0,
    reality: bool = False,
    ssht_sampling: int = 0,
    _ssht_backend: int = 1,
):
    """Forward pass for the custom VJP of :func:`ssht_forward`.

    Saves only the static transform parameters as residuals; the primal
    output is computed by the wrapped transform itself.
    """
    residuals = ([], L, spin, reality, ssht_sampling, _ssht_backend)
    primal = ssht_forward(f, L, spin, reality, ssht_sampling, _ssht_backend)
    return primal, residuals
def _ssht_forward_bwd(res, flm):
    """Private function which implements the backward pass for forward jax_ssht.

    Maps a cotangent of the harmonic coefficients ``flm`` to a cotangent of
    the input signal (the vector-Jacobian product of ``ssht_forward``).
    Returns one cotangent per primal argument; the non-differentiable static
    arguments get ``None``.
    """
    _, L, spin, reality, ssht_sampling, _ssht_backend = res
    sampling_str = ["MW", "MWSS", "DH", "GL"]
    _backend = "SSHT" if _ssht_backend == 0 else "ducc0"
    flm_1d = reindex.flm_2d_to_1d_fast(flm, L)
    if ssht_sampling < 2:  # MW or MWSS sampling
        # pyssht provides the adjoint of the forward transform directly;
        # conjugating before and after converts the adjoint into the VJP
        # for complex inputs.
        f = jnp.array(
            np.conj(
                pyssht.forward_adjoint(
                    np.conj(np.array(flm_1d)),
                    L,
                    spin,
                    Method=sampling_str[ssht_sampling],
                    Reality=reality,
                    backend=_backend,
                )
            )
        )
    else:  # DH or GL sampling
        # No adjoint available: use the inverse transform as the adjoint,
        # then re-apply the quadrature weights on the sphere.
        quad_weights = quadrature_jax.quad_weights_transform(
            L, sampling_str[ssht_sampling].lower()
        )
        f = jnp.array(
            np.conj(
                pyssht.inverse(
                    np.conj(np.array(flm_1d)),
                    L,
                    spin,
                    Method=sampling_str[ssht_sampling],
                    Reality=reality,
                    backend=_backend,
                )
            )
        )
        f = jnp.einsum("tp,t->tp", f, quad_weights, optimize=True)
    return f, None, None, None, None, None
# Link JAX gradients for C backend functions: register the forward/backward
# rules defined above so jax.grad / jax.vjp work through the pyssht-backed
# transforms.
ssht_inverse.defvjp(_ssht_inverse_fwd, _ssht_inverse_bwd)
ssht_forward.defvjp(_ssht_forward_fwd, _ssht_forward_bwd)
def _complex_dtype(real_dtype):
"""
Get complex datatype corresponding to a given real datatype.
Derived from https://github.com/jax-ml/jax/blob/1471702adc28/jax/_src/lax/fft.py#L92
Original license:
Copyright 2019 The JAX Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
return (np.zeros((), real_dtype) + np.zeros((), np.complex64)).dtype
def _real_dtype(complex_dtype):
"""
Get real datatype corresponding to a given complex datatype.
Derived from https://github.com/jax-ml/jax/blob/1471702adc28/jax/_src/lax/fft.py#L93
Original license:
Copyright 2019 The JAX Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
return np.finfo(complex_dtype).dtype
def _healpy_map2alm_impl(f: jnp.ndarray, L: int, nside: int) -> jnp.ndarray:
    """Concrete (non-traced) evaluation: defer to healpy's C++ map2alm."""
    alm = healpy.map2alm(np.array(f), lmax=L - 1, iter=0)
    return jnp.array(alm)
def _healpy_map2alm_abstract_eval(
    f: core.ShapedArray, L: int, nside: int
) -> core.ShapedArray:
    """Abstract shape/dtype rule: L*(L+1)/2 complex coefficients."""
    num_alm = L * (L + 1) // 2
    return core.ShapedArray(shape=(num_alm,), dtype=_complex_dtype(f.dtype))
def _healpy_map2alm_transpose(dflm: jnp.ndarray, L: int, nside: int):
    """Transpose rule for the map2alm primitive (map2alm is linear).

    The adjoint of map2alm is alm2map applied to the cotangent divided by
    the per-coefficient scale factors; see ``_healpy_alm2map_transpose``
    for where those factors come from.
    """
    m_multiplicity = jnp.concatenate((jnp.ones(L), 2 * jnp.ones(L * (L - 1) // 2)))
    scale_factors = m_multiplicity * (3 * nside**2) / jnp.pi
    rescaled = jnp.conj(dflm) / scale_factors
    return (jnp.conj(healpy_alm2map(rescaled, L, nside)),)
# Register the map2alm primitive with JAX: concrete implementation,
# abstract shape/dtype rule, and — since the transform is linear — its
# transpose rule for reverse-mode autodiff.
_healpy_map2alm_p = core.Primitive("healpy_map2alm")
_healpy_map2alm_p.def_impl(_healpy_map2alm_impl)
_healpy_map2alm_p.def_abstract_eval(_healpy_map2alm_abstract_eval)
ad.deflinear(_healpy_map2alm_p, _healpy_map2alm_transpose)
def healpy_map2alm(f: jnp.ndarray, L: int, nside: int) -> jnp.ndarray:
    """
    JAX wrapper for healpy map2alm function (forward spherical harmonic transform).

    The returned harmonic coefficients form a one-dimensional array in the
    HEALPix (ring-ordered) indexing convention; use :py:func:`healpy_forward`
    to obtain a two-dimensional coefficient array instead.

    Args:
        f (jnp.ndarray): Signal on the sphere.

        L (int): Harmonic band-limit. Equivalent to `lmax + 1` in healpy.

        nside (int): HEALPix Nside resolution parameter.

    Returns:
        jnp.ndarray: Harmonic coefficients of signal f.
    """
    # Dispatch through the registered primitive so tracing/AD work.
    return _healpy_map2alm_p.bind(f, L=L, nside=nside)
def _healpy_alm2map_impl(flm: jnp.ndarray, L: int, nside: int) -> jnp.ndarray:
    """Concrete (non-traced) evaluation: defer to healpy's C++ alm2map."""
    pixel_map = healpy.alm2map(np.array(flm), lmax=L - 1, nside=nside)
    return jnp.array(pixel_map)
def _healpy_alm2map_abstract_eval(
    flm: core.ShapedArray, L: int, nside: int
) -> core.ShapedArray:
    """Abstract shape/dtype rule: a HEALPix map has 12 * nside**2 real pixels."""
    npix = 12 * nside**2
    return core.ShapedArray(shape=(npix,), dtype=_real_dtype(flm.dtype))
def _healpy_alm2map_transpose(df: jnp.ndarray, L: int, nside: int) -> tuple:
    """Transpose rule for the alm2map primitive (alm2map is linear).

    The adjoint of alm2map is map2alm scaled by the inverse quadrature
    weight (12 * nside**2) / (4 * pi) = (3 * nside**2) / pi, with an extra
    factor of 2 on the m > 0 coefficients to account for the negative-m
    entries that healpy does not store. See
    https://github.com/astro-informatics/s2fft/issues/243#issuecomment-2500951488
    """
    m_multiplicity = jnp.concatenate((jnp.ones(L), 2 * jnp.ones(L * (L - 1) // 2)))
    scale_factors = m_multiplicity * (3 * nside**2) / jnp.pi
    cotangent_alm = healpy_map2alm(jnp.conj(df), L, nside)
    return (scale_factors * jnp.conj(cotangent_alm),)
# Register the alm2map primitive with JAX: concrete implementation,
# abstract shape/dtype rule, and — since the transform is linear — its
# transpose rule for reverse-mode autodiff.
_healpy_alm2map_p = core.Primitive("healpy_alm2map")
_healpy_alm2map_p.def_impl(_healpy_alm2map_impl)
_healpy_alm2map_p.def_abstract_eval(_healpy_alm2map_abstract_eval)
ad.deflinear(_healpy_alm2map_p, _healpy_alm2map_transpose)
def healpy_alm2map(flm: jnp.ndarray, L: int, nside: int) -> jnp.ndarray:
    """
    JAX wrapper for healpy alm2map function (inverse spherical harmonic transform).

    The input harmonic coefficients must form a one-dimensional array in the
    HEALPix (ring-ordered) indexing convention; use :py:func:`healpy_inverse`
    to pass a two-dimensional coefficient array instead.

    Args:
        flm (jnp.ndarray): Spherical harmonic coefficients.

        L (int): Harmonic band-limit. Equivalent to `lmax + 1` in healpy.

        nside (int): HEALPix Nside resolution parameter.

    Returns:
        jnp.ndarray: Signal on the sphere.
    """
    # Dispatch through the registered primitive so tracing/AD work.
    return _healpy_alm2map_p.bind(flm, L=L, nside=nside)
def healpy_forward(f: jnp.ndarray, L: int, nside: int, iter: int = 3) -> jnp.ndarray:
    r"""
    Compute the forward scalar spherical harmonic transform (healpy JAX).

    Wraps the HEALPix C++ transform [1] via healpy with a JAX-differentiable
    frontend. CPU only, which is a limitation of the C++ library.

    Args:
        f (jnp.ndarray): Signal on the sphere.

        L (int): Harmonic band-limit.

        nside (int): HEALPix Nside resolution parameter.

        iter (int, optional): Number of subiterations (iterative refinement
            steps) for healpy. Iterations increase the precision of the
            forward transform as an inverse of the inverse transform, at a
            linear increase in computational cost; between 2 and 3 is a good
            compromise.

    Returns:
        jnp.ndarray: Harmonic coefficients of signal f.

    Note:
        [1] Gorski, Krzysztof M., et al. "HEALPix: A framework for high-resolution
        discretization and fast analysis of data distributed on the sphere." The
        Astrophysical Journal 622.2 (2005): 759
    """
    flm = healpy_map2alm(f, L, nside)
    # Iterative refinement: fold the harmonic image of the current
    # reconstruction error back into the coefficients.
    for _ in range(iter):
        residual = f - healpy_alm2map(flm, L, nside)
        flm = flm + healpy_map2alm(residual, L, nside)
    return reindex.flm_hp_to_2d_fast(flm, L)
def healpy_inverse(flm: jnp.ndarray, L: int, nside: int) -> jnp.ndarray:
    r"""
    Compute the inverse scalar real spherical harmonic transform (HEALPix JAX).

    HEALPix is a C++ library implementing the scalar spherical harmonic
    transform outlined in [1]. We wrap its healpy python bindings with custom
    JAX frontends, hence providing support for automatic differentiation.
    Currently these transforms can only be deployed on CPU, which is a
    limitation of the C++ library.

    Args:
        flm (jnp.ndarray): Spherical harmonic coefficients.

        L (int): Harmonic band-limit.

        nside (int): HEALPix Nside resolution parameter.

    Returns:
        jnp.ndarray: Signal on the sphere.

    Note:
        [1] Gorski, Krzysztof M., et al. "HEALPix: A framework for high-resolution
        discretization and fast analysis of data distributed on the sphere." The
        Astrophysical Journal 622.2 (2005): 759
    """
    # Convert the 2D (ell, m) indexed coefficients to healpy's flat layout,
    # then synthesize the map through the wrapped healpy primitive.
    flm_hp = reindex.flm_2d_to_hp_fast(flm, L)
    return healpy_alm2map(flm_hp, L, nside)
|
astro-informaticsREPO_NAMEs2fftPATH_START.@s2fft_extracted@s2fft-main@s2fft@transforms@c_backend_spherical.py@.PATH_END.py
|
{
"filename": "test_models.py",
"repo_name": "pyro-ppl/pyro",
"repo_path": "pyro_extracted/pyro-master/tests/contrib/mue/test_models.py",
"type": "Python"
}
|
# Copyright Contributors to the Pyro project.
# SPDX-License-Identifier: Apache-2.0
import numpy as np
import pytest
import torch
from torch.optim import Adam
import pyro
from pyro.contrib.mue.dataloaders import BiosequenceDataset
from pyro.contrib.mue.models import FactorMuE, ProfileHMM
from pyro.optim import MultiStepLR
@pytest.mark.parametrize("jit", [False, True])
def test_ProfileHMM_smoke(jit):
    """Smoke test: ProfileHMM trains via SVI and evaluates without NaNs."""
    # Setup dataset.
    seqs = ["BABBA", "BAAB", "BABBB"]
    alph = "AB"
    dataset = BiosequenceDataset(seqs, "list", alph)
    # Infer.
    scheduler = MultiStepLR(
        {
            "optimizer": Adam,
            "optim_args": {"lr": 0.1},
            "milestones": [20, 100, 1000, 2000],
            "gamma": 0.5,
        }
    )
    # Latent length is padded ~10% beyond the longest training sequence.
    model = ProfileHMM(int(dataset.max_length * 1.1), dataset.alphabet_length)
    n_epochs = 5
    batch_size = 2
    losses = model.fit_svi(dataset, n_epochs, batch_size, scheduler, jit)

    # Final training loss must be finite.
    assert not np.isnan(losses[-1])

    # Evaluate: log-probabilities should be negative, perplexities positive.
    train_lp, test_lp, train_perplex, test_perplex = model.evaluate(
        dataset, dataset, jit
    )
    assert train_lp < 0.0
    assert test_lp < 0.0
    assert train_perplex > 0.0
    assert test_perplex > 0.0
@pytest.mark.parametrize("indel_factor_dependence", [False, True])
@pytest.mark.parametrize("z_prior_distribution", ["Normal", "Laplace"])
@pytest.mark.parametrize("ARD_prior", [False, True])
@pytest.mark.parametrize("substitution_matrix", [False, True])
@pytest.mark.parametrize("jit", [False, True])
def test_FactorMuE_smoke(
    indel_factor_dependence, z_prior_distribution, ARD_prior, substitution_matrix, jit
):
    """Smoke test: FactorMuE trains, reconstructs, evaluates and embeds."""
    # Setup dataset.
    seqs = ["BABBA", "BAAB", "BABBB"]
    alph = "AB"
    dataset = BiosequenceDataset(seqs, "list", alph)
    # Infer.
    z_dim = 2
    scheduler = MultiStepLR(
        {
            "optimizer": Adam,
            "optim_args": {"lr": 0.1},
            "milestones": [20, 100, 1000, 2000],
            "gamma": 0.5,
        }
    )
    model = FactorMuE(
        dataset.max_length,
        dataset.alphabet_length,
        z_dim,
        indel_factor_dependence=indel_factor_dependence,
        z_prior_distribution=z_prior_distribution,
        ARD_prior=ARD_prior,
        substitution_matrix=substitution_matrix,
    )
    n_epochs = 5
    anneal_length = 2
    batch_size = 2
    losses = model.fit_svi(dataset, n_epochs, anneal_length, batch_size, scheduler, jit)

    # Reconstruct a single sequence from the fitted model.
    recon = model._reconstruct_regressor_seq(dataset, 1, pyro.param)

    # Final loss must be finite; reconstruction has one-hot shape
    # (n_seqs, max_seq_length, alphabet_size).
    assert not np.isnan(losses[-1])
    assert recon.shape == (1, max([len(seq) for seq in seqs]), len(alph))

    # Annealing schedule: mid-ramp value is 0.5, saturates at 1.0.
    assert torch.allclose(model._beta_anneal(3, 2, 6, 2), torch.tensor(0.5))
    assert torch.allclose(model._beta_anneal(100, 2, 6, 2), torch.tensor(1.0))

    # Evaluate: log-probabilities negative, perplexities positive.
    train_lp, test_lp, train_perplex, test_perplex = model.evaluate(
        dataset, dataset, jit
    )
    assert train_lp < 0.0
    assert test_lp < 0.0
    assert train_perplex > 0.0
    assert test_perplex > 0.0

    # Embedding: one latent location/scale pair per sequence, scales positive.
    z_locs, z_scales = model.embed(dataset)
    assert z_locs.shape == (len(dataset), z_dim)
    assert z_scales.shape == (len(dataset), z_dim)
    assert torch.all(z_scales > 0.0)
|
pyro-pplREPO_NAMEpyroPATH_START.@pyro_extracted@pyro-master@tests@contrib@mue@test_models.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/histogram/error_x/__init__.py",
"type": "Python"
}
|
import sys
from typing import TYPE_CHECKING
# Eagerly import all validator classes on Python < 3.7 or under a static type
# checker; otherwise install lazy relative imports (module-level __getattr__,
# provided by relative_import) to keep package import time low.
if sys.version_info < (3, 7) or TYPE_CHECKING:
    from ._width import WidthValidator
    from ._visible import VisibleValidator
    from ._valueminus import ValueminusValidator
    from ._value import ValueValidator
    from ._type import TypeValidator
    from ._tracerefminus import TracerefminusValidator
    from ._traceref import TracerefValidator
    from ._thickness import ThicknessValidator
    from ._symmetric import SymmetricValidator
    from ._copy_ystyle import Copy_YstyleValidator
    from ._color import ColorValidator
    from ._arraysrc import ArraysrcValidator
    from ._arrayminussrc import ArrayminussrcValidator
    from ._arrayminus import ArrayminusValidator
    from ._array import ArrayValidator
else:
    from _plotly_utils.importers import relative_import

    __all__, __getattr__, __dir__ = relative_import(
        __name__,
        [],
        [
            "._width.WidthValidator",
            "._visible.VisibleValidator",
            "._valueminus.ValueminusValidator",
            "._value.ValueValidator",
            "._type.TypeValidator",
            "._tracerefminus.TracerefminusValidator",
            "._traceref.TracerefValidator",
            "._thickness.ThicknessValidator",
            "._symmetric.SymmetricValidator",
            "._copy_ystyle.Copy_YstyleValidator",
            "._color.ColorValidator",
            "._arraysrc.ArraysrcValidator",
            "._arrayminussrc.ArrayminussrcValidator",
            "._arrayminus.ArrayminusValidator",
            "._array.ArrayValidator",
        ],
    )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@histogram@error_x@__init__.py@.PATH_END.py
|
{
"filename": "tencentvectordb.py",
"repo_name": "langchain-ai/langchain",
"repo_path": "langchain_extracted/langchain-master/libs/langchain/langchain/retrievers/self_query/tencentvectordb.py",
"type": "Python"
}
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.query_constructors.tencentvectordb import (
TencentVectorDBTranslator,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
# Maps the old attribute name to the module that now hosts it.
DEPRECATED_LOOKUP = {
    "TencentVectorDBTranslator": (
        "langchain_community.query_constructors.tencentvectordb"
    ),
}

# Importer that resolves attribute access against DEPRECATED_LOOKUP,
# forwarding to the new langchain_community location.
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)


def __getattr__(name: str) -> Any:
    """Look up attributes dynamically (PEP 562 module-level __getattr__)."""
    return _import_attribute(name)


__all__ = ["TencentVectorDBTranslator"]
|
langchain-aiREPO_NAMElangchainPATH_START.@langchain_extracted@langchain-master@libs@langchain@langchain@retrievers@self_query@tencentvectordb.py@.PATH_END.py
|
{
"filename": "cubed_m_24_fluid_scf_perts_off_newt_2_cl.py",
"repo_name": "PoulinV/AxiCLASS",
"repo_path": "AxiCLASS_extracted/AxiCLASS-master/output/cubed_m_24_fluid_scf_perts_off_newt_2_cl.py",
"type": "Python"
}
|
import matplotlib.pyplot as plt
import numpy as np
import itertools

# CLASS output files for this run (auto-generated plotting script).
files = ['/home/charlotte/class_perso-ScalarField/class_perso-ScalarField/output/cubed_m_24_fluid_scf_perts_off_newt_2_cl.dat']
data = [np.loadtxt(data_file) for data_file in files]
roots = ['cubed_m_24_fluid_scf_perts_off_newt_2_cl']

fig, ax = plt.subplots()

curve = data[0]
# Quantities plotted (column 1 of the .dat file is TT).
y_axis = [u'TT']

# Plot TT (column 1) against multipole ell (column 0) on a log-x axis.
ax.semilogx(curve[:, 0], curve[:, 1])

ax.legend([root + ': ' + elem for (root, elem) in
           itertools.product(roots, y_axis)], loc='best')

# Raw string avoids the invalid '\e' escape sequence in '$\ell$'
# (a SyntaxWarning on modern Python).
ax.set_xlabel(r'$\ell$', fontsize=16)
plt.show()
|
PoulinVREPO_NAMEAxiCLASSPATH_START.@AxiCLASS_extracted@AxiCLASS-master@output@cubed_m_24_fluid_scf_perts_off_newt_2_cl.py@.PATH_END.py
|
{
"filename": "lru_cache_test.py",
"repo_name": "google/jax",
"repo_path": "jax_extracted/jax-main/tests/lru_cache_test.py",
"type": "Python"
}
|
# Copyright 2024 The JAX Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import importlib.util
import tempfile
import time
from absl.testing import absltest
from jax._src import path as pathlib
from jax._src.lru_cache import _CACHE_SUFFIX, LRUCache
import jax._src.test_util as jtu
class LRUCacheTestCase(jtu.JaxTestCase):
  """Base class providing a fresh temporary cache directory for each test."""

  name: str | None
  path: pathlib.Path | None

  def setUp(self):
    """Skips if filelock is unavailable, then creates the temp cache dir."""
    # LRUCache relies on filelock for cross-process safety; without it the
    # cache cannot be constructed, so skip rather than fail.
    if importlib.util.find_spec("filelock") is None:
      self.skipTest("filelock is not installed")

    super().setUp()

    tmpdir = tempfile.TemporaryDirectory()
    self.enter_context(tmpdir)
    self.name = tmpdir.name
    self.path = pathlib.Path(self.name)

  def tearDown(self):
    """Drops references to the temp directory (cleanup is via enter_context)."""
    self.path = None
    self.name = None

    super().tearDown()

  def assertCacheKeys(self, keys):
    """Asserts the cache dir contains exactly the entries for `keys`."""
    self.assertEqual(set(self.path.glob(f"*{_CACHE_SUFFIX}")), {self.path / f"{key}{_CACHE_SUFFIX}" for key in keys})
class LRUCacheTest(LRUCacheTestCase):
  """Behavioral tests for LRUCache: get/put, eviction order, persistence."""

  def test_get_nonexistent_key(self):
    """A missing key returns None rather than raising."""
    cache = LRUCache(self.name, max_size=-1)
    self.assertIsNone(cache.get("a"))

  def test_put_and_get_key(self):
    """Stored values round-trip and coexist on disk."""
    cache = LRUCache(self.name, max_size=-1)

    cache.put("a", b"a")
    self.assertEqual(cache.get("a"), b"a")
    self.assertCacheKeys(("a",))

    cache.put("b", b"b")
    self.assertEqual(cache.get("a"), b"a")
    self.assertEqual(cache.get("b"), b"b")
    self.assertCacheKeys(("a", "b"))

  def test_put_empty_value(self):
    """An empty byte string is a legal, retrievable value."""
    cache = LRUCache(self.name, max_size=-1)

    cache.put("a", b"")
    self.assertEqual(cache.get("a"), b"")

  def test_put_empty_key(self):
    """An empty key is rejected with ValueError."""
    cache = LRUCache(self.name, max_size=-1)

    with self.assertRaisesRegex(ValueError, r"key cannot be empty"):
      cache.put("", b"a")

  def test_eviction(self):
    """Least-recently-used entries are evicted when the cache is full."""
    cache = LRUCache(self.name, max_size=2)

    cache.put("a", b"a")
    cache.put("b", b"b")

    # `sleep()` is necessary to guarantee that `b`'s timestamp is strictly greater than `a`'s
    time.sleep(1)
    cache.get("b")

    # write `c`. `a` should be evicted
    cache.put("c", b"c")
    self.assertCacheKeys(("b", "c"))

    # calling `get()` on `b` makes `c` least recently used
    time.sleep(1)
    cache.get("b")

    # write `d`. `c` should be evicted
    cache.put("d", b"d")
    self.assertCacheKeys(("b", "d"))

  def test_eviction_with_empty_value(self):
    """Zero-length entries take no space, but are still evictable."""
    cache = LRUCache(self.name, max_size=1)

    cache.put("a", b"a")

    # write `b` with length 0
    # eviction should not happen even though the cache is full
    cache.put("b", b"")
    self.assertCacheKeys(("a", "b"))

    # calling `get()` on `a` makes `b` least recently used
    time.sleep(1)
    cache.get("a")

    # writing `c` should result in evicting the
    # least recent used file (`b`) first,
    # but this is not sufficient to make room for `c`,
    # so `a` should be evicted as well
    cache.put("c", b"c")
    self.assertCacheKeys(("c",))

  def test_existing_cache_dir(self):
    """Cache contents and LRU ordering survive reinitialization."""
    cache = LRUCache(self.name, max_size=2)

    cache.put("a", b"a")

    # simulates reinitializing the cache in another process
    del cache
    cache = LRUCache(self.name, max_size=2)

    self.assertEqual(cache.get("a"), b"a")

    # ensure that the LRU policy survives cache reinitialization
    cache.put("b", b"b")

    # calling `get()` on `a` makes `b` least recently used
    time.sleep(1)
    cache.get("a")

    # write `c`. `b` should be evicted
    cache.put("c", b"c")
    self.assertCacheKeys(("a", "c"))

  def test_max_size(self):
    """Values larger than max_size warn and are not stored."""
    cache = LRUCache(self.name, max_size=1)

    msg = (r"Cache value for key .+? of size \d+ bytes exceeds the maximum "
           r"cache size of \d+ bytes")
    with self.assertWarnsRegex(UserWarning, msg):
      cache.put("a", b"aaaa")
    self.assertIsNone(cache.get("a"))
    self.assertEqual(set(self.path.glob(f"*{_CACHE_SUFFIX}")), set())
self.assertEqual(set(self.path.glob(f"*{_CACHE_SUFFIX}")), set())
# Allow running this test file directly, e.g. `python lru_cache_test.py`.
if __name__ == "__main__":
  absltest.main(testLoader=jtu.JaxTestLoader())
|
googleREPO_NAMEjaxPATH_START.@jax_extracted@jax-main@tests@lru_cache_test.py@.PATH_END.py
|
{
"filename": "addcol2ms.py",
"repo_name": "revoltek/LiLF",
"repo_path": "LiLF_extracted/LiLF-master/scripts/addcol2ms.py",
"type": "Python"
}
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (C) 2018 - Francesco de Gasperin
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
addcol2ms.py
Create a new column in a measumrent set
"""
import optparse, logging
import casacore.tables as pt
import numpy
import logging
logging.basicConfig(level=logging.DEBUG)
def main(options):
    """Add one or more columns to a measurement set.

    If ``options.incol`` is given, each new column copies its data manager
    info and contents from that column. Otherwise the column description is
    modelled on DATA, stored either with the Dysco compressed data manager
    (``options.dysco``) or the StandardStMan, and initialised to 0 in the
    Dysco case.

    Args:
        options: parsed optparse options with attributes ``ms`` (input MS
            path), ``cols`` (comma-separated output column names), ``incol``
            (optional source column) and ``dysco`` (bool).
    """
    ms = options.ms
    if ms == '':
        logging.error('You have to specify an input MS, use -h for help')
        return
    cols = options.cols
    incol = options.incol
    dysco = options.dysco

    t = pt.table(ms, readonly=False, ack=False)

    for col in cols.split(','):
        # Guard clause: never overwrite an existing column.
        if col in t.colnames():
            logging.warning('Column ' + col + ' already exists.')
            continue

        logging.info('Adding the output column ' + col + ' to ' + ms + '.')
        if incol == '':
            # Model the new column on DATA.
            cd = t.getcoldesc('DATA')
            cd['comment'] = 'Added by addcol2ms'
            if dysco:
                # Dysco compressed storage; not as performing as standard DATA.
                cd['dataManagerType'] = 'DyscoStMan'
                cd['dataManagerGroup'] = 'DyscoData'
                coldmi = {'NAME': col, 'SEQNR': 3, 'SPEC': {'dataBitCount': 10, 'distribution': 'TruncatedGaussian', 'distributionTruncation': 2.5, 'normalization': 'AF', 'studentTNu': 0.0, 'weightBitCount': 12}, 'TYPE': 'DyscoStMan'}
            else:
                cd['dataManagerType'] = 'StandardStMan'
                cd['dataManagerGroup'] = 'SSMVar'
                coldmi = {'NAME': col, 'SEQNR': 0, 'SPEC': {'ActualCacheSize': 2, 'BUCKETSIZE': 32768, 'IndexLength': 799832, 'PERSCACHESIZE': 2}, 'TYPE': 'StandardStMan'}
            t.addcols(pt.makecoldesc(col, cd), coldmi)
            # Dysco columns are explicitly zeroed; StandardStMan columns are
            # left with their default initialisation (as in the original).
            if dysco:
                logging.warning('Setting ' + col + ' = 0')
                pt.taql("update $t set " + col + "=0")
        else:
            # Copy both the data manager info and the data of the input column.
            coldmi = t.getdminfo(incol)
            coldmi['NAME'] = col
            cd = t.getcoldesc(incol)
            cd['comment'] = 'Added by addcol2ms'
            t.addcols(pt.makecoldesc(col, cd), coldmi)
            logging.warning('Setting ' + col + ' = ' + incol)
            pt.taql("update $t set " + col + "=" + incol)

    t.close()
# Command-line interface: parse options and run immediately (no __main__
# guard; this script is only meant to be executed directly).
opt = optparse.OptionParser()
opt.add_option('-m','--ms',help='Input MS [no default].',default='')
opt.add_option('-c','--cols',help='Output column, comma separated if more than one [no default].',default='')
opt.add_option('-i','--incol',help='Input column to copy in the output column, otherwise it will be set to 0 [default set to 0].',default='')
opt.add_option('-d','--dysco',help='Enable dysco dataManager for new columns (copied columns always get the same dataManager of the original)',action="store_true",default=False)
options, arguments = opt.parse_args()
main(options)
|
revoltekREPO_NAMELiLFPATH_START.@LiLF_extracted@LiLF-master@scripts@addcol2ms.py@.PATH_END.py
|
{
"filename": "faq.md",
"repo_name": "sbi-dev/sbi",
"repo_path": "sbi_extracted/sbi-main/docs/docs/faq.md",
"type": "Markdown"
}
|
# Frequently asked questions
1. [What should I do when my 'posterior samples are outside of the prior support' in SNPE?](faq/question_01_leakage.md)
2. [Can the algorithms deal with invalid data, e.g., NaN or inf?](faq/question_02_nans.md)
3. [When using multiple workers, I get a pickling error. Can I still use multiprocessing?](faq/question_03_pickling_error.md)
4. [Can I use the GPU for training the density estimator?](faq/question_04_gpu.md)
5. [How should I save and load objects in `sbi`?](faq/question_05_pickling.md)
6. [Can I stop neural network training and resume it later?](faq/question_06_resume_training.md)
7. [How can I use a prior that is not defined in PyTorch?](faq/question_07_custom_prior.md)
See also [discussion page](https://github.com/sbi-dev/sbi/discussions) and [issue
tracker](https://github.com/sbi-dev/sbi/issues) on the `sbi` GitHub repository for
recent questions and problems.
|
sbi-devREPO_NAMEsbiPATH_START.@sbi_extracted@sbi-main@docs@docs@faq.md@.PATH_END.py
|
{
"filename": "marked_npairs_xy_z.py",
"repo_name": "astropy/halotools",
"repo_path": "halotools_extracted/halotools-master/halotools/mock_observables/pair_counters/marked_npairs_xy_z.py",
"type": "Python"
}
|
r""" Module containing the `~halotools.mock_observables.npairs_3d` function
used to count pairs as a function of separation.
"""
from __future__ import (absolute_import, division, print_function, unicode_literals)
import numpy as np
import multiprocessing
from functools import partial
from .npairs_xy_z import _npairs_xy_z_process_args
from .mesh_helpers import _set_approximate_cell_sizes, _cell1_parallelization_indices
from .rectangular_mesh import RectangularDoubleMesh
from .marked_cpairs import marked_npairs_xy_z_engine
from ...custom_exceptions import HalotoolsError
__author__ = ('Duncan Campbell', 'Andrew Hearin')
__all__ = ('marked_npairs_xy_z', )
def marked_npairs_xy_z(sample1, sample2, rp_bins, pi_bins,
        period=None, weights1=None, weights2=None,
        weight_func_id=0, num_threads=1,
        approx_cell1_size=None, approx_cell2_size=None):
    r"""
    Calculate the number of weighted pairs with separations greater than
    or equal to :math:`r_{\perp}` and :math:`r_{\parallel}`, :math:`W(>r_{\perp},>r_{\parallel})`.

    :math:`r_{\perp}` and :math:`r_{\parallel}` are defined wrt the z-direction.

    The weight given to each pair is determined by the weights for a pair,
    :math:`w_1`, :math:`w_2`, and a user-specified "weighting function", indicated
    by the ``weight_func_id`` parameter, :math:`f(w_1,w_2)`.

    Parameters
    ----------
    sample1 : array_like
        Numpy array of shape (Npts1, 3) containing 3-D positions of points.
        See the :ref:`mock_obs_pos_formatting` documentation page, or the
        Examples section below, for instructions on how to transform
        your coordinate position arrays into the
        format accepted by the ``sample1`` and ``sample2`` arguments.
        Length units are comoving and assumed to be in Mpc/h, here and throughout Halotools.

    sample2 : array_like, optional
        Numpy array of shape (Npts2, 3) containing 3-D positions of points.
        Should be identical to sample1 for cases of auto-sample pair counts.

    rp_bins : array_like
        array of boundaries defining the radial bins perpendicular to the LOS in which
        pairs are counted.
        Length units are comoving and assumed to be in Mpc/h, here and throughout Halotools.

    pi_bins : array_like
        array of boundaries defining the radial bins parallel to the LOS in which
        pairs are counted.
        Length units are comoving and assumed to be in Mpc/h, here and throughout Halotools.

    period : array_like, optional
        Length-3 sequence defining the periodic boundary conditions
        in each dimension. If you instead provide a single scalar, Lbox,
        period is assumed to be the same in all Cartesian directions.

    weights1 : array_like, optional
        Either a 1-D array of length *N1*, or a 2-D array of length *N1* x *N_weights*,
        containing the weights used for the weighted pair counts. If this parameter is
        None, the weights are set to np.ones(*(N1,N_weights)*).

    weights2 : array_like, optional
        Either a 1-D array of length *N1*, or a 2-D array of length *N1* x *N_weights*,
        containing the weights used for the weighted pair counts. If this parameter is
        None, the weights are set to np.ones(*(N1,N_weights)*).

    weight_func_id : int, optional
        weighting function integer ID. Each weighting function requires a specific
        number of weights per point, *N_weights*. See the Notes for a description of
        available weighting functions.

    num_threads : int, optional
        Number of threads to use in calculation, where parallelization is performed
        using the python ``multiprocessing`` module. Default is 1 for a purely serial
        calculation, in which case a multiprocessing Pool object will
        never be instantiated. A string 'max' may be used to indicate that
        the pair counters should use all available cores on the machine.

    approx_cell1_size : array_like, optional
        Length-3 array serving as a guess for the optimal manner by how points
        will be apportioned into subvolumes of the simulation box.
        The optimum choice unavoidably depends on the specs of your machine.
        Default choice is to use Lbox/10 in each dimension,
        which will return reasonable result performance for most use-cases.
        Performance can vary sensitively with this parameter, so it is highly
        recommended that you experiment with this parameter when carrying out
        performance-critical calculations.

    approx_cell2_size : array_like, optional
        Analogous to ``approx_cell1_size``, but for sample2. See comments for
        ``approx_cell1_size`` for details.

    Returns
    -------
    wN_pairs : numpy.ndarray
        2-D array of shape *(Nrp_bins,Npi_bins)* containing the weighted number
        counts of pairs

    Notes
    -----
    See the docstring of the `~halotools.mock_observables.marked_tpcf` function
    for a description of the available marking functions that can be passed in
    via the ``weight_func_id`` optional argument.

    Examples
    --------
    For demonstration purposes we create randomly distributed sets of points within a
    periodic unit cube, using random weights.

    >>> Npts1, Npts2, Lbox = 1000, 1000, 250.
    >>> period = [Lbox, Lbox, Lbox]
    >>> rp_bins = np.logspace(-1, 1.5, 15)
    >>> pi_bins = [20, 40, 60]

    >>> x1 = np.random.uniform(0, Lbox, Npts1)
    >>> y1 = np.random.uniform(0, Lbox, Npts1)
    >>> z1 = np.random.uniform(0, Lbox, Npts1)
    >>> x2 = np.random.uniform(0, Lbox, Npts2)
    >>> y2 = np.random.uniform(0, Lbox, Npts2)
    >>> z2 = np.random.uniform(0, Lbox, Npts2)

    We transform our *x, y, z* points into the array shape used by the pair-counter by
    taking the transpose of the result of `numpy.vstack`. This boilerplate transformation
    is used throughout the `~halotools.mock_observables` sub-package:

    >>> sample1 = np.vstack([x1, y1, z1]).T
    >>> sample2 = np.vstack([x2, y2, z2]).T

    We create a set of random weights:

    >>> weights1 = np.random.random((Npts1, 1))
    >>> weights2 = np.random.random((Npts2, 1))

    The weighted counts are calculated by:

    >>> weighted_counts = marked_npairs_xy_z(sample1, sample2, rp_bins, pi_bins, period=period, weights1=weights1, weights2=weights2, weight_func_id=1)
    """
    # Process the inputs with the helper function
    result = _npairs_xy_z_process_args(sample1, sample2, rp_bins, pi_bins, period,
        num_threads, approx_cell1_size, approx_cell2_size)
    x1in, y1in, z1in, x2in, y2in, z2in = result[0:6]
    rp_bins, pi_bins, period, num_threads, PBCs, approx_cell1_size, approx_cell2_size = result[6:]
    xperiod, yperiod, zperiod = period

    # Search lengths per dimension: rp_max in the plane of the sky (x, y),
    # pi_max along the line of sight (z).
    rp_max = np.max(rp_bins)
    pi_max = np.max(pi_bins)
    search_xlength, search_ylength, search_zlength = rp_max, rp_max, pi_max

    # Process the input weights and with the helper function
    weights1, weights2 = _marked_npairs_process_weights(sample1, sample2,
            weights1, weights2, weight_func_id)

    # Compute the estimates for the cell sizes
    approx_cell1_size, approx_cell2_size = (
        _set_approximate_cell_sizes(approx_cell1_size, approx_cell2_size, period)
        )
    approx_x1cell_size, approx_y1cell_size, approx_z1cell_size = approx_cell1_size
    approx_x2cell_size, approx_y2cell_size, approx_z2cell_size = approx_cell2_size

    # Build the rectangular mesh
    double_mesh = RectangularDoubleMesh(x1in, y1in, z1in, x2in, y2in, z2in,
        approx_x1cell_size, approx_y1cell_size, approx_z1cell_size,
        approx_x2cell_size, approx_y2cell_size, approx_z2cell_size,
        search_xlength, search_ylength, search_zlength, xperiod, yperiod, zperiod, PBCs)

    # Create a function object that has a single argument, for parallelization purposes
    engine = partial(marked_npairs_xy_z_engine, double_mesh,
        x1in, y1in, z1in, x2in, y2in, z2in,
        weights1, weights2, weight_func_id, rp_bins, pi_bins)

    # Calculate the cell1 indices that will be looped over by the engine
    num_threads, cell1_tuples = _cell1_parallelization_indices(
        double_mesh.mesh1.ncells, num_threads)

    if num_threads > 1:
        # Fan the cell1 index tuples out over a process pool and sum the
        # per-process histograms.
        pool = multiprocessing.Pool(num_threads)
        result = pool.map(engine, cell1_tuples)
        counts = np.sum(np.array(result), axis=0)
        pool.close()
    else:
        counts = engine(cell1_tuples[0])

    return np.array(counts)
def _marked_npairs_process_weights(sample1, sample2, weights1, weights2, weight_func_id):
    r"""Validate and standardize the input weights for marked pair counting.

    Each weights array is converted to a 2-d float64 ndarray of shape
    (npts, num_weights), where num_weights is dictated by ``weight_func_id``.
    Missing (None) weights default to arrays of ones.

    Parameters
    ----------
    sample1, sample2 : array_like
        Arrays of shape (npts, 3) storing the point positions.

    weights1, weights2 : array_like or None
        1-D arrays of length npts, or 2-D arrays of shape (npts, num_weights).

    weight_func_id : int
        Integer ID of the marking function; determines num_weights.

    Returns
    -------
    weights1, weights2 : ndarray
        2-D float64 arrays of shape (npts, num_weights).

    Raises
    ------
    HalotoolsError
        If either weights array has the wrong dimensionality or a shape
        inconsistent with its corresponding sample.
    """
    correct_num_weights = _func_signature_int_from_wfunc(weight_func_id)
    # The two inputs receive identical treatment; the label only customizes
    # the error messages (`weights1`/`sample1` vs `weights2`/`sample2`).
    weights1 = _standardize_marked_weights(
        weights1, np.shape(sample1)[0], correct_num_weights, weight_func_id, "1")
    weights2 = _standardize_marked_weights(
        weights2, np.shape(sample2)[0], correct_num_weights, weight_func_id, "2")
    return weights1, weights2


def _standardize_marked_weights(weights, npts_sample, correct_num_weights,
        weight_func_id, label):
    r"""Convert one weights array to shape (npts_sample, correct_num_weights).

    ``label`` is "1" or "2" and is used only to build error messages matching
    the original argument name.
    """
    correct_shape = (npts_sample, correct_num_weights)

    if weights is None:
        return np.ones(correct_shape, dtype=np.float64)

    weights = np.atleast_1d(weights).astype("float64")

    _converted_to_2d_from_1d = False
    if weights.ndim == 1:
        # Interpret a 1-D array as a single weight per point.
        _converted_to_2d_from_1d = True
        weights = weights.reshape((len(weights), 1))
    elif weights.ndim != 2:
        msg = ("\n You must either pass in a 1-D or 2-D array \n"
            "for the input `weights%s`. Instead, an array of \n"
            "dimension %i was received.")
        raise HalotoolsError(msg % (label, weights.ndim))

    npts_weights = np.shape(weights)[0]
    num_weights = np.shape(weights)[1]

    # At this point, weights is guaranteed to be a 2-d ndarray; check shape.
    if np.shape(weights) != correct_shape:
        if _converted_to_2d_from_1d is True:
            msg = ("\n You passed in a 1-D array for `weights%s` that \n"
                "does not have the correct length. The number of \n"
                "points in `sample%s` = %i, while the number of points \n"
                "in your input 1-D `weights%s` array = %i")
            raise HalotoolsError(msg % (label, label, npts_sample, label, npts_weights))
        else:
            msg = ("\n You passed in a 2-D array for `weights%s` that \n"
                "does not have a consistent shape with `sample%s`. \n"
                "`sample%s` has length %i. The input value of `weight_func_id` = %i \n"
                "For this value of `weight_func_id`, there should be %i weights \n"
                "per point. The shape of your input `weights%s` is (%i, %i)\n")
            raise HalotoolsError(msg % (label, label, label, npts_sample,
                weight_func_id, correct_num_weights, label, npts_weights, num_weights))

    return weights
def _func_signature_int_from_wfunc(weight_func_id):
"""
Return the function signature available weighting functions.
"""
if type(weight_func_id) != int:
msg = "\n weight_func_id parameter must be an integer ID of a weighting function."
raise ValueError(msg)
if weight_func_id == 1:
return 1
elif weight_func_id == 2:
return 1
elif weight_func_id == 3:
return 2
elif weight_func_id == 4:
return 2
elif weight_func_id == 5:
return 2
elif weight_func_id == 6:
return 2
elif weight_func_id == 7:
return 2
elif weight_func_id == 8:
return 2
elif weight_func_id == 9:
return 2
elif weight_func_id == 10:
return 2
elif weight_func_id == 11:
return 2
elif weight_func_id == 12:
return 4
elif weight_func_id == 13:
return 4
elif weight_func_id == 14:
return 3
elif weight_func_id == 15:
return 3
else:
msg = ("The value ``weight_func_id`` = %i is not recognized")
raise HalotoolsError(msg % weight_func_id)
|
astropyREPO_NAMEhalotoolsPATH_START.@halotools_extracted@halotools-master@halotools@mock_observables@pair_counters@marked_npairs_xy_z.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "Keck-DataReductionPipelines/OsirisDRP",
"repo_path": "OsirisDRP_extracted/OsirisDRP-master/tests/drptestbones/__init__.py",
"type": "Python"
}
|
Keck-DataReductionPipelinesREPO_NAMEOsirisDRPPATH_START.@OsirisDRP_extracted@OsirisDRP-master@tests@drptestbones@__init__.py@.PATH_END.py
|
|
{
"filename": "quantize_model_test_base.py",
"repo_name": "tensorflow/tensorflow",
"repo_path": "tensorflow_extracted/tensorflow-master/tensorflow/compiler/mlir/quantization/stablehlo/python/integration_test/quantize_model_test_base.py",
"type": "Python"
}
|
# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base test class for quantize_model Tests."""
from typing import List, Mapping, Optional, Sequence, Tuple
from absl.testing import parameterized
from mlir import ir
from mlir.dialects import stablehlo as stablehlo_dialect
import numpy as np
import tensorflow # pylint: disable=unused-import
from tensorflow.compiler.mlir.stablehlo import stablehlo
from tensorflow.python.eager import def_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_spec
from tensorflow.python.module import module
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.saved_model import load
from tensorflow.python.saved_model import loader_impl
from tensorflow.python.saved_model import save as saved_model_save
from tensorflow.python.types import core
FUNC_ALIAS = 'some_alias'
class QuantizedModelTest(test.TestCase, parameterized.TestCase):
"""Base test class for StableHLO quant tests."""
  def setUp(self) -> None:
    """Creates fresh temporary SavedModel directories for each test case."""
    super().setUp()

    # Many test cases for quantization involve creating and saving the input
    # model and saving the output quantized model. These two member
    # attributes can be used to specify the paths for such models,
    # respectively. These paths will be cleaned up after each test case.
    self._input_saved_model_path = self.create_tempdir('input').full_path
    self._output_saved_model_path = self.create_tempdir('output').full_path

    # Extra output path occasionally used for comparing two different
    # quantized models.
    self._output_saved_model_path_2 = self.create_tempdir('output2').full_path
  def _extract_first_xla_call_module_op(
      self, output_saved_model_path: str
  ) -> str:
    """Extracts the first XlaCallModule op from output saved model to string.

    Args:
      output_saved_model_path: Directory of the SavedModel to inspect.

    Returns:
      The StableHLO module embedded in the first `XlaCallModule` node found
      in the graph's function library, rendered as MLIR assembly text.

    Raises:
      ValueError: If no `XlaCallModule` node exists in the function library.
    """
    root = load.load(output_saved_model_path)
    tf_graph_def = root.signatures['serving_default'].graph.as_graph_def()
    # Only library functions are searched; top-level graph nodes are not
    # scanned here.
    for function in tf_graph_def.library.function:
      for node_def in function.node_def:
        if node_def.op == 'XlaCallModule':
          with ir.Context() as context:
            stablehlo_dialect.register_dialect(context)
            # Serialization in VHLO dialect.
            serialized = node_def.attr.get('module').s
            # MLIR bytecode matching StableHLO version.
            mlir_bytecode = stablehlo.deserialize_portable_artifact_str(
                serialized)
            stablehlo_module = ir.Module.parse(mlir_bytecode, context=context)
            return str(stablehlo_module)
    raise ValueError('No XlaCallModule found in saved model.')
def _get_num_xla_call_module_op(self, output_saved_model_path: str) -> int:
"""Gets the number of XlaCallModule ops in the output saved model."""
root = load.load(output_saved_model_path)
tf_graph_def = root.signatures['serving_default'].graph.as_graph_def()
count = 0
for node_def in tf_graph_def.node:
if node_def.op == 'XlaCallModule':
count += 1
for function in tf_graph_def.library.function:
for node_def in function.node_def:
if node_def.op == 'XlaCallModule':
count += 1
return count
def _get_function_aliases(
self, output_saved_model_path: str, tags: List[str]
) -> dict[str, str]:
"""Gets the function aliases in the output saved model."""
loader = loader_impl.SavedModelLoader(output_saved_model_path)
return loader.get_meta_graph_def_from_tags(
tags
).meta_info_def.function_aliases
def _create_matmul_model(
self,
input_shape: Sequence[int],
weight_shape: Sequence[int],
saved_model_path: str,
bias_fn: Optional[ops.Operation] = None,
activation_fn: Optional[ops.Operation] = None,
) -> module.Module:
class MatmulModel(module.Module):
"""A simple model with a single matmul.
Bias and activation function are optional.
"""
def __init__(
self,
weight_shape: Sequence[int],
) -> None:
"""Initializes a MatmulModel.
Args:
weight_shape: Shape of the weight tensor.
"""
self.filters = np.random.uniform(low=-1.0, high=1.0, size=weight_shape)
if bias_fn is not None:
self.bias = np.random.uniform(
low=-1.0, high=1.0, size=weight_shape[-1]
)
def has_reshape(self) -> bool:
return self.bias_fn() and self.bias_size != self.filters.shape[-1]
@def_function.function
def matmul(self, input_tensor: core.Tensor) -> Mapping[str, core.Tensor]:
"""Performs a matrix multiplication.
Depending on self.bias_fn and self.activation_fn, it may add a bias
term or go through the activaction function.
Args:
input_tensor: Input tensor to matmul with the filter.
Returns:
A map of: output key -> output result.
"""
out = math_ops.matmul(input_tensor, self.filters, name='sample/matmul')
if bias_fn is not None:
out = bias_fn(out, self.bias)
if activation_fn is not None:
out = activation_fn(out)
return {'output': out}
model = MatmulModel(weight_shape)
saved_model_save.save(
model,
saved_model_path,
signatures=model.matmul.get_concrete_function(
tensor_spec.TensorSpec(
shape=input_shape, dtype=dtypes.float32, name='input_tensor'
)
),
)
return model
def _any_log_contains(
self, substring: str, log_record_list: List['logging.LogRecord']
) -> bool:
"""Returns True if any of the log contains a given substring.
Args:
substring: A piece of string to check whether it exists in the log
message.
log_record_list: A list of `absl.logging.LogRecord`s.
Returns:
True if and only if the substring exists in any of the log in
`log_record_list`.
"""
return any(
map(
lambda log_record: substring in str(log_record.message),
log_record_list,
)
)
  def _create_matmul_and_same_scale_model(
      self,
      input_shape: Sequence[int],
      weight_shape: Sequence[int],
      saved_model_path: str,
      same_scale_op: str,
  ) -> module.Module:
    """Creates and saves a model with a matmul followed by a same-scale op.

    Args:
      input_shape: Shape of the matmul input tensor.
      weight_shape: Shape of the randomly initialized weight tensor.
      saved_model_path: Directory to save the model to.
      same_scale_op: Name of the same-scale op appended after the matmul.

    Returns:
      The saved `module.Module` instance.
    """

    class MatmulAndSameScaleModel(module.Module):
      """A simple model with a same-scale op.

      Op name in StableHLO dialect is given as a string.
      """

      def __init__(
          self,
          weight_shape: Sequence[int],
          same_scale_op: str,
      ) -> None:
        """Initializes a MatmulModel.

        Args:
          weight_shape: Shape of the weight tensor.
          same_scale_op: Name of the same-scale op to be tested. Raises error
            when an unknown name is given.
        """
        self.filters = np.random.uniform(low=-1.0, high=1.0, size=weight_shape)
        self.same_scale_op = same_scale_op

      @def_function.function
      def matmul_and_same_scale(
          self, input_tensor: core.Tensor
      ) -> Mapping[str, core.Tensor]:
        """Performs a matrix multiplication.

        Args:
          input_tensor: Input tensor to matmul with the filter.

        Returns:
          A map of: output key -> output result.
        """
        out = math_ops.matmul(input_tensor, self.filters, name='sample/matmul')

        if self.same_scale_op == 'concatenate':
          ones = array_ops.ones_like(out)
          out = array_ops.concat([out, ones], 0)
        elif self.same_scale_op == 'gather':
          out = array_ops.gather(out, indices=[0], axis=0)
        elif self.same_scale_op == 'max_pool':
          out = nn_ops.max_pool(out, ksize=3, strides=1, padding='SAME')
        elif self.same_scale_op == 'pad':
          paddings = array_ops.ones(
              (array_ops.rank(out), 2), dtype=dtypes.int32
          )
          out = array_ops.pad(out, paddings, 'CONSTANT')
        elif self.same_scale_op == 'reshape':
          out = array_ops.reshape(out, [-1])
        elif self.same_scale_op == 'select':
          # Fixed seed keeps the select mask deterministic across runs.
          rng = np.random.default_rng(seed=1234)
          condition = ops.convert_to_tensor(
              rng.uniform(low=0.0, high=1.0, size=out.shape) < 0.5
          )
          ones = array_ops.ones_like(out)
          out = math_ops.select(condition, out, ones)
        elif self.same_scale_op == 'slice':
          begin = array_ops.zeros((array_ops.rank(out)), dtype=dtypes.int32)
          size = array_ops.ones((array_ops.rank(out)), dtype=dtypes.int32)
          out = array_ops.slice(out, begin, size)
        elif self.same_scale_op == 'transpose':
          out = array_ops.transpose(out)
        else:
          raise NotImplementedError(
              '{} is not implemented for integration test.'.format(
                  self.same_scale_op
              )
          )

        return {'output': out}

    model = MatmulAndSameScaleModel(weight_shape, same_scale_op)
    saved_model_save.save(
        model,
        saved_model_path,
        signatures=model.matmul_and_same_scale.get_concrete_function(
            tensor_spec.TensorSpec(
                shape=input_shape, dtype=dtypes.float32, name='input_tensor'
            )
        ),
    )
    return model
def _create_conv2d_model(
self,
input_shape: Sequence[int],
filter_shape: Sequence[int],
saved_model_path: str,
bias_fn: Optional[ops.Operation] = None,
activation_fn: Optional[ops.Operation] = None,
has_batch_norm: bool = False,
strides: Sequence[int] = (1, 1, 1, 1),
dilations: Sequence[int] = (1, 1, 1, 1),
padding: str = 'SAME',
has_func_alias: bool = False,
) -> module.Module:
class ConvModel(module.Module):
"""A simple model with a single conv2d, bias and relu."""
def __init__(self):
self.out_channel_size = filter_shape[-1]
# This ensures filters will have different value range per out channel
self.filters = np.stack(
[
np.random.uniform(
low=-(i + 1), high=(i + 1), size=filter_shape[:-1]
).astype('f4')
for i in range(self.out_channel_size)
],
axis=-1,
)
self.bias = np.random.uniform(
low=0, high=10, size=(self.out_channel_size)
).astype('f4')
@def_function.function
def conv2d(self, input_tensor: core.Tensor) -> Mapping[str, core.Tensor]:
"""Performs a 2D convolution operation.
Args:
input_tensor: Input tensor to perform convolution on.
Returns:
A map of: output key -> output result.
"""
scale = [1.0] * self.out_channel_size
offset = [0.5] * self.out_channel_size
mean, variance = scale, offset
out = nn_ops.conv2d(
input_tensor,
self.filters,
strides=strides,
dilations=dilations,
padding=padding,
data_format='NHWC',
name='sample/conv',
)
if bias_fn is not None:
out = nn_ops.bias_add(out, self.bias)
if has_batch_norm:
# Fusing is supported for non-training case.
out, _, _, _, _, _ = nn_ops.fused_batch_norm_v3(
out, scale, offset, mean, variance, is_training=False
)
if activation_fn is not None:
out = activation_fn(out)
return {'output': out}
model = ConvModel()
save_options = None
if has_func_alias:
save_options = tensorflow.saved_model.SaveOptions(
function_aliases={FUNC_ALIAS: model.conv2d}
)
saved_model_save.save(
model,
saved_model_path,
signatures=model.conv2d.get_concrete_function(
tensor_spec.TensorSpec(
shape=input_shape, dtype=dtypes.float32, name='input_tensor'
)
),
options=save_options,
)
return model
  def _create_gather_model(self, input_type, use_variable) -> module.Module:
    """Creates a model with a single gather op.

    NOTE(review): unlike the other factory helpers in this class, this one
    does not save the model to disk; the caller is responsible for saving.

    Args:
      input_type: dtype of the index input tensor.
      use_variable: If True, the gathered weight is stored in a Variable.

    Returns:
      The (unsaved) `module.Module` instance.
    """

    class GatherModel(module.Module):
      """A simple model with a single gather."""

      def __init__(self, use_variable):
        """Initializes a GatherModel.

        Args:
          use_variable: If True, creates a variable for weight.
        """
        super().__init__()
        w_val = np.random.randn(128, 32).astype('f4')
        if use_variable:
          self.w = variables.Variable(w_val)
        else:
          self.w = w_val

      @def_function.function(
          input_signature=[
              tensor_spec.TensorSpec(
                  shape=[6], dtype=input_type, name='input_tensor'
              )
          ]
      )
      def __call__(
          self, input_tensor: core.Tensor
      ) -> Mapping[str, core.Tensor]:
        """Performs a gather operation."""
        out = array_ops.gather_v2(self.w, input_tensor)
        return {'output': out}

    return GatherModel(use_variable)
def _create_add_model(
self,
shape: Sequence[int],
saved_model_path: str,
) -> module.Module:
class AddModel(module.Module):
"""A simple model with a single add."""
def __init__(self):
pass
@def_function.function
def add(self, input_tensor: core.Tensor) -> Mapping[str, core.Tensor]:
"""Performs an add operation.
Args:
input_tensor: Input tensor to perform add on.
Returns:
A map of: output key -> output result.
"""
out = math_ops.add(input_tensor, input_tensor)
return {'output': out}
model = AddModel()
saved_model_save.save(
model,
saved_model_path,
signatures=model.add.get_concrete_function(
tensor_spec.TensorSpec(
shape=shape, dtype=dtypes.float32, name='input_tensor'
)
),
)
return model
# Prepares sample einsum input data shapes.
# This function returns:
# 1. Shape for input 1
# 2. Shape for input 2
# 3. Shape for bias
# 4. Signature for input 1 (Could contain None dimension)
# 5. Signature for input 2 (Could contain None dimension)
def _prepare_sample_einsum_datashapes(
self,
equation: str,
generate_unknown_shape_signature: bool = False,
use_bias: bool = False,
) -> Tuple[
List[Optional[int]],
List[Optional[int]],
Optional[List[Optional[int]]],
List[Optional[int]],
List[Optional[int]],
]:
# 1. Parse equation.
comma_pos = equation.find(',')
arrow_pos = equation.find('->')
x_labels = equation[0:comma_pos]
y_labels = equation[comma_pos + 1 : arrow_pos]
out_labels = equation[arrow_pos + 1 :]
# 2. Create sample shapes.
label_to_size = {'a': 4, 'b': 32, 'c': 64, 'd': 128, 'e': 8}
x_shape = [label_to_size.get(x_label) for x_label in x_labels]
y_shape = [label_to_size.get(y_label) for y_label in y_labels]
bias_shape = None
if use_bias:
bias_shape = [label_to_size.get(out_label) for out_label in out_labels]
bias_shape = bias_shape[-1:]
contracting_dims = set()
x_signature = list(x_shape)
y_signature = list(y_shape)
if generate_unknown_shape_signature:
for c in x_labels:
if c in y_labels:
contracting_dims.add(c)
x_signature = [
None if c not in contracting_dims else x_shape[cidx]
for cidx, c in enumerate(x_labels)
]
y_signature = [
None if c not in contracting_dims else y_shape[cidx]
for cidx, c in enumerate(y_labels)
]
return x_shape, y_shape, bias_shape, x_signature, y_signature
  def _create_einsum_model(
      self,
      saved_model_path: str,
      equation: str,
      y_shape: Sequence[int],
      x_signature: Sequence[Optional[int]],
      y_signature: Sequence[Optional[int]],
      bias_shape: Optional[Sequence[int]] = None,
  ) -> module.Module:
    """Creates and saves a model with a single einsum (+ optional bias add).

    Only `einsum_with_kernel` is exported as the 'serving_default'
    signature; `einsum_without_kernel` remains callable on the returned
    module object.

    Args:
      saved_model_path: Directory to save the model to.
      equation: The einsum equation, e.g. 'ab,bc->ac'.
      y_shape: Shape of the constant kernel operand.
      x_signature: Signature (may contain None dims) for input `x`.
      y_signature: Signature (may contain None dims) for input `y`.
      bias_shape: Optional shape of the bias added after the einsum.

    Returns:
      The saved `module.Module` instance.
    """

    class EinsumModel(module.Module):
      """Einsum class."""

      def __init__(self):
        self._bias = None
        if bias_shape is not None:
          self._bias = array_ops.constant(
              np.random.uniform(size=bias_shape), dtype=dtypes.float32
          )

        self._kernel = np.random.uniform(size=y_shape).astype('f4')
        # NOTE(review): _min/_max are not read anywhere in this method --
        # presumably consumed by calibration elsewhere; confirm before
        # removing.
        self._min = (-0.8, -0.8, -0.9)
        self._max = (0.9, 0.9, 1.0)

      @def_function.function(
          input_signature=[
              tensor_spec.TensorSpec(
                  name='x', shape=x_signature, dtype=dtypes.float32
              )
          ]
      )
      def einsum_with_kernel(self, x: core.Tensor) -> Mapping[str, core.Tensor]:
        """Einsum with the stored constant kernel as the second operand."""
        return self._einsum(x, self._kernel)

      @def_function.function(
          input_signature=[
              tensor_spec.TensorSpec(
                  name='x', shape=x_signature, dtype=dtypes.float32
              ),
              tensor_spec.TensorSpec(
                  name='y', shape=y_signature, dtype=dtypes.float32
              ),
          ]
      )
      def einsum_without_kernel(
          self, x: core.Tensor, y: core.Tensor
      ) -> Mapping[str, core.Tensor]:
        """Einsum with both operands supplied by the caller."""
        return self._einsum(x, y)

      def _einsum(self, x, y):
        """Shared einsum (+ optional bias-add) implementation."""
        out = tensorflow.einsum(equation, x, y)
        if self._bias is not None:
          out = nn_ops.bias_add(out, self._bias)
        return {'output': out}

    model = EinsumModel()
    signatures = {
        'serving_default': model.einsum_with_kernel.get_concrete_function(
            tensor_spec.TensorSpec(
                name='x', shape=x_signature, dtype=dtypes.float32
            )
        ),
    }
    saved_model_save.save(model, saved_model_path, signatures=signatures)
    return model
|
tensorflowREPO_NAMEtensorflowPATH_START.@tensorflow_extracted@tensorflow-master@tensorflow@compiler@mlir@quantization@stablehlo@python@integration_test@quantize_model_test_base.py@.PATH_END.py
|
{
"filename": "_line.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/scattergl/_line.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class LineValidator(_plotly_utils.basevalidators.CompoundValidator):
    """Validator for the `line` compound property of `scattergl` traces."""

    def __init__(self, plotly_name="line", parent_name="scattergl", **kwargs):
        super(LineValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            data_class_str=kwargs.pop("data_class_str", "Line"),
            data_docs=kwargs.pop(
                "data_docs",
                """
            color
                Sets the line color.
            dash
                Sets the style of the lines.
            shape
                Determines the line shape. The values
                correspond to step-wise line shapes.
            width
                Sets the line width (in px).
""",
            ),
            **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@scattergl@_line.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "gwastro/pycbc",
"repo_path": "pycbc_extracted/pycbc-master/pycbc/inference/__init__.py",
"type": "Python"
}
|
# pylint: disable=unused-import
from . import (models, sampler, io)
from . import (burn_in, entropy, gelman_rubin, geweke, option_utils)
|
gwastroREPO_NAMEpycbcPATH_START.@pycbc_extracted@pycbc-master@pycbc@inference@__init__.py@.PATH_END.py
|
{
"filename": "_adapters.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/setuptools/py3/pkg_resources/_vendor/importlib_resources/_adapters.py",
"type": "Python"
}
|
from contextlib import suppress
from io import TextIOWrapper
from . import abc
class SpecLoaderAdapter:
    """
    Adapt a package spec to adapt the underlying loader.
    """

    def __init__(self, spec, adapter=lambda spec: spec.loader):
        # Derive the (possibly wrapped) loader first, then keep the spec.
        self.loader = adapter(spec)
        self.spec = spec

    def __getattr__(self, attr):
        # Every attribute not defined here is delegated to the wrapped spec.
        return getattr(self.spec, attr)
class TraversableResourcesLoader:
    """
    Adapt a loader to provide TraversableResources.
    """

    def __init__(self, spec):
        self.spec = spec

    def get_resource_reader(self, name):
        # `name` is unused: the reader is resolved from the stored spec via
        # the CompatibilityFiles shim (its native reader when available).
        return CompatibilityFiles(self.spec)._native()
def _io_wrapper(file, mode='r', *args, **kwargs):
if mode == 'r':
return TextIOWrapper(file, *args, **kwargs)
elif mode == 'rb':
return file
raise ValueError(
"Invalid mode value '{}', only 'r' and 'rb' are supported".format(mode)
)
class CompatibilityFiles:
    """
    Adapter for an existing or non-existent resource reader
    to provide a compatibility .files().
    """

    class SpecPath(abc.Traversable):
        """
        Path tied to a module spec.

        Can be read and exposes the resource reader children.
        """

        def __init__(self, spec, reader):
            self._spec = spec
            self._reader = reader

        def iterdir(self):
            # Without a reader there are no children to enumerate.
            if not self._reader:
                return iter(())
            return iter(
                CompatibilityFiles.ChildPath(self._reader, path)
                for path in self._reader.contents()
            )

        def is_file(self):
            return False

        # A spec path is neither a file nor a directory by this adapter's
        # definition; both checks answer False.
        is_dir = is_file

        def joinpath(self, other):
            if not self._reader:
                return CompatibilityFiles.OrphanPath(other)
            return CompatibilityFiles.ChildPath(self._reader, other)

        @property
        def name(self):
            return self._spec.name

        def open(self, mode='r', *args, **kwargs):
            return _io_wrapper(self._reader.open_resource(None), mode, *args, **kwargs)

    class ChildPath(abc.Traversable):
        """
        Path tied to a resource reader child.

        Can be read but doesn't expose any meaningful children.
        """

        def __init__(self, reader, name):
            self._reader = reader
            self._name = name

        def iterdir(self):
            return iter(())

        def is_file(self):
            return self._reader.is_resource(self.name)

        def is_dir(self):
            return not self.is_file()

        def joinpath(self, other):
            # Children are leaves; joining always produces an orphan path.
            return CompatibilityFiles.OrphanPath(self.name, other)

        @property
        def name(self):
            return self._name

        def open(self, mode='r', *args, **kwargs):
            return _io_wrapper(
                self._reader.open_resource(self.name), mode, *args, **kwargs
            )

    class OrphanPath(abc.Traversable):
        """
        Orphan path, not tied to a module spec or resource reader.

        Can't be read and doesn't expose any meaningful children.
        """

        def __init__(self, *path_parts):
            if len(path_parts) < 1:
                raise ValueError('Need at least one path part to construct a path')
            self._path = path_parts

        def iterdir(self):
            return iter(())

        def is_file(self):
            return False

        is_dir = is_file

        def joinpath(self, other):
            return CompatibilityFiles.OrphanPath(*self._path, other)

        @property
        def name(self):
            return self._path[-1]

        def open(self, mode='r', *args, **kwargs):
            raise FileNotFoundError("Can't open orphan path")

    def __init__(self, spec):
        self.spec = spec

    @property
    def _reader(self):
        # Loaders without get_resource_reader simply yield None here.
        with suppress(AttributeError):
            return self.spec.loader.get_resource_reader(self.spec.name)

    def _native(self):
        """
        Return the native reader if it supports files().
        """
        reader = self._reader
        return reader if hasattr(reader, 'files') else self

    def __getattr__(self, attr):
        # Everything else is delegated to the underlying reader (or None).
        return getattr(self._reader, attr)

    def files(self):
        return CompatibilityFiles.SpecPath(self.spec, self._reader)
def wrap_spec(package):
    """
    Construct a package spec with traversable compatibility
    on the spec/loader/reader.
    """
    # Replace the package's loader with one whose get_resource_reader always
    # yields a TraversableResources-capable reader.
    return SpecLoaderAdapter(package.__spec__, TraversableResourcesLoader)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@setuptools@py3@pkg_resources@_vendor@importlib_resources@_adapters.py@.PATH_END.py
|
{
"filename": "_colorsrc.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/streamtube/hoverlabel/font/_colorsrc.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class ColorsrcValidator(_plotly_utils.basevalidators.SrcValidator):
    """Validator for `colorsrc` on `streamtube.hoverlabel.font`."""

    def __init__(
        self, plotly_name="colorsrc", parent_name="streamtube.hoverlabel.font", **kwargs
    ):
        super(ColorsrcValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=kwargs.pop("edit_type", "none"),
            role=kwargs.pop("role", "info"),
            **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@streamtube@hoverlabel@font@_colorsrc.py@.PATH_END.py
|
{
"filename": "calc_error.py",
"repo_name": "zachetienne/nrpytutorial",
"repo_path": "nrpytutorial_extracted/nrpytutorial-master/UnitTesting/calc_error.py",
"type": "Python"
}
|
import logging
from mpmath import log10, fabs, mp
from datetime import date
from UnitTesting.standard_constants import precision
from UnitTesting.create_dict_string import create_dict_string
# calc_error loops through each item in self.calculated_dict and self.trusted_values_dict_entry,
# and makes sure that the difference between each respective value's number of decimal in the dictionaries places is
# less than 1/2 of the precision. It returns a boolean representing whether any variables differed.
# Called by run_test
# Uses self.calculated_dict, self.trusted_values_dict_entry, self.module_name
def calc_error(self):
    """Compares self.calculated_dict against self.trusted_values_dict_entry.

    Verifies that both dictionaries hold the same variables and that each
    value pair agrees to within half of the configured decimal precision.
    Returns True when every variable matches, False otherwise.
    """
    # Setting precision
    mp.dps = precision

    # Creating sets to easily compare the keys of calculated_dict and trusted_dict
    calculated_set = set(self.calculated_dict)
    trusted_set = set(self.trusted_values_dict_entry)

    logging.debug(' Checking that calculated and trusted dicts contain the same variables...')
    # If the sets differ, output the differing variables
    if calculated_set != trusted_set:
        logging.error(' {}: Calculated dictionary and trusted dictionary have different variables.'
                      .format(self.module_name))
        calculated_minus_trusted = calculated_set - trusted_set
        trusted_minus_calculated = trusted_set - calculated_set
        if calculated_minus_trusted != set([]):
            logging.error(' Calculated Dictionary variables not in Trusted Dictionary: ' +
                          str(sorted(calculated_minus_trusted)))
        if trusted_minus_calculated != set([]):
            logging.error(' Trusted Dictionary variables not in Calculated Dictionary: ' +
                          str(sorted(trusted_minus_calculated)))
        # Automatically fail and don't proceed
        return False

    logging.debug(' ...Success: same variables in both dicts.\n')

    # Initialize list of variables whose values differ
    bad_var_list = []

    logging.debug(' Comparing all calculated and trusted values...')

    # Loop through all variables in sorted order
    for var in sorted(self.calculated_dict):

        # Values to compare
        calculated_val = self.calculated_dict[var]
        trusted_val = self.trusted_values_dict_entry[var]

        # For each variable, print calculated and trusted values
        logging.debug('\n' + self.module_name + ': ' + var + ': Calculated: ' + str(
            calculated_val) + '\n' + self.module_name + ': ' + var
                      + ': Trusted: ' + str(trusted_val) + '\n')

        # Calculate the error between both values: relative error when both
        # are nonzero, absolute magnitude when one side is exactly zero.
        if trusted_val == 0:
            log10_relative_error = log10(fabs(calculated_val))
        elif calculated_val == 0:
            log10_relative_error = log10(fabs(trusted_val))
        else:
            log10_relative_error = log10(fabs((trusted_val - calculated_val) / trusted_val))

        # Boolean determining if their difference is within the tolerance we accept
        good = (log10_relative_error < (precision / -2.0))

        # Store all variables who are not 'good'
        if not good:
            bad_var_list.append(var)

    # If we want to output and there exists at least one variable with error, print
    if bad_var_list:
        logging.error('''
\nVariable(s) {} in module {} failed. Please check values.
If you are confident that the newly calculated values are correct, comment out the old trusted values for
{} in your trusted_values_dict and copy the following code between the ##### into your trusted_values_dict.
Make sure to fill out the TODO comment describing why the values had to be changed. Then re-run test script.

#####

# Generated on: {}
# Reason for changing values: TODO
trusted_values_dict['{}'] = {}

#####
'''.format(bad_var_list, self.module_name, self.trusted_values_dict_name, date.today(),
           self.trusted_values_dict_name, create_dict_string(self.calculated_dict)))
    else:
        logging.debug(' ...Success: all variables identical.\n')

    # Return True if all variables are good, False otherwise
    return bad_var_list == []
|
zachetienneREPO_NAMEnrpytutorialPATH_START.@nrpytutorial_extracted@nrpytutorial-master@UnitTesting@calc_error.py@.PATH_END.py
|
{
"filename": "feature_request.md",
"repo_name": "nlesc-dirac/sagecal",
"repo_path": "sagecal_extracted/sagecal-master/.github/ISSUE_TEMPLATE/feature_request.md",
"type": "Markdown"
}
|
---
name: Feature request
about: Suggest an idea for this project
---
**Is your feature request related to a problem? Please describe.**
A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
**Describe the solution you'd like**
A clear and concise description of what you want to happen.
**Describe alternatives you've considered**
A clear and concise description of any alternative solutions or features you've considered.
**Additional context**
Add any other context or screenshots about the feature request here.
|
nlesc-diracREPO_NAMEsagecalPATH_START.@sagecal_extracted@sagecal-master@.github@ISSUE_TEMPLATE@feature_request.md@.PATH_END.py
|
{
"filename": "_labelalias.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/densitymap/colorbar/_labelalias.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class LabelaliasValidator(_plotly_utils.basevalidators.AnyValidator):
    """Validator for `labelalias` on `densitymap.colorbar`."""

    def __init__(
        self, plotly_name="labelalias", parent_name="densitymap.colorbar", **kwargs
    ):
        super(LabelaliasValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=kwargs.pop("edit_type", "colorbars"),
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@densitymap@colorbar@_labelalias.py@.PATH_END.py
|
{
"filename": "test_profile_plots.py",
"repo_name": "yt-project/yt",
"repo_path": "yt_extracted/yt-main/yt/visualization/tests/test_profile_plots.py",
"type": "Python"
}
|
import os
import shutil
import tempfile
import unittest
import pytest
import yt
from yt.testing import assert_allclose_units, fake_random_ds
from yt.visualization.api import PhasePlot
class TestPhasePlotAPI:
    """Image-comparison tests for the PhasePlot customization API."""

    @classmethod
    def setup_class(cls):
        # One shared synthetic dataset for every test in the class.
        cls.ds = fake_random_ds(
            16, fields=("density", "temperature"), units=("g/cm**3", "K")
        )

    def get_plot(self):
        """Returns a fresh density/temperature/mass PhasePlot."""
        return PhasePlot(
            self.ds, ("gas", "density"), ("gas", "temperature"), ("gas", "mass")
        )

    @pytest.mark.parametrize("kwargs", [{}, {"color": "b"}])
    @pytest.mark.mpl_image_compare
    def test_phaseplot_annotate_text(self, kwargs):
        p = self.get_plot()
        p.annotate_text(1e-4, 1e-2, "Test text annotation", **kwargs)
        p.render()
        return p.plots["gas", "mass"].figure

    @pytest.mark.mpl_image_compare
    def test_phaseplot_set_title(self):
        p = self.get_plot()
        p.set_title(("gas", "mass"), "Test Title")
        p.render()
        return p.plots["gas", "mass"].figure

    @pytest.mark.mpl_image_compare
    def test_phaseplot_set_log(self):
        p = self.get_plot()
        p.set_log(("gas", "mass"), False)
        p.render()
        return p.plots["gas", "mass"].figure

    @pytest.mark.mpl_image_compare
    def test_phaseplot_set_unit(self):
        p = self.get_plot()
        p.set_unit(("gas", "mass"), "Msun")
        p.render()
        return p.plots["gas", "mass"].figure

    @pytest.mark.mpl_image_compare
    def test_phaseplot_set_xlim(self):
        p = self.get_plot()
        p.set_xlim(1e-3, 1e0)
        p.render()
        return p.plots["gas", "mass"].figure

    @pytest.mark.mpl_image_compare
    def test_phaseplot_set_ylim(self):
        p = self.get_plot()
        p.set_ylim(1e-2, 1e0)
        p.render()
        return p.plots["gas", "mass"].figure
def test_set_units():
    """Setting units via field tuples should not raise."""
    ds = fake_random_ds(
        16, fields=("density", "temperature"), units=("g/cm**3", "K")
    )
    sphere = ds.sphere("max", (1.0, "Mpc"))
    profile_plot = yt.ProfilePlot(sphere, ("index", "radius"), ("gas", "density"))
    phase_plot = yt.PhasePlot(
        sphere, ("gas", "density"), ("gas", "temperature"), ("gas", "mass")
    )
    # make sure we can set the units using the tuple without erroring out
    profile_plot.set_unit(("gas", "density"), "Msun/kpc**3")
    phase_plot.set_unit(("gas", "temperature"), "R")
def test_set_labels():
    """Setting x/y axis labels on a ProfilePlot should not raise."""
    dataset = fake_random_ds(16)
    profile_plot = yt.ProfilePlot(
        dataset.all_data(),
        ("index", "radius"),
        [("gas", "velocity_x"), ("gas", "density")],
        weight_field=None,
    )
    # make sure we can set the labels without erroring out
    profile_plot.set_ylabel("all", "test ylabel")
    profile_plot.set_xlabel("test xlabel")
def test_create_from_dataset():
    """Plots built from a dataset should match ones built from its all_data()."""
    ds = fake_random_ds(16)
    plot1 = yt.ProfilePlot(
        ds,
        ("index", "radius"),
        [("gas", "velocity_x"), ("gas", "density")],
        weight_field=None,
    )
    plot2 = yt.ProfilePlot(
        ds.all_data(),
        ("index", "radius"),
        [("gas", "velocity_x"), ("gas", "density")],
        weight_field=None,
    )
    assert_allclose_units(
        plot1.profiles[0]["gas", "density"], plot2.profiles[0]["gas", "density"]
    )
    assert_allclose_units(
        plot1.profiles[0]["velocity_x"], plot2.profiles[0]["velocity_x"]
    )

    plot1 = yt.PhasePlot(ds, ("gas", "density"), ("gas", "velocity_x"), ("gas", "mass"))
    plot2 = yt.PhasePlot(
        ds.all_data(), ("gas", "density"), ("gas", "velocity_x"), ("gas", "mass")
    )
    assert_allclose_units(plot1.profile["mass"], plot2.profile["mass"])
class TestAnnotations(unittest.TestCase):
    """Exercises ProfilePlot annotation helpers (titles and text)."""

    @classmethod
    def setUpClass(cls):
        # Work inside a throwaway directory so plot.save() leaves no files
        # in the repository; restored and removed in tearDownClass.
        cls.tmpdir = tempfile.mkdtemp()
        cls.curdir = os.getcwd()
        os.chdir(cls.tmpdir)

        ds = fake_random_ds(16)
        ad = ds.all_data()
        cls.fields = [
            ("gas", "velocity_x"),
            ("gas", "velocity_y"),
            ("gas", "velocity_z"),
        ]
        cls.plot = yt.ProfilePlot(
            ad, ("index", "radius"), cls.fields, weight_field=None
        )

    @classmethod
    def tearDownClass(cls):
        os.chdir(cls.curdir)
        shutil.rmtree(cls.tmpdir)

    def test_annotations(self):
        # make sure we can annotate without erroring out
        # annotate the plot with only velocity_x
        self.plot.annotate_title("velocity_x plot", self.fields[0])
        self.plot.annotate_text(1e-1, 1e1, "Annotated velocity_x")

        # annotate the plots with velocity_y and velocity_z with
        # the same annotations
        self.plot.annotate_title("Velocity Plots (Y or Z)", self.fields[1:])
        self.plot.annotate_text(1e-1, 1e1, "Annotated vel_y, vel_z", self.fields[1:])
        self.plot.save()

    def test_annotations_wrong_fields(self):
        from yt.utilities.exceptions import YTFieldNotFound

        with self.assertRaises(YTFieldNotFound):
            self.plot.annotate_title("velocity_x plot", "wrong_field_name")

        with self.assertRaises(YTFieldNotFound):
            self.plot.annotate_text(1e-1, 1e1, "Annotated text", "wrong_field_name")
def test_phaseplot_set_log():
    """set_log should accept field tuples and field objects on both plot types."""
    fields = ("density", "temperature")
    units = (
        "g/cm**3",
        "K",
    )
    ds = fake_random_ds(16, fields=fields, units=units)
    sp = ds.sphere("max", (1.0, "Mpc"))
    p1 = yt.ProfilePlot(sp, ("index", "radius"), ("gas", "density"))
    p2 = yt.PhasePlot(sp, ("gas", "density"), ("gas", "temperature"), ("gas", "mass"))
    # make sure we can set the log-scaling using the tuple without erroring out
    p1.set_log(("gas", "density"), False)
    p2.set_log(("gas", "temperature"), False)
    # ProfilePlot.y_log is a per-field mapping; PhasePlot.y_log is one flag.
    assert not p1.y_log["gas", "density"]
    assert not p2.y_log

    # make sure we can re-enable log scaling the same way without erroring out
    p1.set_log(("gas", "density"), True)
    p2.set_log(("gas", "temperature"), True)
    assert p1.y_log["gas", "density"]
    assert p2.y_log

    # make sure we can set the log-scaling using a field object
    p1.set_log(ds.fields.gas.density, False)
    p2.set_log(ds.fields.gas.temperature, False)
    assert not p1.y_log["gas", "density"]
    assert not p2.y_log
def test_phaseplot_showhide_colorbar_axes():
    """Toggling colorbar/axes visibility must leave the plot saveable."""
    ds = fake_random_ds(
        16, fields=("density", "temperature"), units=("g/cm**3", "K")
    )
    plot = yt.PhasePlot(
        ds.all_data(), ("gas", "density"), ("gas", "temperature"), ("gas", "mass")
    )
    # Same toggle order as before: hide/show colorbar, then hide/show axes,
    # saving after each toggle to prove the figure still renders.
    for toggle in (
        plot.hide_colorbar,
        plot.show_colorbar,
        plot.hide_axes,
        plot.show_axes,
    ):
        toggle()
        with tempfile.NamedTemporaryFile(suffix="png") as f:
            plot.save(f.name)
|
yt-projectREPO_NAMEytPATH_START.@yt_extracted@yt-main@yt@visualization@tests@test_profile_plots.py@.PATH_END.py
|
{
"filename": "irafnames.py",
"repo_name": "iraf-community/pyraf",
"repo_path": "pyraf_extracted/pyraf-main/pyraf/irafnames.py",
"type": "Python"
}
|
"""module irafnames.py -- define how names of IRAF packages and tasks get
included in the user's namespace. Uses a plug-in strategy so behavior can
be changed.
R. White, 1999 March 26
"""
import __main__
from .tools import irafglobals
from . import iraf
def _addName(task, module):
    """Add a task object to the module namespace

    Skip if there is a collision with another name
    unless it is an IrafTask
    """
    name = task.getName()
    # getattr with a default collapses the previous hasattr/getattr two-step.
    p = getattr(module, name, None)
    if (p is None) or isinstance(p, irafglobals.IrafTask):
        setattr(module, name, task)
    else:
        # Existing non-task attribute wins; optionally warn about the clash.
        if irafglobals.Verbose > 0:
            print("Warning: " + module.__name__ + "." + name +
                  " was not redefined as Iraf Task")
class IrafNameStrategy:
    """Basic namespace strategy: adds nothing to any namespace."""

    def addTask(self, task):
        """Hook invoked when a task is defined; no-op in the base strategy."""
        return None

    def addPkg(self, pkg):
        """Hook invoked when a package is defined; no-op in the base strategy."""
        return None
# NameClean implementation puts tasks and packages in iraf module name space
# Note that since packages are also tasks, we only need to do this for tasks
class IrafNameClean(IrafNameStrategy):
    """Adds tasks (and therefore packages) only to the `iraf` module."""

    def addTask(self, task):
        _addName(task, iraf)
# IrafNamePkg also adds packages to __main__ name space
class IrafNamePkg(IrafNameClean):
    """Like IrafNameClean, but packages are also added to `__main__`."""

    def addPkg(self, pkg):
        _addName(pkg, __main__)
# IrafNameTask puts everything (tasks and packages) in __main__ name space
class IrafNameTask(IrafNameClean):
    """Export every task into both the ``iraf`` module and ``__main__``."""

    def addTask(self, task):
        """Expose *task* in ``iraf`` and in the user's ``__main__`` namespace."""
        for target in (iraf, __main__):
            _addName(task, target)
def setPkgStrategy():
    """Select the strategy that also exports package names to ``__main__``."""
    global strategy
    strategy = IrafNamePkg()
def setTaskStrategy():
    """Select the strategy that exports all tasks to ``__main__`` as well as ``iraf``."""
    global strategy
    strategy = IrafNameTask()
def setCleanStrategy():
    """Select the strategy that exports names only into the ``iraf`` module."""
    global strategy
    strategy = IrafNameClean()
# Module default: export task/package names into the iraf module only.
# (Call setPkgStrategy() instead to also add package names to __main__.)
setCleanStrategy()
|
iraf-communityREPO_NAMEpyrafPATH_START.@pyraf_extracted@pyraf-main@pyraf@irafnames.py@.PATH_END.py
|
{
"filename": "test_background_subtraction.py",
"repo_name": "LCOGT/banzai-nres",
"repo_path": "banzai-nres_extracted/banzai-nres-main/banzai_nres/tests/test_background_subtraction.py",
"type": "Python"
}
|
from banzai_nres.frames import NRESObservationFrame
from banzai.data import CCDData
import numpy as np
from banzai_nres.background import BackgroundSubtractor
from banzai import context
import pytest
@pytest.fixture
def seed():
    # Fix the global NumPy RNG so the synthetic backgrounds and noise
    # generated in the tests below are reproducible.
    np.random.seed(11248)
def test_background_subtraction_on_noisy_data(seed):
    """The stage should recover a smooth 2D Gaussian background under white noise."""
    nx, ny = 405, 403
    grid_x, grid_y = np.meshgrid(np.arange(nx), np.arange(ny))
    noise_sigma = 1.0
    truth = 30 * np.exp(-(grid_x - nx / 2.0) ** 2 / 300 ** 2
                        - (grid_y - ny / 2.0 - 50.0) ** 2 / 200 ** 2)
    noisy = truth + np.random.normal(0.0, noise_sigma, size=truth.shape)
    ccd = CCDData(data=noisy, uncertainty=np.ones((ny, nx)) * noise_sigma,
                  meta={'OBJECTS': 'tung&tung&none'})
    image = NRESObservationFrame([ccd], 'foo.fits')
    image.traces = np.zeros((ny, nx))
    stage = BackgroundSubtractor(context.Context({}))
    result = stage.do_stage(image)
    # ~4 counts of absolute error is acceptable: fully modelling the image
    # would do better but is computationally prohibitive on a 4k x 4k frame.
    np.testing.assert_allclose(result.background, truth, atol=4.0)
def test_background_subtraction_with_traces(seed):
    """Background recovery should still work when trace regions are flagged."""
    nx, ny = 405, 403
    grid_x, grid_y = np.meshgrid(np.arange(nx), np.arange(ny))
    noise_sigma = 1.0
    truth = 30 * np.exp(-(grid_x - nx / 2.0) ** 2 / 300 ** 2
                        - (grid_y - ny / 2.0 - 50.0) ** 2 / 200 ** 2)
    noisy = truth + np.random.normal(0.0, noise_sigma, size=truth.shape)
    ccd = CCDData(data=noisy, uncertainty=np.ones((ny, nx)) * noise_sigma,
                  meta={'OBJECTS': 'tung&tung&none'})
    image = NRESObservationFrame([ccd], 'foo.fits')
    image.traces = np.zeros((ny, nx))
    # Mark seven ten-row-wide horizontal trace bands, labelled 1..7.
    for label in range(1, 8):
        image.traces[40 * label:40 * label + 10] = label
    stage = BackgroundSubtractor(context.Context({}))
    result = stage.do_stage(image)
    # Slightly looser tolerance than the trace-free case; full forward
    # modelling would be prohibitive on a 4k x 4k image.
    np.testing.assert_allclose(result.background, truth, atol=5.0)
|
LCOGTREPO_NAMEbanzai-nresPATH_START.@banzai-nres_extracted@banzai-nres-main@banzai_nres@tests@test_background_subtraction.py@.PATH_END.py
|
{
"filename": "normalize_is_none.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/pythran/pythran/transformations/normalize_is_none.py",
"type": "Python"
}
|
""" NormalizeIsNone detects is None patterns. """
from pythran.passmanager import Transformation
from pythran.analyses import Ancestors
from pythran.syntax import PythranSyntaxError
from functools import reduce
import gast as ast
def is_none(expr):
    """Return True when *expr* is a literal ``None``.

    Handles both the py3 form (a Constant holding None) and the py2/gast
    form (an Attribute whose attr is the string "None").
    """
    if isinstance(expr, ast.Constant):
        return expr.value is None
    return isinstance(expr, ast.Attribute) and expr.attr == "None"
def is_is_none(expr):
    """Return the operand compared to None via ``==``/``is``, or None if no match.

    Raises PythranSyntaxError when a None literal appears inside a
    multi-operand comparison, which this normalization cannot handle.
    """
    if not isinstance(expr, ast.Compare):
        return None
    operands = [expr.left] + expr.comparators
    if len(expr.ops) != 1:
        if any(map(is_none, operands)):
            raise PythranSyntaxError("is None in complex condition", expr)
        return None
    if not isinstance(expr.ops[0], (ast.Eq, ast.Is)):
        return None
    left, right = operands
    if is_none(left):
        return right
    if is_none(right):
        return left
    return None
def is_is_not_none(expr):
    """Return the operand compared to None via ``!=``/``is not``, or None if no match.

    Raises PythranSyntaxError when a None literal appears inside a
    multi-operand comparison, which this normalization cannot handle.
    """
    if not isinstance(expr, ast.Compare):
        return None
    operands = [expr.left] + expr.comparators
    if len(expr.ops) != 1:
        if any(map(is_none, operands)):
            raise PythranSyntaxError("is None in complex condition", expr)
        return None
    if not isinstance(expr.ops[0], (ast.NotEq, ast.IsNot)):
        return None
    left, right = operands
    if is_none(left):
        return right
    if is_none(right):
        return left
    return None
class NormalizeIsNone(Transformation):
    """Rewrite ``x is None`` tests into ``builtins.pythran.is_none(x)`` calls."""

    # Boolean operators are mapped to their bitwise counterparts so rewritten
    # operands (now plain calls) can still be combined into one expression.
    table = {ast.And: ast.BitAnd, ast.Or: ast.BitOr}

    def __init__(self):
        super(NormalizeIsNone, self).__init__(Ancestors)

    @staticmethod
    def match_is_none(node):
        # Returns (operand compared against None or None, was-negated flag).
        noned_var = is_is_none(node)
        if noned_var is None:
            noned_var = is_is_not_none(node)
            negated = noned_var is not None
        else:
            negated = False
        return noned_var, negated

    def visit_BoolOp(self, node):
        values = list(node.values)
        self.generic_visit(node)
        # If any operand was rewritten by the visit, fold the whole BoolOp
        # into a left-associated chain of bitwise BinOps (see ``table``).
        if any(x != y for x, y in zip(values, node.values)):
            self.update = True
            expr = reduce(lambda x, y:
                          ast.BinOp(x,
                                    NormalizeIsNone.table[type(node.op)](), y),
                          node.values)
            return expr
        else:
            return node

    def visit_Compare(self, node):
        self.generic_visit(node)
        noned_var, negated = self.match_is_none(node)
        if noned_var is None:
            return node
        # Build the call ``builtins.pythran.is_none(noned_var)``.
        # NOTE: the 4-argument ast.Name form is gast-specific.
        call = ast.Call(
            ast.Attribute(
                ast.Attribute(
                    ast.Name('builtins', ast.Load(), None, None),
                    'pythran',
                    ast.Load()
                ),
                'is_none',
                ast.Load()),
            [noned_var], [])
        self.update = True
        if negated:
            return ast.UnaryOp(ast.Not(), call)
        else:
            return call
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@pythran@pythran@transformations@normalize_is_none.py@.PATH_END.py
|
{
"filename": "_tickangle.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/barpolar/marker/colorbar/_tickangle.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class TickangleValidator(_plotly_utils.basevalidators.AngleValidator):
    """Validator for the ``barpolar.marker.colorbar.tickangle`` property."""

    def __init__(
        self, plotly_name="tickangle", parent_name="barpolar.marker.colorbar", **kwargs
    ):
        # Allow callers to override edit_type; default to "colorbars".
        edit_type = kwargs.pop("edit_type", "colorbars")
        super().__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@barpolar@marker@colorbar@_tickangle.py@.PATH_END.py
|
{
"filename": "xi_data_2d_eBOSS_broadband_test.py",
"repo_name": "Samreay/Barry",
"repo_path": "Barry_extracted/Barry-master/config/examples/xi_data_2d_eBOSS_broadband_test.py",
"type": "Python"
}
|
import sys
sys.path.append("..")
sys.path.append("../..")
from barry.samplers import NautilusSampler
from barry.config import setup
from barry.models import CorrBeutler2017
from barry.datasets.dataset_correlation_function import CorrelationFunction_eBOSS_LRGpCMASS
from barry.fitter import Fitter
import numpy as np
import pandas as pd
from barry.models.model import Correction
import matplotlib.pyplot as plt
# Run an optimisation on each of the post-recon SDSS DR12 mocks. Then compare to the pre-recon mocks
# to compute the cross-correlation between BAO parameters and pre-recon measurements
if __name__ == "__main__":
    pfn, dir_name, file = setup(__file__)

    fitter = Fitter(dir_name)
    sampler = NautilusSampler(temp_dir=dir_name)

    # eBOSS LRG+CMASS post-recon correlation function, monopole + quadrupole.
    dataset = CorrelationFunction_eBOSS_LRGpCMASS(
        realisation="data", recon="iso", isotropic=False, fit_poles=[0, 2], min_dist=50.0, max_dist=150.0
    )

    # Standard Beutler Model with a polynomial (1/s^2, 1/s, const) broadband.
    model_poly = CorrBeutler2017(
        recon="iso",
        isotropic=False,
        fix_params=["om"],
        poly_poles=[0, 2],
        correction=Correction.NONE,
        marg="full",
        n_poly=[-2, -1, 0],
    )
    model_poly.set_default("sigma_nl_par", 7.0, min=0.0, max=20.0, sigma=2.0, prior="gaussian")
    model_poly.set_default("sigma_nl_perp", 2.0, min=0.0, max=20.0, sigma=2.0, prior="gaussian")
    model_poly.set_default("sigma_s", 0.0, min=0.0, max=20.0, sigma=2.0, prior="gaussian")

    # Same model but with a spline-style (const, s^2, s^4) broadband.
    model_spline = CorrBeutler2017(
        recon="iso",
        isotropic=False,
        fix_params=["om"],
        poly_poles=[0, 2],
        correction=Correction.NONE,
        marg="full",
        n_poly=[0, 2, 4],
    )
    model_spline.set_default("sigma_nl_par", 7.0, min=0.0, max=20.0, sigma=2.0, prior="gaussian")
    model_spline.set_default("sigma_nl_perp", 2.0, min=0.0, max=20.0, sigma=2.0, prior="gaussian")
    model_spline.set_default("sigma_s", 0.0, min=0.0, max=20.0, sigma=2.0, prior="gaussian")

    fitter.add_model_and_dataset(model_poly, dataset, name=dataset.name + " poly")
    fitter.add_model_and_dataset(model_spline, dataset, name=dataset.name + " spline")
    fitter.set_sampler(sampler)
    fitter.set_num_walkers(1)
    fitter.fit(file)

    # Everything below is nasty plotting code ###########################################################
    if fitter.should_plot():
        import logging

        logging.info("Creating plots")

        from chainconsumer import ChainConsumer

        # NOTE(review): `counter` is never used below — presumably a leftover.
        counter = 0
        c = [ChainConsumer(), ChainConsumer()]
        for posterior, weight, chain, evidence, model, data, extra in fitter.load():
            # Build a short fit label from the stored chain name and route
            # it to the NGC (0) or SGC (1) ChainConsumer.
            fitname = "_".join([extra["name"].split()[4], extra["name"].split()[6]])
            skybin = 0 if "ngc" in fitname.lower() else 1
            df = pd.DataFrame(chain, columns=model.get_labels())
            alpha = df["$\\alpha$"].to_numpy()
            epsilon = df["$\\epsilon$"].to_numpy()
            alpha_par, alpha_perp = model.get_alphas(alpha, epsilon)
            df["$\\alpha_\\parallel$"] = alpha_par
            df["$\\alpha_\\perp$"] = alpha_perp
            extra.pop("realisation", None)
            extra.pop("name", None)
            print(" ".join(fitname.split("_")))
            c[skybin].add_chain(df, weights=weight, posterior=posterior, name=" ".join(fitname.split("_")), **extra)

            # Get the MAP point and set the model up at this point
            model.set_data(data)
            # NOTE(review): `r_s`, `params` and the model.plot return values
            # are assigned but unused here — only the best-fit figure matters.
            r_s = model.camb.get_data()["r_s"]
            max_post = posterior.argmax()
            params = df.loc[max_post]
            params_dict = model.get_param_dict(chain[max_post])
            for name, val in params_dict.items():
                model.set_default(name, val)

            new_chi_squared, dof, bband, mods, smooths = model.plot(
                params_dict, figname=pfn + "_" + fitname + "_bestfit.pdf", display=False
            )

        # Reference (alpha_par, alpha_perp) from Bautista et al., converted
        # from distances to alpha ratios, with the covariance propagated to
        # (alpha, epsilon) via the Jacobian below.
        alp_per, alp_par = 17.85823691865007 / 17.436, 19.32575373059217 / 20.194
        alp_cov = np.array(
            [
                [0.2838176386340292 / 20.194**2, -0.05831820341302727 / (17.436 * 20.194)],
                [-0.0583182034130273 / (17.436 * 20.194), 0.1076634008565565 / 17.436**2],
            ]
        )
        alp, eps = alp_per ** (2.0 / 3.0) * alp_par ** (1.0 / 3.0), (alp_par / alp_per) ** (1.0 / 3.0) - 1.0
        jac = np.array(
            [
                [
                    (1.0 / 3.0) * alp_per ** (2.0 / 3.0) * alp_par ** (-2.0 / 3.0),
                    (2.0 / 3.0) * alp_per ** (-1.0 / 3.0) * alp_par ** (1.0 / 3.0),
                ],
                [
                    (1.0 / 3.0) * alp_par ** (-2.0 / 3.0) * alp_per ** (-1.0 / 3.0),
                    (-1.0 / 3.0) * alp_par ** (1.0 / 3.0) * alp_per ** (-4.0 / 3.0),
                ],
            ]
        )
        alp_cov2 = jac @ alp_cov @ jac.T
        print(alp, eps, alp_par, alp_per)
        print(np.sqrt(np.diag(alp_cov2)), np.sqrt(np.diag(alp_cov)))
        aperp, apar = [1.042, 0.992], [0.947, 0.996]
        aperp_err, apar_err = [0.024, 0.038], [0.026, 0.113]
        # Only the first (NGC) bin is plotted; range(1) deliberately skips SGC.
        for skybin in range(1):
            """c[skybin].add_covariance(
                [alp_par, alp_per],
                alp_cov,
                parameters=["$\\alpha_\\parallel$", "$\\alpha_\\perp$"],
                name="Bautista et. al., 2020",
                color="k",
                shade_alpha=0.5,
            )"""
            c[skybin].add_covariance(
                [alp, eps], alp_cov2, parameters=["$\\alpha$", "$\\epsilon$"], name="Bautista et. al., 2021", color="k", shade_alpha=0.5
            )
            sky = "NGC" if skybin == 0 else "SGC"
            c[skybin].configure(
                shade=True,
                bins=20,
                legend_artists=True,
                max_ticks=4,
                # legend_location=(0, -1),
                legend_kwargs={"fontsize": 10},
                plot_contour=True,
                zorder=[3, 5, 4],
            )
            axes = (
                c[skybin]
                .plotter.plot(
                    # filename=pfn + f"{sky}_contour.pdf",
                    # parameters=["$\\alpha_\\parallel$", "$\\alpha_\\perp$"],
                    parameters=["$\\alpha$", "$\\epsilon$"],
                )
                .get_axes()
            )
            # axes[0].axvspan(apar[skybin] - apar_err[skybin], apar[skybin] + apar_err[skybin], color="k", alpha=0.1, zorder=1)
            # axes[2].axhspan(aperp[skybin] - aperp_err[skybin], aperp[skybin] + aperp_err[skybin], color="k", alpha=0.1, zorder=1)
            # axes[2].axvspan(apar[skybin] - apar_err[skybin], apar[skybin] + apar_err[skybin], color="k", alpha=0.1, zorder=1)
            # axes[3].axhspan(aperp[skybin] - aperp_err[skybin], aperp[skybin] + aperp_err[skybin], color="k", alpha=0.1, zorder=1)
            results = c[skybin].analysis.get_summary(parameters=["$\\alpha_\\parallel$", "$\\alpha_\\perp$"])
            print(c[skybin].analysis.get_latex_table(parameters=["$\\alpha$", "$\\epsilon$", "$\\alpha_\\parallel$", "$\\alpha_\\perp$"]))
            print(results)
            # plt.tight_layout()
            plt.savefig(pfn + f"_{sky}_contour.pdf", bbox_inches="tight")
|
SamreayREPO_NAMEBarryPATH_START.@Barry_extracted@Barry-master@config@examples@xi_data_2d_eBOSS_broadband_test.py@.PATH_END.py
|
{
"filename": "_hoverlabel.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/graph_objs/scatter3d/_hoverlabel.py",
"type": "Python"
}
|
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Hoverlabel(_BaseTraceHierarchyType):
    """Hover-label styling options for a scatter3d trace (auto-generated)."""

    # class properties
    # --------------------
    _parent_path_str = "scatter3d"
    _path_str = "scatter3d.hoverlabel"
    _valid_props = {
        "align",
        "alignsrc",
        "bgcolor",
        "bgcolorsrc",
        "bordercolor",
        "bordercolorsrc",
        "font",
        "namelength",
        "namelengthsrc",
    }

    # align
    # -----
    @property
    def align(self):
        """
        Horizontal alignment of the text content within the hover label
        box; only has an effect when the label spans two or more lines.
        One of 'left', 'right', 'auto', or an array of these.

        Returns
        -------
        Any|numpy.ndarray
        """
        return self["align"]

    @align.setter
    def align(self, val):
        self["align"] = val

    # alignsrc
    # --------
    @property
    def alignsrc(self):
        """
        Source reference on Chart Studio Cloud for `align`; a string or
        a plotly.grid_objs.Column object.

        Returns
        -------
        str
        """
        return self["alignsrc"]

    @alignsrc.setter
    def alignsrc(self, val):
        self["alignsrc"] = val

    # bgcolor
    # -------
    @property
    def bgcolor(self):
        """
        Background color of this trace's hover labels: any CSS color
        string (hex, rgb/rgba, hsl/hsla, hsv/hsva, or a named CSS
        color), or an array of such colors.

        Returns
        -------
        str|numpy.ndarray
        """
        return self["bgcolor"]

    @bgcolor.setter
    def bgcolor(self, val):
        self["bgcolor"] = val

    # bgcolorsrc
    # ----------
    @property
    def bgcolorsrc(self):
        """
        Source reference on Chart Studio Cloud for `bgcolor`; a string
        or a plotly.grid_objs.Column object.

        Returns
        -------
        str
        """
        return self["bgcolorsrc"]

    @bgcolorsrc.setter
    def bgcolorsrc(self, val):
        self["bgcolorsrc"] = val

    # bordercolor
    # -----------
    @property
    def bordercolor(self):
        """
        Border color of this trace's hover labels: any CSS color string
        (hex, rgb/rgba, hsl/hsla, hsv/hsva, or a named CSS color), or
        an array of such colors.

        Returns
        -------
        str|numpy.ndarray
        """
        return self["bordercolor"]

    @bordercolor.setter
    def bordercolor(self, val):
        self["bordercolor"] = val

    # bordercolorsrc
    # --------------
    @property
    def bordercolorsrc(self):
        """
        Source reference on Chart Studio Cloud for `bordercolor`; a
        string or a plotly.grid_objs.Column object.

        Returns
        -------
        str
        """
        return self["bordercolorsrc"]

    @bordercolorsrc.setter
    def bordercolorsrc(self, val):
        self["bordercolorsrc"] = val

    # font
    # ----
    @property
    def font(self):
        """
        Font used in hover labels: an instance of
        :class:`plotly.graph_objs.scatter3d.hoverlabel.Font`, or a dict
        of compatible properties (family, size, color, style, variant,
        weight, shadow, lineposition, textcase, plus their `*src`
        column references).

        Returns
        -------
        plotly.graph_objs.scatter3d.hoverlabel.Font
        """
        return self["font"]

    @font.setter
    def font(self, val):
        self["font"] = val

    # namelength
    # ----------
    @property
    def namelength(self):
        """
        Default length (in characters) of the trace name shown in hover
        labels: -1 shows the whole name regardless of length, 0-3 show
        that many leading characters, and an integer >3 shows the whole
        name if shorter, otherwise truncates to `namelength - 3`
        characters plus an ellipsis. An int (or int-castable float) in
        [-1, 9223372036854775807], or an array of these.

        Returns
        -------
        int|numpy.ndarray
        """
        return self["namelength"]

    @namelength.setter
    def namelength(self, val):
        self["namelength"] = val

    # namelengthsrc
    # -------------
    @property
    def namelengthsrc(self):
        """
        Source reference on Chart Studio Cloud for `namelength`; a
        string or a plotly.grid_objs.Column object.

        Returns
        -------
        str
        """
        return self["namelengthsrc"]

    @namelengthsrc.setter
    def namelengthsrc(self, val):
        self["namelengthsrc"] = val

    # Self properties description
    # ---------------------------
    @property
    def _prop_descriptions(self):
        return """\
        align
            Sets the horizontal alignment of the text content
            within hover label box. Has an effect only if the hover
            label text spans more two or more lines
        alignsrc
            Sets the source reference on Chart Studio Cloud for
            `align`.
        bgcolor
            Sets the background color of the hover labels for this
            trace
        bgcolorsrc
            Sets the source reference on Chart Studio Cloud for
            `bgcolor`.
        bordercolor
            Sets the border color of the hover labels for this
            trace.
        bordercolorsrc
            Sets the source reference on Chart Studio Cloud for
            `bordercolor`.
        font
            Sets the font used in hover labels.
        namelength
            Sets the default length (in number of characters) of
            the trace name in the hover labels for all traces. -1
            shows the whole name regardless of length. 0-3 shows
            the first 0-3 characters, and an integer >3 will show
            the whole name if it is less than that many characters,
            but if it is longer, will truncate to `namelength - 3`
            characters and add an ellipsis.
        namelengthsrc
            Sets the source reference on Chart Studio Cloud for
            `namelength`.
        """

    def __init__(
        self,
        arg=None,
        align=None,
        alignsrc=None,
        bgcolor=None,
        bgcolorsrc=None,
        bordercolor=None,
        bordercolorsrc=None,
        font=None,
        namelength=None,
        namelengthsrc=None,
        **kwargs,
    ):
        """
        Construct a new Hoverlabel object.

        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of
            :class:`plotly.graph_objs.scatter3d.Hoverlabel`
        align, alignsrc, bgcolor, bgcolorsrc, bordercolor,
        bordercolorsrc, font, namelength, namelengthsrc
            See the corresponding property docstrings on this class.

        Returns
        -------
        Hoverlabel
        """
        super(Hoverlabel, self).__init__("hoverlabel")

        # Internal shortcut used when re-parenting an existing object.
        if "_parent" in kwargs:
            self._parent = kwargs["_parent"]
            return

        # Validate arg
        # ------------
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            arg = _copy.copy(arg)
        else:
            raise ValueError(
                """\
The first argument to the plotly.graph_objs.scatter3d.Hoverlabel
constructor must be a dict or
an instance of :class:`plotly.graph_objs.scatter3d.Hoverlabel`"""
            )

        # Handle skip_invalid
        # -------------------
        self._skip_invalid = kwargs.pop("skip_invalid", False)
        self._validate = kwargs.pop("_validate", True)

        # Populate data dict with properties
        # ----------------------------------
        # Explicit keyword arguments take precedence over values in ``arg``.
        _v = arg.pop("align", None)
        _v = align if align is not None else _v
        if _v is not None:
            self["align"] = _v
        _v = arg.pop("alignsrc", None)
        _v = alignsrc if alignsrc is not None else _v
        if _v is not None:
            self["alignsrc"] = _v
        _v = arg.pop("bgcolor", None)
        _v = bgcolor if bgcolor is not None else _v
        if _v is not None:
            self["bgcolor"] = _v
        _v = arg.pop("bgcolorsrc", None)
        _v = bgcolorsrc if bgcolorsrc is not None else _v
        if _v is not None:
            self["bgcolorsrc"] = _v
        _v = arg.pop("bordercolor", None)
        _v = bordercolor if bordercolor is not None else _v
        if _v is not None:
            self["bordercolor"] = _v
        _v = arg.pop("bordercolorsrc", None)
        _v = bordercolorsrc if bordercolorsrc is not None else _v
        if _v is not None:
            self["bordercolorsrc"] = _v
        _v = arg.pop("font", None)
        _v = font if font is not None else _v
        if _v is not None:
            self["font"] = _v
        _v = arg.pop("namelength", None)
        _v = namelength if namelength is not None else _v
        if _v is not None:
            self["namelength"] = _v
        _v = arg.pop("namelengthsrc", None)
        _v = namelengthsrc if namelengthsrc is not None else _v
        if _v is not None:
            self["namelengthsrc"] = _v

        # Process unknown kwargs
        # ----------------------
        self._process_kwargs(**dict(arg, **kwargs))

        # Reset skip_invalid
        # ------------------
        self._skip_invalid = False
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@graph_objs@scatter3d@_hoverlabel.py@.PATH_END.py
|
{
"filename": "pupmed.py",
"repo_name": "langchain-ai/langchain",
"repo_path": "langchain_extracted/langchain-master/libs/langchain/langchain/retrievers/pupmed.py",
"type": "Python"
}
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.retrievers import PubMedRetriever
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
# Maps each deprecated name exported here to the package where it now lives.
DEPRECATED_LOOKUP = {"PubMedRetriever": "langchain_community.retrievers"}

_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)


def __getattr__(name: str) -> Any:
    """Look up attributes dynamically.

    Module-level ``__getattr__`` (PEP 562): resolves deprecated names via the
    shared importer, which emits a deprecation warning on access.
    """
    return _import_attribute(name)


__all__ = [
    "PubMedRetriever",
]
|
langchain-aiREPO_NAMElangchainPATH_START.@langchain_extracted@langchain-master@libs@langchain@langchain@retrievers@pupmed.py@.PATH_END.py
|
{
"filename": "kernel_utils.py",
"repo_name": "rapidsai/cuml",
"repo_path": "cuml_extracted/cuml-main/python/cuml/cuml/common/kernel_utils.py",
"type": "Python"
}
|
#
# Copyright (c) 2019-2024, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import cuml.internals.logger as logger
from uuid import uuid1
import functools
from cuml.internals.safe_imports import cpu_only_import
from cuml.internals.safe_imports import gpu_only_import
cp = gpu_only_import("cupy")
np = cpu_only_import("numpy")
# Mapping of common PyData dtypes to their corresponding C-primitive
dtype_str_map = {
    np.dtype("float32"): "float",
    np.dtype("float64"): "double",
    np.dtype("int32"): "int",
    np.dtype("int64"): "long long int",
    # String keys mirror the np.dtype entries so callers may pass either form.
    "float32": "float",
    "float64": "double",
    "int32": "int",
    "int64": "long long int",
}

# Every generated kernel is emitted with C linkage and CUDA __global__ scope.
extern_prefix = r'extern "C" __global__'
def get_dtype_str(dtype):
    """Map a NumPy dtype (or its string name) to the matching C primitive name.

    Raises ValueError for dtypes that have no supported C equivalent.
    """
    try:
        return dtype_str_map[dtype]
    except KeyError:
        raise ValueError(f"{dtype} is not a valid type for this kernel.")
def get_dtype_strs(dtypes):
    """Map each dtype in *dtypes* to its C primitive name (see get_dtype_str)."""
    return [get_dtype_str(dtype) for dtype in dtypes]
@functools.lru_cache(maxsize=5000)
def cuda_kernel_factory(nvrtc_kernel_str, dtypes, kernel_name=None):
    """
    Build a type-specialized cupy.RawKernel from a templated kernel string.

    Each numeric placeholder ``{0}``, ``{1}``, ... in ``nvrtc_kernel_str`` is
    replaced with the C primitive for the dtype at the same index of
    ``dtypes``, then the extern/__global__ prefix and a unique function name
    are prepended.

    For example, with dtypes = (float32, float64, int32)::

        ({0} *a, {1} *b, {2} *c) {}

    becomes::

        (float *a, double *b, int *c) {}

    Note that the extern declaration, function scope, and function name must
    NOT be included in the kernel string; they are added here.

    Parameters
    ----------
    nvrtc_kernel_str : str
        Valid NVRTC kernel body (no extern, scope, or function name).
    dtypes : tuple
        dtypes (hashable, since this function is lru_cached) to substitute
        for the placeholders, in order.
    kernel_name : str, optional
        Prefix for the generated function name; a UUID is used when omitted.
        NOTE(review): despite the original docstring's claim, lru_cache still
        memoizes calls with kernel_name=None (None is hashable), so repeated
        identical calls return the same kernel — confirm intended behavior.

    Returns
    -------
    cupy.RawKernel
        Compiled kernel ready for launch; its name is the (possibly UUID)
        prefix joined with an underscore-separated dtype suffix.
    """
    dtype_strs = get_dtype_strs(dtypes)

    # Substitute each "{i}" placeholder with the i-th C type name.
    for idx, dtype in enumerate(dtypes):
        nvrtc_kernel_str = nvrtc_kernel_str.replace(
            "{%d}" % idx, dtype_strs[idx]
        )

    kernel_name_prefix = uuid1() if kernel_name is None else kernel_name
    kernel_name_suffix = "".join(dtype_strs).replace(" ", "_")
    kernel_name = f"{kernel_name_prefix}_{kernel_name_suffix}"

    nvrtc_kernel_str = "%s\nvoid %s%s" % (
        extern_prefix,
        kernel_name,
        nvrtc_kernel_str,
    )

    if logger.should_log_for(logger.level_debug):
        logger.debug(str(nvrtc_kernel_str))

    return cp.RawKernel(nvrtc_kernel_str, kernel_name)
|
rapidsaiREPO_NAMEcumlPATH_START.@cuml_extracted@cuml-main@python@cuml@cuml@common@kernel_utils.py@.PATH_END.py
|
{
"filename": "TestDeepClean.py",
"repo_name": "saopicc/DDFacet",
"repo_path": "DDFacet_extracted/DDFacet-master/DDFacet/Tests/VeryLongAcceptanceTests/TestDeepClean.py",
"type": "Python"
}
|
'''
DDFacet, a facet-based radio imaging package
Copyright (C) 2013-2016 Cyril Tasse, l'Observatoire de Paris,
SKA South Africa, Rhodes University
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
'''
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from DDFacet.compatibility import range
import unittest
import DDFacet.Tests.ShortAcceptanceTests.ClassCompareFITSImage
import numpy as np
class TestDeepCleanWithBeam(DDFacet.Tests.ShortAcceptanceTests.ClassCompareFITSImage.ClassCompareFITSImage):
    """Deep-clean acceptance test with the primary beam applied.

    The field contains a single source, so per-pixel comparisons against
    the reference images are effectively disabled; only the clean
    dynamic range (DR) is verified against a known-good value.
    """

    @classmethod
    def defineImageList(cls):
        """Identifiers of the output products compared against references.

        These must correspond to whatever is used when writing out the
        FITS files (e.g. in ClassDeconvMachine.py).
        """
        return ['dirty', 'app.residual']

    @classmethod
    def defineMaxSquaredError(cls):
        """Per-image tolerance on the max squared pixel error.

        Effectively disabled: sidelobes are not of interest here, and
        the DR test checks the one and only source.
        """
        return [1e+9, 1e+9]

    @classmethod
    def defMeanSquaredErrorLevel(cls):
        """Per-image tolerance on the mean squared pixel error.

        Effectively disabled: sidelobes are not of interest here, and
        the DR test checks the one and only source.
        """
        return [1e+9, 1e+9]

    @classmethod
    def defDRTolerance(cls):
        """Relative tolerance on the clean dynamic range (+/- 5% drift)."""
        return 0.05

    def testMaxSquaredError(self):
        """Skipped: components placed on sidelobes are not verified."""

    def testMeanSquaredError(self):
        """Skipped: components placed on sidelobes are not verified."""

    def testDR(self):
        """Check the clean dynamic range against the known-good reference.

        DR is defined as the peak absolute dirty-image value divided by
        the L2 norm of the apparent residual. A DR higher than the
        reference is acceptable; only regressions beyond the tolerance
        fail the test.
        """
        cls = self.__class__
        image_names = cls.defineImageList()
        idx_dirty = image_names.index("dirty")
        idx_residual = image_names.index("app.residual")
        # Reference dynamic range
        ref_dirty = cls._refHDUList[idx_dirty][0].data[...]
        ref_residual = cls._refHDUList[idx_residual][0].data[...]
        DR_ref = np.max(np.abs(ref_dirty)) / np.sqrt(np.sum(ref_residual ** 2))
        # Dynamic range of the current run
        out_dirty = cls._outHDUList[idx_dirty][0].data[...]
        out_residual = cls._outHDUList[idx_residual][0].data[...]
        DR_out = np.max(np.abs(out_dirty)) / np.sqrt(np.sum(out_residual ** 2))
        # DR_out > DR_ref is OK; only a regression fails
        assert 1.0 - DR_out / DR_ref <= cls.defDRTolerance(), "%s DR value has regressed. " \
                                                              "Known good: %f, current %f" % (cls.__name__,
                                                                                              DR_ref,
                                                                                              DR_out)
class TestDeepCleanWithoutBeam(TestDeepCleanWithBeam):
    """Same DR regression check as above, run without the primary beam."""
# Allow executing this test module directly via the unittest runner
if __name__ == '__main__':
    unittest.main()
|
saopiccREPO_NAMEDDFacetPATH_START.@DDFacet_extracted@DDFacet-master@DDFacet@Tests@VeryLongAcceptanceTests@TestDeepClean.py@.PATH_END.py
|
{
"filename": "communication.py",
"repo_name": "jmd-dk/concept",
"repo_path": "concept_extracted/concept-master/src/communication.py",
"type": "Python"
}
|
# This file is part of CO𝘕CEPT, the cosmological 𝘕-body code in Python.
# Copyright © 2015–2024 Jeppe Mosgaard Dakin.
#
# CO𝘕CEPT is free software: You can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# CO𝘕CEPT is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with CO𝘕CEPT. If not, see https://www.gnu.org/licenses/
#
# The author of CO𝘕CEPT can be contacted at dakin(at)phys.au.dk
# The latest version of CO𝘕CEPT is available at
# https://github.com/jmd-dk/concept/
# Import everything from the commons module.
# In the .pyx file, Cython declared variables will also get cimported.
from commons import *
# Function for fairly partitioning data among the processes
@cython.header(
    # Arguments
    size='Py_ssize_t',
    # Locals
    size_local='Py_ssize_t',
    start_local='Py_ssize_t',
    rank_transition='Py_ssize_t',
    returns=tuple,
)
def partition(size):
    """Fairly partition `size` elements among the processes.

    Returns (start_local, size_local): the starting index and number of
    elements assigned to the local process. When `size` does not divide
    evenly by the number of processes, the surplus elements are handed
    out one each to the highest ranks.
    """
    # Floor share per process and the naive starting offset of this rank
    size_local = size//nprocs
    start_local = rank*size_local
    # Lowest rank which receives one extra element
    # (equal to nprocs - size%nprocs)
    rank_transition = nprocs + size_local*nprocs - size
    if rank < rank_transition:
        return start_local, size_local
    # This rank gets an extra element; shift the start by the number
    # of lower ranks which also received one.
    return start_local + (rank - rank_transition), size_local + 1
# This function examines every particle of the supplied component and
# communicates them to the process governing the domain in which the
# particle is located.
@cython.header(
    # Arguments
    component='Component',
    include_mom='bint',
    progress_msg='bint',
    # Locals
    data_mv='double[::1]',
    data_mvs=list,
    dim='int',
    ids='Py_ssize_t*',
    ids_mv='Py_ssize_t[::1]',
    indexᵖ='Py_ssize_t',
    indexᵖ_end_i='Py_ssize_t',
    indexᵖ_hole_bgn='Py_ssize_t',
    indexᵖ_hole_end='Py_ssize_t',
    indexᵖ_i='Py_ssize_t',
    indexᵖ_j='Py_ssize_t',
    indexᵖ_j_bgn='Py_ssize_t',
    indexᵖ_left='Py_ssize_t',
    indexᵖ_recv_bgn_ℓ='Py_ssize_t',
    indexᵖ_right='Py_ssize_t',
    indexᵖ_send_bgn_i='Py_ssize_t',
    indexᵖ_send_bgn_ℓ='Py_ssize_t',
    indexᵖ_send_end_i='Py_ssize_t',
    indexᵖ_send_end_ℓ='Py_ssize_t',
    indexʳ_i='Py_ssize_t',
    indexʳ_j='Py_ssize_t',
    indexʳ_left='Py_ssize_t',
    indexʳ_recv_bgn_ℓ='Py_ssize_t',
    indexʳ_right='Py_ssize_t',
    indexʳ_send_bgn_ℓ='Py_ssize_t',
    indexʳ_send_end_ℓ='Py_ssize_t',
    indexˣ='Py_ssize_t',
    indexˣ_hole='Py_ssize_t',
    indexˣ_i='Py_ssize_t',
    indexˣ_j='Py_ssize_t',
    indexˣ_left='Py_ssize_t',
    indexˣ_right='Py_ssize_t',
    mom='double*',
    mom_mv='double[::1]',
    n_particles_recv_tot='Py_ssize_t',
    n_particles_recv_ℓ='Py_ssize_t',
    n_particles_send_max='Py_ssize_t',
    n_particles_send_tot='Py_ssize_t',
    n_particles_send_tot_global='Py_ssize_t',
    n_particles_send_ℓ='Py_ssize_t',
    n_particles_store='Py_ssize_t',
    particles_leftover='bint',
    pos='double*',
    pos_mv='double[::1]',
    posxˣ='double*',
    posyˣ='double*',
    poszˣ='double*',
    rank_left='int',
    rank_other='int',
    rank_other_i='int',
    rank_other_j='int',
    rank_recv='int',
    rank_right='int',
    rank_send='int',
    rung_index='signed char',
    rung_index_i='signed char',
    rung_index_j='signed char',
    rung_index_left='signed char',
    rung_index_right='signed char',
    rung_indices='signed char*',
    rung_indices_jumped='signed char*',
    rung_indices_mv='signed char[::1]',
    rungs_N='Py_ssize_t*',
    Δmom='double*',
    Δmom_mv='double[::1]',
    ℓ='int',
    returns='void',
)
def exchange(component, include_mom=True, progress_msg=False):
    """This function will do an exchange of particles between processes,
    so that every particle ends up on the process in charge of the
    domain where the particle is located.
    The particle data to be exchanged is:
    - pos
    - mom
    - Δmom
    - ids (if IDs are used by the component)
    - rung_indices (if rungs are used by the component)
    We do not communicate rung jumps, as it is expected that no such
    jumps are flagged when calling this function.
    The overall scheme for the particle exchange is this:
    - Locate non-local particles at the left end of the particle array
      and swap them with local particles at the right end. This leaves
      all non-local particles to the right of all local particles.
      Keep track of the total number of non-local particles
      to send to each process.
    - Go over the now contiguous sub-list of non-local particles and
      swap them so that they are ordered by the process
      they belong to.
    - Send the non-local particles to each non-local process.
      When receiving, do so directly into the component data arrays,
      to the right of even the non-local particles.
      The data arrays then need to be expanded first.
    - Once particles have been exchanged to all processes,
      move the received data to the left where the
      non-local particles used to be.
    - In all of the above, the only particle buffer memory needed
      (not counting buffers of size nprocs) are that which is used to
      receive the particle data. Still, to keep this to a minimum,
      a maximum number of particles to be sent to each process
      is introduced. If more particles should be exchanged than this,
      the above steps are simply repeated in a second round
      of exchange. Note that after the first step, the left part of
      the particles are guaranteed to be local, so we start at the end
      of these when searching for non-local particles.
      Likewise, particles appearing further to the right than already
      dealt with will have to be have come there by receiving them
      from other processes, implying that they are local. The window
      of particles to check then shrinks from both sides
      as more and more exchanges take place.
    """
    # No need to consider exchange of particles if running serially
    if 𝔹[nprocs == 1]:
        return
    # Only particles are exchangeable
    if component.representation != 'particles':
        return
    if progress_msg:
        masterprint(f'Exchanging {component.name} particles between processes ...')
    # Maximum number of particles allowed to be sent
    # to each process at a time.
    n_particles_send_max = 2**17
    # Carry out exchanges as long as we have particles to communicate.
    # Each iteration of this outer loop is one full exchange round;
    # further rounds happen only if some process hit the send limit.
    particles_leftover = True
    indexᵖ_left = 0
    indexᵖ_right = component.N_local - 1
    while particles_leftover:
        particles_leftover = False
        # Extract pointers
        posxˣ = component.posxˣ
        posyˣ = component.posyˣ
        poszˣ = component.poszˣ
        pos = component.pos
        mom = component.mom
        Δmom = component.Δmom
        ids = component.ids
        rung_indices = component.rung_indices
        rung_indices_jumped = component.rung_indices_jumped
        # Sweep over the particles from the left and right
        # simultaneously, matching a left non-local particle with a
        # right local particle and swapping them. Keep a tally of the
        # total number of non-local particles belonging to each process.
        for rank_other in range(nprocs):
            n_particles_send[rank_other] = 0
        indexˣ_left = 3*indexᵖ_left
        indexˣ_right = 3*indexᵖ_right
        while indexᵖ_left <= indexᵖ_right and not particles_leftover:
            rank_left = which_domain(
                posxˣ[indexˣ_left],
                posyˣ[indexˣ_left],
                poszˣ[indexˣ_left],
            )
            if rank_left == rank:
                # Local left particle found
                indexᵖ_left += 1
                indexˣ_left += 3
                continue
            # Non-local left particle found
            if n_particles_send[rank_left] == n_particles_send_max:
                # Send limit reached for left process
                particles_leftover = True
                break
            # Pair non-local left particle with local right particle
            while indexᵖ_left <= indexᵖ_right:
                rank_right = which_domain(
                    posxˣ[indexˣ_right],
                    posyˣ[indexˣ_right],
                    poszˣ[indexˣ_right],
                )
                if rank_right == rank:
                    # Local right particle found.
                    # Increment send tally of left process.
                    n_particles_send[rank_left] += 1
                    # Swap non-local left with local right particle.
                    for dim in range(3):
                        indexʳ_left = indexˣ_left + dim
                        indexʳ_right = indexˣ_right + dim
                        pos[indexʳ_left], pos[indexʳ_right] = pos[indexʳ_right], pos[indexʳ_left]
                    with unswitch(2):
                        if include_mom:
                            for dim in range(3):
                                indexʳ_left = indexˣ_left + dim
                                indexʳ_right = indexˣ_right + dim
                                mom[indexʳ_left], mom[indexʳ_right] = mom[indexʳ_right], mom[indexʳ_left]
                            for dim in range(3):
                                indexʳ_left = indexˣ_left + dim
                                indexʳ_right = indexˣ_right + dim
                                Δmom[indexʳ_left], Δmom[indexʳ_right] = Δmom[indexʳ_right], Δmom[indexʳ_left]
                    with unswitch(2):
                        if component.use_ids:
                            ids[indexᵖ_left], ids[indexᵖ_right] = ids[indexᵖ_right], ids[indexᵖ_left]
                    with unswitch(2):
                        if component.use_rungs:
                            rung_index_left = rung_indices[indexᵖ_left]
                            rung_index_right = rung_indices[indexᵖ_right]
                            rung_indices[indexᵖ_left ] = rung_index_right
                            rung_indices[indexᵖ_right] = rung_index_left
                            # The jumped rung indices are equal to the
                            # rung indices as we cannot have
                            # upcoming jumps.
                            rung_indices_jumped[indexᵖ_left ] = rung_index_right
                            rung_indices_jumped[indexᵖ_right] = rung_index_left
                    # Go to next left and right particles
                    indexᵖ_left += 1
                    indexˣ_left += 3
                    indexᵖ_right -= 1
                    indexˣ_right -= 3
                    break
                else:
                    # Non-local right particle found
                    if n_particles_send[rank_right] == n_particles_send_max:
                        # Send limit reached for right process
                        particles_leftover = True
                        break
                    # Increment send tally of right process,
                    # leaving the left non-local particle hanging.
                    n_particles_send[rank_right] += 1
                    # Go to next right particle
                    indexᵖ_right -= 1
                    indexˣ_right -= 3
                    continue
        # No need to continue if no particles should be exchanged
        n_particles_send_tot = sum(n_particles_send_mv)
        n_particles_send_tot_global = allreduce(n_particles_send_tot, op=MPI.SUM)
        if n_particles_send_tot_global == 0:
            break
        # If any process have more non-local particles than can be
        # sent in one go, every process should know about it.
        particles_leftover = allreduce(particles_leftover, op=MPI.LOR)
        # Sort non-local right particles in order of rank
        if n_particles_send_tot > 0:
            # Construct beginning particle indices of each rank.
            # The non-local particles of each rank should then end up
            # in range(indicesᵖ_send_bgn[rank], indicesᵖ_send_bgn[rank] + n_particles_send[rank]).
            indexᵖ = indexᵖ_right + 1
            for rank_other in range(nprocs):
                indicesᵖ_send_bgn[rank_other] = indexᵖ
                indexᵖ += n_particles_send[rank_other]
            # The running tally of the number of sorted particles will
            # be stored in this array.
            for rank_other in range(nprocs):
                n_particles_sorted[rank_other] = 0
            # Sweep over the non-local particles, swapping particles
            # situated at wrong indices with the particle
            # at their destination index.
            indexᵖ_i = indexᵖ_right + 1
            indexᵖ_end_i = indexᵖ_i + n_particles_send_tot
            indexˣ_i = 3*indexᵖ_i
            rank_other_i = 0
            while indexᵖ_i < indexᵖ_end_i:
                if rank_other_i == -1:
                    rank_other_i = rank_other_j
                else:
                    rank_other_i = which_domain(
                        posxˣ[indexˣ_i],
                        posyˣ[indexˣ_i],
                        poszˣ[indexˣ_i],
                    )
                indexᵖ_send_bgn_i = indicesᵖ_send_bgn[rank_other_i]
                indexᵖ_send_end_i = indexᵖ_send_bgn_i + n_particles_send[rank_other_i]
                if indexᵖ_send_bgn_i <= indexᵖ_i < indexᵖ_send_end_i:
                    # Particle already situated correctly
                    n_particles_sorted[rank_other_i] += 1
                    indexᵖ_i += 1
                    indexˣ_i += 3
                    continue
                # Locate index j where particle currently at i
                # should be moved to.
                indexᵖ_j_bgn = indexᵖ_send_bgn_i + n_particles_sorted[rank_other_i]
                indexᵖ_j = indexᵖ_j_bgn
                indexˣ_j = 3*indexᵖ_j
                while True:
                    rank_other_j = which_domain(
                        posxˣ[indexˣ_j],
                        posyˣ[indexˣ_j],
                        poszˣ[indexˣ_j],
                    )
                    if rank_other_i != rank_other_j:
                        break
                    # Particle at j belongs to the same process as the
                    # one at i. Skip along, counting the particle at j
                    # as being sorted.
                    indexᵖ_j += 1
                    indexˣ_j += 3
                n_particles_sorted[rank_other_i] += indexᵖ_j - indexᵖ_j_bgn
                # Swap particle i and j
                for dim in range(3):
                    indexʳ_i = indexˣ_i + dim
                    indexʳ_j = indexˣ_j + dim
                    pos[indexʳ_i], pos[indexʳ_j] = pos[indexʳ_j], pos[indexʳ_i]
                with unswitch(1):
                    if include_mom:
                        for dim in range(3):
                            indexʳ_i = indexˣ_i + dim
                            indexʳ_j = indexˣ_j + dim
                            mom[indexʳ_i], mom[indexʳ_j] = mom[indexʳ_j], mom[indexʳ_i]
                        for dim in range(3):
                            indexʳ_i = indexˣ_i + dim
                            indexʳ_j = indexˣ_j + dim
                            Δmom[indexʳ_i], Δmom[indexʳ_j] = Δmom[indexʳ_j], Δmom[indexʳ_i]
                with unswitch(1):
                    if component.use_ids:
                        ids[indexᵖ_i], ids[indexᵖ_j] = ids[indexᵖ_j], ids[indexᵖ_i]
                with unswitch(1):
                    if component.use_rungs:
                        rung_index_i = rung_indices[indexᵖ_i]
                        rung_index_j = rung_indices[indexᵖ_j]
                        rung_indices[indexᵖ_i] = rung_index_j
                        rung_indices[indexᵖ_j] = rung_index_i
                        # The jumped rung indices are equal to the rung
                        # indices as we cannot have upcoming jumps.
                        # NOTE(review): unlike the analogous swap in the
                        # left/right pairing above, these assignments use
                        # the pre-swap rung indices, leaving
                        # rung_indices_jumped[indexᵖ_i] != rung_indices[indexᵖ_i].
                        # This appears harmless, as every particle in this
                        # region is subsequently sent away and receivers
                        # reset the jumped indices, but confirm whether
                        # rung_index_j/rung_index_i were intended here.
                        rung_indices_jumped[indexᵖ_i] = rung_index_i
                        rung_indices_jumped[indexᵖ_j] = rung_index_j
                # The particle previous at i is now situated correctly,
                # but the swapped in particle (the one previously at j)
                # may now be incorrectly placed.
                # Continue without incrementing indexᵖ_i and indexˣ_i.
                n_particles_sorted[rank_other_i] += 1
                rank_other_i = -1 # flag as being equal to rank_other_j
                continue
        # Find out how many particles to receive
        n_particles_recv_tot = 0
        for ℓ in range(1, nprocs):
            rank_send = mod(rank + ℓ, nprocs)
            rank_recv = mod(rank - ℓ, nprocs)
            n_particles_recv_ℓ = sendrecv(
                n_particles_send[rank_send],
                dest=rank_send,
                source=rank_recv,
            )
            n_particles_recv[rank_recv] = n_particles_recv_ℓ
            n_particles_recv_tot += n_particles_recv_ℓ
        # The particles will be received directly into the
        # component particle arrays. Enlarge these if necessary.
        n_particles_store = component.N_local + n_particles_recv_tot
        if component.N_allocated < n_particles_store:
            component.resize(n_particles_store)
        # Extract particle data pointers and memory views
        pos = component. pos
        mom = component. mom
        Δmom = component.Δmom
        ids = component. ids
        pos_mv = component. pos_mv
        mom_mv = component. mom_mv
        Δmom_mv = component.Δmom_mv
        ids_mv = component. ids_mv
        # Extract rung information
        rungs_N = component.rungs_N
        rung_indices = component.rung_indices
        rung_indices_mv = component.rung_indices_mv
        rung_indices_jumped = component.rung_indices_jumped
        # Particle data to be exchanged
        data_mvs = [pos_mv]
        if include_mom:
            data_mvs += [mom_mv, Δmom_mv]
        # Exchange particles between processes
        indexᵖ_recv_bgn_ℓ = component.N_local # start index for received data
        for ℓ in range(1, nprocs):
            rank_send = mod(rank + ℓ, nprocs)
            rank_recv = mod(rank - ℓ, nprocs)
            n_particles_send_ℓ = n_particles_send[rank_send]
            n_particles_recv_ℓ = n_particles_recv[rank_recv]
            # Exchange particle data
            indexᵖ_send_bgn_ℓ = indicesᵖ_send_bgn[rank_send]
            indexᵖ_send_end_ℓ = indexᵖ_send_bgn_ℓ + n_particles_send_ℓ
            # NOTE(review): indexᵖ_recv_end_ℓ is not declared in the
            # Locals section of the decorator above — confirm whether a
            # 'Py_ssize_t' declaration should be added for it.
            indexᵖ_recv_end_ℓ = indexᵖ_recv_bgn_ℓ + n_particles_recv_ℓ
            indexʳ_send_bgn_ℓ = 3*indexᵖ_send_bgn_ℓ
            indexʳ_send_end_ℓ = 3*indexᵖ_send_end_ℓ
            indexʳ_recv_bgn_ℓ = 3*indexᵖ_recv_bgn_ℓ
            for data_mv in data_mvs:
                Sendrecv(
                    data_mv[indexʳ_send_bgn_ℓ:indexʳ_send_end_ℓ],
                    dest=rank_send,
                    recvbuf=data_mv[indexʳ_recv_bgn_ℓ:],
                    source=rank_recv,
                )
            # If using IDs we also exchange these
            if component.use_ids:
                Sendrecv(
                    ids_mv[indexᵖ_send_bgn_ℓ:indexᵖ_send_end_ℓ],
                    dest=rank_send,
                    recvbuf=ids_mv[indexᵖ_recv_bgn_ℓ:],
                    source=rank_recv,
                )
            # If using rungs we also exchange the rung indices
            if component.use_rungs:
                Sendrecv(
                    rung_indices_mv[indexᵖ_send_bgn_ℓ:indexᵖ_send_end_ℓ],
                    dest=rank_send,
                    recvbuf=rung_indices_mv[indexᵖ_recv_bgn_ℓ:],
                    source=rank_recv,
                )
                # Decrement rung population due to sent particles
                for indexᵖ in range(indexᵖ_send_bgn_ℓ, indexᵖ_send_end_ℓ):
                    rung_index = rung_indices[indexᵖ]
                    rungs_N[rung_index] -= 1
                # Increment rung population due to received
                # particles and set their jumped rung indices.
                for indexᵖ in range(indexᵖ_recv_bgn_ℓ, indexᵖ_recv_end_ℓ):
                    rung_index = rung_indices[indexᵖ]
                    rungs_N[rung_index] += 1
                    # Set the jumped rung index equal to
                    # the rung index, signalling no upcoming jump.
                    rung_indices_jumped[indexᵖ] = rung_index
            # Update the start index for received data
            indexᵖ_recv_bgn_ℓ += n_particles_recv_ℓ
        # Move particles into the holes left by the sent particles.
        # Holes are filled by copying particles from the very end of
        # the particle data (the tail of the just received particles)
        # backwards, one particle per hole.
        indexᵖ_hole_bgn = indexᵖ_right + 1
        indexᵖ_hole_end = pairmin(indexᵖ_hole_bgn + n_particles_send_tot, component.N_local)
        indexˣ_hole = 3*(indexᵖ_hole_bgn - 1)
        indexᵖ = indexᵖ_recv_bgn_ℓ
        indexˣ = 3*indexᵖ
        for indexᵖ_hole in range(indexᵖ_hole_bgn, indexᵖ_hole_end):
            indexˣ_hole += 3
            indexˣ -= 3
            with unswitch(1):
                if component.use_ids or component.use_rungs:
                    indexᵖ -= 1
            for dim in range(3):
                pos [indexˣ_hole + dim] = pos [indexˣ + dim]
            with unswitch(1):
                if include_mom:
                    for dim in range(3):
                        mom [indexˣ_hole + dim] = mom [indexˣ + dim]
                    for dim in range(3):
                        Δmom[indexˣ_hole + dim] = Δmom[indexˣ + dim]
            with unswitch(1):
                if component.use_ids:
                    ids[indexᵖ_hole] = ids[indexᵖ]
            with unswitch(1):
                if component.use_rungs:
                    rung_index = rung_indices[indexᵖ]
                    rung_indices [indexᵖ_hole] = rung_index
                    rung_indices_jumped[indexᵖ_hole] = rung_index # no jump
        # With the holes filled, update the N_local attribute
        component.N_local -= n_particles_send_tot
        component.N_local += n_particles_recv_tot
    # Exchange completed.
    # Update the rung flags.
    if component.use_rungs:
        # Find and set lowest and highest populated rung
        component.set_lowest_highest_populated_rung()
        # There is no need to have the lowest active rung
        # be below the lowest populated rung.
        if component.lowest_active_rung < component.lowest_populated_rung:
            component.lowest_active_rung = component.lowest_populated_rung
    else:
        # When not using rungs, all particles occupy rung 0
        component.rungs_N[0] = component.N_local
    if progress_msg:
        masterprint('done')
# Buffers used by the exchange() function.
# Each array holds one Py_ssize_t per process, indexed by rank:
#   n_particles_send : number of local particles to send to each rank
#   n_particles_recv : number of particles to receive from each rank
#   n_particles_sorted: running tallies used while ordering the
#       non-local particles by destination rank
#   indicesᵖ_send_bgn: start index (into the particle data arrays) of
#       the particles destined for each rank
cython.declare(
    n_particles_send_mv='Py_ssize_t[::1]',
    n_particles_recv_mv='Py_ssize_t[::1]',
    n_particles_sorted_mv='Py_ssize_t[::1]',
    indicesᵖ_send_bgn_mv='Py_ssize_t[::1]',
    n_particles_send='Py_ssize_t*',
    n_particles_recv='Py_ssize_t*',
    n_particles_sorted='Py_ssize_t*',
    indicesᵖ_send_bgn='Py_ssize_t*',
)
n_particles_send_mv = zeros(nprocs, dtype=C2np['Py_ssize_t'])
n_particles_recv_mv = zeros(nprocs, dtype=C2np['Py_ssize_t'])
n_particles_sorted_mv = zeros(nprocs, dtype=C2np['Py_ssize_t'])
indicesᵖ_send_bgn_mv = zeros(nprocs, dtype=C2np['Py_ssize_t'])
# Raw pointers into the memoryviews above, for fast C-level indexing
n_particles_send = cython.address(n_particles_send_mv[:])
n_particles_recv = cython.address(n_particles_recv_mv[:])
n_particles_sorted = cython.address(n_particles_sorted_mv[:])
indicesᵖ_send_bgn = cython.address(indicesᵖ_send_bgn_mv[:])
# Function for communicating ghost values
# of domain grids between processes.
@cython.header(
    # Arguments
    grid='double[:, :, ::1]',
    operation=str,
    # Locals
    i='int',
    index_recv_bgn_i='Py_ssize_t',
    index_recv_end_i='Py_ssize_t',
    index_send_bgn_i='Py_ssize_t',
    index_send_end_i='Py_ssize_t',
    index_recv_bgn_j='Py_ssize_t',
    index_recv_end_j='Py_ssize_t',
    index_send_bgn_j='Py_ssize_t',
    index_send_end_j='Py_ssize_t',
    index_recv_bgn_k='Py_ssize_t',
    index_recv_end_k='Py_ssize_t',
    index_send_bgn_k='Py_ssize_t',
    index_send_end_k='Py_ssize_t',
    j='int',
    k='int',
    reverse='bint',
    returns='void',
)
def communicate_ghosts(grid, operation):
    """This function can operate in two different modes depending on the
    operation argument:
    - operation == '+=':
        Current values in the ghost points will be send to their
        designated neighbour processes, where they will be added to the
        current values of the outer (non-ghost) layer of points.
    - operation == "=":
        All local ghost points will be assigned values based on the
        values stored at the corresponding points on neighbour
        processes. Current ghost point values will be ignored.
    """
    if grid is None:
        return
    # Set the direction of communication depending on the operation
    reverse = (operation == '=')
    # Loop over all 26 neighbour domains
    # ((i, j, k) ∈ {-1, 0, +1}³, excluding (0, 0, 0))
    for i in range(-1, 2):
        if i == -1:
            # Send left, receive right
            index_send_bgn_i = 0
            index_send_end_i = ℤ[1*nghosts]
            index_recv_bgn_i = ℤ[grid.shape[0] - 2*nghosts]
            index_recv_end_i = ℤ[grid.shape[0] - 1*nghosts]
        elif i == 0:
            # Do not send to or receive from this direction.
            # Include the entire i-dimension of the local bulk.
            index_send_bgn_i = ℤ[1*nghosts]
            index_send_end_i = ℤ[grid.shape[0] - 1*nghosts]
            index_recv_bgn_i = ℤ[1*nghosts]
            index_recv_end_i = ℤ[grid.shape[0] - 1*nghosts]
        else: # i == +1
            # Send right, receive left
            index_send_bgn_i = ℤ[grid.shape[0] - 1*nghosts]
            index_send_end_i = ℤ[grid.shape[0]]
            index_recv_bgn_i = ℤ[1*nghosts]
            index_recv_end_i = ℤ[2*nghosts]
        for j in range(-1, 2):
            if j == -1:
                # Send backward, receive forward
                index_send_bgn_j = 0
                index_send_end_j = ℤ[1*nghosts]
                index_recv_bgn_j = ℤ[grid.shape[1] - 2*nghosts]
                index_recv_end_j = ℤ[grid.shape[1] - 1*nghosts]
            elif j == 0:
                # Do not send to or receive from this direction.
                # Include the entire j-dimension of the local bulk.
                index_send_bgn_j = ℤ[1*nghosts]
                index_send_end_j = ℤ[grid.shape[1] - 1*nghosts]
                index_recv_bgn_j = ℤ[1*nghosts]
                index_recv_end_j = ℤ[grid.shape[1] - 1*nghosts]
            else: # j == +1
                # Send forward, receive backward
                index_send_bgn_j = ℤ[grid.shape[1] - 1*nghosts]
                index_send_end_j = ℤ[grid.shape[1]]
                index_recv_bgn_j = ℤ[1*nghosts]
                index_recv_end_j = ℤ[2*nghosts]
            for k in range(-1, 2):
                if i == j == k == 0:
                    # Do not communicate the local bulk
                    continue
                if k == -1:
                    # Send downward, receive upward
                    index_send_bgn_k = 0
                    index_send_end_k = ℤ[1*nghosts]
                    index_recv_bgn_k = ℤ[grid.shape[2] - 2*nghosts]
                    index_recv_end_k = ℤ[grid.shape[2] - 1*nghosts]
                elif k == 0:
                    # Do not send to or receive from this direction.
                    # Include the entire k-dimension of the local bulk.
                    index_send_bgn_k = ℤ[1*nghosts]
                    index_send_end_k = ℤ[grid.shape[2] - 1*nghosts]
                    index_recv_bgn_k = ℤ[1*nghosts]
                    index_recv_end_k = ℤ[grid.shape[2] - 1*nghosts]
                else: # k == +1
                    # Send upward, receive downward
                    index_send_bgn_k = ℤ[grid.shape[2] - 1*nghosts]
                    index_send_end_k = ℤ[grid.shape[2]]
                    index_recv_bgn_k = ℤ[1*nghosts]
                    index_recv_end_k = ℤ[2*nghosts]
                # Communicate this face/edge/corner
                smart_mpi(
                    grid[
                        index_send_bgn_i:index_send_end_i,
                        index_send_bgn_j:index_send_end_j,
                        index_send_bgn_k:index_send_end_k,
                    ],
                    grid[
                        index_recv_bgn_i:index_recv_end_i,
                        index_recv_bgn_j:index_recv_end_j,
                        index_recv_bgn_k:index_recv_end_k,
                    ],
                    dest =rank_neighbouring_domain(+i, +j, +k),
                    source=rank_neighbouring_domain(-i, -j, -k),
                    reverse=reverse,
                    mpifun='Sendrecv',
                    operation=operation,
                )
# Function for cutting out domains as cuboidal boxes in the best
# possible way. The return value is an array of 3 elements; the number
# of subdivisions of the box for each dimension. When all dimensions
# cannot be equally divided, the x-dimension is subdivided the most,
# then the y-dimension and lastly the z-dimension.
@cython.header(
    # Arguments
    n='int',
    # Locals
    elongation='double',
    elongation_min='double',
    factor='int',
    factor_pair=frozenset,
    factors=list,
    factors_pairs=set,
    factors_singles=set,
    factors_triplet=tuple,
    factors_triplet_best=tuple,
    factors_x=tuple,
    factors_x_mul='int',
    factors_y=tuple,
    factors_y_mul='int',
    factors_yz=list,
    factors_z_mul='int',
    i='int',
    m='int',
    r_x='int',
    r_y='int',
    returns='int[::1]',
)
def cutout_domains(n):
    """Return the number of box subdivisions along each dimension
    when decomposing the box into n cuboidal domains.
    """
    if n == 1:
        return ones(3, dtype=C2np['int'])
    if n < 1:
        abort(f'Cannot cut the box into {n} domains')
    # Factorise n into primes
    factors = []
    m = n
    while m%2 == 0:
        factors.append(2)
        m //= 2
    for i in range(3, cast(ceil(sqrt(n)), 'int') + 1, 2):
        while m%i == 0:
            factors.append(i)
            m //= i
    if m != 1:
        # Any remaining m is itself a prime factor
        factors.append(m)
    # Go through all triplets of factors and find the one resulting in
    # the least elongation of the domains, i.e. the triplet with the
    # smallest ratio between the largest and smallest element.
    factors_singles = set()
    factors_pairs = set()
    elongation_min = ထ
    factors_triplet_best = ()
    for r_x in range(1, len(factors) + 1):
        for factors_x in itertools.combinations(factors, r_x):
            factors_x_mul = np.prod(factors_x)
            # Skip x subdivision counts already tried
            # (prime factors may repeat)
            if factors_x_mul in factors_singles:
                continue
            factors_singles.add(factors_x_mul)
            factors_yz = factors.copy()
            for factor in factors_x:
                factors_yz.remove(factor)
            # Include a unit factor, allowing a dimension
            # to be left unsubdivided
            factors_yz.append(1)
            for r_y in range(1, len(factors_yz) + 1):
                for factors_y in itertools.combinations(factors_yz, r_y):
                    factors_y_mul = np.prod(factors_y)
                    # Skip (x, y) subdivision pairs already
                    # tried in either order
                    factor_pair = frozenset({factors_x_mul, factors_y_mul})
                    if factor_pair in factors_pairs:
                        continue
                    factors_pairs.add(factor_pair)
                    factors_z_mul = n//(factors_x_mul*factors_y_mul)
                    factors_triplet = (factors_x_mul, factors_y_mul, factors_z_mul)
                    elongation = np.max(factors_triplet)/np.min(factors_triplet)
                    if elongation < elongation_min:
                        factors_triplet_best = factors_triplet
                        elongation_min = elongation
    if len(factors_triplet_best) != 3 or np.prod(factors_triplet_best) != n:
        abort('Something went wrong during domain decomposition')
    # Sort in descending order, so that the x-dimension
    # is subdivided the most and the z-dimension the least.
    return asarray(sorted(factors_triplet_best, reverse=True), dtype=C2np['int'])
# This function takes coordinates as arguments and returns the rank of
# the process that governs the domain in which the coordinates reside.
@cython.header(
    # Arguments
    x='double',
    y='double',
    z='double',
    # Locals
    x_index='int',
    y_index='int',
    z_index='int',
    returns='int',
)
def which_domain(x, y, z):
    """Return the rank of the process whose domain contains (x, y, z).

    Deliberately multiplies by pre-computed reciprocal domain sizes
    rather than dividing by the domain sizes: with division, inlining
    combined with aggressive math optimizations by the compiler can
    make this function non-deterministic for particles located exactly
    at the boundary between two domains.
    """
    z_index = int(z*domain_size_z_inv)
    y_index = int(y*domain_size_y_inv)
    x_index = int(x*domain_size_x_inv)
    # This equals domain_layout[x_index, y_index, z_index],
    # computed directly as an optimization.
    return x_index*domain_subdivisions_21 + y_index*domain_subdivisions_2 + z_index
# This function computes the ranks of the processes governing the
# domain which is located i domains to the right, j domains forward and
# k domains up, relative to the local domain.
@cython.pheader(
    # Arguments
    i='int',
    j='int',
    k='int',
    # Locals
    index_i='int',
    index_j='int',
    index_k='int',
    returns='int',
)
def rank_neighbouring_domain(i, j, k):
    """Return the rank of the neighbour domain offset by (i, j, k)."""
    # Wrap around periodically in each dimension
    index_i = mod(domain_layout_local_indices[0] + i, domain_subdivisions[0])
    index_j = mod(domain_layout_local_indices[1] + j, domain_subdivisions[1])
    index_k = mod(domain_layout_local_indices[2] + k, domain_subdivisions[2])
    return domain_layout[index_i, index_j, index_k]
# Function which communicates local component data
@cython.header(
    # Arguments
    component_send='Component',
    variables=list,  # list of str's
    pairing_level=str,
    interaction_name=str,
    tile_indices_send='Py_ssize_t[::1]',
    dest='int',
    source='int',
    component_recv='Component',
    use_Δ_recv='bint',
    # Locals
    N_particles='Py_ssize_t',
    N_particles_recv='Py_ssize_t',
    contain_particles='signed char*',
    domain_layout_source='int[::1]',
    indexᵖ='Py_ssize_t',
    mv_recv='double[::1]',
    mv_recv_buf='double[::1]',
    mv_recv_list=list,
    mv_send='double[::1]',
    mv_send_buf='double[::1]',
    mv_send_list=list,
    n_send='Py_ssize_t',
    operation=str,
    ptr_recv='double*',
    ptr_recv_buf='double*',
    ptr_send='double*',
    ptr_send_buf='double*',
    rung='Py_ssize_t*',
    rung_N='Py_ssize_t',
    rung_index='signed char',
    rung_indices='signed char*',
    rung_indices_buf='signed char[::1]',
    rung_indices_buf_ptr='signed char*',
    rung_indices_jumped='signed char*',
    rung_indices_jumped_buf='signed char[::1]',
    rung_indices_jumped_buf_ptr='signed char*',
    rung_particle_index='Py_ssize_t',
    rungs_N='Py_ssize_t*',
    subtiling_name=str,
    tile='Py_ssize_t**',
    tile_index='Py_ssize_t',
    tile_indices_send_prev_ptr='Py_ssize_t*',
    tile_indices_send_ptr='Py_ssize_t*',
    tiles='Py_ssize_t***',
    tiles_rungs_N='Py_ssize_t**',
    tiling='Tiling',
    tiling_name=str,
    tiling_recv='Tiling',
    use_rungs='bint',
    variable=str,
    returns='Component',
)
def sendrecv_component(
    component_send, variables, pairing_level, interaction_name,
    tile_indices_send, dest, source, component_recv=None, use_Δ_recv=True,
):
    """This function operates in two modes:
    - Communicate data (no component_recv supplied):
      The data of component_send will be send and received
      into the global component_buffer.
      The component_buffer is then returned.
    - Communicate and apply buffers (a component_recv is supplied):
      The data buffer of component_send will be send and
      received into the data buffers of component_recv. The received
      buffer data is then used to update the corresponding data
      in component_recv.
      It is assumed that the data arrays of component_recv are large
      enough to hold the data from component_send.
      The return value is the updated component_recv.
    Note that if you try to use this function locally within a single
    process (dest == rank == source), nothing will happen. Thus you
    cannot rely on this function to apply buffers to data attributes.
    The variables argument must be a list of str's designating
    which local data variables of component_send to communicate.
    The implemented variables are:
    - 'pos'
    - 'mom'
    Only particles within the tiles given by tile_indices_send will be
    communicated. The tiling used will be determined from
    interaction_name. In the case of pairing_level == 'domain',
    no actual tiling should be used, and so here we use the trivial
    tiling. Note that the passed tile_indices_send should be identical
    on all processes. After tile particles have been communicated,
    the returned buffer component will be tile sorted at the domain
    (tile, not subtile) level. Note that the particle order is not
    preserved when doing such a communication + tile sorting.
    """
    global component_buffer, tile_indices_send_prev
    if component_send.representation != 'particles':
        abort('The sendrecv_component function is only implemented for particle components')
    # No communication is needed if the destination and source is
    # really the local process.
    if dest == rank == source:
        return component_send
    # Determine the mode of operation
    operation = '+='
    if 𝔹[component_recv is None]:
        operation = '='
    # Determine which tiling to use
    if pairing_level == 'tile':
        tiling_name = f'{interaction_name} (tiles)'
    else:  # pairing_level == 'domain'
        tiling_name = 'trivial'
    # Find out how many particles should be communicated
    tiling = component_send.tilings[tiling_name]
    tiles = tiling.tiles
    tiles_rungs_N = tiling.tiles_rungs_N
    if 𝔹[operation == '=']:
        tile_indices_send_ptr = cython.address(tile_indices_send[:])
        N_particles = 0
        for tile_index in range(tile_indices_send.shape[0]):
            tile_index = tile_indices_send_ptr[tile_index]
            rungs_N = tiles_rungs_N [tile_index]
            for rung_index in range(
                ℤ[component_send.lowest_populated_rung],
                ℤ[component_send.highest_populated_rung + 1],
            ):
                N_particles += rungs_N[rung_index]
    else:  # operation == '+=':
        # When operation == '+=', we always send all particles back
        # to the process from which they originally came.
        # Really we should only include particles within the tiles
        # given by tile_indices_send, and so the above loop over
        # these tiles is correct even when operation == '+='.
        # However, as long as this function has been called
        # correctly, the component_send is really just a buffer
        # component storing only particles within the specified
        # tiles, and so we can skip the counting above.
        N_particles = component_send.N_local
        # Also extract tile variables from component_recv
        tiling_recv = component_recv.tilings[tiling_name]
    # Communicate the number of particles to be exchanged
    # (lower-case sendrecv handles Python scalars).
    N_particles_recv = sendrecv(N_particles, dest=dest, source=source)
    # In communicate mode (operation == '='),
    # the global component_buffer is used as component_recv.
    if 𝔹[operation == '=']:
        # We cannot simply import Component from the species module,
        # as this would create an import loop. Instead, the first time
        # the component_buffer is needed, we grab the type of the passed
        # component_send (Component) and instantiate such an instance.
        if component_buffer is None:
            component_buffer = type(component_send)('', 'cold dark matter', N=1)
        # Adjust important meta data on the buffer component
        component_buffer.name = component_send.name
        component_buffer.species = component_send.species
        component_buffer.representation = component_send.representation
        component_buffer.N = component_send.N
        component_buffer.mass = component_send.mass
        component_buffer.softening_length = component_send.softening_length
        component_buffer.use_rungs = component_send.use_rungs
        # Enlarge the data arrays of the component_buffer if necessary
        component_buffer.N_local = N_particles_recv
        if component_buffer.N_allocated < component_buffer.N_local:
            # Temporarily set use_rungs = True to ensure that the
            # rung_indices and rung_indices_jumped
            # get resized as well.
            use_rungs = component_buffer.use_rungs
            component_buffer.use_rungs = True
            component_buffer.resize(component_buffer.N_local)
            component_buffer.use_rungs = use_rungs
        # Use component_buffer as component_recv
        component_recv = component_buffer
    # Operation-dependent preparations for the communication
    if 𝔹[operation == '=']:
        # In communication mode the particles within the tiles are
        # temporarily copied to the mv_send_buf buffer.
        # Make sure that this is large enough.
        # The factor 3 is due to the three components (x, y, z).
        mv_send_buf = get_buffer(3*N_particles, 'send')
        ptr_send_buf = cython.address(mv_send_buf[:])
    else:  # operation == '+=':
        # We need to receive the data into a buffer, and then update the
        # local data by this amount. Get the buffer.
        mv_recv_buf = get_buffer(3*N_particles_recv, 'recv')
        ptr_recv_buf = cython.address(mv_recv_buf[:])
    # Do the communication for each variable
    for variable in variables:
        # Get arrays to send and receive into
        if variable == 'pos':
            with unswitch:
                if 𝔹[operation == '=']:
                    mv_send = component_send.pos_mv[:3*component_send.N_local]
                    mv_recv = component_recv.pos_mv
                else:  # operation == '+='
                    abort('Δpos not implemented')
        elif variable == 'mom':
            with unswitch:
                if 𝔹[operation == '=']:
                    mv_send = component_send.mom_mv[:3*component_send.N_local]
                    mv_recv = component_recv.mom_mv
                else:  # operation == '+='
                    mv_send = component_send.Δmom_mv[:3*component_send.N_local]
                    if use_Δ_recv:
                        mv_recv = component_recv.Δmom_mv
                    else:
                        mv_recv = component_recv.mom_mv
        else:
            abort(
                f'Variable "{variable}" supplied to sendrecv_component() '
                f'but only "pos" and "mom" are implemented.'
            )
        ptr_send = cython.address(mv_send[:])
        ptr_recv = cython.address(mv_recv[:])
        # In communication mode we only need to send the particular
        # particles within the specified tiles. Here we copy the
        # variable of these specific particles to a buffer.
        if 𝔹[operation == '=']:
            n_send = copy_particles_in_tiles(
                component_send,
                tiling, tile_indices_send,
                ptr_send, ptr_send_buf,
            )
            mv_send = mv_send_buf[:n_send]
        # Communicate the particle data
        if 𝔹[operation == '=']:
            Sendrecv(mv_send, recvbuf=mv_recv, dest=dest, source=source)
        else:  # operation == '+='
            # Receive into the buffer and add the received values onto
            # the existing local data of the tile particles.
            Sendrecv(mv_send, recvbuf=mv_recv_buf, dest=dest, source=source)
            copy_particles_in_tiles(
                component_recv,
                tiling_recv, tile_indices_send,
                ptr_recv_buf, ptr_recv,
                add=True,
            )
    # When in communication mode, we additionally need to communicate
    # the rung indices and rung jumps of the communicated particles.
    # If not using rungs, we skip this.
    if 𝔹[operation == '=' and component_send.use_rungs]:
        # Create contiguous memory view over rung indices.
        # We must only include the rung indices for
        # particles within the specified tiles.
        # (refcheck=False allows resizing the global NumPy array
        # despite existing references to it.)
        if rung_indices_arr.shape[0] < N_particles:
            rung_indices_arr.resize(N_particles, refcheck=False)
        rung_indices_buf = rung_indices_arr
        rung_indices_buf_ptr = cython.address(rung_indices_buf[:])
        n_send = 0
        for tile_index in range(tile_indices_send.shape[0]):
            tile_index = tile_indices_send_ptr[tile_index]
            rungs_N = tiles_rungs_N [tile_index]
            for rung_index in range(
                ℤ[component_send.lowest_populated_rung],
                ℤ[component_send.highest_populated_rung + 1],
            ):
                rung_N = rungs_N[rung_index]
                for rung_particle_index in range(rung_N):
                    rung_indices_buf_ptr[n_send] = rung_index
                    n_send += 1
        # Communicate rung indices
        Sendrecv(rung_indices_buf[:n_send],
            recvbuf=component_recv.rung_indices_mv, dest=dest, source=source)
        # Fill buffer with jumped rung indices
        # and communicate these as well.
        rung_indices_jumped = component_send.rung_indices_jumped
        rung_indices_jumped_buf = rung_indices_buf  # reuse buffer
        rung_indices_jumped_buf_ptr = cython.address(rung_indices_jumped_buf[:])
        n_send = 0
        for tile_index in range(tile_indices_send.shape[0]):
            tile_index = tile_indices_send_ptr[tile_index]
            tile    = tiles        [tile_index]
            rungs_N = tiles_rungs_N[tile_index]
            for rung_index in range(
                ℤ[component_send.lowest_populated_rung],
                ℤ[component_send.highest_populated_rung + 1],
            ):
                rung = tile[rung_index]
                rung_N = rungs_N[rung_index]
                for rung_particle_index in range(rung_N):
                    indexᵖ = rung[rung_particle_index]
                    rung_indices_jumped_buf_ptr[n_send] = rung_indices_jumped[indexᵖ]
                    n_send += 1
        Sendrecv(rung_indices_jumped_buf[:n_send],
            recvbuf=component_recv.rung_indices_jumped_mv, dest=dest, source=source)
        # Count up how many particles occupy each rung
        rung_indices = component_recv.rung_indices
        rungs_N = component_recv.rungs_N
        for rung_index in range(N_rungs):
            rungs_N[rung_index] = 0
        for indexᵖ in range(component_recv.N_local):
            rung_index = rung_indices[indexᵖ]
            rungs_N[rung_index] += 1
        # Find and set lowest and highest populated rung
        component_recv.set_lowest_highest_populated_rung()
        # Communicate the active rung
        component_recv.lowest_active_rung = sendrecv(
            component_send.lowest_active_rung, dest=dest, source=source,
        )
        if component_recv.lowest_active_rung < component_recv.lowest_populated_rung:
            # There is no need to have the lowest active rung
            # be below the lowest populated rung.
            component_recv.lowest_active_rung = component_recv.lowest_populated_rung
    # When in communication mode the buffer (recv) component
    # needs to know its own tiling.
    if 𝔹[operation == '=']:
        # Ensure that the required tiling (and subtiling)
        # is instantiated on the buffer component.
        tiling_recv = component_recv.tilings.get(tiling_name)
        if tiling_recv is None:
            component_recv.init_tiling(tiling_name, initial_rung_size=0)
            tiling_recv = component_recv.tilings[tiling_name]
            if 𝔹[tiling_name != 'trivial']:
                subtiling_name = f'{interaction_name} (subtiles)'
                component_recv.init_tiling(subtiling_name, initial_rung_size=0)
        # Place the tiling over the domain of the process
        # with a rank given by 'source'.
        if 𝔹[tiling_name != 'trivial']:
            domain_layout_source = asarray(
                np.unravel_index(source, domain_subdivisions),
                dtype=C2np['int'],
            )
            tiling_recv.relocate(asarray(
                (
                    domain_layout_source[0]*domain_size_x,
                    domain_layout_source[1]*domain_size_y,
                    domain_layout_source[2]*domain_size_z,
                ),
                dtype=C2np['double'],
            ))
        # Perform tile sorting (but do not sort into subtiles)
        if tile_indices_send_prev is None:
            tiling_recv.sort(None, -1, already_reset=False)
        else:
            # We know that all particles (left over from the last call)
            # are within tile_indices_send_prev. Reset particle
            # information within tiling_recv before sorting into tiles.
            tile_indices_send_prev_ptr = cython.address(tile_indices_send_prev[:])
            tiles_rungs_N = tiling_recv.tiles_rungs_N
            contain_particles = tiling_recv.contain_particles
            for tile_index in range(tile_indices_send_prev.shape[0]):
                tile_index = tile_indices_send_prev_ptr[tile_index]
                rungs_N = tiles_rungs_N[tile_index]
                for rung_index in range(N_rungs):
                    rungs_N[rung_index] = 0
                contain_particles[tile_index] = 0
            tiling_recv.sort(None, -1, already_reset=True)
        # Set the global tile_indices_send_prev,
        # for use with the next call to this function.
        tile_indices_send_prev = tile_indices_send
    return component_recv
# Declare global buffers used by sendrecv_component() function.
# The rung_indices_arr array is also used by the exchange() function
# and the species.Component class.
# The component_buffer is instantiated lazily on the first call to
# sendrecv_component(), while tile_indices_send_prev remembers the
# tile indices from the previous call.
cython.declare(
    component_buffer='Component',
    rung_indices_arr=object,  # np.ndarray
    tile_indices_send_prev='Py_ssize_t[::1]',
)
component_buffer = None
rung_indices_arr = empty(1, dtype=C2np['signed char'])
tile_indices_send_prev = None
# Helper function for the sendrecv_component() function,
# handling copying of particle data within specified tiles to a buffer.
@cython.header(
    # Arguments
    component='Component',
    tiling='Tiling',
    tile_indices='Py_ssize_t[::1]',
    ptr='double*',
    ptr_buf='double*',
    add='bint',
    # Locals
    count='Py_ssize_t',
    count_add='Py_ssize_t',
    dim='Py_ssize_t',
    indexᵖ='Py_ssize_t',
    indexˣ='Py_ssize_t',
    rung='Py_ssize_t*',
    rung_N='Py_ssize_t',
    rung_index='signed char',
    rung_index_bgn='signed char',
    rung_index_end='signed char',
    rung_index_inactive_bgn='signed char',
    rung_index_inactive_end='signed char',
    rung_particle_index='Py_ssize_t',
    rungs_N='Py_ssize_t*',
    tile='Py_ssize_t**',
    tile_index='Py_ssize_t',
    tile_indices_ptr='Py_ssize_t*',
    tiles='Py_ssize_t***',
    tiles_rungs_N='Py_ssize_t**',
    returns='Py_ssize_t',
)
def copy_particles_in_tiles(component, tiling, tile_indices, ptr, ptr_buf, add=False):
    """Copy 3D (x, y, z) particle data between a particle data array
    and a packed contiguous buffer, restricted to the particles within
    the given tiles.
    With add=False, the data of the tile particles is packed from the
    particle array ptr into consecutive positions of the buffer
    ptr_buf (overwriting).
    With add=True, the roles are reversed: the packed data in ptr is
    added onto the corresponding particle entries of ptr_buf.
    The number of doubles processed (3 per particle) is returned.
    """
    tiles = tiling.tiles
    tiles_rungs_N = tiling.tiles_rungs_N
    tile_indices_ptr = cython.address(tile_indices[:])
    # When adding to existing values, we skip over particles
    # on inactive rungs as these will have zero updates.
    if add:
        rung_index_inactive_bgn = component.lowest_populated_rung
        rung_index_inactive_end = component.lowest_active_rung
        rung_index_bgn = rung_index_inactive_end
    else:
        rung_index_bgn = component.lowest_populated_rung
    rung_index_end = component.highest_populated_rung + 1
    # Iterate through the particles in the tiles and perform the copying
    count = 0
    for tile_index in range(tile_indices.shape[0]):
        tile_index = tile_indices_ptr[tile_index]
        tile    = tiles        [tile_index]
        rungs_N = tiles_rungs_N[tile_index]
        # Skip particles on inactive rungs
        # when adding to existing values.
        # Their entries still occupy space in the packed buffer,
        # so count must be advanced past them.
        with unswitch:
            if add:
                with unswitch:
                    if rung_index_inactive_bgn < rung_index_inactive_end:
                        count_add = 0
                        for rung_index in range(rung_index_inactive_bgn, rung_index_inactive_end):
                            count_add += rungs_N[rung_index]
                        count += 3*count_add
        # Copy data of all (overwrite) or just
        # the active (add) particles.
        for rung_index in range(rung_index_bgn, rung_index_end):
            rung   = tile   [rung_index]
            rung_N = rungs_N[rung_index]
            for rung_particle_index in range(rung_N):
                indexᵖ = rung[rung_particle_index]
                indexˣ = 3*indexᵖ
                for dim in range(3):
                    with unswitch:
                        if add:
                            ptr_buf[indexˣ + dim] += ptr[count + dim]
                        else:
                            ptr_buf[count + dim] = ptr[indexˣ + dim]
                count += 3
    return count
# Very general function for different MPI communications
@cython.pheader(
    # Arguments
    block_send=object,  # Memoryview of dimension 1, 2 or 3
    block_recv=object,  # Memoryview of dimension 1, 2 or 3, or int
    dest='int',
    source='int',
    root='int',
    reverse='bint',
    mpifun=str,
    operation=str,
    # Local
    arr_recv=object,  # np.ndarray
    arr_send=object,  # np.ndarray
    block_recv_passed_as_scalar='bint',
    contiguous_recv='bint',
    contiguous_send='bint',
    data_recv=object,  # np.ndarray
    data_send=object,  # np.ndarray
    i='Py_ssize_t',
    index='Py_ssize_t',
    j='Py_ssize_t',
    k='Py_ssize_t',
    recving='bint',
    sending='bint',
    shape_send=tuple,
    size_recv='Py_ssize_t',
    size_send='Py_ssize_t',
    sizes_recv='Py_ssize_t[::1]',
    recvbuf_mv='double[::1]',
    recvbuf_name=object,  # int or str
    reverse_mpifun_mapping=dict,
    sendbuf_mv='double[::1]',
    using_recvbuf='bint',
    returns=object,  # np.ndarray or mpi4py.MPI.Request
)
def smart_mpi(
    block_send=(), block_recv=(), dest=-1, source=-1, root=master_rank,
    reverse=False, mpifun='', operation='=',
):
    """This function will do MPI communication. It will send the data in
    the array/memoryview block_send to the process of rank dest
    and receive data into array/memoryview block_recv from rank source.
    The arrays can be of any shape (currently bigger than 0 and less
    than 4) and size and may be different for the two.
    If block_recv is larger than the received data, the extra elements
    in the end will be filled with junk if the dimension of block_recv
    is larger than 1. Though for the sake of performance, always pass
    a fitting block_recv.
    The MPI function to use is specified in the mpifun argument
    (e.g. mpifun='sendrecv' or mpifun='send'). Upper-case communication
    (array communication) is always used, regardless of the case of the
    value of mpifun.
    For some MPI communications a root process should be specified.
    This can be set by the root argument.
    All arguments are optional, so that it is not needed to specify e.g.
    block_recv when doing a Send. For Cython to be able to compile this,
    a cython.pheader decorator is used (corresponding to cython.ccall
    or cpdef). Also, if a call to smart_mpi results in a receive but not
    a send, block_recv can be passed as the first argument
    instead of block_send (which is not used in this case).
    It is allowed not to pass in a block_recv, even when a message
    should be received. In that case, the recvbuf buffer will be used
    and returned.
    The received data can either be copied into block_recv (overwriting
    existing data) or it can be added to the existing data. Change
    this behaviour through the operation argument (operation='=' or
    operation='+=').
    If the passed blocks are contiguous, they will be used directly
    in the communication (though in the case of block_recv, only when
    operation='='). If not, contiguous buffers will be used. The
    buffers used are the variables sendbuf/sendbuf_mv and
    recvbuf/recvbuf_mv. These will be enlarged if necessary.
    Since the buffers contain doubles, the passed arrays must also
    contain doubles if the buffers are to be used. If communication can
    take place directly without the use of the buffers, the passed
    arrays may contain any type (though the type of the send and recv
    block should always be identical).
    What is returned depends on the choice of mpifun. Whenever a message
    should be received, the passed block_recv is returned (as block_recv
    is populated with values in-place, this is rarely used). When a
    non-blocking send-only is used, the MPI request is returned. When a
    blocking send-only is used, the send data is returned.
    If reverse is True, the communication is reversed, meaning that
    sending block_send to dist and receiving into block_recv from source
    turns into sending block_recv to source and receiving into
    block_send from dist.
    """
    # Sanity check on operation argument
    if master and operation not in {'=', '+='}:
        abort(f'smart_mpi() got operation = "{operation}" ∉ {{"=", "+="}}')
    # Determine whether we are sending and/or receiving
    mpifun = mpifun.lower()
    sending = False
    recving = False
    if 'all' in mpifun:
        sending = True
        recving = True
    else:
        if 'send' in mpifun:
            sending = True
            if dest == -1:
                abort('Cannot send when no destination is given')
        if 'recv' in mpifun:
            recving = True
            if source == -1:
                abort('Cannot receive when no source is given')
        if 'bcast' in mpifun:
            sending = (rank == root)
            recving = not sending
        if 'gather' in mpifun:
            sending = True
            recving = (rank == root)
    if not sending and not recving:
        if mpifun:
            abort(f'MPI function "{mpifun}" not understood')
        else:
            abort('Which MPI function to use is not specified')
    # If requested, reverse the communication direction
    if reverse:
        # Swap the send and receive blocks
        block_send, block_recv = block_recv, block_send
        # Swap the source and destination
        dest, source = source, dest
        # Reverse the MPI function
        reverse_mpifun_mapping = {'recv'    : 'send',
                                  'send'    : 'recv',
                                  'sendrecv': 'sendrecv',
                                  }
        if mpifun not in reverse_mpifun_mapping:
            abort(f'MPI function "{mpifun}" cannot be reversed')
        mpifun = reverse_mpifun_mapping[mpifun]
    # If only receiving, block_recv should be
    # accessible as the first argument.
    if (
            not sending
        and recving
        and not (isinstance(block_send, tuple) and len(block_send) == 0)  # block_send != ()
        and     (isinstance(block_recv, tuple) and len(block_recv) == 0)  # block_recv == ()
    ):
        block_send, block_recv = block_recv, block_send
    # If block_recv is an int or str,
    # this designates a specific buffer to use as recvbuf.
    recvbuf_name = 'recv'
    if isinstance(block_recv, (int, np.integer, str)):
        recvbuf_name = block_recv
    # NumPy arrays over the data
    arr_send = asarray(block_send)
    arr_recv = asarray(block_recv)
    # If the input blocks contain different types (and one of them
    # contain doubles), convert them both to doubles.
    # This is not done in-place, meaning that the passed recv_block will
    # not be changed! The returned block should be used instead.
    if sending and recving:
        if (    arr_send.dtype == np.dtype(C2np['double'])
            and arr_recv.dtype != np.dtype(C2np['double'])):
            arr_recv = arr_recv.astype(C2np['double'])
        elif (    arr_send.dtype != np.dtype(C2np['double'])
              and arr_recv.dtype == np.dtype(C2np['double'])):
            arr_send = arr_send.astype(C2np['double'])
    # Are the passed arrays contiguous?
    contiguous_send = arr_send.flags.c_contiguous
    contiguous_recv = arr_recv.flags.c_contiguous
    # The send and recv blocks cannot be scalar NumPy arrays.
    # Do an in-place reshape to 1D-arrays of size 1.
    if arr_send.ndim == 0:
        arr_send.resize(1, refcheck=False)
    block_recv_passed_as_scalar = False
    if arr_recv.ndim == 0:
        block_recv_passed_as_scalar = True
        arr_recv.resize(1, refcheck=False)
    size_send = arr_send.size
    shape_send = arr_send.shape
    # Figure out the size of the data to be received
    size_recv = 0
    if mpifun == 'bcast':
        # Broadcast the shape of the data to be broadcasted.
        # (shape_recv is only ever used in the bcast cases below.)
        shape_recv = bcast(arr_send.shape, root=root)
        size_recv = np.prod(shape_recv)
        if rank == root:
            size_recv = 0
    elif mpifun == 'gather':
        # The root process will receive a block of size_send
        # from all processes.
        if rank == root:
            size_recv = nprocs*size_send
    elif mpifun == 'gatherv':
        # The root process will receive blocks of possibly different
        # sizes from all processes. Communicate these sizes.
        if rank == root:
            sizes_recv = empty(nprocs, dtype=C2np['Py_ssize_t'])
        Gather(asarray(size_send, dtype=C2np['Py_ssize_t']),
            sizes_recv if rank == root else None)
    elif sending and recving:
        if mpifun == 'allgather':
            # A block of size_send is to be received from each process
            size_recv = nprocs*size_send
        elif mpifun == 'allgatherv':
            # The blocks to be received from each process may have
            # different sizes. Communicate these sizes.
            sizes_recv = empty(nprocs, dtype=C2np['Py_ssize_t'])
            Allgather(asarray(size_send, dtype=C2np['Py_ssize_t']), sizes_recv)
        else:
            # Communicate the size of the data to be exchanged
            size_recv = sendrecv(size_send, dest=dest, source=source)
    elif recving:
        # The exact size of the data to receive is not known,
        # but it cannot be larger than the size of the receiver block.
        size_recv = arr_recv.size
    # Based on the contiguousness of the input arrays, assign the names
    # data_send and data_recv to the contiguous blocks of data,
    # which are to be passed into the MPI functions.
    if contiguous_send:
        data_send = arr_send
    else:
        sendbuf_mv = get_buffer(size_send, 'send')
        data_send = sendbuf_mv
    # When no block_recv is passed, use the recvbuf buffer
    using_recvbuf = False
    if arr_recv.size == 0 or block_recv_passed_as_scalar:
        using_recvbuf = True
        recvbuf_mv = get_buffer(size_recv, recvbuf_name)
        data_recv = recvbuf_mv
        arr_recv = asarray(data_recv)
    elif contiguous_recv and operation == '=':
        # Only if operation == '=' can we receive
        # directly into the input array.
        data_recv = arr_recv
    else:
        using_recvbuf = True
        recvbuf_mv = get_buffer(size_recv, recvbuf_name)
        data_recv = recvbuf_mv
    # Fill send buffer if this is to be used
    if sending and not contiguous_send:
        copy_to_contiguous(arr_send, sendbuf_mv)
    # Do the communication
    if mpifun == 'allgather':
        Allgather(data_send, data_recv)
    elif mpifun == 'allgatherv':
        Allgatherv(data_send, (data_recv, sizes_recv))
    elif mpifun == 'bcast':
        if rank == root:
            Bcast(data_send, root=root)
        else:
            Bcast(data_recv, root=root)
    elif mpifun == 'gather':
        Gather(data_send, data_recv, root=root)
    elif mpifun == 'gatherv':
        Gatherv(data_send, (data_recv, sizes_recv) if rank == root else None, root=root)
    elif mpifun == 'isend':
        return Isend(data_send, dest=dest)
    elif mpifun == 'recv':
        Recv(data_recv, source=source)
    elif mpifun == 'send':
        Send(data_send, dest=dest)
    elif mpifun == 'sendrecv':
        Sendrecv(data_send, recvbuf=data_recv, dest=dest, source=source)
    else:
        abort('MPI function "{}" is not implemented'.format(mpifun))
    # If only sending, return now
    if not recving:
        return data_send
    # If nothing was received, return an empty slice of arr_recv
    if size_recv == 0:
        return arr_recv[:0]
    # Copy or add the received data from the buffer
    # to the passed block_recv (arr_recv), if needed.
    if (operation == '=' and not contiguous_recv) or operation == '+=':
        copy_to_noncontiguous(recvbuf_mv, arr_recv, operation)
    # If both sending and receiving, the two blocks of data
    # should (probably) have the same shape. If no block_recv was
    # supplied, arr_recv will always be 1D.
    # In this case, do a reshaping.
    if sending and recving and using_recvbuf and size_send == size_recv:
        arr_recv = arr_recv.reshape(shape_send)
    # When broadcasting, the received data should be of the same size
    # as that which was send.
    if mpifun == 'bcast' and using_recvbuf:
        arr_recv = arr_recv.reshape(shape_recv)
    # Return the now populated arr_recv
    return arr_recv
# Function for copying a multi-dimensional non-contiguous
# array into a 1D contiguous buffer.
@cython.header(
    # Arguments
    arr=object,  # np.ndarray
    buf='double[::1]',
    # Locals
    bufptr='double*',
    bufview1D='double[::1]',
    bufview2D='double[:, ::1]',
    bufview3D='double[:, :, ::1]',
    i='Py_ssize_t',
    index='Py_ssize_t',
    index_i='Py_ssize_t',
    index_ij='Py_ssize_t',
    j='Py_ssize_t',
    k='Py_ssize_t',
    ndim='int',
    size='Py_ssize_t',
    size_i='Py_ssize_t',
    size_j='Py_ssize_t',
    size_k='Py_ssize_t',
    view1D='double[:]',
    view2D='double[:, :]',
    view3D='double[:, :, :]',
    viewcontig='double[::1]',
    returns='void',
)
def copy_to_contiguous(arr, buf):
    """It is assumed that the contiguous buf is at least
    as large as the arr, but it may be larger.
    The arr is converted to doubles if not already of this type.
    """
    arr = asarray(arr, dtype=C2np['double'])
    # Fast path: an already contiguous array can be copied flat
    if arr.flags.c_contiguous:
        size = arr.size
        viewcontig = arr.reshape(size)
        buf[:size] = viewcontig
        return
    ndim = arr.ndim
    bufptr = cython.address(buf[:])
    # Reinterpret the flat buffer as a contiguous typed view of the
    # same shape as arr, letting the memoryview machinery handle the
    # strided copy.
    if ndim == 1:
        view1D = arr
        bufview1D = cast(bufptr, 'double[:view1D.shape[0]]')
        bufview1D[:] = view1D
    elif ndim == 2:
        view2D = arr
        bufview2D = cast(bufptr, 'double[:view2D.shape[0], :view2D.shape[1]]')
        bufview2D[...] = view2D
    elif ndim == 3:
        view3D = arr
        bufview3D = cast(bufptr, 'double[:view3D.shape[0], :view3D.shape[1], :view3D.shape[2]]')
        bufview3D[...] = view3D
    elif ndim == 0:
        # Nothing to copy for 0-dimensional arrays
        pass
    else:
        abort(f'copy_to_contiguous() got array with {ndim} dimensions')
# Function for copying a 1D contiguous buffer into a
# multi-dimensional non-contiguous array.
@cython.header(
    # Arguments
    buf='double[::1]',
    arr=object,  # np.ndarray
    operation=str,
    # Locals
    bufptr='double*',
    bufview1D='double[::1]',
    bufview2D='double[:, ::1]',
    bufview3D='double[:, :, ::1]',
    i='Py_ssize_t',
    index='Py_ssize_t',
    index_i='Py_ssize_t',
    index_ij='Py_ssize_t',
    j='Py_ssize_t',
    k='Py_ssize_t',
    ndim='int',
    size='Py_ssize_t',
    size_i='Py_ssize_t',
    size_j='Py_ssize_t',
    size_k='Py_ssize_t',
    view1D='double[:]',
    view2D='double[:, :]',
    view3D='double[:, :, :]',
    viewcontig='double[::1]',
    returns='void',
)
def copy_to_noncontiguous(buf, arr, operation='='):
    """It is assumed that the contiguous buf is at least
    as large as the arr, but it may be larger.
    With operation '=' the buffer values overwrite arr;
    with operation '+=' they are added onto arr.
    """
    arr = asarray(arr, dtype=C2np['double'])
    bufptr = cython.address(buf[:])
    # Fast path: an already contiguous array can be handled flat
    if arr.flags.c_contiguous:
        size = arr.size
        viewcontig = arr.reshape(size)
        if operation == '=':
            viewcontig[:] = buf[:size]
        else:  # operation == '+='
            for index in range(size):
                viewcontig[index] += bufptr[index]
        return
    ndim = arr.ndim
    # For '=' we reinterpret the buffer as a typed view of arr's shape
    # and let the memoryview machinery do the strided copy. For '+='
    # we traverse the buffer manually in row-major (C) order,
    # maintaining the flat index through incremental arithmetic.
    if ndim == 1:
        view1D = arr
        size = view1D.shape[0]
        if operation == '=':
            bufview1D = cast(bufptr, 'double[:size]')
            view1D[:] = bufview1D
        else:  # operation == '+='
            for index in range(size):
                view1D[index] += bufptr[index]
    elif ndim == 2:
        view2D = arr
        size_i, size_j = view2D.shape[0], view2D.shape[1]
        if operation == '=':
            bufview2D = cast(bufptr, 'double[:size_i, :size_j]')
            view2D[...] = bufview2D
        else:  # operation == '+='
            index_i = -size_j
            for i in range(size_i):
                index_i += size_j  # index_i = i*size_j
                for j in range(size_j):
                    index = index_i + j
                    view2D[i, j] += bufptr[index]
    elif ndim == 3:
        view3D = arr
        size_i, size_j, size_k = view3D.shape[0], view3D.shape[1], view3D.shape[2]
        if operation == '=':
            bufview3D = cast(bufptr, 'double[:size_i, :size_j, :size_k]')
            view3D[...] = bufview3D
        else:  # operation == '+='
            index_i = -size_j
            for i in range(size_i):
                index_i += size_j
                index_ij = (index_i - 1)*size_k
                for j in range(size_j):
                    index_ij += size_k  # index_ij = (i*size_j + j)*size_k
                    for k in range(size_k):
                        index = index_ij + k
                        view3D[i, j, k] += bufptr[index]
    elif ndim == 0:
        # Nothing to copy for 0-dimensional arrays
        pass
    else:
        abort(f'copy_to_noncontiguous() got array with {ndim} dimensions')
# Function which manages buffers used by other functions
@cython.pheader(
    # Arguments
    size_or_shape=object,  # Py_ssize_t or tuple
    buffer_name=object,  # Any hashable object
    nullify='bint',
    # Local
    N_buffers='Py_ssize_t',
    buffer='double*',
    buffer_mv='double[::1]',
    i='Py_ssize_t',
    index='Py_ssize_t',
    shape=tuple,
    size='Py_ssize_t',
    size_given='bint',
    returns=object,  # multi-dimensional array of doubles
)
def get_buffer(size_or_shape=-1, buffer_name=0, nullify=False):
    """This function returns a contiguous buffer containing doubles.
    The buffer will be exactly of size 'size_or_shape' when this is an
    integer, or exactly the shape of 'size_or_shape' when this is
    a tuple. If no size or shape is given, the buffer will be returned
    as a 1D array with whatever size it happens to have.
    When multiple buffers are in use, a specific buffer can be
    requested by passing a buffer_name, which can be any hashable type.
    A buffer with the given name does not have to exist beforehand.
    A given buffer will be reallocated (enlarged) if necessary.
    If nullify is True, all elements of the buffer will be set to 0.
    """
    global buffers
    # Get shape and size from argument
    if size_or_shape == -1:
        size_given = False
        size_or_shape = 1
    else:
        size_given = True
    shape = size_or_shape if isinstance(size_or_shape, tuple) else (size_or_shape, )
    size = np.prod(shape)
    # The smallest possible buffer size is 1
    if size == 0:
        size = 1
        shape = (1, )
    # Fetch or create the buffer
    if buffer_name in buffers_mv:
        # This buffer already exists.
        # Find the position of buffer_name within the
        # insertion-ordered buffers_mv dict; this position is also the
        # index of the raw pointer within the C array of buffers.
        index = 0
        for key in buffers_mv:
            if key == buffer_name:
                break
            index += 1
        buffer = buffers[index]
        buffer_mv = buffers_mv[buffer_name]
        if size > buffer_mv.shape[0]:
            # Enlarge this buffer
            resize_buffer(size, buffer_name)
            buffer = buffers[index]
            buffer_mv = buffers_mv[buffer_name]
        elif not size_given:
            # No size was given. Use the entire array.
            size = buffer_mv.shape[0]
            shape = (size, )
    else:
        # This buffer does not exist yet. Create it.
        buffer = malloc(size*sizeof('double'))
        N_buffers = len(buffers_mv) + 1
        buffers = realloc(buffers, N_buffers*sizeof('double*'))
        buffers[N_buffers - 1] = buffer
        buffer_mv = cast(buffer, 'double[:size]')
        buffers_mv[buffer_name] = buffer_mv
    # Nullify the buffer, if required
    if nullify:
        for i in range(size):
            buffer[i] = 0
    # Return the buffer in the requested shape
    return np.reshape(buffer_mv[:size], shape)
# Function which resizes one of the global buffers
@cython.header(
    # Arguments
    buffer_name=object,  # Any hashable object
    size='Py_ssize_t',
    # Local
    buffer='double*',
    buffer_mv='double[::1]',
    index='Py_ssize_t',
)
def resize_buffer(size, buffer_name):
    """Reallocate the named global buffer to the given size,
    updating both the raw pointer in the buffers array and the
    memoryview in the buffers_mv dict. As realloc is used, existing
    contents up to min(old, new) size are preserved.
    """
    if buffer_name not in buffers_mv:
        abort(f'Cannot resize buffer "{buffer_name}" as it does not exist')
    # The position of buffer_name within the insertion-ordered
    # buffers_mv dict equals the index into the C array of buffers.
    index = 0
    for key in buffers_mv:
        if key == buffer_name:
            break
        index += 1
    buffer = buffers[index]
    buffer = realloc(buffer, size*sizeof('double'))
    buffers[index] = buffer
    buffer_mv = cast(buffer, 'double[:size]')
    buffers_mv[buffer_name] = buffer_mv
# Initialise buffers.
# The buffers C array holds the raw double pointers, while buffers_mv
# maps buffer names to memoryviews over the same memory, in the same
# order. A single default buffer of size 1 is created under the name 0.
cython.declare(
    buffers='double**',
    buffer='double*',
    buffer_mv='double[::1]',
    buffers_mv=dict,
)
buffers = malloc(1*sizeof('double*'))
buffer = malloc(1*sizeof('double'))
buffers[0] = buffer
buffer_mv = cast(buffer, 'double[:1]')
buffers_mv = {}
buffers_mv[0] = buffer_mv
# Function computing basic domain information
# and collecting them into a namespace.
@lru_cache()
def get_domain_info():
    """Compute and return basic information about the domain
    decomposition (subdivisions, layout, local domain extent and
    reciprocal sizes), collected into a SimpleNamespace.
    The result is cached, so the computation runs only once.
    """
    # Number of subdivisions (domains) of the box
    # in each of the three dimensions.
    subdivisions = cutout_domains(nprocs)
    # The global 3D layout of the division of the box
    layout = arange(nprocs, dtype=C2np['int']).reshape(subdivisions)
    # The indices in domain_layout of the local domain
    layout_local_indices = asarray(
        np.unravel_index(rank, subdivisions),
        dtype=C2np['int'],
    )
    # The size of the domain, which is the same for all of them
    size_x = boxsize/subdivisions[0]
    size_y = boxsize/subdivisions[1]
    size_z = boxsize/subdivisions[2]
    # The start and end coordinates of the local domain
    bgn_x = layout_local_indices[0]*size_x
    bgn_y = layout_local_indices[1]*size_y
    bgn_z = layout_local_indices[2]*size_z
    end_x = bgn_x + size_x
    end_y = bgn_y + size_y
    end_z = bgn_z + size_z
    # Reciprocals of the domain sizes. To avoid future round-off errors,
    # these are constructed such that their product with boxsize is as
    # close to subdivisions[:] as possible without being equal.
    size_x_inv = 1./size_x
    size_y_inv = 1./size_y
    size_z_inv = 1./size_z
    while boxsize*size_x_inv < subdivisions[0]:
        size_x_inv = np.nextafter(size_x_inv, ထ)
    while boxsize*size_x_inv >= subdivisions[0]:
        size_x_inv = np.nextafter(size_x_inv, -ထ)
    while boxsize*size_y_inv < subdivisions[1]:
        size_y_inv = np.nextafter(size_y_inv, ထ)
    while boxsize*size_y_inv >= subdivisions[1]:
        size_y_inv = np.nextafter(size_y_inv, -ထ)
    while boxsize*size_z_inv < subdivisions[2]:
        size_z_inv = np.nextafter(size_z_inv, ထ)
    while boxsize*size_z_inv >= subdivisions[2]:
        size_z_inv = np.nextafter(size_z_inv, -ထ)
    # Return everything collected into a common namespace.
    # Note that **locals() captures every local name above, and so the
    # attribute names of the namespace are exactly the variable names
    # used in this function; do not rename them.
    domain_info = types.SimpleNamespace(**locals())
    return domain_info
# Get local domain information
domain_info = get_domain_info()
# Unpack the namespace into module-level, statically typed variables
# for fast access elsewhere in the module
cython.declare(
    domain_subdivisions='int[::1]',
    domain_subdivisions_2='int',
    domain_subdivisions_21='int',
    domain_layout='int[:, :, ::1]',
    domain_layout_local_indices='int[::1]',
    domain_size_x='double',
    domain_size_y='double',
    domain_size_z='double',
    domain_size_x_inv='double',
    domain_size_y_inv='double',
    domain_size_z_inv='double',
)
domain_subdivisions = domain_info.subdivisions
# Number of domains along the z direction, and within a y-z slab;
# useful as strides when flattening 3D domain indices
domain_subdivisions_2 = domain_info.subdivisions[2]
domain_subdivisions_21 = domain_info.subdivisions[1]*domain_info.subdivisions[2]
domain_layout = domain_info.layout
domain_layout_local_indices = domain_info.layout_local_indices
domain_size_x = domain_info.size_x
domain_size_y = domain_info.size_y
domain_size_z = domain_info.size_z
domain_size_x_inv = domain_info.size_x_inv
domain_size_y_inv = domain_info.size_y_inv
domain_size_z_inv = domain_info.size_z_inv
|
jmd-dkREPO_NAMEconceptPATH_START.@concept_extracted@concept-master@src@communication.py@.PATH_END.py
|
{
"filename": "_hoverongaps.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/contour/_hoverongaps.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class HoverongapsValidator(_plotly_utils.basevalidators.BooleanValidator):
    """Boolean validator for the `hoverongaps` attribute of `contour` traces."""

    def __init__(self, plotly_name="hoverongaps", parent_name="contour", **kwargs):
        # Fill in the defaults only when the caller did not override them
        kwargs.setdefault("edit_type", "none")
        kwargs.setdefault("role", "style")
        super(HoverongapsValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@contour@_hoverongaps.py@.PATH_END.py
|
{
"filename": "_labelpadding.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/carpet/baxis/_labelpadding.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class LabelpaddingValidator(_plotly_utils.basevalidators.IntegerValidator):
    """Integer validator for the `labelpadding` attribute of `carpet.baxis`."""

    def __init__(
        self, plotly_name="labelpadding", parent_name="carpet.baxis", **kwargs
    ):
        # Fill in the defaults only when the caller did not override them
        kwargs.setdefault("edit_type", "calc")
        kwargs.setdefault("role", "style")
        super(LabelpaddingValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@carpet@baxis@_labelpadding.py@.PATH_END.py
|
{
"filename": "_arrayminus.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/histogram/error_y/_arrayminus.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class ArrayminusValidator(_plotly_utils.basevalidators.DataArrayValidator):
    """Data-array validator for `arrayminus` on `histogram.error_y`."""

    def __init__(
        self, plotly_name="arrayminus", parent_name="histogram.error_y", **kwargs
    ):
        # Fill in the defaults only when the caller did not override them
        kwargs.setdefault("edit_type", "calc")
        kwargs.setdefault("role", "data")
        super(ArrayminusValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@histogram@error_y@_arrayminus.py@.PATH_END.py
|
{
"filename": "test_netiterintegrate.py",
"repo_name": "JohannesBuchner/UltraNest",
"repo_path": "UltraNest_extracted/UltraNest-master/tests/test_netiterintegrate.py",
"type": "Python"
}
|
from __future__ import print_function, division
import os
import numpy as np
from ultranest.store import TextPointStore
from ultranest.netiter import PointPile, TreeNode, count_tree, print_tree, dump_tree
from ultranest.netiter import SingleCounter, MultiCounter, BreadthFirstIterator
def integrate_singleblock(num_live_points, pointstore, x_dim, num_params, dlogz=0.5):
    """Classic single-block nested sampling integration, replayed
    from a point store.

    Pops `num_live_points` initial live points from *pointstore*, then
    iteratively replaces the worst live point, accumulating ln(Z), the
    information H and the weighted posterior samples. Returns a dict
    with keys niter, logz, logzerr and weighted_samples (v, w, logw, L).

    NOTE(review): the *dlogz* parameter is unused; termination is based
    solely on logz_remain < logz.
    """
    # Draw the initial live points (Lmin = -inf accepts any point)
    active_u = []
    active_v = []
    active_logl = []
    for i in range(num_live_points):
        idx, row = pointstore.pop(-np.inf)
        assert row is not None
        active_u.append(row[2:2+x_dim])
        active_v.append(row[2+x_dim:2+x_dim+num_params])
        active_logl.append(row[1])
    saved_v = []  # Stored points for posterior results
    saved_logl = []
    saved_logwt = []
    h = 0.0  # Information, initially 0.
    logz = -1e300  # ln(Evidence Z), initially Z=0
    logvol = 0
    # log of the volume fraction removed per iteration: ln(1 - e^{-1/N})
    logvolf = np.log1p(- np.exp(-1.0 / num_live_points))
    #fraction_remain = 1.0
    max_iters = 10000000
    for it in range(0, max_iters):
        # Worst object in collection and its weight (= volume * likelihood)
        worst = np.argmin(active_logl)
        logwt = logvol + logvolf + active_logl[worst]
        # Update evidence Z and information h.
        logz_new = np.logaddexp(logz, logwt)
        h = (np.exp(logwt - logz_new) * active_logl[worst] + np.exp(logz - logz_new) * (h + logz) - logz_new)
        logz = logz_new
        # Upper bound on the evidence still left in the live points
        logz_remain = np.max(active_logl) - it / num_live_points
        #print("L=%.1f N=%d V=%.2e logw=%.2e logZ=%.1f logZremain=%.1f" % (active_logl[worst], num_live_points, logvol, logwt, logz, logz_remain))
        # Shrink interval
        logvol -= 1.0 / num_live_points
        # Add worst object to samples.
        saved_v.append(np.array(active_v[worst]))
        saved_logwt.append(logwt)
        saved_logl.append(active_logl[worst])
        # The new likelihood constraint is that of the worst object.
        loglstar = active_logl[worst]
        idx, row = pointstore.pop(loglstar)
        assert row is not None
        u = row[2:2+x_dim]
        v = row[2+x_dim:2+x_dim+num_params]
        logl = row[1]
        active_u[worst] = u
        active_v[worst] = v
        active_logl[worst] = logl
        #fraction_remain = np.logaddexp(logz, logz_remain) - logz
        # Stopping criterion
        if logz_remain < logz:
            break
    # Flush the remaining live points into the posterior, each carrying
    # an equal share of the final volume
    logvol = -len(saved_v) / num_live_points
    for i in np.argsort(active_logl):
        logwt = logvol - np.log(num_live_points) + active_logl[i]
        logz_new = np.logaddexp(logz, logwt)
        h = (np.exp(logwt - logz_new) * active_logl[i] + np.exp(logz - logz_new) * (h + logz) - logz_new)
        logz = logz_new
        #print("L=%.1f N=%d V=%.2e logw=%.2e logZ=%.1f" % (active_logl[i], num_live_points, logvol, logwt, logz))
        saved_v.append(np.array(active_v[i]))
        saved_logwt.append(logwt)
        saved_logl.append(active_logl[i])
    saved_v = np.array(saved_v)
    saved_wt = np.exp(np.array(saved_logwt) - logz)
    saved_logl = np.array(saved_logl)
    logzerr = np.sqrt(h / num_live_points)
    results = dict(niter=it, logz=logz, logzerr=logzerr,
        weighted_samples=dict(v=saved_v, w = saved_wt, logw = saved_logwt, L=saved_logl),
    )
    return results
def strategy_advice(node, parallel_values, main_iterator, counting_iterators, rootid):
    """Suggest the likelihood range in which *node* should be expanded.

    Returns (Lmin, Lmax) spanning the current live-point likelihoods
    while the remainder estimate still dominates the evidence, and
    (nan, nan) once it no longer does. *counting_iterators* and
    *rootid* are accepted for interface compatibility but unused.
    """
    if len(node.children) > 0:
        # we don't expand if node already has children
        print("not expanding, already has children")
        assert False
        return np.nan, np.nan
    level_min = parallel_values.min()
    level_max = parallel_values.max()
    # if the remainder dominates, return that range
    if main_iterator.logZremain > main_iterator.logZ:
        return level_min, level_max
    return np.nan, np.nan
class __Point(object):
    """Simple container pairing a coordinate vector *u* with its
    corresponding parameter vector *p*. (Currently unused.)"""

    def __init__(self, u, p):
        self.u = u  # coordinates
        self.p = p  # parameters
def integrate_graph_singleblock(num_live_points, pointstore, x_dim, num_params, dlogz=0.5):
    """Nested sampling integration over an explicit exploration tree,
    with bootstrapped SingleCounter iterators for an error estimate.

    Replays points from *pointstore*; returns a dict with niter, logz,
    logzerr, weighted_samples and the full exploration tree.

    NOTE(review): the *dlogz* parameter is unused.
    """
    pp = PointPile(x_dim, num_params)
    def create_node(pointstore, Lmin):
        # Pop the next stored point above likelihood Lmin and turn it
        # into a tree node backed by the point pile
        idx, row = pointstore.pop(Lmin)
        assert row is not None
        L = row[1]
        u = row[2:2+x_dim]
        p = row[2+x_dim:2+x_dim+num_params]
        assert np.isfinite(L)
        return pp.make_node(L, u, p)
    # we create a bunch of live points from the prior volume
    # each of which is the start of a chord (in the simplest case)
    roots = [create_node(pointstore, -np.inf) for i in range(num_live_points)]
    iterator_roots = []
    np.random.seed(1)
    for i in range(10):
        # boot-strap which roots are assigned to this iterator
        rootids = np.unique(np.random.randint(len(roots), size=len(roots)))
        #print(rootids)
        iterator_roots.append((SingleCounter(random=True), rootids))
    # and we have one that operates on the entire tree
    main_iterator = SingleCounter()
    main_iterator.Lmax = max(n.value for n in roots)
    assert np.isfinite(main_iterator.Lmax)
    explorer = BreadthFirstIterator(roots)
    Llo, Lhi = -np.inf, np.inf
    strategy_stale = True
    saved_nodeids = []
    saved_logl = []
    # we go through each live point (regardless of root) by likelihood value
    while True:
        #print()
        next = explorer.next_node()
        if next is None:
            break
        rootid, node, (active_nodes, active_rootids, active_values, active_node_ids) = next
        # this is the likelihood level we have to improve upon
        Lmin = node.value
        saved_nodeids.append(node.id)
        saved_logl.append(Lmin)
        expand_node = Lmin <= Lhi and Llo <= Lhi
        # if within suggested range, expand
        if strategy_stale or not (Lmin <= Lhi):
            # check with advisor if we want to expand this node
            Llo, Lhi = strategy_advice(node, active_values, main_iterator, [], rootid)
            #print("L range to expand:", Llo, Lhi, "have:", Lmin, "=>", Lmin <= Lhi, Llo <= Lhi)
            strategy_stale = False
        strategy_stale = True
        if expand_node:
            # sample a new point above Lmin
            #print("replacing node", Lmin, "from", rootid, "with", L)
            node.children.append(create_node(pointstore, Lmin))
            main_iterator.Lmax = max(main_iterator.Lmax, node.children[0].value)
        else:
            #print("ending node", Lmin)
            pass
        # inform iterators (if it is their business) about the arc
        main_iterator.passing_node(node, active_values)
        for it, rootids in iterator_roots:
            if rootid in rootids:
                # restrict the live-point values to the bootstrap subset
                mask = np.in1d(active_rootids, rootids, assume_unique=True)
                #mask1 = np.array([rootid2 in rootids for rootid2 in active_rootids])
                #assert (mask1 == mask).all(), (mask1, mask)
                it.passing_node(node, active_values[mask])
        #print([it.H for it,_ in iterator_roots])
        explorer.expand_children_of(rootid, node)
    # points with weights
    #saved_u = np.array([pp[nodeid].u for nodeid in saved_nodeids])
    saved_v = pp.getp(saved_nodeids)
    saved_logwt = np.array(main_iterator.logweights)
    saved_wt = np.exp(saved_logwt - main_iterator.logZ)
    saved_logl = np.array(saved_logl)
    print('%.4f +- %.4f (main)' % (main_iterator.logZ, main_iterator.logZerr))
    # scatter of the bootstrapped estimates gives an alternative error bar
    Zest = np.array([it.logZ for it, _ in iterator_roots])
    print('%.4f +- %.4f (bs)' % (Zest.mean(), Zest.std()))
    results = dict(niter=len(saved_logwt),
        logz=main_iterator.logZ, logzerr=main_iterator.logZerr,
        weighted_samples=dict(v=saved_v, w = saved_wt, logw = saved_logwt, L=saved_logl),
        tree=TreeNode(-np.inf, children=roots),
    )
    # return entire tree
    return results
def multi_integrate_graph_singleblock(num_live_points, pointstore, x_dim, num_params, dlogz=0.5, withtests=False):
    """Nested sampling integration using the vectorized MultiCounter,
    which maintains the main estimator and 10 bootstrap estimators at once.

    With withtests=True, the insertion-order rank test is also tracked.
    Returns a dict with niter, logz, logzerr, weighted_samples and the
    exploration tree.

    NOTE(review): the *dlogz* parameter is unused.
    """
    pp = PointPile(x_dim, num_params)
    def create_node(pointstore, Lmin):
        # Pop the next stored point above likelihood Lmin and turn it
        # into a tree node backed by the point pile
        idx, row = pointstore.pop(Lmin)
        assert row is not None
        L = row[1]
        u = row[2:2+x_dim]
        p = row[2+x_dim:2+x_dim+num_params]
        return pp.make_node(L, u, p)
    # we create a bunch of live points from the prior volume
    # each of which is the start of a chord (in the simplest case)
    roots = [create_node(pointstore, -np.inf) for i in range(num_live_points)]
    # and we have one that operates on the entire tree
    main_iterator = MultiCounter(nroots=len(roots), nbootstraps=10, random=True, check_insertion_order=withtests)
    main_iterator.Lmax = max(n.value for n in roots)
    explorer = BreadthFirstIterator(roots)
    Llo, Lhi = -np.inf, np.inf
    strategy_stale = True
    saved_nodeids = []
    saved_logl = []
    # we go through each live point (regardless of root) by likelihood value
    while True:
        #print()
        next = explorer.next_node()
        if next is None:
            break
        rootid, node, (active_nodes, active_rootids, active_values, active_nodeids) = next
        assert not isinstance(rootid, float)
        # this is the likelihood level we have to improve upon
        Lmin = node.value
        saved_nodeids.append(node.id)
        saved_logl.append(Lmin)
        expand_node = Lmin <= Lhi and Llo <= Lhi
        # if within suggested range, expand
        if strategy_stale or not (Lmin <= Lhi):
            # check with advisor if we want to expand this node
            Llo, Lhi = strategy_advice(node, active_values, main_iterator, [], rootid)
            #print("L range to expand:", Llo, Lhi, "have:", Lmin, "=>", Lmin <= Lhi, Llo <= Lhi)
            strategy_stale = False
        strategy_stale = True
        if expand_node:
            # sample a new point above Lmin
            node.children.append(create_node(pointstore, Lmin))
            main_iterator.Lmax = max(main_iterator.Lmax, node.children[0].value)
        else:
            #print("ending node", Lmin)
            pass
        # inform iterators (if it is their business) about the arc
        assert not isinstance(rootid, float)
        main_iterator.passing_node(rootid, node, active_rootids, active_values)
        explorer.expand_children_of(rootid, node)
    print('tree size:', count_tree(roots))
    # points with weights
    #saved_u = pp.getu(saved_nodeids)
    saved_v = pp.getp(saved_nodeids)
    saved_logwt = np.array(main_iterator.logweights)
    saved_wt = np.exp(saved_logwt - main_iterator.logZ)
    saved_logl = np.array(saved_logl)
    print('%.4f +- %.4f (main)' % (main_iterator.logZ, main_iterator.logZerr))
    # entry 0 is the main estimator; 1: are the bootstrap replicas
    print('%.4f +- %.4f (bs)' % (main_iterator.all_logZ[1:].mean(), main_iterator.all_logZ[1:].std()))
    if withtests:
        print("insertion order:", float(main_iterator.insertion_order_runlength))
    results = dict(niter=len(saved_logwt),
        logz=main_iterator.logZ, logzerr=main_iterator.logZerr,
        weighted_samples=dict(v=saved_v, w = saved_wt, logw = saved_logwt, L=saved_logl),
        tree=TreeNode(-np.inf, children=roots),
    )
    # return entire tree
    return results
# Pre-recorded point store (TSV of sampled egg-box points) used by the tests below
testfile = os.path.join(os.path.dirname(__file__), 'eggboxpoints.tsv')
import time
import pytest
@pytest.mark.parametrize("nlive", [100])
def test_singleblock(nlive):
    """Run all three integrator variants on the recorded point store
    and check that they agree on logZ (within np.isclose tolerance).
    """
    assert os.path.exists(testfile), ("%s does not exist" % testfile)
    print("="*80)
    print("NLIVE=%d " % nlive)
    print("Standard integrator")
    # columns: index, logL, 2 u-coordinates, 2 parameters
    pointstore = TextPointStore(testfile, 2 + 2 + 2)
    t = time.time()
    result = integrate_singleblock(num_live_points=nlive, pointstore=pointstore, num_params=2, x_dim=2)
    print(' %(logz).1f +- %(logzerr).1f in %(niter)d iter' % result, '%.2fs' % (time.time() - t))
    pointstore.close()
    print("Graph integrator")
    pointstore = TextPointStore(testfile, 2 + 2 + 2)
    t = time.time()
    result2 = integrate_graph_singleblock(num_live_points=nlive, pointstore=pointstore, num_params=2, x_dim=2)
    print(' %(logz).1f +- %(logzerr).1f in %(niter)d iter' % result2, '%.2fs' % (time.time() - t))
    pointstore.close()
    assert np.isclose(result2['logz'], result['logz'])
    print("Vectorized graph integrator")
    pointstore = TextPointStore(testfile, 2 + 2 + 2)
    t = time.time()
    result3 = multi_integrate_graph_singleblock(num_live_points=nlive, pointstore=pointstore, num_params=2, x_dim=2)
    print(' %(logz).1f +- %(logzerr).1f in %(niter)d iter' % result3, '%.2fs' % (time.time() - t))
    pointstore.close()
    assert np.isclose(result3['logz'], result['logz'])
    print("Vectorized graph integrator with insertion order test")
    pointstore = TextPointStore(testfile, 2 + 2 + 2)
    t = time.time()
    result3 = multi_integrate_graph_singleblock(num_live_points=nlive, pointstore=pointstore, num_params=2, x_dim=2, withtests=True)
    print(' %(logz).1f +- %(logzerr).1f in %(niter)d iter' % result3, '%.2fs' % (time.time() - t))
    pointstore.close()
    assert np.isclose(result3['logz'], result['logz'])
def test_visualisation():
    """Build a small random tree and exercise the tree printing helpers."""
    print("testing tree visualisation...")
    pile = PointPile(1, 1)
    root = TreeNode()
    for n_children in range(5):
        value = np.random.randint(1000)
        parent = pile.make_node(value, np.array([value]), np.array([value]))
        for _ in range(n_children):
            value = np.random.randint(1000)
            parent.children.append(pile.make_node(value, [value], [value]))
        root.children.append(parent)
    print(root)
    print_tree(root.children, title='Empty Tree')
def test_treedump():
    """Build a small random tree and round-trip it through dump_tree."""
    print("testing tree dumping...")
    pile = PointPile(1, 1)
    root = TreeNode()
    for n_children in range(5):
        value = np.random.randint(1000)
        parent = pile.make_node(value, np.array([value]), np.array([value]))
        for _ in range(n_children):
            value = np.random.randint(1000)
            parent.children.append(pile.make_node(value, [value], [value]))
        root.children.append(parent)
    # positional args, keyword args, and overwriting an existing file
    dump_tree("test_tree.hdf5", root.children, pile)
    os.remove("test_tree.hdf5")
    dump_tree("test_tree.hdf5", roots=root.children, pointpile=pile)
    dump_tree("test_tree.hdf5", root.children, pile)
    os.remove("test_tree.hdf5")
if __name__ == '__main__':
    # When run as a script, compare the integrators for several
    # live-point counts (pytest only runs nlive=100)
    for nlive in [100, 400, 2000]:
        test_singleblock(nlive)
    #pointstore = TextPointStore(testfile, 2 + 2 + 2)
    #nlive = 400
    #multi_integrate_graph_singleblock(num_live_points=nlive, pointstore=pointstore, num_params=2, x_dim=2)
    #pointstore.close()
|
JohannesBuchnerREPO_NAMEUltraNestPATH_START.@UltraNest_extracted@UltraNest-master@tests@test_netiterintegrate.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "wmpg/WesternMeteorPyLib",
"repo_path": "WesternMeteorPyLib_extracted/WesternMeteorPyLib-master/wmpl/MetSim/__init__.py",
"type": "Python"
}
|
wmpgREPO_NAMEWesternMeteorPyLibPATH_START.@WesternMeteorPyLib_extracted@WesternMeteorPyLib-master@wmpl@MetSim@__init__.py@.PATH_END.py
|
|
{
"filename": "st_instance.py",
"repo_name": "simonsobs/nextline-rdb",
"repo_path": "nextline-rdb_extracted/nextline-rdb-main/src/nextline_rdb/alembic/models/rev_4dc6a93dfed8/strategies/st_instance.py",
"type": "Python"
}
|
from typing import Optional
from hypothesis import strategies as st
from .. import Model
@st.composite
def st_model_instance_list(
    draw: st.DrawFn,
    min_size: int = 0,
    max_size: Optional[int] = None,
) -> list[Model]:
    """A strategy for a list of `Model` instances.

    `Script` and `Run` instances are drawn explicitly; instances of the
    other models arise as related objects of those scripts and runs.
    """
    from .st_run import st_model_run_list
    from .st_script import st_model_script_list

    # Draw the `Script` models first
    script_models = draw(st_model_script_list(min_size=min_size, max_size=max_size))

    # The runs only need to cover whatever the scripts did not
    n_scripts = len(script_models)
    runs_min = max(0, min_size - n_scripts)
    runs_max = None if max_size is None else max_size - n_scripts

    # Draw the `Run` models, attached to the scripts drawn above
    run_models = draw(
        st_model_run_list(
            generate_traces=True,
            min_size=runs_min,
            max_size=runs_max,
            scripts=script_models,
        )
    )

    models: list[Model] = [*script_models, *run_models]
    return models
|
simonsobsREPO_NAMEnextline-rdbPATH_START.@nextline-rdb_extracted@nextline-rdb-main@src@nextline_rdb@alembic@models@rev_4dc6a93dfed8@strategies@st_instance.py@.PATH_END.py
|
{
"filename": "run_test.py",
"repo_name": "zachetienne/nrpytutorial",
"repo_path": "nrpytutorial_extracted/nrpytutorial-master/UnitTesting/run_test.py",
"type": "Python"
}
|
# Step 1.a: Initialize core Python/UnitTesting modules
from UnitTesting.calc_error import calc_error
from UnitTesting.evaluate_globals import evaluate_globals
from UnitTesting.first_time_print import first_time_print
from UnitTesting.cse_simplify_and_evaluate_sympy_expressions import cse_simplify_and_evaluate_sympy_expressions
from UnitTesting.standard_constants import precision
from mpmath import mp
from importlib import import_module
import logging
def run_test(self):
    """Run a single regression test for self.function in self.module_name.

    Evaluates the module's globals symbolically, assigns random values to
    the free symbols and compares the numerical results against the entry
    in trusted_values_dict. On the first run there is no trusted entry:
    the computed values are printed for inclusion and the test fails
    deliberately so the user adds them.
    """
    # Step 1: Setup

    logging.info(' Currently working on function ' + self.function + ' in module ' + self.module_name + '...\n')

    # Step 1.a: Set precision to the value defined in standard_constants
    mp.dps = precision

    # Step 1.b: Import trusted_values_dict from trusted_values_dict.py in self.path
    logging.info(' Importing trusted_values_dict...')
    self.trusted_values_dict = import_module('trusted_values_dict').trusted_values_dict
    logging.info(' ...Success: Imported trusted_values_dict.\n')

    # Step 1.c: Set boolean self.first_time based on existence of desired trusted_values_dict entry
    self.first_time = self.trusted_values_dict_name not in self.trusted_values_dict
    if self.first_time:
        logging.info(' Proper entry not in trusted_values_dict -- '
                     'this function in this module is being run for the first time.')

    # Step 1.e: Set trusted_values_dict_entry to its corresponding trusted_values_dict entry
    self.trusted_values_dict_entry = {} if self.first_time else self.trusted_values_dict[self.trusted_values_dict_name]

    # Step 2: Calculation

    # Step 2.a: Call evaluate_globals which calls self.function and gets expressions for all globals in self.global_list
    logging.info(' Calling evaluate_globals...')
    self.variable_dict = evaluate_globals(self)
    logging.info(' ...Success: evaluate_globals ran without errors.\n')

    # Step 2.b: Call cse_simplify_and_evaluate_sympy_expressions to assign each variable in each expression a random
    # value and calculate the numerical result
    logging.info(' Calling cse_simplify_and_evaluate_sympy_expressions...')
    self.calculated_dict = cse_simplify_and_evaluate_sympy_expressions(self)
    logging.info(' ...Success: cse_simplify_and_evaluate_sympy_expressions ran without errors.\n')

    # Step 3: Comparison

    if self.first_time:
        # Step 3.a: Print self.calculated_dict in a nice format and append it to trusted_values_dict
        logging.info(' Calling first_time_print since it is being run for the first time...')
        first_time_print(self)
        logging.info(' ...Success: first_time_print ran without errors. Automatically failing due to first_time.\n')
        self.assertTrue(False)
    else:
        # Step 3.b: Call calc_error to calculate the error between the trusted values and the calculated values
        logging.info(' Calling calc_error...')
        values_identical = calc_error(self)
        # If there is an error large enough, fail
        if not values_identical:
            self.assertTrue(values_identical,
                            'Variable(s) above have different calculated and trusted values. Follow '
                            'instructions above.')
        # Otherwise, pass
        else:
            logging.info(' ...Success: calc_error ran without errors.\n')
|
zachetienneREPO_NAMEnrpytutorialPATH_START.@nrpytutorial_extracted@nrpytutorial-master@UnitTesting@run_test.py@.PATH_END.py
|
{
"filename": "_variant.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/funnelarea/title/font/_variant.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class VariantValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    """Enumerated validator for the `variant` attribute of
    `funnelarea.title.font`."""

    def __init__(
        self, plotly_name="variant", parent_name="funnelarea.title.font", **kwargs
    ):
        # Fill in the defaults only when the caller did not override them
        kwargs.setdefault("array_ok", True)
        kwargs.setdefault("edit_type", "plot")
        kwargs.setdefault(
            "values",
            [
                "normal",
                "small-caps",
                "all-small-caps",
                "all-petite-caps",
                "petite-caps",
                "unicase",
            ],
        )
        super(VariantValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@funnelarea@title@font@_variant.py@.PATH_END.py
|
{
"filename": "job_server.py",
"repo_name": "amusecode/amuse",
"repo_path": "amuse_extracted/amuse-main/src/amuse/ext/job_server.py",
"type": "Python"
}
|
"""
JobServer: class for job farming using amuse communication channels
usage:
start jobserver
jobserver=JobServer(hosts=<list of hostnames> [ ,channel_type="mpi", preamble="<commands>", retry_jobs=True/ False] )
submit job
job=jobserver.submit_job(somework, (args,))
wait for one result (encounters all):
jobserver.wait()
job=jobserver.last_finished_job
wait for all to finish, loop over all:
jobserver.waitall()
for job in jobserver.finished_jobs:
print job.result
it is essential that the functions which are to be executed remotely are pickleable, i.e. they must not be
defined in the main module. An easy way to achieve this is to import them from a separate file.
2 issues to be fixed:
- blocking startup of hosts may prevent threads shutting down, leading to freeze at end of script
(so manual kill necessary)
- thread function _startup contains references to JobServer, hence del jobserver will actually not be called
until the situation of issue 1 is resolved (so the warning given there is useless)
"""
from amuse.rfi.core import *
import pickle
from amuse.rfi.async_request import AsyncRequestsPool
import inspect
from collections import deque
import threading
from time import sleep
import warnings
import base64
def dump_and_encode(x):
    """Pickle *x* and return the result as a base64 text string."""
    raw = pickle.dumps(x)
    return base64.b64encode(raw).decode()
def decode_and_load(x):
    """Inverse of dump_and_encode: decode the base64 text string *x*
    and unpickle the contained object."""
    raw = base64.b64decode(x.encode())
    return pickle.loads(raw)
class RemoteCodeException(Exception):
    """Wrapper carrying an exception that was raised on the remote side."""

    def __init__(self, ex=None):
        self.ex = ex  # the original remote exception (or None)

    def __str__(self):
        return "[{0}] {1}".format(self.ex.__class__.__name__, self.ex)
class RemoteCodeImplementation(object):
    """Server-side implementation running on the remote host.

    Keeps a persistent namespace (self.scope) in which statements are
    executed; all inputs and outputs cross the channel as base64-encoded
    pickles. Each method returns an encoded None on success or an
    encoded RemoteCodeException on failure.

    NOTE: exec/eval of caller-supplied code is the entire point of this
    class; it must only ever receive input from the trusted submitting
    process.
    """
    def __init__(self):
        # The namespace shared by all subsequent _exec/_eval/_assign/_func
        # calls; seed it with the codec helpers so they can be eval'd below
        self.scope={}
        self.scope['dump_and_encode']=dump_and_encode
        self.scope['decode_and_load']=decode_and_load
    def _exec(self,arg):
        # Execute the statement(s) in *arg* inside the persistent scope
        try:
            exec(arg, self.scope)
            return dump_and_encode(None)
        except Exception as ex:
            return dump_and_encode(RemoteCodeException(ex))
    def _eval(self,arg,argout):
        # Evaluate the expression *arg*; the encoded result goes to argout
        try:
            self.scope.update(dict(arg=arg))
            exec("argout="+arg, self.scope)
            argout.value=eval("dump_and_encode(argout)",self.scope)
            return dump_and_encode(None)
        except Exception as ex:
            argout.value=dump_and_encode("")
            return dump_and_encode(RemoteCodeException(ex))
    def _assign(self,lhs,argin):
        # Assign the decoded value of *argin* to the name/target *lhs*
        try:
            self.scope.update(dict(argin=argin))
            exec(lhs+"=decode_and_load(argin)", self.scope)
            return dump_and_encode(None)
        except Exception as ex:
            return dump_and_encode(RemoteCodeException(ex))
    def _func(self,func,argin,kwargin,argout):
        # Decode a function plus its args/kwargs, call it inside the
        # scope and return the encoded result via argout
        try:
            self.scope.update(dict(func=func,argin=argin,kwargin=kwargin))
            exec("func=decode_and_load(func)", self.scope)
            exec("arg=decode_and_load(argin)", self.scope)
            exec("kwarg=decode_and_load(kwargin)", self.scope)
            exec("result=func(*arg,**kwarg)", self.scope)
            argout.value=eval("dump_and_encode(result)",self.scope)
            return dump_and_encode(None)
        except Exception as ex:
            argout.value=dump_and_encode(None)
            return dump_and_encode(RemoteCodeException(ex))
class RemoteCodeInterface(PythonCodeInterface):
    """Client-side interface to a RemoteCodeImplementation worker.

    The @legacy_function stubs declare the low-level string-based channel
    calls; execute/assign/evaluate/func/async_func wrap them with the
    pickle+base64 codec and re-raise any remote exception locally.
    """
    def __init__(self, **options):
        PythonCodeInterface.__init__(self, RemoteCodeImplementation, **options)
    @legacy_function
    def _func():
        # call a pickled function with pickled args/kwargs; result via argout
        function = LegacyFunctionSpecification()
        function.addParameter('func', dtype='string', direction=function.IN)
        function.addParameter('argin', dtype='string', direction=function.IN)
        function.addParameter('kwargin', dtype='string', direction=function.IN)
        function.addParameter('argout', dtype='string', direction=function.OUT)
        function.result_type = 'string'
        return function
    @legacy_function
    def _exec():
        # execute a statement remotely; returns encoded error status only
        function = LegacyFunctionSpecification()
        function.addParameter('arg', dtype='string', direction=function.IN)
        function.result_type = 'string'
        return function
    @legacy_function
    def _eval():
        # evaluate an expression remotely; result via argout
        function = LegacyFunctionSpecification()
        function.addParameter('arg', dtype='string', direction=function.IN)
        function.addParameter('argout', dtype='string', direction=function.OUT)
        function.result_type = 'string'
        return function
    @legacy_function
    def _assign():
        # assign a pickled value to a remote name
        function = LegacyFunctionSpecification()
        function.addParameter('lhs', dtype='string', direction=function.IN)
        function.addParameter('argin', dtype='string', direction=function.IN)
        function.result_type = 'string'
        return function
    def execute(self,express):
        """Execute the statement *express* remotely; raise on remote error."""
        err=decode_and_load( self._exec(express) )
        if err:
            raise err
    def assign(self,lhs,arg):
        """Assign the (picklable) value *arg* to the remote name *lhs*."""
        err=decode_and_load( self._assign(lhs, dump_and_encode(arg)) )
        if err:
            raise err
    def evaluate(self,express):
        """Evaluate the expression *express* remotely and return its value."""
        result,err=self._eval(express)
        err=decode_and_load( err)
        if err :
            raise err
        return decode_and_load(result)
    def func(self,f,*args,**kwargs):
        """Call f(*args, **kwargs) remotely (blocking) and return the result."""
        result,err=self._func( dump_and_encode(f),
                               dump_and_encode(args),
                               dump_and_encode(kwargs) )
        err=decode_and_load( err)
        if err :
            raise err
        return decode_and_load(result)
    def async_func(self,f,*args,**kwargs):
        """Call f(*args, **kwargs) remotely, returning an async request.

        A result handler is attached that decodes the return value and
        re-raises any remote exception when the request is resolved.
        """
        request=self._func.asynchronous(dump_and_encode(f),
                                        dump_and_encode(args),
                                        dump_and_encode(kwargs) )
        # NOTE(review): this inner f shadows the parameter f, which is
        # harmless here since f was already encoded above
        def f(x):
            result,err=x()
            err=decode_and_load( err)
            if err :
                raise err
            return decode_and_load(result)
        request.add_result_handler( f )
        return request
class Job(object):
    """Record describing one submitted piece of work.

    Holds the callable with its arguments, the retry count, and the
    slots (result, request, err) that the job server fills in as the
    job progresses.
    """

    def __init__(self, f, args, kwargs, retries=0):
        # the work to be performed remotely
        self.f = f
        self.args = args
        self.kwargs = kwargs
        self.retries = retries  # how many times this job was restarted
        # filled in by the job server
        self.result = None
        self.request = None
        self.err = None
class JobServer(object):
    def __init__(self,hosts=[],channel_type="mpi",preamble=None, retry_jobs=True,
                       no_wait=True,verbose=True,max_retries=2, use_threading=False):
        """Set up the job server and connect the given hosts.

        hosts: hostnames to start workers on (NOTE(review): the []
            default is never mutated, so the shared-default pitfall
            does not bite here)
        channel_type: AMUSE communication channel, e.g. "mpi"
        preamble: statement(s) executed on each worker at startup
        retry_jobs/max_retries: resubmit jobs whose worker failed
        no_wait: return as soon as the first worker is available
        use_threading: start the workers from daemon threads
        """
        self.hosts=[]
        self.job_list=deque()       # jobs waiting for a worker
        self.idle_codes=[]          # workers currently without a job
        self.retry_jobs=retry_jobs
        self.max_retries=max_retries
        self._finished_jobs=deque() # completed jobs, consumed by wait()
        self.preamble=preamble
        self.pool=AsyncRequestsPool()
        self.number_available_codes=0
        self.number_starting_codes=0
        self.no_wait=no_wait
        self.last_finished_job=None
        self.use_threading=use_threading
        self.verbose=verbose
        if self.verbose:
            print("AMUSE JobServer launching")
        self.add_hosts(hosts=hosts,channel_type=channel_type)
def no_hosts(self):
if self.number_available_codes==0 and self.number_starting_codes==0:
return True
return False
    def add_hosts(self,hosts=[],channel_type="mpi"):
        """Start a worker on each of the given hosts.

        With use_threading, the workers are started from daemon threads;
        otherwise they are started sequentially (blocking). With no_wait,
        this returns as soon as the first worker is available.
        """
        # NOTE(review): this appends the *list* of hosts as a single
        # element, so self.hosts becomes a list of lists (one entry per
        # add_hosts call) — confirm whether extend() was intended.
        self.hosts.append(hosts)
        if self.verbose:
            print("JobServer: connecting %i hosts"%len(hosts))
        if not self.use_threading:
            # sequential, blocking startup of each worker
            for host in hosts:
                self.number_starting_codes+=1
                self._startup( channel_type=channel_type,hostname=host,label=host,
                    copy_worker_code=True,redirection="none" )
        else:
            # start one daemon thread per host
            threads=[]
            for host in hosts:
                kwargs=dict( channel_type=channel_type,hostname=host,label=host,
                    copy_worker_code=True,redirection="none" )
                threads.append( threading.Thread(target=self._startup,kwargs=kwargs) )
            for thread in threads:
                self.number_starting_codes+=1
                thread.daemon=True
                thread.start()
            if not self.no_wait:
                # block until every startup thread has finished
                if self.verbose:
                    print("... waiting")
                for thread in threads:
                    thread.join()
            else:
                # block only until the first worker is up (or all failed)
                if self.verbose:
                    print("... waiting for first available host")
                while self.number_available_codes==0 and self.number_starting_codes>0:
                    sleep(0.1)
        if self.no_wait:
            if self.verbose:
                print("JobServer: launched")
        else:
            if self.verbose:
                print("JobServer: launched with", len(self.idle_codes),"hosts")
    def _startup(self, *args,**kwargs):
        """Start one RemoteCodeInterface worker (possibly from a thread).

        On failure, only the starting-codes counter is decremented; on
        success the worker runs the preamble and either picks up a queued
        job immediately or is parked in idle_codes.
        """
        try:
            code=RemoteCodeInterface(*args,**kwargs)
        except Exception as ex:
            self.number_starting_codes-=1
            print("JobServer: startup failed on", kwargs['hostname'] or "default")
            print(ex)
        else:
            if self.preamble is not None:
                code.execute(self.preamble)
            self.number_available_codes+=1
            self.number_starting_codes-=1
            if self.no_wait:
                # report progress only at powers of two to limit output
                if self.number_available_codes & (self.number_available_codes-1) ==0:
                    if self.verbose:
                        print("JobServer: hosts now available:",self.number_available_codes)
            if self.number_starting_codes==0:
                if self.verbose:
                    print("JobServer: hosts in total:", self.number_available_codes)
            # put the new worker straight to work if jobs are waiting
            if self.job_list:
                self._add_job(self.job_list.popleft(), code)
            else:
                self.idle_codes.append(code)
def exec_(self,arg):
while self.number_starting_codes>0:
sleep(0.1)
self.waitall()
for code in self.idle_codes:
code.execute(arg)
def submit_job(self,f,args=(),kwargs={}):
if len(self.pool)==0 and not self.job_list:
if self.verbose:
print("JobServer: submitting first job on queue")
job=Job(f,args,kwargs)
self.job_list.append( job)
if self.idle_codes:
self._add_job(self.job_list.popleft(), self.idle_codes.pop())
return job
def wait(self):
    """Block until one job has finished.

    Returns True and sets ``self.last_finished_job`` when a job completed;
    returns False when nothing is queued or running.
    """
    if self._finished_jobs:
        # A job already finished since the last call; consume it.
        self.last_finished_job=self._finished_jobs.popleft()
        return True
    elif len(self.pool)==0 and not self.job_list:
        if self.verbose:
            print("JobServer: no more jobs on queue or running")
        return False
    else:
        # Jobs are queued but none is running yet: spin until a startup
        # thread brings a host up (it dispatches queued jobs itself).
        while len(self.pool)==0 and self.job_list:
            if self.number_available_codes>0:
                raise Exception("JobServer: this should not happen")
            if self.number_starting_codes==0:
                raise Exception("JobServer: no codes available")
        self.pool.wait()
        self.last_finished_job=self._finished_jobs.popleft()
        return True
def waitall(self):
    """Block until every queued and running job has finished."""
    # Nothing running yet: wait for at least one host to come up
    # (its startup thread dispatches the first queued job).
    while len(self.pool)==0 and self.job_list:
        if self.number_available_codes>0:
            raise Exception("JobServer: this should not happen")
        if self.number_starting_codes==0:
            raise Exception("JobServer: no codes available")
    # Drain the pool; finished jobs accumulate in _finished_jobs.
    while len(self.pool)>0 or self.job_list:
        self.pool.wait()
        self.last_finished_job=self._finished_jobs[-1]
@property
def finished_jobs(self):
    """Generator over finished jobs; consumes the internal queue as it yields."""
    while self._finished_jobs:
        yield self._finished_jobs.popleft()
def _finalize_job(self,request,job,code):
    """Pool callback: record a job's outcome and recycle or retire its host."""
    try:
        job.result=request.result()
        job.err=None
    except Exception as ex:
        job.result=None
        job.err=ex
    if job.err and not isinstance(job.err,RemoteCodeException):
        # A failure that is not a remote-side exception means the channel
        # itself is broken: drop the host rather than returning it to the
        # idle set.
        del code
        self.number_available_codes-=1
        if self.retry_jobs and job.retries<self.max_retries:
            # Re-queue a fresh Job carrying an incremented retry count.
            retry=Job(job.f,job.args,job.kwargs,job.retries+1)
            self.job_list.append(retry)
    else:
        self.idle_codes.append(code)
    # Keep the pipeline full: immediately dispatch the next queued job.
    if self.job_list and self.idle_codes:
        self._add_job( self.job_list.popleft(), self.idle_codes.pop())
    if not self.job_list:
        if self.verbose:
            print("JobServer: last job dispatched")
    self._finished_jobs.append(job)
def _add_job(self, job, code):
    """Dispatch *job* on worker *code* and register its completion callback."""
    request = code.async_func(job.f, *job.args, **job.kwargs)
    job.request = request
    self.pool.add_request(request, self._finalize_job, [job, code])
def __del__(self):
    """Best-effort shutdown: drain remaining jobs and stop idle workers."""
    if not self.no_hosts():
        self.waitall()
    if self.job_list:
        warnings.warn("JobServer: Warning: shutting down with unfinished jobs")
    for code in self.idle_codes:
        code.stop()
    if self.number_starting_codes>0:
        # Startup threads are daemonic; they may still be mid-connect here.
        warnings.warn("JobServer: Warning: some hosts startup threads possibly blocking")
|
amusecodeREPO_NAMEamusePATH_START.@amuse_extracted@amuse-main@src@amuse@ext@job_server.py@.PATH_END.py
|
{
"filename": "Subversion.py",
"repo_name": "rat-pac/rat-pac",
"repo_path": "rat-pac_extracted/rat-pac-master/python/SCons/Tool/Subversion.py",
"type": "Python"
}
|
"""SCons.Tool.Subversion.py
Tool-specific initialization for Subversion.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/Subversion.py 4043 2009/02/23 09:06:45 scons"
import os.path
import SCons.Action
import SCons.Builder
import SCons.Util
def generate(env):
    """Add a Builder factory function and construction variables for
    Subversion to an Environment."""

    def SubversionFactory(repos, module='', env=env):
        """Return a Builder that fetches files from the repository *repos*.

        *module* is an optional sub-path inside the repository.
        """
        # fail if repos is not an absolute path name?
        if module != '':
            # Ensure $SVNMODULE ends with a path separator so it can be
            # concatenated directly with $TARGET in $SVNCOM.
            module = os.path.join(module, '')
        act = SCons.Action.Action('$SVNCOM', '$SVNCOMSTR')
        return SCons.Builder.Builder(action = act,
                                     env = env,
                                     SVNREPOSITORY = repos,
                                     SVNMODULE = module)

    #setattr(env, 'Subversion', SubversionFactory)
    env.Subversion = SubversionFactory

    env['SVN']      = 'svn'
    env['SVNFLAGS'] = SCons.Util.CLVar('')
    env['SVNCOM']   = '$SVN $SVNFLAGS cat $SVNREPOSITORY/$SVNMODULE$TARGET > $TARGET'
def exists(env):
    """Report whether the 'svn' executable can be located by *env*."""
    svn_path = env.Detect('svn')
    return svn_path
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
rat-pacREPO_NAMErat-pacPATH_START.@rat-pac_extracted@rat-pac-master@python@SCons@Tool@Subversion.py@.PATH_END.py
|
{
"filename": "_variant.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/histogram2d/legendgrouptitle/font/_variant.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class VariantValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    """Enumerated validator for ``histogram2d.legendgrouptitle.font.variant``."""

    def __init__(
        self,
        plotly_name="variant",
        parent_name="histogram2d.legendgrouptitle.font",
        **kwargs,
    ):
        # CSS font-variant keywords accepted by this property.
        allowed_variants = [
            "normal",
            "small-caps",
            "all-small-caps",
            "all-petite-caps",
            "petite-caps",
            "unicase",
        ]
        edit_type = kwargs.pop("edit_type", "style")
        values = kwargs.pop("values", allowed_variants)
        super(VariantValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            values=values,
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@histogram2d@legendgrouptitle@font@_variant.py@.PATH_END.py
|
{
"filename": "simple_dust_2023.py",
"repo_name": "sdss/idlspec2d",
"repo_path": "idlspec2d_extracted/idlspec2d-master/python/boss_drp/prep/simple_dust_2023.py",
"type": "Python"
}
|
import h5py
import healpy as hp
import numpy as np
import json
from os import getenv
import os.path as ptt
def find_nearest_idx(value, array):
    """Return, for each entry of *value*, the index of the closest entry of *array*.

    *value* may be a scalar or sequence; the result is always a 1-D integer
    array. Ties break toward the lower index (``argmin`` behavior), matching
    the original per-element loop.
    """
    value = np.atleast_1d(value)
    array = np.asarray(array)
    # Broadcast to an (n_value, n_array) distance table and take the
    # closest array entry per value — one vectorized pass instead of a
    # Python loop over value.
    return np.abs(value[:, None] - array[None, :]).argmin(axis=1)
class simple_dust_2023:
    """Lookup of extinction from a multi-resolution 3D dust map.

    The HDF5 map file provides extinction on a HEALPix grid (nest ordering)
    sampled at a set of distance bin centers.
    """

    def __init__(self, mapfile=None):
        self.load_map(mapfile=mapfile)

    def load_map(self, mapfile=None):
        """Load the dust-map arrays from *mapfile* (or the dustmaps default).

        A *mapfile* that does not exist now actually falls back to the
        configured default (previously the missing path was still opened
        after printing the fallback message).
        """
        if mapfile is not None and not ptt.exists(mapfile):
            print(f'{mapfile} not found using default')
            mapfile = None
        if mapfile is None:
            with open(getenv('DUSTMAPS_CONFIG_FNAME')) as config:
                conf = json.load(config)
            mapfile = ptt.join(conf['data_dir'], 'manual', 'map3d_multires.h5')
        f1 = h5py.File(mapfile, 'r')
        self._distance_centers = f1['distance_centers'][:]
        self._extinction = f1['extinction'][:]
        self._extinction_variance = f1['extinction_variance'][:]
        self._nside = f1['nside'][:]

    def _find_data_idx(self, b, l, nside=512, nest=True):
        """Map galactic latitude *b* / longitude *l* (degrees) to HEALPix pixels.

        Returns -1 for latitudes outside [-90, 90]; scalar in, scalar out.
        """
        if isinstance(l, list):
            l = np.asarray(l)
        if isinstance(b, list):
            b = np.asarray(b)
        theta = np.radians(90. - b)
        phi = np.radians(l)
        if not hasattr(l, '__len__'):
            if (b < -90.) or (b > 90.):
                return -1
            return hp.pixelfunc.ang2pix(nside, theta, phi, nest=nest)
        valid = (b >= -90.) & (b <= 90.)
        pix_idx = np.empty(l.shape, dtype='i8')
        pix_idx[valid] = hp.pixelfunc.ang2pix(nside, theta[valid], phi[valid],
                                              nest=nest)
        pix_idx[~valid] = -1
        return pix_idx

    def query(self, coords):
        """Return extinction at *coords* (galactic frame with distance in kpc).

        Entries off the sky map or outside the map's distance range are NaN.
        """
        d = coords.distance.kpc
        pix_idx = self._find_data_idx(coords.l.deg, coords.b.deg)
        # Far edge of the map: last bin center plus half the last bin width.
        half_width = (self._distance_centers[-1] - self._distance_centers[-2]) / 2
        far_edge = self._distance_centers[-1] + half_width
        # Boolean masks instead of np.where index arrays: applying ``~`` to
        # an *index* array is a bitwise complement and flagged the wrong
        # elements in the original implementation.
        in_bounds = pix_idx != -1
        in_dist = (d > 0.0) & (d < far_edge)
        ret = np.asarray(
            self._extinction[find_nearest_idx(d, self._distance_centers),
                             pix_idx],
            dtype='f4')
        ret[~in_bounds] = np.nan
        ret[~in_dist] = np.nan
        return ret
|
sdssREPO_NAMEidlspec2dPATH_START.@idlspec2d_extracted@idlspec2d-master@python@boss_drp@prep@simple_dust_2023.py@.PATH_END.py
|
{
"filename": "_thicknessmode.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/histogram2d/colorbar/_thicknessmode.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class ThicknessmodeValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    """Enumerated validator for ``histogram2d.colorbar.thicknessmode``."""

    def __init__(
        self, plotly_name="thicknessmode", parent_name="histogram2d.colorbar", **kwargs
    ):
        edit_type = kwargs.pop("edit_type", "colorbars")
        role = kwargs.pop("role", "style")
        # The colorbar thickness is either a fraction of the plot or pixels.
        values = kwargs.pop("values", ["fraction", "pixels"])
        super(ThicknessmodeValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            role=role,
            values=values,
            **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@histogram2d@colorbar@_thicknessmode.py@.PATH_END.py
|
{
"filename": "tpu_outside_compilation_test.py",
"repo_name": "tensorflow/tensorflow",
"repo_path": "tensorflow_extracted/tensorflow-master/tensorflow/python/tpu/tpu_outside_compilation_test.py",
"type": "Python"
}
|
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for TPU outside compilation."""
import os
import tempfile
from absl.testing import parameterized
import numpy as np
from tensorboard.plugins.histogram import summary_v2 as histogram_summary_v2
from tensorboard.plugins.image import summary_v2 as image_summary_v2
from tensorboard.plugins.scalar import summary_v2 as scalar_summary_v2
from tensorflow.core.util import event_pb2
from tensorflow.python.distribute import tpu_strategy as tpu_lib
from tensorflow.python.distribute.cluster_resolver import tpu_cluster_resolver
from tensorflow.python.eager import def_function
from tensorflow.python.eager import remote
from tensorflow.python.eager import test
from tensorflow.python.framework import config
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor
from tensorflow.python.lib.io import tf_record
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import cond
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import image_ops
from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.ops import summary_ops_v2 as summary
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import while_loop
from tensorflow.python.platform import flags
from tensorflow.python.platform import gfile
from tensorflow.python.tpu import functional as tpu_functional
from tensorflow.python.tpu import tpu
from tensorflow.python.tpu import tpu_replication
from tensorflow.python.tpu.ops import tpu_ops
FLAGS = flags.FLAGS
flags.DEFINE_bool(
"use_local_tpu",
False,
"use local TPUs on a TPU VM instead of connecting to a GCP TPU VM or node.",
)
flags.DEFINE_string("tpu", "", "Name of TPU to connect to.")
flags.DEFINE_string("project", None, "Name of GCP project with TPU.")
flags.DEFINE_string("zone", None, "Name of GCP zone with TPU.")
def get_tpu_cluster_resolver():
  """Build a TPUClusterResolver from the command-line flags."""
  if FLAGS.use_local_tpu:
    # TPU VM case: resolve the locally attached accelerators.
    return tpu_cluster_resolver.TPUClusterResolver("local")
  # Otherwise connect to a named GCP TPU node/VM.
  return tpu_cluster_resolver.TPUClusterResolver(
      tpu=FLAGS.tpu, zone=FLAGS.zone, project=FLAGS.project)
def get_tpu_strategy():
  """Connect to the TPU cluster and return an initialized TPUStrategyV2."""
  resolver = get_tpu_cluster_resolver()
  remote.connect_to_cluster(resolver)
  tpu_cluster_resolver.initialize_tpu_system(resolver)
  return tpu_lib.TPUStrategyV2(resolver)
def computation_with_string_ops(x):
  """Round-trip *x* through string ops (not TPU-supported): "1{x}" -> number."""
  output = string_ops.string_format("1{}", x)
  return string_ops.string_to_number(output)
def _events_from_logdir(test_case, logdir):
  """Reads summary events from log directory."""
  test_case.assertTrue(gfile.Exists(logdir))
  files = gfile.ListDirectory(logdir)
  test_case.assertLen(files, 1)
  event_file = os.path.join(logdir, files[0])

  def _parse(raw_record):
    event = event_pb2.Event()
    event.ParseFromString(raw_record)
    return event

  return [_parse(r) for r in tf_record.tf_record_iterator(event_file)]
def _rewrite_func_wrapper(tf_func):
  """Wraps *tf_func* so it executes through ``tpu.rewrite``."""

  def tpu_fn(*args, **kwargs):
    # tpu.rewrite only accepts list of tensors as input. We need to flatten
    # keyword arguments to meet this requirement.
    concrete = tf_func.get_concrete_function(*(list(args) +
                                               list(kwargs.values())))
    return tpu.rewrite(concrete.__call__, list(args) + list(kwargs.values()))

  return def_function.function(tpu_fn)
def _tpu_partitioned_call_wrapper(tf_func):
  """Wrap a tensorflow Function with TPUPartitionedCall."""

  def inner_func(*args, **kwargs):
    concrete = tf_func.get_concrete_function(*args, **kwargs)
    # TPUPartitionedCall only accepts list of tensors as input args.
    # Flatten keyword arguments and do some basic ordering:
    # Positional args + Flattened keyword args + Captured args.
    op_args = list(args) + list(kwargs.values()) + concrete.captured_inputs
    # device_ordinal is picked at runtime by tpu_ordinal_selector.
    return tpu_functional.TPUPartitionedCall(
        args=op_args,
        device_ordinal=tpu_ops.tpu_ordinal_selector(),
        Tout=[o.type for o in concrete.function_def.signature.output_arg],
        f=concrete)

  return def_function.function(inner_func)
class TpuOutsideCompilationTest(test.TestCase, parameterized.TestCase):
  """End-to-end tests of explicit tpu_replication.outside_compilation."""

  def setUp(self):
    super(TpuOutsideCompilationTest, self).setUp()
    # All outside compilation in this class is explicit, so keep soft
    # device placement off to surface misplaced ops as errors.
    config.set_soft_device_placement(False)

  def testHostNoInput(self):
    """Host computation taking no input and returning nothing."""
    strategy = get_tpu_strategy()

    def outside_fn():
      logging_ops.print_v2("Outside compiled")

    @def_function.function
    def train_step():

      def tpu_fn(x):
        x2 = x + 5.0
        tpu_replication.outside_compilation(outside_fn)
        return x2 + 5.0

      return strategy.run(tpu_fn, args=(25.0,))

    self.assertAllEqual(
        strategy.experimental_local_results(train_step()),
        constant_op.constant(35., shape=(strategy.num_replicas_in_sync)))

  def testHostInputOnly(self):
    """Host computation consuming a TPU value but returning nothing."""
    strategy = get_tpu_strategy()

    def outside_fn(x):
      logging_ops.print_v2("Outside compiled", x)

    @def_function.function
    def train_step():

      def tpu_fn(x):
        x2 = x + 5.0
        tpu_replication.outside_compilation(outside_fn, x2)
        return x2 + 5.0

      return strategy.run(tpu_fn, args=(25.0,))

    self.assertAllEqual(
        strategy.experimental_local_results(train_step()),
        constant_op.constant(35., shape=(strategy.num_replicas_in_sync)))

  def testJitCompile(self):
    strategy = get_tpu_strategy()

    def outside_fn(x):
      logging_ops.print_v2("Outside compiled", x)

    # jit_compile=True should have no effect for TPU.
    @def_function.function(jit_compile=True)
    def train_step():

      def tpu_fn(x):
        x2 = x + 5.0
        tpu_replication.outside_compilation(outside_fn, x2)
        return x2 + 5.0

      return strategy.run(tpu_fn, args=(25.0,))

    self.assertAllEqual(
        strategy.experimental_local_results(train_step()),
        constant_op.constant(35., shape=(strategy.num_replicas_in_sync)))

  def testHostInputOutput(self):
    """Host computation that both consumes and produces a value."""
    strategy = get_tpu_strategy()

    def outside_fn(x):
      logging_ops.print_v2("Outside compiled", x)
      return x + 6.0

    @def_function.function
    def train_step():

      def tpu_fn(x):
        x2 = x + 5.0
        output = tpu_replication.outside_compilation(outside_fn, x2)
        return output

      return strategy.run(tpu_fn, args=(25.0,))

    self.assertAllEqual(
        strategy.experimental_local_results(train_step()),
        constant_op.constant(36., shape=(strategy.num_replicas_in_sync)))

  def testHostMultipleInputs(self):
    """Host computation with multiple tensor inputs and outputs."""
    strategy = get_tpu_strategy()
    val0 = np.arange(6).reshape((2, 3)).astype(np.float32)
    val1 = np.arange(6).reshape((3, 2)).astype(np.float32)

    def outside_fn(arg0, arg1):
      tmp = array_ops.reshape(arg1, array_ops.shape(arg0))
      ret0 = arg0 + tmp
      ret1 = math_ops.matmul(arg0, arg1)
      ret2 = array_ops.concat([arg0, tmp], 0)
      return ret0, ret1, ret2

    @def_function.function
    def train_step():

      def tpu_fn(x, y):
        a = x + 7.0
        b = y * 2.0
        c, d, e = tpu_replication.outside_compilation(outside_fn, a, b)
        return (math_ops.reduce_max(c) + math_ops.reduce_min(d) +
                math_ops.reduce_sum(e))

      return strategy.run(tpu_fn, args=(val0, val1))

    self.assertAllEqual(
        strategy.experimental_local_results(train_step()),
        constant_op.constant(213., shape=(strategy.num_replicas_in_sync)))

  def testMultipleClusters(self):
    """Two separate outside-compilation clusters in one TPU function."""
    strategy = get_tpu_strategy()

    def outside_fn1(x):
      logging_ops.print_v2("Outside compiled", x)
      return x + 6.0

    def outside_fn2(x):
      logging_ops.print_v2("Outside compiled", x)
      return x - 18.0

    @def_function.function
    def train_step():

      def tpu_fn(x):
        x2 = x + 5.0
        output1 = tpu_replication.outside_compilation(outside_fn1, x2)
        x3 = output1 + 3.0
        output2 = tpu_replication.outside_compilation(outside_fn2, x3)
        return output2

      return strategy.run(tpu_fn, args=(25.0,))

    self.assertAllEqual(
        strategy.experimental_local_results(train_step()),
        constant_op.constant(21., shape=(strategy.num_replicas_in_sync)))

  @parameterized.parameters((True), (False))
  def testOutsideCompilationControlFlowIf(self, take_true_branch):
    strategy = get_tpu_strategy()

    def outside_fn(x):
      logging_ops.print_v2("Outside compiled", x)
      return x + 6.0

    input_value = 51.0 if take_true_branch else 25.0

    @def_function.function
    def train_step():

      def tpu_fn(x):
        x2 = x + 5.0
        if x < 50.0:
          return tpu_replication.outside_compilation(outside_fn, x2)
        else:
          return x2

      return strategy.run(tpu_fn, args=(input_value,))

    output_value = 36.0
    if take_true_branch:
      output_value = 56.0
    self.assertAllEqual(
        strategy.experimental_local_results(train_step()),
        constant_op.constant(
            output_value, shape=(strategy.num_replicas_in_sync)))

  def testOutsideCompilationControlFlowWhile(self):
    strategy = get_tpu_strategy()

    def outside_fn(x):
      logging_ops.print_v2("Outside compiled", x)
      return x + 6.0

    @def_function.function
    def train_step():

      def tpu_fn(x):
        x2 = x + 5.0
        while x2 < 50.0:
          x2 = tpu_replication.outside_compilation(outside_fn, x2)
        return x2 + 4.0

      return strategy.run(tpu_fn, args=(25.0,))

    self.assertAllEqual(
        strategy.experimental_local_results(train_step()),
        constant_op.constant(58., shape=(strategy.num_replicas_in_sync)))

  def testOutsideCompilationHostControlFlow(self):
    """Tests that control flow on host for outside_compilation works."""
    strategy = get_tpu_strategy()

    def outside_fn(x):
      n = 0
      while n < 4:
        x = x + 6.0
        n = n + 1
      return x

    @def_function.function
    def train_step():

      def tpu_fn(x):
        x2 = x + 5.0
        x2 = tpu_replication.outside_compilation(outside_fn, x2)
        return x2 + 4.0

      return strategy.run(tpu_fn, args=(25.0,))

    self.assertAllEqual(
        strategy.experimental_local_results(train_step()),
        constant_op.constant(58., shape=(strategy.num_replicas_in_sync)))

  def testSummary(self):
    strategy = get_tpu_strategy()

    def host_computation(x):
      scalar_summary_v2.scalar("x", x, step=0)
      return x * 2.0

    @def_function.function
    def step():

      def computation(x):
        x = x + 1.0
        y = tpu_replication.outside_compilation(host_computation, x)
        y = tpu_replication.outside_compilation(host_computation, x)
        return y + 1.0

      return strategy.run(computation, args=(2.0,))

    summary_writer = summary.create_file_writer(
        os.path.join(os.getenv("TEST_TMPDIR", "/tmp")), flush_millis=10000)
    with summary_writer.as_default(), summary.always_record_summaries():
      self.assertAllEqual(
          strategy.experimental_local_results(step()),
          constant_op.constant(7., shape=(strategy.num_replicas_in_sync)))

  @parameterized.parameters((True), (False))
  def testSummaryInCond(self, take_true_branch):
    strategy = get_tpu_strategy()

    def host_computation(x):
      scalar_summary_v2.scalar("x", x, step=0)
      return x * 2.0

    @def_function.function
    def step(take_true_branch):

      def computation(x):
        x = x + 1.0
        if x < 5.0:
          y = tpu_replication.outside_compilation(host_computation, x)
          y = tpu_replication.outside_compilation(host_computation, x)
          x = y
        return x + 1.0

      if take_true_branch:
        return strategy.run(computation, args=(2.0,))
      else:
        return strategy.run(computation, args=(10.0,))

    summary_writer = summary.create_file_writer(
        os.path.join(os.getenv("TEST_TMPDIR", "/tmp")), flush_millis=10000)

    output_value = 12.
    if take_true_branch:
      output_value = 7.
    with summary_writer.as_default(), summary.always_record_summaries():
      self.assertAllEqual(
          strategy.experimental_local_results(step(take_true_branch)),
          constant_op.constant(
              output_value, shape=(strategy.num_replicas_in_sync)))

  def testSummaryInWhile(self):
    strategy = get_tpu_strategy()

    def host_computation(x):
      scalar_summary_v2.scalar("x", x, step=0)
      return x * 2.0

    @def_function.function
    def step():

      def computation(x):
        n = 0
        while n < 3:
          x = x + 1.0
          y = tpu_replication.outside_compilation(host_computation, x)
          y = tpu_replication.outside_compilation(host_computation, x)
          x = y
          n = n + 1
        return x + 1.0

      return strategy.run(computation, args=(2.0,))

    summary_writer = summary.create_file_writer(
        os.path.join(os.getenv("TEST_TMPDIR", "/tmp")), flush_millis=10000)
    with summary_writer.as_default(), summary.always_record_summaries():
      self.assertAllEqual(
          strategy.experimental_local_results(step()),
          constant_op.constant(31., shape=(strategy.num_replicas_in_sync)))

  def testOutsideCompilationAtHeadAndTail(self):
    """Tests that outside_compilation at head/tail of TPU computation works."""
    strategy = get_tpu_strategy()

    def host_computation(x):
      return x * 2.0

    @def_function.function
    def train_step():

      def computation(x):
        w = tpu_replication.outside_compilation(host_computation, x)
        y = w + 1.0
        z = tpu_replication.outside_compilation(host_computation, y)
        return z + 5.0

      return strategy.run(computation, args=(2.0,))

    self.assertAllEqual(
        strategy.experimental_local_results(train_step()),
        constant_op.constant(15., shape=(strategy.num_replicas_in_sync)))

  def testGradientAcrossOutsideCompilation(self):
    """Tests compiled gradients can contain host computations."""
    strategy = get_tpu_strategy()

    def host_computation(a):
      b = a * a
      c = b * b
      return c

    @def_function.function
    def train_step():

      def computation(x, y):
        a = x + 7.0
        b = tpu_replication.outside_compilation(host_computation, a)
        c = b * y
        d = gradients_impl.gradients(
            [c], [x], colocate_gradients_with_ops=True)[0]
        return d

      return strategy.run(computation, args=(2.0, 3.0))

    self.assertAllEqual(
        strategy.experimental_local_results(train_step()),
        constant_op.constant(8748., shape=(strategy.num_replicas_in_sync)))

  def testGradientOfGradientAcrossOutsideCompilation(self):
    """Tests compiled gradients of gradients can contain host computations."""
    strategy = get_tpu_strategy()

    def host_computation(a):
      b = a * a
      c = b * b
      return c

    @def_function.function
    def train_step():

      def computation(x, y):
        a = x + 7.0
        b = tpu_replication.outside_compilation(host_computation, a)
        c = b * y
        d = gradients_impl.gradients(
            [c], [x], colocate_gradients_with_ops=True)[0]
        e = gradients_impl.gradients(
            [d], [x], colocate_gradients_with_ops=True)[0]
        return e

      return strategy.run(computation, args=(2.0, 3.0))

    self.assertAllEqual(
        strategy.experimental_local_results(train_step()),
        constant_op.constant(2916., shape=(strategy.num_replicas_in_sync)))

  def testColocateGradientWithOutsideCompiledOp(self):
    strategy = get_tpu_strategy()

    @def_function.function
    def train_step():

      @def_function.function
      def tpu_fn(x):
        x1 = tpu_replication.outside_compilation(math_ops.sqrt, x)
        grad = gradients_impl.gradients([x1], [x],
                                        colocate_gradients_with_ops=True)[0]
        sqrt = [
            op for op in ops.get_default_graph().get_operations()
            if op.type == "Sqrt"
        ][0]
        sqrt_grad = [
            op for op in ops.get_default_graph().get_operations()
            if op.type == "SqrtGrad"
        ][0]
        # Forward op and its gradient must carry matching
        # outside-compilation cluster attributes.
        assert sqrt.get_attr(
            tpu_replication._OUTSIDE_COMPILATION_ATTR) == b"0"
        assert (sqrt_grad.get_attr(
            tpu_replication._OUTSIDE_COMPILATION_ATTR) == b"0.gradients/uid"
               )
        return grad

      return strategy.run(tpu_fn, args=(25.0,))

    self.assertAllEqual(
        strategy.experimental_local_results(train_step()),
        constant_op.constant(.1, shape=(strategy.num_replicas_in_sync)))
class OutsideCompilationOnUnsupportedOpTest(test.TestCase,
                                            parameterized.TestCase):
  """Tests of automatic outside compilation for TPU-unsupported ops."""

  def setUp(self):
    super(OutsideCompilationOnUnsupportedOpTest, self).setUp()
    # Soft placement lets TF automatically outside-compile ops without a
    # TPU kernel (string ops, summaries, ...).
    config.set_soft_device_placement(True)

  def testStringOpWithManualOutsideCompilation(self):
    strategy = get_tpu_strategy()

    @def_function.function
    def train_step(x):

      def computation(x):
        return tpu_replication.outside_compilation(
            computation_with_string_ops, x)

      return strategy.run(computation, args=(x,))

    self.assertAllEqual(
        strategy.experimental_local_results(train_step(0)),
        constant_op.constant(10, shape=(strategy.num_replicas_in_sync)))

  def testStringOpWithAutoOutsideCompilation(self):
    strategy = get_tpu_strategy()

    @def_function.function
    def train_step(x):

      def computation(x):
        return computation_with_string_ops(x)

      return strategy.run(computation, args=(x,))

    self.assertAllEqual(
        strategy.experimental_local_results(train_step(0)),
        constant_op.constant(10, shape=(strategy.num_replicas_in_sync)))

  # Regression test for b/180509859.
  def testImageSummary(self):
    strategy = get_tpu_strategy()

    def run():

      @def_function.function
      def sample_sequence():
        bsz = 3
        max_length = 32 * 32

        def f():

          def body(step, tokens):
            next_token = random_ops.random_uniform([bsz])
            tokens = tokens.write(step, next_token)
            return (step + 1, tokens)

          def cond_fn(step, tokens):
            del tokens
            return math_ops.less(step, max_length)

          tokens_var = tensor_array_ops.TensorArray(
              dtype=dtypes.float32,
              size=max_length,
              dynamic_size=False,
              clear_after_read=False,
              element_shape=(bsz,),
              name="tokens_accumulator",
          )
          step = constant_op.constant(0)
          step, tokens_var = while_loop.while_loop(cond_fn, body,
                                                   [step, tokens_var])

          image_flat = array_ops.transpose(tokens_var.stack(), [1, 0])
          image = array_ops.tile(
              array_ops.reshape(image_flat, [bsz, 32, 32, 1]), [1, 1, 1, 3])
          image_summary_v2.image("image_sample", image,
                                 constant_op.constant(5, dtype=dtypes.int64))

        return strategy.run(f)

      sample_sequence()

    logdir = tempfile.mkdtemp()
    summary_writer = summary.create_file_writer(logdir, flush_millis=10000)
    with summary_writer.as_default(), summary.always_record_summaries():
      run()
    events = _events_from_logdir(self, logdir)
    decoded_image = image_ops.decode_png(
        events[1].summary.value[0].tensor.string_val[2]).numpy()
    # Ensure that non-zero values were written to the image summary.
    self.assertNotAllEqual(
        array_ops.zeros((3072,), dtype=dtypes.float32),
        list(decoded_image.flat))

  def testSummaryWithAutoOutsideCompilation(self):
    strategy = get_tpu_strategy()

    def host_computation(x):
      scalar_summary_v2.scalar("x", x, step=0)
      return x * 2.0

    @def_function.function
    def step():

      def computation(x):
        x = x + 1.0
        y = host_computation(x)
        return y + 1.0

      return strategy.run(computation, args=(2.0,))

    logdir = tempfile.mkdtemp()
    summary_writer = summary.create_file_writer(logdir, flush_millis=10000)
    with summary_writer.as_default(), summary.always_record_summaries():
      self.assertAllEqual(
          strategy.experimental_local_results(step()),
          constant_op.constant(7., shape=(strategy.num_replicas_in_sync)))
    events = _events_from_logdir(self, logdir)
    # There will be 2 entries: 1 summary file header entry, and 1 entry
    # written by host.
    self.assertLen(events, 2)
    self.assertEqual(events[1].summary.value[0].tag, "x")

  def testNestedFunctionScalarSummary(self):
    strategy = get_tpu_strategy()

    def host_computation(x):
      scalar_summary_v2.scalar("x", x, step=0)
      return x * 2.0

    @def_function.function
    def step():

      @def_function.function
      def computation(x):
        x = x + 1.0
        y = host_computation(x)
        return y + 1.0

      return strategy.run(computation, args=(2.0,))

    logdir = tempfile.mkdtemp()
    summary_writer = summary.create_file_writer(logdir, flush_millis=10000)
    with summary_writer.as_default(), summary.always_record_summaries():
      self.assertAllEqual(
          strategy.experimental_local_results(step()),
          constant_op.constant(7., shape=(strategy.num_replicas_in_sync)))
    events = _events_from_logdir(self, logdir)
    # There will be 2 entries: 1 summary file header entry, and 1 entry
    # written by host.
    self.assertLen(events, 2)
    self.assertEqual(events[1].summary.value[0].tag, "x")

  def testHistogramSummaryWithAutoOutsideCompilation(self):
    strategy = get_tpu_strategy()

    def host_computation(x):
      histogram_summary_v2.histogram("x", x, step=0)
      return x * 2.0

    @def_function.function
    def step():

      def computation(x):
        x = x + 1.0
        y = host_computation(x)
        return y + 1.0

      return strategy.run(computation, args=(2.0,))

    logdir = tempfile.mkdtemp()
    summary_writer = summary.create_file_writer(logdir, flush_millis=10000)
    with summary_writer.as_default(), summary.always_record_summaries():
      self.assertAllEqual(
          strategy.experimental_local_results(step()),
          constant_op.constant(7., shape=(strategy.num_replicas_in_sync)))
    events = _events_from_logdir(self, logdir)
    # There will be 2 entries: 1 summary file header entry, and 1 entry
    # written by host.
    self.assertLen(events, 2)
    self.assertEqual(events[1].summary.value[0].tag, "x")

  @parameterized.parameters((True), (False))
  def testSummaryControlFlowIfWithAutoOutsideCompilation(
      self, take_true_branch):
    strategy = get_tpu_strategy()

    @def_function.function
    def step():

      def computation(x):
        x = x + 1.0
        if x < 5:
          scalar_summary_v2.scalar("x", x, step=0)
          x = x * 2.0
        return x + 1.0

      if take_true_branch:
        return strategy.run(computation, args=(2.0,))
      else:
        return strategy.run(computation, args=(10.0,))

    logdir = tempfile.mkdtemp()
    summary_writer = summary.create_file_writer(logdir, flush_millis=10000)
    output_value = 12.
    if take_true_branch:
      output_value = 7.
    with summary_writer.as_default(), summary.always_record_summaries():
      self.assertAllEqual(
          strategy.experimental_local_results(step()),
          constant_op.constant(
              output_value, shape=(strategy.num_replicas_in_sync)))
    if take_true_branch:
      events = _events_from_logdir(self, logdir)
      # There will be 2 entries: 1 summary file header entry, and 1 entry
      # written by host.
      #
      self.assertLen(events, 2)
      self.assertEqual(events[1].summary.value[0].tag, "cond/x")

  def testAutoOutsideCompilationWithFunctionalNodes(self):
    strategy = get_tpu_strategy()

    @def_function.function
    def train_step(a, b):

      def fn(a, b):
        fn1 = lambda: computation_with_string_ops(a * 100)
        fn2 = lambda: computation_with_string_ops(a)
        pred = math_ops.greater_equal(a, b)
        result = array_ops.identity(
            cond.cond(pred, fn1, fn2),
            name="uncompilable_control_flow")
        return result

      return strategy.run(fn, args=(a, b))

    self.assertAllEqual(
        strategy.experimental_local_results(train_step(0.0, -1.0)),
        constant_op.constant(10, shape=(strategy.num_replicas_in_sync)))

  def testRandomOpsWithAutoOutsideCompilation(self):
    strategy = get_tpu_strategy()

    @def_function.function
    def train_step():

      def computation():
        return random_ops.random_normal(shape=[1, 2, 3])

      return strategy.run(computation, args=())

    self.assertAllEqual(
        strategy.experimental_local_results(train_step())[0].shape, [1, 2, 3])

  def testOutsideCompilationWithTPUPartitionedCallOp(self):
    """Tests that control flow with TPUPartitionedCall including outside_compilation works."""
    # Called for its side effect of connecting to and initializing the
    # TPU system (see get_tpu_strategy); the strategy object is unused.
    get_tpu_strategy()

    def host_computation(x):
      return x + 1

    @def_function.function()
    def train_step(x):
      x2 = x + 5.0
      logging_ops.print_v2(x2)
      x2 = tpu_replication.outside_compilation(host_computation, x2)
      return x2 + 4.0

    tpu_fn = _rewrite_func_wrapper(train_step)
    partitioned_tpu_fn = _tpu_partitioned_call_wrapper(tpu_fn)

    concrete = partitioned_tpu_fn.get_concrete_function(
        x=tensor.TensorSpec(
            shape=(1), dtype=dtypes.float32, name="input_tensor"))

    self.assertIsInstance(
        concrete(array_ops.ones((1), dtype=dtypes.float32))[0], tensor.Tensor)
# Script entry point: defer to the TensorFlow test runner.
if __name__ == "__main__":
  test.main()
|
tensorflowREPO_NAMEtensorflowPATH_START.@tensorflow_extracted@tensorflow-master@tensorflow@python@tpu@tpu_outside_compilation_test.py@.PATH_END.py
|
{
"filename": "test_frame.py",
"repo_name": "waynebhayes/SpArcFiRe",
"repo_path": "SpArcFiRe_extracted/SpArcFiRe-master/scripts/SpArcFiRe-pyvenv/lib/python2.7/site-packages/astropy/visualization/wcsaxes/tests/test_frame.py",
"type": "Python"
}
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
import numpy as np
import matplotlib.pyplot as plt
from ....wcs import WCS
from ....tests.helper import remote_data
from .. import WCSAxes
from ..frame import BaseFrame
from ....tests.image_tests import IMAGE_REFERENCE_DIR
from .test_images import BaseImageTests
class HexagonalFrame(BaseFrame):
    """A six-sided WCSAxes frame whose vertices track the axes limits."""

    spine_names = 'abcdef'

    def update_spines(self):
        """Recompute the six spines from the current axes limits."""
        xmin, xmax = self.parent_axes.get_xlim()
        ymin, ymax = self.parent_axes.get_ylim()

        ymid = 0.5 * (ymin + ymax)
        xmid1 = (xmin + xmax) / 4.
        xmid2 = (xmin + xmax) * 3. / 4.

        # Hexagon vertices in drawing order: spine 'a' joins vertex 0 to
        # vertex 1, 'b' joins 1 to 2, ..., and 'f' closes the loop back to
        # vertex 0.
        vertices = [(xmid1, ymin), (xmid2, ymin), (xmax, ymid),
                    (xmid2, ymax), (xmid1, ymax), (xmin, ymid)]

        for spine, start, end in zip(self.spine_names, vertices,
                                     vertices[1:] + vertices[:1]):
            self[spine].data = np.array((start, end))
class TestFrame(BaseImageTests):
    # Image-comparison tests for WCSAxes frame handling: rendering with a
    # custom (hexagonal) frame, and preserving the frame clip path when the
    # limits or the WCS change after the figure has been drawn.

    @remote_data(source='astropy')
    @pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR,
                                   filename='custom_frame.png',
                                   tolerance=1.5)
    def test_custom_frame(self):
        """Render an image inside a hexagonal frame and compare to baseline."""
        wcs = WCS(self.msx_header)

        fig = plt.figure(figsize=(4, 4))

        ax = WCSAxes(fig, [0.15, 0.15, 0.7, 0.7],
                     wcs=wcs,
                     frame_class=HexagonalFrame)
        fig.add_axes(ax)

        ax.coords.grid(color='white')

        im = ax.imshow(np.ones((149, 149)), vmin=0., vmax=2.,
                       origin='lower', cmap=plt.cm.gist_heat)

        # Per-spine minimum padding for the axis labels, keyed by the
        # HexagonalFrame spine names 'a'..'f'.
        minpad = {}
        minpad['a'] = minpad['d'] = 1
        minpad['b'] = minpad['c'] = minpad['e'] = minpad['f'] = 2.75

        ax.coords['glon'].set_axislabel("Longitude", minpad=minpad)
        ax.coords['glon'].set_axislabel_position('ad')

        ax.coords['glat'].set_axislabel("Latitude", minpad=minpad)
        ax.coords['glat'].set_axislabel_position('bcef')

        ax.coords['glon'].set_ticklabel_position('ad')
        ax.coords['glat'].set_ticklabel_position('bcef')

        # Set limits so that no labels overlap
        ax.set_xlim(5.5, 100.5)
        ax.set_ylim(5.5, 110.5)

        # Clip the image to the frame
        im.set_clip_path(ax.coords.frame.patch)

        return fig

    @remote_data(source='astropy')
    @pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR,
                                   filename='update_clip_path_rectangular.png',
                                   tolerance=1.5)
    def test_update_clip_path_rectangular(self, tmpdir):
        """Clip path of a rectangular frame must follow later limit changes."""
        fig = plt.figure()
        ax = WCSAxes(fig, [0.1, 0.1, 0.8, 0.8], aspect='equal')

        fig.add_axes(ax)

        ax.set_xlim(0., 2.)
        ax.set_ylim(0., 2.)

        # Force drawing, which freezes the clip path returned by WCSAxes
        fig.savefig(tmpdir.join('nothing').strpath)

        # Change the content and limits after the draw; the rendered clip
        # path should track the new limits rather than the frozen ones.
        ax.imshow(np.zeros((12, 4)))

        ax.set_xlim(-0.5, 3.5)
        ax.set_ylim(-0.5, 11.5)

        return fig

    @remote_data(source='astropy')
    @pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR,
                                   filename='update_clip_path_nonrectangular.png',
                                   tolerance=1.5)
    def test_update_clip_path_nonrectangular(self, tmpdir):
        """Same as the rectangular case, but with the hexagonal frame."""
        fig = plt.figure()
        ax = WCSAxes(fig, [0.1, 0.1, 0.8, 0.8], aspect='equal',
                     frame_class=HexagonalFrame)

        fig.add_axes(ax)

        ax.set_xlim(0., 2.)
        ax.set_ylim(0., 2.)

        # Force drawing, which freezes the clip path returned by WCSAxes
        fig.savefig(tmpdir.join('nothing').strpath)

        ax.imshow(np.zeros((12, 4)))

        ax.set_xlim(-0.5, 3.5)
        ax.set_ylim(-0.5, 11.5)

        return fig

    @remote_data(source='astropy')
    @pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR,
                                   filename='update_clip_path_change_wcs.png',
                                   tolerance=1.5)
    def test_update_clip_path_change_wcs(self, tmpdir):
        # When WCS is changed, a new frame is created, so we need to make sure
        # that the path is carried over to the new frame.

        fig = plt.figure()
        ax = WCSAxes(fig, [0.1, 0.1, 0.8, 0.8], aspect='equal')

        fig.add_axes(ax)

        ax.set_xlim(0., 2.)
        ax.set_ylim(0., 2.)

        # Force drawing, which freezes the clip path returned by WCSAxes
        fig.savefig(tmpdir.join('nothing').strpath)

        # Replacing the WCS rebuilds the frame object.
        ax.reset_wcs()

        ax.imshow(np.zeros((12, 4)))

        ax.set_xlim(-0.5, 3.5)
        ax.set_ylim(-0.5, 11.5)

        return fig

    def test_copy_frame_properties_change_wcs(self):
        # When WCS is changed, a new frame is created, so we need to make sure
        # that the color and linewidth are transferred over
        fig = plt.figure()
        ax = WCSAxes(fig, [0.1, 0.1, 0.8, 0.8])
        fig.add_axes(ax)
        ax.coords.frame.set_linewidth(5)
        ax.coords.frame.set_color('purple')
        ax.reset_wcs()
        assert ax.coords.frame.get_linewidth() == 5
        assert ax.coords.frame.get_color() == 'purple'
|
waynebhayesREPO_NAMESpArcFiRePATH_START.@SpArcFiRe_extracted@SpArcFiRe-master@scripts@SpArcFiRe-pyvenv@lib@python2.7@site-packages@astropy@visualization@wcsaxes@tests@test_frame.py@.PATH_END.py
|
{
"filename": "_linewidth.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/layout/xaxis/_linewidth.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class LinewidthValidator(_plotly_utils.basevalidators.NumberValidator):
    """Validator for the ``layout.xaxis.linewidth`` attribute.

    A non-negative number; edits trigger a ticks + layout-style redraw.
    """

    def __init__(self, plotly_name="linewidth", parent_name="layout.xaxis", **kwargs):
        # Fill in validator defaults only when the caller did not override them.
        kwargs.setdefault("edit_type", "ticks+layoutstyle")
        kwargs.setdefault("min", 0)
        kwargs.setdefault("role", "style")
        super(LinewidthValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@layout@xaxis@_linewidth.py@.PATH_END.py
|
{
"filename": "resample.py",
"repo_name": "astropy/specutils",
"repo_path": "specutils_extracted/specutils-main/specutils/manipulation/resample.py",
"type": "Python"
}
|
from abc import ABC, abstractmethod
import numpy as np
from astropy.nddata import VarianceUncertainty, InverseVariance
from astropy.units import Quantity
from scipy.interpolate import CubicSpline
from ..spectra import Spectrum1D, SpectralAxis
__all__ = ['ResamplerBase', 'FluxConservingResampler',
'LinearInterpolatedResampler', 'SplineInterpolatedResampler']
class ResamplerBase(ABC):
    """Common base class for spectral resamplers.

    Concrete subclasses implement :meth:`resample1d`; calling a resampler
    instance simply delegates to it.

    Parameters
    ----------
    extrapolation_treatment : str
        How to treat output points falling outside the input spectrum:
        ``'nan_fill'`` sets them to NaN, ``'zero_fill'`` sets them to zero,
        and ``'truncate'`` drops the non-overlapping bins entirely.
    """

    def __init__(self, extrapolation_treatment='nan_fill'):
        # Reject anything outside the three supported edge treatments early.
        if extrapolation_treatment not in ('nan_fill', 'zero_fill', 'truncate'):
            raise ValueError('invalid extrapolation_treatment value: '
                             + str(extrapolation_treatment))
        self.extrapolation_treatment = extrapolation_treatment

    def __call__(self, orig_spectrum, fin_spec_axis):
        """Resample ``orig_spectrum`` onto ``fin_spec_axis`` and return the result."""
        return self.resample1d(orig_spectrum, fin_spec_axis)

    @abstractmethod
    def resample1d(self, orig_spectrum, fin_spec_axis):
        """Return the resampled spectrum; must be implemented by subclasses."""
        return NotImplemented
class FluxConservingResampler(ResamplerBase):
    """
    This resampling algorithm conserves overall integrated flux (as opposed to
    flux density).

    Algorithm based on the equations documented in the following paper:
    https://ui.adsabs.harvard.edu/abs/2017arXiv170505165C/abstract

    Parameters
    ----------
    extrapolation_treatment : str
        What to do when resampling off the edge of the spectrum. Can be
        ``'nan_fill'`` to have points beyond the edges by set to NaN,
        ``'zero_fill'`` to set those points to zero, or ``'truncate'`` to
        truncate any non-overlapping bins of the spectrum.

    Examples
    --------
    To resample an input spectrum to a user specified spectral grid using
    a flux conserving algorithm:

    >>> import numpy as np
    >>> import astropy.units as u
    >>> from specutils import Spectrum1D
    >>> from specutils.manipulation import FluxConservingResampler
    >>> input_spectra = Spectrum1D(
    ...     flux=np.array([1, 3, 7, 6, 20]) * u.mJy,
    ...     spectral_axis=np.array([2, 4, 12, 16, 20]) * u.nm)
    >>> resample_grid = [1, 5, 9, 13, 14, 17, 21, 22, 23] *u.nm
    >>> fluxc_resample = FluxConservingResampler()
    >>> fluxc_resample(input_spectra, resample_grid)  # doctest: +FLOAT_CMP
    <Spectrum1D(flux=<Quantity [ 1. , 3. , 6. , 7. , 6.25, 10. , 20. , nan, nan] mJy> (shape=(9,), mean=7.60714 mJy); spectral_axis=<SpectralAxis [ 1. 5. 9. ... 21. 22. 23.] nm> (length=9))>
    """

    def _fluxc_resample(self, input_bin_centers, output_bin_centers,
                        input_bin_fluxes, errs):
        """
        Resample ``input_bin_fluxes`` and (optionally) ``errs`` from
        ``input_bin_centers`` to ``output_bin_centers``.

        Parameters
        ----------
        input_bin_centers : `~specutils.SpectralAxis`
            `~specutils.SpectralAxis` object, with input bin centers.
        output_bin_centers : `~specutils.SpectralAxis`
            `~specutils.SpectralAxis` object, with input bin centers.
        input_bin_fluxes : Quantity
            Quantity array of flux values.
        errs : `~astropy.nddata.Variance` object, or None
            Variance array of errors corresponding to input bin fluxes. If None,
            error resampling is not performed.

        Returns
        -------
        (output_fluxes, output_errs)
            A tuple containing plain numpy arrays of the resampled fluxes and
            errors (if available).
        """
        fill_val = np.nan  # bin_edges=nan_fill case
        if self.extrapolation_treatment == 'zero_fill':
            fill_val = 0

        # get bin edges from centers
        input_bin_edges = input_bin_centers.bin_edges.value
        output_bin_edges = output_bin_centers.bin_edges.value

        # create array of output fluxes and errors to be returned
        output_fluxes = np.zeros(shape=len(output_bin_centers)) * input_bin_fluxes.unit
        output_errs = None
        if errs is not None:
            output_errs = np.zeros(shape=len(output_bin_centers))

        # first, figure out what output bins cover wavelengths outside the span of
        # input bins. these bins should have fluxes set to nan (or whatever the
        # fill val is.) and can be skipped
        min_idx = 0
        max_idx = None
        low_out_of_range = np.where(output_bin_edges <= input_bin_edges[0])[0]
        if len(low_out_of_range) > 0:  # if any bins below wavelength range
            min_idx = low_out_of_range[-1]  # This doesn't need +1 because bin_edges has len+1 compared to output_fluxes
            output_fluxes[:min_idx] = fill_val
            if errs is not None:
                output_errs[:min_idx] = fill_val
        high_out_of_range = np.where(output_bin_edges > input_bin_edges[-1])[0]
        if len(high_out_of_range) > 0:
            max_idx = high_out_of_range[0] - 1
            output_fluxes[max_idx:] = fill_val
            if errs is not None:
                output_errs[max_idx:] = fill_val

        # Only the output bins that overlap the input span are iterated below.
        clipped_output_centers = output_bin_centers[min_idx:max_idx]

        # find the index of the first input bin that intersects the first
        # in-range output bin.
        first_output_edge = output_bin_edges[min_idx]
        idx_last_overlapping_bin = np.where(input_bin_edges[1:] > first_output_edge)[0][0]

        # iterate over each output bin in wavelength range of input bins
        for i, output_bin in enumerate(clipped_output_centers):
            i = i + min_idx  # index in orig, unclipped array
            bin_start, bin_stop = output_bin_edges[i], output_bin_edges[i+1]

            # the first at least partially overlapping bin was determined in the
            # last iteration (or by the initial clipping, if i=0)
            first_bin = idx_last_overlapping_bin

            # keep checking bins, starting at the one after we know overlaps first,
            # and stop when the back edge of an input bin overlaps the front
            # edge of this output bin.
            while input_bin_edges[idx_last_overlapping_bin + 1] < bin_stop:
                idx_last_overlapping_bin += 1

            # if the front edge of the last overlapping bin terminates in this
            # output bin, don't check it next time
            final_bin = idx_last_overlapping_bin
            if input_bin_edges[idx_last_overlapping_bin + 1] <= bin_stop:
                idx_last_overlapping_bin = idx_last_overlapping_bin + 1

            # now, calculate fluxes and errors
            # if only one input bin covers this output bin
            # flux_j=p_ij*w_i*f_i/p_ij*w_i = f_i - f_j=f_i, err_j=err_i
            if final_bin == first_bin:
                output_fluxes[i] = input_bin_fluxes[first_bin]
                if errs is not None:
                    output_errs[i] = errs[first_bin]
            # otherwise, figure out the contribution from each overlapping
            # input bin to calculate the final flux in the output bin.
            else:
                # the first edges of each overlapping input bin
                first_edges = input_bin_edges[first_bin:final_bin+1]
                # the final edges of each overlapping input bin
                second_edges = input_bin_edges[first_bin+1:final_bin+2]

                # to calculate the overlap area, of input on output, we
                # want to only deal with input bin's leading edges if they are
                # inside the output bin. otherwise, they are bounded by the
                # output bin edges. temporarily set the last edges to the output
                # bin bounds and then reset them at the end
                # NOTE: first_edges/second_edges are numpy *views* into
                # input_bin_edges, so these writes mutate the shared array;
                # the restore below is what keeps later iterations correct.
                first_edges_orig_first = first_edges[0]
                first_edges[0] = bin_start
                second_edges_orig_last = second_edges[-1]
                second_edges[-1] = bin_stop

                # p_ij is the overlap width of each input bin with this
                # output bin.
                p_ij = second_edges - first_edges

                # reset back
                first_edges[0] = first_edges_orig_first
                second_edges[-1] = second_edges_orig_last

                sum_pij = np.sum(p_ij)

                final_flux = (np.sum(input_bin_fluxes[first_bin:final_bin+1] * p_ij)) / sum_pij
                output_fluxes[i] = final_flux

                if errs is not None:
                    final_err = np.sum((errs[first_bin:final_bin+1] * p_ij) ** 2) / (sum_pij * sum_pij)
                    output_errs[i] = np.sqrt(final_err)

        if errs is not None:
            # Variances are returned as an InverseVariance uncertainty.
            output_errs = InverseVariance(np.reciprocal(output_errs))

        return (output_fluxes, output_errs)

    def resample1d(self, orig_spectrum, fin_spec_axis):
        """
        Create a re-sampling matrix to be used in re-sampling spectra in a way
        that conserves flux. If an uncertainty is present in the input spectra
        it will be propagated through to the final resampled output spectra
        as an InverseVariance uncertainty.

        Parameters
        ----------
        orig_spectrum : `~specutils.Spectrum1D`
            The original 1D spectrum.
        fin_spec_axis : Quantity
            The desired spectral axis array.

        Returns
        -------
        resampled_spectrum : `~specutils.Spectrum1D`
            An output spectrum containing the resampled `~specutils.Spectrum1D`
        """
        if isinstance(fin_spec_axis, Quantity):
            if orig_spectrum.spectral_axis.unit != fin_spec_axis.unit:
                raise ValueError("Original spectrum spectral axis grid and new"
                                 "spectral axis grid must have the same units.")

        if not isinstance(fin_spec_axis, SpectralAxis):
            fin_spec_axis = SpectralAxis(fin_spec_axis)

        # Get provided uncertainty into variance
        if orig_spectrum.uncertainty is not None:
            pixel_uncer = orig_spectrum.uncertainty.represent_as(VarianceUncertainty).array
        else:
            pixel_uncer = None

        # convert unit
        orig_axis_in_fin = orig_spectrum.spectral_axis.to(fin_spec_axis.unit)

        # handle multi dimensional flux inputs
        if orig_spectrum.flux.ndim >= 2:
            # the output fluxes and errs should have the same shape as the input
            # except for the last axis, which should be the size of the new
            # spectral axis
            new_shape = tuple(list(orig_spectrum.shape[0:-1]) +
                              [len(fin_spec_axis)])
            # make output matricies
            output_fluxes = np.zeros(shape=new_shape)
            output_errs = np.zeros(shape=new_shape)
            # NOTE(review): this branch indexes pixel_uncer unconditionally,
            # so a multi-dimensional spectrum with no uncertainty would raise
            # here — confirm whether uncertainty is required for ndim >= 2.
            for index, row in np.ndenumerate(orig_spectrum.flux[..., 0]):
                orig_fluxes = orig_spectrum.flux[index]
                orig_uncer = pixel_uncer[index]
                new_f, new_e = self._fluxc_resample(input_bin_centers=orig_axis_in_fin,
                                                    output_bin_centers=fin_spec_axis,
                                                    input_bin_fluxes=orig_fluxes,
                                                    errs=orig_uncer)
                output_fluxes[index] = new_f
                output_errs[index] = new_e.array
            new_errs = InverseVariance(output_errs)
        else:
            # calculate new fluxes and errors
            output_fluxes, new_errs = self._fluxc_resample(input_bin_centers=orig_axis_in_fin,
                                                           output_bin_centers=fin_spec_axis,
                                                           input_bin_fluxes=orig_spectrum.flux,
                                                           errs=pixel_uncer)

        output_fluxes = output_fluxes << orig_spectrum.flux.unit
        fin_spec_axis = np.array(fin_spec_axis) << orig_spectrum.spectral_axis.unit

        # NOTE(review): the truncate masking below assumes a 1D flux array;
        # verify behavior for the multi-dimensional branch above.
        if self.extrapolation_treatment == 'truncate':
            fin_spec_axis = fin_spec_axis[np.where(~np.isnan(output_fluxes))]
            if new_errs is not None:
                new_errs = new_errs[np.where(~np.isnan(output_fluxes))]
            output_fluxes = output_fluxes[np.where(~np.isnan(output_fluxes))]

        resampled_spectrum = Spectrum1D(flux=output_fluxes,
                                        spectral_axis=fin_spec_axis,
                                        uncertainty=new_errs)

        return resampled_spectrum
class LinearInterpolatedResampler(ResamplerBase):
    """
    Resample a spectrum onto a new ``spectral_axis`` using linear interpolation.

    Parameters
    ----------
    extrapolation_treatment : str
        What to do when resampling off the edge of the spectrum. Can be
        ``'nan_fill'`` to have points beyond the edges by set to NaN,
        ``'zero_fill'`` to set those points to zero, or ``'truncate'`` to
        truncate any non-overlapping bins of the spectrum.

    Examples
    --------
    To resample an input spectrum to a user specified dispersion grid using
    linear interpolation:

    >>> import numpy as np
    >>> import astropy.units as u
    >>> from specutils import Spectrum1D
    >>> from specutils.manipulation import LinearInterpolatedResampler
    >>> input_spectra = Spectrum1D(
    ...     flux=np.array([1, 3, 7, 6, 20]) * u.mJy,
    ...     spectral_axis=np.array([2, 4, 12, 16, 20]) * u.nm)
    >>> resample_grid = [1, 5, 9, 13, 14, 17, 21, 22, 23] * u.nm
    >>> fluxc_resample = LinearInterpolatedResampler()
    >>> fluxc_resample(input_spectra, resample_grid)  # doctest: +FLOAT_CMP
    <Spectrum1D(flux=<Quantity [ nan, 3.5 , 5.5 , 6.75, 6.5 , 9.5 , nan, nan, nan] mJy> (shape=(9,), mean=6.35000 mJy); spectral_axis=<SpectralAxis [ 1. 5. 9. ... 21. 22. 23.] nm> (length=9))>
    """

    def __init__(self, extrapolation_treatment='nan_fill'):
        super().__init__(extrapolation_treatment)

    def resample1d(self, orig_spectrum, fin_spec_axis):
        """
        Linearly interpolate the spectrum onto the new axis and repackage it.

        Parameters
        ----------
        orig_spectrum : `~specutils.Spectrum1D`
            The original 1D spectrum.
        fin_spec_axis : ndarray
            The desired spectral axis array.

        Returns
        -------
        resample_spectrum : `~specutils.Spectrum1D`
            An output spectrum containing the resampled `~specutils.Spectrum1D`
        """
        # Value assigned to output points beyond the input spectral range.
        fill_val = 0 if self.extrapolation_treatment == 'zero_fill' else np.nan

        orig_axis_in_fin = orig_spectrum.spectral_axis.to(fin_spec_axis.unit)

        interp_flux = np.interp(fin_spec_axis.value, orig_axis_in_fin.value,
                                orig_spectrum.flux.value,
                                left=fill_val, right=fill_val)
        out_flux = Quantity(interp_flux, unit=orig_spectrum.flux.unit)

        # Interpolate the uncertainty array with the same scheme, if present.
        new_unc = None
        if orig_spectrum.uncertainty is not None:
            interp_unc = np.interp(fin_spec_axis.value, orig_axis_in_fin.value,
                                   orig_spectrum.uncertainty.array,
                                   left=fill_val, right=fill_val)
            new_unc = orig_spectrum.uncertainty.__class__(array=interp_unc,
                                                          unit=orig_spectrum.unit)

        # Drop the NaN (out-of-range) samples entirely when truncating.
        if self.extrapolation_treatment == 'truncate':
            keep = np.where(~np.isnan(out_flux))
            fin_spec_axis = fin_spec_axis[keep]
            if new_unc is not None:
                new_unc = new_unc[keep]
            out_flux = out_flux[keep]

        return Spectrum1D(spectral_axis=fin_spec_axis,
                          flux=out_flux,
                          uncertainty=new_unc)
class SplineInterpolatedResampler(ResamplerBase):
    """
    Resample a spectrum onto a new axis with a cubic spline interpolator.
    Any uncertainty is also interpolated using an identical spline.

    Parameters
    ----------
    extrapolation_treatment : str
        What to do when resampling off the edge of the spectrum. Can be
        ``'nan_fill'`` to have points beyond the edges by set to NaN,
        ``'zero_fill'`` to set those points to zero, or ``'truncate'`` to
        truncate any non-overlapping bins of the spectrum. Any other value will
        have the spline interpolate beyond the edges of the original data.

    Examples
    --------
    To resample an input spectrum to a user specified spectral axis grid using
    a cubic spline interpolator:

    >>> import numpy as np
    >>> import astropy.units as u
    >>> from specutils import Spectrum1D
    >>> from specutils.manipulation import SplineInterpolatedResampler
    >>> input_spectra = Spectrum1D(
    ...     flux=np.array([1, 3, 7, 6, 20]) * u.mJy,
    ...     spectral_axis=np.array([2, 4, 12, 16, 20]) * u.nm)
    >>> resample_grid = [1, 5, 9, 13, 14, 17, 21, 22, 23] * u.nm
    >>> fluxc_resample = SplineInterpolatedResampler()
    >>> fluxc_resample(input_spectra, resample_grid)  # doctest: +FLOAT_CMP
    <Spectrum1D(flux=<Quantity [       nan, 3.98808594, 6.94042969, 6.45869141, 5.89921875,
               7.29736328,        nan,        nan,        nan] mJy> (shape=(9,), mean=6.11676 mJy); spectral_axis=<SpectralAxis [ 1.  5.  9. ... 21. 22. 23.] nm> (length=9))>
    """

    def __init__(self, extrapolation_treatment='nan_fill'):
        super().__init__(extrapolation_treatment)

    def resample1d(self, orig_spectrum, fin_spec_axis):
        """
        Spline-interpolate the spectrum onto the new axis and repackage it.

        Parameters
        ----------
        orig_spectrum : `~specutils.Spectrum1D`
            The original 1D spectrum.
        fin_spec_axis : Quantity
            The desired spectral axis array.

        Returns
        -------
        resample_spectrum : `~specutils.Spectrum1D`
            An output spectrum containing the resampled `~specutils.Spectrum1D`
        """
        orig_axis_in_new = orig_spectrum.spectral_axis.to(fin_spec_axis.unit)

        # The spline only extrapolates when an unrecognized treatment was
        # requested; the three standard treatments mask the edges themselves.
        extrapolate = self.extrapolation_treatment not in ('nan_fill',
                                                           'zero_fill',
                                                           'truncate')

        flux_spline = CubicSpline(orig_axis_in_new.value,
                                  orig_spectrum.flux.value,
                                  extrapolate=extrapolate)
        out_flux_val = flux_spline(fin_spec_axis.value)

        new_unc = None
        if orig_spectrum.uncertainty is not None:
            unc_spline = CubicSpline(orig_axis_in_new.value,
                                     orig_spectrum.uncertainty.array,
                                     extrapolate=extrapolate)
            new_unc = orig_spectrum.uncertainty.__class__(
                array=unc_spline(fin_spec_axis.value), unit=orig_spectrum.unit)

        fill_val = 0 if self.extrapolation_treatment == 'zero_fill' else np.nan

        # Mask every output point lying outside the input bin edges.
        orig_edges = orig_axis_in_new.bin_edges
        off_edges = ((fin_spec_axis < np.min(orig_edges))
                     | (np.max(orig_edges) < fin_spec_axis))
        out_flux_val[off_edges] = fill_val
        if new_unc is not None:
            new_unc.array[off_edges] = fill_val

        # Drop the NaN (out-of-range) samples entirely when truncating.
        if self.extrapolation_treatment == 'truncate':
            keep = np.where(~np.isnan(out_flux_val))
            fin_spec_axis = fin_spec_axis[keep]
            if new_unc is not None:
                new_unc = new_unc[keep]
            out_flux_val = out_flux_val[keep]

        return Spectrum1D(spectral_axis=fin_spec_axis,
                          flux=out_flux_val * orig_spectrum.flux.unit,
                          uncertainty=new_unc)
|
astropyREPO_NAMEspecutilsPATH_START.@specutils_extracted@specutils-main@specutils@manipulation@resample.py@.PATH_END.py
|
{
"filename": "copy_injection_recovery.py",
"repo_name": "ThibeauWouters/TurboPE-BNS",
"repo_path": "TurboPE-BNS_extracted/TurboPE-BNS-main/injections/outdir_NRTv2/injection_38/copy_injection_recovery.py",
"type": "Python"
}
|
"""
Idea: try different learning rate schemes to try and fix the injections
"""
import psutil

# Pin this process to CPU core 0.
p = psutil.Process()
p.cpu_affinity([0])

import os

# Run on GPU index 3 only, and limit the fraction of GPU memory that
# XLA/JAX preallocates to 10%.
os.environ['CUDA_VISIBLE_DEVICES'] = "3"
os.environ["XLA_PYTHON_CLIENT_MEM_FRACTION"] = "0.10"
import numpy as np
import argparse
# Regular imports
import argparse
import copy
import numpy as np
from astropy.time import Time
import time
import shutil
import json
import jax
jax.config.update("jax_enable_x64", True)
import jax.numpy as jnp
from jimgw.jim import Jim
from jimgw.single_event.detector import H1, L1, V1
from jimgw.single_event.likelihood import HeterodynedTransientLikelihoodFD, TransientLikelihoodFD
from jimgw.single_event.waveform import RippleTaylorF2, RippleIMRPhenomD_NRTidalv2, RippleIMRPhenomD_NRTidalv2_no_taper
from jimgw.prior import Uniform, Composite
import utils # our plotting and postprocessing utilities script
import optax
# Names of the sampled parameters (order is fixed and used throughout the
# script) and their uniform prior ranges for drawing injection parameters.
NAMING = ['M_c', 'q', 's1_z', 's2_z', 'lambda_1', 'lambda_2', 'd_L', 't_c', 'phase_c', 'cos_iota', 'psi', 'ra', 'sin_dec']

# Uniform prior bounds [low, high] per parameter.
PRIOR = {
    "M_c": [0.8759659737275101, 2.6060030916165484],  # chirp mass
    "q": [0.5, 1.0],  # mass ratio (converted to eta downstream)
    "s1_z": [-0.05, 0.05],  # aligned spin of primary
    "s2_z": [-0.05, 0.05],  # aligned spin of secondary
    "lambda_1": [0.0, 5000.0],  # tidal deformability of primary
    "lambda_2": [0.0, 5000.0],  # tidal deformability of secondary
    "d_L": [30.0, 300.0],  # luminosity distance (presumably Mpc -- TODO confirm)
    "t_c": [-0.1, 0.1],  # timeshift w.r.t. trigger time
    "phase_c": [0.0, 2 * jnp.pi],  # merger phase
    "cos_iota": [-1.0, 1.0],  # cosine of inclination angle
    "psi": [0.0, jnp.pi],  # polarization angle
    "ra": [0.0, 2 * jnp.pi],  # right ascension
    "sin_dec": [-1, 1]  # sine of declination
}
################
### ARGPARSE ###
################
# TODO save these into a new file
def _str2bool(value):
    """Convert a command-line string to a bool.

    argparse's ``type=bool`` treats every non-empty string as True (so
    ``--flag False`` would yield True); this converter parses the usual
    textual spellings instead, while staying backward compatible with
    callers that passed ``True``/``False``.

    Raises
    ------
    argparse.ArgumentTypeError
        If the value is not a recognized boolean spelling.
    """
    if isinstance(value, bool):
        return value
    lowered = str(value).strip().lower()
    if lowered in ("true", "t", "yes", "y", "1"):
        return True
    if lowered in ("false", "f", "no", "n", "0", ""):
        return False
    raise argparse.ArgumentTypeError("Expected a boolean value, got %r" % (value,))


def get_parser(**kwargs):
    """Build the argument parser for the injection-recovery script.

    Parameters
    ----------
    **kwargs
        Only ``add_help`` (bool, default True) is used; it is forwarded to
        `argparse.ArgumentParser`.

    Returns
    -------
    argparse.ArgumentParser
        Parser with all script options registered.
    """
    add_help = kwargs.get("add_help", True)
    parser = argparse.ArgumentParser(
        description="Perform an injection recovery.",
        add_help=add_help,
    )
    # NOTE: GPU device/memory selection is done via environment variables at
    # module import time, not via CLI flags.
    parser.add_argument(
        "--outdir",
        type=str,
        default="./outdir/",
        help="Output directory for the injection.",
    )
    # Boolean options use _str2bool instead of bool: with type=bool any
    # non-empty string (including "False") would parse as True.
    parser.add_argument(
        "--load-existing-config",
        type=_str2bool,
        default=False,
        help="Whether to load and redo an existing injection (True) or to generate a new set of parameters (False).",
    )
    parser.add_argument(
        "--N",
        type=str,
        default="",
        help="Number (or generically, a custom identifier) of this injection, used to locate the output directory. If an empty string is passed (default), we generate a new injection.",
    )
    parser.add_argument(
        "--SNR-threshold",
        type=float,
        default=12,
        help="Skip injections with SNR below this threshold.",
    )
    parser.add_argument(
        "--waveform-approximant",
        type=str,
        default="TaylorF2",
        help="Which waveform approximant to use. Recommended to use TaylorF2 for now, NRTidalv2 might still be a bit unstable.",
    )
    parser.add_argument(
        "--relative-binning-binsize",
        type=int,
        default=100,
        help="Number of bins for the relative binning.",
    )
    parser.add_argument(
        "--relative-binning-ref-params-equal-true-params",
        type=_str2bool,
        default=True,
        help="Whether to set the reference parameters in the relative binning code to injection parameters.",
    )
    parser.add_argument(
        "--save-training-chains",
        type=_str2bool,
        default=False,
        help="Whether to save training chains or not (can be very large!)",
    )
    parser.add_argument(
        "--eps-mass-matrix",
        type=float,
        default=1e-6,
        help="Overall scale factor to rescale the step size of the local sampler.",
    )
    parser.add_argument(
        "--which-local-sampler",
        type=str,
        default="MALA",
        help="Which local sampler to use.",
    )
    parser.add_argument(
        "--smart-initial-guess",
        type=_str2bool,
        default=False,
        help="Distribute the walkers around the injected parameters. TODO change this to reference parameters found by the relative binning code.",
    )
    parser.add_argument(
        "--use-scheduler",
        type=_str2bool,
        default=True,
        help="Use a learning rate scheduler instead of a fixed learning rate.",
    )
    parser.add_argument(
        "--stopping-criterion-global-acc",
        type=float,
        default=1.0,
        help="Stop the run once we reach this global acceptance rate.",
    )
    parser.add_argument(
        "--save-likelihood",
        type=_str2bool,
        default=False,
        help="Whether to save the likelihood object",
    )
    parser.add_argument(
        "--tight-Mc-prior",
        type=_str2bool,
        default=False,
        help="Whether to use a tight prior on the Mc values or not",
    )
    return parser
####################
### Script setup ###
####################
def body(args):
"""
Run an injection and recovery. To get an explanation of the hyperparameters, go to:
- jim hyperparameters: https://github.com/ThibeauWouters/jim/blob/8cb4ef09fefe9b353bfb89273a4bc0ee52060d72/src/jimgw/jim.py#L26
- flowMC hyperparameters: https://github.com/ThibeauWouters/flowMC/blob/ad1a32dcb6984b2e178d7204a53d5da54b578073/src/flowMC/sampler/Sampler.py#L40
"""
start_time = time.time()
# TODO move and get these as arguments
# Deal with the hyperparameters
naming = NAMING
HYPERPARAMETERS = {
"flowmc":
{
"n_loop_training": 400,
"n_loop_production": 50,
"n_local_steps": 5,
"n_global_steps": 400,
"n_epochs": 50,
"n_chains": 1000,
"learning_rate": 0.001, # using a scheduler below
"max_samples": 50000,
"momentum": 0.9,
"batch_size": 50000,
"use_global": True,
"logging": True,
"keep_quantile": 0.0,
"local_autotune": None,
"train_thinning": 10,
"output_thinning": 30,
"n_sample_max": 10000,
"precompile": False,
"verbose": False,
"outdir": args.outdir,
"stopping_criterion_global_acc": args.stopping_criterion_global_acc,
"which_local_sampler": "MALA"
},
"jim":
{
"seed": 0,
"n_chains": 1000,
"num_layers": 10,
"hidden_size": [128, 128],
"num_bins": 8,
}
}
flowmc_hyperparameters = HYPERPARAMETERS["flowmc"]
jim_hyperparameters = HYPERPARAMETERS["jim"]
hyperparameters = {**flowmc_hyperparameters, **jim_hyperparameters}
# TODO can I just replace this with update dict?
for key, value in args.__dict__.items():
if key in hyperparameters:
hyperparameters[key] = value
### POLYNOMIAL SCHEDULER
if args.use_scheduler:
print("Using polynomial learning rate scheduler")
total_epochs = hyperparameters["n_epochs"] * hyperparameters["n_loop_training"]
start = int(total_epochs / 10)
start_lr = 1e-3
end_lr = 1e-5
power = 4.0
schedule_fn = optax.polynomial_schedule(start_lr, end_lr, power, total_epochs-start, transition_begin=start)
hyperparameters["learning_rate"] = schedule_fn
print(f"Saving output to {args.outdir}")
# Fetch waveform used
supported_waveforms = ["TaylorF2", "NRTidalv2", "IMRPhenomD_NRTidalv2"]
if args.waveform_approximant not in supported_waveforms:
print(f"Waveform approximant {args.waveform_approximant} not supported. Supported waveforms are {supported_waveforms}. Changing to TaylorF2.")
args.waveform_approximant = "TaylorF2"
if args.waveform_approximant == "TaylorF2":
ripple_waveform_fn = RippleTaylorF2
elif args.waveform_approximant in ["IMRPhenomD_NRTidalv2", "NRTv2", "NRTidalv2"]:
ripple_waveform_fn = RippleIMRPhenomD_NRTidalv2
else:
raise ValueError(f"Waveform approximant {args.waveform_approximant} not supported.")
# Before main code, check if outdir is correct dir format TODO improve with sys?
if args.outdir[-1] != "/":
args.outdir += "/"
outdir = f"{args.outdir}injection_{args.N}/"
# Get the prior bounds, both as 1D and 2D arrays
prior_ranges = jnp.array([PRIOR[name] for name in naming])
prior_low, prior_high = prior_ranges[:, 0], prior_ranges[:, 1]
bounds = np.array(list(PRIOR.values()))
# Now go over to creating parameters, and potentially check SNR cutoff
network_snr = 0.0
print(f"The SNR threshold parameter is set to {args.SNR_threshold}")
while network_snr < args.SNR_threshold:
# Generate the parameters or load them from an existing file
if args.load_existing_config:
config_path = f"{outdir}config.json"
print(f"Loading existing config, path: {config_path}")
config = json.load(open(config_path))
else:
print(f"Generating new config")
config = utils.generate_config(prior_low, prior_high, naming, args.N, args.outdir)
key = jax.random.PRNGKey(config["seed"])
# Save the given script hyperparams
with open(f"{outdir}script_args.json", 'w') as json_file:
json.dump(args.__dict__, json_file)
# Start injections
print("Injecting signals . . .")
waveform = ripple_waveform_fn(f_ref=config["fref"])
# Create frequency grid
freqs = jnp.arange(
config["fmin"],
config["f_sampling"] / 2, # maximum frequency being halved of sampling frequency
1. / config["duration"]
)
# convert injected mass ratio to eta, and apply arccos and arcsin
q = config["q"]
eta = q / (1 + q) ** 2
iota = float(jnp.arccos(config["cos_iota"]))
dec = float(jnp.arcsin(config["sin_dec"]))
# Setup the timing setting for the injection
epoch = config["duration"] - config["post_trigger_duration"]
gmst = Time(config["trigger_time"], format='gps').sidereal_time('apparent', 'greenwich').rad
# Array of injection parameters
true_param = {
'M_c': config["M_c"], # chirp mass
'eta': eta, # symmetric mass ratio 0 < eta <= 0.25
's1_z': config["s1_z"], # aligned spin of priminary component s1_z.
's2_z': config["s2_z"], # aligned spin of secondary component s2_z.
'lambda_1': config["lambda_1"], # tidal deformability of priminary component lambda_1.
'lambda_2': config["lambda_2"], # tidal deformability of secondary component lambda_2.
'd_L': config["d_L"], # luminosity distance
't_c': config["t_c"], # timeshift w.r.t. trigger time
'phase_c': config["phase_c"], # merging phase
'iota': iota, # inclination angle
'psi': config["psi"], # polarization angle
'ra': config["ra"], # right ascension
'dec': dec # declination
}
# Get the true parameter values for the plots
truths = copy.deepcopy(true_param)
truths["eta"] = q
truths = np.fromiter(truths.values(), dtype=float)
detector_param = {
'ra': config["ra"],
'dec': dec,
'gmst': gmst,
'psi': config["psi"],
'epoch': epoch,
't_c': config["t_c"],
}
print(f"The injected parameters are {true_param}")
# Generating the geocenter waveform
h_sky = waveform(freqs, true_param)
# Setup interferometers
ifos = [H1, L1, V1]
psd_files = ["./psds/psd.txt", "./psds/psd.txt", "./psds/psd_virgo.txt"]
# inject signal into ifos
for idx, ifo in enumerate(ifos):
key, subkey = jax.random.split(key)
ifo.inject_signal(
subkey,
freqs,
h_sky,
detector_param,
psd_file=psd_files[idx] # note: the function load_psd actaully loads the asd
)
print("Signal injected")
# Compute the SNR
h1_snr = utils.compute_snr(H1, h_sky, detector_param)
l1_snr = utils.compute_snr(L1, h_sky, detector_param)
v1_snr = utils.compute_snr(V1, h_sky, detector_param)
network_snr = np.sqrt(h1_snr**2 + l1_snr**2 + v1_snr**2)
# If the SNR is too low, we need to generate new parameters
if network_snr < args.SNR_threshold:
print(f"Network SNR is less than {args.SNR_threshold}, generating new parameters")
if args.load_existing_config:
raise ValueError("SNR is less than threshold, but loading existing config. This should not happen!")
print("H1 SNR:", h1_snr)
print("L1 SNR:", l1_snr)
print("V1 SNR:", v1_snr)
print("Network SNR:", network_snr)
print(f"Saving network SNR")
with open(outdir + 'network_snr.txt', 'w') as file:
file.write(str(network_snr))
print("Start prior setup")
# Priors without transformation
if args.tight_Mc_prior:
print("INFO: Using a tight chirp mass prior")
true_mc = true_param["M_c"]
Mc_prior = Uniform(true_mc - 0.1, true_mc + 0.1, naming=['M_c'])
else:
Mc_prior = Uniform(prior_low[0], prior_high[0], naming=['M_c'])
q_prior = Uniform(prior_low[1], prior_high[1], naming=['q'],
transforms={
'q': (
'eta',
lambda params: params['q'] / (1 + params['q']) ** 2
)
}
)
s1z_prior = Uniform(prior_low[2], prior_high[2], naming=['s1_z'])
s2z_prior = Uniform(prior_low[3], prior_high[3], naming=['s2_z'])
lambda_1_prior = Uniform(prior_low[4], prior_high[4], naming=['lambda_1'])
lambda_2_prior = Uniform(prior_low[5], prior_high[5], naming=['lambda_2'])
dL_prior = Uniform(prior_low[6], prior_high[6], naming=['d_L'])
tc_prior = Uniform(prior_low[7], prior_high[7], naming=['t_c'])
phic_prior = Uniform(prior_low[8], prior_high[8], naming=['phase_c'])
cos_iota_prior = Uniform(prior_low[9], prior_high[9], naming=["cos_iota"],
transforms={
"cos_iota": (
"iota",
lambda params: jnp.arccos(
jnp.arcsin(jnp.sin(params["cos_iota"] / 2 * jnp.pi)) * 2 / jnp.pi
),
)
},
)
psi_prior = Uniform(prior_low[10], prior_high[10], naming=["psi"])
ra_prior = Uniform(prior_low[11], prior_high[11], naming=["ra"])
sin_dec_prior = Uniform(prior_low[12], prior_high[12], naming=["sin_dec"],
transforms={
"sin_dec": (
"dec",
lambda params: jnp.arcsin(
jnp.arcsin(jnp.sin(params["sin_dec"] / 2 * jnp.pi)) * 2 / jnp.pi
),
)
},
)
# Save the prior bounds
print("Saving prior bounds")
utils.save_prior_bounds(prior_low, prior_high, outdir)
# Compose the prior
prior_list = [
Mc_prior,
q_prior,
s1z_prior,
s2z_prior,
lambda_1_prior,
lambda_2_prior,
dL_prior,
tc_prior,
phic_prior,
cos_iota_prior,
psi_prior,
ra_prior,
sin_dec_prior,
]
complete_prior = Composite(prior_list)
bounds = jnp.array([[p.xmin, p.xmax] for p in complete_prior.priors])
print("Finished prior setup")
print("Initializing likelihood")
if args.relative_binning_ref_params_equal_true_params:
ref_params = true_param
print("Using the true parameters as reference parameters for the relative binning")
else:
ref_params = None
print("Will search for reference waveform for relative binning")
# ### TODO remove
# # Explicitly fix relative binning for NRTidalv2
# if args.waveform_approximant in ["IMRPhenomD_NRTidalv2", "NRTidalv2"]:
# # ## TODO this might be broken?
# # # # Explicitly set the f_min and f_max used there
# # # relbin_kwargs = {"f_min": config["fmin"], "f_max": config["f_sampling"] / 2}
# # relbin_kwargs = {}
# # # Set the reference parameters at the ideal location for not breaking relative binning
# # print("Setting the reference parameters to not break the relative binning for NRTidalv2")
# # ref_params = true_param
# # ref_params["lambda_1"] = 1.0
# # ref_params["lambda_2"] = 1.0
# print("Now, the reference parameters are: ")
# print(ref_params)
# else:
# relbin_kwargs = {}
relbin_kwargs = {}
if args.waveform_approximant == "IMRPhenomD_NRTidalv2":
print("Using IMRPhenomD_NRTidalv2 no taper as the reference waveform for the likelihood")
reference_waveform = RippleIMRPhenomD_NRTidalv2_no_taper(f_ref=config["fref"])
else:
reference_waveform = waveform
likelihood = HeterodynedTransientLikelihoodFD(
ifos,
prior=complete_prior,
bounds=bounds,
n_bins = args.relative_binning_binsize,
waveform=waveform,
reference_waveform=reference_waveform,
trigger_time=config["trigger_time"],
duration=config["duration"],
post_trigger_duration=config["post_trigger_duration"],
ref_params=ref_params,
**relbin_kwargs
)
if args.save_likelihood:
print(f"INFO: Saving the likelihood to {outdir}")
import pickle
with open(f'{outdir}likelihood.pickle', 'wb') as handle:
pickle.dump(likelihood, handle, protocol=pickle.HIGHEST_PROTOCOL)
# Save the ref params
utils.save_relative_binning_ref_params(likelihood, outdir)
    # Generate arguments for the local sampler
mass_matrix = jnp.eye(len(prior_list))
for idx, prior in enumerate(prior_list):
mass_matrix = mass_matrix.at[idx, idx].set(prior.xmax - prior.xmin) # fetch the prior range
local_sampler_arg = {'step_size': mass_matrix * args.eps_mass_matrix} # set the overall step size
hyperparameters["local_sampler_arg"] = local_sampler_arg
# Create jim object
jim = Jim(
likelihood,
complete_prior,
**hyperparameters
)
if args.smart_initial_guess:
n_chains = hyperparameters["n_chains"]
n_dim = len(prior_list)
initial_guess = utils.generate_smart_initial_guess(gmst, [H1, L1, V1], true_param, n_chains, n_dim, prior_low, prior_high)
# Plot it
utils.plot_chains(initial_guess, "initial_guess", outdir, truths = truths)
else:
initial_guess = jnp.array([])
### Finally, do the sampling
jim.sample(jax.random.PRNGKey(24), initial_guess = initial_guess)
# === Show results, save output ===
# Print a summary to screen:
jim.print_summary()
# Save and plot the results of the run
# - training phase
name = outdir + f'results_training.npz'
print(f"Saving samples to {name}")
state = jim.Sampler.get_sampler_state(training = True)
chains, log_prob, local_accs, global_accs, loss_vals = state["chains"], state["log_prob"], state["local_accs"], state["global_accs"], state["loss_vals"]
local_accs = jnp.mean(local_accs, axis=0)
global_accs = jnp.mean(global_accs, axis=0)
if args.save_training_chains:
np.savez(name, log_prob=log_prob, local_accs=local_accs, global_accs=global_accs, loss_vals=loss_vals, chains=chains)
else:
np.savez(name, log_prob=log_prob, local_accs=local_accs, global_accs=global_accs, loss_vals=loss_vals)
utils.plot_accs(local_accs, "Local accs (training)", "local_accs_training", outdir)
utils.plot_accs(global_accs, "Global accs (training)", "global_accs_training", outdir)
utils.plot_loss_vals(loss_vals, "Loss", "loss_vals", outdir)
utils.plot_log_prob(log_prob, "Log probability (training)", "log_prob_training", outdir)
# - production phase
name = outdir + f'results_production.npz'
state = jim.Sampler.get_sampler_state(training = False)
chains, log_prob, local_accs, global_accs = state["chains"], state["log_prob"], state["local_accs"], state["global_accs"]
local_accs = jnp.mean(local_accs, axis=0)
global_accs = jnp.mean(global_accs, axis=0)
np.savez(name, chains=chains, log_prob=log_prob, local_accs=local_accs, global_accs=global_accs)
utils.plot_accs(local_accs, "Local accs (production)", "local_accs_production", outdir)
utils.plot_accs(global_accs, "Global accs (production)", "global_accs_production", outdir)
utils.plot_log_prob(log_prob, "Log probability (production)", "log_prob_production", outdir)
# Plot the chains as corner plots
utils.plot_chains(chains, "chains_production", outdir, truths = truths)
# Save the NF and show a plot of samples from the flow
print("Saving the NF")
jim.Sampler.save_flow(outdir + "nf_model")
name = outdir + 'results_NF.npz'
chains = jim.Sampler.sample_flow(10_000)
np.savez(name, chains = chains)
# Finally, copy over this script to the outdir for reproducibility
shutil.copy2(__file__, outdir + "copy_injection_recovery.py")
print("Saving the jim hyperparameters")
jim.save_hyperparameters(outdir = outdir)
end_time = time.time()
runtime = end_time - start_time
print(f"Time taken: {runtime} seconds ({(runtime)/60} minutes)")
print(f"Saving runtime")
with open(outdir + 'runtime.txt', 'w') as file:
file.write(str(runtime))
print("Finished injection recovery successfully!")
############
### MAIN ###
############
def main(given_args = None):
    """Entry point: parse CLI arguments, apply optional overrides, and run the injection recovery.

    Parameters
    ----------
    given_args : dict | None
        Optional mapping of argument names to values. When provided, these
        override the parsed command-line arguments (useful when this script
        is driven programmatically from another module).
    """
    parser = get_parser()
    args = parser.parse_args()

    # Apply programmatic overrides, if any, on top of the CLI arguments
    if given_args is not None:
        print(f"Overriding CLI arguments with: {given_args}")
        args.__dict__.update(given_args)

    # N locates an existing injection directory; it is mandatory when reusing a config
    if args.load_existing_config and args.N == "":
        raise ValueError("If load_existing_config is True, you need to specify the N argument to locate the existing injection. ")

    print("------------------------------------")
    print("Arguments script:")
    for key, value in args.__dict__.items():
        print(f"{key}: {value}")
    print("------------------------------------")

    print("Starting main code")

    # If no N is given, derive it from the directory structure of outdir
    if not args.N:
        args.N = utils.get_N(args.outdir)

    # TODO fix that os uses these
    # import os
    # os.environ["XLA_PYTHON_CLIENT_MEM_FRACTION"] = str(args.GPU_memory_fraction)
    # os.environ['CUDA_VISIBLE_DEVICES'] = str(args.GPU_device)
    # print(f"Running on GPU {args.GPU_device}")

    # Execute the injection-recovery pipeline
    body(args)
# Run the full injection-recovery pipeline when executed as a script.
if __name__ == "__main__":
    main()
|
ThibeauWoutersREPO_NAMETurboPE-BNSPATH_START.@TurboPE-BNS_extracted@TurboPE-BNS-main@injections@outdir_NRTv2@injection_38@copy_injection_recovery.py@.PATH_END.py
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.