metadata
dict | text
stringlengths 0
40.6M
| id
stringlengths 14
255
|
|---|---|---|
{
"filename": "test_tz_convert.py",
"repo_name": "pandas-dev/pandas",
"repo_path": "pandas_extracted/pandas-main/pandas/tests/indexes/datetimes/methods/test_tz_convert.py",
"type": "Python"
}
|
from datetime import datetime
import dateutil.tz
from dateutil.tz import gettz
import numpy as np
import pytest
from pandas._libs.tslibs import timezones
from pandas import (
DatetimeIndex,
Index,
NaT,
Timestamp,
date_range,
offsets,
)
import pandas._testing as tm
class TestTZConvert:
    """Tests for ``DatetimeIndex.tz_convert`` / ``tz_localize`` behavior."""

    def test_tz_convert_nat(self):
        # GH#5546: NaT entries must survive localize/convert round trips and
        # offset arithmetic without being turned into real timestamps.
        dates = [NaT]
        idx = DatetimeIndex(dates)
        idx = idx.tz_localize("US/Pacific")
        tm.assert_index_equal(idx, DatetimeIndex(dates, tz="US/Pacific"))
        idx = idx.tz_convert("US/Eastern")
        tm.assert_index_equal(idx, DatetimeIndex(dates, tz="US/Eastern"))
        idx = idx.tz_convert("UTC")
        tm.assert_index_equal(idx, DatetimeIndex(dates, tz="UTC"))

        dates = ["2010-12-01 00:00", "2010-12-02 00:00", NaT]
        idx = DatetimeIndex(dates)
        idx = idx.tz_localize("US/Pacific")
        tm.assert_index_equal(idx, DatetimeIndex(dates, tz="US/Pacific"))

        idx = idx.tz_convert("US/Eastern")
        expected = ["2010-12-01 03:00", "2010-12-02 03:00", NaT]
        tm.assert_index_equal(idx, DatetimeIndex(expected, tz="US/Eastern"))

        idx = idx + offsets.Hour(5)
        expected = ["2010-12-01 08:00", "2010-12-02 08:00", NaT]
        tm.assert_index_equal(idx, DatetimeIndex(expected, tz="US/Eastern"))

        idx = idx.tz_convert("US/Pacific")
        expected = ["2010-12-01 05:00", "2010-12-02 05:00", NaT]
        tm.assert_index_equal(idx, DatetimeIndex(expected, tz="US/Pacific"))

        idx = idx + np.timedelta64(3, "h")
        expected = ["2010-12-01 08:00", "2010-12-02 08:00", NaT]
        tm.assert_index_equal(idx, DatetimeIndex(expected, tz="US/Pacific"))

        idx = idx.tz_convert("US/Eastern")
        expected = ["2010-12-01 11:00", "2010-12-02 11:00", NaT]
        tm.assert_index_equal(idx, DatetimeIndex(expected, tz="US/Eastern"))

    @pytest.mark.parametrize("prefix", ["", "dateutil/"])
    def test_dti_tz_convert_compat_timestamp(self, prefix):
        # Converting a scalar Timestamp taken from the index must agree with
        # converting the whole index and then indexing.
        strdates = ["1/1/2012", "3/1/2012", "4/1/2012"]
        idx = DatetimeIndex(strdates, tz=prefix + "US/Eastern")

        conv = idx[0].tz_convert(prefix + "US/Pacific")
        expected = idx.tz_convert(prefix + "US/Pacific")[0]

        assert conv == expected

    def test_dti_tz_convert_hour_overflow_dst(self):
        # Regression test for GH#13306

        # sorted case US/Eastern -> UTC
        ts = ["2008-05-12 09:50:00", "2008-12-12 09:50:35", "2009-05-12 09:50:32"]
        tt = DatetimeIndex(ts).tz_localize("US/Eastern")
        ut = tt.tz_convert("UTC")
        expected = Index([13, 14, 13], dtype=np.int32)
        tm.assert_index_equal(ut.hour, expected)

        # sorted case UTC -> US/Eastern
        ts = ["2008-05-12 13:50:00", "2008-12-12 14:50:35", "2009-05-12 13:50:32"]
        tt = DatetimeIndex(ts).tz_localize("UTC")
        ut = tt.tz_convert("US/Eastern")
        expected = Index([9, 9, 9], dtype=np.int32)
        tm.assert_index_equal(ut.hour, expected)

        # unsorted case US/Eastern -> UTC
        ts = ["2008-05-12 09:50:00", "2008-12-12 09:50:35", "2008-05-12 09:50:32"]
        tt = DatetimeIndex(ts).tz_localize("US/Eastern")
        ut = tt.tz_convert("UTC")
        expected = Index([13, 14, 13], dtype=np.int32)
        tm.assert_index_equal(ut.hour, expected)

        # unsorted case UTC -> US/Eastern
        ts = ["2008-05-12 13:50:00", "2008-12-12 14:50:35", "2008-05-12 13:50:32"]
        tt = DatetimeIndex(ts).tz_localize("UTC")
        ut = tt.tz_convert("US/Eastern")
        expected = Index([9, 9, 9], dtype=np.int32)
        tm.assert_index_equal(ut.hour, expected)

    @pytest.mark.parametrize("tz", ["US/Eastern", "dateutil/US/Eastern"])
    def test_dti_tz_convert_hour_overflow_dst_timestamps(self, tz):
        # Regression test for GH#13306

        # sorted case US/Eastern -> UTC
        ts = [
            Timestamp("2008-05-12 09:50:00", tz=tz),
            Timestamp("2008-12-12 09:50:35", tz=tz),
            Timestamp("2009-05-12 09:50:32", tz=tz),
        ]
        tt = DatetimeIndex(ts)
        ut = tt.tz_convert("UTC")
        expected = Index([13, 14, 13], dtype=np.int32)
        tm.assert_index_equal(ut.hour, expected)

        # sorted case UTC -> US/Eastern
        ts = [
            Timestamp("2008-05-12 13:50:00", tz="UTC"),
            Timestamp("2008-12-12 14:50:35", tz="UTC"),
            Timestamp("2009-05-12 13:50:32", tz="UTC"),
        ]
        tt = DatetimeIndex(ts)
        ut = tt.tz_convert("US/Eastern")
        expected = Index([9, 9, 9], dtype=np.int32)
        tm.assert_index_equal(ut.hour, expected)

        # unsorted case US/Eastern -> UTC
        ts = [
            Timestamp("2008-05-12 09:50:00", tz=tz),
            Timestamp("2008-12-12 09:50:35", tz=tz),
            Timestamp("2008-05-12 09:50:32", tz=tz),
        ]
        tt = DatetimeIndex(ts)
        ut = tt.tz_convert("UTC")
        expected = Index([13, 14, 13], dtype=np.int32)
        tm.assert_index_equal(ut.hour, expected)

        # unsorted case UTC -> US/Eastern
        ts = [
            Timestamp("2008-05-12 13:50:00", tz="UTC"),
            Timestamp("2008-12-12 14:50:35", tz="UTC"),
            Timestamp("2008-05-12 13:50:32", tz="UTC"),
        ]
        tt = DatetimeIndex(ts)
        ut = tt.tz_convert("US/Eastern")
        expected = Index([9, 9, 9], dtype=np.int32)
        tm.assert_index_equal(ut.hour, expected)

    @pytest.mark.parametrize("freq, n", [("h", 1), ("min", 60), ("s", 3600)])
    def test_dti_tz_convert_trans_pos_plus_1__bug(self, freq, n):
        # Regression test for tslib.tz_convert(vals, tz1, tz2).
        # See GH#4496 for details.
        idx = date_range(datetime(2011, 3, 26, 23), datetime(2011, 3, 27, 1), freq=freq)
        idx = idx.tz_localize("UTC")
        idx = idx.tz_convert("Europe/Moscow")

        expected = np.repeat(np.array([3, 4, 5]), np.array([n, n, 1]))
        tm.assert_index_equal(idx.hour, Index(expected, dtype=np.int32))

    def test_dti_tz_convert_dst(self):
        # Hour values around DST transitions at several frequencies.
        for freq, n in [("h", 1), ("min", 60), ("s", 3600)]:
            # Start DST
            idx = date_range(
                "2014-03-08 23:00", "2014-03-09 09:00", freq=freq, tz="UTC"
            )
            idx = idx.tz_convert("US/Eastern")
            expected = np.repeat(
                np.array([18, 19, 20, 21, 22, 23, 0, 1, 3, 4, 5]),
                np.array([n, n, n, n, n, n, n, n, n, n, 1]),
            )
            tm.assert_index_equal(idx.hour, Index(expected, dtype=np.int32))

            idx = date_range(
                "2014-03-08 18:00", "2014-03-09 05:00", freq=freq, tz="US/Eastern"
            )
            idx = idx.tz_convert("UTC")
            expected = np.repeat(
                np.array([23, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9]),
                np.array([n, n, n, n, n, n, n, n, n, n, 1]),
            )
            tm.assert_index_equal(idx.hour, Index(expected, dtype=np.int32))

            # End DST
            idx = date_range(
                "2014-11-01 23:00", "2014-11-02 09:00", freq=freq, tz="UTC"
            )
            idx = idx.tz_convert("US/Eastern")
            expected = np.repeat(
                np.array([19, 20, 21, 22, 23, 0, 1, 1, 2, 3, 4]),
                np.array([n, n, n, n, n, n, n, n, n, n, 1]),
            )
            tm.assert_index_equal(idx.hour, Index(expected, dtype=np.int32))

            idx = date_range(
                "2014-11-01 18:00", "2014-11-02 05:00", freq=freq, tz="US/Eastern"
            )
            idx = idx.tz_convert("UTC")
            expected = np.repeat(
                np.array([22, 23, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]),
                np.array([n, n, n, n, n, n, n, n, n, n, n, n, 1]),
            )
            tm.assert_index_equal(idx.hour, Index(expected, dtype=np.int32))

        # daily
        # Start DST
        idx = date_range("2014-03-08 00:00", "2014-03-09 00:00", freq="D", tz="UTC")
        idx = idx.tz_convert("US/Eastern")
        tm.assert_index_equal(idx.hour, Index([19, 19], dtype=np.int32))

        idx = date_range(
            "2014-03-08 00:00", "2014-03-09 00:00", freq="D", tz="US/Eastern"
        )
        idx = idx.tz_convert("UTC")
        tm.assert_index_equal(idx.hour, Index([5, 5], dtype=np.int32))

        # End DST
        idx = date_range("2014-11-01 00:00", "2014-11-02 00:00", freq="D", tz="UTC")
        idx = idx.tz_convert("US/Eastern")
        tm.assert_index_equal(idx.hour, Index([20, 20], dtype=np.int32))

        idx = date_range(
            # "000:00" in the original was a typo for "00:00" (parses the same)
            "2014-11-01 00:00", "2014-11-02 00:00", freq="D", tz="US/Eastern"
        )
        idx = idx.tz_convert("UTC")
        tm.assert_index_equal(idx.hour, Index([4, 4], dtype=np.int32))

    def test_tz_convert_roundtrip(self, tz_aware_fixture):
        # Converting to a tz and back to naive must recover the original index.
        tz = tz_aware_fixture
        idx1 = date_range(start="2014-01-01", end="2014-12-31", freq="ME", tz="UTC")
        exp1 = date_range(start="2014-01-01", end="2014-12-31", freq="ME")

        idx2 = date_range(start="2014-01-01", end="2014-12-31", freq="D", tz="UTC")
        exp2 = date_range(start="2014-01-01", end="2014-12-31", freq="D")

        idx3 = date_range(start="2014-01-01", end="2014-03-01", freq="h", tz="UTC")
        exp3 = date_range(start="2014-01-01", end="2014-03-01", freq="h")

        idx4 = date_range(start="2014-08-01", end="2014-10-31", freq="min", tz="UTC")
        exp4 = date_range(start="2014-08-01", end="2014-10-31", freq="min")

        for idx, expected in [(idx1, exp1), (idx2, exp2), (idx3, exp3), (idx4, exp4)]:
            converted = idx.tz_convert(tz)
            reset = converted.tz_convert(None)
            tm.assert_index_equal(reset, expected)
            assert reset.tzinfo is None
            expected = converted.tz_convert("UTC").tz_localize(None)
            expected = expected._with_freq("infer")
            tm.assert_index_equal(reset, expected)

    def test_dti_tz_convert_tzlocal(self):
        # GH#13583
        # tz_convert doesn't affect to internal
        dti = date_range(start="2001-01-01", end="2001-03-01", tz="UTC")
        dti2 = dti.tz_convert(dateutil.tz.tzlocal())
        tm.assert_numpy_array_equal(dti2.asi8, dti.asi8)

        dti = date_range(start="2001-01-01", end="2001-03-01", tz=dateutil.tz.tzlocal())
        dti2 = dti.tz_convert(None)
        tm.assert_numpy_array_equal(dti2.asi8, dti.asi8)

    @pytest.mark.parametrize(
        "tz",
        [
            "US/Eastern",
            "dateutil/US/Eastern",
            "pytz/US/Eastern",
            gettz("US/Eastern"),
        ],
    )
    def test_dti_tz_convert_utc_to_local_no_modify(self, tz):
        if isinstance(tz, str) and tz.startswith("pytz/"):
            pytz = pytest.importorskip("pytz")
            tz = pytz.timezone(tz.removeprefix("pytz/"))
        rng = date_range("3/11/2012", "3/12/2012", freq="h", tz="utc")
        rng_eastern = rng.tz_convert(tz)

        # Values are unmodified
        tm.assert_numpy_array_equal(rng.asi8, rng_eastern.asi8)

        assert timezones.tz_compare(rng_eastern.tz, timezones.maybe_get_tz(tz))

    @pytest.mark.parametrize("tzstr", ["US/Eastern", "dateutil/US/Eastern"])
    def test_tz_convert_unsorted(self, tzstr):
        # Reversing the index then taking .hour must match taking .hour then
        # reversing.
        dr = date_range("2012-03-09", freq="h", periods=100, tz="utc")
        dr = dr.tz_convert(tzstr)

        result = dr[::-1].hour
        exp = dr.hour[::-1]
        tm.assert_almost_equal(result, exp)
|
pandas-devREPO_NAMEpandasPATH_START.@pandas_extracted@pandas-main@pandas@tests@indexes@datetimes@methods@test_tz_convert.py@.PATH_END.py
|
{
"filename": "run_episode.py",
"repo_name": "projectchrono/chrono",
"repo_path": "chrono_extracted/chrono-main/src/demos/python/chrono-tensorflow/PPO/run_episode.py",
"type": "Python"
}
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Jan 26 11:36:45 2019
@author: simonebenatti
"""
import sys
sys.path.append('../envs')
import chtrain as gym
import numpy as np
from policy import Policy
from utils import Scaler
def run_parallel_episodes(arg):
    """Run one episode in a freshly-created environment and return its trajectory.

    Args:
        arg: indexable bundle of worker parameters.  From usage here:
            arg[0], arg[1], arg[2] are forwarded to ``Policy`` (and arg[0] to
            ``Scaler``), arg[3] is the ``time_state`` flag forwarded to
            ``run_episode``, arg[4] is the environment id passed to
            ``gym.Init`` -- TODO confirm slot semantics against the caller.

    Returns:
        dict with keys 'observes', 'actions', 'rewards', 'unscaled_obs'
        holding the NumPy arrays produced by ``run_episode``.
    """
    # NOTE: the original kept a `total_steps` counter that was never read or
    # returned; it has been removed as dead code.
    env_c = gym.Init(arg[4], False)
    policy = Policy(arg[0], arg[1], arg[2], arg[4], True)
    scaler = Scaler(arg[0], arg[4])
    scaler.resume()  # restore previously-saved scaling statistics
    observes, actions, rewards, unscaled_obs = run_episode(env_c, policy, scaler, arg[3])
    trajectory = {'observes': observes,
                  'actions': actions,
                  'rewards': rewards,
                  'unscaled_obs': unscaled_obs}
    policy.close_sess()  # release the policy's session before returning
    return trajectory
def run_episode(env, policy, scaler, time_state):
    """ Run single episode

    Args:
        env: environment (object) with reset() and step() methods
        policy: policy object with sample() method
        scaler: scaler object, scales/offsets each observation
        time_state: if True, append a (scaled-down) step counter to each
            observation as an extra feature

    Returns: 4-tuple of NumPy arrays
        observes: shape = (episode len, obs_dim)
        actions: shape = (episode len, act_dim)
        rewards: shape = (episode len,)
        unscaled_obs: dataset for training scaler, shape = (episode len, obs_dim)
    """
    obs = env.reset()  # resets whenever an episode begins
    observes, actions, rewards, unscaled_obs = [], [], [], []
    done = False
    step = 0.0
    scale, offset = scaler.get()
    if time_state:
        scale[-1] = 1.0  # don't scale time step feature
        offset[-1] = 0.0  # don't offset time step feature
    while not done:
        obs = obs.astype(np.float64).reshape((1, -1))
        if time_state:
            obs = np.append(obs, [[step]], axis=1)  # add time step feature TODO: check if this extra state is useful
        unscaled_obs.append(obs)
        obs = (obs - offset) * scale  # center and scale observations TODO: check if scaler is useful (it should be according to literature)
        observes.append(obs)
        action = policy.sample(obs).reshape((1, -1)).astype(np.float64)
        actions.append(action)
        obs, reward, done, _ = env.step(action)  # state, reward, done, info = env.step(action)
        if not isinstance(reward, float):
            # np.asscalar was removed in NumPy 1.23; float() handles both
            # 0-d arrays and NumPy scalar types.
            reward = float(reward)
        rewards.append(reward)
        step += 1e-3  # increments time step feature
    return (np.concatenate(observes), np.concatenate(actions),
            np.array(rewards, dtype=np.float64), np.concatenate(unscaled_obs))
|
projectchronoREPO_NAMEchronoPATH_START.@chrono_extracted@chrono-main@src@demos@python@chrono-tensorflow@PPO@run_episode.py@.PATH_END.py
|
{
"filename": "_range.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/layout/scene/yaxis/_range.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class RangeValidator(_plotly_utils.basevalidators.InfoArrayValidator):
    """Validator for the ``layout.scene.yaxis.range`` info-array property."""

    def __init__(self, plotly_name="range", parent_name="layout.scene.yaxis", **kwargs):
        # Each of the two endpoints accepts any value and implies
        # autorange=False when edited.
        endpoint_spec = {
            "editType": "plot",
            "impliedEdits": {"^autorange": False},
            "valType": "any",
        }
        super(RangeValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            anim=kwargs.pop("anim", False),
            edit_type=kwargs.pop("edit_type", "plot"),
            implied_edits=kwargs.pop("implied_edits", {"autorange": False}),
            items=kwargs.pop("items", [dict(endpoint_spec), dict(endpoint_spec)]),
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@layout@scene@yaxis@_range.py@.PATH_END.py
|
{
"filename": "_maxallowed.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/layout/scene/yaxis/_maxallowed.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class MaxallowedValidator(_plotly_utils.basevalidators.AnyValidator):
    """Validator for the ``layout.scene.yaxis.maxallowed`` property."""

    def __init__(
        self, plotly_name="maxallowed", parent_name="layout.scene.yaxis", **kwargs
    ):
        # Fill in defaults only when the caller did not override them.
        kwargs.setdefault("edit_type", "plot")
        kwargs.setdefault("implied_edits", {"^autorange": False})
        super(MaxallowedValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=kwargs.pop("edit_type"),
            implied_edits=kwargs.pop("implied_edits"),
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@layout@scene@yaxis@_maxallowed.py@.PATH_END.py
|
{
"filename": "test_b1.py",
"repo_name": "nickhand/pyRSD",
"repo_path": "pyRSD_extracted/pyRSD-master/pyRSD/tests/test_qso_derivatives/test_b1.py",
"type": "Python"
}
|
from . import numdifftools, numpy as np
from pyRSD.rsd.power.qso.derivatives import dPqso_db1
NMU = 41
def test_partial(driver):
    """Check the analytic dPqso/db1 derivative against a numerical one."""
    model = driver.theory.model

    # get the deriv arguments (the original computed k and mu twice;
    # the duplicate block has been removed)
    k = driver.data.combined_k
    mu = np.linspace(0., 1., NMU)

    # broadcast to the right shape
    k = k[:, np.newaxis]
    mu = mu[np.newaxis, :]
    k, mu = np.broadcast_arrays(k, mu)
    k = k.ravel(order='F')
    mu = mu.ravel(order='F')

    pars = driver.theory.fit_params
    args = (model, pars, k, mu)

    # our derivative
    x = dPqso_db1.eval(*args)

    # numerical derivative (parameter renamed so it no longer shadows x)
    def f(b1):
        model.b1 = b1
        return driver.theory.model.power(k, mu)
    g = numdifftools.Derivative(f, step=1e-3)
    y = g(model.b1)

    # compare
    np.testing.assert_allclose(x, y, rtol=1e-2)
|
nickhandREPO_NAMEpyRSDPATH_START.@pyRSD_extracted@pyRSD-master@pyRSD@tests@test_qso_derivatives@test_b1.py@.PATH_END.py
|
{
"filename": "_xperiodalignment.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/funnel/_xperiodalignment.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class XperiodalignmentValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    """Validator for the ``funnel.xperiodalignment`` enumerated property."""

    def __init__(self, plotly_name="xperiodalignment", parent_name="funnel", **kwargs):
        # Pull defaults out of kwargs up front for readability.
        edit_type = kwargs.pop("edit_type", "calc")
        role = kwargs.pop("role", "style")
        values = kwargs.pop("values", ["start", "middle", "end"])
        super(XperiodalignmentValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            role=role,
            values=values,
            **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@funnel@_xperiodalignment.py@.PATH_END.py
|
{
"filename": "_name.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/scattersmith/_name.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class NameValidator(_plotly_utils.basevalidators.StringValidator):
    """Validator for the ``scattersmith.name`` string property."""

    def __init__(self, plotly_name="name", parent_name="scattersmith", **kwargs):
        edit_type = kwargs.pop("edit_type", "style")
        super(NameValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@scattersmith@_name.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "keras-team/keras",
"repo_path": "keras_extracted/keras-master/keras/src/dtype_policies/__init__.py",
"type": "Python"
}
|
from keras.src import backend
from keras.src.api_export import keras_export
from keras.src.dtype_policies import dtype_policy
from keras.src.dtype_policies.dtype_policy import QUANTIZATION_MODES
from keras.src.dtype_policies.dtype_policy import DTypePolicy
from keras.src.dtype_policies.dtype_policy import FloatDTypePolicy
from keras.src.dtype_policies.dtype_policy import QuantizedDTypePolicy
from keras.src.dtype_policies.dtype_policy import QuantizedFloat8DTypePolicy
from keras.src.dtype_policies.dtype_policy_map import DTypePolicyMap
# Registry of the DTypePolicy classes known to this module.
ALL_OBJECTS = {
    DTypePolicy,
    FloatDTypePolicy,
    QuantizedDTypePolicy,
    QuantizedFloat8DTypePolicy,
    DTypePolicyMap,
}
# Class-name -> class lookup, used as `module_objects` during deserialization.
ALL_OBJECTS_DICT = {cls.__name__: cls for cls in ALL_OBJECTS}
@keras_export("keras.dtype_policies.serialize")
def serialize(dtype_policy):
    """Serializes `DTypePolicy` instance.

    Args:
        dtype_policy: A Keras `DTypePolicy` instance.

    Returns:
        `DTypePolicy` configuration dictionary.
    """
    # Imported lazily inside the function, mirroring the module's style.
    from keras.src.saving import serialization_lib

    config = serialization_lib.serialize_keras_object(dtype_policy)
    return config
@keras_export("keras.dtype_policies.deserialize")
def deserialize(config, custom_objects=None):
    """Deserializes a serialized `DTypePolicy` instance.

    Args:
        config: `DTypePolicy` configuration.
        custom_objects: Optional dictionary mapping names (strings) to custom
            objects (classes and functions) to be considered during
            deserialization.

    Returns:
        A Keras `DTypePolicy` instance.
    """
    # Imported lazily inside the function, mirroring the module's style.
    from keras.src.saving import serialization_lib

    return serialization_lib.deserialize_keras_object(
        config, module_objects=ALL_OBJECTS_DICT, custom_objects=custom_objects
    )
@keras_export("keras.dtype_policies.get")
def get(identifier):
    """Retrieves a Keras `DTypePolicy` instance.

    The `identifier` may be the string name of a `DTypePolicy` class.

    >>> policy = dtype_policies.get("mixed_bfloat16")
    >>> type(policy)
    <class '...DTypePolicy'>

    You can also specify `config` of the dtype policy to this function by
    passing dict containing `class_name` and `config` as an identifier. Also
    note that the `class_name` must map to a `DTypePolicy` class

    >>> identifier = {"class_name": "DTypePolicy",
    ...               "config": {"name": "float32"}}
    >>> policy = dtype_policies.get(identifier)
    >>> type(policy)
    <class '...DTypePolicy'>

    Args:
        identifier: A dtype policy identifier. One of `None` or string name of a
            `DTypePolicy` or `DTypePolicy` configuration dictionary or a
            `DTypePolicy` instance.

    Returns:
        A Keras `DTypePolicy` instance.
    """
    from keras.src.dtype_policies.dtype_policy import (
        _get_quantized_dtype_policy_by_str,
    )

    if identifier is None:
        # Fall back to the globally-configured default policy.
        return dtype_policy.dtype_policy()
    if isinstance(identifier, DTypePolicy):
        return identifier
    if isinstance(identifier, dict):
        return deserialize(identifier)
    if isinstance(identifier, str):
        if identifier.startswith(QUANTIZATION_MODES):
            return _get_quantized_dtype_policy_by_str(identifier)
        else:
            return DTypePolicy(identifier)
    try:
        return DTypePolicy(backend.standardize_dtype(identifier))
    except Exception as exc:
        # The original bare `except:` also swallowed KeyboardInterrupt and
        # SystemExit and discarded the cause; catch Exception and chain it.
        raise ValueError(
            "Cannot interpret `dtype` argument. Expected a string "
            f"or an instance of DTypePolicy. Received: dtype={identifier}"
        ) from exc
|
keras-teamREPO_NAMEkerasPATH_START.@keras_extracted@keras-master@keras@src@dtype_policies@__init__.py@.PATH_END.py
|
{
"filename": "README.md",
"repo_name": "gully/blase",
"repo_path": "blase_extracted/blase-main/README.md",
"type": "Markdown"
}
|
# blasé
Interpretable Machine Learning for high-resolution astronomical spectroscopy.
<a href="https://blase.readthedocs.io/en/latest/"><img src="https://img.shields.io/badge/Read-the%20docs-blue"></a>
<a href="https://ui.adsabs.harvard.edu/abs/2022ApJ...941..200G/abstract"><img src="https://img.shields.io/badge/Paper-Gully--Santiago & Morley (2022)-green"></a>
## _Handles stellar and telluric lines simultaneously_
We can combine stellar, [telluric](https://en.wikipedia.org/wiki/Telluric_contamination), and instrumental models into a unified forward model of your entire high-bandwidth, high-resolution spectrum. We can obtain best-in-class models of Earth's atmosphere, line-by-line, automatically, for free (or cheap).
## _Massively scalable_
By using autodiff, we can fit over 10,000 spectral lines simultaneously. This enormous amount of flexibility is unavailable in conventional frameworks that do not have [autodiff](https://en.wikipedia.org/wiki/Automatic_differentiation).

^ We do this for 10,000 lines simultaneously.
## _Rooted in physics_
We first clone a precomputed synthetic spectrum, such as PHOENIX, and then **transfer learn** with data. By regularizing to the cloned model, we get the best of both worlds: data driven when the Signal-to-Noise ratio is high, and model-driven when we lack data to say otherwise.
## _Blazing fast with GPUs_
We achieve $>60 \times$ speedups with NVIDIA GPUs, so training takes minutes instead of hours.
## Get started
Visit our [step-by-step tutorials](https://blase.readthedocs.io/en/latest/tutorials/index.html) or [installation](https://blase.readthedocs.io/en/latest/install.html) pages to get started. We also have [deep dives](https://blase.readthedocs.io/en/latest/deep_dives/index.html#), or you can [read the paper](https://ui.adsabs.harvard.edu/abs/2022ApJ...941..200G/abstract). Have a question or a research project in mind? Open [an Issue](https://github.com/gully/blase/issues) or [email gully](https://gully.github.io/).
Copyright 2020, 2021, 2022, 2023 The Authors
|
gullyREPO_NAMEblasePATH_START.@blase_extracted@blase-main@README.md@.PATH_END.py
|
{
"filename": "_hovertemplatesrc.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/contour/_hovertemplatesrc.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class HovertemplatesrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(self, plotly_name="hovertemplatesrc", parent_name="contour", **kwargs):
super(HovertemplatesrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
**kwargs,
)
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@contour@_hovertemplatesrc.py@.PATH_END.py
|
{
"filename": "field_functions.py",
"repo_name": "rennehan/yt-swift",
"repo_path": "yt-swift_extracted/yt-swift-main/yt/fields/field_functions.py",
"type": "Python"
}
|
from collections.abc import Callable
from inspect import signature
import numpy as np
from yt.utilities.lib.misc_utilities import obtain_position_vector
def get_radius(data, field_prefix, ftype):
    """Return the distance of each element of *data* from the "center"
    field parameter, in code_length units.

    On periodic axes the minimum-image separation is used: the smaller of
    the direct distance and the domain-wrapped distance per axis.
    """
    center = data.get_field_parameter("center").to("code_length")
    # Per-axis domain width, needed for the periodic wrap below.
    DW = (data.ds.domain_right_edge - data.ds.domain_left_edge).to("code_length")
    # This is in code_length so it can be the destination for our r later.
    radius2 = data.ds.arr(
        np.zeros(data[ftype, field_prefix + "x"].shape, dtype="float64"), "code_length"
    )
    # Plain-ndarray scratch buffer, reused in place for each axis.
    r = np.empty_like(radius2, subok=False)
    if any(data.ds.periodicity):
        # Scratch buffer for the wrapped distance.
        # NOTE(review): this assumes `.v` yields a *copy* of radius2's values
        # (unlike the `.d` view used below) -- confirm, otherwise the wrap
        # computation would clobber the accumulator.
        rdw = radius2.v
    for i, ax in enumerate("xyz"):
        pos = data[ftype, f"{field_prefix}{ax}"]
        if str(pos.units) != "code_length":
            pos = pos.to("code_length")
        # r <- pos - center along this axis, computed in place via the
        # bare ndarray views (.d) to avoid unit bookkeeping and temporaries.
        np.subtract(
            pos.d,
            center[i].d,
            r,
        )
        if data.ds.periodicity[i]:
            # Minimum image: keep min(|d|, |d - DW|) on this axis.
            np.abs(r, r)
            np.subtract(r, DW.d[i], rdw)
            np.abs(rdw, rdw)
            np.minimum(r, rdw, r)
        np.multiply(r, r, r)  # r <- r**2
        np.add(radius2.d, r, radius2.d)  # accumulate squared distance
        if data.ds.dimensionality < i + 1:
            break  # no further axes for lower-dimensional datasets
    # Using the views into the array is not changing units and as such keeps
    # from having to do symbolic manipulations
    np.sqrt(radius2.d, radius2.d)
    # Alias it, just for clarity.
    radius = radius2
    return radius
def get_periodic_rvec(data):
    """Return per-element position vectors, wrapping periodic axes.

    For each periodic axis the smaller (in magnitude) of the two modular
    images of the coordinate is kept.
    """
    coords = obtain_position_vector(data).d
    if sum(data.ds.periodicity) == 0:
        # Nothing is periodic; the raw vectors are already correct.
        return coords
    le = data.ds.domain_left_edge.in_units("code_length").d
    dw = data.ds.domain_width.in_units("code_length").d
    for i in range(coords.shape[0]):
        if not data.ds.periodicity[i]:
            continue
        coords[i, ...] -= le[i]
        # figure out which measure is less
        mins = np.argmin(
            [
                np.abs(np.mod(coords[i, ...], dw[i])),
                np.abs(np.mod(coords[i, ...], -dw[i])),
            ],
            axis=0,
        )
        temp_coords = np.mod(coords[i, ...], dw[i])

        # Where second measure is better, updating temporary coords
        ii = mins == 1
        temp_coords[ii] = np.mod(coords[i, ...], -dw[i])[ii]

        # Putting the temporary coords into the actual storage
        coords[i, ...] = temp_coords
        # NOTE(review): the original ended the loop body with the no-op
        # expression `coords[i, ...] + le[i]` (result discarded).  It may have
        # been intended as `+=`, but since it never had any effect it is
        # removed here to keep behavior unchanged -- confirm intent upstream.
    return coords
def validate_field_function(function: Callable) -> None:
    """
    Inspect signature, raise a TypeError if invalid, return None otherwise.
    """
    # Helper for user-facing field registration (Dataset.add_field and
    # yt.derived_field).  FieldInfoContainer.add_field deliberately skips it
    # because inspect.signature is expensive and yt's internal fields would
    # otherwise be re-validated on every dataset load.

    # Parameters without default values must be exactly ('field', 'data').
    fparams = signature(function).parameters
    nodefaults = tuple(
        name for name, param in fparams.items() if param.default is param.empty
    )
    if nodefaults != ("field", "data"):
        raise TypeError(
            f"Received field function {function} with invalid signature. "
            f"Expected exactly 2 positional parameters ('field', 'data'), got {nodefaults!r}"
        )

    # Both parameters must also be passable positionally.
    has_kw_only = any(
        fparams[name].kind is fparams[name].KEYWORD_ONLY for name in ("field", "data")
    )
    if has_kw_only:
        raise TypeError(
            f"Received field function {function} with invalid signature. "
            "Parameters 'field' and 'data' must accept positional values "
            "(they cannot be keyword-only)"
        )
|
rennehanREPO_NAMEyt-swiftPATH_START.@yt-swift_extracted@yt-swift-main@yt@fields@field_functions.py@.PATH_END.py
|
{
"filename": "Basic example 5--resampling DES Y1.ipynb",
"repo_name": "tmcclintock/AReconstructionTool",
"repo_path": "AReconstructionTool_extracted/AReconstructionTool-master/notebooks/Basic example 5--resampling DES Y1.ipynb",
"type": "Jupyter Notebook"
}
|
# Resampling DES Y1
The DES Y1 3x2pt analysis is a tricky beast because it has SO many parameters (26). Samplers don't know the marginal likelihoods of only the interesting parameters (cosmology), and only ever report the joint posterior of all parameters given the data. For this reason, if we want to resample the DES Y1 chain, we have to train the Gaussian processes on all parameters in the chain.
## This notebook is in development.
```python
#Import things
import numpy as np
import matplotlib.pyplot as plt
import resampler as samp
import scipy.optimize as op
import chainconsumer as CC
import emcee #for doing MCMC
%matplotlib inline
```
```python
#Plot formatting
plt.rc("font", size=18, family="serif")
plt.rc("text", usetex=True)
```
```python
#Read in the chain
input_chain = np.load("DES_data/DES_vc_params.npy")
lnpost = np.load("DES_data/DES_vc_lnpost.npy")
weights = np.load("DES_data/DES_vc_weights.npy")
print("chain shape is ", input_chain.shape)
print("lnpost shape is ", lnpost.shape)
print("weights shape is ", weights.shape)
```
```python
#Pick out training points
N_training = 1200
# NOTE(review): the import cell binds the module as `samp`
# (`import resampler as samp`); the original `isamp.` was a NameError.
# Confirm `ImportanceSampler` is the intended class in `resampler`.
IS = samp.ImportanceSampler(input_chain, lnpost, scale=3.5)
IS.select_training_points(N_training, method="LH")
```
```python
#Train the GP inside of the sampler
IS.train()
```
```python
plt.scatter(input_chain[-10000:,4],input_chain[-10000:,0])
points,_ = IS.get_training_data()
plt.scatter(points[:,4], points[:,0], c='k', s=10)
```
```python
#Resample the chain with an MCMC
start = np.loadtxt("DES_data/DES_vc_bestfit.txt")
nwalkers = 200
ndim = len(input_chain[0])
sampler = emcee.EnsembleSampler(nwalkers, ndim, IS.predict)
print("Running first burn-in")
p0 = np.array([start + start*1e-3*np.random.randn(ndim) for i in range(nwalkers)])
p0, lp, _ = sampler.run_mcmc(p0, 1000)
print("Running second burn-in")
p0 = p0[np.argmax(lp)] + p0[np.argmax(lp)]*1e-4*np.random.randn(nwalkers, ndim)
p0, lp, _ = sampler.run_mcmc(p0, 1000)
sampler.reset()
print("Running production...")
sampler.run_mcmc(p0, 3000);
```
```python
test_chain = sampler.flatchain
#print("Means and stds of input chain: ", np.mean(input_chain, 0)[:4], np.std(input_chain, 0)[:4])
#print("Means and stds of test chain: ", np.mean(test_chain, 0)[:4], np.std(test_chain, 0)[:4])
```
```python
c = CC.ChainConsumer()
plot_input_chain = [input_chain[:,4], input_chain[:,0]]
plot_test_chain = [test_chain[:,4], test_chain[:,0]]
#labels = [r"$\Omega_m$", r"$h$", r"$\Omega_b$", r"$n_s$", r"$A_s$"]
labels = [r"$\Omega_m$", r"$A_s$"]
c.add_chain(plot_input_chain, parameters=labels, name="Input chain", weights=weights)
c.add_chain(plot_test_chain, parameters=labels, name="Resampled chain")
fig = c.plotter.plot()
#fig.savefig("DESY1_resampling_example.png", dpi=300, bbox_inches="tight")
```
```python
c2 = CC.ChainConsumer()
c2.add_chain(input_chain[:,:5], name="Input chain", weights=weights)
c2.add_chain(test_chain[:,:5], name="Resampled chain")
fig = c2.plotter.plot()
#fig.savefig("DESY1_resampling_example.png", dpi=300, bbox_inches="tight")
```
```python
```
|
tmcclintockREPO_NAMEAReconstructionToolPATH_START.@AReconstructionTool_extracted@AReconstructionTool-master@notebooks@Basic example 5--resampling DES Y1.ipynb@.PATH_END.py
|
{
"filename": "hera_sim_tour.ipynb",
"repo_name": "HERA-Team/hera_sim",
"repo_path": "hera_sim_extracted/hera_sim-main/docs/tutorials/hera_sim_tour.ipynb",
"type": "Jupyter Notebook"
}
|
# Tour of hera_sim
This notebook briefly introduces some of the effects that can be modeled with `hera_sim`.
```python
%matplotlib inline
import uvtools
import numpy as np
import pylab as plt
from astropy.units import sday
from hera_sim import DATA_PATH
plt.rcParams["figure.figsize"] = [14, 8]
```
```python
from hera_sim import foregrounds, noise, sigchain, rfi, defaults
```
```python
defaults.set("h1c")
fqs = np.linspace(0.1, 0.2, 1024, endpoint=False)
lsts = np.linspace(0, 2 * np.pi, 10000, endpoint=False)
times = lsts / (2 * np.pi) * sday.to('s')
bl_len_ns = np.array([30.0, 0, 0])
h1c_beam = defaults("omega_p")(fqs)
```
## Foregrounds
### Diffuse Foregrounds
```python
Tsky_mdl = noise.HERA_Tsky_mdl["xx"]
vis_fg_diffuse = foregrounds.diffuse_foreground(
lsts, fqs, bl_len_ns, Tsky_mdl=Tsky_mdl, omega_p=h1c_beam
)
```
```python
uvtools.plot.labeled_waterfall(vis_fg_diffuse, mode="log", freqs=fqs * 1e9, lsts=lsts)
uvtools.plot.labeled_waterfall(vis_fg_diffuse, mode="phs", freqs=fqs * 1e9, lsts=lsts);
```


### Point-Source Foregrounds
```python
vis_fg_pntsrc = foregrounds.pntsrc_foreground(lsts, fqs, bl_len_ns, nsrcs=200)
```
```python
uvtools.plot.labeled_waterfall(vis_fg_pntsrc, mode="log", freqs=fqs * 1e9, lsts=lsts)
uvtools.plot.labeled_waterfall(vis_fg_pntsrc, mode="phs", freqs=fqs * 1e9, lsts=lsts);
```


### Diffuse and Point-Source Foregrounds
```python
vis_fg = vis_fg_diffuse + vis_fg_pntsrc
```
```python
uvtools.plot.labeled_waterfall(vis_fg, mode="log", freqs=fqs * 1e9, lsts=lsts)
uvtools.plot.labeled_waterfall(vis_fg, mode="phs", freqs=fqs * 1e9, lsts=lsts);
```


## Noise
```python
nos_jy = noise.sky_noise_jy(
lsts, fqs, omega_p=h1c_beam, Tsky_mdl=noise.HERA_Tsky_mdl['xx']
)
```
```python
uvtools.plot.labeled_waterfall(nos_jy, mode="log", freqs=fqs * 1e9, lsts=lsts,
dynamic_range=2)
uvtools.plot.labeled_waterfall(nos_jy, mode="phs", freqs=fqs * 1e9, lsts=lsts);
```


```python
vis_fg_nos = vis_fg + nos_jy
```
```python
uvtools.plot.labeled_waterfall(vis_fg_nos, mode="log", freqs=fqs * 1e9, lsts=lsts,
dynamic_range=3)
uvtools.plot.labeled_waterfall(vis_fg_nos, mode="phs", freqs=fqs * 1e9, lsts=lsts);
```


## RFI
```python
rfi1 = rfi.rfi_stations(lsts, fqs, stations=DATA_PATH / "HERA_H1C_RFI_STATIONS.npy")
rfi2 = rfi.rfi_impulse(lsts, fqs, impulse_chance=0.05, impulse_strength=1e6)
rfi3 = rfi.rfi_scatter(lsts, fqs, scatter_chance=0.01, scatter_strength=1e6)
rfi_all = rfi1 + rfi2 + rfi3
```
```python
uvtools.plot.labeled_waterfall(rfi_all, mode="abs", freqs=fqs * 1e9, lsts=lsts)
uvtools.plot.labeled_waterfall(rfi_all, mode="phs", freqs=fqs * 1e9, lsts=lsts);
```


```python
vis_fg_nos_rfi = vis_fg_nos + rfi_all
```
```python
uvtools.plot.labeled_waterfall(vis_fg_nos_rfi, mode="log", freqs=fqs * 1e9, lsts=lsts)
uvtools.plot.labeled_waterfall(vis_fg_nos_rfi, mode="phs", freqs=fqs * 1e9, lsts=lsts);
```


## Gains
```python
g = sigchain.gen_gains(fqs, [1, 2, 3])
plt.figure()
for i in g:
plt.plot(fqs, np.abs(g[i]), label=str(i))
plt.legend()
plt.show()
```

```python
vis_total = sigchain.apply_gains(vis_fg_nos_rfi, g, (1, 2))
uvtools.plot.labeled_waterfall(vis_total, mode="log", freqs=fqs * 1e9, lsts=lsts,
dynamic_range=6)
uvtools.plot.labeled_waterfall(vis_total, mode="phs", freqs=fqs * 1e9, lsts=lsts);
```


## Crosstalk
```python
xtalk = sigchain.gen_whitenoise_xtalk(fqs)
if xtalk.ndim == 1:
xtalk = np.reshape(xtalk, (1, -1))
vis_xtalk = vis_fg_nos_rfi + xtalk
vis_xtalk = sigchain.apply_gains(vis_xtalk, g, (1, 2))
```
```python
uvtools.plot.labeled_waterfall(vis_xtalk, mode="log", freqs=fqs * 1e9, lsts=lsts,
dynamic_range=6)
uvtools.plot.labeled_waterfall(vis_xtalk, mode="phs", freqs=fqs * 1e9, lsts=lsts);
```


|
HERA-TeamREPO_NAMEhera_simPATH_START.@hera_sim_extracted@hera_sim-main@docs@tutorials@hera_sim_tour.ipynb@.PATH_END.py
|
{
"filename": "_variant.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/parcats/legendgrouptitle/font/_variant.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class VariantValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    """Enumerated validator for the ``variant`` property of
    ``parcats.legendgrouptitle.font`` (CSS font-variant keywords)."""

    def __init__(
        self,
        plotly_name="variant",
        parent_name="parcats.legendgrouptitle.font",
        **kwargs,
    ):
        # Accepted font-variant keywords, used unless the caller overrides
        # them via the `values` keyword.
        allowed = [
            "normal",
            "small-caps",
            "all-small-caps",
            "all-petite-caps",
            "petite-caps",
            "unicase",
        ]
        super().__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=kwargs.pop("edit_type", "style"),
            values=kwargs.pop("values", allowed),
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@parcats@legendgrouptitle@font@_variant.py@.PATH_END.py
|
{
"filename": "np_dtypes_test.py",
"repo_name": "tensorflow/tensorflow",
"repo_path": "tensorflow_extracted/tensorflow-master/tensorflow/python/ops/numpy_ops/np_dtypes_test.py",
"type": "Python"
}
|
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf-numpy dtype utilities."""
from absl.testing import parameterized
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops.numpy_ops import np_dtypes
from tensorflow.python.platform import test
class DTypeTest(test.TestCase, parameterized.TestCase):
  """Exercises np_dtypes' allow-float64 / prefer-float32 switches."""

  @parameterized.parameters([False, True])
  def testAllowF64False(self, prefer_f32):
    # With float64 disallowed, defaults and promoted results are float32
    # regardless of the prefer_float32 setting.
    np_dtypes.set_allow_float64(False)
    np_dtypes.set_prefer_float32(prefer_f32)
    self.assertEqual(np_dtypes.default_float_type(), dtypes.float32)
    self.assertEqual(
        np_dtypes._result_type(np.zeros([], np.float64), 1.1), dtypes.float32)

  def testAllowF64TruePreferF32False(self):
    # Full 64-bit mode: Python float/complex literals promote to 64-bit types.
    np_dtypes.set_allow_float64(True)
    np_dtypes.set_prefer_float32(False)
    self.assertEqual(np_dtypes.default_float_type(), dtypes.float64)
    self.assertEqual(np_dtypes._result_type(1.1), dtypes.float64)
    self.assertEqual(np_dtypes._result_type(1.j), dtypes.complex128)

  def testAllowF64TruePreferF32True(self):
    # Prefer-float32 mode: bare Python literals stay 32-bit, but an explicit
    # 64-bit numpy operand still wins the promotion.
    np_dtypes.set_allow_float64(True)
    np_dtypes.set_prefer_float32(True)
    self.assertEqual(np_dtypes.default_float_type(), dtypes.float32)
    self.assertEqual(np_dtypes._result_type(1.1), dtypes.float32)
    self.assertEqual(
        np_dtypes._result_type(np.zeros([], np.float64), 1.1), dtypes.float64)
    self.assertEqual(np_dtypes._result_type(1.1j), dtypes.complex64)
    self.assertEqual(
        np_dtypes._result_type(np.zeros([], np.complex128), 1.1j),
        dtypes.complex128)
    self.assertEqual(
        np_dtypes._result_type(np.zeros([], np.float32), 1.1j),
        dtypes.complex64)
if __name__ == '__main__':
  # Enable eager execution before running the test cases.
  ops.enable_eager_execution()
  test.main()
|
tensorflowREPO_NAMEtensorflowPATH_START.@tensorflow_extracted@tensorflow-master@tensorflow@python@ops@numpy_ops@np_dtypes_test.py@.PATH_END.py
|
{
"filename": "limpy_v2.ipynb",
"repo_name": "Anirbancosmo/limpy",
"repo_path": "limpy_extracted/limpy-master/examples/limpy_v2.ipynb",
"type": "Jupyter Notebook"
}
|
```python
import camb
import numpy as np
import scipy.integrate as si
from camb import get_matter_power_interpolator
from colossus.cosmology import cosmology as col_cosmology
from colossus.lss import bias, mass_function
import limpy.cosmos as cosmos
import limpy.lines as ll
```
<---Parameters used in cosmo.py--->:
Hubble constant (h): 0.6776
Omega matter (Omega_m): 0.3111
```python
line_name = "CII158"
model_name="Silva15-m1"
sfr_model="Behroozi19"
line_models=ll.line_modeling(line_name = line_name, model_name= model_name,
sfr_model=sfr_model, parameters={'use_scatter':True})
```
```python
mh=np.logspace(10,13, num=500)
z= 7
lc1=line_models.line_luminosity(mh, z)
lc2=line_models.line_luminosity(mh, z)
lc3=line_models.line_luminosity(mh, z)
```
```python
line_models_ns=ll.line_modeling(line_name = line_name, model_name= model_name,
sfr_model=sfr_model, parameters={'use_scatter':False})
lc4=line_models_ns.line_luminosity(mh, z)
```
```python
# Plotting
import matplotlib.pyplot as plt
plt.figure(figsize=(6, 6))
s = 2
plt.scatter(mh, lc1, label='random scatter 1', s=s, alpha=0.9)
plt.scatter(mh, lc2, label='random scatter 2', s=s, alpha=0.9)
plt.scatter(mh, lc3, label='random scatter 3', s=s, alpha=0.9)
plt.plot(mh, lc4, lw=2, color="r", label="without scatter")
plt.xscale('log')
plt.yscale('log')
plt.xlim(1e10, 1e13)
plt.xlabel(r'$M_{\rm halo}$')
plt.ylabel(r'$L_{CII}$')
plt.legend()
plt.grid(True)
plt.savefig("lim_scatter.pdf", bbox_inches="tight")
```

```python
# Check power spectrum
```
```python
k= np.logspace(-2,1)
z=2
lim_theory= ll.theory(parameters={'use_scatter':True, "a_std": 2, "b_std": 1})
lim_theory_ns= ll.theory(parameters={'use_scatter':False, "a_std": 2, "b_std": 1})
```
<---Parameters used in cosmo.py--->:
Hubble constant (h): 0.6776
Omega matter (Omega_m): 0.3111
<--- Parameters used in lines.py--->:
Hubble constant (h): 0.6776
Omega matter (Omega_m): 0.3111
<---Parameters used in cosmo.py--->:
Hubble constant (h): 0.6776
Omega matter (Omega_m): 0.3111
<--- Parameters used in lines.py--->:
Hubble constant (h): 0.6776
Omega matter (Omega_m): 0.3111
```python
pk1= lim_theory.Pk_line(k, z)
pk2= lim_theory.Pk_line(k, z)
pk3= lim_theory.Pk_line(k, z)
pk4= lim_theory_ns.Pk_line(k, z)
```
```python
# Plotting
import matplotlib.pyplot as plt
plt.figure(figsize=(6, 6))
plt.plot(k, pk1, lw=1, label="scatter 1")
plt.plot(k, pk2, lw=1, label="scatter 2")
plt.plot(k, pk3, lw=1, label="scatter 3")
plt.plot(k, pk4, lw=1, color="r", label="without scatter")
plt.yscale('log')
plt.xscale('log')
plt.ylabel(r'$P(k)$')
plt.xlabel(r'$k$')
plt.legend()
plt.grid(True)
plt.savefig("lim_scatter.pdf", bbox_inches="tight")
```

# check cosmology
```python
k= np.logspace(-2,1)
z=2
plt.figure(figsize=(6, 6))
h =[0.5,0.6, 0.7, 0.8]
pk1 = ll.theory(parameters={'omega_m': 0.2}).Pk_line(k, z)
pk2 = ll.theory(parameters={'omega_m': 0.3}).Pk_line(k, z)
pk3 = ll.theory(parameters={'omega_m': 0.4}).Pk_line(k, z)
```
<---Parameters used in cosmo.py--->:
Hubble constant (h): 0.6776
Omega matter (Omega_m): 0.2
<--- Parameters used in lines.py--->:
Hubble constant (h): 0.6776
Omega matter (Omega_m): 0.2
<---Parameters used in cosmo.py--->:
Hubble constant (h): 0.6776
Omega matter (Omega_m): 0.3
<--- Parameters used in lines.py--->:
Hubble constant (h): 0.6776
Omega matter (Omega_m): 0.3
<---Parameters used in cosmo.py--->:
Hubble constant (h): 0.6776
Omega matter (Omega_m): 0.4
<--- Parameters used in lines.py--->:
Hubble constant (h): 0.6776
Omega matter (Omega_m): 0.4
<Figure size 600x600 with 0 Axes>
```python
```
array([-6.69098994e+11, -7.79312279e+11, -8.96432093e+11, -1.01669167e+12,
-1.13428779e+12, -1.24100141e+12, -1.32615939e+12, -1.37765484e+12,
-1.38479024e+12, -1.34293952e+12, -1.25830267e+12, -1.14736418e+12,
-1.03862505e+12, -9.50343111e+11, -8.84269631e+11, -8.05233991e+11,
-6.80934480e+11, -5.49290572e+11, -4.73723913e+11, -4.18598411e+11,
-3.31072471e+11, -2.74168875e+11, -2.27441867e+11, -1.81845480e+11,
-1.48181859e+11, -1.19622065e+11, -9.63529032e+10, -7.79526813e+10,
-6.32464543e+10, -5.15647284e+10, -4.23142892e+10, -3.49939640e+10,
-2.91889827e+10, -2.45631301e+10, -2.08477700e+10, -1.78283169e+10,
-1.53371702e+10, -1.32446914e+10, -1.14551180e+10, -9.89996158e+09,
-8.53268824e+09, -7.32618343e+09, -6.26611726e+09, -5.34500899e+09,
-4.55682723e+09, -3.89448020e+09, -3.34787952e+09, -2.90440761e+09,
-2.55007138e+09, -2.27072998e+09])
```python
# Plotting
import matplotlib.pyplot as plt
plt.figure(figsize=(6, 6))
plt.plot(k, pk1, lw=2, label=r"$\Omega_m = 0.2$")
plt.plot(k, pk2, lw=2, label=r"$\Omega_m = 0.3$")
plt.plot(k, pk3, lw=2, label=r"$\Omega_m = 0.4$")
plt.yscale('log')
plt.xscale('log')
plt.ylabel(r'$P(k)$')
plt.xlabel(r'$k$')
plt.legend()
plt.grid(True)
plt.savefig("lim_scatter.pdf", bbox_inches="tight")
```

```python
z=4
b1 = ll.theory(parameters={"use_scatter": True}).b_line(z)
b2 = ll.theory(parameters={"use_scatter": True}).b_line( z)
b3 = ll.theory(parameters={"use_scatter": True}).b_line( z)
```
<---Parameters used in cosmo.py--->:
Hubble constant (h): 0.6776
Omega matter (Omega_m): 0.3111
<--- Parameters used in lines.py--->:
Hubble constant (h): 0.6776
Omega matter (Omega_m): 0.3111
<---Parameters used in cosmo.py--->:
Hubble constant (h): 0.6776
Omega matter (Omega_m): 0.3111
<--- Parameters used in lines.py--->:
Hubble constant (h): 0.6776
Omega matter (Omega_m): 0.3111
<---Parameters used in cosmo.py--->:
Hubble constant (h): 0.6776
Omega matter (Omega_m): 0.3111
<--- Parameters used in lines.py--->:
Hubble constant (h): 0.6776
Omega matter (Omega_m): 0.3111
```python
b1
```
2.922747117249655
```python
b2
```
2.91555205021663
```python
b3
```
2.9522080585893073
```python
```
```python
np.identity(2,2)
```
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
Cell In[76], line 1
----> 1 np.identity(2,2)
File ~/anaconda3/lib/python3.11/site-packages/numpy/core/numeric.py:2160, in identity(n, dtype, like)
2157 return _identity_with_like(like, n, dtype=dtype)
2159 from numpy import eye
-> 2160 return eye(n, dtype=dtype, like=like)
File ~/anaconda3/lib/python3.11/site-packages/numpy/lib/twodim_base.py:211, in eye(N, M, k, dtype, order, like)
209 if M is None:
210 M = N
--> 211 m = zeros((N, M), dtype=dtype, order=order)
212 if k >= M:
213 return m
TypeError: Cannot interpret '2' as a data type
```python
```
|
AnirbancosmoREPO_NAMElimpyPATH_START.@limpy_extracted@limpy-master@examples@limpy_v2.ipynb@.PATH_END.py
|
{
"filename": "pixelmask.py",
"repo_name": "sdss/mangadap",
"repo_path": "mangadap_extracted/mangadap-main/mangadap/util/pixelmask.py",
"type": "Python"
}
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
"""
A class hierarchy for pixel masks.
----
.. include license and copyright
.. include:: ../include/copy.rst
----
.. include common links, assuming primary doc root is up one directory
.. include:: ../include/links.rst
"""
import numpy
import astropy.constants
from .bitmask import BitMask
from ..par.artifactdb import ArtifactDB
from ..par.emissionlinedb import EmissionLineDB
from matplotlib import pyplot
class PixelMask:
    """
    Base class for a general 1D or 2D pixel mask.

    Attributes:
        shape (tuple): Shape of the array for the mask.  ``None`` until one
            of the mask-construction methods is called.
    """
    def __init__(self):
        self.shape = None

    def _set_shape(self, x, ny=None):
        """
        Set :attr:`shape` of the object.

        Args:
            x (numpy.ndarray): Vector with the x-coordinates
            ny (int): (**Optional**) Size of the second dimension.
                Default is that there is no second dimension.
        """
        self.shape = (len(x),) if ny is None else (ny, len(x))

    def _empty_mask(self, x, ny=None):
        """
        Return an empty (all-False) mask with the correct shape.

        Args:
            x (numpy.ndarray): Coordinate vector
            ny (int): (**Optional**) Size of the second dimension.
                Default is that there is only one dimension.

        Returns:
            numpy.ndarray : Boolean mask of the correct shape.
        """
        self._set_shape(x, ny=ny)
        return numpy.zeros(self.shape, dtype=bool)

    def _mask_coordinate_ranges(self, x, rng, ny=None):
        """
        Flag any x coordinates strictly between a set of range limits as
        True.  The mask is repeated in the second dimension, if requested.

        A limit of ``None`` means the range is unbounded on that side.

        Args:
            x (numpy.ndarray): Coordinate vector
            rng (list, numpy.ndarray): (List of) Coordinate ranges that
                should be masked.  Not modified.
            ny (int): (**Optional**) Size of the second dimension.
                Default is that there is only one dimension.

        Returns:
            numpy.ndarray : Boolean mask of the correct shape.
        """
        self._set_shape(x, ny=ny)
        # Work on an object-dtype copy so that (a) None sentinels compare
        # cleanly via numpy.equal and (b) the caller's array is never
        # modified: numpy.atleast_2d returns a *view* of an ndarray input,
        # so writing the default limits below used to clobber the caller's
        # data in place.
        _rng = numpy.array(numpy.atleast_2d(rng), dtype=object)
        # Replace any undefined limits with values just outside the
        # coordinate range so the comparisons below always pass there.
        _rng[numpy.equal(_rng[:,0], None),0] = x[0]-1
        _rng[numpy.equal(_rng[:,1], None),1] = x[-1]+1
        # Flag coordinates inside any of the (exclusive) ranges.
        mask = numpy.any(numpy.array([numpy.logical_and(x>l, x<u)
                                      for l,u in zip(_rng[:,0], _rng[:,1])]), axis=0)
        # Repeat the 1D mask along the second dimension if one was requested.
        return mask if len(self.shape) == 1 else numpy.array([mask]*self.shape[0])
class SpectralPixelMask(PixelMask):
    """
    Container that produces a mask for the stellar continuum based on a
    set of emission lines and artifacts.

    Args:
        artdb (:class:`mangadap.proc.artifactdb.ArtifactDB`):
            (**Optional**) Database with the list of artifacts to mask.
        emldb (:class:`mangadap.proc.emissionlinedb.EmissionLineDB`):
            (**Optional**) Database with the list of emission lines to
            mask.
        waverange (numpy.ndarray): (**Optional**) Any pixels **outside**
            this wavelength range are masked.
        nsig (float): (**Optional**) Half-width of each emission-line
            mask in units of the line velocity dispersion.  Default is 3.

    Attributes:
        artdb (:class:`mangadap.proc.artifactdb.ArtifactDB`):
            Database with the list of artifacts to mask.
        emldb (:class:`mangadap.proc.emissionlinedb.EmissionLineDB`):
            Database with the list of emission lines to
            mask.
        waverange (numpy.ndarray): Any pixels **outside**
            this wavelength range are masked.
        nsig (float): Emission-line mask half-width in units of the line
            velocity dispersion.
    """
    def __init__(self, artdb=None, emldb=None, waverange=None, nsig=None):
        if artdb is not None and not isinstance(artdb, ArtifactDB):
            raise TypeError('Must provide an ArtifactDB for artifacts to mask.')
        self.artdb = artdb
        if emldb is not None and not isinstance(emldb, EmissionLineDB):
            raise TypeError('Must provide EmissionLineDB for emission-lines to mask.')
        self.emldb = emldb
        if waverange is not None and len(waverange) != 2:
            raise ValueError('Provided wavelength range must have two and only two elements.')
        self.waverange = waverange
        ## KHRR added nsig here and above; it replaces the old per-call
        ## nsigma argument of the masking methods.
        self.nsig = 3. if nsig is None else nsig

    def _waverange_mask(self, wave, nspec=None):
        """
        Mask the pixels **not** within the selected wavelength range.

        Args:
            wave (numpy.ndarray): Wavelength coordinate vector
            nspec (int): (**Optional**) Number of spectra to mask.
                Default is just one.

        Returns:
            numpy.ndarray : Boolean mask of the correct shape.
        """
        if self.waverange is None:
            return self._empty_mask(wave, ny=nspec)
        # Invert: pixels *inside* the range are good, everything else masked.
        return numpy.invert(self._mask_coordinate_ranges(wave, self.waverange, ny=nspec))

    def _artifact_mask(self, wave, nspec=None):
        """
        Mask the pixels in the wavelength range(s) defined by the
        artifact database.

        Args:
            wave (numpy.ndarray): Wavelength coordinate vector
            nspec (int): (**Optional**) Number of spectra to mask.
                Default is just one.

        Returns:
            numpy.ndarray : Boolean mask of the correct shape.
        """
        if self.artdb is None:
            return self._empty_mask(wave, ny=nspec)
        return self._mask_coordinate_ranges(wave, self.artdb['waverange'], ny=nspec)

    def _check_eml_kin_argument(self, kin):
        """
        Check and return the correct kinematics vectors.  If a single
        number is provided, the function returns the number repeated for
        each emission line.

        Args:
            kin (float, list, numpy.ndarray):
                An input set of kinematics to used by the mask.

        Returns:
            numpy.ndarray: A 1D float array of the correct shape with
            the kinematics, or None if ``kin`` is None.

        Raises:
            ValueError:
                Raised if the length of the `kin` array is not the same
                as the number of emission lines in :attr:`emldb`.
        """
        if kin is None:
            return None
        if isinstance(kin, (int, float, numpy.floating, numpy.integer)):
            return numpy.full(self.emldb.size, kin, dtype=float)
        if isinstance(kin, (list, numpy.ndarray)):
            if len(kin) != self.emldb.size:
                raise ValueError('Provided vector does not have a matching length.')
            return numpy.atleast_1d(kin).astype(float)
        # NOTE: any other type falls through and implicitly returns None.

    def _get_emission_line_bands(self, velocity_offsets=0.0, sigma=250.0):
        r"""
        Set the emission-line masks, using the emission-line parameters
        defined by :attr:`emldb` (see
        :class:`mangadap.par.emissionlinedb.EmissionLineDB`.

        All emission lines in the database, except for those marked with
        `action==i` are masked.

        The center of the masked region is constructed using
        `emldb['restwave']` and the provided velocity
        (`velocity_offset`).  The velocity of any lines marked as sky
        (`action==s`) is always set to 0.

        The width of the mask is set to be :math:`\pm n_\sigma \sigma`
        about this center, where :math:`n_\sigma` is :attr:`nsig` and
        :math:`\sigma` is the `sigma` argument.  If `sigma` is None,
        `emldb['sig']` is used for each line.

        Args:
            velocity_offsets (float, numpy.ndarray): (**Optional**) The
                velocity offset to apply to each emission-line mask.
                Must be either a single number or one number per
                emission line.  Assumed to be 0 if set to None.
            sigma (float, numpy.ndarray): (**Optional**) Line velocity
                dispersions used to set the mask width.  Must be either
                a single number or one number per emission line.  If
                None, the dispersion provided by the emission-line
                database is used (`emldb['sig']`).

        Returns:
            numpy.ndarray : A :math:`N_l \times 2` array with the
            wavelength limits of the emission-line bands, or None if
            there are no lines to mask.

        Raises:
            ValueError: Raised if :attr:`nsig` is None, or any
                :attr:`nsig` is not greater than 0; raised if the
                half-width of any mask is not greater than 0.
        """
        # Mask everything but the lines to ignore
        indx = numpy.invert(self.emldb['action'] == 'i')
        nbands = numpy.sum(indx)
        # No lines to mask
        if nbands == 0:
            return None
        # Get the number of standard deviations to cover with the mask;
        # taken from the nsig value set at instantiation.
        _nsigma = self._check_eml_kin_argument(self.nsig)
        if _nsigma is None:
            raise ValueError('Must provide the number of sigma to cover with the mask.')
        if numpy.any(numpy.invert(_nsigma > 0)):
            raise ValueError('Must provide a non-zero number of sigma for mask half-width.')
        # Get the mask centers.  NOTE: guard against None *before* zeroing
        # the sky-line velocities; previously a None velocity_offsets (which
        # the docstring says is treated as 0) raised a TypeError here.
        _velocity_offsets = self._check_eml_kin_argument(velocity_offsets)
        if _velocity_offsets is not None:
            # Sky lines are always masked at zero velocity offset.
            _velocity_offsets[ self.emldb['action'] == 's' ] = 0.0
        center = self.emldb['restwave'][indx] if _velocity_offsets is None else \
                        self.emldb['restwave'][indx] \
                            * (1.0 + _velocity_offsets[indx]/astropy.constants.c.to('km/s').value)
        # Get the mask widths
        _sigma = self._check_eml_kin_argument(sigma)
        halfwidth = _nsigma[indx] * (self.emldb['sig'][indx] if _sigma is None else _sigma[indx])
        if numpy.any(numpy.invert(halfwidth > 0)):
            raise ValueError('All emission-line mask half-widths must be larger than 0.')
        # Convert the velocity half-width into wavelength limits per band.
        return numpy.array([ center*(1.0-halfwidth/astropy.constants.c.to('km/s').value),
                             center*(1.0+halfwidth/astropy.constants.c.to('km/s').value) ]).T

    def _emission_line_mask(self, wave, nspec=None, velocity_offsets=0.0, sigma=250.0):
        """
        Mask the pixels in the wavelength range(s) defined by the
        emission-line database that has been adjusted by a set of
        velocities and dispersions.

        Currently, the velocity offsets are applied to all lines for
        each spectrum, whereas sigma and :attr:`nsig` are applied to all
        spectra for each line.

        Args:
            wave (numpy.ndarray): Wavelength coordinate vector
            nspec (int): (**Optional**) Number of spectra to mask.
                Default is just one.
            velocity_offsets (float, numpy.ndarray): (**Optional**) One
                or more velocity offsets to apply to the emission-line
                bands on a spectrum-by-spectrum basis.  Default is to
                apply no velocity offset.
            sigma (float, numpy.ndarray): (**Optional**) One or more
                velocity dispersions to use for setting the width of the
                emission-line band on a line-by-line basis.  Default is
                a width based on a dispersion of 250 km/s.

        Returns:
            numpy.ndarray : Boolean mask of the correct shape.
        """
        if self.emldb is None:
            return self._empty_mask(wave, ny=nspec)
        # A vector of velocity offsets means one offset per spectrum: build
        # the band mask row by row.
        if isinstance(velocity_offsets, (list, numpy.ndarray)) and len(velocity_offsets) > 1:
            _velocity_offsets = numpy.asarray(velocity_offsets)
            if len(_velocity_offsets) != nspec:
                raise ValueError('Velocity offsets do not match the number of spectra.')
            mask = self._empty_mask(wave, ny=nspec)
            for i in range(len(_velocity_offsets)):
                waverange = self._get_emission_line_bands(velocity_offsets=_velocity_offsets[i],
                                                          sigma=sigma)
                mask[i,:] = self._mask_coordinate_ranges(wave, waverange)
            return mask
        # Single (or scalar) offset: one set of bands applied to all spectra.
        _velocity_offsets = velocity_offsets[0] \
                                if isinstance(velocity_offsets, (list, numpy.ndarray)) \
                                    and len(velocity_offsets) == 1 else velocity_offsets
        waverange = self._get_emission_line_bands(velocity_offsets=_velocity_offsets, sigma=sigma)
        return self._mask_coordinate_ranges(wave, waverange, ny=nspec)

    def boolean(self, wave, nspec=None, velocity_offsets=0.0, sigma=250.0):
        """
        Construct the full boolean mask that includes the desired
        wavelength range, omitting artifacts, and omitting emission
        lines.

        Args:
            wave (numpy.ndarray): Wavelength coordinate vector
            nspec (int): (**Optional**) Number of spectra to mask.
                Default is just one.
            velocity_offsets (float, numpy.ndarray): (**Optional**) One
                or more velocity offsets to apply to the emission-line
                bands on a spectrum-by-spectrum basis.  Default is to
                apply no velocity offset.
            sigma (float, numpy.ndarray): (**Optional**) One or more
                velocity dispersions to use for setting the width of the
                emission-line band on a line-by-line basis.  Default is
                a width based on a dispersion of 250 km/s.

        Returns:
            numpy.ndarray : Boolean mask of the correct shape.
        """
        if nspec is not None and not nspec > 0:
            raise ValueError('Number of spectra must be larger than 0!')
        # Union of the three component masks.
        return self._waverange_mask(wave, nspec=nspec) | self._artifact_mask(wave, nspec=nspec) \
                    | self._emission_line_mask(wave, nspec=nspec, velocity_offsets=velocity_offsets,
                                               sigma=sigma)

    def bits(self, bitmask, wave, nspec=None, mask=None, velocity_offsets=0.0, sigma=250.0,
             waverange_flag='OUTSIDE_RANGE', art_flag='ARTIFACT',
             eml_flag='EML_REGION'):
        """
        Construct a bit mask that signifies pixels as outside the
        desired wavelength range, as being affected by an artifact, and
        being designated as an emission-line region.  To simply flag the
        pixels as a binary masked or unmasked, use :func:`boolean`.

        Args:
            bitmask (:class:`mangadap.util.bitmask.BitMask`): Bitmask
                used to flag pixels.  The flags waverange_flag,
                art_flag, and eml_flag *must* be defined in the provided
                instance of BitMask.
            wave (numpy.ndarray): Wavelength coordinate vector
            nspec (int): (**Optional**) Number of spectra to mask.
                Default is just one.
            mask (numpy.array): (**Optional**) Baseline mask to add
                pixels masks.  Should have data type that matches
                provided bitmask.  Default is to start with all pixels
                unmasked.
            velocity_offsets (float, numpy.ndarray): (**Optional**) One
                or more velocity offsets to apply to the emission-line
                bands on a spectrum-by-spectrum basis.  Default is to
                apply no velocity offset.
            sigma (float, numpy.ndarray): (**Optional**) One or more
                velocity dispersions to use for setting the width of the
                emission-line band on a line-by-line basis.  Default is
                a width based on a dispersion of 250 km/s.
            waverange_flag (str): (**Optional**) Bitmask name used to
                flag a pixel as outside of the desired wavelength range.
                Default is 'OUTSIDE_RANGE'.
            art_flag (str): (**Optional**) Bitmask name used to flag a
                pixel being affected by an artifact.  Default is
                'ARTIFACT'.
            eml_flag (str): (**Optional**) Bitmask name used to flag a
                pixel being within an emission-line region.  Default is
                'EML_REGION'.

        Returns:
            numpy.ndarray : Bit mask of the correct shape.
        """
        # Check the bitmask type
        if not isinstance(bitmask, BitMask):
            raise TypeError('Must provide object of type BitMask.')
        if nspec is not None and not nspec > 0:
            raise ValueError('Number of spectra must be larger than 0!')
        # Get the wavelength range mask
        wavemask = self._waverange_mask(wave, nspec=nspec)
        # Check that the input mask has the same size
        if mask is not None and wavemask.shape != mask.shape:
            raise ValueError('Input mask does not have the correct shape.')
        # Get the artifact mask
        artmask = self._artifact_mask(wave, nspec=nspec)
        # Get the emission-line mask
        emlmask = self._emission_line_mask(wave, nspec=nspec, velocity_offsets=velocity_offsets,
                                           sigma=sigma)
        # Construct and return the mask, starting from the baseline if given.
        _mask = numpy.zeros(wavemask.shape, dtype=bitmask.minimum_dtype()) \
                    if mask is None else mask
        if numpy.sum(wavemask) > 0:
            _mask[wavemask] = bitmask.turn_on(_mask[wavemask], flag=waverange_flag)
        if numpy.sum(artmask) > 0:
            _mask[artmask] = bitmask.turn_on(_mask[artmask], flag=art_flag)
        if numpy.sum(emlmask) > 0:
            _mask[emlmask] = bitmask.turn_on(_mask[emlmask], flag=eml_flag)
        return _mask
|
sdssREPO_NAMEmangadapPATH_START.@mangadap_extracted@mangadap-main@mangadap@util@pixelmask.py@.PATH_END.py
|
{
"filename": "setup.py",
"repo_name": "VMBoehm/MADLens",
"repo_path": "MADLens_extracted/MADLens-master/setup.py",
"type": "Python"
}
|
from setuptools import setup
# Package metadata and build configuration for MADLens, a differentiable
# gravitational-lensing simulator (see the project URL below).
setup(name='MADLens',
      # Derive the package version from SCM (git) tags via setuptools_scm
      # rather than hard-coding it here.
      use_scm_version=True,
      setup_requires=['setuptools_scm'],
      description='a differentiable lensing simulator',
      url='http://github.com/VMBoehm/MADLens',
      author='Vanessa Martina Boehm',
      author_email='vboehm@berkeley.edu',
      license='GNU GPLv3',
      packages=['MADLens', 'MADLens.tests'],
      install_requires=['numpy', 'nbodykit', 'dask[array]', 'vmad', 'abopt', 'absl-py','fastpm','mpi4py','cython'],
      )
|
VMBoehmREPO_NAMEMADLensPATH_START.@MADLens_extracted@MADLens-master@setup.py@.PATH_END.py
|
{
"filename": "tpu.py",
"repo_name": "tensorflow/tensorflow",
"repo_path": "tensorflow_extracted/tensorflow-master/tensorflow/python/tpu/tpu.py",
"type": "Python"
}
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ======================================
"""Library of TPU helper functions."""
import collections
import enum
from typing import Any, Callable, Iterable, List, Optional, Text, Tuple, Union
from absl import logging
import numpy as np
from tensorflow.compiler.tf2xla.python import xla as tf2xla
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.protobuf.tpu import dynamic_padding_pb2 as dynamic_padding
from tensorflow.core.protobuf.tpu import tpu_embedding_configuration_pb2 as embedding_pb2
from tensorflow.python import tf2
from tensorflow.python.compiler.xla import xla
from tensorflow.python.framework import auto_control_deps
from tensorflow.python.framework import composite_tensor
from tensorflow.python.framework import config
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import func_graph
from tensorflow.python.framework import function
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import array_ops_stack
from tensorflow.python.ops import cond
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.tpu import device_assignment as device_assignment_lib
from tensorflow.python.tpu import tensor_tracer
from tensorflow.python.tpu import tpu_feed
from tensorflow.python.tpu import tpu_function
from tensorflow.python.tpu import tpu_name_util
from tensorflow.python.tpu import tpu_replication
from tensorflow.python.tpu.ops import tpu_ops
from tensorflow.python.types import core as core_types
from tensorflow.python.util import compat
from tensorflow.python.util import nest
from tensorflow.python.util import object_identity
from tensorflow.python.util import traceback_utils
from tensorflow.python.util import variable_utils
from tensorflow.python.util.tf_export import tf_export
# Ops which can be safely pruned from XLA compile if they have no consumers.
# These ops should also have no inputs.
_UNCONNECTED_OPS_TO_PRUNE = set(["Placeholder", "VarHandleOp"])

# Node attribute tagging ops created after the device rewrite (set on the
# shape ops and padded inputs built in `_pad_all_input`).
_POST_DEVICE_REWRITE_ATTR = "_post_device_rewrite"

# Node attribute under which TPU compilation status is recorded.
# NOTE(review): consumers are outside this chunk — confirm usage before
# relying on the exact semantics.
_TPU_COMPILATION_STATUS_ATTR = "_tpu_compilation_status"

# Node attribute naming the cluster a pivot no-op belongs to; set on the
# per-cluster pivot node created in `split_compile_and_replicate`.
_PIVOT_FOR_CLUSTER = "_pivot_for_cluster"

# Public alias for `tpu_name_util.core`.
core = tpu_name_util.core
def _tpu_system_device_name(job: Optional[Text]) -> Text:
"""Returns the device name for the TPU_SYSTEM device of `job`."""
if job is None:
return "/device:TPU_SYSTEM:0"
else:
return "/job:%s/device:TPU_SYSTEM:0" % job
@tf_export(v1=["tpu.initialize_system"])
def initialize_system(
    embedding_config: Optional[embedding_pb2.TPUEmbeddingConfiguration] = None,
    job: Optional[Text] = None,
    compilation_failure_closes_chips: bool = True,
    tpu_cancellation_closes_chips: Optional[bool] = None,
) -> core_types.Tensor:
  """Initializes a distributed TPU system for use with TensorFlow.

  Args:
    embedding_config: If not None, a `TPUEmbeddingConfiguration` proto
      describing the desired configuration of the hardware embedding lookup
      tables. If embedding_config is None, no hardware embeddings can be used.
    job: The job (the XXX in TensorFlow device specification /job:XXX) that
      contains the TPU devices that will be initialized. If job=None it is
      assumed there is only one job in the TensorFlow flock, and an error will
      be returned if this assumption does not hold.
    compilation_failure_closes_chips: Whether to close TPU chips when there is
      a compilation failure.
    tpu_cancellation_closes_chips: Whether to close TPU chips when a TPU
      execution is cancelled. If None, the behavior is determined by the
      command line flag `tpu_cancellation_closes_chips` for the TPU worker.
      WARNING: this argument only applies to the TFRT TPU runtime.

  Returns:
    A serialized `TopologyProto` that describes the TPU system. Note:
    the topology must be evaluated using `Session.run` before it can be used.
  """
  serialized_embedding_config = (
      "" if embedding_config is None else embedding_config.SerializeToString())

  # Tri-state value matching the enum defined in
  # core/tpu/kernels/tpu_execute_op_options.h: 0 = unset, 1 = True, 2 = False.
  if tpu_cancellation_closes_chips is None:
    cancellation_mode = 0
  elif tpu_cancellation_closes_chips:
    cancellation_mode = 1
  else:
    cancellation_mode = 2

  with ops.device(_tpu_system_device_name(job)):
    topology = tpu_ops.configure_distributed_tpu(
        compilation_failure_closes_chips=compilation_failure_closes_chips,
        tpu_cancellation_closes_chips=cancellation_mode,
    )

    if embedding_config is None:
      return topology

    # The returned op must yield the topology when executed, but the embedding
    # initialization has to run between configuring the TPU and returning the
    # topology — hence the chained control dependencies below.
    with ops.control_dependencies([topology]):
      embedding_init = tpu_ops.configure_tpu_embedding(
          config=serialized_embedding_config)
    with ops.control_dependencies([embedding_init]):
      return array_ops.identity(topology, name="tpu_init_identity")
def initialize_system_for_tpu_embedding(
    embedding_config: embedding_pb2.TPUEmbeddingConfiguration,
    job: Optional[Text] = None,
) -> ops.Operation:
  """Initializes a distributed TPU Embedding system for use with TensorFlow.

  The following two are equivalent:
  1. initialize_system() with embedding_config.
  2. initialize_system() without embedding_config, then
     initialize_system_for_tpu_embedding().

  initialize_system() should not be called with embedding_config if
  initialize_system_for_tpu_embedding() is meant to be called later.

  Args:
    embedding_config: a `TPUEmbeddingConfiguration` proto describing the
      desired configuration of the hardware embedding lookup tables.
    job: The job (the XXX in TensorFlow device specification /job:XXX) that
      contains the TPU devices that will be initialized. If job=None it is
      assumed there is only one job in the TensorFlow flock, and an error will
      be returned if this assumption does not hold.

  Returns:
    A no-op.
  """
  with ops.device(_tpu_system_device_name(job)):
    return tpu_ops.configure_tpu_embedding(
        config=embedding_config.SerializeToString())
@tf_export(v1=["tpu.shutdown_system"])
def shutdown_system(job: Optional[Text] = None) -> ops.Operation:
  """Shuts down a running distributed TPU system.

  Args:
    job: The job (the XXX in TensorFlow device specification /job:XXX) that
      contains the TPU devices that will be shutdown. If job=None it is
      assumed there is only one job in the TensorFlow flock, and an error will
      be returned if this assumption does not hold.

  Returns:
    An op that, when run, shuts down the distributed TPU system.
  """
  with ops.device(_tpu_system_device_name(job)):
    return tpu_ops.shutdown_distributed_tpu()
@auto_control_deps.register_acd_resource_resolver
def tpu_replicated_input_resolver(
    op: ops.Operation,
    resource_reads: object_identity.ObjectIdentitySet,
    resource_writes: object_identity.ObjectIdentitySet) -> bool:
  """Replaces TPUReplicatedInput outputs with its inputs in resource_inputs.

  Args:
    op: The op whose resource inputs are being resolved.
    resource_reads: Set of resource handles read by `op`; updated in place.
    resource_writes: Set of resource handles written by `op`; updated in
      place.

  Returns:
    True if either set was modified, False otherwise.
  """
  # Ignore TPUReplicatedInput for ACD purposes since we will be directly adding
  # control deps on the replicated inputs.
  if op.type == "TPUReplicatedInput":
    if resource_reads or resource_writes:
      resource_reads.clear()
      resource_writes.clear()
      return True
    else:
      return False

  # Replace tensors in `resource_inputs` which are outputs of
  # TPUReplicatedInput with the actual replicated inputs. This allows ACD to
  # correctly add control deps when there are multiple calls to `run` in a
  # `tf.function`.
  def replace_with_unreplicated_resources(resource_inputs):
    """Replaces handles in `resource_inputs` with their unreplicated inputs."""
    to_remove = []
    to_add = []
    for resource in resource_inputs:
      if resource.op.type == "TPUReplicatedInput":
        to_remove.append(resource)
        to_add.extend(resource.op.inputs)
    for t in to_remove:
      resource_inputs.discard(t)
    resource_inputs.update(to_add)
    return bool(to_add or to_remove)

  # BUGFIX: evaluate both rewrites unconditionally. The previous
  # `bool(f(resource_reads) or f(resource_writes))` short-circuited, so
  # `resource_writes` was never rewritten whenever `resource_reads` had been
  # modified, leaving stale TPUReplicatedInput handles in the write set.
  reads_changed = replace_with_unreplicated_resources(resource_reads)
  writes_changed = replace_with_unreplicated_resources(resource_writes)
  return reads_changed or writes_changed
@tf_export(v1=["tpu.PaddingSpec"])
class PaddingSpec(enum.IntEnum):
  """Represents the type of padding policies for tpu.replicate.

  Passed as the `padding_spec` argument of `tpu.replicate` /
  `split_compile_and_replicate` to control how dynamic input dimensions are
  padded (consumed by `_pad_all_input`).
  """
  # By default the policy is set to AUTO: each dynamic input shape dimension
  # is padded to the maximum of that dimension over all the replicas.
  AUTO = 0
  # Bucketize the dynamic input shape dimension into a power of 2, which
  # reduces XLA recompilation across varying input sizes.
  POWER_OF_TWO = 1
@tf_export("tpu.XLAOptions")
class XLAOptions(
    collections.namedtuple("XLAOptions", [
        "use_spmd_for_xla_partitioning",
        "enable_xla_dynamic_padder",
    ])):
  """XLA compilation options.

  Attributes:
    use_spmd_for_xla_partitioning: Boolean. Whether to use XLA's SPMD
      partitioner instead of the MPMD partitioner when compiler partitioning
      is requested.
    enable_xla_dynamic_padder: Boolean. Whether to enable the XLA dynamic
      padder infrastructure to handle dynamic shaped inputs inside XLA. True
      by default. Disabling this may cause correctness issues with dynamic
      shaped inputs, as XLA will just assume the inputs have padded shapes.
      However, users can optionally set it to False to improve device time if
      masking is already handled on the user side.
  """

  def __new__(cls,
              use_spmd_for_xla_partitioning=True,
              enable_xla_dynamic_padder=True):
    # Zero-argument super() resolves to the namedtuple base; defaults above
    # make both options opt-out.
    return super().__new__(cls, use_spmd_for_xla_partitioning,
                           enable_xla_dynamic_padder)
@tf_export(v1=["tpu.replicate"])
@traceback_utils.filter_traceback
def replicate(
    computation: Callable[..., Any],
    inputs: Optional[List[List[core_types.Tensor]]] = None,
    infeed_queue: Optional[tpu_feed.InfeedQueue] = None,
    device_assignment: Optional[device_assignment_lib.DeviceAssignment] = None,
    name: Optional[Text] = None,
    maximum_shapes: Optional[Any] = None,
    padding_spec: Optional[PaddingSpec] = None,
    xla_options: Optional[XLAOptions] = None) -> List[Any]:
  """Builds a graph operator that runs a replicated TPU computation.

  Example for the basic usage that `inputs` has static shape:

  ```python
  def computation(x):
    x = x + 1
    return tf.math.reduce_mean(x)

  x = tf.convert_to_tensor([1., 2., 3.])
  y = tf.convert_to_tensor([4., 5., 6.])
  tf.compat.v1.tpu.replicate(computation, inputs=[[x], [y]])
  ```

  If the `inputs` has dynamic shapes and you would like to automatically
  bucketize the inputs to avoid XLA recompilation. See the advanced example
  below:

  ```python
  def computation(x):
    x = x + 1
    return tf.math.reduce_mean(x)

  # Assume input tensors in two replicas `x` and `y` both have dynamic shape
  # ([None, 2]).
  tf.compat.v1.tpu.replicate(
      computation,
      inputs=[x, y],
      maximum_shapes=[tf.TensorShape([None, None])],
      padding_spec=tf.compat.v1.tpu.PaddingSpec.POWER_OF_TWO)
  ```

  Args:
    computation: A Python function that builds the computation to replicate.
    inputs: A list of lists of input tensors or `None` (equivalent to `[[]]`),
      indexed by `[replica_num][input_num]`. All replicas must have the same
      number of inputs. Each input can be a nested structure containing values
      that are convertible to tensors. Note that passing an N-dimension list
      of compatible values will result in an N-dimension list of scalar
      tensors rather than a single Rank-N tensor. If you need different
      behavior, convert part of inputs to tensors with `tf.convert_to_tensor`.
    infeed_queue: If not `None`, the `InfeedQueue` from which to append a
      tuple of arguments as inputs to computation.
    device_assignment: If not `None`, a `DeviceAssignment` describing the
      mapping between logical cores in the computation with physical cores in
      the TPU topology. Uses a default device assignment if `None`. The
      `DeviceAssignment` may be omitted if each replica of the computation
      uses only one core, and there is either only one replica, or the number
      of replicas is equal to the number of cores in the TPU system.
    name: (Deprecated) Does nothing.
    maximum_shapes: A nested structure of tf.TensorShape representing the
      shape to which the respective component of each input element in each
      replica should be padded. Any unknown dimensions (e.g.
      tf.compat.v1.Dimension(None) in a tf.TensorShape or -1 in a tensor-like
      object) will be padded to the maximum size of that dimension over all
      replicas. The structure of `maximum_shapes` needs to be the same as
      `inputs[0]`.
    padding_spec: An enum specified by `tpu.PaddingSpec`. This describes the
      padding policy when the `inputs` to `tpu.replicate` is dynamic. One
      usage is to enable automatic bucketizing on the inputs by setting the
      value to `tpu.PaddingSpec.POWER_OF_TWO`, which can help to reduce the
      recompilation in the XLA side.
    xla_options: An instance of `tpu.XLAOptions` which indicates the options
      passed to XLA compiler. Use `None` for default options.

  Returns:
    A list of outputs, indexed by `[replica_num]`; each output can be a nested
    structure same as what computation() returns, with a few exceptions:
      1) None output: a NoOp would be returned which control-depends on
         computation.
      2) Single value output: A tuple containing the value would be returned.
      3) Operation-only outputs: a NoOp would be returned which
         control-depends on computation.
    TODO(b/121383831): Investigate into removing these special cases.

  Raises:
    ValueError: If all replicas do not have equal numbers of input tensors.
    ValueError: If the number of inputs per replica does not match
      the number of formal parameters to `computation`.
    ValueError: If the static `inputs` dimensions don't match with the values
      given in `maximum_shapes`.
    ValueError: If the structure of inputs per replica does not match
      the structure of `maximum_shapes`.
  """
  # `split_compile_and_replicate` returns [compile_op, replicated_outputs];
  # callers of `replicate` only need the replicated outputs.
  compile_op_and_outputs = split_compile_and_replicate(
      computation,
      inputs=inputs,
      infeed_queue=infeed_queue,
      device_assignment=device_assignment,
      name=name,
      maximum_shapes=maximum_shapes,
      padding_spec=padding_spec,
      xla_options=xla_options)
  _, per_replica_outputs = compile_op_and_outputs
  return per_replica_outputs
def _ceil_to_pow_of_n(x, n):
  """Rounds `x` up to the nearest power of `n`, returned as an int32 tensor."""
  x_float = math_ops.cast(x, dtypes.float32)
  # log_n(x) via the change-of-base identity, rounded up to the next integer
  # exponent.
  exponent = math_ops.ceil(math_ops.log(x_float) / math_ops.log(n * 1.0))
  return math_ops.cast(math_ops.pow(n * 1.0, exponent), dtypes.int32)
def _pad_all_input(
    inputs: Iterable[core_types.Tensor],
    padded_shapes: List[Optional[tensor_shape.TensorShape]],
    padding_spec: PaddingSpec
) -> Tuple[List[List[Any]], List[dynamic_padding.PaddingMap]]:
  """Pad all input tensors given padded_shapes.

  The real shape tensors will be concatenated with the padded original inputs.

  Args:
    inputs: The original inputs, indexed by `[replica][input_num]`.
    padded_shapes: A list of padded shapes for each input. If an entry is None,
      no padding is performed.
    padding_spec: An enum specified by `tpu.PaddingSpec`. This describes the
      padding policy when the `inputs` to `tf.tpu.replicate` is dynamic.
      One usage is to enable automatic bucketizing on the inputs by setting the
      value to `tpu.PaddingSpec.POWER_OF_TWO`, which can help to reduce the
      recompilation in the XLA side.

  Returns:
    The padded inputs and a PaddingMap list which maps the padded input
    dimension to the real shape argument index.
  """
  # maximum_static_shapes[idx][i] indicates the maximum static size of ith
  # dimension of the idx input among all the replicas.
  maximum_static_shapes = []
  # need_padding[idx][i] indicates whether the ith dimension of the idx input
  # needs padding.
  need_padding = []
  input_shape_tensors = []
  for core_idx, inputs_per_core in enumerate(inputs):
    for idx, input_tensor in enumerate(inputs_per_core):
      input_shape = input_tensor.get_shape().as_list()
      if core_idx == 0:
        # First replica: seed the per-input bookkeeping structures.
        input_shape_tensors.append([])
        maximum_static_shapes.append(input_shape)
        need_padding.append(np.full_like(input_shape, False, dtype=bool))
      else:
        # Later replicas: a dimension needs padding if it is unknown (None) or
        # differs from the size recorded for an earlier replica.
        for i, s in enumerate(input_shape):
          if s is None or s != maximum_static_shapes[idx][i]:
            need_padding[idx][i] = True
        # NOTE(review): lexicographic `max` over shape lists — presumably the
        # same position is never None in one replica and an int in another,
        # which would raise TypeError here; confirm with callers.
        maximum_static_shapes[idx] = max(input_shape,
                                         maximum_static_shapes[idx])

      # Append _POST_DEVICE_REWRITE_ATTR attributes to the real shape ops.
      real_input_shape = array_ops.shape(input_tensor)
      real_input_shape.op._set_attr(  # pylint: disable=protected-access
          _POST_DEVICE_REWRITE_ATTR,
          attr_value_pb2.AttrValue(b=True))
      input_shape_tensors[idx].append(real_input_shape)

  # Per-input element-wise maximum of the runtime shapes across all replicas.
  maximum_shapes = []
  for shapes_per_input in input_shape_tensors:
    maximum_shapes.append(
        math_ops.reduce_max(array_ops_stack.stack(shapes_per_input), axis=0))

  padded_inputs = []
  real_shapes = []
  padding_maps = []
  for core_idx, inputs_per_core in enumerate(inputs):
    padded_inputs.append([])
    real_shapes.append([])
    # Real-shape scalars are appended after all the padded inputs (see the
    # `extend` at the bottom), so padding argument indices start just past the
    # last original input.
    real_shape_idx = len(inputs_per_core) - 1
    for idx, input_tensor in enumerate(inputs_per_core):
      input_shape_tensor = input_shape_tensors[idx][core_idx]
      input_shape = input_tensor.get_shape().as_list()
      padded_shape = padded_shapes[idx]

      # If we have no padded_shape, then skip padding.
      if any(need_padding[idx]) and padded_shape is not None:
        for i, s in enumerate(input_shape):
          if need_padding[idx][i]:
            if core_idx == 0:
              # Padding maps are shared by all replicas, so record them only
              # once, while processing the first replica.
              real_shape_idx += 1
              padding_map = dynamic_padding.PaddingMap()
              padding_map.arg_index = idx
              padding_map.shape_index = i
              padding_map.padding_arg_index = real_shape_idx
              padding_maps.append(padding_map)
            # Each replica still contributes its own real-shape scalar.
            real_shapes[core_idx].append(
                math_ops.cast(input_shape_tensor[i], dtypes.int32))

        paddings = []
        for i, s in enumerate(padded_shape.dims):
          if need_padding[idx][i]:
            # The minimum padded dimension size is 2 as XLA doesn't support size
            # 1 dynamic size.
            minimum_dynamic_dim_size = 2
            if s.value is not None:
              # Pad to the given maximum value.
              max_dim_size = max(s.value, minimum_dynamic_dim_size)
            else:
              # If maximum value is not given, then pad to the maximum dimension
              # among all the cores.
              max_dim_size = math_ops.maximum(maximum_shapes[idx][i],
                                              minimum_dynamic_dim_size)
              if padding_spec == PaddingSpec.POWER_OF_TWO:
                max_dim_size = _ceil_to_pow_of_n(max_dim_size, 2)
            # Pad to the given maximum value.
            padding = [0, max_dim_size - input_shape_tensor[i]]
          else:
            padding = [0, 0]
          paddings.append(padding)

        if input_tensor.get_shape().is_fully_defined():
          # TODO(rxsang): This is a hack to make sure padded_input has dynamic
          # shapes, so any tf.size/tf.shape op performed on it won't be constant
          # folded. Do we have better ways to do it?
          padded_input = cond.cond(
              array_ops.constant(True),
              lambda: array_ops.pad(input_tensor, paddings),  # pylint: disable=cell-var-from-loop
              lambda: input_tensor)
        else:
          padded_input = array_ops.pad(input_tensor, paddings)

        # Append _POST_DEVICE_REWRITE_ATTR attributes to all padded inputs.
        padded_input.op._set_attr(  # pylint: disable=protected-access
            _POST_DEVICE_REWRITE_ATTR,
            attr_value_pb2.AttrValue(b=True))

        padded_inputs[core_idx].append(padded_input)
      else:
        # Nothing dynamic (or no padded_shape supplied): pass through as-is.
        padded_inputs[core_idx].append(input_tensor)

  num_replicas = len(padded_inputs)
  for i in range(num_replicas):
    # Concatenate each replica's real-shape scalars after its padded inputs,
    # matching the `padding_arg_index` values recorded above.
    padded_inputs[i].extend(real_shapes[i])

  return padded_inputs, padding_maps
def _flatten_and_filter_composite(maybe_composite, non_composite_output,
                                  composite_output=None):
  """Maps a value to a marker, expanded per-component for composite tensors.

  If `maybe_composite` is not a composite tensor, returns
  `non_composite_output` unchanged. Otherwise returns a tuple containing
  `composite_output` once per component of the composite tensor.

  This is useful for computing a mask when flattening nested data with
  `expand_composites=True`. For example

  ```python
  nest.flatten(data, expand_composites=True)
  ```

  and

  ```python
  nest.flatten(nest.map(
      data, lambda x: _flatten_and_filter_composite(x, False, True)))
  ```

  will have the same length, and the second will be True exactly where the
  tensor in the first came from expanding a composite tensor.

  Args:
    maybe_composite: A value to test for being a composite tensor.
    non_composite_output: The value to return when `maybe_composite` is not a
      composite.
    composite_output: The value to fill the output tuple with if
      `maybe_composite` is a composite.

  Returns:
    `non_composite_output` or a tuple with multiple copies of
    `composite_output`.
  """
  if not isinstance(maybe_composite, composite_tensor.CompositeTensor):
    return non_composite_output
  components = nest.flatten(maybe_composite, expand_composites=True)
  return (composite_output,) * len(components)
def split_compile_and_replicate(
computation: Callable[..., Any],
inputs: Optional[List[List[core_types.Tensor]]] = None,
infeed_queue: Optional[tpu_feed.InfeedQueue] = None,
device_assignment: Optional[device_assignment_lib.DeviceAssignment] = None,
name: Optional[Text] = None,
use_tpu: bool = True,
maximum_shapes: Optional[Any] = None,
padding_spec: Optional[PaddingSpec] = None,
xla_options: Optional[XLAOptions] = None,
) -> List[List[core_types.Tensor]]:
"""Builds graph operators that runs compilation and replicated computation.
This is a lower level interface than replicate that returns a separate compile
and execute output tensor. In the generated graph the compile op feeds into
the execute op and no additional compilation is incurred when running the
compile op before the execute op. The compile op returns additional
information about the compilation but does not return the compiled program.
Args:
computation: A Python function that builds the computation to replicate.
inputs: A list of lists of input tensors or `None` (equivalent to
`[[]]`), indexed by `[replica_num][input_num]`. All replicas must
have the same number of inputs. Each input can be a nested structure
containing values that are convertible to tensors. Note that passing an
N-dimension list of compatible values will result in a N-dimension list of
scalar tensors rather than a single Rank-N tensors. If you need different
behavior, convert part of inputs to tensors with `tf.convert_to_tensor`.
infeed_queue: If not `None`, the `InfeedQueue` from which to append a tuple
of arguments as inputs to computation.
device_assignment: If not `None`, a `DeviceAssignment` describing the
mapping between logical cores in the computation with physical cores in
the TPU topology. Uses a default device assignment if `None`. The
`DeviceAssignment` may be omitted if each replica of the computation uses
only one core, and there is either only one replica, or the number of
replicas is equal to the number of cores in the TPU system.
name: (Deprecated) Does nothing.
use_tpu: When false, the input `computation` is executed on the XLA CPU/GPU
backends. Currently, only supports a default placement (computation is
placed on GPU if one is available, and on CPU if not).
maximum_shapes: A nested structure of tf.TensorShape representing the shape
to which the respective component of each input element in each replica
should be padded. Any unknown dimensions (e.g.
tf.compat.v1.Dimension(None) in a tf.TensorShape or -1 in a tensor-like
object) will be padded to the maximum size of that dimension over all
replicas. The structure of `maximum_shapes` needs to be the same as
`inputs[0]`.
padding_spec: An enum specified by `tf.tpu.PaddingSpec`. This describes the
padding policy when the `inputs` to `tf.tpu.replicate` is dynamic.
One usage is to enable automatic bucketizing on the inputs by setting the
value to `tpu.PaddingSpec.POWER_OF_TWO`, which can help to reduce the
recompilation in the XLA side.
xla_options: An instance of `tpu.XLAOptions` which indicates the options
passed to XLA compiler. Use `None` for default options.
Returns:
A list of lists with the first list corresponding to the compile op and the
second a list of output tensors, indexed by `[replica_num][output_num]`.
Raises:
ValueError: If all replicas do not have equal numbers of input tensors.
ValueError: If the number of inputs per replica does not match
the number of formal parameters to `computation`.
ValueError: If the static `inputs` dimensions don't match with the values
given in `maximum_shapes`.
ValueError: If the structure of inputs per replica does not match
the structure of `maximum_shapes`.
"""
del name
inputs = [[]] if inputs is None else inputs
xla_options = xla_options or XLAOptions()
metadata_kwargs = {}
if device_assignment is not None:
# Turn the Numpy array into a flattened list so we can pass it as an
# operator attribute.
metadata_kwargs = {
"topology":
device_assignment.topology.serialized(),
"device_assignment":
device_assignment.core_assignment.flatten().tolist()
}
metadata_kwargs["num_cores_per_replica"] = (
device_assignment.num_cores_per_replica)
# This entry is used for enabling automatic outside compilation.
metadata_kwargs["allow_soft_placement"] = config.get_soft_device_placement()
if config.get_soft_device_placement():
logging.info("Automatic outside compilation is enabled. "
"Ops without XLA kernels will be automatically "
"placed on CPU.")
if not isinstance(inputs, list):
raise TypeError("tpu.replicate() inputs must be a list of lists/tuples, "
f"received {type(inputs)}")
if any(not isinstance(inp, (list, tuple)) for inp in inputs):
raise TypeError(
"tpu.replicate() inputs must be a list of lists/tuples, "
f"received types: {[type(inp) for inp in inputs]}")
num_replicas = len(inputs)
# No replicas? Nothing to do.
if num_replicas == 0:
return []
# Checks all replicas have the same structure.
for i in range(1, num_replicas):
nest.assert_same_structure(inputs[0], inputs[i])
# Explicitly read variables.
inputs = variable_utils.convert_variables_to_tensors(inputs)
# Flatten inputs. This structure may contain None values, which will be
# handled later.
flat_inputs_with_nones = [
nest.flatten(per_replica_input, expand_composites=True)
for per_replica_input in inputs
]
# Mask parallel to one replica's inputs with True for tensors coming from
# composites.
is_composite = nest.flatten(nest.map_structure(
lambda x: _flatten_and_filter_composite(x, False, True), inputs[0]))
# Converts inputs to Tensors, replacing Nones with a placeholder 0 since
# tpu_ops.tpu_replicated_input() can't handle non-Tensor values.
flat_inputs = []
for inp in flat_inputs_with_nones:
flat_inputs.append([
constant_op.constant(0) if x is None else ops.convert_to_tensor(x)
for x in inp
])
# Verifies that all replicas have matching numbers and types of inputs
flat_input_types = [x.dtype for x in flat_inputs[0]]
input_arity = len(inputs[0])
flat_input_arity = len(flat_input_types)
for i in range(num_replicas):
if len(inputs[i]) != input_arity:
raise ValueError("Replicas must have the same number of inputs. "
"Replica 0 had {} inputs, replica {} had {} "
"inputs.".format(input_arity, i, len(inputs[i])))
types = [x.dtype for x in flat_inputs[i]]
if types != flat_input_types:
raise ValueError("Replicas must have matching input types. Replica 0 had "
"input types {}, replica {} had input types {}".format(
flat_input_types, i, types))
arg_error = xla.check_function_argument_count(
computation, input_arity, infeed_queue)
if arg_error is not None:
if infeed_queue is None:
raise TypeError(
"Supplied computation cannot be called with the specified inputs. "
f"You specified {input_arity} inputs: {[i.name for i in inputs[0]]}, "
f"but the computation needs {arg_error}")
else:
raise TypeError(
"Supplied computation cannot be called with the specified inputs. "
f"You specified {input_arity} inputs: {[i.name for i in inputs[0]]} ",
f"and {infeed_queue.number_of_tuple_elements} additional inputs "
f"from infeed, but the computation needs {arg_error}")
dynamic_shape_inputs = False
if maximum_shapes:
if infeed_queue:
raise ValueError(
"Dynamic input shapes are not supported with infeed queues")
# Make sure maximum_shapes has the same structure as inputs.
nest.assert_same_structure(inputs[0], maximum_shapes, check_types=False)
# Flatten padded shapes:
# For composite tensor components, we don't want to pad them. For each
# entry of maximum_shapes that corresponds to a composite tensor, replace it
# by a tuple of Nones of the same length as the number of components of the
# composite tensor. When we flatten a second time, this makes
# flat_maximum_shapes have the same length as flat_inputs[i]. We can then
# avoid padding these tensors. The assumption is that they will be used by
# outside compilation or that the components are statically shaped and will
# be used by tpu compatible ops.
flat_maximum_shapes = nest.flatten(
[_flatten_and_filter_composite(x, y)
for x, y in zip(nest.flatten(inputs[0]),
nest.flatten(maximum_shapes))])
flat_maximum_shapes = [
tensor_shape.TensorShape(s) if s is not None else None
for s in flat_maximum_shapes
]
nest.assert_same_structure(flat_inputs[0], flat_maximum_shapes,
check_types=False)
unpadded_inputs = flat_inputs
flat_inputs, padding_maps = _pad_all_input(unpadded_inputs,
flat_maximum_shapes,
padding_spec)
if padding_maps:
dynamic_shape_inputs = True
logging.info("TPU has inputs with dynamic shapes: %s", inputs[0])
metadata_kwargs["step_marker_location"] = getattr(
computation, "step_marker_location", "STEP_MARK_AT_ENTRY")
metadata_kwargs["use_spmd_for_xla_partitioning"] = \
xla_options.use_spmd_for_xla_partitioning
graph = ops.get_default_graph()
# Fan-in: Builds a TPUReplicatedInput node for each input.
flat_replicated_inputs = []
for i in range(0, len(flat_inputs[0])):
replicas = [flat_inputs[replica][i] for replica in range(num_replicas)]
flat_replicated_inputs.append(
tpu_ops.tpu_replicated_input(
replicas, name="input{}".format(i)))
if isinstance(graph, func_graph.FuncGraph):
# When we are in Tensorflow 2.0 function, 'graph' will be a FuncGraph
# object. If both outside graph and this function have a TPU cluster,
# they will have the same cluster name and it will cause problems (because
# we lower functional ops in Tensorflow 2.0). Append function name to
# 'cluster_name' to avoid cluster name collision.
cluster_name = graph.unique_name("cluster_" + graph.name)
else:
cluster_name = graph.unique_name("cluster")
pivot = control_flow_ops.no_op(name=cluster_name + "/pivot")
pivot._set_attr(_PIVOT_FOR_CLUSTER, # pylint: disable=protected-access
attr_value_pb2.AttrValue(s=compat.as_bytes(cluster_name)))
context = tpu_replication.TPUReplicateContext(
name=cluster_name, num_replicas=num_replicas, pivot=pivot)
try:
context.Enter()
metadata = tpu_ops.tpu_replicate_metadata(
num_replicas=num_replicas, use_tpu=use_tpu, **metadata_kwargs)
with tpu_function.tpu_shard_context(
num_replicas), ops.control_dependencies([metadata]):
if dynamic_shape_inputs and xla_options.enable_xla_dynamic_padder:
for padding_map in padding_maps:
input_shape = flat_replicated_inputs[padding_map.arg_index].shape
flat_replicated_inputs[
padding_map.arg_index] = tf2xla.set_dynamic_dimension_size(
flat_replicated_inputs[padding_map.arg_index],
padding_map.shape_index,
flat_replicated_inputs[padding_map.padding_arg_index])
flat_replicated_inputs[padding_map.arg_index].set_shape(input_shape)
# Add identity ops so even unused inputs are "consumed" by the
# computation. This is to avoid orphaned TPUReplicatedInput nodes.
# TODO(phawkins): consider instead pruning unused TPUReplicatedInput
# and eliding trivial TPUReplicatedInput/TPUReplicatedOutput pairs.
flat_replicated_inputs = [
array_ops.identity(x, name="replicated_input_{}".format(i))
for i, x in enumerate(flat_replicated_inputs)
]
for i, composite in zip(flat_replicated_inputs, is_composite):
# pylint: disable=protected-access
# Add an attribute to the identity node so that they could be removed in
# encapsulate TPU computation pass if unused. However we don't remove
# inputs when dynamic padding is enabled.
# TODO(rxsang): Use other ways except argument index in padding_map so
# outside compilation can work with dynamic padding correctly.
if not dynamic_shape_inputs or composite:
i.op._set_attr("_tpu_input_identity",
attr_value_pb2.AttrValue(b=True))
# pylint: enable=protected-access
# Clobber replicated placeholders with Nones.
computation_inputs = [
None if inp is None else replicated for replicated, inp in zip(
flat_replicated_inputs, flat_inputs_with_nones[0])
]
# Unflatten the computation inputs to match original input structure.
computation_inputs = nest.pack_sequence_as(
structure=inputs[0],
flat_sequence=computation_inputs[:flat_input_arity],
expand_composites=True)
# If there is an infeed queue, adds the dequeued values to the
# computation's inputs.
if infeed_queue is not None:
infeed_queue.set_number_of_shards(num_replicas)
for t in infeed_queue.generate_dequeue_op():
computation_inputs.append(t)
# Only resource variables work inside a TPU computation, so turn on
# resource variables for the computation.
# TODO(phawkins): consider removing this code. It will
# be less confusing to clients if they knowingly choose to use resource
# variables.
# Partitioned variables is not supported (b/112311320).
vscope = variable_scope.get_variable_scope()
saved_use_resource = vscope.use_resource
saved_custom_getter = vscope.custom_getter
def custom_getter(getter, name, *args, **kwargs):
"""Variables on TPU have a few restrictions."""
partitioner = kwargs.get("partitioner", None)
if partitioner is not None:
kwargs["partitioner"] = None
logging.warning(
"Partitioned variables are not supported on TPU. Got "
"`partitioner` that is %s for variable %s. "
"Setting `partitioner` to `None`.", partitioner, name)
if saved_custom_getter is None:
return getter(name, *args, **kwargs)
else:
return saved_custom_getter(getter, name, *args, **kwargs)
vscope.set_use_resource(True)
vscope.set_custom_getter(custom_getter)
outputs = computation(*computation_inputs)
vscope.set_use_resource(saved_use_resource)
vscope.set_custom_getter(saved_custom_getter)
outputs = variable_utils.convert_variables_to_tensors(outputs)
need_spmd_partitioning = (
xla_options.use_spmd_for_xla_partitioning and
device_assignment is not None and
device_assignment.num_cores_per_replica > 1)
outputs_is_flat = xla.is_flat(outputs)
if outputs_is_flat:
output_tensors, control_deps, pack_template = _postprocess_flat_outputs(
outputs, need_spmd_partitioning)
else:
output_tensors, control_deps, pack_template = (
_postprocess_non_flat_outputs(outputs, need_spmd_partitioning))
if tensor_tracer.TensorTracer.is_enabled():
if tf2.enabled():
logging.warn("TF API ver >= 2.0 detected. "
"Tensor Tracer v1 is not enabled.")
else:
tt = tensor_tracer.TensorTracer()
output_tensors = tt.trace_tpu(ops.get_default_graph(),
output_tensors, control_deps,
num_replicas)
context.ExitResult(output_tensors)
finally:
context.report_unsupported_operations()
context.Exit()
host_compute_core = context.HostComputeCore()
if host_compute_core:
attr_value = attr_value_pb2.AttrValue()
attr_value.list.s.extend(compat.as_bytes(x) for x in host_compute_core)
metadata._set_attr("host_compute_core", attr_value) # pylint: disable=protected-access
with ops.control_dependencies([metadata]):
if use_tpu:
compile_status = tpu_ops.tpu_compilation_result()
op = compile_status.op
attr_value = attr_value_pb2.AttrValue(s=compat.as_bytes(cluster_name))
op._set_attr(_TPU_COMPILATION_STATUS_ATTR, attr_value) # pylint: disable=protected-access
else:
compile_status = control_flow_ops.no_op(name="compilation_status")
if not output_tensors:
# Returns a list of NoOps dependent on the replication Op, indexed by
# [replica_num].
return [
compile_status,
[
control_flow_ops.group(control_deps, name="shard_%d" % i)
for i in range(num_replicas)
]
]
# Fan-out: Builds a TPUReplicatedOutput node for each output.
replicated_outputs = [[] for i in range(num_replicas)]
for i, t in enumerate(output_tensors):
# None values returned by the computation can't be sent to
# tpu_ops.tpu_replicated_output(), we handle them specially here. We can
# avoid the placeholder 0 routine required on the inputs since outputs are
# replicated per-tensor, not per-replica, so we can skip replication.
if t is None:
for replica in range(num_replicas):
replicated_outputs[replica].append(None)
continue
# Fan-out: Builds a TPUReplicatedOutput node for each output.
ys = tpu_ops.tpu_replicated_output(
t, num_replicas, name="output{}".format(i))
# Wraps the outputs in identity operators so the names of any possible
# `fetch` nodes are preserved by the replication rewrite.
with ops.control_dependencies(control_deps):
for replica in range(num_replicas):
replicated_outputs[replica].append(
array_ops.identity(
ys[replica], name="output_%d_shard_%d" % (i, replica)))
replicated_outputs = [
nest.pack_sequence_as(pack_template, replica_outs, expand_composites=True)
for replica_outs in replicated_outputs
]
return [compile_status, replicated_outputs]
def _postprocess_flat_outputs(
    outputs: Any,
    need_spmd_partitioning: bool
) -> Tuple[List[Optional[core_types.Tensor]], List[ops.Operation], List[Any]]:
  """Validates flat outputs, adds back device assignments and other attrs.

  (Docstring previously said "non-flat", copy-pasted from the sibling
  `_postprocess_non_flat_outputs`; this function handles the flat case.)

  Args:
    outputs: Output from `computation` inside `tpu.rewrite`.
    need_spmd_partitioning: Whether XLA SPMD partitioning is needed.

  Returns:
    - Tensors extracted from outputs.
    - Operations extracted from outputs.
    - A pack template for use with nest.pack_sequence_as to pack the tensors.
  """
  # Following code segment is to preserve legacy behavior. Previously we only
  # supported flat outputs and thus for consistency it was nice to convert even
  # single element into a tuple. But now that we support arbitrary output
  # structure, this is no longer necessary.
  # TODO(b/121383831): Migrate all legacy use cases and delete this special
  # case.
  # If the computation returns `None`, make it an empty tuple.
  if outputs is None:
    outputs = tuple()

  # For legacy / backwards compatibility reasons we return a list for "flat"
  # output values (even if the user's flat return value was a different type or
  # even just a scalar value) so use nest.flatten to compute a flat list pack
  # template.
  pack_template = nest.flatten(outputs, expand_composites=False)

  # Even though outputs is already "flat", we flatten any composites so their
  # component tensors can be tagged and replicated. The pack_template will be
  # used by the caller to repack the composite tensors.
  outputs = nest.flatten(outputs, expand_composites=True)

  # Append `no_op` here so that fetching any return value of this function
  # will trigger TPUExecute node.
  outputs += (control_flow_ops.no_op(),)

  maybe_convert = lambda x: None if x is None else ops.convert_to_tensor(x)
  try:
    if need_spmd_partitioning:
      outputs = [
          o if isinstance(o, ops.Operation) else maybe_convert(o)
          for o in outputs
      ]
    else:
      with ops.device(core(0)):
        outputs = [
            o if isinstance(o, ops.Operation) else maybe_convert(o)
            for o in outputs
        ]
  except Exception as e:
    # Chain the original conversion failure so the root cause survives in the
    # traceback (`raise ... from e`).
    raise ValueError(
        "TPU function return values must all either be Operations or "
        f"convertible to Tensors. Got error: {e}") from e

  # Separates the returned Operations and Tensors.
  output_operations = [o for o in outputs if isinstance(o, ops.Operation)]
  output_tensors = [o for o in outputs if not isinstance(o, ops.Operation)]

  # Enforce the documented contract: all Tensors first, then Operations.
  if outputs != output_tensors + output_operations:
    raise ValueError(
        "TPU functions must return zero-or more Tensor values followed by "
        "zero or more Operations.")

  # Trim operations off the end of the pack template. output_operations has 1
  # extra element due to the no-op that is added.
  if len(output_operations) > 1:
    pack_template = pack_template[:1 - len(output_operations)]

  # Wraps outputs in Identity ops. Otherwise a replicated input copied
  # straight to an output would bypass the replicate(). This would be bad
  # because the TPUReplicatedInput/TPUReplicatedOutput operator would not
  # be rewritten away, leading to a runtime error.
  # TODO(phawkins): extend the rewrite to elide these nodes instead.
  new_output_tensors = []
  for t in output_tensors:
    if t is None:
      new_output_tensors.append(None)
    elif need_spmd_partitioning:
      o = array_ops.identity(t)
      # pylint: disable=protected-access
      o.op._set_attr("_tpu_output_identity", attr_value_pb2.AttrValue(b=True))
      # pylint: enable=protected-access
      new_output_tensors.append(o)
    else:
      with ops.device(t.device if t.device else core(0)):
        o = array_ops.identity(t)
        # pylint: disable=protected-access
        o.op._set_attr("_tpu_output_identity",
                       attr_value_pb2.AttrValue(b=True))
        # pylint: enable=protected-access
        new_output_tensors.append(o)
  return new_output_tensors, output_operations, pack_template
def _postprocess_non_flat_outputs(
    outputs: Any,
    need_spmd_partitioning: bool
) -> Tuple[List[Optional[core_types.Tensor]], List[ops.Operation], List[Any]]:
  """Validates non-flat outputs, adds back device assignments and other attrs.

  Args:
    outputs: Output from `computation` inside `tpu.rewrite`.
    need_spmd_partitioning: Whether XLA SPMD partitioning is needed.

  Returns:
    - Tensors extracted from outputs.
    - An empty Operations list because Operations are not allowed in non-flat
      outputs.
    - A pack template for use with nest.pack_sequence_as to pack the tensors.
  """
  # Flatten output items, expanding composites into their component tensors.
  flat_outputs = nest.flatten(outputs, expand_composites=True)

  # Convert all non-None non-Operation outputs to Tensors.
  for i, o in enumerate(flat_outputs):
    if o is None:
      flat_outputs[i] = None
      continue

    # Operations are rejected outright: in a non-flat structure there is no
    # well-defined position for "trailing" operations.
    if isinstance(o, ops.Operation):
      raise ValueError(
          "tpu.rewrite does not support Operation as return value in non-flat "
          "output structure. You can set returned Operations as control "
          "dependencies of returned Tensors so Operations are triggered when "
          f'Tensors are evaluated. Operation found: "{o.name}"')

    try:
      o = ops.convert_to_tensor(o)
    except Exception as e:
      # Chain the original conversion failure so the root cause survives in
      # the traceback (`raise ... from e`).
      raise ValueError(
          "TPU function return values must all either be Operations or "
          f'convertible to Tensors. Got error: "{e}"') from e

    # Wraps outputs in Identity ops. Otherwise a replicated input copied
    # straight to an output would bypass the replicate(). This would be bad
    # because the TPUReplicatedInput/TPUReplicatedOutput operator would not
    # be rewritten away, leading to a runtime error.
    # TODO(phawkins): extend the rewrite to elide these nodes instead.
    # NOTE(review): unlike _postprocess_flat_outputs, both branches below wrap
    # the attributed identity in a *second* identity, so the
    # `_tpu_output_identity` attr sits on the inner op only — confirm the
    # extra identity is intentional.
    if need_spmd_partitioning:
      o = array_ops.identity(o)
      # pylint: disable=protected-access
      o.op._set_attr("_tpu_output_identity", attr_value_pb2.AttrValue(b=True))
      # pylint: enable=protected-access
      flat_outputs[i] = array_ops.identity(o)
    else:
      with ops.device(o.device if o.device else core(0)):
        o = array_ops.identity(o)
        # pylint: disable=protected-access
        o.op._set_attr("_tpu_output_identity",
                       attr_value_pb2.AttrValue(b=True))
        # pylint: enable=protected-access
        flat_outputs[i] = array_ops.identity(o)

  # All flat_outputs are Tensors, and no Operations.
  return flat_outputs, [], outputs
def split_compile_and_shard(
    computation: Callable[..., Any],
    inputs: Optional[List[List[Optional[core_types.Tensor]]]] = None,
    num_shards: int = 1,
    input_shard_axes: Optional[List[int]] = None,
    outputs_from_all_shards: Union[bool, List[bool]] = True,
    output_shard_axes: Optional[List[int]] = None,
    infeed_queue: Optional[tpu_feed.InfeedQueue] = None,
    device_assignment: Optional[device_assignment_lib.DeviceAssignment] = None,
    name: Optional[Text] = None,
    xla_options: Optional[XLAOptions] = None,
) -> Tuple[ops.Operation, List[core_types.Tensor]]:
  """Shards `computation` for parallel execution.

  `inputs` must be a list of Tensors or None (equivalent to an empty list),
  each of which has a corresponding split axis (from `input_shard_axes`). Each
  input is split into `num_shards` pieces along the corresponding axis, and
  computation is applied to each shard in parallel.

  Tensors are broadcast to all shards if they are lexically captured by
  `computation`. e.g.,

  x = tf.constant(7)
  def computation():
    return x + 3
  ... = shard(computation, ...)

  If `outputs_from_all_shards` is true, the outputs from all shards of
  `computation` are concatenated back together along their `output_shard_axes`.
  Otherwise, each output is taken from an arbitrary shard.

  Inputs and outputs of the computation must be at least rank-1 Tensors.

  Args:
    computation: A Python function that builds a computation to apply to each
      shard of the input.
    inputs: A list of input tensors or None (equivalent to an empty list). Each
      input tensor has a corresponding shard axes, given by `input_shard_axes`,
      which must have size divisible by `num_shards`.
    num_shards: The number of shards.
    input_shard_axes: A list of dimensions along which to shard `inputs`, or
      `None`. `None` means "shard all inputs along dimension 0". If not `None`,
      there must be one dimension per input.
    outputs_from_all_shards: Boolean or list of boolean. For each output, if
      `True`, outputs from all shards are concatenated along the corresponding
      `output_shard_axes` entry. Otherwise, each output is taken
      from an arbitrary shard. If the argument is a boolean, the argument's
      value is used for each output.
    output_shard_axes: A list of dimensions along which to concatenate the
      outputs of `computation`, or `None`. `None` means "concatenate all
      outputs along dimension 0". If not `None`, there must be one dimension
      per output. Ignored if `outputs_from_all_shards` is False.
    infeed_queue: If not `None`, the `InfeedQueue` to use to augment the inputs
      of `computation`.
    device_assignment: If not `None`, a `DeviceAssignment` describing the
      mapping between logical cores in the computation with physical cores in
      the TPU topology. Uses a default device assignment if `None`. The
      `DeviceAssignment` may be omitted if each shard of the computation uses
      only one core, and there is either only one shard, or the number of
      shards is equal to the number of cores in the TPU system.
    name: (Deprecated) Does nothing.
    xla_options: An instance of `tpu.XLAOptions` which indicates the options
      passed to XLA compiler. Use `None` for default options.

  Returns:
    A tuple of (compile op, [output tensors]).

  Raises:
    ValueError: If num_shards <= 0
    ValueError: If len(input_shard_axes) != len(inputs)
    ValueError: If len(output_shard_axes) != len(outputs from `computation`)
  """
  # TODO(phawkins): consider adding support for broadcasting Tensors passed as
  # inputs.
  if num_shards <= 0:
    raise ValueError(
        f"num_shards must be a positive integer. Received {num_shards}")

  inputs = [] if inputs is None else inputs
  if not isinstance(inputs, list):
    raise TypeError("tpu.shard()'s inputs must be a list of Tensors or None. "
                    f"Received {type(inputs)}")

  # Converts inputs to Tensors.
  inputs = [ops.convert_to_tensor(x) for x in inputs]

  if input_shard_axes is None:
    input_shard_axes = [0] * len(inputs)
  if len(inputs) != len(input_shard_axes):
    raise ValueError("Length of input_shard_axes must be equal to the number "
                     f"of inputs. Received {len(inputs)} inputs and "
                     f"{len(input_shard_axes)} input_shard_axes.")

  if inputs:
    # Splits the `inputs` along the corresponding `input_shard_axes`, giving
    # lists with layout [input][shard]
    split_inputs = [
        array_ops.split(x, num_shards, axis=axis)
        for (axis, x) in zip(input_shard_axes, inputs)]

    # Transposes the input lists to have layout [shard][input]
    transposed_inputs = [list(i) for i in zip(*split_inputs)]
  else:
    # Build an independent empty list per shard. The previous
    # `[[]] * num_shards` aliased one list object across every shard, so any
    # later mutation of one shard's input list would silently affect all of
    # them.
    transposed_inputs = [[] for _ in range(num_shards)]

  compile_op, outputs = split_compile_and_replicate(
      computation,
      transposed_inputs,
      infeed_queue=infeed_queue,
      device_assignment=device_assignment,
      name=name,
      xla_options=xla_options)

  # There must be at least one shard since num_shards > 0.
  # TODO(b/36647078) remove disable when pylint bug is fixed.
  # pylint: disable=indexing-exception
  if isinstance(outputs[0], ops.Operation):
    # pylint: enable=indexing-exception
    # There were no outputs from the computation and replicate returned a list
    # of NoOps with control dependencies on the computation. Return the first
    # one so it can be used as a control dependency or fetch node.
    # TODO(b/36647078) remove disable when pylint bug is fixed.
    # pylint: disable=indexing-exception
    return compile_op, [outputs[0]]
    # pylint: enable=indexing-exception

  # TODO(b/36647078) remove disable when pylint bug is fixed.
  # pylint: disable=indexing-exception
  num_outputs = len(outputs[0])
  # pylint: enable=indexing-exception

  if output_shard_axes is None:
    output_shard_axes = [0] * num_outputs
  if num_outputs != len(output_shard_axes):
    raise ValueError("Length of output_shard_axes must be equal to the number "
                     f"of outputs. Received {num_outputs} outputs "
                     f"and {len(output_shard_axes)} output_shard_axes.")

  if isinstance(outputs_from_all_shards, bool):
    outputs_from_all_shards = [outputs_from_all_shards] * num_outputs

  if num_outputs != len(outputs_from_all_shards):
    raise ValueError(
        "Length of outputs_from_all_shards must be equal to the number of "
        f"outputs. Received {num_outputs} outputs and "
        f"{len(outputs_from_all_shards)} outputs_from_all_shards.")

  results = []
  for (axis, all_shards, x) in zip(output_shard_axes, outputs_from_all_shards,
                                   zip(*outputs)):
    if all_shards:
      # Concatenate all of the outputs together (use stack for scalars).
      shape = x[0].shape
      is_scalar = shape is not None and (shape.ndims == 0)
      results.append((array_ops_stack.stack(list(x)) if is_scalar
                      else array_ops.concat(list(x), axis=axis)))
    else:
      # TODO(phawkins): use a smarter policy, e.g., round-robin across shards.
      results.append(x[0])

  return compile_op, results
@tf_export(v1=["tpu.shard"])
@traceback_utils.filter_traceback
def shard(
    computation: Callable[..., Any],
    inputs: Optional[List[core_types.Tensor]] = None,
    num_shards: int = 1,
    input_shard_axes: Optional[List[int]] = None,
    outputs_from_all_shards: Union[bool, List[bool]] = True,
    output_shard_axes: Optional[List[int]] = None,
    infeed_queue: Optional[tpu_feed.InfeedQueue] = None,
    device_assignment: Optional[device_assignment_lib.DeviceAssignment] = None,
    name: Optional[Text] = None,
    xla_options: Optional[XLAOptions] = None) -> List[core_types.Tensor]:
  """Shards `computation` for parallel execution on a TPU.

  Thin wrapper over `split_compile_and_shard` that discards the compilation
  op and returns only the output tensors.

  Each entry of `inputs` (a list of Tensors, or None for an empty list) is
  split into `num_shards` pieces along its axis from `input_shard_axes`, and
  `computation` is applied to every shard in parallel. Tensors lexically
  captured by `computation` are broadcast to all shards, e.g.,

  x = tf.constant(7)
  def computation():
    return x + 3
  ... = shard(computation, ...)

  TODO(phawkins): consider adding support for broadcasting Tensors passed
  as inputs.

  When `outputs_from_all_shards` is true, the per-shard outputs are
  concatenated back together along `output_shard_axes`; otherwise each
  output comes from an arbitrary shard. Inputs and outputs of the
  computation must be at least rank-1 Tensors.

  Args:
    computation: A Python function that builds the computation to apply to
      each shard of the input.
    inputs: A list of input tensors or None (equivalent to an empty list).
      Each tensor's size along its shard axis must be divisible by
      `num_shards`.
    num_shards: The number of shards.
    input_shard_axes: Dimensions along which to shard `inputs`, one per
      input, or `None` to shard every input along dimension 0.
    outputs_from_all_shards: Boolean or list of booleans, one per output.
      `True` concatenates that output across shards along the matching
      `output_shard_axes` entry; `False` takes it from an arbitrary shard.
      A single boolean applies to every output.
    output_shard_axes: Dimensions along which to concatenate the outputs,
      one per output, or `None` for dimension 0 everywhere. Ignored when
      `outputs_from_all_shards` is False.
    infeed_queue: If not `None`, the `InfeedQueue` used to augment the
      inputs of `computation`.
    device_assignment: If not `None`, a `DeviceAssignment` mapping logical
      cores in the computation to physical TPU cores; a default assignment
      is used when `None`. May be omitted when each shard uses a single
      core and the shard count is 1 or equals the number of cores in the
      TPU system.
    name: (Deprecated) Does nothing.
    xla_options: An instance of `tpu.XLAOptions` with options for the XLA
      compiler, or `None` for defaults.

  Returns:
    A list of output tensors.

  Raises:
    ValueError: If num_shards <= 0
    ValueError: If len(input_shard_axes) != len(inputs)
    ValueError: If len(output_shard_axes) != len(outputs from `computation`)
  """
  _, output_tensors = split_compile_and_shard(
      computation,
      inputs=inputs,
      num_shards=num_shards,
      input_shard_axes=input_shard_axes,
      outputs_from_all_shards=outputs_from_all_shards,
      output_shard_axes=output_shard_axes,
      infeed_queue=infeed_queue,
      device_assignment=device_assignment,
      name=name,
      xla_options=xla_options)
  return output_tensors
@tf_export(v1=["tpu.batch_parallel"])
@traceback_utils.filter_traceback
def batch_parallel(
    computation: Callable[..., Any],
    inputs: Optional[List[List[Optional[core_types.Tensor]]]] = None,
    num_shards: int = 1,
    infeed_queue: Optional[tpu_feed.InfeedQueue] = None,
    device_assignment: Optional[device_assignment_lib.DeviceAssignment] = None,
    name: Optional[Text] = None,
    xla_options: Optional[XLAOptions] = None):
  """Shards `computation` along the batch dimension for parallel execution.

  Convenience wrapper around shard() that always splits along the 0-th
  dimension.

  `inputs` must be a list of Tensors or None (equivalent to an empty list).
  Every input is cut into `num_shards` pieces along its 0-th dimension and
  `computation` runs on each piece in parallel. Tensors lexically captured
  by `computation` are broadcast to every shard, e.g.,

  x = tf.constant(7)
  def computation():
    return x + 3
  ... = shard(computation, ...)

  The per-shard outputs are concatenated back together along their 0-th
  dimension. Inputs and outputs of the computation must be at least rank-1
  Tensors.

  Args:
    computation: A Python function that builds the computation to apply to
      each shard of the input.
    inputs: A list of input tensors or None (equivalent to an empty list).
      The 0-th dimension of each Tensor must have size divisible by
      `num_shards`.
    num_shards: The number of shards.
    infeed_queue: If not `None`, the `InfeedQueue` from which to append a
      tuple of arguments as inputs to `computation`.
    device_assignment: If not `None`, a `DeviceAssignment` mapping logical
      cores in the computation to physical TPU cores; a default assignment
      is used when `None`. May be omitted when each shard uses a single
      core and the shard count is 1 or equals the number of cores in the
      TPU system.
    name: (Deprecated) Does nothing.
    xla_options: An instance of `tpu.XLAOptions` with options for the XLA
      compiler, or `None` for defaults.

  Returns:
    A list of output tensors.

  Raises:
    ValueError: If `num_shards <= 0`
  """
  # Delegate with everything passed by keyword; shard() defaults
  # input/output shard axes to dimension 0, which is the batch dimension.
  return shard(
      computation,
      inputs=inputs,
      num_shards=num_shards,
      infeed_queue=infeed_queue,
      device_assignment=device_assignment,
      name=name,
      xla_options=xla_options)
@tf_export(v1=["tpu.rewrite"])
@traceback_utils.filter_traceback
def rewrite(
    computation: Callable[..., Any],
    inputs: Optional[List[List[Optional[core_types.Tensor]]]] = None,
    infeed_queue: Optional[tpu_feed.InfeedQueue] = None,
    device_assignment: Optional[device_assignment_lib.DeviceAssignment] = None,
    name: Optional[Text] = None,
    xla_options: Optional[XLAOptions] = None) -> Any:
  """Rewrites `computation` for execution on a TPU system.

  Args:
    computation: A Python function building the computation to apply to the
      input. If the function takes n inputs, 'inputs' should be a list of n
      tensors.

      `computation` may return a list of operations and tensors. Tensors must
      come before operations in the returned list. The return value of
      `rewrite` is a list of tensors corresponding to the tensors from the
      output of `computation`.

      All `Operation`s constructed during `computation` will be executed when
      evaluating any of the returned output tensors, not just the ones
      returned.
    inputs: A list of input tensors or `None` (equivalent to an empty list).
      Each input may be a nested structure of values convertible to tensors.
      Note that an N-dimension list of compatible values produces an
      N-dimension list of scalar tensors rather than a single rank-N tensor;
      convert part of inputs with `tf.convert_to_tensor` if different
      behavior is needed.
    infeed_queue: If not `None`, the `InfeedQueue` from which to append a
      tuple of arguments as inputs to `computation`.
    device_assignment: if not `None`, a `DeviceAssignment` describing the
      mapping between logical cores in the computation with physical cores in
      the TPU topology. May be omitted for a single-core computation, in
      which case the core attached to task 0, TPU device 0 is used.
    name: (Deprecated) Does nothing.
    xla_options: An instance of `tpu.XLAOptions` which indicates the options
      passed to XLA compiler. Use `None` for default options.

  Returns:
    Same data structure as if computation(*inputs) is called directly, with
    some exceptions for correctness:
    1) None output: a NoOp that control-depends on computation is returned.
    2) Single value output: a tuple containing the value is returned.
    3) Operation-only outputs: a NoOp that control-depends on computation is
       returned.
    TODO(b/121383831): Investigate into removing these special cases.
  """
  # replicate() expects one inputs-list per replica; a single-replica rewrite
  # therefore wraps the caller's inputs in one extra list level.
  replicated_inputs = None if inputs is None else [inputs]
  # TODO(b/36647078) remove disable when pylint bug is fixed.
  # pylint: disable=indexing-exception
  replica_outputs = replicate(
      computation,
      replicated_inputs,
      infeed_queue=infeed_queue,
      device_assignment=device_assignment,
      name=name,
      xla_options=xla_options)
  return replica_outputs[0]
  # pylint: enable=indexing-exception
# Operations that indicate some error in the user's inference graph.
_DENYLISTED_INFERENCE_OPS = set([
"ReadVariableOp",
"AssignVariableOp",
"AssignAddVariableOp",
"AssignSubVariableOp",
"VarHandleOp",
"Variable",
"VariableV2",
])
def under_tpu_inference_context() -> bool:
  """Checks whether execution is currently inside a `_TPUInferenceContext`.

  Searches the control-flow context chain of the default graph and, if no
  match is found there, climbs through each enclosing outer graph.
  """
  current = ops.get_default_graph()
  while current is not None:
    ctx = current._get_control_flow_context()  # pylint: disable=protected-access
    while ctx is not None:
      if isinstance(ctx, _TPUInferenceContext):
        return True
      ctx = ctx.outer_context
    # No inference context in this graph; move to the enclosing graph.
    if isinstance(current, function._FuncGraph):  # pylint: disable=protected-access
      current = current._outer_graph  # pylint: disable=protected-access
    elif isinstance(current, func_graph.FuncGraph):
      current = current.outer_graph
    else:
      return False
  return False
class _TPUInferenceContext(control_flow_ops.XLAControlFlowContext):
  """A `ControlFlowContext` for nodes inside a TPU inference computation.

  The primary role of `_TPUInferenceContext` is to indicate the mode of
  operation and possibly sanity check operators inside a
  tpu.rewrite_for_inference() computation.
  """

  def __init__(self, name: Text, check_ops: bool = True):
    # Python-3 style super(); the file already requires Python 3 (f-strings).
    super().__init__()
    self._name = name
    # When True, ops from _DENYLISTED_INFERENCE_OPS are rejected in AddOp.
    self._check_ops = check_ops

  def AddOp(self, op):
    self._AddOpInternal(op)

  def _AddOpInternal(self, op):
    """Rejects denylisted variable ops, then forwards to the outer context."""
    # pylint: disable=protected-access
    if self._check_ops and op.type in _DENYLISTED_INFERENCE_OPS:
      raise NotImplementedError(
          f"Operation of type {op.type} ({op.name}) is not supported on the "
          "TPU for inference. Execution will fail if this op is used in the "
          "graph. Make sure your variables are using variable_scope.")
    if self._outer_context:
      self._outer_context.AddInnerOp(op)

  def AddValue(self, val):
    """Forwards `val` to the outer context when one exists."""
    result = val
    if self._outer_context:
      result = self._outer_context.AddValue(val)
    return result

  def AddInnerOp(self, op):
    self._AddOpInternal(op)

  @property
  def grad_state(self):
    # Inference contexts take no part in gradient computation.
    return None
def validate_inference_rewrite_for_variables(graph: ops.Graph):
  """Validates whether rewrite_for_inference() 'worked' for variables.

  rewrite_for_inference() is supposed to append GuaranteeConstOps after
  ReadVariableOps, which only happens when variables are created and read
  through tf.compat.v1.get_variable(). Call this immediately after
  tpu.rewrite_for_inference() to confirm the GuaranteeConstOps were actually
  added to the graph.

  Typical usages:
    tpu.validate_inference_rewrite_for_variables(
        tf.compat.v1.get_default_graph())

    tpu.validate_inference_rewrite_for_variables(sess.graph)

  Args:
    graph: The graph which needs to be validated.
  Raises:
    RuntimeError: if validation failed.
  """
  for op in graph.get_operations():
    if op.type == "GuaranteeConst":
      # At least one rewrite took effect; nothing more to check.
      return
  raise RuntimeError(
      "No GuaranteeConst ops found in the graph after running "
      "tpu.rewrite_for_inference(...). Please check that you are using "
      "tf.get_variable() to create and access variables in your tpu "
      "computation.")
def rewrite_for_inference(
    computation: Callable[..., Any],
    inputs: Optional[List[core_types.Tensor]] = None,
    infeed_queue: Optional[tpu_feed.InfeedQueue] = None,
    device_assignment: Optional[device_assignment_lib.DeviceAssignment] = None,
    name: Optional[Text] = None) -> List[core_types.Tensor]:
  """Rewrites `computation` for inference on a TPU system.

  Other than 'rewriting' the computation to run on a TPU, if using variables
  in your computation, it moves the ReadVariableOps outside the TPU
  computation, and adds GuaranteeConst ops just after the ReadVariableOps.
  This mechanism works only if you are using tf.compat.v1.get_variable() to
  create and access variables in your tpu computation. You can validate
  whether this worked, by calling validate_inference_rewrite_for_variables()
  method immediately after this method to check whether GuaranteeConstOps
  where added to the graph.

  Args:
    computation: A Python function that builds a computation to apply to the
      input. If the function takes n inputs, 'inputs' should be a list of n
      tensors. If the function returns m outputs, rewrite will return a list
      of m tensors.
    inputs: A list of input tensors or `None` (equivalent to an empty list).
    infeed_queue: If not `None`, the `InfeedQueue` from which to append a
      tuple of arguments as inputs to `computation`.
    device_assignment: if not `None`, a `DeviceAssignment` describing the
      mapping between logical cores in the computation with physical cores in
      the TPU topology. May be omitted for a single-core computation, in
      which case the core attached to task 0, TPU device 0 is used.
    name: The name of the operator.
  Returns:
    A list of output tensors.
  """

  def guarantee_const_getter(getter, name, *args, **kwargs):
    # Wrap each variable read in a GuaranteeConst so XLA may treat the value
    # as constant for inference.
    with ops.control_dependencies(None):
      return array_ops.guarantee_const(
          getter(name, *args, **kwargs), name=name + "/GuaranteeConst")

  def wrapped_computation(*args, **kwargs):
    """Execute computation under `_TPUInferenceContext`."""
    context = _TPUInferenceContext(
        name=ops.get_default_graph().unique_name("rewrite_for_inference"))
    try:
      context.Enter()

      vscope = variable_scope.get_variable_scope()
      prev_custom_getter = vscope.custom_getter
      prev_caching_device = vscope.caching_device
      vscope.set_custom_getter(guarantee_const_getter)
      vscope.set_caching_device(lambda op: op.device)

      try:
        result = computation(*args, **kwargs)
      finally:
        # Restore the variable scope even if `computation` raises; the
        # original code skipped restoration on error, leaking the
        # inference-only getter and caching device into later graph code.
        vscope.set_custom_getter(prev_custom_getter)
        vscope.set_caching_device(prev_caching_device)
    finally:
      context.Exit()
    return result

  # pylint: disable=undefined-variable
  return rewrite(
      wrapped_computation,
      inputs=inputs,
      infeed_queue=infeed_queue,
      device_assignment=device_assignment,
      name=name)
  # pylint: enable=undefined-variable
def prune_unconnected_ops_from_xla(prune_graph: ops.Graph):
  """Prunes unconnected ops as listed in _UNCONNECTED_OPS_TO_PRUNE.

  Args:
    prune_graph: A tensorflow graph from which we wish to prune unconnected
      ops as listed in _UNCONNECTED_OPS_TO_PRUNE. In general, these ops should
      have no inputs and no consumers. These can often be left behind due to
      graph construction rewiring (for instance TF-Hub). While they never
      execute, they will cause XLA compile to fail so we strip them from XLA
      compile by removing the tpu_replicate attribute.
  """
  # Scan over the top level graph and all function graphs.
  for graph in [prune_graph] + list(prune_graph._functions.values()):  # pylint: disable=protected-access
    if not isinstance(graph, ops.Graph):
      continue
    for op in graph.get_operations():
      if op.type not in _UNCONNECTED_OPS_TO_PRUNE:
        continue
      # Keep the op if any of its outputs feeds another op.
      if any(output.consumers() for output in op.outputs):
        continue
      logging.info(
          "Pruning OP %s of type %s from XLA Compile due to "
          "it being disconnected.", op.name, op.type)
      op._clear_attr(tpu_replication._TPU_REPLICATE_ATTR)  # pylint: disable=protected-access
|
tensorflowREPO_NAMEtensorflowPATH_START.@tensorflow_extracted@tensorflow-master@tensorflow@python@tpu@tpu.py@.PATH_END.py
|
{
"filename": "test_samples.py",
"repo_name": "handley-lab/anesthetic",
"repo_path": "anesthetic_extracted/anesthetic-master/tests/test_samples.py",
"type": "Python"
}
|
import anesthetic.examples._matplotlib_agg # noqa: F401
import pytest
from contextlib import nullcontext
from math import floor, ceil
import numpy as np
from pandas import MultiIndex
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
from matplotlib.patches import Rectangle
from anesthetic.weighted_pandas import WeightedSeries, WeightedDataFrame
from anesthetic import (
Samples, MCMCSamples, NestedSamples, make_1d_axes, make_2d_axes,
read_chains
)
from anesthetic.samples import merge_nested_samples, merge_samples_weighted
from anesthetic.weighted_labelled_pandas import (WeightedLabelledSeries,
WeightedLabelledDataFrame)
from numpy.testing import (assert_array_equal, assert_array_almost_equal,
assert_array_less, assert_allclose)
from pandas.testing import assert_frame_equal
from matplotlib.colors import to_hex
from scipy.stats import ks_2samp, kstest, norm
from utils import skipif_no_fastkde, astropy_mark_xfail, fastkde_mark_skip
@pytest.fixture(autouse=True)
def close_figures_on_teardown():
    """Automatically close every matplotlib figure after each test runs."""
    yield
    plt.close("all")
def test_build_samples():
    """Construct Samples/MCMCSamples/NestedSamples from raw arrays.

    Checks default column naming, the two-level index introduced by
    weights, and the `logzero` threshold mapping logL values to -inf.
    """
    np.random.seed(3)
    nsamps = 1000
    ndims = 3
    data = np.random.randn(nsamps, ndims)
    logL = np.random.rand(nsamps)
    weights = np.random.randint(1, 20, size=nsamps)
    params = ['A', 'B', 'C']
    labels = {'A': '$A$', 'B': '$B$', 'C': '$C$'}
    labels = [labels.get(p, p) for p in params]
    # data only: columns default to integer positions
    s = Samples(data=data)
    assert len(s) == nsamps
    assert_array_equal(s.columns, np.array([0, 1, 2], dtype=object))
    s = Samples(data=data, logL=logL)
    assert len(s) == nsamps
    assert_array_equal(s.columns, np.array([0, 1, 2, 'logL'], dtype=object))
    # weights add a second index level
    s = Samples(data=data, weights=weights)
    assert len(s) == nsamps
    assert_array_equal(s.columns, np.array([0, 1, 2], dtype=object))
    assert s.index.nlevels == 2
    s = Samples(data=data, weights=weights, logL=logL)
    assert len(s) == nsamps
    assert_array_equal(s.columns, np.array([0, 1, 2, 'logL'], dtype=object))
    assert s.index.nlevels == 2
    s = Samples(data=data, columns=params)
    assert len(s) == nsamps
    assert_array_equal(s.columns, ['A', 'B', 'C'])
    s = Samples(data=data, columns=params, labels=labels)
    mc = MCMCSamples(data=data, logL=logL, weights=weights)
    assert len(mc) == nsamps
    assert np.all(np.isfinite(mc.logL))
    ns = NestedSamples(data=data, logL=logL, weights=weights)
    assert len(ns) == nsamps
    assert np.all(np.isfinite(ns.logL))
    # logL values below logzero should be mapped to -inf
    logL[:10] = -1e300
    weights[:10] = 0.
    mc = MCMCSamples(data=data, logL=logL, weights=weights, logzero=-1e29)
    ns = NestedSamples(data=data, logL=logL, weights=weights, logzero=-1e29)
    assert_array_equal(mc.columns, np.array([0, 1, 2, 'logL'], dtype=object))
    assert_array_equal(ns.columns, np.array([0, 1, 2, 'logL'], dtype=object))
    assert mc.index.nlevels == 2
    assert ns.index.nlevels == 2
    assert np.all(mc.logL[:10] == -np.inf)
    assert np.all(ns.logL[:10] == -np.inf)
    assert np.all(mc.logL[10:] == logL[10:])
    assert np.all(ns.logL[10:] == logL[10:])
    # with logzero below all data, every logL value survives untouched
    mc = MCMCSamples(data=data, logL=logL, weights=weights, logzero=-1e301)
    ns = NestedSamples(data=data, logL=logL, weights=weights, logzero=-1e301)
    assert np.all(np.isfinite(mc.logL))
    assert np.all(np.isfinite(ns.logL))
    assert np.all(mc.logL == logL)
    assert np.all(ns.logL == logL)
    assert not hasattr(mc, 'root')
    assert not hasattr(ns, 'root')
def test_different_parameters():
    """Plotting works when axes and samples have differing parameter sets."""
    np.random.seed(3)
    ns = read_chains('./tests/example_data/pc')
    full = ['x0', 'x1', 'x2', 'x3', 'x4']
    subset = ['x0', 'x1', 'x2']
    # 1d grid over the full parameter set
    fig, axes = make_1d_axes(full)
    ns.plot_1d(axes)
    # 2d grids: subset, full, and a rectangular full-by-subset grid
    for spec in (subset, full, [full, subset]):
        fig, axes = make_2d_axes(spec)
        ns.plot_2d(axes)
def test_manual_columns():
    """Column names can be overridden via the `columns` keyword."""
    base = ['x0', 'x1', 'x2', 'x3', 'x4']
    mcmc_extras = ['logL', 'chain']
    ns_extras = ['logL', 'logL_birth', 'nlive']
    # default column names come from the chain files
    mcmc = read_chains('./tests/example_data/gd')
    ns = read_chains('./tests/example_data/pc')
    assert_array_equal(mcmc.drop_labels().columns, base + mcmc_extras)
    assert_array_equal(ns.drop_labels().columns, base + ns_extras)
    # manual names replace the parameter columns but keep the extras
    renamed = ['y0', 'y1', 'y2', 'y3', 'y4']
    mcmc = read_chains('./tests/example_data/gd', columns=renamed)
    ns = read_chains('./tests/example_data/pc', columns=renamed)
    assert_array_equal(mcmc.drop_labels().columns, renamed + mcmc_extras)
    assert_array_equal(ns.drop_labels().columns, renamed + ns_extras)
def test_plot_2d_kinds():
    """Check the `kind` argument of plot_2d in dict, string and alias forms.

    Expected counts are the number of populated axes in the rectangular
    params_x-by-params_y grid for each lower/diagonal/upper combination.
    """
    np.random.seed(3)
    ns = read_chains('./tests/example_data/pc')
    params_x = ['x0', 'x1', 'x2', 'x3']
    params_y = ['x0', 'x1', 'x2']
    params = [params_x, params_y]
    # Check dictionaries
    axes = ns.plot_2d(params, kind={'lower': 'kde_2d'})
    assert (~axes.isnull()).to_numpy().sum() == 3
    axes = ns.plot_2d(params, kind={'upper': 'scatter_2d'})
    assert (~axes.isnull()).to_numpy().sum() == 6
    axes = ns.plot_2d(params, kind={'upper': 'kde_2d', 'diagonal': 'kde_1d'})
    assert (~axes.isnull()).to_numpy().sum() == 9
    axes = ns.plot_2d(params, kind={'lower': 'kde_2d', 'diagonal': 'kde_1d'})
    assert (~axes.isnull()).to_numpy().sum() == 6
    axes = ns.plot_2d(params, kind={'lower': 'kde_2d', 'diagonal': 'kde_1d'})
    assert (~axes.isnull()).to_numpy().sum() == 6
    axes = ns.plot_2d(params, kind={'lower': 'kde_2d',
                                    'diagonal': 'kde_1d',
                                    'upper': 'scatter_2d'})
    assert (~axes.isnull()).to_numpy().sum() == 12
    # Check strings
    axes = ns.plot_2d(params, kind='kde')
    assert (~axes.isnull()).to_numpy().sum() == 6
    axes = ns.plot_2d(params, kind='kde_1d')
    assert (~axes.isnull()).to_numpy().sum() == 3
    axes = ns.plot_2d(params, kind='kde_2d')
    assert (~axes.isnull()).to_numpy().sum() == 3
    # Check kinds vs kind kwarg
    axes = ns.plot_2d(params, kinds='kde')
    assert (~axes.isnull()).to_numpy().sum() == 6
    axes = ns.plot_2d(params, kinds='kde_1d')
    assert (~axes.isnull()).to_numpy().sum() == 3
    axes = ns.plot_2d(params, kinds='kde_2d')
    assert (~axes.isnull()).to_numpy().sum() == 3
    # Check incorrect inputs
    with pytest.raises(ValueError):
        ns.plot_2d(params, kind={'lower': 'not a plot kind'})
    with pytest.raises(ValueError):
        ns.plot_2d(params, kind={'diagonal': 'not a plot kind'})
    with pytest.raises(ValueError):
        ns.plot_2d(params, kind={'lower': 'kde', 'spam': 'kde'})
    with pytest.raises(ValueError):
        ns.plot_2d(params, kind={'ham': 'kde'})
    with pytest.raises(ValueError):
        ns.plot_2d(params, kind=0)
    with pytest.raises(ValueError):
        ns.plot_2d(params, kind='eggs')
def test_plot_2d_kinds_multiple_calls():
    """Re-plotting onto existing axes with a different kind should not fail."""
    np.random.seed(3)
    ns = read_chains('./tests/example_data/pc')
    params = ['x0', 'x1', 'x2', 'x3']
    all_kinds = {'diagonal': 'kde_1d',
                 'lower': 'kde_2d',
                 'upper': 'scatter_2d'}
    hist_diagonal = {'diagonal': 'hist_1d'}
    # full grid first, then overlay a different diagonal kind
    axes = ns.plot_2d(params, kind=all_kinds)
    ns.plot_2d(axes, kind=hist_diagonal)
    # and the reverse order
    axes = ns.plot_2d(params, kind=hist_diagonal)
    ns.plot_2d(axes, kind=all_kinds)
def test_root_and_label():
    """`root`/`label` are set by read_chains and absent on bare instances."""
    np.random.seed(3)
    for path, label, cls in [('./tests/example_data/pc', 'pc', NestedSamples),
                             ('./tests/example_data/gd', 'gd', MCMCSamples)]:
        samples = read_chains(path)
        assert samples.root == path
        assert samples.label == label
        # a directly-constructed object has no root and a None label
        empty = cls()
        assert not hasattr(empty, 'root')
        assert empty.label is None
def test_plot_2d_legend():
    """Check legend labels, handle types and default labelling in plot_2d."""
    np.random.seed(3)
    ns = read_chains('./tests/example_data/pc')
    mc = read_chains('./tests/example_data/gd')
    params = ['x0', 'x1', 'x2', 'x3']
    # Test label kwarg for kde
    fig, axes = make_2d_axes(params, upper=False)
    ns.plot_2d(axes, label='l1', kind=dict(diagonal='kde_1d', lower='kde_2d'))
    mc.plot_2d(axes, label='l2', kind=dict(diagonal='kde_1d', lower='kde_2d'))
    for y, row in axes.iterrows():
        for x, ax in row.items():
            if ax is not None:
                leg = ax.legend()
                assert leg.get_texts()[0].get_text() == 'l1'
                assert leg.get_texts()[1].get_text() == 'l2'
                handles, labels = ax.get_legend_handles_labels()
                assert labels == ['l1', 'l2']
                # kde: Line2D handles on the diagonal, Rectangles below
                if x == y:
                    assert all([isinstance(h, Line2D) for h in handles])
                else:
                    assert all([isinstance(h, Rectangle) for h in handles])
    # Test label kwarg for hist and scatter
    fig, axes = make_2d_axes(params, lower=False)
    ns.plot_2d(axes, label='l1', kind=dict(diagonal='hist_1d',
                                           upper='scatter_2d'))
    mc.plot_2d(axes, label='l2', kind=dict(diagonal='hist_1d',
                                           upper='scatter_2d'))
    for y, row in axes.iterrows():
        for x, ax in row.items():
            if ax is not None:
                leg = ax.legend()
                assert leg.get_texts()[0].get_text() == 'l1'
                assert leg.get_texts()[1].get_text() == 'l2'
                handles, labels = ax.get_legend_handles_labels()
                assert labels == ['l1', 'l2']
                # hist: Rectangle handles on the diagonal, Line2D for scatter
                if x == y:
                    assert all([isinstance(h, Rectangle) for h in handles])
                else:
                    assert all([isinstance(h, Line2D)
                                for h in handles])
    # test default labelling
    fig, axes = make_2d_axes(params, upper=False)
    ns.plot_2d(axes)
    mc.plot_2d(axes)
    for y, row in axes.iterrows():
        for x, ax in row.items():
            if ax is not None:
                handles, labels = ax.get_legend_handles_labels()
                assert labels == ['pc', 'gd']
    # Test label kwarg to constructors
    ns = read_chains('./tests/example_data/pc', label='l1')
    mc = read_chains('./tests/example_data/gd', label='l2')
    params = ['x0', 'x1', 'x2', 'x3']
    fig, axes = make_2d_axes(params, upper=False)
    ns.plot_2d(axes)
    mc.plot_2d(axes)
    for y, row in axes.iterrows():
        for x, ax in row.items():
            if ax is not None:
                handles, labels = ax.get_legend_handles_labels()
                assert labels == ['l1', 'l2']
@pytest.mark.parametrize('kind', ['kde', 'hist', skipif_no_fastkde('fastkde')])
def test_plot_2d_colours(kind):
    """Each legend label should use exactly one colour across the 2d grid."""
    np.random.seed(3)
    gd = read_chains("./tests/example_data/gd")
    gd.drop(columns='x3', inplace=True, level=0)
    pc = read_chains("./tests/example_data/pc")
    pc.drop(columns='x4', inplace=True, level=0)
    mn = read_chains("./tests/example_data/mn")
    mn.drop(columns='x2', inplace=True, level=0)
    fig = plt.figure()
    fig, axes = make_2d_axes(['x0', 'x1', 'x2', 'x3', 'x4'], fig=fig)
    kinds = {'diagonal': kind + '_1d',
             'lower': kind + '_2d',
             'upper': 'scatter_2d'}
    # three runs with automatic colours, three with explicit ones
    gd.plot_2d(axes, kind=kinds, label="A")
    pc.plot_2d(axes, kind=kinds, label="B")
    mn.plot_2d(axes, kind=kinds, label="C")
    gd.plot_2d(axes, kind=kinds, label="D", color='C7')
    pc.plot_2d(axes, kind=kinds, label="E", color='C6')
    mn.plot_2d(axes, kind=kinds, label="F", color='C5')
    from collections import defaultdict
    # collect the set of colours used by each legend label
    d = defaultdict(set)
    for y, rows in axes.iterrows():
        for x, ax in rows.items():
            handles, labels = ax.get_legend_handles_labels()
            for handle, label in zip(handles, labels):
                if isinstance(handle, Rectangle):
                    color = handle.get_facecolor()
                else:
                    color = handle.get_color()
                color = to_hex(color)
                d[label].add(color)
    for v in d.values():
        assert len(v) == 1
@pytest.mark.parametrize('kwargs', [dict(color='r', alpha=0.5, ls=':', lw=1),
                                    dict(c='r', linestyle=':', linewidth=1),
                                    dict(ec='r', fc='b'),
                                    dict(edgecolor='r', facecolor='b'),
                                    dict(cmap=plt.cm.RdBu),
                                    dict(colormap=plt.cm.RdBu),
                                    dict(cmap="viridis"),
                                    dict(colormap="viridis")])
@pytest.mark.parametrize('kind', ['kde', 'hist', 'default',
                                  skipif_no_fastkde('fastkde')])
def test_plot_2d_kwargs(kind, kwargs):
    """Smoke test: styling kwargs and their aliases are accepted by plot_2d."""
    np.random.seed(42)
    pc = read_chains("./tests/example_data/pc")
    fig, axes = make_2d_axes(['x0', 'x1'])
    pc.plot_2d(axes, kind=kind, **kwargs)
@pytest.mark.parametrize('kind', ['kde', 'hist', skipif_no_fastkde('fastkde')])
def test_plot_1d_colours(kind):
    """Each labelled run should use a single colour across all 1d axes."""
    np.random.seed(3)
    gd = read_chains("./tests/example_data/gd")
    gd.drop(columns='x3', inplace=True, level=0)
    pc = read_chains("./tests/example_data/pc")
    pc.drop(columns='x4', inplace=True, level=0)
    mn = read_chains("./tests/example_data/mn")
    mn.drop(columns='x2', inplace=True, level=0)
    fig = plt.figure()
    fig, axes = make_1d_axes(['x0', 'x1', 'x2', 'x3', 'x4'], fig=fig)
    gd.plot_1d(axes, kind=kind + '_1d', label="gd")
    pc.plot_1d(axes, kind=kind + '_1d', label="pc")
    mn.plot_1d(axes, kind=kind + '_1d', label="mn")
    # gather the colour of every legend handle per run
    gd_colors = []
    pc_colors = []
    mn_colors = []
    for x, ax in axes.items():
        handles, labels = ax.get_legend_handles_labels()
        for handle, label in zip(handles, labels):
            if isinstance(handle, Rectangle):
                color = to_hex(handle.get_facecolor())
            else:
                color = handle.get_color()
            if label == 'gd':
                gd_colors.append(color)
            elif label == 'pc':
                pc_colors.append(color)
            elif label == 'mn':
                mn_colors.append(color)
    # one distinct colour per run
    assert len(set(gd_colors)) == 1
    assert len(set(mn_colors)) == 1
    assert len(set(pc_colors)) == 1
@astropy_mark_xfail
def test_astropyhist():
    """Astropy's 'knuth' binning is unsupported and should raise ValueError."""
    np.random.seed(3)
    ns = read_chains('./tests/example_data/pc')
    params = ['x0', 'x1', 'x2', 'x3']
    with pytest.raises(ValueError):
        ns.plot_1d(params, kind='hist_1d', bins='knuth')
def test_hist_levels():
    """2d histograms accept contour `levels` alongside `bins`."""
    np.random.seed(3)
    samples = read_chains('./tests/example_data/pc')
    params = ['x0', 'x1', 'x2', 'x3']
    samples.plot_2d(params, kind={'lower': 'hist_2d'},
                    levels=[0.95, 0.68], bins=20)
def test_plot_2d_no_axes():
    """plot_2d without pre-made axes should label axes from column labels."""
    np.random.seed(3)
    ns = read_chains('./tests/example_data/pc')
    axes = ns[['x0', 'x1', 'x2']].plot_2d()
    assert axes.iloc[-1, 0].get_xlabel() == '$x_0$'
    assert axes.iloc[-1, 1].get_xlabel() == '$x_1$'
    assert axes.iloc[-1, 2].get_xlabel() == '$x_2$'
    # without labels, plain column names are used
    axes = ns[['x0', 'x1', 'x2']].drop_labels().plot_2d()
    assert axes.iloc[-1, 0].get_xlabel() == 'x0'
    assert axes.iloc[-1, 1].get_xlabel() == 'x1'
    assert axes.iloc[-1, 2].get_xlabel() == 'x2'
    # plotting nested-sampling metadata columns warns
    with pytest.warns(UserWarning):
        axes = ns[['x0', 'logL_birth']].plot_2d()
    axes = ns.drop_labels()[['x0', 'logL_birth']].plot_2d()
def test_plot_1d_no_axes():
    """plot_1d without pre-made axes should label axes from column labels."""
    np.random.seed(3)
    ns = read_chains('./tests/example_data/pc')
    axes = ns[['x0', 'x1', 'x2']].plot_1d()
    assert axes.iloc[0].get_xlabel() == '$x_0$'
    assert axes.iloc[1].get_xlabel() == '$x_1$'
    assert axes.iloc[2].get_xlabel() == '$x_2$'
    # without labels, plain column names are used
    axes = ns[['x0', 'x1', 'x2']].drop_labels().plot_1d()
    assert axes.iloc[0].get_xlabel() == 'x0'
    assert axes.iloc[1].get_xlabel() == 'x1'
    assert axes.iloc[2].get_xlabel() == 'x2'
    # plotting nested-sampling metadata columns warns
    with pytest.warns(UserWarning):
        axes = ns.plot_1d()
        axes = ns[['x0', 'logL_birth']].plot_1d()
    axes = ns.drop_labels()[['x0', 'logL_birth']].plot_1d()
@pytest.mark.parametrize('kind', ['kde', 'hist', skipif_no_fastkde('fastkde')])
def test_plot_logscale_1d(kind):
    """logx should log-scale the chosen 1d axis and keep the peak near 0.1."""
    ns = read_chains('./tests/example_data/pc')
    params = ['x0', 'x1', 'x2', 'x3', 'x4']
    # 1d
    axes = ns.plot_1d(params, kind=kind + '_1d', logx=['x2'])
    for x, ax in axes.items():
        if x == 'x2':
            assert ax.get_xscale() == 'log'
        else:
            assert ax.get_xscale() == 'linear'
    # locate the mode of the plotted density/histogram in log10-space
    ax = axes.loc['x2']
    if 'kde' in kind:
        p = ax.get_children()
        arg = np.argmax(p[0].get_ydata())
        pmax = np.log10(p[0].get_xdata()[arg])
        d = 0.1
    else:
        arg = np.argmax([p.get_height() for p in ax.patches])
        pmax = np.log10(ax.patches[arg].get_x())
        # tolerance: one log-spaced bin width
        d = np.log10(ax.patches[arg+1].get_x() / ax.patches[arg].get_x())
    assert pmax == pytest.approx(-1, abs=d)
@pytest.mark.parametrize('kind', ['kde', 'hist', skipif_no_fastkde('fastkde')])
def test_plot_logscale_2d(kind):
    """Check logx/logy set log scales on the right axes (and twins) in plot_2d.

    Two comparisons in the original test were missing their ``assert``
    keyword and so silently evaluated to an unused boolean; they are
    restored here.
    """
    ns = read_chains('./tests/example_data/pc')
    params = ['x0', 'x1', 'x2', 'x3', 'x4']
    # 2d, logx only
    axes = ns.plot_2d(params, kind=kind, logx=['x2'])
    for y, rows in axes.iterrows():
        for x, ax in rows.items():
            if ax is not None:
                if x == 'x2':
                    assert ax.get_xscale() == 'log'
                else:
                    assert ax.get_xscale() == 'linear'
                # BUG FIX: `assert` was missing here
                assert ax.get_yscale() == 'linear'
                if x == y:
                    if x == 'x2':
                        assert ax.twin.get_xscale() == 'log'
                    else:
                        assert ax.twin.get_xscale() == 'linear'
                    assert ax.twin.get_yscale() == 'linear'
    # 2d, logy only
    axes = ns.plot_2d(params, kind=kind, logy=['x2'])
    for y, rows in axes.iterrows():
        for x, ax in rows.items():
            if ax is not None:
                # BUG FIX: `assert` was missing here
                assert ax.get_xscale() == 'linear'
                if y == 'x2':
                    assert ax.get_yscale() == 'log'
                else:
                    assert ax.get_yscale() == 'linear'
                if x == y:
                    assert ax.twin.get_xscale() == 'linear'
                    assert ax.twin.get_yscale() == 'linear'
    # 2d, logx and logy
    axes = ns.plot_2d(params, kind=kind, logx=['x2'], logy=['x2'])
    for y, rows in axes.iterrows():
        for x, ax in rows.items():
            if ax is not None:
                if x == 'x2':
                    assert ax.get_xscale() == 'log'
                else:
                    assert ax.get_xscale() == 'linear'
                if y == 'x2':
                    assert ax.get_yscale() == 'log'
                else:
                    assert ax.get_yscale() == 'linear'
                if x == y:
                    if x == 'x2':
                        assert ax.twin.get_xscale() == 'log'
                    else:
                        assert ax.twin.get_xscale() == 'linear'
                    assert ax.twin.get_yscale() == 'linear'
def test_logscale_ticks():
    """Log-scaled axes should keep multiple visible ticks and tight limits."""
    np.random.seed(42)
    ndim = 5
    # two log-normal datasets with very different scales
    data1 = np.exp(10 * np.random.randn(200, ndim))
    data2 = np.exp(10 * np.random.randn(200, ndim) - 50)
    params = [f'a{i}' for i in range(ndim)]
    fig, axes = make_2d_axes(params, logx=params, logy=params, upper=False)
    samples1 = Samples(data1, columns=params)
    samples2 = Samples(data2, columns=params)
    samples1.plot_2d(axes)
    samples2.plot_2d(axes)
    for y, col in axes.iterrows():
        for x, ax in col.items():
            if ax is not None:
                # at least two ticks should fall inside the axis limits
                xlims = ax.get_xlim()
                xticks = ax.get_xticks()
                assert np.sum((xticks > xlims[0]) & (xticks < xlims[1])) > 1
                ylims = ax.get_ylim()
                yticks = ax.get_yticks()
                assert np.sum((yticks > ylims[0]) & (yticks < ylims[1])) > 1
                # limits should hug the data (twin axis on the diagonal)
                if x == y:
                    data_min = ax.twin.dataLim.intervalx[0]
                    data_max = ax.twin.dataLim.intervalx[1]
                    assert xlims[0] == pytest.approx(data_min, rel=1e-14)
                    assert xlims[1] == pytest.approx(data_max, rel=1e-14)
                else:
                    assert_array_equal(xlims, ax.dataLim.intervalx)
                    assert_array_equal(ylims, ax.dataLim.intervaly)
@pytest.mark.parametrize('k', ['hist_1d', 'hist'])
@pytest.mark.parametrize('b', ['scott', 10, np.logspace(-3, 0, 20)])
@pytest.mark.parametrize('r', [None, (1e-5, 1)])
def test_plot_logscale_hist_kwargs(k, b, r):
    """Log-scaled histograms should honour `bins` and `range` kwargs."""
    ns = read_chains('./tests/example_data/pc')
    # plain 'hist' on a 1d plot emits a deprecation-style warning
    with pytest.warns(UserWarning) if k == 'hist' else nullcontext():
        axes = ns[['x2']].plot_1d(kind=k, logx=['x2'], bins=b, range=r)
    ax = axes.loc['x2']
    assert ax.get_xscale() == 'log'
    # peak of the histogram should sit near 0.1 in log10-space
    arg = np.argmax([p.get_height() for p in ax.patches])
    pmax = np.log10(ax.patches[arg].get_x())
    d = np.log10(ax.patches[arg+1].get_x() / ax.patches[arg].get_x())
    assert pmax == pytest.approx(-1, abs=d)
def test_logscale_failure_without_match():
    """Requesting log scales on existing linear axes should raise ValueError."""
    ns = read_chains('./tests/example_data/pc')
    params = ['x0', 'x2']
    # 1d
    axes = ns.plot_1d(params)
    with pytest.raises(ValueError):
        ns.plot_1d(axes, logx=['x2'])
    fig, axes = make_1d_axes(params)
    with pytest.raises(ValueError):
        ns.plot_1d(axes, logx=['x2'])
    # 2d
    axes = ns.plot_2d(params)
    with pytest.raises(ValueError):
        ns.plot_2d(axes, logx=['x2'])
    axes = ns.plot_2d(params)
    with pytest.raises(ValueError):
        ns.plot_2d(axes, logy=['x2'])
    axes = ns.plot_2d(params)
    with pytest.raises(ValueError):
        ns.plot_2d(axes, logx=['x2'], logy=['x2'])
    fig, axes = make_2d_axes(params)
    with pytest.raises(ValueError):
        ns.plot_2d(axes, logx=['x2'])
    fig, axes = make_2d_axes(params)
    with pytest.raises(ValueError):
        ns.plot_2d(axes, logy=['x2'])
    fig, axes = make_2d_axes(params)
    with pytest.raises(ValueError):
        ns.plot_2d(axes, logx=['x2'], logy=['x2'])
def test_mcmc_stats():
    """Check burn-in removal indexing and the Gelman--Rubin statistic.

    `n0` and `n1` are the per-chain sample counts; the index assertions
    verify that remove_burn_in trims each chain independently for
    absolute, fractional, negative and per-chain burn-in specifications.
    """
    mcmc = read_chains('./tests/example_data/cb')
    chains = mcmc.groupby(('chain', '$n_\\mathrm{chain}$'), group_keys=False)
    n0, n1 = chains.count().iloc[:, 0]  # number samples in first chain
    mcmc_head = chains.head(200).copy()
    mcmc_tail = mcmc.remove_burn_in(burn_in=200)
    mcmc_half = mcmc.remove_burn_in(burn_in=0.5)
    # check indices after burn-in removal
    assert mcmc_tail.index.get_level_values(0)[0] == 200
    assert mcmc_tail.index.get_level_values(0)[n0] == 200 + n0 + 200
    assert mcmc_half.index.get_level_values(0)[0] == floor(n0/2)
    assert mcmc_half.index.get_level_values(0)[ceil(n0/2)] == n0 + floor(n1/2)
    # check Gelman--Rubin statistic
    assert mcmc_head.Gelman_Rubin() > 0.1
    assert mcmc_tail.Gelman_Rubin() < 0.01
    assert mcmc_half.Gelman_Rubin() < 0.01
    assert mcmc_half.Gelman_Rubin(['x0']) < 0.01
    assert mcmc_half.Gelman_Rubin(['x1']) < 0.01
    # duplicated columns make the covariance singular
    with pytest.raises(np.linalg.LinAlgError):
        mcmc['y1'] = mcmc.x1
        mcmc['y2'] = mcmc.x1
        mcmc['y3'] = mcmc.x1
        mcmc.Gelman_Rubin(['x0', 'x1', 'y1', 'y2', 'y3'])
    # check per-parameter Gelman--Rubin statistic
    GR_par = mcmc_head.Gelman_Rubin(per_param='par')
    GR_cov = mcmc_head.Gelman_Rubin(per_param='cov')
    assert_array_equal(np.ravel(GR_par), np.diag(GR_cov))
    assert np.all(GR_par > 0.1)
    assert np.all(GR_cov > 0.1)
    GR_par = mcmc_tail.Gelman_Rubin(per_param='par')
    GR_cov = mcmc_tail.Gelman_Rubin(per_param='cov')
    assert_array_equal(np.ravel(GR_par), np.diag(GR_cov))
    assert np.all(GR_par < 0.01)
    assert np.all(GR_cov < 0.01)
    GR_par = mcmc_half.Gelman_Rubin(per_param='par')
    GR_cov = mcmc_half.Gelman_Rubin(per_param='cov')
    assert_array_equal(np.ravel(GR_par), np.diag(GR_cov))
    assert np.all(GR_par < 0.01)
    assert np.all(GR_cov < 0.01)
    assert len(mcmc_half.Gelman_Rubin(per_param=True)) == 2
    assert len(mcmc_half.Gelman_Rubin(per_param='all')) == 2
    assert_array_equal(mcmc_half.Gelman_Rubin(per_param=True)[1], GR_par)
    assert_array_equal(mcmc_half.Gelman_Rubin(per_param='all')[1], GR_cov)
    # more burn-in checks
    mcmc_new = mcmc.remove_burn_in(burn_in=200.9)
    assert len(mcmc_new) == n0 - 200 + n1 - 200
    assert mcmc_new.index.get_level_values(0)[0] == 200
    assert mcmc_new.index.get_level_values(0)[n0] == 200 + n0 + 200
    mcmc_new = mcmc.remove_burn_in(burn_in=-0.5)
    assert len(mcmc_new) == floor(n0/2) + floor(n1/2)
    assert mcmc_new.index.get_level_values(0)[0] == ceil(n0/2)
    assert mcmc_new.index.get_level_values(0)[floor(n0/2)] == n0 + floor(n1/2)
    mcmc_new = mcmc.remove_burn_in(burn_in=-200)
    assert len(mcmc_new) == 200 + 200
    assert mcmc_new.index.get_level_values(0)[0] == n0 - 200
    assert mcmc_new.index.get_level_values(0)[200] == n0 + n1 - 200
    mcmc_new = mcmc.remove_burn_in(burn_in=[0.8, -0.75])
    assert len(mcmc_new) == ceil(n0/5) + floor(3*n1/4)
    assert mcmc_new.index.get_level_values(0)[0] == floor(4*n0/5)
    assert mcmc_new.index.get_level_values(0)[ceil(n0/5)] == n0 + ceil(n1/4)
    mcmc_new = mcmc.remove_burn_in(burn_in=[2, -100])
    assert len(mcmc_new) == n0 - 2 + 100
    assert mcmc_new.index.get_level_values(0)[0] == 2
    assert mcmc_new.index.get_level_values(0)[n0-2] == n0 + n1 - 100
    # test reset index
    mcmc_new = mcmc.remove_burn_in(burn_in=200, reset_index=True)
    assert len(mcmc_new) == n0 - 200 + n1 - 200
    assert mcmc_new.index.get_level_values(0)[0] == 0
    assert mcmc_new.index.get_level_values(0)[-1] == n0 - 200 + n1 - 200 - 1
    # test inplace
    assert mcmc.index.get_level_values(0)[0] == 0
    assert mcmc.index.get_level_values(0)[n0] == n0
    mcmc_new = mcmc.remove_burn_in(burn_in=200, inplace=True)
    assert mcmc_new is None
    assert len(mcmc) == n0 - 200 + n1 - 200
    assert mcmc.index.get_level_values(0)[0] == 200
    assert mcmc.index.get_level_values(0)[n0] == 200 + n0 + 200
    # one burn-in value per chain only
    with pytest.raises(ValueError):
        mcmc.remove_burn_in(burn_in=[1, 2, 3])
def test_logX():
    """Check types, shapes and monotonicity of the logX volume estimates."""
    np.random.seed(3)
    pc = read_chains('./tests/example_data/pc')
    logX = pc.logX()
    assert isinstance(logX, WeightedSeries)
    assert_array_equal(logX.index, pc.index)
    nsamples = 10
    logX = pc.logX(nsamples=nsamples)
    assert isinstance(logX, WeightedDataFrame)
    assert_array_equal(logX.index, pc.index)
    assert_array_equal(logX.columns, np.arange(nsamples))
    assert logX.columns.name == 'samples'
    # prior volume never increases along the run
    assert not (logX.diff(axis=0) > 0).to_numpy().any()
    n = 1000
    logX = pc.logX(n)
    # sample mean should agree with the deterministic estimate to 3 sigma
    assert (abs(logX.mean(axis=1) - pc.logX()) < logX.std(axis=1) * 3).all()
def test_logdX():
    """Check types, shapes and negativity of the logdX volume elements."""
    np.random.seed(3)
    pc = read_chains('./tests/example_data/pc')
    logdX = pc.logdX()
    assert isinstance(logdX, WeightedSeries)
    assert_array_equal(logdX.index, pc.index)
    nsamples = 10
    logdX = pc.logdX(nsamples=nsamples)
    assert isinstance(logdX, WeightedDataFrame)
    assert_array_equal(logdX.index, pc.index)
    assert_array_equal(logdX.columns, np.arange(nsamples))
    assert logdX.columns.name == 'samples'
    # volume elements are fractions of the prior, so logdX <= 0
    assert not (logdX > 0).to_numpy().any()
    n = 1000
    logdX = pc.logdX(n)
    # sample mean should agree with the deterministic estimate to 3 sigma
    assert (abs(logdX.mean(axis=1) - pc.logdX()) < logdX.std(axis=1) * 3).all()
def test_logbetaL():
    """Check pc.logbetaL() column structure, mirroring test_logw.

    BUG FIX: this test body was previously a verbatim copy of test_logX
    and never called logbetaL at all.
    """
    np.random.seed(3)
    pc = read_chains('./tests/example_data/pc')
    logbetaL = pc.logbetaL()
    assert isinstance(logbetaL, WeightedSeries)
    assert_array_equal(logbetaL.index, pc.index)
    nsamples = 10
    beta = [0., 0.5, 1.]
    # nsamples broadcasts across a 'samples' column level
    logbetaL = pc.logbetaL(nsamples=nsamples)
    assert isinstance(logbetaL, WeightedDataFrame)
    assert_array_equal(logbetaL.index, pc.index)
    assert logbetaL.columns.name == 'samples'
    assert_array_equal(logbetaL.columns, range(nsamples))
    # beta broadcasts across a 'beta' column level
    logbetaL = pc.logbetaL(beta=beta)
    assert isinstance(logbetaL, WeightedDataFrame)
    assert_array_equal(logbetaL.index, pc.index)
    assert logbetaL.columns.name == 'beta'
    assert_array_equal(logbetaL.columns, beta)
    # both together give a two-level ('beta', 'samples') column index
    logbetaL = pc.logbetaL(nsamples=nsamples, beta=beta)
    assert isinstance(logbetaL, WeightedDataFrame)
    assert logbetaL.columns.names == ['beta', 'samples']
    assert logbetaL.columns.levshape == (len(beta), nsamples)
def test_logw():
    """Check the column structure of pc.logw for nsamples/beta broadcasting."""
    np.random.seed(3)
    pc = read_chains('./tests/example_data/pc')
    logw = pc.logw()
    assert isinstance(logw, WeightedSeries)
    assert_array_equal(logw.index, pc.index)
    nsamples = 10
    beta = [0., 0.5, 1.]
    # nsamples broadcasts across a 'samples' column level
    logw = pc.logw(nsamples=nsamples)
    assert isinstance(logw, WeightedDataFrame)
    assert_array_equal(logw.index, pc.index)
    assert logw.columns.name == 'samples'
    assert_array_equal(logw.columns, range(nsamples))
    # beta broadcasts across a 'beta' column level
    logw = pc.logw(beta=beta)
    assert isinstance(logw, WeightedDataFrame)
    assert_array_equal(logw.index, pc.index)
    assert logw.columns.name == 'beta'
    assert_array_equal(logw.columns, beta)
    # both together give a two-level ('beta', 'samples') column index
    logw = pc.logw(nsamples=nsamples, beta=beta)
    assert isinstance(logw, WeightedDataFrame)
    assert logw.columns.names == ['beta', 'samples']
    assert logw.columns.levshape == (len(beta), nsamples)
    n = 1000
    logw = pc.logw(n)
    # sample mean should agree with the deterministic estimate to 3 sigma
    assert (abs(logw.mean(axis=1) - pc.logw()) < logw.std(axis=1) * 3).all()
def test_logZ():
    """Check scalar/Series forms and index structure of the evidence logZ."""
    np.random.seed(3)
    pc = read_chains('./tests/example_data/pc')
    logZ = pc.logZ()
    assert isinstance(logZ, float)
    nsamples = 10
    beta = [0., 0.5, 1.]
    # nsamples gives a 'samples'-indexed series
    logZ = pc.logZ(nsamples=nsamples)
    assert isinstance(logZ, WeightedLabelledSeries)
    assert logZ.index.name == 'samples'
    assert logZ.name == 'logZ'
    assert_array_equal(logZ.index, range(nsamples))
    # beta gives a 'beta'-indexed series
    logZ = pc.logZ(beta=beta)
    assert isinstance(logZ, WeightedLabelledSeries)
    assert logZ.index.name == 'beta'
    assert logZ.name == 'logZ'
    assert len(logZ) == len(beta)
    # both together give a two-level ('beta', 'samples') index
    logZ = pc.logZ(nsamples=nsamples, beta=beta)
    assert isinstance(logZ, WeightedLabelledSeries)
    assert logZ.index.names == ['beta', 'samples']
    assert logZ.name == 'logZ'
    assert logZ.index.levshape == (len(beta), nsamples)
    n = 1000
    logZ = pc.logZ(n)
    # sample mean should agree with the point estimate to 3 sigma
    assert abs(logZ.mean() - pc.logZ()) < logZ.std() * 3
def test_D_KL():
    """Check scalar/Series forms and index structure of the KL divergence."""
    np.random.seed(3)
    pc = read_chains('./tests/example_data/pc')
    D_KL = pc.D_KL()
    assert isinstance(D_KL, float)
    nsamples = 10
    beta = [0., 0.5, 1.]
    # nsamples gives a 'samples'-indexed series
    D_KL = pc.D_KL(nsamples=nsamples)
    assert isinstance(D_KL, WeightedLabelledSeries)
    assert D_KL.index.name == 'samples'
    assert D_KL.name == 'D_KL'
    assert_array_equal(D_KL.index, range(nsamples))
    # beta gives a 'beta'-indexed series
    D_KL = pc.D_KL(beta=beta)
    assert isinstance(D_KL, WeightedLabelledSeries)
    assert D_KL.index.name == 'beta'
    assert D_KL.name == 'D_KL'
    assert len(D_KL) == len(beta)
    # both together give a two-level ('beta', 'samples') index
    D_KL = pc.D_KL(nsamples=nsamples, beta=beta)
    assert isinstance(D_KL, WeightedLabelledSeries)
    assert D_KL.index.names == ['beta', 'samples']
    assert D_KL.name == 'D_KL'
    assert D_KL.index.levshape == (len(beta), nsamples)
    n = 1000
    D_KL = pc.D_KL(n)
    # sample mean should agree with the point estimate to 3 sigma
    assert abs(D_KL.mean() - pc.D_KL()) < D_KL.std() * 3
def test_d_G():
    """Check scalar/Series forms and index structure of the model dimension."""
    np.random.seed(3)
    pc = read_chains('./tests/example_data/pc')
    d_G = pc.d_G()
    assert isinstance(d_G, float)
    nsamples = 10
    beta = [0., 0.5, 1.]
    # nsamples gives a 'samples'-indexed series
    d_G = pc.d_G(nsamples=nsamples)
    assert isinstance(d_G, WeightedLabelledSeries)
    assert d_G.index.name == 'samples'
    assert d_G.name == 'd_G'
    assert_array_equal(d_G.index, range(nsamples))
    # beta gives a 'beta'-indexed series
    d_G = pc.d_G(beta=beta)
    assert isinstance(d_G, WeightedLabelledSeries)
    assert d_G.index.name == 'beta'
    assert d_G.name == 'd_G'
    assert len(d_G) == len(beta)
    # both together give a two-level ('beta', 'samples') index
    d_G = pc.d_G(nsamples=nsamples, beta=beta)
    assert isinstance(d_G, WeightedLabelledSeries)
    assert d_G.index.names == ['beta', 'samples']
    assert d_G.name == 'd_G'
    assert d_G.index.levshape == (len(beta), nsamples)
    n = 1000
    d_G = pc.d_G(n)
    # sample mean should agree with the point estimate to 3 sigma
    assert abs(d_G.mean() - pc.d_G()) < d_G.std() * 3
def test_logL_P():
    """Check scalar/Series forms and index structure of the posterior logL."""
    np.random.seed(3)
    pc = read_chains('./tests/example_data/pc')
    logL_P = pc.logL_P()
    assert isinstance(logL_P, float)
    nsamples = 10
    beta = [0., 0.5, 1.]
    # nsamples gives a 'samples'-indexed series
    logL_P = pc.logL_P(nsamples=nsamples)
    assert isinstance(logL_P, WeightedLabelledSeries)
    assert logL_P.index.name == 'samples'
    assert logL_P.name == 'logL_P'
    assert_array_equal(logL_P.index, range(nsamples))
    # beta gives a 'beta'-indexed series
    logL_P = pc.logL_P(beta=beta)
    assert isinstance(logL_P, WeightedLabelledSeries)
    assert logL_P.index.name == 'beta'
    assert logL_P.name == 'logL_P'
    assert len(logL_P) == len(beta)
    # both together give a two-level ('beta', 'samples') index
    logL_P = pc.logL_P(nsamples=nsamples, beta=beta)
    assert isinstance(logL_P, WeightedLabelledSeries)
    assert logL_P.index.names == ['beta', 'samples']
    assert logL_P.name == 'logL_P'
    assert logL_P.index.levshape == (len(beta), nsamples)
    n = 1000
    logL_P = pc.logL_P(n)
    # sample mean should agree with the point estimate to 3 sigma
    assert abs(logL_P.mean() - pc.logL_P()) < logL_P.std() * 3
@pytest.mark.parametrize('beta', [None, 0.5, [0, 0.5, 1]])
@pytest.mark.parametrize('nsamples', [None, 10, 100])
def test_Occams_razor(nsamples, beta):
    """Check the Occam's razor identity logZ = logL_P - D_KL.

    Reusing a single logw across all three estimators makes the identity
    hold exactly (up to floating point) rather than just statistically.
    """
    np.random.seed(3)
    pc = read_chains('./tests/example_data/pc')
    logw = pc.logw(nsamples, beta)
    assert_allclose(pc.logZ(logw), pc.logL_P(logw) - pc.D_KL(logw))
def test_stats():
    """Check pc.stats() output shapes, labels, and statistical consistency.

    Covers every combination of nsamples/beta/norm, then compares the
    sampled summary statistics against the individual estimators at
    several temperatures beta.
    """
    np.random.seed(3)
    pc = read_chains('./tests/example_data/pc')
    nsamples = 10
    beta = [0., 0.5, 1.]
    vals = ['logZ', 'D_KL', 'logL_P', 'd_G']
    delta_vals = ['Delta_logZ', 'Delta_D_KL', 'Delta_logL_P', 'Delta_d_G']
    labels = [r'$\ln\mathcal{Z}$',
              r'$\mathcal{D}_\mathrm{KL}$',
              r'$\langle\ln\mathcal{L}\rangle_\mathcal{P}$',
              r'$d_\mathrm{G}$']
    delta_labels = [r'$\Delta\ln\mathcal{Z}$',
                    r'$\Delta\mathcal{D}_\mathrm{KL}$',
                    r'$\Delta\langle\ln\mathcal{L}\rangle_\mathcal{P}$',
                    r'$\Delta d_\mathrm{G}$']
    # scalar form, with and without normalisation
    stats = pc.stats()
    assert isinstance(stats, WeightedLabelledSeries)
    assert_array_equal(stats.drop_labels().index, vals)
    assert_array_equal(stats.get_labels(), labels)
    stats = pc.stats(norm=pc.stats())
    assert isinstance(stats, WeightedLabelledSeries)
    assert_array_equal(stats.drop_labels().index, vals + delta_vals)
    assert_array_equal(stats.get_labels(), labels + delta_labels)
    # nsamples form: 'samples'-indexed DataFrame
    stats = pc.stats(nsamples=nsamples)
    assert isinstance(stats, WeightedLabelledDataFrame)
    assert_array_equal(stats.drop_labels().columns, vals)
    assert_array_equal(stats.get_labels(), labels)
    assert stats.index.name == 'samples'
    assert_array_equal(stats.index, range(nsamples))
    stats = pc.stats(nsamples=nsamples, norm=pc.stats())
    assert isinstance(stats, WeightedLabelledDataFrame)
    assert_array_equal(stats.drop_labels().columns, vals + delta_vals)
    assert_array_equal(stats.get_labels(), labels + delta_labels)
    assert stats.index.name == 'samples'
    assert_array_equal(stats.index, range(nsamples))
    stats = pc.stats(nsamples=nsamples, norm=pc.stats(nsamples=nsamples))
    assert isinstance(stats, WeightedLabelledDataFrame)
    assert_array_equal(stats.drop_labels().columns, vals + delta_vals)
    assert_array_equal(stats.get_labels(), labels + delta_labels)
    assert stats.index.name == 'samples'
    assert_array_equal(stats.index, range(nsamples))
    # beta form: 'beta'-indexed DataFrame
    stats = pc.stats(beta=beta)
    assert isinstance(stats, WeightedLabelledDataFrame)
    assert_array_equal(stats.drop_labels().columns, vals)
    assert_array_equal(stats.get_labels(), labels)
    assert stats.index.name == 'beta'
    assert_array_equal(stats.index, beta)
    stats = pc.stats(beta=beta, norm=pc.stats())
    assert isinstance(stats, WeightedLabelledDataFrame)
    assert_array_equal(stats.drop_labels().columns, vals + delta_vals)
    assert_array_equal(stats.get_labels(), labels + delta_labels)
    assert stats.index.name == 'beta'
    assert_array_equal(stats.index, beta)
    stats = pc.stats(beta=beta, norm=pc.stats(beta=beta))
    assert isinstance(stats, WeightedLabelledDataFrame)
    assert_array_equal(stats.drop_labels().columns, vals + delta_vals)
    assert_array_equal(stats.get_labels(), labels + delta_labels)
    assert stats.index.name == 'beta'
    assert_array_equal(stats.index, beta)
    # combined form: two-level ('beta', 'samples') index
    stats = pc.stats(nsamples=nsamples, beta=beta)
    assert isinstance(stats, WeightedLabelledDataFrame)
    assert_array_equal(stats.drop_labels().columns, vals)
    assert_array_equal(stats.get_labels(), labels)
    assert stats.index.names == ['beta', 'samples']
    assert stats.index.levshape == (len(beta), nsamples)
    stats = pc.stats(nsamples=nsamples, beta=beta, norm=pc.stats())
    assert isinstance(stats, WeightedLabelledDataFrame)
    assert_array_equal(stats.drop_labels().columns, vals + delta_vals)
    assert_array_equal(stats.get_labels(), labels + delta_labels)
    assert stats.index.names == ['beta', 'samples']
    assert stats.index.levshape == (len(beta), nsamples)
    stats = pc.stats(nsamples=nsamples, beta=beta,
                     norm=pc.stats(nsamples=nsamples, beta=beta))
    assert isinstance(stats, WeightedLabelledDataFrame)
    assert_array_equal(stats.drop_labels().columns, vals + delta_vals)
    assert_array_equal(stats.get_labels(), labels + delta_labels)
    assert stats.index.names == ['beta', 'samples']
    assert stats.index.levshape == (len(beta), nsamples)
    # statistical consistency against the individual estimators
    for beta in [1., 0., 0.5]:
        np.random.seed(42)
        pc.beta = beta
        n = 1000
        PC = pc.stats(n, beta)
        assert abs(pc.logZ() - PC['logZ'].mean()) < PC['logZ'].std()
        assert PC['d_G'].mean() < 5 + 3 * PC['d_G'].std()
        assert PC.cov()['D_KL']['logZ'] < 0
        assert abs(PC.logZ.mean() - pc.logZ()) < PC.logZ.std() * 3
        assert abs(PC.D_KL.mean() - pc.D_KL()) < PC.D_KL.std() * 3
        assert abs(PC.d_G.mean() - pc.d_G()) < PC.d_G.std() * 3
        assert abs(PC.logL_P.mean() - pc.logL_P()) < PC.logL_P.std() * 3
        n = 100
        # sampled distributions should be indistinguishable (KS test)
        assert ks_2samp(pc.logZ(n, beta), PC.logZ).pvalue > 0.05
        assert ks_2samp(pc.D_KL(n, beta), PC.D_KL).pvalue > 0.05
        assert ks_2samp(pc.d_G(n, beta), PC.d_G).pvalue > 0.05
        if beta != 0:
            assert ks_2samp(pc.logL_P(n, beta), PC.logL_P).pvalue > 0.05
    # beta=0 gives the prior, whose evidence is unity (logZ ~ 0)
    assert abs(pc.set_beta(0.0).logZ()) < 1e-2
    assert pc.set_beta(0.9).logZ() < pc.set_beta(1.0).logZ()
    # set_beta should be deterministic
    assert_array_almost_equal(pc.set_beta(1).get_weights(),
                              pc.set_beta(1).get_weights())
    assert_array_almost_equal(pc.set_beta(.5).get_weights(),
                              pc.set_beta(.5).get_weights())
    assert_array_equal(pc.set_beta(0).get_weights(),
                       pc.set_beta(0).get_weights())
@pytest.mark.parametrize('kind', ['kde', 'hist', 'kde_1d', 'hist_1d',
                                  skipif_no_fastkde('fastkde_1d')])
def test_masking_1d(kind):
    """1d plots work on a boolean-masked subset of the samples."""
    samples = read_chains("./tests/example_data/pc")
    positive = samples['x0'].to_numpy() > 0
    # The bare 'kde'/'hist' kinds emit a deprecation-style UserWarning.
    ctx = pytest.warns(UserWarning) if kind in ['kde', 'hist'] \
        else nullcontext()
    with ctx:
        samples[positive].plot_1d(['x0', 'x1', 'x2'], kind=kind)
@pytest.mark.parametrize('kind', ['kde', 'scatter', 'scatter_2d', 'kde_2d',
                                  'hist_2d', skipif_no_fastkde('fastkde_2d')])
def test_masking_2d(kind):
    """2d plots work on a boolean-masked subset of the samples."""
    samples = read_chains("./tests/example_data/pc")
    positive = samples['x0'].to_numpy() > 0
    # Only the bare 'kde' kind emits a UserWarning here.
    ctx = pytest.warns(UserWarning) if kind == 'kde' else nullcontext()
    with ctx:
        samples[positive].plot_2d(['x0', 'x1', 'x2'], kind={'lower': kind})
def test_merging():
    """Merging nested-sampling runs adds their live-point counts."""
    np.random.seed(3)
    run_a = read_chains('./tests/example_data/pc')
    run_b = read_chains('./tests/example_data/pc_250')
    combined = merge_nested_samples([run_a, run_b])
    nlive_a = run_a.nlive.mode().to_numpy()[0]
    nlive_b = run_b.nlive.mode().to_numpy()[0]
    nlive_c = combined.nlive.mode().to_numpy()[0]
    assert nlive_a == 125
    assert nlive_b == 250
    assert nlive_c == nlive_a + nlive_b
    # The merged evidence lies strictly between the two input evidences.
    logZ_a, logZ_c, logZ_b = run_a.logZ(), combined.logZ(), run_b.logZ()
    assert (logZ_a > logZ_c > logZ_b or logZ_a < logZ_c < logZ_b)
def test_weighted_merging():
    """Check merge_samples_weighted: means, labels and error handling."""
    # Generate some data to try it out:
    samples_1 = read_chains('./tests/example_data/pc')
    samples_2 = read_chains('./tests/example_data/pc_250')
    samples_1[('xtest', '$x_t$')] = 7*samples_1['x3']
    samples_2[('xtest', "$x_t$")] = samples_2['x3']
    mean1 = samples_1.xtest.mean()
    mean2 = samples_2.xtest.mean()
    # Test with evidence weights
    weight1 = np.exp(samples_1.logZ())
    weight2 = np.exp(samples_2.logZ())
    samples = merge_samples_weighted([samples_1, samples_2],
                                     label='Merged label')
    mean = samples.xtest.mean()
    # Merged mean is the evidence-weighted average of the two means.
    assert np.isclose(mean, (mean1*weight1+mean2*weight2)/(weight1+weight2))
    assert samples.label == 'Merged label'
    # Test that label is None when no label is passed
    samples_1.label = "1"
    samples_2.label = "2"
    samples = merge_samples_weighted([samples_1, samples_2])
    assert samples.label is None
    # Test with explicit weights
    weight1 = 31
    weight2 = 13
    samples = merge_samples_weighted(
        [samples_1, samples_2], weights=[weight1, weight2])
    mean = samples.xtest.mean()
    assert np.isclose(mean, (mean1*weight1+mean2*weight2)/(weight1+weight2))
    # Test plot still works (see issue #189)
    prior_samples = []
    for i in range(3):
        d = {"x": np.random.uniform(size=1000),
             "y": np.random.uniform(size=1000)}
        tmp = Samples(d)
        prior_samples.append(tmp)
    merge_prior = merge_samples_weighted(prior_samples, weights=np.ones(3))
    merge_prior.plot_2d(["x", "y"])
    # Test if correct exceptions are raised:
    # MCMCSamples are passed without weights
    with pytest.raises(ValueError):
        merge_samples_weighted([MCMCSamples(samples_1)])
    # len(weights) != len(samples)
    with pytest.raises(ValueError):
        merge_samples_weighted([samples_1, samples_2], weights=[1, 2, 3])
    # A samples is passed and not a sequence
    with pytest.raises(TypeError):
        merge_samples_weighted(samples_1, weights=[1, 2, 3])
def test_beta():
    """Check temperature handling via set_beta and the beta property."""
    pc = read_chains("./tests/example_data/pc")
    weights = pc.get_weights()
    # get_weights is deterministic and mirrors the 'weights' index level.
    assert_array_equal(weights, pc.get_weights())
    assert_array_equal(pc.index.get_level_values('weights'), pc.get_weights())
    assert pc.beta == 1
    prior = pc.set_beta(0)
    assert prior.beta == 0
    assert_array_equal(prior.index.get_level_values('weights'),
                       prior.get_weights())
    # set_beta returns a copy by default: the original is untouched.
    assert pc.beta == 1
    assert_array_equal(pc.index.get_level_values('weights'), pc.get_weights())
    # At beta=0 the weights come out in decreasing order.
    assert_array_almost_equal(sorted(prior.get_weights(), reverse=True),
                              prior.get_weights())
    for beta in np.linspace(0, 2, 10):
        pc.set_beta(beta, inplace=True)
        assert pc.beta == beta
        assert_array_equal(pc.index.get_level_values('weights'),
                           pc.get_weights())
        assert not np.array_equal(pc.index.get_level_values('weights'),
                                  weights)
    for beta in np.linspace(0, 2, 10):
        # Assigning the property behaves like set_beta(..., inplace=True).
        pc.beta = beta
        assert pc.beta == beta
        assert_array_equal(pc.index.get_level_values('weights'),
                           pc.get_weights())
        assert not np.array_equal(pc.index.get_level_values('weights'),
                                  weights)
def test_beta_with_logL_infinities():
    """recompute() warns about and drops points with -inf likelihood."""
    ns = read_chains("./tests/example_data/pc")
    logL_col = ('logL', r'$\ln\mathcal{L}$')
    ns.loc[:10, logL_col] = -np.inf
    ns.loc[1000, logL_col] = -np.inf
    with pytest.warns(RuntimeWarning):
        ns.recompute(inplace=True)
    assert (ns.logL == -np.inf).sum() == 0
def test_prior():
    """prior() is shorthand for set_beta(0)."""
    samples = read_chains("./tests/example_data/pc")
    prior = samples.prior()
    assert prior.beta == 0
    assert_frame_equal(prior, samples.set_beta(0))
def test_live_points():
    """Check live_points accepts logL floats, ints and tuple indices."""
    np.random.seed(4)
    pc = read_chains("./tests/example_data/pc")
    for i, logL in pc.logL.iloc[::49].items():
        live_points = pc.live_points(logL)
        assert len(live_points) == int(pc.nlive[i[0]])
        # Integer and full multi-index lookups agree with the logL lookup.
        live_points_from_int = pc.live_points(i[0])
        assert_array_equal(live_points_from_int, live_points)
        live_points_from_index = pc.live_points(i)
        assert_array_equal(live_points_from_index, live_points)
    assert pc.live_points(0).index[0] == 0
    # With no argument the final set of live points is returned.
    last_live_points = pc.live_points()
    logL = pc.logL_birth.max()
    assert (last_live_points.logL >= logL).all()
    assert len(last_live_points) == pc.nlive.mode().to_numpy()[0]
    assert not live_points.isweighted()
def test_dead_points():
    """Check dead_points accepts logL floats, ints and tuple indices."""
    np.random.seed(4)
    pc = read_chains("./tests/example_data/pc")
    for i, logL in pc.logL.iloc[::49].items():
        dead_points = pc.dead_points(logL)
        assert len(dead_points) == int(len(pc[:i[0]]))
        # Integer and full multi-index lookups agree with the logL lookup.
        dead_points_from_int = pc.dead_points(i[0])
        assert_array_equal(dead_points_from_int, dead_points)
        dead_points_from_index = pc.dead_points(i)
        assert_array_equal(dead_points_from_index, dead_points)
    assert pc.dead_points(1).index[0] == 0
    # With no argument all points below the final live set are returned.
    last_dead_points = pc.dead_points()
    logL = pc.logL_birth.max()
    assert (last_dead_points.logL <= logL).all()
    assert len(last_dead_points) == len(pc) - pc.nlive.mode().to_numpy()[0]
    assert not dead_points.isweighted()
def test_contour():
    """contour() handles float, integer and None cut values."""
    np.random.seed(4)
    pc = read_chains("./tests/example_data/pc")
    # A float cut is returned unchanged.
    assert pc.contour(30.0) == 30.0
    # Integer 0 maps to the lowest likelihood contour.
    assert pc.contour(0) == pc.logL.min()
    # None defaults to the contour at the start of the final live set.
    nlive = pc.nlive.mode().to_numpy()[0]
    assert pc.contour(None) == sorted(pc.logL)[-nlive]
@pytest.mark.parametrize("cut", [200, 0.0, None])
def test_truncate(cut):
    """truncate() never duplicates indices and is a no-op for cut=None."""
    np.random.seed(4)
    samples = read_chains("./tests/example_data/pc")
    truncated = samples.truncate(cut)
    assert not truncated.index.duplicated().any()
    if cut is None:
        assert_array_equal(samples, truncated)
def test_hist_range_1d():
    """Regression test for issue #89: histogram x-range handling."""
    np.random.seed(3)
    ns = read_chains('./tests/example_data/pc')
    # By default the bin range hugs the data, inside the unit interval.
    lo, hi = ns.plot_1d('x0', kind='hist_1d')['x0'].get_xlim()
    assert lo > -1
    assert hi < +1
    # Explicit bins widen the view to cover the requested range.
    bins = np.linspace(-1, 1, 11)
    lo, hi = ns.plot_1d('x0', kind='hist_1d', bins=bins)['x0'].get_xlim()
    assert lo <= -1
    assert hi >= +1
def test_compute_insertion():
    """Insertion indexes should look uniform for a healthy nested run."""
    np.random.seed(3)
    ns = read_chains('./tests/example_data/pc')
    assert 'insertion' not in ns
    ns._compute_insertion_indexes()
    assert 'insertion' in ns
    nlive = ns.nlive.mode().to_numpy()[0]
    assert_array_less(ns.insertion, nlive)
    # Normalised insertion indexes away from the ends should be U(0, 1).
    u = ns.insertion.to_numpy()/nlive
    assert kstest(u[nlive:-nlive], 'uniform').pvalue > 0.05
    # Per-block KS p-values should themselves be uniform.
    pvalues = [kstest(u[i:i+nlive], 'uniform').pvalue
               for i in range(nlive, len(ns)-2*nlive, nlive)]
    assert kstest(pvalues, 'uniform').pvalue > 0.05
def test_posterior_points():
    """Repeated calls to posterior_points give identical point sets."""
    np.random.seed(3)
    samples = read_chains('./tests/example_data/pc')
    first, second = samples.posterior_points(), samples.posterior_points()
    assert_array_equal(first, second)
    first = samples.posterior_points(0.5)
    second = samples.posterior_points(0.5)
    assert_array_equal(first, second)
def test_prior_points():
    """prior_points is shorthand for posterior_points at beta=0."""
    samples = read_chains('./tests/example_data/pc')
    assert_array_equal(samples.prior_points(), samples.posterior_points(0))
def test_NestedSamples_importance_sample():
    """Check importance_sample actions and their effect on the evidence."""
    np.random.seed(3)
    ns0 = read_chains('./tests/example_data/pc')
    pi0 = ns0.set_beta(0)
    NS0 = ns0.stats(nsamples=2000)
    with pytest.raises(NotImplementedError):
        ns0.importance_sample(ns0.logL, action='spam')
    # 'replace' with the same logL and 'add' of zeros are both no-ops.
    ns_masked = ns0.importance_sample(ns0.logL, action='replace')
    assert_array_equal(ns0.logL, ns_masked.logL)
    assert_array_equal(ns0.logL_birth, ns_masked.logL_birth)
    assert_array_equal(ns0.get_weights(), ns_masked.get_weights())
    ns_masked = ns0.importance_sample(np.zeros_like(ns0.logL), action='add')
    assert_array_equal(ns0.logL, ns_masked.logL)
    assert_array_equal(ns0.logL_birth, ns_masked.logL_birth)
    assert_array_equal(ns0.get_weights(), ns_masked.get_weights())
    mask = ((ns0.x0 > -0.3) & (ns0.x2 > 0.2) & (ns0.x4 < 3.5)).to_numpy()
    ns_masked = merge_nested_samples((ns0[mask], ))
    V_prior = pi0[mask].get_weights().sum() / pi0.get_weights().sum()
    V_posterior = ns0[mask].get_weights().sum() / ns0.get_weights().sum()
    # action='mask' is equivalent to boolean masking plus a re-merge.
    ns1 = ns0.importance_sample(mask, action='mask')
    assert_array_equal(ns_masked.logL, ns1.logL)
    assert_array_equal(ns_masked.logL_birth, ns1.logL_birth)
    assert_array_equal(ns_masked.get_weights(), ns1.get_weights())
    # Adding logL = -inf outside the mask reproduces the masked run;
    # the evidence shifts by log(V_posterior) - log(V_prior).
    logL_new = np.where(mask, 0, -np.inf)
    ns1 = ns0.importance_sample(logL_new)
    NS1 = ns1.stats(nsamples=2000)
    assert_array_equal(ns1, ns_masked)
    logZ_V = NS0.logZ.mean() + np.log(V_posterior) - np.log(V_prior)
    assert abs(NS1.logZ.mean() - logZ_V) < 1.5 * NS1.logZ.std()
    # A large finite penalty instead shifts logZ by log(V_posterior) only.
    logL_new = np.where(mask, 0, -1e30)
    ns1 = ns0.importance_sample(logL_new)
    NS1 = ns1.stats(nsamples=2000)
    logZ_V = NS0.logZ.mean() + np.log(V_posterior)
    assert abs(NS1.logZ.mean() - logZ_V) < 1.5 * NS1.logZ.std()
    # inplace=True mutates ns0 but preserves metadata and type.
    ns0.importance_sample(logL_new, inplace=True)
    assert type(ns0) is NestedSamples
    assert_array_equal(ns0, ns1)
    assert ns0.root == ns1.root
    assert ns0.label == ns1.label
    assert ns0.beta == ns1.beta
    assert ns0 is not ns1
def test_MCMCSamples_importance_sample():
    """Check importance_sample add/replace/mask actions on MCMC chains."""
    np.random.seed(3)
    mc0 = read_chains('./tests/example_data/gd')
    with pytest.raises(NotImplementedError):
        mc0.importance_sample(mc0.logL, action='spam')
    # new gaussian logL
    logL_i = norm.logpdf(mc0.x3, loc=0.4, scale=0.1)
    # add logL
    mc1 = mc0.importance_sample(np.zeros_like(mc0.logL), action='add')
    assert_array_equal(mc0.logL, mc1.logL)
    assert_array_equal(mc0.get_weights(), mc1.get_weights())
    mc1 = mc0.importance_sample(logL_new=logL_i)
    assert np.all(mc1.logL.to_numpy() != mc0.logL.to_numpy())
    assert not np.all(mc1.get_weights() == mc0.get_weights())
    # replace logL
    mc2 = mc0.importance_sample(mc0.logL, action='replace')
    assert_array_equal(mc0.logL, mc2.logL)
    assert_array_equal(mc0.get_weights(), mc2.get_weights())
    mc2 = mc0.importance_sample(mc0.logL.to_numpy()+logL_i, action='replace')
    assert np.all(mc2.logL.to_numpy() != mc0.logL.to_numpy())
    assert not np.all(mc2.get_weights() == mc0.get_weights())
    # 'add' (the default) and 'replace' with the same increment must agree.
    # NOTE(review): the almost_equal check duplicates the exact-equal one
    # on the previous line and could be dropped.
    assert_array_equal(mc1.logL.to_numpy(), mc2.logL.to_numpy())
    assert_array_almost_equal(mc1.logL.to_numpy(), mc2.logL.to_numpy())
    # mask logL
    mask = ((mc0.x0 > -0.3) & (mc0.x2 > 0.2) & (mc0.x4 < 3.5)).to_numpy()
    mc_masked = mc0[mask]
    mc3 = mc0.importance_sample(mask, action='mask')
    assert_array_equal(mc_masked.logL, mc3.logL)
    assert_array_equal(mc_masked.get_weights(), mc3.get_weights())
    assert np.all(mc3.x0 > -0.3)
    # Derived frames keep metadata but are distinct objects.
    for mc in [mc1, mc2, mc3]:
        assert mc.root == mc0.root
        assert mc.label == mc0.label
        assert mc._metadata == mc0._metadata
        assert mc is not mc0
    mc0.importance_sample(mask, action='mask', inplace=True)
    assert isinstance(mc0, MCMCSamples)
    assert_array_equal(mc3, mc0)
    assert mc3.root == mc0.root
    assert mc3.label == mc0.label
    assert mc3._metadata == mc0._metadata
    assert mc3 is not mc0
def test_logzero_mask_prior_level():
    """Masking at the prior level shifts logZ by the volume ratio."""
    np.random.seed(3)
    ns0 = read_chains('./tests/example_data/pc')
    pi0 = ns0.set_beta(0)
    NS0 = ns0.stats(nsamples=2000)
    region = ((ns0.x0 > -0.3) & (ns0.x2 > 0.2) & (ns0.x4 < 3.5)).to_numpy()
    V_prior = pi0[region].get_weights().sum() / pi0.get_weights().sum()
    V_posterior = ns0[region].get_weights().sum() / ns0.get_weights().sum()
    expected = NS0.logZ.mean() + np.log(V_posterior) - np.log(V_prior)
    masked = merge_nested_samples((ns0[region],))
    NS1 = masked.stats(nsamples=2000)
    assert abs(NS1.logZ.mean() - expected) < 1.5 * NS1.logZ.std()
def test_logzero_mask_likelihood_level():
    """A huge finite logL penalty emulates a likelihood-level cut."""
    np.random.seed(3)
    ns0 = read_chains('./tests/example_data/pc')
    NS0 = ns0.stats(nsamples=2000)
    region = ((ns0.x0 > -0.3) & (ns0.x2 > 0.2) & (ns0.x4 < 3.5)).to_numpy()
    V_posterior = ns0[region].get_weights().sum() / ns0.get_weights().sum()
    expected = NS0.logZ.mean() + np.log(V_posterior)
    ns1 = read_chains('./tests/example_data/pc')
    ns1.logL = np.where(region, ns1.logL, -1e30)
    # Keep only points born below their own likelihood contour.
    alive = ns1.logL.to_numpy() > ns1.logL_birth.to_numpy()
    ns1 = merge_nested_samples((ns1[alive],))
    NS1 = ns1.stats(nsamples=2000)
    assert abs(NS1.logZ.mean() - expected) < 1.5 * NS1.logZ.std()
def test_recompute():
    """recompute() returns a copy and prunes invalid birth contours."""
    np.random.seed(3)
    pc = read_chains('./tests/example_data/pc')
    assert pc.recompute() is not pc
    # Corrupt one point so logL < logL_birth: it must be dropped.
    pc.loc[1000, ('logL', r'$\ln\mathcal{L}$')] = pc.logL_birth.iloc[1000]-1
    with pytest.warns(RuntimeWarning):
        recomputed = pc.recompute()
    assert len(recomputed) == len(pc) - 1
    # Old MultiNest chains cannot be recomputed.
    old = read_chains('./tests/example_data/mn_old')
    with pytest.raises(RuntimeError):
        old.recompute()
def test_NaN():
    """NaN likelihoods are warned about and dropped by recompute()."""
    np.random.seed(3)
    pc = read_chains('./tests/example_data/pc')
    with pytest.warns(RuntimeWarning, match="NaN encountered in logL."):
        corrupted = pc.copy()
        corrupted.loc[2, ('logL', r'$\ln\mathcal{L}$')] = np.nan
        corrupted.recompute(inplace=True)
    assert len(corrupted) == len(pc) - 1
    assert corrupted.nlive.iloc[0] == 124
def test_unsorted():
    """Constructing from shuffled rows reproduces the sorted run."""
    np.random.seed(4)
    pc = read_chains('./tests/example_data/pc')
    order = np.random.choice(len(pc), len(pc), replace=False)
    shuffled = NestedSamples(
        data=pc.loc[order, ['x0', 'x1', 'x2', 'x3', 'x4']],
        logL=pc.loc[order, 'logL'],
        logL_birth=pc.loc[order, 'logL_birth'])
    assert_array_equal(shuffled, pc)
def test_copy():
    """copy() returns a distinct object."""
    np.random.seed(3)
    samples = read_chains('./tests/example_data/pc')
    duplicate = samples.copy()
    assert duplicate is not samples
def test_plotting_with_integer_names():
    """Integer column names work for plotting and indexing."""
    np.random.seed(3)
    first = Samples(data=np.random.rand(1000, 3))
    second = Samples(data=np.random.rand(1000, 3))
    first.compress()
    axes = first.plot_2d([0, 1, 2])
    second.plot_2d(axes)
    axes = first.plot_1d([0, 1, 2])
    second.plot_1d(axes)
    assert first[0].shape == (1000,)
    assert_array_equal(first.loc[:, 0], first[0])
    assert_array_equal(first.loc[:, 0], first.iloc[:, 0])
    # The string '0' must not alias the integer column 0.
    with pytest.raises(KeyError):
        first['0']
def test_logL_list():
    """NestedSamples accepts plain python lists for data and likelihoods."""
    np.random.seed(5)
    reference = read_chains('./tests/example_data/pc')
    from_lists = NestedSamples(
        data=reference.iloc[:, :5].to_numpy().tolist(),
        logL=reference.logL.tolist(),
        logL_birth=reference.logL_birth.tolist())
    assert_array_equal(reference, from_lists)
def test_samples_dot_plot():
    """Check the pandas-style .plot accessor for anesthetic plot kinds."""
    samples = read_chains('./tests/example_data/pc')
    axes = samples[['x0', 'x1', 'x2', 'x3', 'x4']].plot.hist()
    assert len(axes.containers) == 5
    fig, ax = plt.subplots()
    axes = samples.x0.plot.kde(subplots=True, ax=ax)
    assert len(axes) == 1
    axes = samples[['x0', 'x1']].plot.kde(subplots=True)
    assert len(axes) == 2
    # 2d accessors pick up the LaTeX labels of the plotted columns.
    axes = samples.plot.kde_2d('x0', 'x1')
    assert len(axes.collections) > 0
    assert axes.get_xlabel() == '$x_0$'
    assert axes.get_ylabel() == '$x_1$'
    axes = samples.plot.hist_2d('x1', 'x0')
    assert len(axes.collections) == 1
    assert axes.get_xlabel() == '$x_1$'
    assert axes.get_ylabel() == '$x_0$'
    axes = samples.plot.scatter_2d('x2', 'x3')
    assert axes.get_xlabel() == '$x_2$'
    assert axes.get_ylabel() == '$x_3$'
    assert len(axes.lines) == 1
    fig, ax = plt.subplots()
    axes = samples.x1.plot.kde_1d(ax=ax)
    assert len(axes.lines) == 1
    fig, ax = plt.subplots()
    axes = samples.x2.plot.hist_1d(ax=ax)
    assert len(axes.containers) == 1
    fig, ax = plt.subplots()
    axes = samples.x2.plot.hist_1d(ax=ax, range=[0, 0.2])
    assert axes.get_xlim()[1] < 0.3
    # Without labels the raw column names appear on the axes.
    axes = samples.drop_labels().plot.kde_2d('x0', 'x1')
    assert len(axes.collections) > 0
    assert axes.get_xlabel() == 'x0'
    assert axes.get_ylabel() == 'x1'
    axes = samples.drop_labels().plot.hist_2d('x1', 'x0')
    assert len(axes.collections) == 1
    assert axes.get_xlabel() == 'x1'
    assert axes.get_ylabel() == 'x0'
    axes = samples.drop_labels().plot.scatter_2d('x2', 'x3')
    assert axes.get_xlabel() == 'x2'
    assert axes.get_ylabel() == 'x3'
    # fastkde variants are optional: skip silently if not installed.
    try:
        axes = samples.plot.fastkde_2d('x0', 'x1')
        assert axes.get_xlabel() == '$x_0$'
        assert axes.get_ylabel() == '$x_1$'
        assert len(axes.collections) > 0
        plt.close("all")
        axes = samples.drop_labels().plot.fastkde_2d('x0', 'x1')
        assert axes.get_xlabel() == 'x0'
        assert axes.get_ylabel() == 'x1'
        assert len(axes.collections) > 0
        plt.close("all")
        axes = samples.x0.plot.fastkde_1d()
        assert len(axes.lines) == 1
        plt.close("all")
        axes = samples[['x0', 'x1', 'x2', 'x3', 'x4']].plot.fastkde_1d()
        assert len(axes.lines) == 5
        plt.close("all")
    except ImportError:
        pass
@pytest.mark.parametrize('kind', ['kde', 'hist', 'kde_1d', 'hist_1d',
                                  skipif_no_fastkde('fastkde_1d')])
def test_samples_dot_plot_legend(kind):
    """Legend entries pick up the LaTeX labels of each plotted column."""
    samples = read_chains('./tests/example_data/pc')
    fig, ax = plt.subplots()
    for column in ['x0', 'x1', 'x2']:
        getattr(samples[column].plot, kind)(ax=ax)
    ax.legend()
    texts = [t.get_text() for t in ax.get_legend().get_texts()[:3]]
    assert texts == ['$x_0$', '$x_1$', '$x_2$']
def test_fixed_width():
    """Over-long column names are truncated with an ellipsis in the repr."""
    samples = read_chains('./tests/example_data/pc')
    labels = samples.get_labels()
    long_name = 'A really really long column label'
    columns = [long_name] + list(samples.columns[1:])
    samples.columns = columns
    assert 'A really r...' in str(samples)
    # The same holds with a (name, label) MultiIndex ...
    samples.columns = MultiIndex.from_arrays([columns, labels])
    assert 'A really re...' in str(WeightedLabelledDataFrame(samples))
    # ... and with non-string second-level labels.
    samples.columns = MultiIndex.from_arrays(
        [columns, np.random.rand(len(columns))])
    assert 'A really re...' in str(WeightedLabelledDataFrame(samples))
def test_samples_plot_labels():
    """Axis labels on 2d grids follow get_label, with and without labels."""
    samples = read_chains('./tests/example_data/pc')
    columns = ['x0', 'x1', 'x2', 'x3', 'x4']
    axes = samples.plot_2d(columns)
    # First column of the grid carries y-labels, last row the x-labels.
    for col, ax in zip(columns, axes.loc[:, 'x0']):
        assert samples.get_label(col, 1) == ax.get_ylabel()
    for col, ax in zip(columns, axes.loc['x4', :]):
        assert samples.get_label(col, 1) == ax.get_xlabel()
    samples = samples.drop_labels()
    axes = samples.plot_2d(columns)
    for col, ax in zip(columns, axes.loc[:, 'x0']):
        assert samples.get_label(col) == ax.get_ylabel()
    for col, ax in zip(columns, axes.loc['x4', :]):
        assert samples.get_label(col) == ax.get_xlabel()
    # Re-labelling a single column is also reflected on the axes.
    samples.set_label('x0', 'x0')
    axes = samples.plot_2d(columns)
    for col, ax in zip(columns, axes.loc[:, 'x0']):
        assert samples.get_label(col) == ax.get_ylabel()
    for col, ax in zip(columns, axes.loc['x4', :]):
        assert samples.get_label(col) == ax.get_xlabel()
@pytest.mark.parametrize('kind', ['kde', 'hist', skipif_no_fastkde('fastkde')])
def test_samples_empty_1d_ylabels(kind):
    """1d density axes carry no ylabel; diagonal twin axes stay blank."""
    samples = read_chains('./tests/example_data/pc')
    columns = ['x0', 'x1', 'x2', 'x3', 'x4']
    axes = samples.plot_1d(columns, kind=kind+'_1d')
    for col in columns:
        assert axes[col].get_ylabel() == ''
    axes = samples.plot_2d(columns, kind=kind)
    for col in columns:
        # Diagonal axes keep the column label; their twin axes stay blank.
        assert axes[col][col].get_ylabel() == samples.get_labels_map()[col]
        assert axes[col][col].twin.get_ylabel() == ''
def test_constructors():
    """Indexing operations preserve the weighted/labelled subclasses."""
    samples = read_chains('./tests/example_data/pc')
    # Column, row and transposed lookups all return labelled series.
    for series in (samples['x0'], samples.loc[0],
                   samples.T.loc['x0'], samples.T[0]):
        assert isinstance(series, WeightedLabelledSeries)
        assert series.islabelled()
    assert isinstance(samples['x0'].to_frame(), WeightedLabelledDataFrame)
def test_old_gui():
    """Old 1.x-style arguments raise errors or warn, as appropriate."""
    # with pytest.raises(TypeError): TODO reinstate for >=2.1
    with pytest.raises(ValueError):
        Samples(root='./tests/example_data/gd')
    # with pytest.raises(TypeError): TODO reinstate for >=2.1
    with pytest.raises(ValueError):
        MCMCSamples(root='./tests/example_data/gd')
    # with pytest.raises(TypeError): TODO reinstate for >=2.1
    with pytest.raises(ValueError):
        NestedSamples(root='./tests/example_data/pc')
    samples = read_chains('./tests/example_data/pc')
    # These call styles still work but emit a UserWarning.
    for kind in ['kde', 'hist']:
        with pytest.warns(UserWarning):
            samples.plot_2d(['x0', 'x1', 'x2'], kind={'lower': kind})
        with pytest.warns(UserWarning):
            samples.plot_1d(['x0', 'x1', 'x2'], kind=kind)
    # These old keyword arguments raise outright.
    with pytest.raises(ValueError):
        samples.plot_2d(['x0', 'x1', 'x2'], types={'lower': 'kde'})
    with pytest.raises(ValueError):
        samples.plot_1d(['x0', 'x1', 'x2'], plot_type='kde')
    with pytest.raises(NotImplementedError):
        samples.tex['x0'] = '$x_0$'
    with pytest.raises(NotImplementedError):
        samples.D(1000)
    with pytest.raises(NotImplementedError):
        samples.d(1000)
    fig, ax = plt.subplots()
    with pytest.raises(ValueError):
        samples.plot(ax, 'x0')
    with pytest.raises(ValueError):
        samples.plot(ax, 'x0', 'y0')
    with pytest.raises(NotImplementedError):
        make_2d_axes(['x0', 'y0'], tex={'x0': '$x_0$', 'y0': '$y_0$'})
    with pytest.raises(NotImplementedError):
        samples.ns_output(1000)
    with pytest.raises(NotImplementedError):
        make_2d_axes(['x0', 'y0'], tex={'x0': '$x_0$', 'y0': '$y_0$'})
    with pytest.raises(NotImplementedError):
        make_1d_axes(['x0', 'y0'], tex={'x0': '$x_0$', 'y0': '$y_0$'})
    with pytest.raises(NotImplementedError):
        samples.dlogX(1000)
def test_groupby_stats():
    """Groupby reductions keep weights and match per-chain statistics."""
    mcmc = read_chains('./tests/example_data/cb')
    params = ['x0', 'x1']
    chains = mcmc[params + ['chain']].groupby(('chain', '$n_\\mathrm{chain}$'))
    # Every reduction keeps the weighted flag ...
    assert chains.mean().isweighted() is True
    assert chains.std().isweighted() is True
    assert chains.median().isweighted() is True
    assert chains.var().isweighted() is True
    assert chains.kurt().isweighted() is True
    assert chains.kurtosis().isweighted() is True
    assert chains.skew().isweighted() is True
    assert chains.sem().isweighted() is True
    assert chains.corr().isweighted() is True
    assert chains.cov().isweighted() is True
    assert chains.hist().isweighted() is True
    assert chains.corrwith(mcmc).isweighted() is True
    # ... and carries the total weight of each chain.
    w1 = mcmc.loc[mcmc.chain == 1].get_weights().sum()
    w2 = mcmc.loc[mcmc.chain == 2].get_weights().sum()
    assert np.all(chains.mean().get_weights() == [w1, w2])
    assert np.all(chains.std().get_weights() == [w1, w2])
    assert np.all(chains.median().get_weights() == [w1, w2])
    assert np.all(chains.var().get_weights() == [w1, w2])
    assert np.all(chains.kurt().get_weights() == [w1, w2])
    assert np.all(chains.kurtosis().get_weights() == [w1, w2])
    assert np.all(chains.skew().get_weights() == [w1, w2])
    assert np.all(chains.sem().get_weights() == [w1, w2])
    w = [w1 for _ in range(len(params))] + [w2 for _ in range(len(params))]
    assert np.all(chains.corr().get_weights() == w)
    assert np.all(chains.cov().get_weights() == w)
    assert np.all(chains.corrwith(mcmc).get_weights() == [w1, w2])
    # Group statistics agree with masking each chain by hand.
    for chain in [1, 2]:
        mask = (mcmc.chain == chain).to_numpy()
        assert_allclose(mcmc.loc[mask, params].mean(),
                        chains.mean().loc[chain])
        assert_allclose(mcmc.loc[mask, params].std(),
                        chains.std().loc[chain])
        assert_allclose(mcmc.loc[mask, params].median(),
                        chains.median().loc[chain])
        assert_allclose(mcmc.loc[mask, params].var(),
                        chains.var().loc[chain])
        assert_allclose(mcmc.loc[mask, params].kurt(),
                        chains.kurt().loc[chain])
        assert_allclose(mcmc.loc[mask, params].kurtosis(),
                        chains.kurtosis().loc[chain])
        assert_allclose(mcmc.loc[mask, params].skew(),
                        chains.skew().loc[chain])
        assert_allclose(mcmc.loc[mask, params].sem(),
                        chains.sem().loc[chain])
        assert_allclose(mcmc.loc[mask, params].cov(),
                        chains.cov().loc[chain])
        assert_allclose(mcmc.loc[mask, params].corr(),
                        chains.corr().loc[chain])
        assert_allclose([1, 1], chains.corrwith(mcmc.loc[mask, params]
                                                ).loc[chain])
        # get_group gives the same answers as masking.
        group = chains.get_group(chain).drop(
            columns=('chain', '$n_\\mathrm{chain}$'))
        assert_allclose(mcmc.loc[mask, params].mean(), group.mean())
        assert_allclose(mcmc.loc[mask, params].std(), group.std())
        assert_allclose(mcmc.loc[mask, params].median(), group.median())
        assert_allclose(mcmc.loc[mask, params].var(), group.var())
        assert_allclose(mcmc.loc[mask, params].kurt(), group.kurt())
        assert_allclose(mcmc.loc[mask, params].kurtosis(), group.kurtosis())
        assert_allclose(mcmc.loc[mask, params].skew(), group.skew())
        assert_allclose(mcmc.loc[mask, params].sem(), group.sem())
        assert_allclose(mcmc.loc[mask, params].cov(), group.cov())
        assert_allclose(mcmc.loc[mask, params].corr(), group.corr())
    assert_allclose(mcmc[params].mean(), chains.mean().mean())
    # The same checks hold column-by-column.
    for col in params:
        if 'chain' not in col:
            for chain in [1, 2]:
                mask = (mcmc.chain == chain).to_numpy()
                assert_allclose(mcmc.loc[mask, col].mean(),
                                chains[col].mean().loc[chain])
                assert_allclose(mcmc.loc[mask, col].std(),
                                chains[col].std().loc[chain])
                assert_allclose(mcmc.loc[mask, col].median(),
                                chains[col].median().loc[chain])
                assert_allclose(mcmc.loc[mask, col].var(),
                                chains[col].var().loc[chain])
                assert_allclose(mcmc.loc[mask, col].kurt(),
                                chains[col].kurt().loc[chain])
                assert_allclose(mcmc.loc[mask, col].kurtosis(),
                                chains[col].kurtosis().loc[chain])
                assert_allclose(mcmc.loc[mask, col].skew(),
                                chains[col].skew().loc[chain])
                assert_allclose(mcmc.loc[mask, col].sem(),
                                chains[col].sem().loc[chain])
                assert_allclose(mcmc.loc[mask, col].cov(mcmc.loc[mask, col]),
                                chains[col].cov(mcmc.loc[mask, col])
                                .loc[chain])
                assert_allclose(mcmc.loc[mask, col].corr(mcmc.loc[mask, col]),
                                chains[col].corr(mcmc.loc[mask, col])
                                .loc[chain])
                q = np.random.rand()
                assert_allclose(mcmc.loc[mask, col].quantile(q),
                                chains[col].quantile(q).loc[chain])
                group = chains[col].get_group(chain)
                assert_allclose(mcmc.loc[mask, col].mean(), group.mean())
                assert_allclose(mcmc.loc[mask, col].std(), group.std())
                assert_allclose(mcmc.loc[mask, col].median(), group.median())
                assert_allclose(mcmc.loc[mask, col].var(), group.var())
                assert_allclose(mcmc.loc[mask, col].kurt(), group.kurt())
                assert_allclose(mcmc.loc[mask, col].kurtosis(),
                                group.kurtosis())
                assert_allclose(mcmc.loc[mask, col].skew(), group.skew())
                assert_allclose(mcmc.loc[mask, col].sem(), group.sem())
                assert_allclose(mcmc.loc[mask, col].cov(mcmc.loc[mask, col]),
                                group.cov(mcmc.loc[mask, col]))
                assert_allclose(mcmc.loc[mask, col].corr(mcmc.loc[mask, col]),
                                group.corr(mcmc.loc[mask, col]))
    # sample(n) draws n rows per group.
    sample = chains.sample(5)
    assert len(sample) == 10
    assert sample.value_counts('chain')[1] == 5
    assert sample.value_counts('chain')[2] == 5
    chains = mcmc.chain.groupby(mcmc.chain)
    sample = chains.sample(5)
    assert len(sample) == 10
    assert sample.value_counts()[1] == 5
    assert sample.value_counts()[2] == 5
def test_groupby_plots():
    """Groupby plotting matches plotting each chain separately."""
    mcmc = read_chains('./tests/example_data/cb')
    params = ['x0', 'x1']
    chains = mcmc[params + ['chain']].groupby(('chain', '$n_\\mathrm{chain}$'))
    # DataFrame-level hist: bar geometry must match the per-chain plots.
    for param in params:
        gb_plot = chains.hist(param)
        for chain in [1, 2]:
            mcmc_axes = mcmc.loc[mcmc.chain == chain].hist(param).flatten()
            gb_axes = gb_plot[chain].values[0].flatten()
            mcmc_widths = [p.get_width() for ax in mcmc_axes
                           for p in ax.patches]
            gb_widths = [p.get_width() for ax in gb_axes for p in ax.patches]
            assert_allclose(mcmc_widths, gb_widths)
            mcmc_heights = [p.get_height() for ax in mcmc_axes
                            for p in ax.patches]
            gb_heights = [p.get_height() for ax in gb_axes for p in ax.patches]
            assert_allclose(mcmc_heights, gb_heights)
        plt.close('all')
    # Series-level .plot.hist per column.
    for param in params:
        _, gb_ax = plt.subplots()
        gb_plots = chains[param].plot.hist(ax=gb_ax)
        _, mcmc_ax = plt.subplots()
        for chain, gb_ax in zip([1, 2], gb_plots):
            mcmc_ax = mcmc.loc[mcmc.chain == chain][param].plot.hist(
                ax=mcmc_ax)
            mcmc_widths = [p.get_width() for p in mcmc_ax.patches]
            gb_widths = [p.get_width() for p in gb_ax.patches]
            assert_allclose(mcmc_widths, gb_widths)
        plt.close('all')
    # Series-level .plot.hist_1d per column.
    for param in params:
        _, gb_ax = plt.subplots()
        gb_plots = chains[param].plot.hist_1d(ax=gb_ax)
        _, mcmc_ax = plt.subplots()
        for chain, gb_ax in zip([1, 2], gb_plots):
            mcmc_ax = mcmc.loc[mcmc.chain == chain][param].plot.hist_1d(
                ax=mcmc_ax)
            mcmc_widths = [p.get_width() for p in mcmc_ax.patches]
            gb_widths = [p.get_width() for p in gb_ax.patches]
            assert_allclose(mcmc_widths, gb_widths)
        plt.close('all')
    # kde lines must trace the same data.
    for param in params:
        _, gb_ax = plt.subplots()
        gb_plots = chains[param].plot.kde(ax=gb_ax)
        _, mcmc_ax = plt.subplots()
        for chain, gb_ax in zip([1, 2], gb_plots):
            mcmc_ax = mcmc.loc[mcmc.chain == chain][param].plot.kde(
                ax=mcmc_ax)
            [assert_allclose(m.get_data(), g.get_data())
             for m, g in zip(mcmc_ax.get_lines(), gb_ax.get_lines())]
        plt.close('all')
    for param in params:
        _, gb_ax = plt.subplots()
        gb_plots = chains[param].plot.kde_1d(ax=gb_ax)
        _, mcmc_ax = plt.subplots()
        for chain, gb_ax in zip([1, 2], gb_plots):
            mcmc_ax = mcmc.loc[mcmc.chain == chain][param].plot.kde_1d(
                ax=mcmc_ax)
            [assert_allclose(m.get_data(), g.get_data())
             for m, g in zip(mcmc_ax.get_lines(), gb_ax.get_lines())]
        plt.close('all')
    # 2d histograms: patch geometry and colours must agree.
    for chain, gb_ax in zip([1, 2], chains.plot.hist_2d(*params)):
        mcmc_ax = mcmc.loc[mcmc.chain == chain].plot.hist_2d(*params)
        mcmc_widths = [p.get_width() for p in mcmc_ax.patches]
        gb_widths = [p.get_width() for p in gb_ax.patches]
        assert_allclose(mcmc_widths, gb_widths)
        mcmc_heights = [p.get_height() for p in mcmc_ax.patches]
        gb_heights = [p.get_height() for p in gb_ax.patches]
        assert_allclose(mcmc_heights, gb_heights)
        mcmc_colors = [p.get_facecolor() for p in mcmc_ax.patches]
        gb_colors = [p.get_facecolor() for p in gb_ax.patches]
        assert_allclose(mcmc_colors, gb_colors)
    plt.close('all')
    # 2d kde contours: vertices and colours must agree.
    for chain, gb_ax in zip([1, 2], chains.plot.kde_2d(*params)):
        mcmc_ax = mcmc.loc[mcmc.chain == chain].plot.kde_2d(*params)
        mcmc_verts = [p.get_verts() for p in mcmc_ax.patches]
        gb_verts = [p.get_verts() for p in gb_ax.patches]
        assert_allclose(mcmc_verts, gb_verts)
        mcmc_colors = [p.get_facecolor() for p in mcmc_ax.patches]
        gb_colors = [p.get_facecolor() for p in gb_ax.patches]
        assert_allclose(mcmc_colors, gb_colors)
    plt.close('all')
    # fastkde variants, only when fastkde is installed.
    if not fastkde_mark_skip.args[0]:
        for param in params:
            _, gb_ax = plt.subplots()
            gb_plots = chains[param].plot.fastkde_1d(ax=gb_ax)
            _, mcmc_ax = plt.subplots()
            for chain, gb_ax in zip([1, 2], gb_plots):
                mcmc_ax = mcmc.loc[mcmc.chain == chain][param].plot.fastkde_1d(
                    ax=mcmc_ax)
                [assert_allclose(m.get_data(), g.get_data())
                 for m, g in zip(mcmc_ax.get_lines(), gb_ax.get_lines())]
            plt.close('all')
        for chain, gb_ax in zip([1, 2], chains.plot.fastkde_2d(*params)):
            mcmc_ax = mcmc.loc[mcmc.chain == chain].plot.fastkde_2d(*params)
            mcmc_verts = [p.get_verts() for p in mcmc_ax.patches]
            gb_verts = [p.get_verts() for p in gb_ax.patches]
            assert_allclose(mcmc_verts, gb_verts)
            mcmc_colors = [p.get_facecolor() for p in mcmc_ax.patches]
            gb_colors = [p.get_facecolor() for p in gb_ax.patches]
            assert_allclose(mcmc_colors, gb_colors)
        plt.close('all')
def test_hist_1d_no_Frequency():
    """hist_1d axes never carry pandas' default 'Frequency' ylabel."""
    np.random.seed(42)
    pc = read_chains("./tests/example_data/pc")
    grid = pc.plot_2d(['x0', 'x1', 'x2'], kind={'diagonal': 'hist_1d'})
    for i in range(len(grid)):
        assert grid.iloc[i, i].twin.get_ylabel() != 'Frequency'
    for ax in pc.plot_1d(['x0', 'x1', 'x2'], kind='hist_1d'):
        assert ax.get_ylabel() != 'Frequency'
    fig, ax = plt.subplots()
    assert pc['x0'].plot(kind='hist_1d', ax=ax).get_ylabel() != 'Frequency'
    fig, ax = plt.subplots()
    assert pc.x0.plot.hist_1d(ax=ax).get_ylabel() != 'Frequency'
@pytest.mark.parametrize('kind', ['kde', 'hist'])
def test_axes_limits_1d(kind):
    """1d axis limits expand (never shrink) as new data is overplotted."""
    np.random.seed(42)
    pc = read_chains("./tests/example_data/pc")
    axes = pc.plot_1d('x0', kind=f'{kind}_1d')
    xmin, xmax = axes['x0'].get_xlim()
    assert -0.9 < xmin < 0
    assert 0 < xmax < 0.9
    # Shift the data right and overplot: the upper limit grows.
    pc.x0 += 3
    pc.plot_1d(axes, kind=f'{kind}_1d')
    xmin, xmax = axes['x0'].get_xlim()
    assert -0.9 < xmin < 0
    assert 3 < xmax < 3.9
    # Shift left and overplot: the lower limit grows too.
    pc.x0 -= 6
    pc.plot_1d(axes, kind=f'{kind}_1d')
    xmin, xmax = axes['x0'].get_xlim()
    assert -3.9 < xmin < -3
    assert 3 < xmax < 3.9
@pytest.mark.parametrize('kind, kwargs',
                         [('kde', {}),
                          ('hist', {'levels': [0.95, 0.68]}),
                          ])
def test_axes_limits_2d(kind, kwargs):
    """2d axis limits expand (never shrink) as new data is overplotted."""
    np.random.seed(42)
    pc = read_chains("./tests/example_data/pc")
    axes = pc.plot_2d(['x0', 'x1'], kind=f'{kind}_2d', **kwargs)

    def limits():
        ax = axes['x0']['x1']
        return ax.get_xlim() + ax.get_ylim()

    xmin, xmax, ymin, ymax = limits()
    assert -0.9 < xmin < 0 and 0 < xmax < 0.9
    assert -0.9 < ymin < 0 and 0 < ymax < 0.9
    # Shift the data and overplot: limits grow to include both clouds.
    pc.x0 += 3
    pc.x1 -= 3
    pc.plot_2d(axes, kind=f'{kind}_2d', **kwargs)
    xmin, xmax, ymin, ymax = limits()
    assert -0.9 < xmin < 0 and 3 < xmax < 3.9
    assert -3.9 < ymin < -3 and 0 < ymax < 0.9
    pc.x0 -= 6
    pc.x1 += 6
    pc.plot_2d(axes, kind=f'{kind}_2d', **kwargs)
    xmin, xmax, ymin, ymax = limits()
    assert -3.9 < xmin < -3 and 3 < xmax < 3.9
    assert -3.9 < ymin < -3 and 3 < ymax < 3.9
|
handley-labREPO_NAMEanestheticPATH_START.@anesthetic_extracted@anesthetic-master@tests@test_samples.py@.PATH_END.py
|
{
"filename": "test_csr.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/scipy/py3/scipy/sparse/tests/test_csr.py",
"type": "Python"
}
|
import numpy as np
from numpy.testing import assert_array_almost_equal, assert_
from scipy.sparse import csr_matrix, hstack
import pytest
def _check_csr_rowslice(i, sl, X, Xcsr):
np_slice = X[i, sl]
csr_slice = Xcsr[i, sl]
assert_array_almost_equal(np_slice, csr_slice.toarray()[0])
assert_(type(csr_slice) is csr_matrix)
def test_csr_rowslice():
    """Row slices of a csr_matrix must match the dense equivalent."""
    n = 10
    np.random.seed(0)
    dense = np.random.random((n, n))
    dense[dense > 0.7] = 0
    sparse = csr_matrix(dense)

    # Cover forward, reversed, strided and negative-strided slices.
    row_slices = (slice(None, None, None),
                  slice(None, None, -1),
                  slice(1, -2, 2),
                  slice(-2, 1, -2))
    for row in range(n):
        for sl in row_slices:
            result = sparse[row, sl]
            assert_array_almost_equal(dense[row, sl], result.toarray()[0])
            assert_(type(result) is csr_matrix)
def test_csr_getrow():
    """getrow(i) must equal the dense row and remain a csr_matrix."""
    n = 10
    np.random.seed(0)
    dense = np.random.random((n, n))
    dense[dense > 0.7] = 0
    sparse = csr_matrix(dense)
    for row in range(n):
        extracted = sparse.getrow(row)
        assert_array_almost_equal(dense[row:row + 1, :], extracted.toarray())
        assert_(type(extracted) is csr_matrix)
def test_csr_getcol():
    """getcol(i) must equal the dense column and remain a csr_matrix."""
    n = 10
    np.random.seed(0)
    dense = np.random.random((n, n))
    dense[dense > 0.7] = 0
    sparse = csr_matrix(dense)
    for col in range(n):
        extracted = sparse.getcol(col)
        assert_array_almost_equal(dense[:, col:col + 1], extracted.toarray())
        assert_(type(extracted) is csr_matrix)
@pytest.mark.parametrize("matrix_input, axis, expected_shape",
                         [(csr_matrix([[1, 0, 0, 0],
                                       [0, 0, 0, 0],
                                       [0, 2, 3, 0]]),
                           0, (0, 4)),
                          (csr_matrix([[1, 0, 0, 0],
                                       [0, 0, 0, 0],
                                       [0, 2, 3, 0]]),
                           1, (3, 0)),
                          (csr_matrix([[1, 0, 0, 0],
                                       [0, 0, 0, 0],
                                       [0, 2, 3, 0]]),
                           'both', (0, 0)),
                          (csr_matrix([[0, 1, 0, 0, 0],
                                       [0, 0, 0, 0, 0],
                                       [0, 0, 2, 3, 0]]),
                           0, (0, 5))])
def test_csr_empty_slices(matrix_input, axis, expected_shape):
    """Empty slices of a csr_matrix keep the correct (possibly zero) shape.

    See gh-11127 for related discussion.
    """
    # ``.A`` is deprecated on sparse matrices (and absent from sparse
    # arrays); read the shape directly and densify via ``.toarray()``.
    # slice_1:slice_2 is an empty forward slice; slice_1:slice_3 is an
    # empty backward slice -- both must give the same empty shape.
    slice_1 = matrix_input.shape[0] - 1
    slice_2 = slice_1
    slice_3 = slice_2 - 1

    if axis == 0:
        actual_shape_1 = matrix_input[slice_1:slice_2, :].toarray().shape
        actual_shape_2 = matrix_input[slice_1:slice_3, :].toarray().shape
    elif axis == 1:
        actual_shape_1 = matrix_input[:, slice_1:slice_2].toarray().shape
        actual_shape_2 = matrix_input[:, slice_1:slice_3].toarray().shape
    elif axis == 'both':
        actual_shape_1 = matrix_input[slice_1:slice_2,
                                      slice_1:slice_2].toarray().shape
        actual_shape_2 = matrix_input[slice_1:slice_3,
                                      slice_1:slice_3].toarray().shape

    assert actual_shape_1 == expected_shape
    assert actual_shape_1 == actual_shape_2
def test_csr_bool_indexing():
    """Boolean list indices and boolean ndarray indices must select identically."""
    data = csr_matrix([[0, 1, 2], [3, 4, 5], [6, 7, 8]])
    row_mask = [False, True, False]
    full_mask = [[False, True, False],
                 [False, True, False],
                 [False, True, False]]
    pair_mask = ([False, True, False], [False, True, False])

    # 1-d row mask: the result is sparse, so densify before comparing.
    by_list1 = data[row_mask].toarray()
    by_array1 = data[np.array(row_mask)].toarray()
    assert (by_list1 == by_array1).all()

    # 2-d element mask: list-of-lists vs ndarray must agree.
    by_list2 = data[full_mask]
    by_array2 = data[np.array(full_mask)]
    assert (by_list2 == by_array2).all()

    # Tuple of per-axis masks: lists vs ndarrays must agree.
    by_list3 = data[pair_mask]
    by_array3 = data[(np.array(pair_mask[0]), np.array(pair_mask[1]))]
    assert (by_list3 == by_array3).all()
def test_csr_hstack_int64():
    """
    Tests if hstack properly promotes to indices and indptr arrays to np.int64
    when using np.int32 during concatenation would result in either array
    overflowing.
    """
    max_int32 = np.iinfo(np.int32).max

    # First case: indices would overflow with int32
    data = [1.0]
    row = [0]

    max_indices_1 = max_int32 - 1
    max_indices_2 = 3

    # Individual indices arrays are representable with int32
    col_1 = [max_indices_1 - 1]
    col_2 = [max_indices_2 - 1]

    # The shape is inferred from the largest index, so X_1 is extremely
    # wide but still tiny in memory (one stored element).
    X_1 = csr_matrix((data, (row, col_1)))
    X_2 = csr_matrix((data, (row, col_2)))

    assert max(max_indices_1 - 1, max_indices_2 - 1) < max_int32
    assert X_1.indices.dtype == X_1.indptr.dtype == np.int32
    assert X_2.indices.dtype == X_2.indptr.dtype == np.int32

    # ... but when concatenating their CSR matrices, the resulting indices
    # array can't be represented with int32 and must be promoted to int64.
    X_hs = hstack([X_1, X_2], format="csr")

    # The largest stacked column index is X_2's index offset by X_1's width.
    assert X_hs.indices.max() == max_indices_1 + max_indices_2 - 1
    assert max_indices_1 + max_indices_2 - 1 > max_int32
    assert X_hs.indices.dtype == X_hs.indptr.dtype == np.int64

    # Even if the matrices are empty, we must account for their size
    # contribution so that we may safely set the final elements.
    X_1_empty = csr_matrix(X_1.shape)
    X_2_empty = csr_matrix(X_2.shape)
    X_hs_empty = hstack([X_1_empty, X_2_empty], format="csr")

    assert X_hs_empty.shape == X_hs.shape
    assert X_hs_empty.indices.dtype == np.int64

    # Should be just small enough to stay in int32 after stack. Note that
    # we theoretically could support indices.max() == max_int32, but due to an
    # edge-case in the underlying sparsetools code
    # (namely the `coo_tocsr` routine),
    # we require that max(X_hs_32.shape) < max_int32 as well.
    # Hence we can only support max_int32 - 1.
    col_3 = [max_int32 - max_indices_1 - 1]
    X_3 = csr_matrix((data, (row, col_3)))
    X_hs_32 = hstack([X_1, X_3], format="csr")
    # Boundary case: the stack lands exactly on the largest int32-safe
    # index, so no promotion to int64 should happen.
    assert X_hs_32.indices.dtype == np.int32
    assert X_hs_32.indices.max() == max_int32 - 1
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@scipy@py3@scipy@sparse@tests@test_csr.py@.PATH_END.py
|
{
"filename": "license.md",
"repo_name": "golmschenk/eesunhong",
"repo_path": "eesunhong_extracted/eesunhong-main/third_party/minuit/license.md",
"type": "Markdown"
}
|
GNU Lesser General Public License
=================================
_Version 2.1, February 1999_
_Copyright © 1991, 1999 Free Software Foundation, Inc._
_51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA_
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
_This is the first released version of the Lesser GPL. It also counts
as the successor of the GNU Library Public License, version 2, hence
the version number 2.1._
### Preamble
The licenses for most software are designed to take away your
freedom to share and change it. By contrast, the GNU General Public
Licenses are intended to guarantee your freedom to share and change
free software--to make sure the software is free for all its users.
This license, the Lesser General Public License, applies to some
specially designated software packages--typically libraries--of the
Free Software Foundation and other authors who decide to use it. You
can use it too, but we suggest you first think carefully about whether
this license or the ordinary General Public License is the better
strategy to use in any particular case, based on the explanations below.
When we speak of free software, we are referring to freedom of use,
not price. Our General Public Licenses are designed to make sure that
you have the freedom to distribute copies of free software (and charge
for this service if you wish); that you receive source code or can get
it if you want it; that you can change the software and use pieces of
it in new free programs; and that you are informed that you can do
these things.
To protect your rights, we need to make restrictions that forbid
distributors to deny you these rights or to ask you to surrender these
rights. These restrictions translate to certain responsibilities for
you if you distribute copies of the library or if you modify it.
For example, if you distribute copies of the library, whether gratis
or for a fee, you must give the recipients all the rights that we gave
you. You must make sure that they, too, receive or can get the source
code. If you link other code with the library, you must provide
complete object files to the recipients, so that they can relink them
with the library after making changes to the library and recompiling
it. And you must show them these terms so they know their rights.
We protect your rights with a two-step method: **(1)** we copyright the
library, and **(2)** we offer you this license, which gives you legal
permission to copy, distribute and/or modify the library.
To protect each distributor, we want to make it very clear that
there is no warranty for the free library. Also, if the library is
modified by someone else and passed on, the recipients should know
that what they have is not the original version, so that the original
author's reputation will not be affected by problems that might be
introduced by others.
Finally, software patents pose a constant threat to the existence of
any free program. We wish to make sure that a company cannot
effectively restrict the users of a free program by obtaining a
restrictive license from a patent holder. Therefore, we insist that
any patent license obtained for a version of the library must be
consistent with the full freedom of use specified in this license.
Most GNU software, including some libraries, is covered by the
ordinary GNU General Public License. This license, the GNU Lesser
General Public License, applies to certain designated libraries, and
is quite different from the ordinary General Public License. We use
this license for certain libraries in order to permit linking those
libraries into non-free programs.
When a program is linked with a library, whether statically or using
a shared library, the combination of the two is legally speaking a
combined work, a derivative of the original library. The ordinary
General Public License therefore permits such linking only if the
entire combination fits its criteria of freedom. The Lesser General
Public License permits more lax criteria for linking other code with
the library.
We call this license the “Lesser” General Public License because it
does Less to protect the user's freedom than the ordinary General
Public License. It also provides other free software developers Less
of an advantage over competing non-free programs. These disadvantages
are the reason we use the ordinary General Public License for many
libraries. However, the Lesser license provides advantages in certain
special circumstances.
For example, on rare occasions, there may be a special need to
encourage the widest possible use of a certain library, so that it becomes
a de-facto standard. To achieve this, non-free programs must be
allowed to use the library. A more frequent case is that a free
library does the same job as widely used non-free libraries. In this
case, there is little to gain by limiting the free library to free
software only, so we use the Lesser General Public License.
In other cases, permission to use a particular library in non-free
programs enables a greater number of people to use a large body of
free software. For example, permission to use the GNU C Library in
non-free programs enables many more people to use the whole GNU
operating system, as well as its variant, the GNU/Linux operating
system.
Although the Lesser General Public License is Less protective of the
users' freedom, it does ensure that the user of a program that is
linked with the Library has the freedom and the wherewithal to run
that program using a modified version of the Library.
The precise terms and conditions for copying, distribution and
modification follow. Pay close attention to the difference between a
“work based on the library” and a “work that uses the library”. The
former contains code derived from the library, whereas the latter must
be combined with the library in order to run.
### TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
**0.** This License Agreement applies to any software library or other
program which contains a notice placed by the copyright holder or
other authorized party saying it may be distributed under the terms of
this Lesser General Public License (also called “this License”).
Each licensee is addressed as “you”.
A “library” means a collection of software functions and/or data
prepared so as to be conveniently linked with application programs
(which use some of those functions and data) to form executables.
The “Library”, below, refers to any such software library or work
which has been distributed under these terms. A “work based on the
Library” means either the Library or any derivative work under
copyright law: that is to say, a work containing the Library or a
portion of it, either verbatim or with modifications and/or translated
straightforwardly into another language. (Hereinafter, translation is
included without limitation in the term “modification”.)
“Source code” for a work means the preferred form of the work for
making modifications to it. For a library, complete source code means
all the source code for all modules it contains, plus any associated
interface definition files, plus the scripts used to control compilation
and installation of the library.
Activities other than copying, distribution and modification are not
covered by this License; they are outside its scope. The act of
running a program using the Library is not restricted, and output from
such a program is covered only if its contents constitute a work based
on the Library (independent of the use of the Library in a tool for
writing it). Whether that is true depends on what the Library does
and what the program that uses the Library does.
**1.** You may copy and distribute verbatim copies of the Library's
complete source code as you receive it, in any medium, provided that
you conspicuously and appropriately publish on each copy an
appropriate copyright notice and disclaimer of warranty; keep intact
all the notices that refer to this License and to the absence of any
warranty; and distribute a copy of this License along with the
Library.
You may charge a fee for the physical act of transferring a copy,
and you may at your option offer warranty protection in exchange for a
fee.
**2.** You may modify your copy or copies of the Library or any portion
of it, thus forming a work based on the Library, and copy and
distribute such modifications or work under the terms of Section 1
above, provided that you also meet all of these conditions:
* **a)** The modified work must itself be a software library.
* **b)** You must cause the files modified to carry prominent notices
stating that you changed the files and the date of any change.
* **c)** You must cause the whole of the work to be licensed at no
charge to all third parties under the terms of this License.
* **d)** If a facility in the modified Library refers to a function or a
table of data to be supplied by an application program that uses
the facility, other than as an argument passed when the facility
is invoked, then you must make a good faith effort to ensure that,
in the event an application does not supply such function or
table, the facility still operates, and performs whatever part of
its purpose remains meaningful.
(For example, a function in a library to compute square roots has
a purpose that is entirely well-defined independent of the
application. Therefore, Subsection 2d requires that any
application-supplied function or table used by this function must
be optional: if the application does not supply it, the square
root function must still compute square roots.)
These requirements apply to the modified work as a whole. If
identifiable sections of that work are not derived from the Library,
and can be reasonably considered independent and separate works in
themselves, then this License, and its terms, do not apply to those
sections when you distribute them as separate works. But when you
distribute the same sections as part of a whole which is a work based
on the Library, the distribution of the whole must be on the terms of
this License, whose permissions for other licensees extend to the
entire whole, and thus to each and every part regardless of who wrote
it.
Thus, it is not the intent of this section to claim rights or contest
your rights to work written entirely by you; rather, the intent is to
exercise the right to control the distribution of derivative or
collective works based on the Library.
In addition, mere aggregation of another work not based on the Library
with the Library (or with a work based on the Library) on a volume of
a storage or distribution medium does not bring the other work under
the scope of this License.
**3.** You may opt to apply the terms of the ordinary GNU General Public
License instead of this License to a given copy of the Library. To do
this, you must alter all the notices that refer to this License, so
that they refer to the ordinary GNU General Public License, version 2,
instead of to this License. (If a newer version than version 2 of the
ordinary GNU General Public License has appeared, then you can specify
that version instead if you wish.) Do not make any other change in
these notices.
Once this change is made in a given copy, it is irreversible for
that copy, so the ordinary GNU General Public License applies to all
subsequent copies and derivative works made from that copy.
This option is useful when you wish to copy part of the code of
the Library into a program that is not a library.
**4.** You may copy and distribute the Library (or a portion or
derivative of it, under Section 2) in object code or executable form
under the terms of Sections 1 and 2 above provided that you accompany
it with the complete corresponding machine-readable source code, which
must be distributed under the terms of Sections 1 and 2 above on a
medium customarily used for software interchange.
If distribution of object code is made by offering access to copy
from a designated place, then offering equivalent access to copy the
source code from the same place satisfies the requirement to
distribute the source code, even though third parties are not
compelled to copy the source along with the object code.
**5.** A program that contains no derivative of any portion of the
Library, but is designed to work with the Library by being compiled or
linked with it, is called a “work that uses the Library”. Such a
work, in isolation, is not a derivative work of the Library, and
therefore falls outside the scope of this License.
However, linking a “work that uses the Library” with the Library
creates an executable that is a derivative of the Library (because it
contains portions of the Library), rather than a “work that uses the
library”. The executable is therefore covered by this License.
Section 6 states terms for distribution of such executables.
When a “work that uses the Library” uses material from a header file
that is part of the Library, the object code for the work may be a
derivative work of the Library even though the source code is not.
Whether this is true is especially significant if the work can be
linked without the Library, or if the work is itself a library. The
threshold for this to be true is not precisely defined by law.
If such an object file uses only numerical parameters, data
structure layouts and accessors, and small macros and small inline
functions (ten lines or less in length), then the use of the object
file is unrestricted, regardless of whether it is legally a derivative
work. (Executables containing this object code plus portions of the
Library will still fall under Section 6.)
Otherwise, if the work is a derivative of the Library, you may
distribute the object code for the work under the terms of Section 6.
Any executables containing that work also fall under Section 6,
whether or not they are linked directly with the Library itself.
**6.** As an exception to the Sections above, you may also combine or
link a “work that uses the Library” with the Library to produce a
work containing portions of the Library, and distribute that work
under terms of your choice, provided that the terms permit
modification of the work for the customer's own use and reverse
engineering for debugging such modifications.
You must give prominent notice with each copy of the work that the
Library is used in it and that the Library and its use are covered by
this License. You must supply a copy of this License. If the work
during execution displays copyright notices, you must include the
copyright notice for the Library among them, as well as a reference
directing the user to the copy of this License. Also, you must do one
of these things:
* **a)** Accompany the work with the complete corresponding
machine-readable source code for the Library including whatever
changes were used in the work (which must be distributed under
Sections 1 and 2 above); and, if the work is an executable linked
with the Library, with the complete machine-readable “work that
uses the Library”, as object code and/or source code, so that the
user can modify the Library and then relink to produce a modified
executable containing the modified Library. (It is understood
that the user who changes the contents of definitions files in the
Library will not necessarily be able to recompile the application
to use the modified definitions.)
* **b)** Use a suitable shared library mechanism for linking with the
Library. A suitable mechanism is one that (1) uses at run time a
copy of the library already present on the user's computer system,
rather than copying library functions into the executable, and (2)
will operate properly with a modified version of the library, if
the user installs one, as long as the modified version is
interface-compatible with the version that the work was made with.
* **c)** Accompany the work with a written offer, valid for at
least three years, to give the same user the materials
specified in Subsection 6a, above, for a charge no more
than the cost of performing this distribution.
* **d)** If distribution of the work is made by offering access to copy
from a designated place, offer equivalent access to copy the above
specified materials from the same place.
* **e)** Verify that the user has already received a copy of these
materials or that you have already sent this user a copy.
For an executable, the required form of the “work that uses the
Library” must include any data and utility programs needed for
reproducing the executable from it. However, as a special exception,
the materials to be distributed need not include anything that is
normally distributed (in either source or binary form) with the major
components (compiler, kernel, and so on) of the operating system on
which the executable runs, unless that component itself accompanies
the executable.
It may happen that this requirement contradicts the license
restrictions of other proprietary libraries that do not normally
accompany the operating system. Such a contradiction means you cannot
use both them and the Library together in an executable that you
distribute.
**7.** You may place library facilities that are a work based on the
Library side-by-side in a single library together with other library
facilities not covered by this License, and distribute such a combined
library, provided that the separate distribution of the work based on
the Library and of the other library facilities is otherwise
permitted, and provided that you do these two things:
* **a)** Accompany the combined library with a copy of the same work
based on the Library, uncombined with any other library
facilities. This must be distributed under the terms of the
Sections above.
* **b)** Give prominent notice with the combined library of the fact
that part of it is a work based on the Library, and explaining
where to find the accompanying uncombined form of the same work.
**8.** You may not copy, modify, sublicense, link with, or distribute
the Library except as expressly provided under this License. Any
attempt otherwise to copy, modify, sublicense, link with, or
distribute the Library is void, and will automatically terminate your
rights under this License. However, parties who have received copies,
or rights, from you under this License will not have their licenses
terminated so long as such parties remain in full compliance.
**9.** You are not required to accept this License, since you have not
signed it. However, nothing else grants you permission to modify or
distribute the Library or its derivative works. These actions are
prohibited by law if you do not accept this License. Therefore, by
modifying or distributing the Library (or any work based on the
Library), you indicate your acceptance of this License to do so, and
all its terms and conditions for copying, distributing or modifying
the Library or works based on it.
**10.** Each time you redistribute the Library (or any work based on the
Library), the recipient automatically receives a license from the
original licensor to copy, distribute, link with or modify the Library
subject to these terms and conditions. You may not impose any further
restrictions on the recipients' exercise of the rights granted herein.
You are not responsible for enforcing compliance by third parties with
this License.
**11.** If, as a consequence of a court judgment or allegation of patent
infringement or for any other reason (not limited to patent issues),
conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not
excuse you from the conditions of this License. If you cannot
distribute so as to satisfy simultaneously your obligations under this
License and any other pertinent obligations, then as a consequence you
may not distribute the Library at all. For example, if a patent
license would not permit royalty-free redistribution of the Library by
all those who receive copies directly or indirectly through you, then
the only way you could satisfy both it and this License would be to
refrain entirely from distribution of the Library.
If any portion of this section is held invalid or unenforceable under any
particular circumstance, the balance of the section is intended to apply,
and the section as a whole is intended to apply in other circumstances.
It is not the purpose of this section to induce you to infringe any
patents or other property right claims or to contest validity of any
such claims; this section has the sole purpose of protecting the
integrity of the free software distribution system which is
implemented by public license practices. Many people have made
generous contributions to the wide range of software distributed
through that system in reliance on consistent application of that
system; it is up to the author/donor to decide if he or she is willing
to distribute software through any other system and a licensee cannot
impose that choice.
This section is intended to make thoroughly clear what is believed to
be a consequence of the rest of this License.
**12.** If the distribution and/or use of the Library is restricted in
certain countries either by patents or by copyrighted interfaces, the
original copyright holder who places the Library under this License may add
an explicit geographical distribution limitation excluding those countries,
so that distribution is permitted only in or among countries not thus
excluded. In such case, this License incorporates the limitation as if
written in the body of this License.
**13.** The Free Software Foundation may publish revised and/or new
versions of the Lesser General Public License from time to time.
Such new versions will be similar in spirit to the present version,
but may differ in detail to address new problems or concerns.
Each version is given a distinguishing version number. If the Library
specifies a version number of this License which applies to it and
“any later version”, you have the option of following the terms and
conditions either of that version or of any later version published by
the Free Software Foundation. If the Library does not specify a
license version number, you may choose any version ever published by
the Free Software Foundation.
**14.** If you wish to incorporate parts of the Library into other free
programs whose distribution conditions are incompatible with these,
write to the author to ask for permission. For software which is
copyrighted by the Free Software Foundation, write to the Free
Software Foundation; we sometimes make exceptions for this. Our
decision will be guided by the two goals of preserving the free status
of all derivatives of our free software and of promoting the sharing
and reuse of software generally.
### NO WARRANTY
**15.** BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO
WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW.
EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR
OTHER PARTIES PROVIDE THE LIBRARY “AS IS” WITHOUT WARRANTY OF ANY
KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE
LIBRARY IS WITH YOU. SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME
THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
**16.** IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN
WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY
AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU
FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR
CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE
LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING
RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A
FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF
SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
DAMAGES.
_END OF TERMS AND CONDITIONS_
### How to Apply These Terms to Your New Libraries
If you develop a new library, and you want it to be of the greatest
possible use to the public, we recommend making it free software that
everyone can redistribute and change. You can do so by permitting
redistribution under these terms (or, alternatively, under the terms of the
ordinary General Public License).
To apply these terms, attach the following notices to the library. It is
safest to attach them to the start of each source file to most effectively
convey the exclusion of warranty; and each file should have at least the
“copyright” line and a pointer to where the full notice is found.
<one line to give the library's name and a brief idea of what it does.>
Copyright (C) <year> <name of author>
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
Also add information on how to contact you by electronic and paper mail.
You should also get your employer (if you work as a programmer) or your
school, if any, to sign a “copyright disclaimer” for the library, if
necessary. Here is a sample; alter the names:
Yoyodyne, Inc., hereby disclaims all copyright interest in the
library `Frob' (a library for tweaking knobs) written by James Random Hacker.
<signature of Ty Coon>, 1 April 1990
Ty Coon, President of Vice
That's all there is to it!
|
golmschenkREPO_NAMEeesunhongPATH_START.@eesunhong_extracted@eesunhong-main@third_party@minuit@license.md@.PATH_END.py
|
{
"filename": "prizmo_main.py",
"repo_name": "tgrassi/prizmo",
"repo_path": "prizmo_extracted/prizmo-main/src_py/prizmo_main.py",
"type": "Python"
}
|
from prizmo_commons import print_title
from prizmo_preprocess import preprocess
import os
def prepare(H2_inc, CO_inc):
    """Patch ../main.f90 preprocessor tags for optional H2/CO column tracking."""
    print_title("main")
    target = "../main.f90"
    if not os.path.isfile(target):
        print("skipping, main.f90 file not found")
        return

    # Collect the Fortran lines to splice into the UPDATE_COLUMN tag.
    column_updates = []
    if H2_inc:
        preprocess(target, {"INITIAL_H2": "x(prizmo_idx_H2) = 0d0"})
        column_updates.append(
            "rad_Ncol_H2 = rad_Ncol_H2 + x(prizmo_idx_H2) * dr\n")
    else:
        preprocess(target, {"INITIAL_H2": ""})
    if CO_inc:
        column_updates.append(
            "rad_Ncol_CO = rad_Ncol_CO + x(prizmo_idx_CO) * dr\n")
        column_updates.append(
            "vert_Ncol_CO(ix) = vert_Ncol_CO(ix) + x(prizmo_idx_CO) * dz\n")
    preprocess(target, {"UPDATE_COLUMN": "".join(column_updates)})
|
tgrassiREPO_NAMEprizmoPATH_START.@prizmo_extracted@prizmo-main@src_py@prizmo_main.py@.PATH_END.py
|
{
"filename": "_autotypenumbers.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/layout/scene/zaxis/_autotypenumbers.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class AutotypenumbersValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    """Enumerated validator for ``layout.scene.zaxis.autotypenumbers``."""

    def __init__(
        self, plotly_name="autotypenumbers", parent_name="layout.scene.zaxis", **kwargs
    ):
        # Defaults mirror the plotly schema; callers may override any of them
        # through kwargs.
        edit_type = kwargs.pop("edit_type", "plot")
        values = kwargs.pop("values", ["convert types", "strict"])
        super(AutotypenumbersValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            values=values,
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@layout@scene@zaxis@_autotypenumbers.py@.PATH_END.py
|
{
"filename": "image.py",
"repo_name": "GalSim-developers/GalSim",
"repo_path": "GalSim_extracted/GalSim-main/galsim/image.py",
"type": "Python"
}
|
# Copyright (c) 2012-2023 by the GalSim developers team on GitHub
# https://github.com/GalSim-developers
#
# This file is part of GalSim: The modular galaxy image simulation toolkit.
# https://github.com/GalSim-developers/GalSim
#
# GalSim is free software: redistribution and use in source and binary forms,
# with or without modification, are permitted provided that the following
# conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions, and the disclaimer given in the accompanying LICENSE
# file.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the disclaimer given in the documentation
# and/or other materials provided with the distribution.
#
__all__ = [ 'Image', '_Image',
'ImageS', 'ImageI', 'ImageF', 'ImageD',
'ImageCF', 'ImageCD', 'ImageUS', 'ImageUI', ]
import numpy as np
from . import _galsim
from .position import PositionI, _PositionD, parse_pos_args
from .bounds import BoundsI, BoundsD, _BoundsI
from ._utilities import lazy_property
from .errors import GalSimError, GalSimBoundsError, GalSimValueError, GalSimImmutableError
from .errors import GalSimUndefinedBoundsError, GalSimIncompatibleValuesError, convert_cpp_errors
# Sometimes (on 32-bit systems) there are two numpy.int32 types.  This can lead to some confusion
# when doing arithmetic with images.  So just make sure both of them point to ImageViewI in the
# _cpp_type dict.  One of them is what you get when you just write numpy.int32.  The other is
# what numpy decides an int16 + int32 is.
# For more information regarding this rather unexpected behaviour for numpy.int32 types, see
# the following (closed, marked "wontfix") ticket on the numpy issue tracker:
# http://projects.scipy.org/numpy/ticket/1246
# NOTE: computed once at import time; int16 + int32 promotion yields the "alternate" int32 type.
alt_int32 = (np.array([0], dtype=np.int16) + np.array([0], dtype=np.int32)).dtype.type
class Image:
"""A class for storing image data along with the pixel scale or WCS information
The Image class encapsulates all the relevant information about an image including a NumPy array
for the pixel values, a bounding box, and some kind of WCS that converts between pixel
coordinates and world coordinates. The NumPy array may be constructed by the Image class
itself, or an existing array can be provided by the user.
This class creates shallow copies unless a deep copy is explicitly requested using the `copy`
method. The main reason for this is that it allows users to work directly with and modify
subimages of larger images (for example, to successively draw many galaxies into one large
image). For other implications of this convention, see the description of initialization
instructions below.
In most applications with images, we will use (x,y) to refer to the coordinates. We adopt
the same meaning for these coordinates as most astronomy applications do: ds9, SAOImage,
SExtractor, etc. all treat x as the column number and y as the row number. However, this
is different from the default convention used by numpy. In numpy, the access is by
[row_num,col_num], which means this is really [y,x] in terms of the normal x,y values.
Users are typically insulated from this concern by the Image API, but if you access the
numpy array directly via the ``array`` attribute, you will need to be careful about this
difference.
There are 6 data types that the Image can use for the data values. These are ``numpy.uint16``,
``numpy.uint32``, ``numpy.int16``, ``numpy.int32``, ``numpy.float32``, and ``numpy.float64``.
If you are constructing a new Image from scratch, the default is ``numpy.float32``, but you
can specify one of the other data types.
There are several ways to construct an Image:
(Optional arguments are shown with their default values after the = sign.)
``Image(ncol, nrow, dtype=numpy.float32, init_value=0, xmin=1, ymin=1, ...)``
This constructs a new image, allocating memory for the pixel values according to
the number of columns and rows. You can specify the data type as ``dtype`` if you
want. The default is ``numpy.float32`` if you don't specify it. You can also
optionally provide an initial value for the pixels, which defaults to 0.
The optional ``xmin,ymin`` allow you to specify the location of the lower-left
pixel, which defaults to (1,1). Reminder, with our convention for x,y coordinates
described above, ncol is the number of pixels in the x direction, and nrow is the
number of pixels in the y direction.
``Image(bounds, dtype=numpy.float32, init_value=0, ...)``
This constructs a new image, allocating memory for the pixel values according to a
given `Bounds` object. Particularly, the bounds should be a `BoundsI` instance.
You can specify the data type as ``dtype`` if you want. The default is
``numpy.float32`` if you don't specify it. You can also optionally provide an
initial value for the pixels, which defaults to 0.
``Image(array, xmin=1, ymin=1, make_const=False, copy=False ...)``
This views an existing NumPy array as an Image, where updates to either the image
or the original array will affect the other one. The data type is taken from
``array.dtype``, which must be one of the allowed types listed above. You can also
optionally set the origin ``xmin, ymin`` if you want it to be something other than
(1,1).
You can also optionally force the Image to be read-only with ``make_const=True``,
though if the original NumPy array is modified then the contents of ``Image.array``
will change.
If you want to make a copy of the input array, rather than just view the existing
array, you can force a copy with::
>>> image = galsim.Image(array, copy=True)
``Image(image, dtype=image.dtype, copy=True)``
This creates a copy of an Image, possibly changing the type. e.g.::
>>> image_float = galsim.Image(64, 64) # default dtype=numpy.float32
>>> image_double = galsim.Image(image_float, dtype=numpy.float64)
You can see a list of valid values for dtype in ``galsim.Image.valid_dtypes``.
Without the ``dtype`` argument, this is equivalent to ``image.copy()``, which makes
a deep copy. If you want a copy that shares data with the original, see
the `view` method.
If you only want to enforce the image to have a given type and not make a copy
if the array is already the correct type, you can use, e.g.::
>>> image_double = galsim.Image(image, dtype=numpy.float64, copy=False)
You can specify the ``ncol``, ``nrow``, ``bounds``, ``array``, or ``image`` parameters by
keyword argument if you want, or you can pass them as simple arg as shown aboves, and the
constructor will figure out what they are.
The other keyword arguments (shown as ... above) relate to the conversion between sky
coordinates, which is how all the GalSim objects are defined, and the pixel coordinates.
There are three options for this:
scale
You can optionally specify a pixel scale to use. This would normally have
units arcsec/pixel, but it doesn't have to be arcsec. If you want to
use different units for the physical scale of your galsim objects, then
the same unit would be used here.
wcs
A WCS object that provides a non-trivial mapping between sky units and
pixel units. The ``scale`` parameter is equivalent to
``wcs=PixelScale(scale)``. But there are a number of more complicated options.
See the WCS class for more details.
None
If you do not provide either of the above, then the conversion is undefined.
When drawing onto such an image, a suitable pixel scale will be automatically
set according to the Nyquist scale of the object being drawn.
After construction, you can set or change the scale or wcs with::
>>> image.scale = new_scale
>>> image.wcs = new_wcs
Note that ``image.scale`` will only work if the WCS is a `PixelScale`. Once you set the
wcs to be something non-trivial, then you must interact with it via the ``wcs`` attribute.
The ``image.scale`` syntax will raise an exception.
There are also two read-only attributes::
>>> image.bounds
>>> image.array
The ``array`` attribute is a NumPy array of the Image's pixels. The individual elements in the
array attribute are accessed as ``image.array[y,x]``, matching the standard NumPy convention,
while the Image class's own accessor uses either ``(x,y)`` or ``[x,y]``.
That is, the following are equivalent::
>>> ixy = image(x,y)
>>> ixy = image[x,y]
>>> ixy = image.array[y,x]
>>> ixy = image.getValue(x,y)
Similarly, for setting individual pixel values, the following are equivalent::
>>> image[x,y] = new_ixy
>>> image.array[y,x] = new_ixy
>>> image.setValue(x,y,new_ixy)
"""
# Mapping from the supported numpy dtypes to the corresponding C++ image-view classes.
_cpp_type = { np.uint16 : _galsim.ImageViewUS,
              np.uint32 : _galsim.ImageViewUI,
              np.int16 : _galsim.ImageViewS,
              np.int32 : _galsim.ImageViewI,
              np.float32 : _galsim.ImageViewF,
              np.float64 : _galsim.ImageViewD,
              np.complex64 : _galsim.ImageViewCF,
              np.complex128 : _galsim.ImageViewCD,
            }
_cpp_valid_dtypes = list(_cpp_type.keys())
# Python builtins (and np.int64) accepted as aliases for the supported dtypes.
_alias_dtypes = {
    int : np.int32,          # So that user gets what they would expect
    float : np.float64,      # if using dtype=int or float or complex
    complex : np.complex128,
    np.int64 : np.int32,     # Not equivalent, but will convert
}
# Note: Numpy uses int64 for int on 64 bit machines.  We don't implement int64 at all,
# so we cannot quite match up to the numpy convention for dtype=int.  e.g. via
#     int : numpy.zeros(1,dtype=int).dtype.type
# If this becomes too confusing, we might need to add an ImageL class that uses int64.
# Hard to imagine a use case where this would be required though...

# This one is in the public API.  (No leading underscore.)
valid_dtypes = _cpp_valid_dtypes + list(_alias_dtypes.keys())
def __init__(self, *args, **kwargs):
    """Construct an Image.

    See the `Image` class docstring for the full set of supported call
    signatures (ncol/nrow, bounds, array, or another image, plus the optional
    dtype/init_value/scale/wcs/copy keywords).  The input styles are mutually
    exclusive; the branch order below is significant.
    """
    # Parse the args, kwargs
    ncol = None
    nrow = None
    bounds = None
    array = None
    image = None
    if len(args) > 2:
        raise TypeError("Error, too many unnamed arguments to Image constructor")
    elif len(args) == 2:
        # Image(ncol, nrow, ...)
        ncol = args[0]
        nrow = args[1]
        xmin = kwargs.pop('xmin',1)
        ymin = kwargs.pop('ymin',1)
    elif len(args) == 1:
        if isinstance(args[0], np.ndarray):
            # Image(array, ...)
            array = args[0]
            array, xmin, ymin = self._get_xmin_ymin(array, kwargs)
            make_const = kwargs.pop('make_const',False)
        elif isinstance(args[0], BoundsI):
            # Image(bounds, ...)
            bounds = args[0]
        elif isinstance(args[0], (list, tuple)):
            # Image(seq, ...): convert a (nested) list/tuple to an array first.
            array = np.array(args[0])
            array, xmin, ymin = self._get_xmin_ymin(array, kwargs)
            make_const = kwargs.pop('make_const',False)
        elif isinstance(args[0], Image):
            # Image(image, ...)
            image = args[0]
        else:
            raise TypeError("Unable to parse %s as an array, bounds, or image."%args[0])
    else:
        # No positional args; accept the same inputs as keyword arguments.
        if 'array' in kwargs:
            array = kwargs.pop('array')
            array, xmin, ymin = self._get_xmin_ymin(array, kwargs)
            make_const = kwargs.pop('make_const',False)
        elif 'bounds' in kwargs:
            bounds = kwargs.pop('bounds')
        elif 'image' in kwargs:
            image = kwargs.pop('image')
        else:
            ncol = kwargs.pop('ncol',None)
            nrow = kwargs.pop('nrow',None)
            xmin = kwargs.pop('xmin',1)
            ymin = kwargs.pop('ymin',1)

    # Pop off the other valid kwargs:
    dtype = kwargs.pop('dtype', None)
    init_value = kwargs.pop('init_value', None)
    scale = kwargs.pop('scale', None)
    wcs = kwargs.pop('wcs', None)
    copy = kwargs.pop('copy', None)

    # Check that we got them all
    if kwargs:
        raise TypeError("Image constructor got unexpected keyword arguments: %s",kwargs)

    # Figure out what dtype we want:
    dtype = Image._alias_dtypes.get(dtype,dtype)
    if dtype is not None and dtype not in Image.valid_dtypes:
        raise GalSimValueError("Invalid dtype.", dtype, Image.valid_dtypes)
    if array is not None:
        if copy is None: copy = False
        if dtype is None:
            # Infer dtype from the array, converting aliases to the concrete type.
            dtype = array.dtype.type
            if dtype in Image._alias_dtypes:
                dtype = Image._alias_dtypes[dtype]
                array = array.astype(dtype, copy=copy)
            elif dtype not in Image._cpp_valid_dtypes:
                raise GalSimValueError("Invalid dtype of provided array.", array.dtype,
                                       Image._cpp_valid_dtypes)
            elif copy:
                array = np.array(array)
        else:
            array = array.astype(dtype, copy=copy)
        # Be careful here: we have to watch out for little-endian / big-endian issues.
        # The path of least resistance is to check whether the array.dtype is equal to the
        # native one (using the dtype.isnative flag), and if not, make a new array that has a
        # type equal to the same one but with the appropriate endian-ness.
        if not array.dtype.isnative:
            array = array.astype(array.dtype.newbyteorder('='))
        self._dtype = array.dtype.type
    elif dtype is not None:
        self._dtype = dtype
    else:
        # Default dtype for a freshly allocated image.
        self._dtype = np.float32

    # Construct the image attribute
    if (ncol is not None or nrow is not None):
        if ncol is None or nrow is None:
            raise GalSimIncompatibleValuesError(
                "Both nrow and ncol must be provided", ncol=ncol, nrow=nrow)
        if ncol != int(ncol) or nrow != int(nrow):
            raise TypeError("nrow, ncol must be integers")
        ncol = int(ncol)
        nrow = int(nrow)
        self._array = self._make_empty(shape=(nrow,ncol), dtype=self._dtype)
        self._bounds = BoundsI(xmin, xmin+ncol-1, ymin, ymin+nrow-1)
        if init_value:
            self.fill(init_value)
    elif bounds is not None:
        if not isinstance(bounds, BoundsI):
            raise TypeError("bounds must be a galsim.BoundsI instance")
        self._array = self._make_empty(bounds.numpyShape(), dtype=self._dtype)
        self._bounds = bounds
        if init_value:
            self.fill(init_value)
    elif array is not None:
        # View the user's array without copying; propagate its writeability.
        self._array = array.view()
        nrow,ncol = array.shape
        self._bounds = BoundsI(xmin, xmin+ncol-1, ymin, ymin+nrow-1)
        if make_const or not array.flags.writeable:
            self._array.flags.writeable = False
        if init_value is not None:
            raise GalSimIncompatibleValuesError(
                "Cannot specify init_value with array", init_value=init_value, array=array)
    elif image is not None:
        if not isinstance(image, Image):
            raise TypeError("image must be an Image")
        if init_value is not None:
            raise GalSimIncompatibleValuesError(
                "Cannot specify init_value with image", init_value=init_value, image=image)
        if wcs is None and scale is None:
            wcs = image.wcs
        self._bounds = image.bounds
        if dtype is None:
            self._dtype = image.dtype
        else:
            # Allow dtype to force a retyping of the provided image
            # e.g. im = ImageF(...)
            #      im2 = ImageD(im)
            self._dtype = dtype
        if copy is False:
            self._array = image.array.astype(self._dtype, copy=False)
        else:
            self._array = self._make_empty(shape=image.bounds.numpyShape(), dtype=self._dtype)
            self._array[:,:] = image.array[:,:]
    else:
        # No size information at all: make a 1x1 buffer with undefined bounds.
        self._array = np.zeros(shape=(1,1), dtype=self._dtype)
        self._bounds = BoundsI()
        if init_value is not None:
            raise GalSimIncompatibleValuesError(
                "Cannot specify init_value without setting an initial size",
                init_value=init_value, ncol=ncol, nrow=nrow, bounds=bounds)

    # Construct the wcs attribute
    if scale is not None:
        if wcs is not None:
            raise GalSimIncompatibleValuesError(
                "Cannot provide both scale and wcs to Image constructor", wcs=wcs, scale=scale)
        self.wcs = PixelScale(float(scale))
    else:
        if wcs is not None and not isinstance(wcs, BaseWCS):
            raise TypeError("wcs parameters must be a galsim.BaseWCS instance")
        self.wcs = wcs
@staticmethod
def _get_xmin_ymin(array, kwargs):
    """Parse the xmin/ymin/bounds options accompanying an input array.

    Returns (array, xmin, ymin).  All three are None when the inputs indicate a
    formally undefined image (undefined bounds, or a zero-width array); in that
    case the array's dtype is stashed into kwargs['dtype'] for the caller.
    """
    if not isinstance(array, np.ndarray):
        raise TypeError("array must be a numpy.ndarray instance")

    def undefined():
        # Preserve the dtype so the caller can still honor it, and signal an
        # undefined image with a triple of Nones.
        kwargs.setdefault('dtype', array.dtype.type)
        return None, None, None

    xmin = kwargs.pop('xmin', 1)
    ymin = kwargs.pop('ymin', 1)

    if 'bounds' not in kwargs:
        # A zero-width array is another way to indicate an undefined image.
        if array.shape[1] == 0:
            return undefined()
        return array, xmin, ymin

    b = kwargs.pop('bounds')
    if not isinstance(b, BoundsI):
        raise TypeError("bounds must be a galsim.BoundsI instance")
    if b.xmax - b.xmin + 1 != array.shape[1]:
        raise GalSimIncompatibleValuesError(
            "Shape of array is inconsistent with provided bounds", array=array, bounds=b)
    if b.ymax - b.ymin + 1 != array.shape[0]:
        raise GalSimIncompatibleValuesError(
            "Shape of array is inconsistent with provided bounds", array=array, bounds=b)
    if not b.isDefined():
        return undefined()
    return array, b.xmin, b.ymin
def __repr__(self):
    """Eval-able representation; the array is included only when bounds are defined."""
    parts = ['galsim.Image(bounds=%r' % self.bounds]
    if self.bounds.isDefined():
        parts.append(', array=\n%r' % self.array)
    parts.append(', wcs=%r' % self.wcs)
    if self.isconst:
        parts.append(', make_const=True')
    parts.append(')')
    return ''.join(parts)
def __str__(self):
    """Short human-readable description of the image."""
    # str(np.float32) looks like "<class 'numpy.float32'>"; keep just the name part.
    type_name = str(self.dtype).split("'")[1]
    if self.wcs is not None and self.wcs._isPixelScale:
        return 'galsim.Image(bounds=%s, scale=%s, dtype=%s)' % (self.bounds, self.scale, type_name)
    return 'galsim.Image(bounds=%s, wcs=%s, dtype=%s)' % (self.bounds, self.wcs, type_name)
# Pickling almost works out of the box, but numpy arrays lose their non-writeable flag
# when pickled, so make sure to set it to preserve const Images.
def __getstate__(self):
    """Pickle state: the instance dict (minus the lazy C++ view) plus the const flag.

    The const flag is carried separately because numpy arrays lose their
    non-writeable flag when pickled.
    """
    state = dict(self.__dict__)
    state.pop('_image', None)
    return state, self.isconst
def __setstate__(self, args):
    """Restore from `__getstate__`, re-applying the non-writeable flag if needed."""
    state, isconst = args
    self.__dict__ = state
    if isconst:
        self._array.flags.writeable = False
# Read-only attributes:
@property
def dtype(self):
    """The dtype of the underlying numpy array.
    """
    # Set at construction time; always a concrete numpy type from valid_dtypes.
    return self._dtype
@property
def bounds(self):
    """The bounds of the `Image` (a `BoundsI` giving xmin/xmax/ymin/ymax).
    """
    return self._bounds
@property
def array(self):
    """The underlying numpy array.

    Note: indexed as [y, x] (row, column), unlike the Image accessors which
    use (x, y).
    """
    return self._array
@property
def nrow(self):
    """The number of rows (y extent) in the image."""
    nrows, _ = self._array.shape
    return nrows
@property
def ncol(self):
    """The number of columns (x extent) in the image."""
    _, ncols = self._array.shape
    return ncols
@property
def isconst(self):
    """Whether the `Image` is constant.  I.e. modifying its values is an error.
    """
    # The image is "const" exactly when the numpy buffer is non-writeable.
    # (Idiom fix: `not flag` rather than comparing `flag == False`.)
    return not self._array.flags.writeable
@property
def iscomplex(self):
    """Whether the `Image` values are complex.
    """
    # numpy dtype kind 'c' covers the complex types (complex64/complex128 here).
    return self._array.dtype.kind == 'c'
@property
def isinteger(self):
    """Whether the `Image` values are integral.
    """
    # numpy dtype kinds: 'i' = signed integer, 'u' = unsigned integer.
    return self._array.dtype.kind in ('i','u')
@property
def iscontiguous(self):
    """Indicates whether each row of the image is contiguous in memory.

    Note: it is ok for the end of one row to not be contiguous with the start
    of the next row.  This just checks that each individual row has a stride
    of 1 item.
    """
    items_per_step = self._array.strides[1] // self._array.itemsize
    return items_per_step == 1
@lazy_property
def _image(self):
    """Lazily-built C++ image view onto this image's buffer (shares memory, no copy)."""
    cls = self._cpp_type[self.dtype]
    # Pass the raw data pointer plus the strides in units of items (not bytes).
    _data = self._array.__array_interface__['data'][0]
    return cls(_data,
               self._array.strides[1]//self._array.itemsize,
               self._array.strides[0]//self._array.itemsize,
               self._bounds._b)
# Allow scale to work as a PixelScale wcs.
@property
def scale(self):
    """The pixel scale of the `Image`.  Only valid if the wcs is a `PixelScale`.

    If the WCS is either not set (i.e. it is ``None``) or it is a `PixelScale`, then
    it is permissible to change the scale with::

        >>> image.scale = new_pixel_scale
    """
    try:
        return self.wcs.scale
    except Exception:
        # Either wcs is None (attribute access fails) or it has no usable .scale.
        if self.wcs:
            raise GalSimError(
                "image.wcs is not a simple PixelScale; scale is undefined.") from None
        else:
            return None
@scale.setter
def scale(self, value):
    # Setting the scale is only allowed when the current wcs is unset or is
    # already a simple PixelScale.
    if self.wcs is None or self.wcs._isPixelScale:
        self.wcs = PixelScale(value)
    else:
        raise GalSimError("image.wcs is not a simple PixelScale; scale is undefined.")
# Convenience functions
@property
def xmin(self):
    """Alias for self.bounds.xmin (smallest x coordinate)."""
    return self._bounds.xmin
@property
def xmax(self):
    """Alias for self.bounds.xmax (largest x coordinate)."""
    return self._bounds.xmax
@property
def ymin(self):
    """Alias for self.bounds.ymin (smallest y coordinate)."""
    return self._bounds.ymin
@property
def ymax(self):
    """Alias for self.bounds.ymax (largest y coordinate)."""
    return self._bounds.ymax
@property
def outer_bounds(self):
    """The bounds of the outer edge of the pixels.

    Equivalent to galsim.BoundsD(im.xmin-0.5, im.xmax+0.5, im.ymin-0.5, im.ymax+0.5)
    """
    # Pixel centers sit on integer coordinates, so the edges extend 0.5 beyond.
    return BoundsD(self.xmin-0.5, self.xmax+0.5, self.ymin-0.5, self.ymax+0.5)
# real, imag for everything, even real images.
@property
def real(self):
    """Return the real part of an image.

    This is a property, not a function.  So write ``im.real``, not ``im.real()``.

    This works for real or complex.  For real images, it acts the same as `view`.
    """
    # numpy's ndarray.real is a view, so the result shares memory with this image.
    return _Image(self.array.real, self.bounds, self.wcs)
@property
def imag(self):
    """Return the imaginary part of an image.

    This is a property, not a function.  So write ``im.imag``, not ``im.imag()``.

    This works for real or complex.  For real images, the returned array is read-only and
    all elements are 0.
    """
    return _Image(self.array.imag, self.bounds, self.wcs)
@property
def conjugate(self):
    """Return the complex conjugate of an image.

    This works for real or complex.  For real images, it acts the same as `view`.

    Note that for complex images, this is not a conjugate view into the original image.
    So changing the original image does not change the conjugate (or vice versa).
    """
    # ndarray.conjugate() allocates a new array for complex input, hence no view.
    return _Image(self.array.conjugate(), self.bounds, self.wcs)
def copy(self):
    """Make a copy of the `Image`
    """
    # array.copy() detaches the pixel data; bounds and wcs are passed through as-is.
    return _Image(self.array.copy(), self.bounds, self.wcs)
def get_pixel_centers(self):
    """A convenience function to get the x and y values at the centers of the image pixels.

    Returns:
        (x, y), each of which is a numpy array the same shape as ``self.array``
    """
    nrow, ncol = self.array.shape
    # Pixel-center coordinates start at the image origin (bounds.xmin, bounds.ymin).
    xvals = np.arange(ncol, dtype=float) + self.bounds.xmin
    yvals = np.arange(nrow, dtype=float) + self.bounds.ymin
    x, y = np.meshgrid(xvals, yvals)
    return x, y
def _make_empty(self, shape, dtype):
    """Helper function to make an empty numpy array of the given shape, making sure that
    the array is 16-byte aligned so it is usable by FFTW.
    """
    # cf. http://stackoverflow.com/questions/9895787/memory-alignment-for-fast-fft-in-python-using-shared-arrrays
    nbytes = shape[0] * shape[1] * np.dtype(dtype).itemsize
    if nbytes == 0:
        # Make degenerate images have 1 element.  Otherwise things get weird.
        return np.zeros(shape=(1,1), dtype=self._dtype)
    # Over-allocate by 16 bytes, then slice so the data pointer is 16-byte aligned.
    buf = np.zeros(nbytes + 16, dtype=np.uint8)
    start_index = -buf.__array_interface__['data'][0] % 16
    a = buf[start_index:start_index + nbytes].view(dtype).reshape(shape)
    #assert a.ctypes.data % 16 == 0
    return a
def resize(self, bounds, wcs=None):
    """Resize the image to have a new bounds (must be a `BoundsI` instance)

    Note that the resized image will have uninitialized data.  If you want to preserve
    the existing data values, you should either use `subImage` (if you want a smaller
    portion of the current `Image`) or make a new `Image` and copy over the current values
    into a portion of the new image (if you are resizing to a larger `Image`).

    Parameters:
        bounds:     The new bounds to resize to.
        wcs:        If provided, also update the wcs to the given value. [default: None,
                    which means keep the existing wcs]
    """
    if self.isconst:
        raise GalSimImmutableError("Cannot modify an immutable Image", self)
    if not isinstance(bounds, BoundsI):
        raise TypeError("bounds must be a galsim.BoundsI instance")
    # Allocate a fresh (uninitialized-content) buffer matching the new bounds.
    new_array = self._make_empty(bounds.numpyShape(), self.dtype)
    self._array = new_array
    self._bounds = bounds
    if wcs is not None:
        self.wcs = wcs
def subImage(self, bounds):
    """Return a view of a portion of the full image

    This is equivalent to self[bounds]
    """
    if not isinstance(bounds, BoundsI):
        raise TypeError("bounds must be a galsim.BoundsI instance")
    if not self.bounds.isDefined():
        raise GalSimUndefinedBoundsError("Attempt to access subImage of undefined image")
    if not self.bounds.includes(bounds):
        raise GalSimBoundsError("Attempt to access subImage not (fully) in image",
                                bounds, self.bounds)
    # Translate the requested bounds into numpy slices (rows = y, cols = x).
    rows = slice(bounds.ymin - self.ymin, bounds.ymax - self.ymin + 1)
    cols = slice(bounds.xmin - self.xmin, bounds.xmax - self.xmin + 1)
    subarray = self.array[rows, cols]
    # NB. The wcs remains accurate, since the sub-image uses the same (x,y) values
    # as the original image did for those pixels.  It's only once you recenter or
    # reorigin that you need to update the wcs.  That's taken care of in im.shift.
    return _Image(subarray, bounds, self.wcs)
def setSubImage(self, bounds, rhs):
    """Set a portion of the full image to the values in another image

    This is equivalent to self[bounds] = rhs
    """
    if self.isconst:
        raise GalSimImmutableError("Cannot modify the values of an immutable Image", self)
    # Copy rhs into a view of the target region.
    target = self.subImage(bounds)
    target.copyFrom(rhs)
def __getitem__(self, *args):
    """Return either a subimage or a single pixel value.

    For example,::

        >>> subimage = im[galsim.BoundsI(3,7,3,7)]
        >>> value = im[galsim.PositionI(5,5)]
        >>> value = im[5,5]
    """
    if len(args) == 2:
        return self(*args)
    if len(args) != 1:
        raise TypeError("image[..] requires either 1 or 2 args")
    key = args[0]
    if isinstance(key, BoundsI):
        return self.subImage(key)
    if isinstance(key, PositionI):
        return self(key)
    if isinstance(key, tuple):
        # im[x, y] arrives here as a single tuple key.
        return self.getValue(*key)
    raise TypeError("image[index] only accepts BoundsI or PositionI for the index")
def __setitem__(self, *args):
    """Set either a subimage or a single pixel to new values.

    For example,::

        >>> im[galsim.BoundsI(3,7,3,7)] = im2
        >>> im[galsim.PositionI(5,5)] = 17.
        >>> im[5,5] = 17.
    """
    if len(args) == 2:
        if isinstance(args[0], BoundsI):
            self.setSubImage(*args)
        elif isinstance(args[0], (PositionI, tuple)):
            # Both PositionI and (x, y) tuple keys go through setValue.
            self.setValue(*args)
        else:
            raise TypeError("image[index] only accepts BoundsI or PositionI for the index")
    elif len(args) == 3:
        return self.setValue(*args)
    else:
        # Bug fix: the old message said "1 or 2 args" (copied from __getitem__),
        # but __setitem__ receives the index arg(s) plus the value: 2 or 3 args.
        raise TypeError("image[..] requires either 2 or 3 args")
def wrap(self, bounds, hermitian=False):
    """Wrap the values in a image onto a given subimage and return the subimage.

    This would typically be used on a k-space image where you initially draw a larger image
    than you want for the FFT and then wrap it onto a smaller subset.  This will cause
    aliasing of course, but this is often preferable to just using the smaller image
    without wrapping.

    For complex images of FFTs, one often only stores half the image plane with the
    implicit understanding that the function is Hermitian, so im(-x,-y) == im(x,y).conjugate().
    In this case, the wrapping needs to work slightly differently, so you can specify
    that your image is implicitly Hermitian with the ``hermitian`` argument.  Options are:

    hermitian=False
        (default) Normal non-Hermitian image.

    hermitian='x'
        Only x>=0 values are stored with x<0 values being implicitly Hermitian.
        In this case im.bounds.xmin and bounds.xmin must be 0.

    hermitian='y'
        Only y>=0 values are stored with y<0 values being implicitly Hermitian.
        In this case im.bounds.ymin and bounds.ymin must be 0.

    Also, in the two Hermitian cases, the direction that is not implicitly Hermitian must be
    symmetric in the image's bounds.  The wrap bounds must be almost symmetric, but missing
    the most negative value.  For example,::

        >>> N = 100
        >>> im_full = galsim.ImageCD(bounds=galsim.BoundsI(0,N/2,-N/2,N/2), scale=dk)
        >>> # ... fill with im[i,j] = FT(kx=i*dk, ky=j*dk)
        >>> N2 = 64
        >>> im_wrap = im_full.wrap(galsim.BoundsI(0,N/2,-N2/2,N2/2-1), hermitian='x')

    This sets up im_wrap to be the properly Hermitian version of the data appropriate for
    passing to an FFT.

    Note that this routine modifies the original image (and not just the subimage onto which
    it is wrapped), so if you want to keep the original pristine, you should call
    ``wrapped_image = image.copy().wrap(bounds)``.

    Parameters:
        bounds:     The bounds of the subimage onto which to wrap the full image.
        hermitian:  Whether the image is implicitly Hermitian and if so, whether it is the
                    x or y values that are not stored.  [default: False]

    Returns:
        the subimage, image[bounds], after doing the wrapping.
    """
    if not isinstance(bounds, BoundsI):
        raise TypeError("bounds must be a galsim.BoundsI instance")
    # Validate the hermitian option and the relevant bounds up front, so we raise
    # before any data is written into the target region.
    if not hermitian:
        return self._wrap(bounds, False, False)
    elif hermitian == 'x':
        if self.bounds.xmin != 0:
            raise GalSimIncompatibleValuesError(
                "hermitian == 'x' requires self.bounds.xmin == 0",
                hermitian=hermitian, bounds=self.bounds)
        if bounds.xmin != 0:
            raise GalSimIncompatibleValuesError(
                "hermitian == 'x' requires bounds.xmin == 0",
                hermitian=hermitian, bounds=bounds)
        return self._wrap(bounds, True, False)
    elif hermitian == 'y':
        if self.bounds.ymin != 0:
            raise GalSimIncompatibleValuesError(
                "hermitian == 'y' requires self.bounds.ymin == 0",
                hermitian=hermitian, bounds=self.bounds)
        if bounds.ymin != 0:
            raise GalSimIncompatibleValuesError(
                "hermitian == 'y' requires bounds.ymin == 0",
                hermitian=hermitian, bounds=bounds)
        return self._wrap(bounds, False, True)
    else:
        raise GalSimValueError("Invalid value for hermitian", hermitian, (False, 'x', 'y'))
def _wrap(self, bounds, hermx, hermy):
    """A version of `wrap` without the sanity checks.

    Equivalent to ``image.wrap(bounds, hermitian='x' if hermx else 'y' if hermy else False)``
    """
    # Take the target view first; the C++ wrapImage then folds the full image
    # into that region in place.
    ret = self.subImage(bounds)
    _galsim.wrapImage(self._image, bounds._b, hermx, hermy)
    return ret
def bin(self, nx, ny):
    """Bin the image pixels in blocks of nx x ny pixels.

    This returns a new image that is a binned version of the current image.
    Adjacent pixel values in nx x ny blocks are added together to produce the flux in each
    output pixel.

    If the current number of pixels in each direction is not a multiple of nx, ny, then the
    last pixel in each direction will be the sum of fewer than nx or ny pixels as needed.

    See also subsample, which is the opposite of this.

    If the wcs is a Jacobian (or simpler), the output image will have its wcs set properly.
    But if the wcs is more complicated, the output wcs would be fairly complicated to figure
    out properly, so we leave it as None.  The user should set it themselves if required.

    Parameters:
        nx:     The number of adjacent pixels in the x direction to add together into each
                output pixel.
        ny:     The number of adjacent pixels in the y direction to add together into each
                output pixel.

    Returns:
        a new `Image`
    """
    ncol = self.xmax - self.xmin + 1
    nrow = self.ymax - self.ymin + 1
    # Number of output bins in each direction; the last bin may cover fewer pixels.
    nbins_x = (ncol-1) // nx + 1
    nbins_y = (nrow-1) // ny + 1
    nbins = nbins_x * nbins_y
    # target_bins just provides a number from 0..nbins for each target pixel
    target_bins = np.arange(nbins).reshape(nbins_y, nbins_x)
    # current_bins is the same number for each pixel in the current image.
    current_bins = np.repeat(np.repeat(target_bins, ny, axis=0), nx, axis=1)
    current_bins = current_bins[0:nrow, 0:ncol]
    # bincount with weights is a tricky way to do the sum over the bins
    target_ar = np.bincount(current_bins.ravel(), weights=self.array.ravel())
    target_ar = target_ar.reshape(target_bins.shape)
    if self.wcs is None or not self.wcs._isUniform:
        # Non-uniform wcs is too complicated to propagate; leave it unset.
        target_wcs = None
    else:
        if self.wcs._isPixelScale and nx == ny:
            target_wcs = PixelScale(self.scale * nx)
        else:
            dudx, dudy, dvdx, dvdy = self.wcs.jacobian().getMatrix().ravel()
            dudx *= nx
            dvdx *= nx
            dudy *= ny
            dvdy *= ny
            target_wcs = JacobianWCS(dudx, dudy, dvdx, dvdy)
        # Set the origin so that corresponding image positions correspond to the same world_pos
        x0 = (self.wcs.origin.x - self.xmin + 0.5) / nx + 0.5
        y0 = (self.wcs.origin.y - self.ymin + 0.5) / ny + 0.5
        target_wcs = target_wcs.shiftOrigin(_PositionD(x0,y0), self.wcs.world_origin)
    target_bounds = _BoundsI(1, nbins_x, 1, nbins_y)
    return _Image(target_ar, target_bounds, target_wcs)
    def subsample(self, nx, ny, dtype=None):
        """Subdivide the image pixels into nx x ny sub-pixels.
        This returns a new image that is a subsampled version of the current image.
        Each pixel's flux is split (uniformly) into nx x ny smaller pixels.
        See also bin, which is the opposite of this.  Note that subsample(nx,ny) followed by
        bin(nx,ny) is essentially a no op.
        If the wcs is a Jacobian (or simpler), the output image will have its wcs set properly.
        But if the wcs is more complicated, the output wcs would be fairly complicated to figure
        out properly, so we leave it as None.  The user should set it themselves if required.
        Parameters:
            nx:     The number of sub-pixels in the x direction for each original pixel.
            ny:     The number of sub-pixels in the y direction for each original pixel.
            dtype:  Optionally provide a dtype for the return image. [default: None, which
                    means to use the same dtype as the original image]
        Returns:
            a new `Image`
        """
        # Size of the current image in pixels, and of the output image.
        ncol = self.xmax - self.xmin + 1
        nrow = self.ymax - self.ymin + 1
        npix_x = ncol * nx
        npix_y = nrow * ny
        # Each original pixel's flux is spread over nx*ny sub-pixels.
        flux_factor = nx * ny
        # Replicate each pixel value into its nx x ny block of sub-pixels.
        target_ar = np.repeat(np.repeat(self.array, ny, axis=0), nx, axis=1)
        target_ar = target_ar.astype(dtype, copy=False)  # Cute. This is a no op if dtype=None
        # NOTE(review): for an integer-typed image with dtype=None, this in-place true
        # division would be a numpy casting error — presumably callers pass a float dtype
        # (or have a float image) in that case; confirm intended usage.
        target_ar /= flux_factor
        if self.wcs is None or not self.wcs._isUniform:
            # Non-uniform wcs: too complicated to transform; leave for the user to set.
            target_wcs = None
        else:
            if self.wcs._isPixelScale and nx == ny:
                # Simple case: an isotropic subsampling of a PixelScale stays a PixelScale.
                target_wcs = PixelScale(self.scale / nx)
            else:
                # General uniform case: scale the local Jacobian columns by the
                # subsampling factors in each direction.
                dudx, dudy, dvdx, dvdy = self.wcs.jacobian().getMatrix().ravel()
                dudx /= nx
                dvdx /= nx
                dudy /= ny
                dvdy /= ny
                target_wcs = JacobianWCS(dudx, dudy, dvdx, dvdy)
            # Set the origin so that corresponding image positions correspond to the same world_pos
            x0 = (self.wcs.origin.x - self.xmin + 0.5) * nx + 0.5
            y0 = (self.wcs.origin.y - self.ymin + 0.5) * ny + 0.5
            target_wcs = target_wcs.shiftOrigin(_PositionD(x0,y0), self.wcs.world_origin)
        # Output image always starts at (1,1) regardless of the input's origin.
        target_bounds = _BoundsI(1, npix_x, 1, npix_y)
        return _Image(target_ar, target_bounds, target_wcs)
    def calculate_fft(self):
        """Performs an FFT of an `Image` in real space to produce a k-space `Image`.
        Note: the image will be padded with zeros as needed to make an image with bounds that
        look like ``BoundsI(-N/2, N/2-1, -N/2, N/2-1)``.
        The input image must have a `PixelScale` wcs.  The output image will be complex (an
        `ImageCF` or `ImageCD` instance) and its scale will be 2pi / (N dx), where dx is the scale
        of the input image.
        Returns:
            an `Image` instance with the k-space image.
        """
        if self.wcs is None:
            raise GalSimError("calculate_fft requires that the scale be set.")
        if not self.wcs._isPixelScale:
            raise GalSimError("calculate_fft requires that the image has a PixelScale wcs.")
        if not self.bounds.isDefined():
            raise GalSimUndefinedBoundsError(
                "calculate_fft requires that the image have defined bounds.")
        # Half the FFT size N: the smallest No2 such that [-No2, No2-1]^2 contains the bounds.
        No2 = max(-self.bounds.xmin, self.bounds.xmax+1, -self.bounds.ymin, self.bounds.ymax+1)
        full_bounds = _BoundsI(-No2, No2-1, -No2, No2-1)
        if self.bounds == full_bounds:
            # Then the image is already in the shape we need.
            ximage = self
        else:
            # Then we pad out with zeros
            ximage = Image(full_bounds, dtype=self.dtype, init_value=0)
            ximage[self.bounds] = self[self.bounds]
        dx = self.scale
        # dk = 2pi / (N dx), with N = 2 * No2, i.e. pi / (No2 dx).
        dk = np.pi / (No2 * dx)
        # rfft output only needs x >= 0 (Hermitian symmetry), hence bounds (0, No2) in x.
        out = Image(_BoundsI(0,No2,-No2,No2-1), dtype=np.complex128, scale=dk)
        with convert_cpp_errors():
            _galsim.rfft(ximage._image, out._image, True, True)
        # Normalize the discrete transform to approximate the continuous FT integral.
        out *= dx*dx
        out.setOrigin(0,-No2)
        return out
    def calculate_inverse_fft(self):
        """Performs an inverse FFT of an `Image` in k-space to produce a real-space `Image`.
        The starting image is typically an `ImageCD`, although if the Fourier function is real
        valued, then you could get away with using an `ImageD` or `ImageF`.
        The image is assumed to be Hermitian.  In fact, only the portion with x >= 0 needs to
        be defined, with f(-x,-y) taken to be conj(f(x,y)).
        Note: the k-space image will be padded with zeros and/or wrapped as needed to make an
        image with bounds that look like ``BoundsI(0, N/2, -N/2, N/2-1)``.  If you are building a
        larger k-space image and then wrapping, you should wrap directly into an image of
        this shape.
        The input image must have a `PixelScale` wcs.  The output image will be real (an `ImageD`
        instance) and its scale will be 2pi / (N dk), where dk is the scale of the input image.
        Returns:
            an `Image` instance with the real-space image.
        """
        if self.wcs is None:
            raise GalSimError("calculate_inverse_fft requires that the scale be set.")
        if not self.wcs._isPixelScale:
            raise GalSimError("calculate_inverse_fft requires that the image has a PixelScale wcs.")
        if not self.bounds.isDefined():
            raise GalSimUndefinedBoundsError("calculate_inverse_fft requires that the image have "
                                             "defined bounds.")
        if not self.bounds.includes(0,0):
            raise GalSimBoundsError("calculate_inverse_fft requires that the image includes (0,0)",
                                    PositionI(0,0), self.bounds)
        # Half the FFT size N.  xmin is not included: only x >= 0 matters (Hermitian input).
        No2 = max(self.bounds.xmax, -self.bounds.ymin, self.bounds.ymax)
        target_bounds = _BoundsI(0, No2, -No2, No2-1)
        if self.bounds == target_bounds:
            # Then the image is already in the shape we need.
            kimage = self
        else:
            # Then we can pad out with zeros and wrap to get this in the form we need.
            full_bounds = _BoundsI(0, No2, -No2, No2)
            kimage = Image(full_bounds, dtype=self.dtype, init_value=0)
            # Only copy the x >= 0 half; the rest is implied by Hermitian symmetry.
            posx_bounds = _BoundsI(0, self.bounds.xmax, self.bounds.ymin, self.bounds.ymax)
            kimage[posx_bounds] = self[posx_bounds]
            kimage = kimage.wrap(target_bounds, hermitian = 'x')
        dk = self.scale
        # dx = 2pi / (N dk)
        dx = np.pi / (No2 * dk)
        # For the inverse, we need a bit of extra space for the fft.
        out_extra = Image(_BoundsI(-No2,No2+1,-No2,No2-1), dtype=float, scale=dx)
        with convert_cpp_errors():
            _galsim.irfft(kimage._image, out_extra._image, True, True)
        # Now cut off the bit we don't need.
        out = out_extra.subImage(_BoundsI(-No2,No2-1,-No2,No2-1))
        # Normalization factor for the discrete inverse transform.
        out *= (dk * No2 / np.pi)**2
        out.setCenter(0,0)
        return out
@classmethod
def good_fft_size(cls, input_size):
"""Round the given input size up to the next higher power of 2 or 3 times a power of 2.
This rounds up to the next higher value that is either 2^k or 3*2^k. If you are
going to be performing FFTs on an image, these will tend to be faster at performing
the FFT.
"""
return _galsim.goodFFTSize(int(input_size))
def copyFrom(self, rhs):
"""Copy the contents of another image
"""
if self.isconst:
raise GalSimImmutableError("Cannot modify the values of an immutable Image", self)
if not isinstance(rhs, Image):
raise TypeError("Trying to copyFrom a non-image")
if self.bounds.numpyShape() != rhs.bounds.numpyShape():
raise GalSimIncompatibleValuesError(
"Trying to copy images that are not the same shape", self_image=self, rhs=rhs)
self._copyFrom(rhs)
def _copyFrom(self, rhs):
"""Same as copyFrom, but no sanity checks.
"""
self._array[:,:] = self._safe_cast(rhs.array)
    def view(self, scale=None, wcs=None, origin=None, center=None,
             make_const=False, dtype=None, contiguous=False):
        """Make a view of this image, which lets you change the scale, wcs, origin, etc.
        but view the same underlying data as the original image.
        If you do not provide either ``scale`` or ``wcs``, the view will keep the same wcs
        as the current `Image` object.

        Note: if ``dtype`` differs from the current dtype, or ``contiguous`` forces a copy,
        the returned image does not share data with the original.

        Parameters:
            scale:       If provided, use this as the pixel scale for the image. [default: None]
            wcs:         If provided, use this as the wcs for the image. [default: None]
            origin:      If provided, use this as the origin position of the view.
                         [default: None]
            center:      If provided, use this as the center position of the view.
                         [default: None]
            make_const:  Make the view's data array immutable. [default: False]
            dtype:       If provided, ensure that the output has this dtype.  If the original
                         Image is a different dtype, then a copy will be made. [default: None]
            contiguous:  If provided, ensure that the output array is contiguous. [default: False]
        """
        # origin and center both reposition the image, so they are mutually exclusive.
        if origin is not None and center is not None:
            raise GalSimIncompatibleValuesError(
                "Cannot provide both center and origin", center=center, origin=origin)
        # Resolve the wcs for the view: scale and wcs are mutually exclusive;
        # neither given means keep the current wcs.
        if scale is not None:
            if wcs is not None:
                raise GalSimIncompatibleValuesError(
                    "Cannot provide both scale and wcs", scale=scale, wcs=wcs)
            wcs = PixelScale(scale)
        elif wcs is not None:
            if not isinstance(wcs, BaseWCS):
                raise TypeError("wcs parameters must be a galsim.BaseWCS instance")
        else:
            wcs = self.wcs
        # Figure out the dtype for the return Image
        dtype = dtype if dtype else self.dtype
        # If currently empty, just return a new empty image.
        if not self.bounds.isDefined():
            return Image(wcs=wcs, dtype=dtype)
        # Recast the array type if necessary (this makes a copy, not a view).
        if dtype != self.array.dtype:
            array = self.array.astype(dtype)
        elif contiguous:
            # ascontiguousarray copies only if the array is not already contiguous.
            array = np.ascontiguousarray(self.array)
        else:
            array = self.array
        # Make the array const if requested.  A fresh numpy view is used so the
        # writeable=False flag does not affect the original array.
        if make_const:
            array = array.view()
            array.flags.writeable = False
        # Make the return Image
        ret = _Image(array, self.bounds, wcs)
        # Update the origin if requested
        if origin is not None:
            ret.setOrigin(origin)
        elif center is not None:
            ret.setCenter(center)
        return ret
def _view(self):
"""Equivalent to `view`, but without some of the sanity checks and extra options.
"""
return _Image(self.array.view(), self.bounds, self.wcs)
def shift(self, *args, **kwargs):
"""Shift the pixel coordinates by some (integral) dx,dy.
The arguments here may be either (dx, dy) or a PositionI instance.
Or you can provide dx, dy as named kwargs.
In terms of columns and rows, dx means a shift in the x value of each column in the
array, and dy means a shift in the y value of each row. In other words, the following
will return the same value for ixy. The shift function just changes the coordinates (x,y)
used for that pixel::
>>> ixy = im(x,y)
>>> im.shift(3,9)
>>> ixy = im(x+3, y+9)
"""
delta = parse_pos_args(args, kwargs, 'dx', 'dy', integer=True)
self._shift(delta)
def _shift(self, delta):
"""Equivalent to `shift`, but without some of the sanity checks and ``delta`` must
be a `PositionI` instance.
Parameters:
delta: The amount to shift as a `PositionI`.
"""
# The parse_pos_args function is a bit slow, so go directly to this point when we
# call shift from setCenter or setOrigin.
if delta.x != 0 or delta.y != 0:
self._bounds = self._bounds.shift(delta)
if self.wcs is not None:
self.wcs = self.wcs.shiftOrigin(delta)
def setCenter(self, *args, **kwargs):
"""Set the center of the image to the given (integral) (xcen, ycen)
The arguments here may be either (xcen, ycen) or a PositionI instance.
Or you can provide xcen, ycen as named kwargs.
In terms of the rows and columns, xcen is the new x value for the central column, and ycen
is the new y value of the central row. For even-sized arrays, there is no central column
or row, so the convention we adopt in this case is to round up. For example::
>>> im = galsim.Image(numpy.array(range(16),dtype=float).reshape((4,4)))
>>> im(1,1)
0.0
>>> im(4,1)
3.0
>>> im(4,4)
15.0
>>> im(3,3)
10.0
>>> im.setCenter(0,0)
>>> im(0,0)
10.0
>>> im(-2,-2)
0.0
>>> im(1,-2)
3.0
>>> im(1,1)
15.0
>>> im.setCenter(234,456)
>>> im(234,456)
10.0
>>> im.bounds
galsim.BoundsI(xmin=232, xmax=235, ymin=454, ymax=457)
"""
cen = parse_pos_args(args, kwargs, 'xcen', 'ycen', integer=True)
self._shift(cen - self.center)
def setOrigin(self, *args, **kwargs):
"""Set the origin of the image to the given (integral) (x0, y0)
The arguments here may be either (x0, y0) or a PositionI instance.
Or you can provide x0, y0 as named kwargs.
In terms of the rows and columns, x0 is the new x value for the first column,
and y0 is the new y value of the first row. For example::
>>> im = galsim.Image(numpy.array(range(16),dtype=float).reshape((4,4)))
>>> im(1,1)
0.0
>>> im(4,1)
3.0
>>> im(1,4)
12.0
>>> im(4,4)
15.0
>>> im.setOrigin(0,0)
>>> im(0,0)
0.0
>>> im(3,0)
3.0
>>> im(0,3)
12.0
>>> im(3,3)
15.0
>>> im.setOrigin(234,456)
>>> im(234,456)
0.0
>>> im.bounds
galsim.BoundsI(xmin=234, xmax=237, ymin=456, ymax=459)
"""
origin = parse_pos_args(args, kwargs, 'x0', 'y0', integer=True)
self._shift(origin - self.origin)
@property
def center(self):
"""The current nominal center (xcen,ycen) of the image as a PositionI instance.
In terms of the rows and columns, xcen is the x value for the central column, and ycen
is the y value of the central row. For even-sized arrays, there is no central column
or row, so the convention we adopt in this case is to round up. For example::
>>> im = galsim.Image(numpy.array(range(16),dtype=float).reshape((4,4)))
>>> im.center
galsim.PositionI(x=3, y=3)
>>> im(im.center)
10.0
>>> im.setCenter(56,72)
>>> im.center
galsim.PositionI(x=56, y=72)
>>> im(im.center)
10.0
"""
return self.bounds.center
    @property
    def true_center(self):
        """The current true center of the image as a PositionD instance.
        Unlike the nominal center returned by im.center, this value may be half-way between
        two pixels if the image has an even number of rows or columns.  It gives the position
        (x,y) at the exact center of the image, regardless of whether this is at the center of
        a pixel (integer value) or halfway between two (half-integer).  For example::
            >>> im = galsim.Image(numpy.array(range(16),dtype=float).reshape((4,4)))
            >>> im.center
            galsim.PositionI(x=3, y=3)
            >>> im.true_center
            galsim.PositionD(x=2.5, y=2.5)
            >>> im.setCenter(56,72)
            >>> im.center
            galsim.PositionI(x=56, y=72)
            >>> im.true_center
            galsim.PositionD(x=55.5, y=71.5)
            >>> im.setOrigin(0,0)
            >>> im.true_center
            galsim.PositionD(x=1.5, y=1.5)
        """
        return self.bounds.true_center
@property
def origin(self):
"""Return the origin of the image. i.e. the (x,y) position of the lower-left pixel.
In terms of the rows and columns, this is the (x,y) coordinate of the first column, and
first row of the array. For example::
>>> im = galsim.Image(numpy.array(range(16),dtype=float).reshape((4,4)))
>>> im.origin
galsim.PositionI(x=1, y=1)
>>> im(im.origin)
0.0
>>> im.setOrigin(23,45)
>>> im.origin
galsim.PositionI(x=23, y=45)
>>> im(im.origin)
0.0
>>> im(23,45)
0.0
>>> im.bounds
galsim.BoundsI(xmin=23, xmax=26, ymin=45, ymax=48)
"""
return self.bounds.origin
def __call__(self, *args, **kwargs):
"""Get the pixel value at given position
The arguments here may be either (x, y) or a PositionI instance.
Or you can provide x, y as named kwargs.
"""
pos = parse_pos_args(args, kwargs, 'x', 'y', integer=True)
return self.getValue(pos.x,pos.y)
def getValue(self, x, y):
"""This method is a synonym for im(x,y). It is a bit faster than im(x,y), since GalSim
does not have to parse the different options available for __call__. (i.e. im(x,y) or
im(pos) or im(x=x,y=y))
Parameters:
x: The x coordinate of the pixel to get.
y: The y coordinate of the pixel to get.
"""
if not self.bounds.isDefined():
raise GalSimUndefinedBoundsError("Attempt to access values of an undefined image")
if not self.bounds.includes(x,y):
raise GalSimBoundsError("Attempt to access position not in bounds of image.",
PositionI(x,y), self.bounds)
return self._getValue(x,y)
def _getValue(self, x, y):
"""Equivalent to `getValue`, except there are no checks that the values fall
within the bounds of the image.
"""
return self._array[y-self.ymin, x-self.xmin]
def setValue(self, *args, **kwargs):
"""Set the pixel value at given (x,y) position
The arguments here may be either (x, y, value) or (pos, value) where pos is a PositionI.
Or you can provide x, y, value as named kwargs.
This is equivalent to self[x,y] = rhs
"""
if self.isconst:
raise GalSimImmutableError("Cannot modify the values of an immutable Image", self)
if not self.bounds.isDefined():
raise GalSimUndefinedBoundsError("Attempt to set value of an undefined image")
pos, value = parse_pos_args(args, kwargs, 'x', 'y', integer=True, others=['value'])
if not self.bounds.includes(pos):
raise GalSimBoundsError("Attempt to set position not in bounds of image",
pos, self.bounds)
self._setValue(pos.x,pos.y,value)
def _setValue(self, x, y, value):
"""Equivalent to `setValue` except that there are no checks that the values
fall within the bounds of the image, and the coordinates must be given as ``x``, ``y``.
Parameters:
x: The x coordinate of the pixel to set.
y: The y coordinate of the pixel to set.
value: The value to set the pixel to.
"""
self._array[y-self.ymin, x-self.xmin] = value
def addValue(self, *args, **kwargs):
"""Add some amount to the pixel value at given (x,y) position
The arguments here may be either (x, y, value) or (pos, value) where pos is a PositionI.
Or you can provide x, y, value as named kwargs.
This is equivalent to self[x,y] += rhs
"""
if self.isconst:
raise GalSimImmutableError("Cannot modify the values of an immutable Image", self)
if not self.bounds.isDefined():
raise GalSimUndefinedBoundsError("Attempt to set value of an undefined image")
pos, value = parse_pos_args(args, kwargs, 'x', 'y', integer=True, others=['value'])
if not self.bounds.includes(pos):
raise GalSimBoundsError("Attempt to set position not in bounds of image",
pos,self.bounds)
self._addValue(pos.x,pos.y,value)
def _addValue(self, x, y, value):
"""Equivalent to `addValue` except that there are no checks that the values
fall within the bounds of the image, and the coordinates must be given as ``x``, ``y``.
Parameters:
x: The x coordinate of the pixel to add to.
y: The y coordinate of the pixel to add to.
value: The value to add to this pixel.
"""
self._array[y-self.ymin, x-self.xmin] += value
def fill(self, value):
"""Set all pixel values to the given ``value``
Parameter:
value: The value to set all the pixels to.
"""
if self.isconst:
raise GalSimImmutableError("Cannot modify the values of an immutable Image", self)
if not self.bounds.isDefined():
raise GalSimUndefinedBoundsError("Attempt to set values of an undefined image")
self._fill(value)
def _fill(self, value):
"""Equivalent to `fill`, except that there are no checks that the bounds are defined.
"""
self._array[:,:] = value
def setZero(self):
"""Set all pixel values to zero.
"""
if self.isconst:
raise GalSimImmutableError("Cannot modify the values of an immutable Image", self)
self._fill(0) # This might be made faster with a C++ call to use memset
def invertSelf(self):
"""Set all pixel values to their inverse: x -> 1/x.
Note: any pixels whose value is 0 originally are ignored. They remain equal to 0
on the output, rather than turning into inf.
"""
if self.isconst:
raise GalSimImmutableError("Cannot modify the values of an immutable Image", self)
if not self.bounds.isDefined():
raise GalSimUndefinedBoundsError("Attempt to set values of an undefined image")
self._invertSelf()
    def _invertSelf(self):
        """Equivalent to `invertSelf`, except that there are no checks that the bounds are defined.
        """
        # Implemented in C++; zero-valued pixels are skipped so 1/0 -> 0 instead of inf.
        _galsim.invertImage(self._image)
def replaceNegative(self, replace_value=0):
"""Replace any negative values currently in the image with 0 (or some other value).
Sometimes FFT drawing can result in tiny negative values, which may be undesirable for
some purposes. This method replaces those values with 0 or some other value if desired.
Parameters:
replace_value: The value with which to replace any negative pixels. [default: 0]
"""
self.array[self.array<0] = replace_value
    def calculateHLR(self, center=None, flux=None, flux_frac=0.5):
        """Returns the half-light radius of a drawn object.
        This method is equivalent to `GSObject.calculateHLR` when the object has already been
        been drawn onto an image.  Note that the profile should be drawn using a method that
        integrates over pixels and does not add noise. (The default method='auto' is acceptable.)
        If the image has a wcs other than a `PixelScale`, an AttributeError will be raised.
        Parameters:
            center:     The position in pixels to use for the center, r=0.
                        [default: self.true_center]
            flux:       The total flux.  [default: sum(self.array)]
            flux_frac:  The fraction of light to be enclosed by the returned radius.
                        [default: 0.5]
        Returns:
            an estimate of the half-light radius in physical units defined by the pixel scale.
        """
        if center is None:
            center = self.true_center
        if flux is None:
            flux = np.sum(self.array, dtype=float)
        # Use radii at centers of pixels as approximation to the radial integral
        x,y = self.get_pixel_centers()
        x -= center.x
        y -= center.y
        rsq = x*x + y*y
        # Sort pixels by (squared) radius from the center.
        indx = np.argsort(rsq.ravel())
        rsqf = rsq.ravel()[indx]
        data = self.array.ravel()[indx]
        # Cumulative enclosed flux as a function of radius.
        cumflux = np.cumsum(data, dtype=float)
        # Find the first value with cumflux > flux_frac * flux.
        # (argmax on a boolean array returns the index of the first True.)
        k = np.argmax(cumflux > flux_frac * flux)
        flux_k = cumflux[k] / flux  # normalize to unit total flux
        # Interpolate (linearly) between this and the previous value.
        if k == 0:
            # All the required flux is inside the first radius; scale it down proportionally.
            hlrsq = rsqf[0] * (flux_frac / flux_k)
        else:
            fkm1 = cumflux[k-1] / flux
            # For brevity in the next formula:
            fk = flux_k
            f = flux_frac
            # Linear interpolation in (enclosed flux, r^2) between samples k-1 and k.
            hlrsq = (rsqf[k-1] * (fk-f) + rsqf[k] * (f-fkm1)) / (fk-fkm1)
        # This has all been done in pixels.  So normalize according to the pixel scale.
        hlr = np.sqrt(hlrsq) * self.scale
        return hlr
    def calculateMomentRadius(self, center=None, flux=None, rtype='det'):
        """Returns an estimate of the radius based on unweighted second moments of a drawn object.
        This method is equivalent to `GSObject.calculateMomentRadius` when the object has already
        been drawn onto an image.  Note that the profile should be drawn using a method that
        integrates over pixels and does not add noise. (The default method='auto' is acceptable.)
        If the image has a wcs other than a `PixelScale`, an AttributeError will be raised.
        Parameters:
            center:     The position in pixels to use for the center, r=0.
                        [default: self.true_center]
            flux:       The total flux.  [default: sum(self.array)]
            rtype:      There are three options for this parameter:
                        - 'trace' means return sqrt(T/2)
                        - 'det' means return det(Q)^1/4
                        - 'both' means return both: (sqrt(T/2), det(Q)^1/4)
                        [default: 'det']
        Returns:
            an estimate of the radius in physical units defined by the pixel scale
            (or both estimates if rtype == 'both').
        """
        # Validate rtype up front so the branches below cover every remaining case.
        if rtype not in ('trace', 'det', 'both'):
            raise GalSimValueError("Invalid rtype.", rtype, ('trace', 'det', 'both'))
        if center is None:
            center = self.true_center
        if flux is None:
            flux = np.sum(self.array, dtype=float)
        # Use radii at centers of pixels as approximation to the radial integral
        x,y = self.get_pixel_centers()
        x -= center.x
        y -= center.y
        if rtype in ('trace', 'both'):
            # Calculate trace measure: Irr = Ixx + Iyy, and sigma = sqrt(T/2).
            rsq = x*x + y*y
            Irr = np.sum(rsq * self.array, dtype=float) / flux
            # This has all been done in pixels.  So normalize according to the pixel scale.
            sigma_trace = (Irr/2.)**0.5 * self.scale
        if rtype in ('det', 'both'):
            # Calculate det measure: sigma = det(Q)^(1/4) from the 2x2 moment matrix Q.
            Ixx = np.sum(x*x * self.array, dtype=float) / flux
            Iyy = np.sum(y*y * self.array, dtype=float) / flux
            Ixy = np.sum(x*y * self.array, dtype=float) / flux
            # This has all been done in pixels.  So normalize according to the pixel scale.
            sigma_det = (Ixx*Iyy-Ixy**2)**0.25 * self.scale
        if rtype == 'trace':
            return sigma_trace
        elif rtype == 'det':
            return sigma_det
        else:
            return sigma_trace, sigma_det
    def calculateFWHM(self, center=None, Imax=0.):
        """Returns the full-width half-maximum (FWHM) of a drawn object.
        This method is equivalent to `GSObject.calculateFWHM` when the object has already
        been drawn onto an image.  Note that the profile should be drawn using a method that
        does not integrate over pixels, so either 'sb' or 'no_pixel'.  Also, if there is a
        significant amount of noise in the image, this method may not work well.
        If the image has a wcs other than a `PixelScale`, an AttributeError will be raised.
        Parameters:
            center:     The position in pixels to use for the center, r=0.
                        [default: self.true_center]
            Imax:       The maximum surface brightness.  [default: max(self.array)]
                        Note: If Imax is provided, and the maximum pixel value is larger than
                        this value, Imax will be updated to use the larger value.
        Returns:
            an estimate of the full-width half-maximum in physical units defined by the pixel scale.
        """
        if center is None:
            center = self.true_center
        # If the full image has a larger maximum, use that.
        Imax2 = np.max(self.array)
        if Imax2 > Imax: Imax = Imax2
        # Use radii at centers of pixels.
        x,y = self.get_pixel_centers()
        x -= center.x
        y -= center.y
        rsq = x*x + y*y
        # Sort pixels by (squared) radius from the center.
        indx = np.argsort(rsq.ravel())
        rsqf = rsq.ravel()[indx]
        data = self.array.ravel()[indx]
        # Find the first value with I < 0.5 * Imax.
        # (argmax on a boolean array returns the index of the first True.)
        k = np.argmax(data < 0.5 * Imax)
        Ik = data[k] / Imax
        # Interpolate (linearly) between this and the previous value.
        if k == 0:
            # The very first pixel is already below half-max; scale its radius down.
            rsqhm = rsqf[0] * (0.5 / Ik)
        else:
            Ikm1 = data[k-1] / Imax
            # Linear interpolation in (intensity, r^2) between samples k-1 and k.
            rsqhm = (rsqf[k-1] * (Ik-0.5) + rsqf[k] * (0.5-Ikm1)) / (Ik-Ikm1)
        # This has all been done in pixels.  So normalize according to the pixel scale.
        # Factor of 2 converts the half-max radius into the full width.
        fwhm = 2. * np.sqrt(rsqhm) * self.scale
        return fwhm
# Define a utility function to be used by the arithmetic functions below
def check_image_consistency(self, im2, integer=False):
if integer and not self.isinteger:
raise GalSimValueError("Image must have integer values.",self)
if isinstance(im2, Image):
if self.array.shape != im2.array.shape:
raise GalSimIncompatibleValuesError("Image shapes are inconsistent",
im1=self, im2=im2)
if integer and not im2.isinteger:
raise GalSimValueError("Image must have integer values.",im2)
def __add__(self, other):
self.check_image_consistency(other)
try:
a = other.array
except AttributeError:
a = other
return _Image(self.array + a, self.bounds, self.wcs)
__radd__ = __add__
def _safe_cast(self, array):
# Assign the given array to self.array, safely casting it to the required type.
# Most important is to make sure integer types round first before casting, since
# numpy's astype doesn't do any rounding.
if self.isinteger:
array = np.around(array)
return array.astype(self.array.dtype, copy=False)
def __iadd__(self, other):
self.check_image_consistency(other)
try:
a = other.array
dt = a.dtype
except AttributeError:
a = other
dt = type(a)
if dt == self.array.dtype:
self.array[:,:] += a
else:
self.array[:,:] = self._safe_cast(self.array + a)
return self
def __sub__(self, other):
self.check_image_consistency(other)
try:
a = other.array
except AttributeError:
a = other
return _Image(self.array - a, self.bounds, self.wcs)
def __rsub__(self, other):
return _Image(other-self.array, self.bounds, self.wcs)
def __isub__(self, other):
self.check_image_consistency(other)
try:
a = other.array
dt = a.dtype
except AttributeError:
a = other
dt = type(a)
if dt == self.array.dtype:
self.array[:,:] -= a
else:
self.array[:,:] = self._safe_cast(self.array - a)
return self
def __mul__(self, other):
self.check_image_consistency(other)
try:
a = other.array
except AttributeError:
a = other
return _Image(self.array * a, self.bounds, self.wcs)
__rmul__ = __mul__
def __imul__(self, other):
self.check_image_consistency(other)
try:
a = other.array
dt = a.dtype
except AttributeError:
a = other
dt = type(a)
if dt == self.array.dtype:
self.array[:,:] *= a
else:
self.array[:,:] = self._safe_cast(self.array * a)
return self
def __div__(self, other):
self.check_image_consistency(other)
try:
a = other.array
except AttributeError:
a = other
return _Image(self.array / a, self.bounds, self.wcs)
__truediv__ = __div__
def __rdiv__(self, other):
return _Image(other / self.array, self.bounds, self.wcs)
__rtruediv__ = __rdiv__
def __idiv__(self, other):
self.check_image_consistency(other)
try:
a = other.array
dt = a.dtype
except AttributeError:
a = other
dt = type(a)
if dt == self.array.dtype and not self.isinteger:
# if dtype is an integer type, then numpy doesn't allow true division /= to assign
# back to an integer array. So for integers (or mixed types), don't use /=.
self.array[:,:] /= a
else:
self.array[:,:] = self._safe_cast(self.array / a)
return self
__itruediv__ = __idiv__
def __floordiv__(self, other):
self.check_image_consistency(other, integer=True)
try:
a = other.array
except AttributeError:
a = other
return _Image(self.array // a, self.bounds, self.wcs)
def __rfloordiv__(self, other):
self.check_image_consistency(other, integer=True)
return _Image(other // self.array, self.bounds, self.wcs)
def __ifloordiv__(self, other):
self.check_image_consistency(other, integer=True)
try:
a = other.array
dt = a.dtype
except AttributeError:
a = other
dt = type(a)
if dt == self.array.dtype:
self.array[:,:] //= a
else:
self.array[:,:] = self._safe_cast(self.array // a)
return self
def __mod__(self, other):
self.check_image_consistency(other, integer=True)
try:
a = other.array
except AttributeError:
a = other
return _Image(self.array % a, self.bounds, self.wcs)
def __rmod__(self, other):
self.check_image_consistency(other, integer=True)
return _Image(other % self.array, self.bounds, self.wcs)
def __imod__(self, other):
self.check_image_consistency(other, integer=True)
try:
a = other.array
dt = a.dtype
except AttributeError:
a = other
dt = type(a)
if dt == self.array.dtype:
self.array[:,:] %= a
else:
self.array[:,:] = self._safe_cast(self.array % a)
return self
def __pow__(self, other):
result = self.copy()
result **= other
return result
def __ipow__(self, other):
if not isinstance(other, int) and not isinstance(other, float):
raise TypeError("Can only raise an image to a float or int power!")
self.array[:,:] **= other
return self
def __neg__(self):
result = self.copy()
result *= np.int64(-1)
return result
# Define &, ^ and | only for integer-type images
def __and__(self, other):
self.check_image_consistency(other, integer=True)
try:
a = other.array
except AttributeError:
a = other
return _Image(self.array & a, self.bounds, self.wcs)
__rand__ = __and__
def __iand__(self, other):
self.check_image_consistency(other, integer=True)
try:
self.array[:,:] &= other.array
except AttributeError:
self.array[:,:] &= other
return self
def __xor__(self, other):
self.check_image_consistency(other, integer=True)
try:
a = other.array
except AttributeError:
a = other
return _Image(self.array ^ a, self.bounds, self.wcs)
__rxor__ = __xor__
def __ixor__(self, other):
self.check_image_consistency(other, integer=True)
try:
self.array[:,:] ^= other.array
except AttributeError:
self.array[:,:] ^= other
return self
def __or__(self, other):
self.check_image_consistency(other, integer=True)
try:
a = other.array
except AttributeError:
a = other
return _Image(self.array | a, self.bounds, self.wcs)
__ror__ = __or__
def __ior__(self, other):
self.check_image_consistency(other, integer=True)
try:
self.array[:,:] |= other.array
except AttributeError:
self.array[:,:] |= other
return self
def transpose(self):
"""Return the tranpose of the image.
Note: The returned image will have an undefined wcs.
If you care about the wcs, you will need to set it yourself.
"""
bT = _BoundsI(self.ymin, self.ymax, self.xmin, self.xmax)
return _Image(self.array.T, bT, None)
def flip_lr(self):
"""Return a version of the image flipped left to right.
Note: The returned image will have an undefined wcs.
If you care about the wcs, you will need to set it yourself.
"""
return _Image(self.array[:,::-1], self._bounds, None)
def flip_ud(self):
"""Return a version of the image flipped top to bottom.
Note: The returned image will have an undefined wcs.
If you care about the wcs, you will need to set it yourself.
"""
return _Image(self.array[::-1,:], self._bounds, None)
def rot_cw(self):
"""Return a version of the image rotated 90 degrees clockwise.
Note: The returned image will have an undefined wcs.
If you care about the wcs, you will need to set it yourself.
"""
bT = _BoundsI(self.ymin, self.ymax, self.xmin, self.xmax)
return _Image(self.array.T[::-1,:], bT, None)
def rot_ccw(self):
"""Return a version of the image rotated 90 degrees counter-clockwise.
Note: The returned image will have an undefined wcs.
If you care about the wcs, you will need to set it yourself.
"""
bT = _BoundsI(self.ymin, self.ymax, self.xmin, self.xmax)
return _Image(self.array.T[:,::-1], bT, None)
def rot_180(self):
"""Return a version of the image rotated 180 degrees.
Note: The returned image will have an undefined wcs.
If you care about the wcs, you will need to set it yourself.
"""
return _Image(self.array[::-1,::-1], self._bounds, None)
def depixelize(self, x_interpolant):
    """Return a depixelized version of the image.

    Specifically, this function creates an image that could be used with `InterpolatedImage`
    with the given x_interpolant, which when drawn with method=auto would produce the
    current image.

        >>> alt_image = image.depixelize(x_interpolant)
        >>> ii = galsim.InterpolatedImage(alt_image, x_interpolant=x_interpolant)
        >>> image2 = ii.drawImage(image.copy(), method='auto')

    image2 will end up approximately equal to the original image.

    .. warning::

        This function is fairly expensive, both in memory and CPU time, so it should
        only be called on fairly small images (~100x100 or smaller typically).
        The memory requirement scales as Npix^2, and the execution time scales as Npix^3.

        However, the expensive part of the calculation is independent of the image values.
        It only depends on the size of the image and interpolant being used. So this part
        of the calculation is cached and reused if possible. If you make repeated calls
        to depixelize using the same image size and interpolant, it will be much faster
        after the first call.

        If you need to release the cache (since it can be a non-trivial amount of memory),
        you may do so using `Image.clear_depixelize_cache`.

    Parameters:
        x_interpolant:  The `Interpolant` to use in the `InterpolatedImage` to describe
                        how the profile should be interpolated between the pixel centers.

    Returns:
        an `Image` representing the underlying profile without the pixel convolution.
    """
    # (Removed an unused local ``npix = nx * ny`` that was never referenced.)
    ny, nx = self.array.shape
    # Each kernel is the integral of the interpolant over 1 pixel.
    unit_integrals = x_interpolant.unit_integrals(max_len=max(nx,ny))
    # The rest of the implementation is done in C++. cf. src/Image.cpp
    im2 = self.copy()
    # Raw C data pointer of the integrals buffer, handed to the C++ layer.
    _unit_integrals = unit_integrals.__array_interface__['data'][0]
    _galsim.depixelizeImage(im2._image, _unit_integrals, unit_integrals.size)
    return im2
@staticmethod
def clear_depixelize_cache():
    """Release the cached solver used by depixelize to make repeated calls more efficient.

    The cache can hold a non-trivial amount of memory (it scales as Npix^2 for the
    image sizes previously depixelized), so call this when you are done depixelizing.
    """
    _galsim.ClearDepixelizeCache()
def __eq__(self, other):
    """Check whether two images are equal: same bounds, wcs, constness, and pixel values."""
    # Note that numpy.array_equal can return True if the dtypes of the two arrays
    # involved are different, as long as the contents of the two arrays are logically
    # the same. For example:
    #
    # >>> double_array = np.arange(1024).reshape(32, 32)*np.pi
    # >>> int_array = np.arange(1024).reshape(32, 32)
    # >>> assert galsim.ImageD(int_array) == galsim.ImageF(int_array)        # passes
    # >>> assert galsim.ImageD(double_array) == galsim.ImageF(double_array)  # fails
    if self is other:
        return True
    if not isinstance(other, Image):
        return False
    if self.bounds != other.bounds or self.wcs != other.wcs:
        return False
    if self.isconst != other.isconst:
        return False
    # Only compare pixel data when the bounds are defined (i.e. there are pixels).
    return (not self.bounds.isDefined()) or np.array_equal(self.array, other.array)
def __ne__(self, other):
    """Inverse of __eq__."""
    return not self.__eq__(other)

# Images are mutable, so they should not be used as dict keys / set members.
__hash__ = None
def _Image(array, bounds, wcs):
    """Equivalent to ``Image(array, bounds, wcs)``, but without the overhead of sanity checks,
    and the other options for how to provide the arguments.
    """
    obj = Image.__new__(Image)
    obj.wcs = wcs
    dtype = array.dtype.type
    # Normalize platform-dependent dtype aliases to the canonical Image dtype.
    canonical = Image._alias_dtypes.get(dtype)
    if canonical is not None:
        dtype = canonical
        array = array.astype(dtype)
    obj._dtype = dtype
    obj._array = array
    obj._bounds = bounds
    return obj
# These are essentially aliases for the regular Image with the correct dtype
def ImageUS(*args, **kwargs):
    """Alias for galsim.Image(..., dtype=numpy.uint16)"""
    return Image(*args, **dict(kwargs, dtype=np.uint16))
def ImageUI(*args, **kwargs):
    """Alias for galsim.Image(..., dtype=numpy.uint32)"""
    return Image(*args, **dict(kwargs, dtype=np.uint32))
def ImageS(*args, **kwargs):
    """Alias for galsim.Image(..., dtype=numpy.int16)"""
    return Image(*args, **dict(kwargs, dtype=np.int16))
def ImageI(*args, **kwargs):
    """Alias for galsim.Image(..., dtype=numpy.int32)"""
    return Image(*args, **dict(kwargs, dtype=np.int32))
def ImageF(*args, **kwargs):
    """Alias for galsim.Image(..., dtype=numpy.float32)"""
    return Image(*args, **dict(kwargs, dtype=np.float32))
def ImageD(*args, **kwargs):
    """Alias for galsim.Image(..., dtype=numpy.float64)"""
    return Image(*args, **dict(kwargs, dtype=np.float64))
def ImageCF(*args, **kwargs):
    """Alias for galsim.Image(..., dtype=numpy.complex64)"""
    return Image(*args, **dict(kwargs, dtype=np.complex64))
def ImageCD(*args, **kwargs):
    """Alias for galsim.Image(..., dtype=numpy.complex128)"""
    return Image(*args, **dict(kwargs, dtype=np.complex128))
# Put this at the end to avoid circular imports
from .wcs import BaseWCS, PixelScale, JacobianWCS
|
GalSim-developersREPO_NAMEGalSimPATH_START.@GalSim_extracted@GalSim-main@galsim@image.py@.PATH_END.py
|
{
"filename": "get_colors.py",
"repo_name": "annayqho/TheCannon",
"repo_path": "TheCannon_extracted/TheCannon-master/code/lamost/mass_age/cn/get_colors.py",
"type": "Python"
}
|
import pyfits
import numpy as np
def get_colors(catalog):
    """
    Pull photometric colors from a FITS catalog.

    Parameters
    ----------
    catalog: filename
        FITS table containing LAMOST IDs plus g, r, i, J, H, K, W1, W2
        magnitudes and their errors.

    Returns
    -------
    all_ids: ndarray
        Whitespace-stripped LAMOST IDs.
    col: ndarray, shape (7, nobj)
        Adjacent-band colors in the stacking order below:
        g-r, r-i, i-J, J-H, H-K, K-W2, W2-W1.
    col_ivar: ndarray, shape (7, nobj)
        Inverse variances of those colors.
    """
    print("Get Colors")
    a = pyfits.open(catalog)
    data = a[1].data
    a.close()
    all_ids = data['LAMOST_ID_1']
    all_ids = np.array([val.strip() for val in all_ids])
    # G magnitude
    gmag = data['gpmag']
    gmag_err = data['e_gpmag']
    # R magnitude
    rmag = data['rpmag']
    rmag_err = data['e_rpmag']
    # I magnitude
    imag = data['ipmag']
    imag_err = data['e_ipmag']
    # W1
    W1 = data['W1mag']
    W1_err = data['e_W1mag']
    # W2  (was mislabeled "W1" in an earlier comment)
    W2 = data['W2mag']
    W2_err = data['e_W2mag']
    # J magnitude
    Jmag = data['Jmag']
    Jmag_err = data['e_Jmag']
    # H magnitude
    Hmag = data['Hmag']
    Hmag_err = data['e_Hmag']
    # K magnitude
    Kmag = data['Kmag']
    Kmag_err = data['e_Kmag']
    # Stack in the order used to form adjacent-band colors
    mag = np.vstack((
        gmag, rmag, imag, Jmag, Hmag, Kmag, W2, W1)) # 8, nobj
    mag_err = np.vstack((
        gmag_err, rmag_err, imag_err, Jmag_err,
        Hmag_err, Kmag_err, W2_err, W1_err))
    # Make g-r, r-i, i-J, etc: differences of adjacent rows of the stack
    col = mag[:-1] - mag[1:]
    # Inverse variance of a difference: 1 / (sigma1^2 + sigma2^2)
    col_ivar = 1/(mag_err[:-1]**2 + mag_err[1:]**2)
    # There's something wrong with the i-band, I think..so the second color r-i
    #bad = col[:,1] < 0.0
    #col_ivar[bad] = 0.0
    return all_ids, col, col_ivar
|
annayqhoREPO_NAMETheCannonPATH_START.@TheCannon_extracted@TheCannon-master@code@lamost@mass_age@cn@get_colors.py@.PATH_END.py
|
{
"filename": "searchapi.ipynb",
"repo_name": "langchain-ai/langchain",
"repo_path": "langchain_extracted/langchain-master/docs/docs/integrations/tools/searchapi.ipynb",
"type": "Jupyter Notebook"
}
|
# SearchApi
This notebook shows examples of how to use SearchApi to search the web. Go to [https://www.searchapi.io/](https://www.searchapi.io/) to sign up for a free account and get API key.
```python
import os
os.environ["SEARCHAPI_API_KEY"] = ""
```
```python
from langchain_community.utilities import SearchApiAPIWrapper
```
```python
search = SearchApiAPIWrapper()
```
```python
search.run("Obama's first name?")
```
'Barack Hussein Obama II'
## Using as part of a Self Ask With Search Chain
```python
os.environ["OPENAI_API_KEY"] = ""
```
```python
from langchain.agents import AgentType, initialize_agent
from langchain_community.utilities import SearchApiAPIWrapper
from langchain_core.tools import Tool
from langchain_openai import OpenAI
llm = OpenAI(temperature=0)
search = SearchApiAPIWrapper()
tools = [
Tool(
name="Intermediate Answer",
func=search.run,
description="useful for when you need to ask with search",
)
]
self_ask_with_search = initialize_agent(
tools, llm, agent=AgentType.SELF_ASK_WITH_SEARCH, verbose=True
)
self_ask_with_search.run("Who lived longer: Plato, Socrates, or Aristotle?")
```
[1m> Entering new AgentExecutor chain...[0m
[32;1m[1;3m Yes.
Follow up: How old was Plato when he died?[0m
Intermediate answer: [36;1m[1;3meighty[0m
[32;1m[1;3mFollow up: How old was Socrates when he died?[0m
Intermediate answer: [36;1m[1;3m| Socrates |
| -------- |
| Born | c. 470 BC Deme Alopece, Athens |
| Died | 399 BC (aged approximately 71) Athens |
| Cause of death | Execution by forced suicide by poisoning |
| Spouse(s) | Xanthippe, Myrto |
[0m
[32;1m[1;3mFollow up: How old was Aristotle when he died?[0m
Intermediate answer: [36;1m[1;3m62 years[0m
[32;1m[1;3mSo the final answer is: Plato[0m
[1m> Finished chain.[0m
'Plato'
## Custom parameters
SearchApi wrapper can be customized to use different engines like [Google News](https://www.searchapi.io/docs/google-news), [Google Jobs](https://www.searchapi.io/docs/google-jobs), [Google Scholar](https://www.searchapi.io/docs/google-scholar), or others which can be found in [SearchApi](https://www.searchapi.io/docs/google) documentation. All parameters supported by SearchApi can be passed when executing the query.
```python
search = SearchApiAPIWrapper(engine="google_jobs")
```
```python
search.run("AI Engineer", location="Portugal", gl="pt")[0:500]
```
'Azure AI Engineer Be an XpanderCandidatar-meCandidatar-meCandidatar-me\n\nShare:\n\nAzure AI Engineer\n\nA área Digital Xperience da Xpand IT é uma equipa tecnológica de rápido crescimento que se concentra em tecnologias Microsoft e Mobile. A sua principal missão é fornecer soluções de software de alta qualidade que atendam às necessidades do utilizador final, num mundo tecnológico continuamente exigente e em ritmo acelerado, proporcionando a melhor experiência em termos de personalização, performance'
## Getting results with metadata
```python
import pprint
```
```python
search = SearchApiAPIWrapper(engine="google_scholar")
results = search.results("Large Language Models")
pprint.pp(results)
```
{'search_metadata': {'id': 'search_qVdXG2jzvrlqTzayeYoaOb8A',
'status': 'Success',
'created_at': '2023-09-25T15:22:30Z',
'request_time_taken': 3.21,
'parsing_time_taken': 0.03,
'total_time_taken': 3.24,
'request_url': 'https://scholar.google.com/scholar?q=Large+Language+Models&hl=en',
'html_url': 'https://www.searchapi.io/api/v1/searches/search_qVdXG2jzvrlqTzayeYoaOb8A.html',
'json_url': 'https://www.searchapi.io/api/v1/searches/search_qVdXG2jzvrlqTzayeYoaOb8A'},
'search_parameters': {'engine': 'google_scholar',
'q': 'Large Language Models',
'hl': 'en'},
'search_information': {'query_displayed': 'Large Language Models',
'total_results': 6420000,
'page': 1,
'time_taken_displayed': 0.06},
'organic_results': [{'position': 1,
'title': 'ChatGPT for good? On opportunities and '
'challenges of large language models for '
'education',
'data_cid': 'uthwmf2nU3EJ',
'link': 'https://www.sciencedirect.com/science/article/pii/S1041608023000195',
'publication': 'E Kasneci, K Seßler, S Küchemann, M '
'Bannert… - Learning and individual …, '
'2023 - Elsevier',
'snippet': '… state of large language models and their '
'applications. We then highlight how these '
'models can be … With regard to challenges, '
'we argue that large language models in '
'education require …',
'inline_links': {'cited_by': {'cites_id': '8166055256995715258',
'total': 410,
'link': 'https://scholar.google.com/scholar?cites=8166055256995715258&as_sdt=5,33&sciodt=0,33&hl=en'},
'versions': {'cluster_id': '8166055256995715258',
'total': 10,
'link': 'https://scholar.google.com/scholar?cluster=8166055256995715258&hl=en&as_sdt=0,33'},
'related_articles_link': 'https://scholar.google.com/scholar?q=related:uthwmf2nU3EJ:scholar.google.com/&scioq=Large+Language+Models&hl=en&as_sdt=0,33'},
'resource': {'name': 'edarxiv.org',
'format': 'PDF',
'link': 'https://edarxiv.org/5er8f/download?format=pdf'},
'authors': [{'name': 'E Kasneci',
'id': 'bZVkVvoAAAAJ',
'link': 'https://scholar.google.com/citations?user=bZVkVvoAAAAJ&hl=en&oi=sra'},
{'name': 'K Seßler',
'id': 'MbMBoN4AAAAJ',
'link': 'https://scholar.google.com/citations?user=MbMBoN4AAAAJ&hl=en&oi=sra'},
{'name': 'S Küchemann',
'id': 'g1jX5QUAAAAJ',
'link': 'https://scholar.google.com/citations?user=g1jX5QUAAAAJ&hl=en&oi=sra'},
{'name': 'M Bannert',
'id': 'TjfQ8QkAAAAJ',
'link': 'https://scholar.google.com/citations?user=TjfQ8QkAAAAJ&hl=en&oi=sra'}]},
{'position': 2,
'title': 'Large language models in medicine',
'data_cid': 'Ph9AwHTmhzAJ',
'link': 'https://www.nature.com/articles/s41591-023-02448-8',
'publication': 'AJ Thirunavukarasu, DSJ Ting, K '
'Elangovan… - Nature medicine, 2023 - '
'nature.com',
'snippet': '… HuggingChat offers a free-to-access '
'chatbot with a similar interface to ChatGPT '
'but uses Large Language Model Meta AI '
'(LLaMA) as its backend model 30 . Finally, '
'cheap imitations of …',
'inline_links': {'cited_by': {'cites_id': '3497017024792502078',
'total': 25,
'link': 'https://scholar.google.com/scholar?cites=3497017024792502078&as_sdt=5,33&sciodt=0,33&hl=en'},
'versions': {'cluster_id': '3497017024792502078',
'total': 3,
'link': 'https://scholar.google.com/scholar?cluster=3497017024792502078&hl=en&as_sdt=0,33'}},
'authors': [{'name': 'AJ Thirunavukarasu',
'id': '3qb1AYwAAAAJ',
'link': 'https://scholar.google.com/citations?user=3qb1AYwAAAAJ&hl=en&oi=sra'},
{'name': 'DSJ Ting',
'id': 'KbrpC8cAAAAJ',
'link': 'https://scholar.google.com/citations?user=KbrpC8cAAAAJ&hl=en&oi=sra'},
{'name': 'K Elangovan',
'id': 'BE_lVTQAAAAJ',
'link': 'https://scholar.google.com/citations?user=BE_lVTQAAAAJ&hl=en&oi=sra'}]},
{'position': 3,
'title': 'Extracting training data from large language '
'models',
'data_cid': 'mEYsWK6bWKoJ',
'link': 'https://www.usenix.org/conference/usenixsecurity21/presentation/carlini-extracting',
'publication': 'N Carlini, F Tramer, E Wallace, M '
'Jagielski… - 30th USENIX Security …, '
'2021 - usenix.org',
'snippet': '… language model trained on scrapes of the '
'public Internet, and are able to extract '
'hundreds of verbatim text sequences from the '
'model’… models are more vulnerable than '
'smaller models. …',
'inline_links': {'cited_by': {'cites_id': '12274731957504198296',
'total': 742,
'link': 'https://scholar.google.com/scholar?cites=12274731957504198296&as_sdt=5,33&sciodt=0,33&hl=en'},
'versions': {'cluster_id': '12274731957504198296',
'total': 8,
'link': 'https://scholar.google.com/scholar?cluster=12274731957504198296&hl=en&as_sdt=0,33'},
'related_articles_link': 'https://scholar.google.com/scholar?q=related:mEYsWK6bWKoJ:scholar.google.com/&scioq=Large+Language+Models&hl=en&as_sdt=0,33',
'cached_page_link': 'https://scholar.googleusercontent.com/scholar?q=cache:mEYsWK6bWKoJ:scholar.google.com/+Large+Language+Models&hl=en&as_sdt=0,33'},
'resource': {'name': 'usenix.org',
'format': 'PDF',
'link': 'https://www.usenix.org/system/files/sec21-carlini-extracting.pdf'},
'authors': [{'name': 'N Carlini',
'id': 'q4qDvAoAAAAJ',
'link': 'https://scholar.google.com/citations?user=q4qDvAoAAAAJ&hl=en&oi=sra'},
{'name': 'F Tramer',
'id': 'ijH0-a8AAAAJ',
'link': 'https://scholar.google.com/citations?user=ijH0-a8AAAAJ&hl=en&oi=sra'},
{'name': 'E Wallace',
'id': 'SgST3LkAAAAJ',
'link': 'https://scholar.google.com/citations?user=SgST3LkAAAAJ&hl=en&oi=sra'},
{'name': 'M Jagielski',
'id': '_8rw_GMAAAAJ',
'link': 'https://scholar.google.com/citations?user=_8rw_GMAAAAJ&hl=en&oi=sra'}]},
{'position': 4,
'title': 'Emergent abilities of large language models',
'data_cid': 'hG0iVOrOguoJ',
'link': 'https://arxiv.org/abs/2206.07682',
'publication': 'J Wei, Y Tay, R Bommasani, C Raffel, B '
'Zoph… - arXiv preprint arXiv …, 2022 - '
'arxiv.org',
'snippet': 'Scaling up language models has been shown to '
'predictably improve performance and sample '
'efficiency on a wide range of downstream '
'tasks. This paper instead discusses an …',
'inline_links': {'cited_by': {'cites_id': '16898296257676733828',
'total': 621,
'link': 'https://scholar.google.com/scholar?cites=16898296257676733828&as_sdt=5,33&sciodt=0,33&hl=en'},
'versions': {'cluster_id': '16898296257676733828',
'total': 12,
'link': 'https://scholar.google.com/scholar?cluster=16898296257676733828&hl=en&as_sdt=0,33'},
'related_articles_link': 'https://scholar.google.com/scholar?q=related:hG0iVOrOguoJ:scholar.google.com/&scioq=Large+Language+Models&hl=en&as_sdt=0,33',
'cached_page_link': 'https://scholar.googleusercontent.com/scholar?q=cache:hG0iVOrOguoJ:scholar.google.com/+Large+Language+Models&hl=en&as_sdt=0,33'},
'resource': {'name': 'arxiv.org',
'format': 'PDF',
'link': 'https://arxiv.org/pdf/2206.07682.pdf?trk=cndc-detail'},
'authors': [{'name': 'J Wei',
'id': 'wA5TK_0AAAAJ',
'link': 'https://scholar.google.com/citations?user=wA5TK_0AAAAJ&hl=en&oi=sra'},
{'name': 'Y Tay',
'id': 'VBclY_cAAAAJ',
'link': 'https://scholar.google.com/citations?user=VBclY_cAAAAJ&hl=en&oi=sra'},
{'name': 'R Bommasani',
'id': 'WMBXw1EAAAAJ',
'link': 'https://scholar.google.com/citations?user=WMBXw1EAAAAJ&hl=en&oi=sra'},
{'name': 'C Raffel',
'id': 'I66ZBYwAAAAJ',
'link': 'https://scholar.google.com/citations?user=I66ZBYwAAAAJ&hl=en&oi=sra'},
{'name': 'B Zoph',
'id': 'NL_7iTwAAAAJ',
'link': 'https://scholar.google.com/citations?user=NL_7iTwAAAAJ&hl=en&oi=sra'}]},
{'position': 5,
'title': 'A survey on evaluation of large language '
'models',
'data_cid': 'ZYohnzOz-XgJ',
'link': 'https://arxiv.org/abs/2307.03109',
'publication': 'Y Chang, X Wang, J Wang, Y Wu, K Zhu… - '
'arXiv preprint arXiv …, 2023 - arxiv.org',
'snippet': '… 3.1 Natural Language Processing Tasks … '
'the development of language models, '
'particularly large language models, was to '
'enhance performance on natural language '
'processing tasks, …',
'inline_links': {'cited_by': {'cites_id': '8717195588046785125',
'total': 31,
'link': 'https://scholar.google.com/scholar?cites=8717195588046785125&as_sdt=5,33&sciodt=0,33&hl=en'},
'versions': {'cluster_id': '8717195588046785125',
'total': 3,
'link': 'https://scholar.google.com/scholar?cluster=8717195588046785125&hl=en&as_sdt=0,33'},
'cached_page_link': 'https://scholar.googleusercontent.com/scholar?q=cache:ZYohnzOz-XgJ:scholar.google.com/+Large+Language+Models&hl=en&as_sdt=0,33'},
'resource': {'name': 'arxiv.org',
'format': 'PDF',
'link': 'https://arxiv.org/pdf/2307.03109'},
'authors': [{'name': 'X Wang',
'id': 'Q7Ieos8AAAAJ',
'link': 'https://scholar.google.com/citations?user=Q7Ieos8AAAAJ&hl=en&oi=sra'},
{'name': 'J Wang',
'id': 'YomxTXQAAAAJ',
'link': 'https://scholar.google.com/citations?user=YomxTXQAAAAJ&hl=en&oi=sra'},
{'name': 'Y Wu',
'id': 'KVeRu2QAAAAJ',
'link': 'https://scholar.google.com/citations?user=KVeRu2QAAAAJ&hl=en&oi=sra'},
{'name': 'K Zhu',
'id': 'g75dFLYAAAAJ',
'link': 'https://scholar.google.com/citations?user=g75dFLYAAAAJ&hl=en&oi=sra'}]},
{'position': 6,
'title': 'Evaluating large language models trained on '
'code',
'data_cid': '3tNvW3l5nU4J',
'link': 'https://arxiv.org/abs/2107.03374',
'publication': 'M Chen, J Tworek, H Jun, Q Yuan, HPO '
'Pinto… - arXiv preprint arXiv …, 2021 - '
'arxiv.org',
'snippet': '… We introduce Codex, a GPT language model '
'finetuned on publicly available code from '
'GitHub, and study its Python code-writing '
'capabilities. A distinct production version '
'of Codex …',
'inline_links': {'cited_by': {'cites_id': '5664817468434011102',
'total': 941,
'link': 'https://scholar.google.com/scholar?cites=5664817468434011102&as_sdt=5,33&sciodt=0,33&hl=en'},
'versions': {'cluster_id': '5664817468434011102',
'total': 2,
'link': 'https://scholar.google.com/scholar?cluster=5664817468434011102&hl=en&as_sdt=0,33'},
'related_articles_link': 'https://scholar.google.com/scholar?q=related:3tNvW3l5nU4J:scholar.google.com/&scioq=Large+Language+Models&hl=en&as_sdt=0,33',
'cached_page_link': 'https://scholar.googleusercontent.com/scholar?q=cache:3tNvW3l5nU4J:scholar.google.com/+Large+Language+Models&hl=en&as_sdt=0,33'},
'resource': {'name': 'arxiv.org',
'format': 'PDF',
'link': 'https://arxiv.org/pdf/2107.03374.pdf?trk=public_post_comment-text'},
'authors': [{'name': 'M Chen',
'id': '5fU-QMwAAAAJ',
'link': 'https://scholar.google.com/citations?user=5fU-QMwAAAAJ&hl=en&oi=sra'},
{'name': 'J Tworek',
'id': 'ZPuESCQAAAAJ',
'link': 'https://scholar.google.com/citations?user=ZPuESCQAAAAJ&hl=en&oi=sra'},
{'name': 'Q Yuan',
'id': 'B059m2EAAAAJ',
'link': 'https://scholar.google.com/citations?user=B059m2EAAAAJ&hl=en&oi=sra'}]},
{'position': 7,
'title': 'Large language models in machine translation',
'data_cid': 'sY5m_Y3-0Y4J',
'link': 'http://research.google/pubs/pub33278.pdf',
'publication': 'T Brants, AC Popat, P Xu, FJ Och, J Dean '
'- 2007 - research.google',
'snippet': '… the benefits of largescale statistical '
'language modeling in ma… trillion tokens, '
'resulting in language models having up to '
'300 … is inexpensive to train on large data '
'sets and approaches the …',
'type': 'PDF',
'inline_links': {'cited_by': {'cites_id': '10291286509313494705',
'total': 737,
'link': 'https://scholar.google.com/scholar?cites=10291286509313494705&as_sdt=5,33&sciodt=0,33&hl=en'},
'versions': {'cluster_id': '10291286509313494705',
'total': 31,
'link': 'https://scholar.google.com/scholar?cluster=10291286509313494705&hl=en&as_sdt=0,33'},
'related_articles_link': 'https://scholar.google.com/scholar?q=related:sY5m_Y3-0Y4J:scholar.google.com/&scioq=Large+Language+Models&hl=en&as_sdt=0,33',
'cached_page_link': 'https://scholar.googleusercontent.com/scholar?q=cache:sY5m_Y3-0Y4J:scholar.google.com/+Large+Language+Models&hl=en&as_sdt=0,33'},
'resource': {'name': 'research.google',
'format': 'PDF',
'link': 'http://research.google/pubs/pub33278.pdf'},
'authors': [{'name': 'FJ Och',
'id': 'ITGdg6oAAAAJ',
'link': 'https://scholar.google.com/citations?user=ITGdg6oAAAAJ&hl=en&oi=sra'},
{'name': 'J Dean',
'id': 'NMS69lQAAAAJ',
'link': 'https://scholar.google.com/citations?user=NMS69lQAAAAJ&hl=en&oi=sra'}]},
{'position': 8,
'title': 'A watermark for large language models',
'data_cid': 'BlSyLHT4iiEJ',
'link': 'https://arxiv.org/abs/2301.10226',
'publication': 'J Kirchenbauer, J Geiping, Y Wen, J '
'Katz… - arXiv preprint arXiv …, 2023 - '
'arxiv.org',
'snippet': '… To derive this watermark, we examine what '
'happens in the language model just before it '
'produces a probability vector. The last '
'layer of the language model outputs a vector '
'of logits l(t). …',
'inline_links': {'cited_by': {'cites_id': '2417017327887471622',
'total': 104,
'link': 'https://scholar.google.com/scholar?cites=2417017327887471622&as_sdt=5,33&sciodt=0,33&hl=en'},
'versions': {'cluster_id': '2417017327887471622',
'total': 4,
'link': 'https://scholar.google.com/scholar?cluster=2417017327887471622&hl=en&as_sdt=0,33'},
'related_articles_link': 'https://scholar.google.com/scholar?q=related:BlSyLHT4iiEJ:scholar.google.com/&scioq=Large+Language+Models&hl=en&as_sdt=0,33',
'cached_page_link': 'https://scholar.googleusercontent.com/scholar?q=cache:BlSyLHT4iiEJ:scholar.google.com/+Large+Language+Models&hl=en&as_sdt=0,33'},
'resource': {'name': 'arxiv.org',
'format': 'PDF',
'link': 'https://arxiv.org/pdf/2301.10226.pdf?curius=1419'},
'authors': [{'name': 'J Kirchenbauer',
'id': '48GJrbsAAAAJ',
'link': 'https://scholar.google.com/citations?user=48GJrbsAAAAJ&hl=en&oi=sra'},
{'name': 'J Geiping',
'id': '206vNCEAAAAJ',
'link': 'https://scholar.google.com/citations?user=206vNCEAAAAJ&hl=en&oi=sra'},
{'name': 'Y Wen',
'id': 'oUYfjg0AAAAJ',
'link': 'https://scholar.google.com/citations?user=oUYfjg0AAAAJ&hl=en&oi=sra'},
{'name': 'J Katz',
'id': 'yPw4WjoAAAAJ',
'link': 'https://scholar.google.com/citations?user=yPw4WjoAAAAJ&hl=en&oi=sra'}]},
{'position': 9,
'title': 'ChatGPT and other large language models are '
'double-edged swords',
'data_cid': 'So0q8TRvxhYJ',
'link': 'https://pubs.rsna.org/doi/full/10.1148/radiol.230163',
'publication': 'Y Shen, L Heacock, J Elias, KD Hentel, B '
'Reig, G Shih… - Radiology, 2023 - '
'pubs.rsna.org',
'snippet': '… Large Language Models (LLMs) are deep '
'learning models trained to understand and '
'generate natural language. Recent studies '
'demonstrated that LLMs achieve great success '
'in a …',
'inline_links': {'cited_by': {'cites_id': '1641121387398204746',
'total': 231,
'link': 'https://scholar.google.com/scholar?cites=1641121387398204746&as_sdt=5,33&sciodt=0,33&hl=en'},
'versions': {'cluster_id': '1641121387398204746',
'total': 3,
'link': 'https://scholar.google.com/scholar?cluster=1641121387398204746&hl=en&as_sdt=0,33'},
'related_articles_link': 'https://scholar.google.com/scholar?q=related:So0q8TRvxhYJ:scholar.google.com/&scioq=Large+Language+Models&hl=en&as_sdt=0,33'},
'authors': [{'name': 'Y Shen',
'id': 'XaeN2zgAAAAJ',
'link': 'https://scholar.google.com/citations?user=XaeN2zgAAAAJ&hl=en&oi=sra'},
{'name': 'L Heacock',
'id': 'tYYM5IkAAAAJ',
'link': 'https://scholar.google.com/citations?user=tYYM5IkAAAAJ&hl=en&oi=sra'}]},
{'position': 10,
'title': 'Pythia: A suite for analyzing large language '
'models across training and scaling',
'data_cid': 'aaIDvsMAD8QJ',
'link': 'https://proceedings.mlr.press/v202/biderman23a.html',
'publication': 'S Biderman, H Schoelkopf… - '
'International …, 2023 - '
'proceedings.mlr.press',
'snippet': '… large language models, we prioritize '
'consistency in model … out the most '
'performance from each model. For example, we '
'… models, as it is becoming widely used for '
'the largest models, …',
'inline_links': {'cited_by': {'cites_id': '14127511396791067241',
'total': 89,
'link': 'https://scholar.google.com/scholar?cites=14127511396791067241&as_sdt=5,33&sciodt=0,33&hl=en'},
'versions': {'cluster_id': '14127511396791067241',
'total': 3,
'link': 'https://scholar.google.com/scholar?cluster=14127511396791067241&hl=en&as_sdt=0,33'},
'related_articles_link': 'https://scholar.google.com/scholar?q=related:aaIDvsMAD8QJ:scholar.google.com/&scioq=Large+Language+Models&hl=en&as_sdt=0,33',
'cached_page_link': 'https://scholar.googleusercontent.com/scholar?q=cache:aaIDvsMAD8QJ:scholar.google.com/+Large+Language+Models&hl=en&as_sdt=0,33'},
'resource': {'name': 'mlr.press',
'format': 'PDF',
'link': 'https://proceedings.mlr.press/v202/biderman23a/biderman23a.pdf'},
'authors': [{'name': 'S Biderman',
'id': 'bO7H0DAAAAAJ',
'link': 'https://scholar.google.com/citations?user=bO7H0DAAAAAJ&hl=en&oi=sra'},
{'name': 'H Schoelkopf',
'id': 'XLahYIYAAAAJ',
'link': 'https://scholar.google.com/citations?user=XLahYIYAAAAJ&hl=en&oi=sra'}]}],
'related_searches': [{'query': 'large language models machine',
'highlighted': ['machine'],
'link': 'https://scholar.google.com/scholar?hl=en&as_sdt=0,33&qsp=1&q=large+language+models+machine&qst=ib'},
{'query': 'large language models pruning',
'highlighted': ['pruning'],
'link': 'https://scholar.google.com/scholar?hl=en&as_sdt=0,33&qsp=2&q=large+language+models+pruning&qst=ib'},
{'query': 'large language models multitask learners',
'highlighted': ['multitask learners'],
'link': 'https://scholar.google.com/scholar?hl=en&as_sdt=0,33&qsp=3&q=large+language+models+multitask+learners&qst=ib'},
{'query': 'large language models speech recognition',
'highlighted': ['speech recognition'],
'link': 'https://scholar.google.com/scholar?hl=en&as_sdt=0,33&qsp=4&q=large+language+models+speech+recognition&qst=ib'},
{'query': 'large language models machine translation',
'highlighted': ['machine translation'],
'link': 'https://scholar.google.com/scholar?hl=en&as_sdt=0,33&qsp=5&q=large+language+models+machine+translation&qst=ib'},
{'query': 'emergent abilities of large language models',
'highlighted': ['emergent abilities of'],
'link': 'https://scholar.google.com/scholar?hl=en&as_sdt=0,33&qsp=6&q=emergent+abilities+of+large+language+models&qst=ir'},
{'query': 'language models privacy risks',
'highlighted': ['privacy risks'],
'link': 'https://scholar.google.com/scholar?hl=en&as_sdt=0,33&qsp=7&q=language+models+privacy+risks&qst=ir'},
{'query': 'language model fine tuning',
'highlighted': ['fine tuning'],
'link': 'https://scholar.google.com/scholar?hl=en&as_sdt=0,33&qsp=8&q=language+model+fine+tuning&qst=ir'}],
'pagination': {'current': 1,
'next': 'https://scholar.google.com/scholar?start=10&q=Large+Language+Models&hl=en&as_sdt=0,33',
'other_pages': {'2': 'https://scholar.google.com/scholar?start=10&q=Large+Language+Models&hl=en&as_sdt=0,33',
'3': 'https://scholar.google.com/scholar?start=20&q=Large+Language+Models&hl=en&as_sdt=0,33',
'4': 'https://scholar.google.com/scholar?start=30&q=Large+Language+Models&hl=en&as_sdt=0,33',
'5': 'https://scholar.google.com/scholar?start=40&q=Large+Language+Models&hl=en&as_sdt=0,33',
'6': 'https://scholar.google.com/scholar?start=50&q=Large+Language+Models&hl=en&as_sdt=0,33',
'7': 'https://scholar.google.com/scholar?start=60&q=Large+Language+Models&hl=en&as_sdt=0,33',
'8': 'https://scholar.google.com/scholar?start=70&q=Large+Language+Models&hl=en&as_sdt=0,33',
'9': 'https://scholar.google.com/scholar?start=80&q=Large+Language+Models&hl=en&as_sdt=0,33',
'10': 'https://scholar.google.com/scholar?start=90&q=Large+Language+Models&hl=en&as_sdt=0,33'}}}
|
langchain-aiREPO_NAMElangchainPATH_START.@langchain_extracted@langchain-master@docs@docs@integrations@tools@searchapi.ipynb@.PATH_END.py
|
{
"filename": "merge_FITS_masks.py",
"repo_name": "IanHeywood/oxkat",
"repo_path": "oxkat_extracted/oxkat-master/tools/merge_FITS_masks.py",
"type": "Python"
}
|
#!/usr/bin/env python
# ian.heywood@physics.ox.ac.uk
from astropy.io import fits
import numpy
import random
from scipy.ndimage.morphology import binary_dilation
import shutil
import sys
def genhex():
    """Return a random 32-character lowercase hexadecimal string."""
    # A random integer below 10**80 formatted as (at least) 64 hex digits,
    # truncated to the first 32 characters.
    full_hex = "%064x" % random.randrange(10**80)
    return full_hex[:32]
def getImage(fitsfile):
    """Return the image plane of the primary HDU of a FITS file as a 2D array.

    3D and 4D data are collapsed by taking index 0 of the leading axes
    (e.g. the first Stokes/frequency plane).

    Parameters
    ----------
    fitsfile : str
        Path to the FITS file to read.

    Returns
    -------
    numpy.ndarray
        A copy of the 2D image data.

    Notes
    -----
    Fix: the original version never closed the file handle returned by
    ``fits.open``; the context manager guarantees it is closed.
    """
    with fits.open(fitsfile) as hdulist:
        input_hdu = hdulist[0]
        if len(input_hdu.data.shape) == 2:
            image = numpy.array(input_hdu.data[:,:])
        elif len(input_hdu.data.shape) == 3:
            image = numpy.array(input_hdu.data[0,:,:])
        else:
            image = numpy.array(input_hdu.data[0,0,:,:])
    return image
def flushFits(newimage, fitsfile):
    """Write ``newimage`` into the data of ``fitsfile`` in place.

    The image is written into the 2D plane of the primary HDU, handling
    2D, 3D, and 4D data layouts (leading axes indexed at 0).

    Parameters
    ----------
    newimage : numpy.ndarray
        2D array to store in the file.
    fitsfile : str
        Path to an existing FITS file, opened in update mode.

    Notes
    -----
    Fix: the original version opened the file in update mode but never
    closed it; the context manager flushes and closes on exit.
    """
    with fits.open(fitsfile, mode='update') as f:
        input_hdu = f[0]
        if len(input_hdu.data.shape) == 2:
            input_hdu.data[:,:] = newimage
        elif len(input_hdu.data.shape) == 3:
            input_hdu.data[0,:,:] = newimage
        else:
            input_hdu.data[0,0,:,:] = newimage
        f.flush()
def main():
    """Merge a wsclean MFS model image with its makemask mask, binary-dilate
    the union, and write the result to the output FITS file.

    Usage: merge_FITS_masks.py <prefix> <output.fits>
    """
    prefix, opfits = sys.argv[1], sys.argv[2]
    modelfits = prefix+'-MFS-model.fits'
    makemaskfits = prefix+'-MFS-image.fits.mask.fits'
    # Use the model image as a template for the output file.
    shutil.copyfile(modelfits, opfits)
    # Combine model and mask, then grow the footprint by a few pixels.
    combined = getImage(modelfits) + getImage(makemaskfits)
    combined = binary_dilation(combined, iterations=4)
    flushFits(combined, opfits)

if __name__ == "__main__":
    main()
|
IanHeywoodREPO_NAMEoxkatPATH_START.@oxkat_extracted@oxkat-master@tools@merge_FITS_masks.py@.PATH_END.py
|
{
"filename": "_y.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/contour/_y.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class YValidator(_plotly_utils.basevalidators.DataArrayValidator):
    """Validator for the ``y`` data-array property of ``contour`` traces."""

    def __init__(self, plotly_name="y", parent_name="contour", **kwargs):
        super(YValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            # NOTE(review): "calc+clearAxisTypes" presumably forces a recalc and
            # resets cached axis types when y changes — confirm against plotly.js.
            edit_type=kwargs.pop("edit_type", "calc+clearAxisTypes"),
            # Setting y implies the y coordinates are provided as an array.
            implied_edits=kwargs.pop("implied_edits", {"ytype": "array"}),
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@contour@_y.py@.PATH_END.py
|
{
"filename": "conftest.py",
"repo_name": "langchain-ai/langchain",
"repo_path": "langchain_extracted/langchain-master/libs/text-splitters/tests/unit_tests/conftest.py",
"type": "Python"
}
|
"""Configuration for unit tests."""
from importlib import util
from typing import Dict, Sequence
import pytest
from pytest import Config, Function, Parser
def pytest_addoption(parser: Parser) -> None:
    """Add custom command line options to pytest."""
    # Both options are boolean flags; register them uniformly.
    option_specs = (
        (
            "--only-extended",
            "Only run extended tests. Does not allow skipping any extended tests.",
        ),
        (
            "--only-core",
            "Only run core tests. Never runs any extended tests.",
        ),
    )
    for flag, help_text in option_specs:
        parser.addoption(flag, action="store_true", help=help_text)
def pytest_collection_modifyitems(config: Config, items: Sequence[Function]) -> None:
    """Add implementations for handling custom markers.

    Currently this supports the custom ``requires`` marker, which denotes tests
    that need one or more packages installed in order to run.  A test marked

    .. code-block:: python

        @pytest.mark.requires("package1", "package2")
        def test_something():
            ...

    is skipped when any of the named packages is missing.  With
    ``--only-core``, every ``requires``-marked test is skipped; with
    ``--only-extended``, missing packages are a hard failure and unmarked
    tests are skipped instead.
    """
    # Cache of package name -> installed?, to avoid repeated util.find_spec calls.
    installed_cache: Dict[str, bool] = {}

    only_extended = config.getoption("--only-extended") or False
    only_core = config.getoption("--only-core") or False
    if only_extended and only_core:
        raise ValueError("Cannot specify both `--only-extended` and `--only-core`.")

    for item in items:
        requires_marker = item.get_closest_marker("requires")

        if requires_marker is None:
            # Unmarked tests are the "core" set; skip them in extended-only mode.
            if only_extended:
                item.add_marker(
                    pytest.mark.skip(reason="Skipping not an extended test.")
                )
            continue

        if only_core:
            item.add_marker(pytest.mark.skip(reason="Skipping not a core test."))
            continue

        # Check each required package; skip (or fail) on the first missing one.
        for pkg in requires_marker.args:
            if pkg not in installed_cache:
                try:
                    found = util.find_spec(pkg) is not None
                except Exception:
                    found = False
                installed_cache[pkg] = found
            if not installed_cache[pkg]:
                if only_extended:
                    # Extended runs must not silently skip; abort instead.
                    pytest.fail(
                        f"Package `{pkg}` is not installed but is required for "
                        f"extended tests. Please install the given package and "
                        f"try again.",
                    )
                item.add_marker(
                    pytest.mark.skip(reason=f"Requires pkg: `{pkg}`")
                )
                break
|
langchain-aiREPO_NAMElangchainPATH_START.@langchain_extracted@langchain-master@libs@text-splitters@tests@unit_tests@conftest.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "JLBLine/WODEN",
"repo_path": "WODEN_extracted/WODEN-master/wodenpy/array_layout/__init__.py",
"type": "Python"
}
|
JLBLineREPO_NAMEWODENPATH_START.@WODEN_extracted@WODEN-master@wodenpy@array_layout@__init__.py@.PATH_END.py
|
|
{
"filename": "stopping_conditions.py",
"repo_name": "amusecode/amuse",
"repo_path": "amuse_extracted/amuse-main/src/amuse/community/interface/stopping_conditions.py",
"type": "Python"
}
|
from amuse.units import units, generic_unit_system
from amuse.units import nbody_system as nbody
from amuse.support.exceptions import AmuseException
from amuse.rfi.core import legacy_function
from amuse.rfi.core import LegacyFunctionSpecification
class StoppingConditionInterface:
    """Low-level stopping-condition interface of a community code.

    Each method builds and returns a ``LegacyFunctionSpecification``
    describing one function of the underlying community code; the
    ``@legacy_function`` decorator (from ``amuse.rfi.core``) consumes
    that specification.  Conditions are identified by an integer
    ``type`` index throughout.
    """
    @legacy_function
    def has_stopping_condition():
        """
        Return 1 if the stopping condition with
        the given index is supported by the code,
        0 otherwise.
        """
        function = LegacyFunctionSpecification()
        function.can_handle_array = True
        function.addParameter(
            'type', dtype='int32', direction=function.IN,
            description="The type index of the stopping condition")
        function.addParameter(
            'result', dtype='int32', direction=function.OUT,
            description="1 if the stopping condition is supported")
        function.result_type = 'int32'
        function.result_doc = """
        0 - OK
        -1 - ERROR
        """
        return function
    @legacy_function
    def enable_stopping_condition():
        """
        Will enable the stopping condition if it is supported.
        """
        function = LegacyFunctionSpecification()
        function.can_handle_array = True
        function.addParameter(
            'type', dtype='int32', direction=function.IN,
            description="The type index of the stopping condition")
        function.result_type = 'int32'
        function.result_doc = """
        0 - OK
        -1 - ERROR
        """
        return function
    @legacy_function
    def disable_stopping_condition():
        """
        Will disable the stopping condition if it is supported.
        """
        function = LegacyFunctionSpecification()
        function.can_handle_array = True
        function.addParameter(
            'type', dtype='int32', direction=function.IN,
            description="The index of the stopping condition")
        function.result_type = 'int32'
        function.result_doc = """
        0 - OK
        -1 - ERROR
        """
        return function
    @legacy_function
    def is_stopping_condition_enabled():
        """
        Return 1 if the stopping condition with
        the given index is enabled, 0 otherwise.
        """
        function = LegacyFunctionSpecification()
        function.can_handle_array = True
        function.addParameter(
            'type', dtype='int32', direction=function.IN,
            description="The index of the stopping condition")
        function.addParameter(
            'result', dtype='int32', direction=function.OUT,
            description="1 if the stopping condition is enabled")
        function.result_type = 'int32'
        function.result_doc = """
        0 - OK
        -1 - ERROR
        """
        return function
    @legacy_function
    def is_stopping_condition_set():
        """
        Return 1 if the stopping condition with
        the given index is set, 0 otherwise.
        """
        function = LegacyFunctionSpecification()
        function.can_handle_array = True
        function.addParameter(
            'type', dtype='int32', direction=function.IN,
            description="The index of the stopping condition")
        function.addParameter(
            'result', dtype='int32', direction=function.OUT,
            description="1 if the stopping condition is enabled")
        function.result_type = 'int32'
        function.result_doc = """
        0 - OK
        -1 - ERROR
        """
        return function
    @legacy_function
    def get_number_of_stopping_conditions_set():
        """
        Return the number of stopping conditions set, one
        condition can be set multiple times.
        Stopping conditions are set when the code determines
        that the conditions are met. The objects or information
        about the condition can be retrieved with
        the get_stopping_condition_info method.
        """
        function = LegacyFunctionSpecification()
        function.can_handle_array = True
        function.addParameter(
            'result', dtype='int32', direction=function.OUT,
            description="> 1 if any stopping condition is set")
        function.result_type = 'int32'
        function.result_doc = """
        0 - OK
        -1 - ERROR
        """
        return function
    @legacy_function
    def get_stopping_condition_info():
        """
        Generic function for getting the information connected to
        a stopping condition. Index can be between 0 and
        the result of the :method:`get_number_of_stopping_conditions_set`
        method.
        """
        function = LegacyFunctionSpecification()
        function.can_handle_array = True
        function.addParameter(
            'index', dtype='int32', direction=function.IN,
            description=(
                "Index in the array[0,number_of_stopping_conditions_set>"
            )
        )
        function.addParameter(
            'type', dtype='int32', direction=function.OUT,
            description=(
                "Kind of the condition, can be used to retrieve specific "
                "information"
            )
        )
        function.addParameter(
            'number_of_particles', dtype='int32', direction=function.OUT,
            description="Number of particles that met this condition")
        function.result_type = 'int32'
        function.result_doc = """
        0 - OK
        -1 - ERROR
        """
        return function
    @legacy_function
    def get_stopping_condition_particle_index():
        """
        For collision detection: map a (condition index, column index)
        pair to the identifier of the particle involved.
        """
        function = LegacyFunctionSpecification()
        function.can_handle_array = True
        function.addParameter(
            'index', dtype='int32', direction=function.IN,
            description=(
                "Index in the array[0,number_of_stopping_conditions_set>"
            )
        )
        function.addParameter(
            'index_of_the_column', dtype='int32', direction=function.IN,
            description=(
                "Column index involved in the condition (for pair collisions "
                "0 and 1 are possible)"
            )
        )
        function.addParameter(
            'index_of_particle', dtype='int32', direction=function.OUT,
            description=(
                "Set to the identifier of particle[index_of_the_column][index]"
            )
        )
        function.result_type = 'int32'
        function.result_doc = """
        0 - OK
        -1 - ERROR
        """
        return function
    @legacy_function
    def set_stopping_condition_timeout_parameter():
        """
        Set max computer time available (in seconds).
        """
        function = LegacyFunctionSpecification()
        function.can_handle_array = False
        function.addParameter(
            'value', dtype='float64', direction=function.IN,
            description="Available wallclock time in seconds")
        function.result_type = 'int32'
        function.result_doc = """
        0 - OK
        -1 - Value out of range
        """
        return function
    @legacy_function
    def get_stopping_condition_timeout_parameter():
        """
        Retrieve max computer time available (in seconds).
        """
        function = LegacyFunctionSpecification()
        function.can_handle_array = False
        function.addParameter(
            'value', dtype='float64', direction=function.OUT,
            description="Current value of available wallclock time in seconds")
        function.result_type = 'int32'
        function.result_doc = """
        0 - OK
        -1 - ERROR
        """
        return function
    @legacy_function
    def set_stopping_condition_number_of_steps_parameter():
        """
        Set max inner loop evaluations.
        """
        function = LegacyFunctionSpecification()
        function.can_handle_array = False
        function.addParameter(
            'value', dtype='int32', direction=function.IN,
            description="Available inner loop evaluations")
        function.result_type = 'int32'
        function.result_doc = """
        0 - OK
        -1 - Value out of range
        """
        return function
    @legacy_function
    def get_stopping_condition_number_of_steps_parameter():
        """
        Retrieve max inner loop evaluations.
        """
        function = LegacyFunctionSpecification()
        function.can_handle_array = False
        function.addParameter(
            'value', dtype='int32', direction=function.OUT,
            description="Current number of available inner loop evaluations")
        function.result_type = 'int32'
        function.result_doc = """
        0 - OK
        -1 - ERROR
        """
        return function
    @legacy_function
    def set_stopping_condition_out_of_box_parameter():
        """
        Set size of box.
        """
        function = LegacyFunctionSpecification()
        function.can_handle_array = False
        function.addParameter(
            'value', dtype='float64', direction=function.IN,
            description="Size of box")
        function.result_type = 'int32'
        function.result_doc = """
        0 - OK
        -1 - Value out of range
        """
        return function
    @legacy_function
    def get_stopping_condition_out_of_box_parameter():
        """
        Get size of box
        """
        function = LegacyFunctionSpecification()
        function.can_handle_array = False
        function.addParameter(
            'value', dtype='float64', direction=function.OUT,
            description="Size of box")
        function.result_type = 'int32'
        function.result_doc = """
        0 - OK
        -1 - Value out of range
        """
        return function
    @legacy_function
    def set_stopping_condition_minimum_density_parameter():
        """Set the minimum density used by the density limit detection."""
        function = LegacyFunctionSpecification()
        function.addParameter('value', dtype='float64', direction=function.IN)
        function.result_type = 'int32'
        return function
    @legacy_function
    def get_stopping_condition_minimum_density_parameter():
        """Get the minimum density used by the density limit detection."""
        function = LegacyFunctionSpecification()
        function.addParameter('value', dtype='float64', direction=function.OUT)
        function.result_type = 'int32'
        return function
    @legacy_function
    def set_stopping_condition_maximum_density_parameter():
        """Set the maximum density used by the density limit detection."""
        function = LegacyFunctionSpecification()
        function.addParameter('value', dtype='float64', direction=function.IN)
        function.result_type = 'int32'
        return function
    @legacy_function
    def get_stopping_condition_maximum_density_parameter():
        """Get the maximum density used by the density limit detection."""
        function = LegacyFunctionSpecification()
        function.addParameter('value', dtype='float64', direction=function.OUT)
        function.result_type = 'int32'
        return function
    @legacy_function
    def set_stopping_condition_minimum_internal_energy_parameter():
        """Set the minimum internal energy for the internal energy limit detection."""
        function = LegacyFunctionSpecification()
        function.addParameter('value', dtype='float64', direction=function.IN)
        function.result_type = 'int32'
        return function
    @legacy_function
    def get_stopping_condition_minimum_internal_energy_parameter():
        """Get the minimum internal energy for the internal energy limit detection."""
        function = LegacyFunctionSpecification()
        function.addParameter('value', dtype='float64', direction=function.OUT)
        function.result_type = 'int32'
        return function
    @legacy_function
    def set_stopping_condition_maximum_internal_energy_parameter():
        """Set the maximum internal energy for the internal energy limit detection."""
        function = LegacyFunctionSpecification()
        function.addParameter('value', dtype='float64', direction=function.IN)
        function.result_type = 'int32'
        return function
    @legacy_function
    def get_stopping_condition_maximum_internal_energy_parameter():
        """Get the maximum internal energy for the internal energy limit detection."""
        function = LegacyFunctionSpecification()
        function.addParameter('value', dtype='float64', direction=function.OUT)
        function.result_type = 'int32'
        return function
    @legacy_function
    def get_stopping_condition_out_of_box_use_center_of_mass_parameter():
        """
        If True use the center of mass to determine the location of the box, if
        False use (0,0,0)
        """
        function = LegacyFunctionSpecification()
        function.can_handle_array = False
        function.addParameter(
            'value', dtype='bool', direction=function.OUT,
            description="True if detection should use center of mass")
        function.result_type = 'int32'
        function.result_doc = """
        0 - OK
        -1 - Value out of range
        """
        return function
    @legacy_function
    def set_stopping_condition_out_of_box_use_center_of_mass_parameter():
        """
        If True use the center of mass to determine the location of the box, if
        False use (0,0,0)
        """
        function = LegacyFunctionSpecification()
        function.can_handle_array = False
        function.addParameter(
            'value', dtype='bool', direction=function.IN,
            description="True if detection should use center of mass")
        function.result_type = 'int32'
        function.result_doc = """
        0 - OK
        -1 - Value out of range
        """
        return function
class StoppingCondition:
    """One stopping condition of a community code.

    Thin wrapper around an integer condition ``type`` understood by the
    code's low-level stopping-condition interface.  Instances live on a
    ``StoppingConditions`` collection (``self.conditions``), which gives
    access to the code itself via ``self.conditions.code``.
    """
    def __init__(self, conditions, type, description):
        # conditions: the owning StoppingConditions collection.
        # type: integer index of this condition in the low-level interface.
        self.conditions = conditions
        self.type = type
        self.description = description
        # Expose the description through help()/inspection on the instance.
        self.__doc__ = description
    def _own_name(self):
        """Return this condition's attribute name on the owning collection.

        Used only to build error messages; assumes the condition is
        registered on ``self.conditions``.
        """
        return [
            name for name, value in self.conditions.all_conditions()
            if value is self
        ][0]
    def enable(self):
        """Enable this stopping condition in the code.

        Raises:
            AmuseException: if the code does not support this condition.
        """
        if self.is_supported():
            self.conditions.code.enable_stopping_condition(self.type)
        else:
            raise AmuseException(
                f"Can't enable stopping condition '{self._own_name()}', since "
                f"'{type(self.conditions.code).__name__}' does not "
                "support this condition."
            )
    def disable(self):
        """Disable this stopping condition in the code.

        Raises:
            AmuseException: if the code does not support this condition.
        """
        if self.is_supported():
            self.conditions.code.disable_stopping_condition(self.type)
        else:
            raise AmuseException(
                f"Can't disable stopping condition '{self._own_name()}', since "
                f"'{type(self.conditions.code).__name__}' does not "
                "support this condition."
            )
    def is_enabled(self):
        """Return True if this condition is currently enabled."""
        return self.conditions.code.is_stopping_condition_enabled(
            self.type
        ) == 1
    def is_supported(self):
        """Return True if the code supports this condition."""
        return self.conditions.code.has_stopping_condition(self.type) == 1
    def is_set(self):
        """Return True if this condition is currently set."""
        return self.conditions.code.is_stopping_condition_set(self.type) == 1
    def get_set_condition_indices(self, index_in_condition):
        """Return the indices of set conditions matching this type.

        Only set conditions that involve more than ``index_in_condition``
        particles are included.
        """
        indices = list(
            range(self.conditions.code.get_number_of_stopping_conditions_set())
        )
        if len(indices) == 0:
            return []
        types, number_of_particles = \
            self.conditions.code.get_stopping_condition_info(
                indices
            )
        result = []
        for index, type, number_of_particles_in_condition in zip(
            indices, types, number_of_particles
        ):
            if (
                type == self.type
                and index_in_condition < number_of_particles_in_condition
            ):
                result.append(index)
        return result
    def particles(
        self, index_in_the_condition=0, particles_set_name="particles"
    ):
        """Return the particle at ``index_in_the_condition`` for every
        set condition of this type (an empty selection when none are set).
        """
        selected = self.get_set_condition_indices(index_in_the_condition)
        particles = getattr(self.conditions.code, particles_set_name)
        if len(selected) == 0:
            # Empty slice of the set keeps the result's type and attributes.
            return particles[0:0]
        else:
            return particles.get_stopping_condition_particle_index(
                selected,
                [index_in_the_condition]*len(selected)
            )
class StoppingConditions:
    """Registry of all stopping conditions of a community code.

    Each attribute is a :class:`StoppingCondition`; the integer passed to
    its constructor is the condition's type index in the low-level
    interface.  The ``define_*`` methods register parameters, methods,
    queries and state transitions with a handler object.
    """
    def __init__(self, code):
        # code: the community code exposing the low-level
        # stopping-condition interface used by every condition below.
        self.code = code
        self.collision_detection = StoppingCondition(
            self,
            0,
            (
                "If enabled, the code will stop at the end of the inner loop "
                "when two stars connect"
            )
        )
        self.pair_detection = StoppingCondition(
            self,
            1,
            (
                "If enabled, the code will stop at the end of the inner loop "
                "when two stars are bound"
            )
        )
        self.escaper_detection = StoppingCondition(
            self,
            2,
            (
                "If enabled, the code will stop at the end of the inner loop "
                "when a star escapes"
            )
        )
        self.timeout_detection = StoppingCondition(
            self,
            3,
            (
                "If enabled, the code will stop at the end of the inner loop "
                "when the computer time is above a set timeout"
            )
        )
        self.number_of_steps_detection = StoppingCondition(
            self,
            4,
            (
                "If enabled, the code will stop at the end of the inner loop "
                "when the number of evaluations reached the set max number"
            )
        )
        self.out_of_box_detection = StoppingCondition(
            self,
            5,
            (
                "If enabled, the code will stop if a particle escapes the box "
                "of size out_of_box_size"
            )
        )
        self.density_limit_detection = StoppingCondition(
            self,
            6,
            (
                "If enabled, the code will stop if a gas particle has a "
                "density out of the range "
                "[stopping_condition_minimum_density, "
                "stopping_condition_maximum_density]"
            )
        )
        self.internal_energy_limit_detection = StoppingCondition(
            self,
            7,
            (
                "If enabled, the code will stop if a gas particle has an "
                "internal energy out of the range "
                "[stopping_condition_minimum_internal_energy, "
                "stopping_condition_maximum_internal_energy]"
            )
        )
        self.interaction_over_detection = StoppingCondition(
            self,
            8,
            (
                "If enabled, the code will stop if the interaction between "
                "particles is over"
            )
        )
        # NOTE(review): the wording "two a star" below looks garbled —
        # confirm the intended description text.
        self.supernova_detection = StoppingCondition(
            self,
            9,
            (
                "If enabled, the code will stop at the end of the inner loop "
                "when two a star goes supernova"
            )
        )
    def all_conditions(self):
        """Yield ``(name, condition)`` for every StoppingCondition attribute."""
        for name in dir(self):
            if name.startswith("_"):
                continue
            else:
                value = getattr(self, name)
                if isinstance(value, StoppingCondition):
                    yield name, value
    def __str__(self):
        """Return a human-readable summary of supported/enabled/set conditions."""
        parts = []
        parts.append(
            f"Stopping conditions of a '{type(self.code).__name__}' object\n"
        )
        supported = self.supported_conditions()
        enabled = [
            name for name, condition in self.all_conditions()
            if condition.is_enabled()
        ]
        hit = [
            name for name, condition in self.all_conditions()
            if condition.is_set()
        ]
        parts.append('* supported conditions: ')
        parts.append(', '.join(supported))
        parts.append('\n')
        parts.append('* enabled conditions: ')
        if enabled:
            parts.append(', '.join(enabled))
        else:
            parts.append('none')
        parts.append('\n')
        parts.append('* set conditions: ')
        if hit:
            parts.append(', '.join(hit))
        else:
            parts.append('none')
        parts.append('\n')
        return ''.join(parts)
    def supported_conditions(self):
        """Return the names of all conditions supported by the code."""
        return [
            name for name, condition in self.all_conditions()
            if condition.is_supported()
        ]
    def define_parameters(self, handler):
        """Register the stopping-condition parameters with *handler*."""
        handler.add_method_parameter(
            "get_stopping_condition_timeout_parameter",
            "set_stopping_condition_timeout_parameter",
            "stopping_conditions_timeout",
            "max wallclock time available for the evolve step",
            default_value=4.0 | units.s
        )
        handler.add_method_parameter(
            "get_stopping_condition_number_of_steps_parameter",
            "set_stopping_condition_number_of_steps_parameter",
            "stopping_conditions_number_of_steps",
            "max inner loop evals",
            default_value=1.0
        )
        handler.add_method_parameter(
            "get_stopping_condition_out_of_box_parameter",
            "set_stopping_condition_out_of_box_parameter",
            "stopping_conditions_out_of_box_size",
            "size of cube",
            default_value=0.0 | nbody.length
        )
        handler.add_method_parameter(
            "get_stopping_condition_minimum_density_parameter",
            "set_stopping_condition_minimum_density_parameter",
            "stopping_condition_minimum_density",
            "minimum density of a gas particle",
            default_value=-1.0 | generic_unit_system.density
        )
        handler.add_method_parameter(
            "get_stopping_condition_maximum_density_parameter",
            "set_stopping_condition_maximum_density_parameter",
            "stopping_condition_maximum_density",
            "maximum density of a gas particle",
            default_value=-1.0 | generic_unit_system.density
        )
        handler.add_method_parameter(
            "get_stopping_condition_minimum_internal_energy_parameter",
            "set_stopping_condition_minimum_internal_energy_parameter",
            "stopping_condition_minimum_internal_energy",
            "minimum internal energy of a gas particle",
            default_value=-1.0 | generic_unit_system.specific_energy
        )
        handler.add_method_parameter(
            "get_stopping_condition_maximum_internal_energy_parameter",
            "set_stopping_condition_maximum_internal_energy_parameter",
            "stopping_condition_maximum_internal_energy",
            "maximum internal energy of a gas particle",
            default_value=-1.0 | generic_unit_system.specific_energy
        )
        handler.add_method_parameter(
            "get_stopping_condition_out_of_box_use_center_of_mass_parameter",
            "set_stopping_condition_out_of_box_use_center_of_mass_parameter",
            "stopping_conditions_out_of_box_use_center_of_mass",
            (
                "if True use the center of mass to determine the location of "
                "the box, if False use (0,0,0), is not used by all codes"
            ),
            default_value=False
        )
    def define_methods(self, handler):
        """Register unit and error-code metadata for the interface methods."""
        handler.add_method(
            'get_stopping_condition_particle_index',
            (
                handler.NO_UNIT,
                handler.NO_UNIT,
            ),
            (
                handler.INDEX,
                handler.ERROR_CODE,
            )
        )
        handler.add_method(
            'has_stopping_condition',
            (
                handler.NO_UNIT,
            ),
            (
                handler.NO_UNIT,
                handler.ERROR_CODE,
            )
        )
        handler.add_method(
            'is_stopping_condition_enabled',
            (
                handler.NO_UNIT,
            ),
            (
                handler.NO_UNIT,
                handler.ERROR_CODE,
            )
        )
        handler.add_method(
            'is_stopping_condition_set',
            (
                handler.NO_UNIT,
            ),
            (
                handler.NO_UNIT,
                handler.ERROR_CODE,
            )
        )
        handler.add_method(
            'get_stopping_condition_info',
            (
                handler.NO_UNIT,
            ),
            (
                handler.NO_UNIT,
                handler.NO_UNIT,
                handler.ERROR_CODE,
            )
        )
        handler.add_method(
            'get_number_of_stopping_conditions_set',
            (
            ),
            (
                handler.NO_UNIT,
                handler.ERROR_CODE,
            )
        )
        # NOTE(review): "(handler.ERROR_CODE)" below is a parenthesized
        # value, not a 1-tuple (no trailing comma) — unlike every other
        # add_method call here. Confirm the handler accepts a bare value.
        handler.add_method(
            'enable_stopping_condition',
            (handler.NO_UNIT,),
            (
                handler.ERROR_CODE
            )
        )
        handler.add_method(
            'disable_stopping_condition',
            (handler.NO_UNIT,),
            (
                handler.ERROR_CODE
            )
        )
        handler.add_method(
            "get_stopping_condition_timeout_parameter",
            (),
            (units.s, handler.ERROR_CODE,)
        )
        handler.add_method(
            "set_stopping_condition_timeout_parameter",
            (units.s, ),
            (handler.ERROR_CODE,)
        )
        handler.add_method(
            "get_stopping_condition_number_of_steps_parameter",
            (),
            (handler.NO_UNIT, handler.ERROR_CODE,)
        )
        handler.add_method(
            "set_stopping_condition_number_of_steps_parameter",
            (handler.NO_UNIT, ),
            (handler.ERROR_CODE,)
        )
        handler.add_method(
            "get_stopping_condition_out_of_box_parameter",
            (),
            (nbody.length, handler.ERROR_CODE,)
        )
        handler.add_method(
            "set_stopping_condition_out_of_box_parameter",
            (nbody.length, ),
            (handler.ERROR_CODE,)
        )
        handler.add_method(
            "get_stopping_condition_minimum_density_parameter",
            (), (generic_unit_system.density, handler.ERROR_CODE,))
        handler.add_method(
            "set_stopping_condition_minimum_density_parameter",
            (generic_unit_system.density, ), (handler.ERROR_CODE,))
        handler.add_method(
            "get_stopping_condition_maximum_density_parameter",
            (), (generic_unit_system.density, handler.ERROR_CODE,))
        handler.add_method(
            "set_stopping_condition_maximum_density_parameter",
            (generic_unit_system.density, ), (handler.ERROR_CODE,))
        handler.add_method(
            "get_stopping_condition_minimum_internal_energy_parameter",
            (), (generic_unit_system.specific_energy, handler.ERROR_CODE,))
        handler.add_method(
            "set_stopping_condition_minimum_internal_energy_parameter",
            (generic_unit_system.specific_energy, ), (handler.ERROR_CODE,))
        handler.add_method(
            "get_stopping_condition_maximum_internal_energy_parameter",
            (), (generic_unit_system.specific_energy, handler.ERROR_CODE,))
        handler.add_method(
            "set_stopping_condition_maximum_internal_energy_parameter",
            (generic_unit_system.specific_energy, ), (handler.ERROR_CODE,))
    def define_particle_set(self, handler, name_of_the_set='particles'):
        """Attach the particle-index query to the named particle set."""
        handler.add_query(
            name_of_the_set, 'get_stopping_condition_particle_index'
        )
    def define_state(self, handler):
        """Register state constraints for the query/toggle methods."""
        for method_name in [
            'get_stopping_condition_particle_index',
            'has_stopping_condition',
            'is_stopping_condition_enabled',
            'is_stopping_condition_set',
            'get_stopping_condition_info',
            'get_number_of_stopping_conditions_set',
            'enable_stopping_condition',
            'disable_stopping_condition'
        ]:
            # '!UNINITIALIZED!END' is a state pattern passed through to the
            # handler — presumably "any state except UNINITIALIZED and END";
            # TODO confirm against the state-machine handler's syntax.
            handler.add_method('!UNINITIALIZED!END', method_name)
|
amusecodeREPO_NAMEamusePATH_START.@amuse_extracted@amuse-main@src@amuse@community@interface@stopping_conditions.py@.PATH_END.py
|
{
"filename": "_size.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/layout/newshape/legendgrouptitle/font/_size.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class SizeValidator(_plotly_utils.basevalidators.NumberValidator):
    """Validator for the font ``size`` of layout.newshape.legendgrouptitle."""

    def __init__(
        self,
        plotly_name="size",
        parent_name="layout.newshape.legendgrouptitle.font",
        **kwargs,
    ):
        # Resolve overridable defaults before delegating to the base class.
        edit_type = kwargs.pop("edit_type", "none")
        minimum = kwargs.pop("min", 1)
        super().__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            min=minimum,
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@layout@newshape@legendgrouptitle@font@_size.py@.PATH_END.py
|
{
"filename": "main.py",
"repo_name": "cdslaborg/paramonte",
"repo_path": "paramonte_extracted/paramonte-main/benchmark/fortran/pm_distNorm/setNormRandBox_Basic_vs_Polar/main.py",
"type": "Python"
}
|
#!/usr/bin/env python
"""Plot runtime histograms for a benchmark output file.

Reads ``main.out`` (CSV, one column of runtimes per benchmarked routine)
from the current directory and writes two PNG figures named after the
directory: overlaid histograms of the absolute runtimes, and histograms
of the runtimes relative to the first (reference) column.
"""
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import os

dirname = os.path.basename(os.getcwd())
fontsize = 14
df = pd.read_csv("main.out", delimiter = ",")
colnames = list(df.columns.values)


def _plot_log10_histograms(values_by_label, xlabel, title, filename):
    """Draw overlaid step histograms of log10(values) and save to *filename*.

    values_by_label: sequence of (legend label, 1-D value array) pairs.
    """
    # Note: plt.figure returns the Figure; plt.subplot the Axes we draw on.
    fig = plt.figure(figsize = 1.25 * np.array([6.4, 4.6]), dpi = 200)
    ax = plt.subplot()
    labels = []
    for label, values in values_by_label:
        labels.append(label)
        plt.hist( np.log10(values)
                , histtype = "step"
                , linewidth = 2
                , alpha = .5
                , bins = 50
                )
    plt.xticks(fontsize = fontsize)
    plt.yticks(fontsize = fontsize)
    ax.set_xlabel(xlabel, fontsize = fontsize)
    ax.set_ylabel("Count", fontsize = fontsize)
    ax.set_title(title, fontsize = fontsize)
    plt.minorticks_on()
    plt.grid(visible = True, which = "both", axis = "both", color = "0.85", linestyle = "-")
    ax.tick_params(axis = "y", which = "minor")
    ax.tick_params(axis = "x", which = "minor")
    ax.legend(labels, fontsize = fontsize)
    plt.tight_layout()
    plt.savefig(filename)


####################################################################################################################################
#### Plot the runtimes.
####################################################################################################################################
_plot_log10_histograms(
    [(name, df[name].values) for name in colnames],
    "Log10( Runtime [ seconds ] )",
    " vs. ".join(colnames) + "\nLower is better.",
    "benchmark." + dirname + ".runtime.png",
)

####################################################################################################################################
#### Plot the runtime ratios (relative to the first, reference, column).
####################################################################################################################################
_plot_log10_histograms(
    [(name, df[name].values / df[colnames[0]].values) for name in colnames[1:]],
    "Log10( Runtime Ratio compared to {} )".format(colnames[0]),
    "Runtime Ratio Comparison. Lower means faster.\nLower than 0 means faster than {}().".format(colnames[0]),
    "benchmark." + dirname + ".runtime.ratio.png",
)
|
cdslaborgREPO_NAMEparamontePATH_START.@paramonte_extracted@paramonte-main@benchmark@fortran@pm_distNorm@setNormRandBox_Basic_vs_Polar@main.py@.PATH_END.py
|
{
"filename": "headers.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/tools/python3/Lib/wsgiref/headers.py",
"type": "Python"
}
|
"""Manage HTTP Response Headers
Much of this module is red-handedly pilfered from email.message in the stdlib,
so portions are Copyright (C) 2001,2002 Python Software Foundation, and were
written by Barry Warsaw.
"""
# Regular expression that matches `special' characters in parameters, the
# existence of which force quoting of the parameter value.
import re
tspecials = re.compile(r'[ \(\)<>@,;:\\"/\[\]\?=]')
def _formatparam(param, value=None, quote=1):
"""Convenience function to format and return a key=value pair.
This will quote the value if needed or if quote is true.
"""
if value is not None and len(value) > 0:
if quote or tspecials.search(value):
value = value.replace('\\', '\\\\').replace('"', r'\"')
return '%s="%s"' % (param, value)
else:
return '%s=%s' % (param, value)
else:
return param
class Headers:
"""Manage a collection of HTTP response headers"""
def __init__(self, headers=None):
headers = headers if headers is not None else []
if type(headers) is not list:
raise TypeError("Headers must be a list of name/value tuples")
self._headers = headers
if __debug__:
for k, v in headers:
self._convert_string_type(k)
self._convert_string_type(v)
def _convert_string_type(self, value):
"""Convert/check value type."""
if type(value) is str:
return value
raise AssertionError("Header names/values must be"
" of type str (got {0})".format(repr(value)))
def __len__(self):
"""Return the total number of headers, including duplicates."""
return len(self._headers)
def __setitem__(self, name, val):
"""Set the value of a header."""
del self[name]
self._headers.append(
(self._convert_string_type(name), self._convert_string_type(val)))
def __delitem__(self,name):
"""Delete all occurrences of a header, if present.
Does *not* raise an exception if the header is missing.
"""
name = self._convert_string_type(name.lower())
self._headers[:] = [kv for kv in self._headers if kv[0].lower() != name]
def __getitem__(self,name):
"""Get the first header value for 'name'
Return None if the header is missing instead of raising an exception.
Note that if the header appeared multiple times, the first exactly which
occurrence gets returned is undefined. Use getall() to get all
the values matching a header field name.
"""
return self.get(name)
def __contains__(self, name):
"""Return true if the message contains the header."""
return self.get(name) is not None
def get_all(self, name):
"""Return a list of all the values for the named field.
These will be sorted in the order they appeared in the original header
list or were added to this instance, and may contain duplicates. Any
fields deleted and re-inserted are always appended to the header list.
If no fields exist with the given name, returns an empty list.
"""
name = self._convert_string_type(name.lower())
return [kv[1] for kv in self._headers if kv[0].lower()==name]
def get(self,name,default=None):
"""Get the first header value for 'name', or return 'default'"""
name = self._convert_string_type(name.lower())
for k,v in self._headers:
if k.lower()==name:
return v
return default
def keys(self):
"""Return a list of all the header field names.
These will be sorted in the order they appeared in the original header
list, or were added to this instance, and may contain duplicates.
Any fields deleted and re-inserted are always appended to the header
list.
"""
return [k for k, v in self._headers]
def values(self):
"""Return a list of all header values.
These will be sorted in the order they appeared in the original header
list, or were added to this instance, and may contain duplicates.
Any fields deleted and re-inserted are always appended to the header
list.
"""
return [v for k, v in self._headers]
def items(self):
"""Get all the header fields and values.
These will be sorted in the order they were in the original header
list, or were added to this instance, and may contain duplicates.
Any fields deleted and re-inserted are always appended to the header
list.
"""
return self._headers[:]
def __repr__(self):
return "%s(%r)" % (self.__class__.__name__, self._headers)
def __str__(self):
"""str() returns the formatted headers, complete with end line,
suitable for direct HTTP transmission."""
return '\r\n'.join(["%s: %s" % kv for kv in self._headers]+['',''])
def __bytes__(self):
return str(self).encode('iso-8859-1')
def setdefault(self,name,value):
"""Return first matching header value for 'name', or 'value'
If there is no header named 'name', add a new header with name 'name'
and value 'value'."""
result = self.get(name)
if result is None:
self._headers.append((self._convert_string_type(name),
self._convert_string_type(value)))
return value
else:
return result
def add_header(self, _name, _value, **_params):
    """Extended header setting.

    _name is the header field to add.  Keyword arguments set additional
    parameters for the header field, with underscores converted to dashes.
    Each parameter is normally appended as key="value"; when its value is
    None only the bare key is appended.

    Example:
        h.add_header('content-disposition', 'attachment', filename='bud.gif')

    Unlike the corresponding 'email.message' method, this does *not* handle
    '(charset, language, value)' tuples: all values must be strings or None.
    """
    segments = []
    if _value is not None:
        segments.append(self._convert_string_type(_value))
    for key, val in _params.items():
        key = self._convert_string_type(key)
        if val is None:
            segments.append(key.replace('_', '-'))
        else:
            val = self._convert_string_type(val)
            segments.append(_formatparam(key.replace('_', '-'), val))
    self._headers.append((self._convert_string_type(_name), "; ".join(segments)))
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@tools@python3@Lib@wsgiref@headers.py@.PATH_END.py
|
{
"filename": "jackknife_weights.py",
"repo_name": "oliverphilcox/RascalC",
"repo_path": "RascalC_extracted/RascalC-master/python/jackknife_weights.py",
"type": "Python"
}
|
## Script to generate RR pair counts from a given set of random particles. This is based on the Corrfunc code of Sinha & Garrison.
## Weights and weighted pair counts are saved in the ../weight_files/ subdirectory.
## If the periodic flag is set, we assume a periodic simulation and measure mu from the Z-axis.

import sys
import math
import os

import numpy as np

# PARAMETERS: read and validate the command line.
if len(sys.argv) != 8:
    print("Usage: python jackknife_weights.py {RANDOM_PARTICLE_FILE} {BIN_FILE} {MU_MAX} {N_MU_BINS} {NTHREADS} {PERIODIC} {OUTPUT_DIR}")
    sys.exit(1)

fname = str(sys.argv[1])     # random particle catalogue (X Y Z weight jackknife)
binfile = str(sys.argv[2])   # radial binning file, one bin per line
mu_max = float(sys.argv[3])  # maximum mu for the angular binning
nmu_bins = int(sys.argv[4])  # number of mu bins
nthreads = int(sys.argv[5])  # threads passed through to Corrfunc
periodic = int(sys.argv[6])  # nonzero => periodic box, mu measured from the Z-axis
outdir = str(sys.argv[7])    # output directory

## First read in weights and positions:
print("Reading in data")
X, Y, Z, W, J = np.loadtxt(fname, usecols=range(5)).T
J = J.astype(int)  # jackknife region is integer

N = len(X)  # number of particles
weight_sum = np.sum(W)  # normalization by summed weights
J_regions = np.unique(J)  # jackknife regions in use
N_jack = len(J_regions)  # number of non-empty jackknife regions

print("Number of random particles %.1e" % N)

## Determine number of radial bins in binning file (one bin per line).
# Counting with sum() avoids the NameError the old enumerate loop raised
# on an empty file.
print("Counting lines in binfile")
with open(binfile) as f:
    nrbins = sum(1 for _ in f)
print('%s radial bins are used in this file.' % nrbins)

if not periodic:
    # Compute RR counts for the non-periodic case (measuring mu from the radial direction)
    print("Using non-periodic input data")

    def coord_transform(x, y, z):
        # Convert the X,Y,Z coordinates into Ra,Dec,comoving_distance (for use in corrfunc)
        # Shamelessly stolen from astropy
        xsq = x ** 2.
        ysq = y ** 2.
        zsq = z ** 2.
        com_dist = (xsq + ysq + zsq) ** 0.5
        s = (xsq + ysq) ** 0.5
        if np.isscalar(x) and np.isscalar(y) and np.isscalar(z):
            Ra = math.atan2(y, x) * 180. / np.pi
            Dec = math.atan2(z, s) * 180. / np.pi
        else:
            Ra = np.arctan2(y, x) * 180. / np.pi + 180.
            Dec = np.arctan2(z, s) * 180. / np.pi
        return com_dist, Ra, Dec

    # Convert coordinates to spherical coordinates
    com_dist, Ra, Dec = coord_transform(X, Y, Z)

    # Now compute RR counts
    from Corrfunc.mocks.DDsmu_mocks import DDsmu_mocks
    RR_aA = np.zeros([N_jack, nrbins * nmu_bins])
    # Iterate over jackknife regions
    for i, j in enumerate(J_regions):
        filt = np.where(J == j)
        print("Computing pair counts for non-empty jackknife %s of %s." % (i + 1, N_jack))
        # Compute pair counts between jackknife region and entire survey volume
        cross_RR = DDsmu_mocks(0, 2, nthreads, mu_max, nmu_bins, binfile,
                               Ra, Dec, com_dist,
                               weights1=W, weight_type='pair_product',
                               RA2=Ra[filt], DEC2=Dec[filt], CZ2=com_dist[filt],
                               weights2=W[filt], verbose=False,
                               is_comoving_dist=True)
        # Weight by average particle weighting
        RR_aA[i] = cross_RR[:]['npairs'] * cross_RR[:]['weightavg']
else:
    # Compute RR counts for the periodic case (measuring mu from the Z-axis)
    print("Using periodic input data")
    from Corrfunc.theory.DDsmu import DDsmu
    RR_aA = np.zeros([N_jack, nrbins * nmu_bins])
    # Iterate over jackknife regions
    for i, j in enumerate(J_regions):
        filt = np.where(J == j)
        print("Computing pair counts for non-empty jackknife %s of %s." % (i + 1, N_jack))
        # Compute pair counts between jackknife region and entire survey volume.
        # NOTE(review): periodic=False is passed here even though this is the
        # "periodic" branch — presumably only mu is measured from the Z-axis
        # while pair counting stays aperiodic; confirm before relying on this.
        cross_RR = DDsmu(0, nthreads, binfile, mu_max, nmu_bins, X, Y, Z,
                         weights1=W, weight_type='pair_product',
                         X2=X[filt], Y2=Y[filt], Z2=Z[filt],
                         weights2=W[filt], periodic=False, verbose=False)
        # Weight by average particle weighting
        RR_aA[i] = cross_RR[:]['npairs'] * cross_RR[:]['weightavg']

# Now compute jackknife weights from the pair counts: w_aA = RR_aA / sum_A RR_aA.
RR_a = np.sum(RR_aA, axis=0)
w_aA = RR_aA / RR_a  # jackknife weighting for each bin and region

# Save output files:
if not os.path.exists(outdir):
    os.makedirs(outdir)

weight_file = 'jackknife_weights_n%d_m%d_j%d_11.dat' % (nrbins, nmu_bins, N_jack)
print("Saving jackknife weight as %s" % weight_file)
# Use a distinct handle name so the filename variable is not shadowed.
with open(os.path.join(outdir, weight_file), "w+") as wf:
    for j_id, jackknife_weight in enumerate(w_aA):
        wf.write("%d\t" % J_regions[j_id])
        wf.write("\t".join("%.8e" % w for w in jackknife_weight))
        wf.write("\n")

RR_a_file = 'binned_pair_counts_n%d_m%d_j%d_11.dat' % (nrbins, nmu_bins, N_jack)
print("Saving binned pair counts as %s" % RR_a_file)
with open(os.path.join(outdir, RR_a_file), "w+") as RR_file:
    for val in RR_a:
        RR_file.write("%.8e\n" % val)

RR_aA_file = 'jackknife_pair_counts_n%d_m%d_j%d_11.dat' % (nrbins, nmu_bins, N_jack)
print("Saving normalized jackknife pair counts as %s" % RR_aA_file)
norm = weight_sum ** 2.  # normalize by the squared sum of particle weights
with open(os.path.join(outdir, RR_aA_file), "w+") as jackRR_file:
    for j_id, pair_count in enumerate(RR_aA):
        jackRR_file.write("%d\t" % J_regions[j_id])
        jackRR_file.write("\t".join("%.8e" % (p / norm) for p in pair_count))
        jackRR_file.write("\n")

print("Jackknife weights and pair counts written successfully to the %s directory" % outdir)
|
oliverphilcoxREPO_NAMERascalCPATH_START.@RascalC_extracted@RascalC-master@python@jackknife_weights.py@.PATH_END.py
|
{
"filename": "translator_test_client.py",
"repo_name": "icrar/daliuge",
"repo_path": "daliuge_extracted/daliuge-master/OpenAPI/tests/translator_test_client.py",
"type": "Python"
}
|
import sys

import translator_client as tc

# Configure the generated OpenAPI client to talk to a locally running
# translator service.
translator_config = tc.Configuration()
translator_config.host = "localhost:8084"

# The logical-graph file to translate is supplied as the first CLI argument.
with open(sys.argv[1], "rt") as f:
    graph = f.read()

with tc.ApiClient(translator_config) as translator_client:
    translator = tc.DefaultApi(translator_client)

    # Translate the logical graph into a physical graph template (PGT).
    html_content = translator.gen_pgt(
        json_data=graph, lg_name="test", algo="metis", num_islands=1
    )
    print(html_content)

    # Ask the translator to map the PGT via the manager endpoint.
    html_content = translator.gen_pg(
        pgt_id="test",
        dlg_mgr_host="localhost",
        dlg_mgr_port=8001,
        dlg_mgr_deploy="deploy",
    )
|
icrarREPO_NAMEdaliugePATH_START.@daliuge_extracted@daliuge-master@OpenAPI@tests@translator_test_client.py@.PATH_END.py
|
{
"filename": "womjoin.py",
"repo_name": "msiebert1/UCSC_spectral_pipeline",
"repo_path": "UCSC_spectral_pipeline_extracted/UCSC_spectral_pipeline-master/spectral_reduction/tmath/wombat/womjoin.py",
"type": "Python"
}
|
def womjoin(hop):
    """Join two spectra that abut in wavelength (no overlap).

    Prompts for two hopper slots, verifies that both spectra share the same
    wavelength spacing and abut exactly (the red end of the bluer spectrum is
    one pixel short of the blue end of the redder one), then stores the
    concatenated spectrum in a user-chosen output hopper.

    Parameters
    ----------
    hop : indexable of hopper objects
        Each hopper carries wave/flux/var arrays plus obname and header.

    Returns
    -------
    The hopper collection, updated in place on success; unchanged when the
    spectra are incompatible.
    """
    import numpy as np
    import logging
    from tmath.wombat.inputter import inputter
    from tmath.wombat import HOPSIZE
    print('\nThis will join two hoppers (with no overlap)\n')
    hopnum1 = 0
    hopnum2 = 0
    while (hopnum1 < 1) or (hopnum1 > HOPSIZE):
        hopnum1 = inputter('Enter first hopper: ', 'int', False)
    while (hopnum2 < 1) or (hopnum2 > HOPSIZE):
        # BUG FIX: this prompt previously said "first" (copy-paste error).
        hopnum2 = inputter('Enter second hopper: ', 'int', False)
    # Order the hoppers so hopnum1 is the bluer (shorter-wavelength) one.
    if (hop[hopnum1].wave[0] > hop[hopnum2].wave[0]):
        hopnum1, hopnum2 = hopnum2, hopnum1
    wdelt1 = hop[hopnum1].wave[1] - hop[hopnum1].wave[0]
    wdelt2 = hop[hopnum2].wave[1] - hop[hopnum2].wave[0]
    # Check that both spectra use the same wavelength dispersion.
    if (abs(wdelt1 - wdelt2) > 0.00001):
        print('Spectra do not have same Angstrom/pixel')
        print('Blue side: {}'.format(wdelt1))
        print('Red side: {}'.format(wdelt2))
        return hop
    # Check that the spectra abut: blue end of red == red end of blue + 1 pixel.
    if (abs(hop[hopnum2].wave[0] - (hop[hopnum1].wave[-1] + wdelt1)) > 0.00001):
        print('\nSpectra do not abut\n')
        print('Red end of blue: {}'.format(hop[hopnum1].wave[-1]))
        print('Blue end of red: {}\n'.format(hop[hopnum2].wave[0]))
        return hop
    print('Joining from {} to {}'.format(hop[hopnum1].wave[-1],
                                         hop[hopnum2].wave[0]))
    hopout = 0
    while (hopout < 1) or (hopout > HOPSIZE):
        hopout = inputter('Enter hopper to store combined spectrum: ', 'int', False)
    hop[hopout].wave = np.concatenate([hop[hopnum1].wave, hop[hopnum2].wave])
    hop[hopout].flux = np.concatenate([hop[hopnum1].flux, hop[hopnum2].flux])
    hop[hopout].var = np.concatenate([hop[hopnum1].var, hop[hopnum2].var])
    hop[hopout].obname = hop[hopnum1].obname
    hop[hopout].header = hop[hopnum1].header
    logging.debug('Files: {} and {} joined from {} to {}'.format(
        hop[hopnum1].obname, hop[hopnum2].obname,
        hop[hopnum1].wave[-1], hop[hopnum2].wave[0]))
    # FIX header: the output header is copied unmodified from the blue spectrum.
    return hop
|
msiebert1REPO_NAMEUCSC_spectral_pipelinePATH_START.@UCSC_spectral_pipeline_extracted@UCSC_spectral_pipeline-master@spectral_reduction@tmath@wombat@womjoin.py@.PATH_END.py
|
{
"filename": "core.py",
"repo_name": "adrn/gala",
"repo_path": "gala_extracted/gala-main/gala/potential/frame/core.py",
"type": "Python"
}
|
__all__ = ['FrameBase']
# This package
from ..common import CommonBase
class FrameBase(CommonBase):
    """Base class for reference frames.

    Handles unit validation and parameter bookkeeping by delegating to
    CommonBase helpers (``_parse_parameter_values``, ``_validate_units``,
    ``_prepare_parameters`` — defined elsewhere, not visible here).
    """

    # Frames are assumed three-dimensional.
    ndim = 3

    def __init__(self, *args, units=None, **kwargs):
        # Collect positional/keyword parameter values according to the
        # subclass's declared parameters, then finish initialization.
        parameter_values = self._parse_parameter_values(*args, **kwargs)
        self._setup_frame(parameters=parameter_values,
                          units=units)

    def _setup_frame(self, parameters, units=None):
        # Validate the unit system, then attach converted parameter values.
        self.units = self._validate_units(units)
        self.parameters = self._prepare_parameters(parameters, self.units)
|
adrnREPO_NAMEgalaPATH_START.@gala_extracted@gala-main@gala@potential@frame@core.py@.PATH_END.py
|
{
"filename": "glossary.py",
"repo_name": "waynebhayes/SpArcFiRe",
"repo_path": "SpArcFiRe_extracted/SpArcFiRe-master/scripts/SpArcFiRe-pyvenv/lib/python2.7/site-packages/numpy/doc/glossary.py",
"type": "Python"
}
|
"""
========
Glossary
========
.. glossary::
along an axis
Axes are defined for arrays with more than one dimension. A
2-dimensional array has two corresponding axes: the first running
vertically downwards across rows (axis 0), and the second running
horizontally across columns (axis 1).
Many operations can take place along one of these axes. For example,
we can sum each row of an array, in which case we operate along
columns, or axis 1::
>>> x = np.arange(12).reshape((3,4))
>>> x
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]])
>>> x.sum(axis=1)
array([ 6, 22, 38])
array
A homogeneous container of numerical elements. Each element in the
array occupies a fixed amount of memory (hence homogeneous), and
can be a numerical element of a single type (such as float, int
or complex) or a combination (such as ``(float, int, float)``). Each
array has an associated data-type (or ``dtype``), which describes
the numerical type of its elements::
>>> x = np.array([1, 2, 3], float)
>>> x
array([ 1., 2., 3.])
>>> x.dtype # floating point number, 64 bits of memory per element
dtype('float64')
# More complicated data type: each array element is a combination of
# and integer and a floating point number
>>> np.array([(1, 2.0), (3, 4.0)], dtype=[('x', int), ('y', float)])
array([(1, 2.0), (3, 4.0)],
dtype=[('x', '<i4'), ('y', '<f8')])
Fast element-wise operations, called :term:`ufuncs`, operate on arrays.
array_like
Any sequence that can be interpreted as an ndarray. This includes
nested lists, tuples, scalars and existing arrays.
attribute
A property of an object that can be accessed using ``obj.attribute``,
e.g., ``shape`` is an attribute of an array::
>>> x = np.array([1, 2, 3])
>>> x.shape
(3,)
BLAS
`Basic Linear Algebra Subprograms <http://en.wikipedia.org/wiki/BLAS>`_
broadcast
NumPy can do operations on arrays whose shapes are mismatched::
>>> x = np.array([1, 2])
>>> y = np.array([[3], [4]])
>>> x
array([1, 2])
>>> y
array([[3],
[4]])
>>> x + y
array([[4, 5],
[5, 6]])
See `numpy.doc.broadcasting` for more information.
C order
See `row-major`
column-major
A way to represent items in a N-dimensional array in the 1-dimensional
computer memory. In column-major order, the leftmost index "varies the
fastest": for example the array::
[[1, 2, 3],
[4, 5, 6]]
is represented in the column-major order as::
[1, 4, 2, 5, 3, 6]
Column-major order is also known as the Fortran order, as the Fortran
programming language uses it.
decorator
An operator that transforms a function. For example, a ``log``
decorator may be defined to print debugging information upon
function execution::
>>> def log(f):
... def new_logging_func(*args, **kwargs):
... print("Logging call with parameters:", args, kwargs)
... return f(*args, **kwargs)
...
... return new_logging_func
Now, when we define a function, we can "decorate" it using ``log``::
>>> @log
... def add(a, b):
... return a + b
Calling ``add`` then yields:
>>> add(1, 2)
Logging call with parameters: (1, 2) {}
3
dictionary
Resembling a language dictionary, which provides a mapping between
words and descriptions thereof, a Python dictionary is a mapping
between two objects::
>>> x = {1: 'one', 'two': [1, 2]}
Here, `x` is a dictionary mapping keys to values, in this case
the integer 1 to the string "one", and the string "two" to
the list ``[1, 2]``. The values may be accessed using their
corresponding keys::
>>> x[1]
'one'
>>> x['two']
[1, 2]
Note that dictionaries are not stored in any specific order. Also,
most mutable (see *immutable* below) objects, such as lists, may not
be used as keys.
For more information on dictionaries, read the
`Python tutorial <http://docs.python.org/tut>`_.
Fortran order
See `column-major`
flattened
Collapsed to a one-dimensional array. See `numpy.ndarray.flatten`
for details.
immutable
An object that cannot be modified after execution is called
immutable. Two common examples are strings and tuples.
instance
A class definition gives the blueprint for constructing an object::
>>> class House(object):
... wall_colour = 'white'
Yet, we have to *build* a house before it exists::
>>> h = House() # build a house
Now, ``h`` is called a ``House`` instance. An instance is therefore
a specific realisation of a class.
iterable
A sequence that allows "walking" (iterating) over items, typically
using a loop such as::
>>> x = [1, 2, 3]
>>> [item**2 for item in x]
[1, 4, 9]
It is often used in combination with ``enumerate``::
>>> keys = ['a','b','c']
>>> for n, k in enumerate(keys):
... print("Key %d: %s" % (n, k))
...
Key 0: a
Key 1: b
Key 2: c
list
A Python container that can hold any number of objects or items.
The items do not have to be of the same type, and can even be
lists themselves::
>>> x = [2, 2.0, "two", [2, 2.0]]
The list `x` contains 4 items, each which can be accessed individually::
>>> x[2] # the string 'two'
'two'
>>> x[3] # a list, containing an integer 2 and a float 2.0
[2, 2.0]
It is also possible to select more than one item at a time,
using *slicing*::
>>> x[0:2] # or, equivalently, x[:2]
[2, 2.0]
In code, arrays are often conveniently expressed as nested lists::
>>> np.array([[1, 2], [3, 4]])
array([[1, 2],
[3, 4]])
For more information, read the section on lists in the `Python
tutorial <http://docs.python.org/tut>`_. For a mapping
type (key-value), see *dictionary*.
mask
A boolean array, used to select only certain elements for an operation::
>>> x = np.arange(5)
>>> x
array([0, 1, 2, 3, 4])
>>> mask = (x > 2)
>>> mask
array([False, False, False, True, True])
>>> x[mask] = -1
>>> x
array([ 0, 1, 2, -1, -1])
masked array
Array that suppressed values indicated by a mask::
>>> x = np.ma.masked_array([np.nan, 2, np.nan], [True, False, True])
>>> x
masked_array(data = [-- 2.0 --],
mask = [ True False True],
fill_value = 1e+20)
<BLANKLINE>
>>> x + [1, 2, 3]
masked_array(data = [-- 4.0 --],
mask = [ True False True],
fill_value = 1e+20)
<BLANKLINE>
Masked arrays are often used when operating on arrays containing
missing or invalid entries.
matrix
A 2-dimensional ndarray that preserves its two-dimensional nature
throughout operations. It has certain special operations, such as ``*``
(matrix multiplication) and ``**`` (matrix power), defined::
>>> x = np.mat([[1, 2], [3, 4]])
>>> x
matrix([[1, 2],
[3, 4]])
>>> x**2
matrix([[ 7, 10],
[15, 22]])
method
A function associated with an object. For example, each ndarray has a
method called ``repeat``::
>>> x = np.array([1, 2, 3])
>>> x.repeat(2)
array([1, 1, 2, 2, 3, 3])
ndarray
See *array*.
record array
An :term:`ndarray` with :term:`structured data type` which has been
subclassed as ``np.recarray`` and whose dtype is of type ``np.record``,
making the fields of its data type to be accessible by attribute.
reference
If ``a`` is a reference to ``b``, then ``(a is b) == True``. Therefore,
``a`` and ``b`` are different names for the same Python object.
row-major
A way to represent items in a N-dimensional array in the 1-dimensional
computer memory. In row-major order, the rightmost index "varies
the fastest": for example the array::
[[1, 2, 3],
[4, 5, 6]]
is represented in the row-major order as::
[1, 2, 3, 4, 5, 6]
Row-major order is also known as the C order, as the C programming
language uses it. New NumPy arrays are by default in row-major order.
self
Often seen in method signatures, ``self`` refers to the instance
of the associated class. For example:
>>> class Paintbrush(object):
... color = 'blue'
...
... def paint(self):
... print("Painting the city %s!" % self.color)
...
>>> p = Paintbrush()
>>> p.color = 'red'
>>> p.paint() # self refers to 'p'
Painting the city red!
slice
Used to select only certain elements from a sequence::
>>> x = range(5)
>>> x
[0, 1, 2, 3, 4]
>>> x[1:3] # slice from 1 to 3 (excluding 3 itself)
[1, 2]
>>> x[1:5:2] # slice from 1 to 5, but skipping every second element
[1, 3]
>>> x[::-1] # slice a sequence in reverse
[4, 3, 2, 1, 0]
Arrays may have more than one dimension, each which can be sliced
individually::
>>> x = np.array([[1, 2], [3, 4]])
>>> x
array([[1, 2],
[3, 4]])
>>> x[:, 1]
array([2, 4])
structured data type
A data type composed of other datatypes
tuple
A sequence that may contain a variable number of types of any
kind. A tuple is immutable, i.e., once constructed it cannot be
changed. Similar to a list, it can be indexed and sliced::
>>> x = (1, 'one', [1, 2])
>>> x
(1, 'one', [1, 2])
>>> x[0]
1
>>> x[:2]
(1, 'one')
A useful concept is "tuple unpacking", which allows variables to
be assigned to the contents of a tuple::
>>> x, y = (1, 2)
>>> x, y = 1, 2
This is often used when a function returns multiple values:
>>> def return_many():
... return 1, 'alpha', None
>>> a, b, c = return_many()
>>> a, b, c
(1, 'alpha', None)
>>> a
1
>>> b
'alpha'
ufunc
Universal function. A fast element-wise array operation. Examples include
``add``, ``sin`` and ``logical_or``.
view
An array that does not own its data, but refers to another array's
data instead. For example, we may create a view that only shows
every second element of another array::
>>> x = np.arange(5)
>>> x
array([0, 1, 2, 3, 4])
>>> y = x[::2]
>>> y
array([0, 2, 4])
>>> x[0] = 3 # changing x changes y as well, since y is a view on x
>>> y
array([3, 2, 4])
wrapper
Python is a high-level (highly abstracted, or English-like) language.
This abstraction comes at a price in execution speed, and sometimes
it becomes necessary to use lower level languages to do fast
computations. A wrapper is code that provides a bridge between
high and the low level languages, allowing, e.g., Python to execute
code written in C or Fortran.
Examples include ctypes, SWIG and Cython (which wraps C and C++)
and f2py (which wraps Fortran).
"""
from __future__ import division, absolute_import, print_function
|
waynebhayesREPO_NAMESpArcFiRePATH_START.@SpArcFiRe_extracted@SpArcFiRe-master@scripts@SpArcFiRe-pyvenv@lib@python2.7@site-packages@numpy@doc@glossary.py@.PATH_END.py
|
{
"filename": "2022_10_19_093542_fa319f214160_add_created_by.py",
"repo_name": "PrefectHQ/prefect",
"repo_path": "prefect_extracted/prefect-main/src/prefect/server/database/_migrations/versions/sqlite/2022_10_19_093542_fa319f214160_add_created_by.py",
"type": "Python"
}
|
"""add_created_by
Revision ID: fa319f214160
Revises: ad4b1b4d1e9d
Create Date: 2022-10-19 09:35:42.371899
"""
import sqlalchemy as sa
from alembic import op
import prefect
# revision identifiers, used by Alembic.
revision = "fa319f214160"
down_revision = "ad4b1b4d1e9d"
branch_labels = None
depends_on = None
def upgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    def _actor_column(name, schema_type):
        # Nullable Pydantic-serialized column recording which actor touched
        # the row; built fresh for each table so Column objects are not shared.
        return sa.Column(
            name,
            prefect.server.utilities.database.Pydantic(schema_type),
            nullable=True,
        )

    with op.batch_alter_table("deployment", schema=None) as batch_op:
        batch_op.add_column(
            _actor_column("created_by", prefect.server.schemas.core.CreatedBy)
        )
        batch_op.add_column(
            _actor_column("updated_by", prefect.server.schemas.core.UpdatedBy)
        )

    with op.batch_alter_table("flow_run", schema=None) as batch_op:
        batch_op.add_column(
            _actor_column("created_by", prefect.server.schemas.core.CreatedBy)
        )
    # ### end Alembic commands ###
def downgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    # Reverse of upgrade(): drop the actor-tracking columns, tables in
    # reverse order of their creation.
    with op.batch_alter_table("flow_run", schema=None) as batch_op:
        batch_op.drop_column("created_by")

    with op.batch_alter_table("deployment", schema=None) as batch_op:
        batch_op.drop_column("updated_by")
        batch_op.drop_column("created_by")
    # ### end Alembic commands ###
|
PrefectHQREPO_NAMEprefectPATH_START.@prefect_extracted@prefect-main@src@prefect@server@database@_migrations@versions@sqlite@2022_10_19_093542_fa319f214160_add_created_by.py@.PATH_END.py
|
{
"filename": "fm.py",
"repo_name": "meganmansfield/IGRINS_transit",
"repo_path": "IGRINS_transit_extracted/IGRINS_transit-main/retrieval_set_up/fm.py",
"type": "Python"
}
|
import math as mth
import math
import numpy as np
import scipy as sp
from array import *
from scipy import interpolate
from scipy import signal
from scipy import special
from scipy import interp
from scipy import ndimage
import pdb
import datetime
import pickle
from scipy import constants
from numba import jit
from numba import jit, vectorize, guvectorize, float64, complex64, int32,float64,cuda
import time
import h5py
import matplotlib as mpl
# mpl.use('TkAgg')
from matplotlib.pyplot import *
#GENERAL COLLECTION OF NECESSARY RADIATIVE TRANSFER FUNCTIONS. SOMEWHAT COMMENTED...
###BEGIN TRANSMISSION SPECTRA GPU ROUTINES##################################
@guvectorize([(float64[:], float64[:], float64[:], float64[:], float64[:,:,:], float64[:,:])], '(m),(n),(o),(o),(n,m,q)->(o,q)',target='cuda',nopython=True)
def xsec_interp_gpu_trans(logPgrid, logTgrid, Patm, Tatm, xsec, xsec_int):
    # CUDA kernel: bilinearly interpolate tabulated log10 cross-sections onto
    # the atmosphere's (P, T) profile; one output row per atmospheric layer.
    #   logPgrid, logTgrid : log10 P/T grids of the cross-section table
    #   Patm, Tatm         : atmospheric level pressures/temperatures
    #   xsec               : table of log10 cross-sections, last axis = gas
    #   xsec_int           : output, per-layer per-gas linear cross-section
    Ng=xsec.shape[-1]
    Natm=Patm.shape[0] #number of midpoint atmosphere points
    for i in range(Natm-1): #looping through atmospheric layers
        Pavg=0.5*(Patm[i]+Patm[i+1])
        Tavg=0.5*(Tatm[i]+Tatm[i+1])
        # NOTE(review): Pavg/Tavg above are computed but unused — the
        # interpolation point below uses the level-edge values Patm[i]/Tatm[i].
        # Confirm whether layer midpoints were intended.
        y=mth.log10(Patm[i])
        x=mth.log10(Tatm[i])
        # Clamp the query point onto the tabulated grid range.
        if y > logPgrid[-1]: y=logPgrid[-1]
        if y < logPgrid[0]: y=logPgrid[0]
        if x > logTgrid[-1]: x=logTgrid[-1]
        if x < logTgrid[0]: x=logTgrid[0]
        #foo=logPgrid[logPgrid < -2.0]
        # Locate bracketing grid indices by linear scan.
        p_ind_hi=0
        while logPgrid[p_ind_hi] <= y: p_ind_hi=p_ind_hi+1
        p_ind_low=p_ind_hi-1
        T_ind_hi=0
        while logTgrid[T_ind_hi] <= x: T_ind_hi=T_ind_hi+1
        T_ind_low=T_ind_hi-1
        y2=logPgrid[p_ind_hi]
        y1=logPgrid[p_ind_low]
        x2=logTgrid[T_ind_hi]
        x1=logTgrid[T_ind_low]
        # Bilinear interpolation weights in (T, P).
        w11=(x2-x)/(x2-x1)
        w21=(x-x1)/(x2-x1)
        yy1=(y2-y)/(y2-y1)
        yy2=(y-y1)/(y2-y1)
        ww11=yy1*w11
        ww21=yy1*w21
        ww12=yy2*w11
        ww22=yy2*w21
        for j in range(Ng): #looping through gases
            Q11=xsec[T_ind_low,p_ind_low,j]
            Q12=xsec[T_ind_low,p_ind_hi,j]
            Q22=xsec[T_ind_hi,p_ind_hi,j]
            Q21=xsec[T_ind_hi,p_ind_low,j]
            # Interpolate in log10 space, then convert back to linear units.
            xsec_int[i,j]=10**(ww11*Q11+ww21*Q21+ww12*Q12+ww22*Q22)
#########
#COMPUTES THE LIMB TRANSMITTANCE t AS A FUNCTION OF ALTITUDE/LEVEL, Z
#newest version, pulls abundance loop out seperately to make faster
@guvectorize([(float64[:,:], float64[:,:],float64[:,:], float64[:])], '(o,q), (m,l), (o,o) -> (o) ',target='cuda',nopython=True)
def CalcTran(xsecs, Fractions,uarr, trans):
    # CUDA kernel: limb transmittance as a function of tangent altitude/level.
    #   xsecs     : per-level, per-gas cross-sections (from xsec_interp_gpu_trans)
    #   Fractions : per-gas, per-level mixing ratios
    #   uarr      : per-tangent-height path columns (from compute_paths)
    #   trans     : output, exp(-tau) per level
    ngas=len(Fractions)
    nlevels=len(uarr)
    nwno=xsecs.shape[0]
    #ncont=xsecContinuum.shape[-1]
    kb=1.38E-23
    nlev=113 ###FINDME###---this is hard coded (its same value as nelvels but python gpu throws a fit if it's dynamically typed...)
    wt_xsec=cuda.local.array(shape=(nlev,),dtype=float64) #stupid memory BS ot define local array
    #trans=np.zeros(nlevels)
    # Abundance-weighted total cross-section per level.
    for i in range(nlevels):
        wt_xsec[i]=0.
        for k in range(ngas):
            wt_xsec[i]+=Fractions[k,i]*xsecs[i,k]
    # Accumulate slant optical depth along each tangent beam, then exp(-tau).
    # The factor of 2 presumably accounts for the two symmetric halves of the
    # chord — confirm against the path geometry in compute_paths.
    for i in range(nlevels-2):
        tautmp=0.E0
        for j in range(i):
            curlevel=i-j-1
            tautmp+=2.*wt_xsec[curlevel]*uarr[i,j]
        trans[i]=mth.exp(-tautmp)
######
##this is just a stupid function to take the dot-product so as to not have to copy the big ass
#t[wno, Z] array from GPU to CPU.
@guvectorize([(float64[:], float64[:],float64[:],float64, int32, float64[:])],'(o), (o), (o), (), () -> ()',target='cuda',nopython=True)
def CalcAnnulus(trans, Z, dZ, r0, locPc, depth):
    # CUDA reduction: integrate (1 - transmittance) * (r0 + Z) dZ over levels —
    # the effective opaque-annulus term of the transit depth.  Levels at or
    # beyond the cloud-top index locPc are forced fully opaque (trans -> 0).
    # Kept on the GPU to avoid copying the full t[wno, Z] array to the host.
    nlevels=len(Z)
    FF=0.
    for i in range(nlevels):
        if i >= locPc: trans[i]=0.
        FF+=(1-trans[i])*(r0+Z[i])*dZ[i]
    depth[0]=FF
#####
#compute path langths and path mass for each segment of each tangent height
@jit(nopython=True)
def compute_paths(Z, Pavg, Tavg,r0):
    # Path column density for each segment of each tangent height, from the
    # chord geometry of concentric spherical shells.
    #   Z    : level altitudes [m]
    #   Pavg : layer mid-point pressures [bar] (converted to Pa via 1.0E5)
    #   Tavg : layer mid-point temperatures [K]
    #   r0   : planet reference radius [m]
    # Returns uarr[i, j] — column along tangent beam i through segment j.
    uarr=np.zeros((len(Z),len(Z)))
    nlevels=len(Z)
    kb=1.38E-23  # Boltzmann constant [J/K]
    for i in range(nlevels-2):
        for j in range(i):
            curlevel=i-j-1
            r1=r0+Z[i]        # impact radius of the tangent beam
            r2=r0+Z[i-j]      # outer shell radius of this segment
            r3=r0+Z[curlevel] # inner shell radius of this segment
            # Chord-segment length sqrt(r3^2-r1^2) - sqrt(r2^2-r1^2) (written
            # as (x^2)^0.25 of the squared difference), times ideal-gas number
            # density n = P / (kB T).
            uarr[i,j]=(((r3**2-r1**2)**2)**0.25-((r2**2-r1**2)**2)**0.25)*(Pavg[curlevel]*1.0E5)/(kb*Tavg[curlevel])
    return uarr
######END TRANSIT GPU ROUTINES##################################
#"wrapper script" for GPU transmission spectrum routines (what is called in make_trans_spec_1DRC and call_pymultinest)
def tran(T, P, mmw,Ps,Pc,H2O,CO,OH, HmFF,HmBF, H2,He, amp, power,M,Rstar,Rp):
    """Wrapper for the GPU transmission-spectrum routines.

    Builds a hydrostatic altitude grid from the (T, P) profile, interpolates
    the pre-loaded cross-sections onto it, computes limb transmittances on
    the GPU, and integrates them into a transit depth.

    Parameters (as used below):
        T, P           : level temperatures [K] and pressures [bar]
        mmw            : per-level mean molecular weight [amu]
        Ps             : reference pressure anchoring the altitude grid [bar]
        Pc             : grey cloud-top pressure [bar]
        H2O..HmBF, H2, He : per-level mixing ratios (same order as xsects())
        amp, power     : not referenced in this function (presumably haze
                         parameters handled upstream — confirm)
        M, Rstar, Rp   : planet mass [MJup], stellar radius [Rsun], planet
                         radius [RJup]

    Returns
    -------
    (wno, F, Z) : wavenumber grid, transit depth (Rp/R*)^2 spectrum, and the
    level altitude grid [m].
    """
    #putting abundance arrays into 2D array for shipping off to GPU--same order as in xsects() loading routine
    Fractions=np.array([H2*H2, He*H2, H2O, CO,OH,HmFF,HmBF ])
    #loading cross-sections (kind of a dumb way of doing this)
    # NOTE(review): 'restore' is not defined in this module as shown — it must
    # be supplied elsewhere and hold the tuple returned by xsects(); confirm.
    logPgrid = restore.xsects[0] #pressure grid that xsecs are pre-computed on
    logTgrid = restore.xsects[1] #temperature grid that xsecs are pre-computed on
    wno = restore.xsects[2] #wavenumber array for xsecs
    d_xsecarr = restore.xsects[3] #this is a memory pointer that puts the xses on GPU
    #Computing hydrostatic grid
    n = len(P)
    Z=np.zeros(n) #level altitudes
    dZ=np.zeros(n) #layer thickness array
    r0=Rp*69911.*1.E3 #converting planet radius to meters
    mmw=mmw*1.660539E-27 #converting mmw to Kg
    kb=1.38E-23
    G=6.67384E-11
    M=1.898E27*M #jupiter masses to kg
    #Compute avg Temperature at each grid mid-point
    Tavg = np.array([0.0]*(n-1))
    Pavg = np.array([0.0]*(n-1))
    for z in range(n-1):
        Pavg[z] = np.sqrt(P[z]*P[z+1])
        # NOTE(review): top-level scipy 'interp' and 'sp.log10' are removed in
        # modern SciPy — this line depends on the legacy version pinned here.
        Tavg[z] = interp(np.log10(Pavg[z]),sp.log10(P),T)
    #create hydrostatic altitutde grid from P and T
    Phigh=P.compress((P>Ps).flat) #deeper than reference pressure
    Plow=P.compress((P<=Ps).flat) #shallower than reference pressure
    for i in range(Phigh.shape[0]): #looping over levels above ref pressure
        i=i+Plow.shape[0]-1
        g=G*M/(r0+Z[i])**2 #value of gravity at each index/pressure layer
        H=kb*Tavg[i]/(mmw[i]*g) #scale height
        dZ[i]=H*np.log(P[i+1]/P[i]) #layer thickness in altitude units (m), dZ is negative below reference pressure
        Z[i+1]=Z[i]-dZ[i] #level altitude
    for i in range(Plow.shape[0]-1): #looping over levels below ref pressure
        i=Plow.shape[0]-i-1
        g=G*M/(r0+Z[i])**2
        H=kb*Tavg[i]/(mmw[i]*g)
        dZ[i]=H*np.log(P[i+1]/P[i])
        Z[i-1]=Z[i]+dZ[i]
    #Xsec interpolation on P-T grid
    xsec_inter=xsec_interp_gpu_trans(logPgrid, logTgrid, P, T, d_xsecarr)
    #compute transmission spectrum
    path_arr=compute_paths(Z, Pavg, Tavg, r0) #path segments "ds" along each tangent beam
    t=CalcTran(xsec_inter, Fractions, path_arr) #limb transmittance (t[wno, Z])
    #t5=time.time()
    #print 'TRANSMITTANCE', t5-t4
    locPc=np.where(P >= Pc)[0][0] #finding cloud top pressure array index
    annulus=CalcAnnulus(t, Z, dZ, r0, locPc) #computing the annlus integral
    #t6=time.time()
    #print 'DOT PRODUCT', t6-t5
    annulus=annulus.copy_to_host() #copy annulus integral from GPU to CPU memory
    #t7=time.time()
    #print 'COPY', t7-t6
    F=((r0+np.min(Z[:-1]))/(Rstar*6.955E8))**2+2./(Rstar*6.955E8)**2.*annulus #the usual transmission equation
    #t8=time.time()
    #print 'FINAL', t8-t7
    return wno, F, Z
#*******************************************************************
# FILE: xsects.py
#
# DESCRIPTION: This function loads the cross-sections
#
#*******************************************************************
def xsects():
    # Load pre-tabulated absorption cross-section grids from HDF5 files,
    # pack them into one (wno, T, P, gas) master array, crop/downsample the
    # wavenumber grid, and push the result into GPU memory.
    # NOTE(review): this function uses Python 2 print statements — the module
    # targets Python 2.
    xsecpath='/data/mrline2/ABSCOEFF/SAMPLED_XSEC/' #location on agave where cross-sections live
    ### Read in x-section arrays
    # H2-H2
    file=xsecpath+'xsecarrH2H2_FREED_samp_3800_10000_R500000.h5'
    hf=h5py.File(file, 'r')
    wno=np.array(hf['wno'])
    T=np.array(hf['T'])
    P=np.array(hf['P'])
    xsecarrH2=np.array(hf['xsec'])
    hf.close()
    print 'H2'
    #define mega xsecarr, pre-filled with -50 (a log10 floor value)
    Ngas=7
    xsecarr=(np.ones((len(wno), len(T), len(P), Ngas))*(-50))
    ####
    xsecarr[:,:,:,0]=xsecarrH2.T
    del xsecarrH2
    # H2-He
    file=xsecpath+'xsecarrH2He_FREED_samp_3800_10000_R500000.h5'
    hf=h5py.File(file, 'r')
    wno=np.array(hf['wno'])
    T=np.array(hf['T'])
    P=np.array(hf['P'])
    xsecarr[:,:,:,1]=np.array(hf['xsec']).T
    hf.close()
    print 'He'
    # H2O
    file=xsecpath+'xsecarrH2O_POK_samp_3800_10000_R500000.h5'
    hf=h5py.File(file, 'r')
    wno=np.array(hf['wno'])
    #T=np.array(hf['T'])
    #P=np.array(hf['P'])
    xsecarr[:,:,:,2]=np.array(hf['xsec']).T#np.array(hf['xsec']).T[:,5:,:-1]
    for i in range(16): xsecarr[:,i,:,2]=xsecarr[:,16,:,2] #Ehsan's pokozatel xsecs stop at 500K, so forcing to 500K xsecs below 500K
    hf.close()
    print 'H2O'
    #CO
    file=xsecpath+'xsecarrCO_HITEMP_HELIOS_samp_3800_10000_R500000.h5'
    hf=h5py.File(file, 'r')
    wno=np.array(hf['wno'])
    #T=np.array(hf['T'])
    #P=np.array(hf['P'])
    xsecarr[:,:,:,3]=np.array(hf['xsec']).T
    hf.close()
    print 'CO'
    #'''
    # OH
    file=xsecpath+'xsecarrOH_HITEMP_HELIOS_samp_3800_10000_R500000.h5'
    hf=h5py.File(file, 'r')
    wno=np.array(hf['wno'])
    #T=np.array(hf['T'])
    #P=np.array(hf['P'])
    xsecarr[:,:,:,4]=np.array(hf['xsec']).T
    hf.close()
    print 'OH'
    #HmFF
    file=xsecpath+'xsecarrHMFF_samp_3800_10000_R500000.h5'
    hf=h5py.File(file, 'r')
    wno=np.array(hf['wno'])
    #T=np.array(hf['T'])
    #P=np.array(hf['P'])
    xsecarr[:,:,:,5]=np.array(hf['xsec']).T
    hf.close()
    print 'HmFF'
    #HmBF
    file=xsecpath+'xsecarrHMBF_samp_3800_10000_R500000.h5'
    hf=h5py.File(file, 'r')
    wno=np.array(hf['wno'])
    #T=np.array(hf['T'])
    #P=np.array(hf['P'])
    xsecarr[:,:,:,6]=np.array(hf['xsec']).T
    hf.close()
    print 'HmBF'
    # Additional species kept for reference; enable one by assigning it a
    # free gas slot (and bumping Ngas accordingly).
    # # HCN
    # file=xsecpath+'xsecarrHCN_EXOMOL_HELIOS_samp_3800_10000_R500000.h5'
    # hf=h5py.File(file, 'r')
    # wno=np.array(hf['wno'])
    # #T=np.array(hf['T'])
    # #P=np.array(hf['P'])
    # xsecarr[:,:,:,5]=np.array(hf['xsec']).T
    # hf.close()
    # print 'HCN'
    # #'''
    # # SiO
    # file=xsecpath+'xsecarrSiO_EBJT_EHSAN_samp_3800_10000_R500000.h5'
    # hf=h5py.File(file, 'r')
    # wno=np.array(hf['wno'])
    # #T=np.array(hf['T'])
    # #P=np.array(hf['P'])
    # xsecarr[:,:,:,6]=np.array(hf['xsec']).T
    # hf.close()
    # print 'SiO'
    # #'''
    # # TiO
    # file=xsecpath+'xsecarrTiO_TOTO_EHSAN_samp_3800_10000_R500000.h5'
    # hf=h5py.File(file, 'r')
    # wno=np.array(hf['wno'])
    # #T=np.array(hf['T'])
    # #P=np.array(hf['P'])
    # xsecarr[:,:,:,7]=np.array(hf['xsec']).T
    # hf.close()
    # print 'TiO'
    # #'''
    # # VO
    # file=xsecpath+'xsecarrVO_VOMYT_EHSAN_samp_3800_10000_R500000.h5'
    # hf=h5py.File(file, 'r')
    # wno=np.array(hf['wno'])
    # #T=np.array(hf['T'])
    # #P=np.array(hf['P'])
    # xsecarr[:,:,:,7]=np.array(hf['xsec']).T
    # hf.close()
    # print 'VO'
    # #'''
    # # FeH
    # file=xsecpath+'xsecarrFeH_MOLLIST_EHSAN_samp_3800_10000_R500000.h5'
    # hf=h5py.File(file, 'r')
    # wno=np.array(hf['wno'])
    # #T=np.array(hf['T'])
    # #P=np.array(hf['P'])
    # xsecarr[:,:,:,5]=np.array(hf['xsec']).T
    # hf.close()
    # print 'FeH'
    # # C2H2
    # file=xsecpath+'xsecarrC2H2_ExoMol_HELIOS_SUPER_samp_3800_10000_R500000.h5'
    # hf=h5py.File(file, 'r')
    # wno=np.array(hf['wno'])
    # #T=np.array(hf['T'])
    # #P=np.array(hf['P'])
    # xsecarr[:,:,:,5]=np.array(hf['xsec']).T
    # hf.close()
    # print 'C2H2'
    # # CH4
    # file=xsecpath+'xsecarrCH4_HITEMP_HELIOS_samp_3800_10000_R500000.h5'
    # hf=h5py.File(file, 'r')
    # wno=np.array(hf['wno'])
    # #T=np.array(hf['T'])
    # #P=np.array(hf['P'])
    # xsecarr[:,:,:,6]=np.array(hf['xsec']).T
    # hf.close()
    # print 'CH4'
    # # 13CO
    # file=xsecpath+'xsecarrCO_ISOTOPE_HITEMP_HELIOS_samp_3800_10000_R500000.h5'
    # hf=h5py.File(file, 'r')
    # wno=np.array(hf['wno'])
    # #T=np.array(hf['T'])
    # #P=np.array(hf['P'])
    # xsecarr[:,:,:,8]=np.array(hf['xsec']).T
    # hf.close()
    # print 'C13O16'
    #cropping the wavenumber grid over selected range wnomin to wnomax (current full span is 3800 - 10000 cm-1 (1 - 2.63 um), native R=500K)
    wnomin =3500#minimum wno cut
    wnomax =9500#maximum wno cut
    loc=np.where((wno <= wnomax) & (wno >= wnomin))
    loc=loc[0]
    wno=wno[loc[::2]] #sampling down: doing every-<number>-wavenumber-point (so R=250K instead of R=500K)--works ok for final R < 60K
    ###note, can probably crop in T and P as well to save memory to add more xsecs at once...
    xx=np.ascontiguousarray(xsecarr[loc[::2],:,:,:]) #putting as a "c-formatted" continuous array for GPU
    del xsecarr
    print 'DONE READ'
    return np.log10(P),np.log10(T),wno,cuda.to_device(xx) #returing arrays, punting mastr xsecarry onto GPU--this eats the memory it's many GB big
#********************************************************************************
# FILE: TP.py
#
# DESCRIPTION: This function takes stellar, planetary, and atmospheric parameters
# and returns the temperature-pressure profile. -- This is the Guillot 2010 (rather, the Parmentier 2014 modification) profile
#
# CALLING SEQUENCE: >>> tp = TP(x[0],x[1],x[2],x[3],x[4],x[5],x[6],x[7],x[8],x[9])
#
# NOTE: run '$ module load python' before running '$ python'
#
# Test: >>> x = [0.93,0.598,4400,0.01424,100,10.**3.7,10.**(-2.-2.),10.**(-1-2),10.**(-2),1.]
# >>> from TP import TP
# >>> tp = TP(x[0],x[1],x[2],x[3],x[4],x[5],x[6],x[7],x[8],x[9])
# >>> T = tp[0]
# >>> P = tp[1]
#********************************************************************************
def TP(Teq, Teeff, g00, kv1, kv2, kth, alpha):
    """Analytic temperature-pressure profile of Guillot (2010), with the
    Parmentier & Guillot (2014) two-visible-stream modification.

    Parameters
    ----------
    Teq : float
        Equilibrium (irradiation) temperature [K].
    Teeff : float
        Internal effective temperature [K].
    g00 : float
        Surface gravity -- assumed m s^-2; TODO confirm units against callers.
    kv1, kv2 : float
        Opacities of the two visible streams.
    kth : float
        Thermal (infrared) opacity.
    alpha : float
        Fraction of the visible flux carried by the second stream.

    Returns
    -------
    T, P : ndarray
        Temperature [K] and pressure [bar] on a fixed log10(tau) grid of
        300 points spanning tau = 1e-10 .. 1e20.
    """
    Teff = Teeff
    f = 1.0  # solar re-radiation factor
    A = 0.0  # planetary albedo (not used below; kept for reference)
    g0 = g00
    # Equilibrium temperature and the visible-to-thermal opacity ratios
    T0 = Teq
    gamma1 = kv1/kth
    gamma2 = kv2/kth
    # Fixed grid in log10 of the thermal optical depth
    logtau = np.arange(-10, 20, .1)
    tau = 10**logtau
    # Guillot (2010) Eq. 29: internal-heat term plus the two visible streams.
    # NOTE: scipy.exp (sp.exp) was removed from SciPy; use numpy's exp.
    T4ir = 0.75*(Teff**(4.))*(tau+(2.0/3.0))
    f1 = 2.0/3.0 + 2.0/(3.0*gamma1)*(1.+(gamma1*tau/2.0-1.0)*np.exp(-gamma1*tau))+2.0*gamma1/3.0*(1.0-tau**2.0/2.0)*special.expn(2.0, gamma1*tau)
    f2 = 2.0/3.0 + 2.0/(3.0*gamma2)*(1.+(gamma2*tau/2.0-1.0)*np.exp(-gamma2*tau))+2.0*gamma2/3.0*(1.0-tau**2.0/2.0)*special.expn(2.0, gamma2*tau)
    T4v1 = f*0.75*T0**4.0*(1.0-alpha)*f1
    T4v2 = f*0.75*T0**4.0*alpha*f2
    T = (T4ir+T4v1+T4v2)**(0.25)
    # Hydrostatic conversion from optical depth to pressure [bar]
    P = tau*g0/(kth*0.1)/1.E5
    # Return TP profile
    return T, P
"""
TP_MS: Temperature-Pressure profile from Madhu & Seager (2009), Equations (1,2)
INPUTS:
P: array of pressure points
T0: T at the lowest pressure (highest altitude)
P1,P2: highest pressure in lvl 1, turn-over pressure in level 2
a1, a2: alpha_x as in paper
P3: top of level 3, below which is assumed isothermal
OUTPUTS:
Returned array of temperature values that are are mapped onto the input pressure array P
"""
def TP_MS(P,T0,P1,P2,P3,a1,a2):
P=P[::-1] #reverse array to be low -> high pressure
n=P.shape[0]
beta=0.5 #set beta as in paper
Tarr=np.zeros(n)
P0=P[-1] #first pressure point
Tarr1=(np.log(P/P0)/a1)**(1./beta)+T0 #Layer 1 (Equation 1)
T2=(np.log(P1/P0)/a1)**(1./beta)+T0-(np.log(P1/P2)/a2)**(1./beta)
Tarr2=(np.log(P/P2)/a2)**(1./beta)+T2 #Layer 2 (Equation 2)
Tarr[P<P1]=Tarr1[P<P1] #Load in Layer 1 equation into return array
Tarr[P>=P1]=Tarr2[P>=P1] #Load in Layer 2 equation into return array
loc3=np.where(P>=P3)[0]
Tarr[loc3]=Tarr2[loc3][-1]
Tarr[Tarr<100]=100
#pdb.set_trace()
return Tarr[::-1]
#**************************************************************
# FILE: restore.py
#
# DESCRIPTION: This class calls the function xsects(), thus
# loading the x-sections as global variables.
#
# USAGE: >>> from restore import restore
# >>> Pgrid = restore.xsects[0]
#
#**************************************************************
class restore():
    """Namespace that loads the opacity cross-sections exactly once.

    Importing this module triggers the (expensive) xsects() call below; the
    resulting tuple is then shared by everything via the class attribute.
    """
    # NOTE(review): assumes xsects() is defined earlier in this file and is
    # callable with no required arguments here -- confirm against the full file.
    xsects = xsects()
#**************************************************************************
# FILE:get_rot_ker.py
#
# DESCRIPTION: Computes rotation kernal for convolution
# vsini is in km/s, wStar is the model wavelength grid (constant R)
#**************************************************************************
def get_rot_ker(vsini, wStar):
    """Build a rotational broadening kernel on the model velocity grid.

    vsini is in km/s; wStar is the model wavelength grid (constant R).
    Returns a 401-point, unit-sum kernel following the classical
    1/(pi*sqrt(1-x^2)) rotation profile.
    """
    nx, = wStar.shape  # implicitly requires a 1-D wavelength grid
    # Mean fractional wavelength step of the grid, converted to km/s.
    c_kms = 2.998E5
    dRV = np.mean(2.0*np.diff(wStar)/(wStar[1:] + wStar[:-1]))*c_kms
    nker = 401
    half = (nker - 1)//2
    # Velocity offsets of each kernel pixel in units of vsini.
    x = (np.arange(nker) - half)*dRV/vsini
    rker = np.zeros(nker)
    inside = np.abs(x) < 1.0
    rker[inside] = 1.0/(np.pi*np.sqrt(1.0 - x[inside]**2))
    # Normalize to unit area so convolution preserves flux.
    rker /= rker.sum()
    return rker
def get_rot_ker_ecl(vsini, wStar):
    """Build the eclipse-variant rotational kernel (sqrt(1 - x^2) profile).

    vsini is in km/s; wStar is the model wavelength grid (constant R).
    Returns a 401-point, unit-sum kernel.
    """
    nx, = wStar.shape  # implicitly requires a 1-D wavelength grid
    # Mean fractional wavelength step of the grid, converted to km/s.
    c_kms = 2.998E5
    dRV = np.mean(2.0*np.diff(wStar)/(wStar[1:] + wStar[:-1]))*c_kms
    nker = 401
    half = (nker - 1)//2
    # Velocity offsets of each kernel pixel in units of vsini.
    x = (np.arange(nker) - half)*dRV/vsini
    rker = np.zeros(nker)
    inside = np.abs(x) < 1.0
    rker[inside] = np.sqrt(1.0 - x[inside]**2)
    # Normalize to unit area so convolution preserves flux.
    rker /= rker.sum()
    return rker
#**************************************************************************
# FILE:fx.py
#
# DESCRIPTION: Forward model...this takes in "input parameters (x)"
# and sets it up to pass into pymultinest
#
# USAGE:
#**************************************************************************
#needs to take in data wavelength grid here
def fx_trans(x):
    """Forward transmission-spectrum model wrapper for the retrieval.

    Unpacks the free-parameter vector, builds the pressure/temperature and
    abundance profiles, and calls tran() to compute the transit depth.

    Args:
        x: parameter vector
           [logH2O, logCO, logOH, logHmFF, logHmBF, Tiso, xRp, logPc]
           (log10 volume mixing ratios, isothermal temperature [K],
           planet-radius scale factor, log10 cloud-top pressure).

    Returns:
        tuple: (wavenumber grid, transit depth array).
    """
    #print(x)
    logH2O, logCO, logOH, logHmFF, logHmBF, Tiso, xRp, logPc =x
    # NOTE(review): npars is computed but never used below.
    npars=np.array(x).shape[0]
    # Remainder of the atmosphere is filled with an H2/He mix; clamp at zero
    # in case the trace gases already sum past unity.
    fH2He = 1.-(10.**logH2O+10.**logCO+10**logOH+10**logHmFF+10**logHmBF)
    if fH2He < 0.0:
        fH2He = 0.0
    frac=0.176471  # He/H2 number ratio
    fH2=fH2He/(1.+frac)
    fHe=frac*fH2
    # mmw = 2.*fH2 + 4.*fHe + (18.*10.**logH2O + 28.*10.**logCO + 17.*10.**logOH + 1.*10.**logHmFF + 1.*10.**logHmBF)
    mmw=1.3  # mean molecular weight fixed; the commented line above would compute it
    #planet params (hard-coded for this target; presumably Jupiter/solar radii -- TODO confirm units)
    Rp=1.83
    Rstar=1.73
    Mp=0.92
    # Fixed log10-pressure grid
    logP = np.arange(-8.,1.,0.08)+0.08 #FINDME--If you change this also change in
    P = 10.0**logP
    T=P*0.+Tiso #making a constant temperature array
    #call the guillot TP function here, then interpolate in log(P) to the pressures here
    #np.interp
    Ps=0.1
    # Gas/mmw arrays are passed at layer midpoints, hence the P[1:]-sized zeros.
    wnocrop, Depth, Z=tran(T, P,mmw+P[1:]*0.,Ps,10**logPc,10**logH2O+P[1:]*0., 10**logCO+P[1:]*0.,10**logOH+P[1:]*0.,10**logHmFF+P[1:]*0.,10**logHmBF+P[1:]*0.,fH2+P[1:]*0.,fHe+P[1:]*0., 0, 0,Mp,Rstar,Rp*xRp)
    #gas arrays are one element smaller because they're calculated at the midpoint.
    return wnocrop, Depth
|
meganmansfieldREPO_NAMEIGRINS_transitPATH_START.@IGRINS_transit_extracted@IGRINS_transit-main@retrieval_set_up@fm.py@.PATH_END.py
|
{
"filename": "test_prims.py",
"repo_name": "rapidsai/cuml",
"repo_path": "cuml_extracted/cuml-main/python/cuml/cuml/tests/test_prims.py",
"type": "Python"
}
|
# Copyright (c) 2019-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.internals.safe_imports import cpu_only_import
from cuml.prims.label import make_monotonic
from cuml.prims.label import invert_labels
from cuml.prims.label import check_labels
from cuml.testing.utils import array_equal
import pytest
from cuml.internals.safe_imports import gpu_only_import
cp = gpu_only_import("cupy")
np = cpu_only_import("numpy")
@pytest.mark.parametrize("arr_type", ["np", "cp"])
@pytest.mark.parametrize("dtype", [cp.int32, cp.int64])
@pytest.mark.parametrize("copy", [True, False])
def test_monotonic_validate_invert_labels(arr_type, dtype, copy):
arr = np.array([0, 15, 10, 50, 20, 50], dtype=dtype)
original = arr.copy()
if arr_type == "cp":
arr = cp.asarray(arr, dtype=dtype)
arr_orig = arr.copy()
monotonic, mapped_classes = make_monotonic(arr, copy=copy)
cp.cuda.Stream.null.synchronize()
assert array_equal(monotonic, np.array([0, 2, 1, 4, 3, 4]))
# We only care about in-place updating if data is on device
if arr_type == "cp":
if copy:
assert array_equal(arr_orig, arr)
else:
assert array_equal(arr, monotonic)
wrong_classes = cp.asarray([0, 1, 2], dtype=dtype)
val_labels = check_labels(monotonic, classes=wrong_classes)
cp.cuda.Stream.null.synchronize()
assert not val_labels
correct_classes = cp.asarray([0, 1, 2, 3, 4], dtype=dtype)
val_labels = check_labels(monotonic, classes=correct_classes)
cp.cuda.Stream.null.synchronize()
assert val_labels
if arr_type == "cp":
monotonic_copy = monotonic.copy()
inverted = invert_labels(
monotonic,
classes=cp.asarray([0, 10, 15, 20, 50], dtype=dtype),
copy=copy,
)
cp.cuda.Stream.null.synchronize()
if arr_type == "cp":
if copy:
assert array_equal(monotonic_copy, monotonic)
else:
assert array_equal(monotonic, arr_orig)
assert array_equal(inverted, original)
|
rapidsaiREPO_NAMEcumlPATH_START.@cuml_extracted@cuml-main@python@cuml@cuml@tests@test_prims.py@.PATH_END.py
|
{
"filename": "population.py",
"repo_name": "TRASAL/frbpoppy",
"repo_path": "frbpoppy_extracted/frbpoppy-master/frbpoppy/population.py",
"type": "Python"
}
|
"""Define a class to hold a population of FRBs."""
import os
import dill as pickle
import numpy as np
from copy import deepcopy
from frbpoppy.paths import paths
from frbpoppy.frbs import FRBs
class Population:
    """Class to hold a population of FRBs."""

    def __init__(self):
        """Initialize an empty population."""
        # Population properties
        self.name = None
        # Frequency emission limits [MHz]
        self.f_max = None
        self.f_min = None
        # Store FRB sources
        self.frbs = FRBs()
        self.uid = None  # Unique Identifier

    def __str__(self):
        """Define how to print a population object to a console."""
        s = 'Population properties:'
        # TODO: Code this to print all properties
        return s

    def to_df(self):
        """Gather source values into a pandas dataframe."""
        df = self.frbs.to_df()
        return df

    def save(self, path=None):
        """
        Write out source properties as data file.

        Args:
            path (str): Path to which to save. Defaults to a file in the
                standard populations directory named after the population.
        """
        if path is None:
            # Check if a population has been given a survey name
            if not self.name:
                file_name = 'pop'
            else:
                file_name = self.name.lower()
            if self.uid:
                file_name += f'_{self.uid}'
            path = paths.populations() + f'{file_name}.p'
        self.to_pickle(path)

    def to_csv(self, path):
        """Write a population to a csv file.

        Args:
            path (str): Path to which to write.
        """
        df = self.frbs.to_df()
        df.to_csv(path)

    def to_pickle(self, path):
        """Write a population to a pickled file for future use.

        Args:
            path (str): Path to which to write.
        """
        # Context manager guarantees the handle is closed even on error
        # (the previous implementation leaked it if pickling raised).
        with open(path, 'wb') as output:
            pickle.dump(self, output, 2)

    def n_sources(self):
        """Return the number of FRB sources."""
        return len(self.frbs.ra)

    def n_srcs(self):
        """Alias of n_sources."""
        return self.n_sources()

    def n_bursts(self):
        """Return the number of bursts."""
        try:  # Will only work for a repeater population
            n = np.count_nonzero(~np.isnan(self.frbs.time))
        except TypeError:
            # One-off population: exactly one burst per source
            n = self.n_sources()
        return n

    def n_repeaters(self):
        """Return the number of repeaters in a population."""
        try:
            return np.sum((~np.isnan(self.frbs.time)).sum(1) > 1)
        except TypeError:
            return 0

    def n_rep(self):
        """Alias of n_repeaters."""
        return self.n_repeaters()

    def n_one_offs(self):
        """Return the number of one-offs in a population."""
        try:
            return np.sum((~np.isnan(self.frbs.time)).sum(1) <= 1)
        except TypeError:
            return self.n_sources()

    def n_oneoffs(self):
        """Alias of n_one_offs."""
        return self.n_one_offs()
def unpickle(filename=None, uid=None):
    """Quick function to unpickle a population.

    Args:
        filename (str, optional): Path to the pickled population, or the
            population name. Defaults to the standard 'pop' file
            (mirroring Population.save).
        uid (str, optional): Unique Identifier.

    Returns:
        Population: Population class.

    Raises:
        FileNotFoundError: If no matching population file exists.
    """
    # A direct path to an existing file wins over the standard locations.
    if filename is not None and os.path.isfile(filename):
        path = filename
    else:
        # Build the standard population file name (previously a None
        # filename crashed with TypeError instead of using the default).
        name = filename.lower() if filename else 'pop'
        if uid:
            name += f'_{uid}'
        path = paths.populations() + f'{name}.p'
        if not os.path.isfile(path):
            s = 'Pickled population file "{0}" does not exist'.format(filename)
            raise FileNotFoundError(s)
    # Context manager guarantees the handle is closed (the previous
    # implementation leaked it if unpickling raised).
    with open(path, 'rb') as f:
        pop = pickle.load(f)
    return pop
def split_pop(pop, mask=None):
    """Split a population into two complementary sub-populations.

    Args:
        pop (Population): Population to be split.
        mask (array): Numpy boolean mask. If None, split into repeaters and
            one-offs, in that order.

    Returns:
        tuple: (population matching mask, population matching ~mask)
    """
    if mask is None:
        # Default split: repeaters (more than one burst) versus one-offs.
        mask = (~np.isnan(pop.frbs.time)).sum(1) > 1
    # Deep copies keep the input population untouched.
    selected = deepcopy(pop)
    rejected = deepcopy(pop)
    selected.frbs.apply(mask)
    rejected.frbs.apply(~mask)
    return selected, rejected
def merge_pop(*args, random=False):
    """Merge populations into the first one given.

    Args:
        *args: Populations to merge. The first population is modified
            in place and returned.
        random (bool): If wishing to shuffle the frbs from different pops.

    Returns:
        Population: The first input population with all others merged in.
    """
    mp = args[0]  # Main population (mutated in place)
    # Merge each ndarray-valued parameter of the FRBs container
    for attr in mp.frbs.__dict__.keys():
        parm = getattr(mp.frbs, attr)
        if type(parm) is np.ndarray:
            parms = []
            for pop in args:
                parms.append(getattr(pop.frbs, attr))
            try:
                merged_parm = np.concatenate(parms, axis=0)
            except ValueError:
                # Ragged 2D arrays: find the width all values should be padded to
                max_size = max([p.shape[1] for p in parms])
                new_parms = []
                # Ensure matrices are the same shapes by NaN-padding them
                for p in parms:
                    if p.shape[1] != max_size:
                        padded_p = np.zeros((p.shape[0], max_size))
                        padded_p[:] = np.nan
                        padded_p[:, :p.shape[1]] = p
                        new_parms.append(padded_p)
                    else:
                        new_parms.append(p)
                merged_parm = np.concatenate(new_parms, axis=0)
            setattr(mp.frbs, attr, merged_parm)
    if random:
        # One shared permutation keeps all per-source arrays aligned
        shuffle = np.random.permutation(mp.frbs.z.shape[0])
        for attr in mp.frbs.__dict__.keys():
            parm = getattr(mp.frbs, attr)
            if type(parm) is np.ndarray:
                setattr(mp.frbs, attr, parm[shuffle])
    # NOTE(review): this rebinds the Population.n_srcs *method* to an int on
    # this instance -- confirm downstream callers expect an attribute here.
    mp.n_srcs = len(mp.frbs.z)
    return mp
|
TRASALREPO_NAMEfrbpoppyPATH_START.@frbpoppy_extracted@frbpoppy-master@frbpoppy@population.py@.PATH_END.py
|
{
"filename": "_colorscale.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/scatter3d/line/_colorscale.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class ColorscaleValidator(_plotly_utils.basevalidators.ColorscaleValidator):
    """Validator for the scatter3d.line.colorscale attribute."""

    def __init__(
        self, plotly_name="colorscale", parent_name="scatter3d.line", **kwargs
    ):
        # Defaults below may be overridden by the caller through kwargs.
        edit_type = kwargs.pop("edit_type", "calc")
        implied = kwargs.pop("implied_edits", {"autocolorscale": False})
        super(ColorscaleValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            implied_edits=implied,
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@scatter3d@line@_colorscale.py@.PATH_END.py
|
{
"filename": "tutorial.ipynb",
"repo_name": "tingyuansen/binspec_plus",
"repo_path": "binspec_plus_extracted/binspec_plus-master/tutorial.ipynb",
"type": "Jupyter Notebook"
}
|
** This file gives a brief overview of the capabilities of the code. **
* If you want to predict the spectrum of a single or binary star with particular labels, you'll want the "spectral_model" package.
* If you want to fit an observed spectrum, see the "fitting" package.
* Downloading and processing APOGEE spectra is handled by the "process_spectra" package.
* The "utils" package contains some general-purpose functions used by the other packages.
Many of the functions require you to pass them a particular neural network (really, a list of biases and weights parameterizing the network), so we read in all the networks we'll be using at the beginning and then pass them to various functions as we go. This is a bit cumbersome, but the advantage is that if you train a new network (with architecture compatible with the existing code) you can just pass it to the relevant functions without having to rewrite everything.
```python
from __future__ import absolute_import, division, print_function # Python2 compatibility
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
from binspec import utils
from binspec import spectral_model
from binspec import fitting
# read in the standard wavelength grid onto which we interpolate spectra.
wavelength = utils.load_wavelength_array()
# read in all individual neural networks we'll need.
NN_coeffs_norm = utils.read_in_neural_network(name = 'normalized_spectra')
NN_coeffs_flux = utils.read_in_neural_network(name = 'unnormalized_spectra')
NN_coeffs_R = utils.read_in_neural_network(name = 'radius')
NN_coeffs_Teff2_logg2 = utils.read_in_neural_network(name = 'Teff2_logg2')
```
Let's use the data-driven spectral model to predict the APOGEE-like spectrum of a single star similar to the Sun.
```python
spec_err = 1e-2*np.ones(len(wavelength))
# for a single-star model, the format of "labels" is [Teff, logg, [Fe/H], [Mg/Fe], v_macro, v_los].
real_labels = [5800, 4.44, 0, 0, 5, 10] # redshift by 10 km/s.
real_spec = spectral_model.get_normalized_spectrum_single_star(labels = real_labels,
NN_coeffs_norm = NN_coeffs_norm, NN_coeffs_flux = NN_coeffs_flux,
spec_err = spec_err)
# zoom in on a small region of the spectrum so we can see what's going on.
lambda_min, lambda_max = 15350, 15450# for plotting
m = (wavelength < lambda_max) & (wavelength > lambda_min)
plt.figure(figsize=(14, 4))
plt.plot(wavelength[m], real_spec[m], 'k', lw=0.5)
plt.xlim(lambda_min, lambda_max)
plt.ylim(0.7, 1.05)
```
(0.7, 1.05)

Now let's add some noise to this model spectrum, and then fit it to see if we can recover the labels we put in.
```python
data_spec = real_spec + 0.01*np.random.randn(len(real_spec))
popt, pcov, model_spec = fitting.fit_normalized_spectrum_single_star_model(norm_spec = data_spec,
spec_err = spec_err, NN_coeffs_norm = NN_coeffs_norm, NN_coeffs_flux = NN_coeffs_flux,
p0 = None, num_p0 = 1)
plt.figure(figsize=(14, 4))
m = (wavelength < lambda_max) & (wavelength > lambda_min)
plt.plot(wavelength[m], data_spec[m], 'k', lw=0.5, label = '"data" spec')
plt.plot(wavelength[m], model_spec[m], 'r--', lw=0.5, label = 'best-fit model')
plt.xlim(lambda_min, lambda_max)
plt.legend(loc = 'best', frameon = False, fontsize = 18)
```
<matplotlib.legend.Legend at 0x101e3d45f8>

```python
# verify that our best-fit labels are close to what we put in.
print(popt)
```
[ 5.79605530e+03 4.44239327e+00 -8.39633505e-03 4.67740152e-03
4.96638546e+00 1.00293424e+01]
Now let's predict the spectrum of an unresolved binary.
```python
# predict a binary spec
# for a binary, the labels are [Teff1, logg1, [Fe/H], [Mg/Fe], mass ratio, v_macro1, v_macro2, v_los1, v_los2]
real_bin_labels = [5800, 4.44, 0, 0, 0.7, 2, 5, -10, 10]
specerr = 1e-2*np.ones(len(wavelength))
real_bin_spec = spectral_model.get_normalized_spectrum_binary(labels = real_bin_labels,
NN_coeffs_norm = NN_coeffs_norm, NN_coeffs_flux = NN_coeffs_flux,
NN_coeffs_R = NN_coeffs_R, NN_coeffs_Teff2_logg2 = NN_coeffs_Teff2_logg2,
spec_err = specerr)
plt.figure(figsize=(14, 4))
m = (wavelength < lambda_max) & (wavelength > lambda_min)
plt.plot(wavelength[m], real_bin_spec[m], 'k', lw=0.5)
plt.xlim(lambda_min, lambda_max)
plt.ylim(0.75, 1.05)
```
(0.75, 1.05)

Again, let's add some noise and then fit the spectrum. We'll fit it with both a single-star model and a binary model, and then compare the fits.
Notice that we always pass the fitting function an argument "num_p0". This determines how many different "walkers" to initialize for the optimizer, to minimize the chance of its converging on a local minimum. For a simple single-star model, there's little danger of this happening, but it's more likely for more complicated models with more labels.
How long the code takes to run scales linearly with the number of walkers.
```python
data_bin_spec = real_bin_spec + 0.01*np.random.randn(len(real_bin_spec))
# fit single-star model
popt_single, pcov, single_spec = fitting.fit_normalized_spectrum_single_star_model(norm_spec = data_bin_spec,
spec_err = specerr, NN_coeffs_norm = NN_coeffs_norm, NN_coeffs_flux = NN_coeffs_flux,
p0 = None, num_p0 = 1)
# fit binary model.
# use the best-fit single-star model ("popt_single") as a starting guess.
popt_binary, pcov, bin_spec = fitting.fit_normalized_spectrum_binary_model(norm_spec = data_bin_spec,
spec_err = specerr, NN_coeffs_norm = NN_coeffs_norm, NN_coeffs_flux = NN_coeffs_flux,
NN_coeffs_R = NN_coeffs_R, NN_coeffs_Teff2_logg2 = NN_coeffs_Teff2_logg2,
p0_single = popt_single, num_p0 = 10)
plt.figure(figsize=(14, 4))
m = (wavelength < lambda_max) & (wavelength > lambda_min)
plt.plot(wavelength[m], data_bin_spec[m], 'k', lw=0.5, label = '"data" spec')
plt.plot(wavelength[m], single_spec[m], 'r', lw=0.5, label = 'single-star model')
plt.plot(wavelength[m], bin_spec[m], 'b', lw=0.5, label = 'binary model')
plt.xlim(lambda_min, lambda_max)
plt.legend(loc = 'best', frameon = False, fontsize= 18)
```
<matplotlib.legend.Legend at 0x101e481320>

```python
# unsurprisingly, the single-star model isn't a very good fit, but the binary model is.
# verify that our best-fit labels are close to what we put in.
print(popt_binary)
```
[ 5.77891029e+03 4.43720910e+00 -5.81747659e-03 1.24170328e-02
7.01505412e-01 2.24734991e+00 4.94390273e+00 -9.93189388e+00
1.01599828e+01]
Now that we've seen how to generate and fit model spectra, let's download an actual APOGEE spectrum. Here we'll download a "combined" spectrum.
We'll start with a target that is likely a binary, but is not an "obvious" one. I.e., there's no large velocity offset.
```python
from binspec import process_spectra
apogee_id = '2M18513961+4338099'
spec, spec_err = process_spectra.get_combined_spectrum_single_object(apogee_id = apogee_id,
catalog = None, save_local = False)
plt.figure(figsize=(14, 4))
m = (spec_err < 0.1) & (wavelength < lambda_max) & (wavelength > lambda_min)
plt.plot(wavelength[m], spec[m], 'k', lw=0.5)
plt.ylim(0.75, 1.05)
plt.xlim(lambda_min, lambda_max)
```
(15350, 15450)

Now let's fit this spectrum with a single-star model and a binary model!
```python
# fit single-star model
popt_single, pcov, single_spec = fitting.fit_normalized_spectrum_single_star_model(norm_spec = spec,
spec_err = spec_err, NN_coeffs_norm = NN_coeffs_norm, NN_coeffs_flux = NN_coeffs_flux,
p0 = None, num_p0 = 1)
# fit binary model.
popt_binary, pcov, bin_spec = fitting.fit_normalized_spectrum_binary_model(norm_spec = spec,
spec_err = spec_err, NN_coeffs_norm = NN_coeffs_norm, NN_coeffs_flux = NN_coeffs_flux,
NN_coeffs_R = NN_coeffs_R, NN_coeffs_Teff2_logg2 = NN_coeffs_Teff2_logg2,
p0_single = popt_single, num_p0 = 10)
plt.figure(figsize=(14, 4))
plt.plot(wavelength[m], spec[m], 'k', lw=0.5, label = 'APOGEE spectrum')
plt.plot(wavelength[m], single_spec[m], 'r', lw=0.5, label = 'single-star model')
plt.plot(wavelength[m], bin_spec[m], 'b', lw=0.5, label = 'binary model')
plt.xlim(lambda_min, lambda_max)
plt.ylim(0.7, 1.1)
plt.legend(loc = 'best', frameon = False, fontsize= 18)
```
<matplotlib.legend.Legend at 0x1045286f60>

The binary model looks like a better fit, though the differences are subtle. You can change the axis limits to zoom in on particular lines or explore other parts of the spectrum.
Let's compare the $\chi^2$ of the single and binary model.
```python
chi2_diff = utils.get_chi2_difference(norm_spec=spec, spec_err=spec_err,
norm_model_A = single_spec, norm_model_B = bin_spec)
print(chi2_diff)
```
5414.108844086688
Now that we've fit a not-obvious binary (one with a small velocity offset), let's look at one with a bigger velocity offset between the two stars. We'll download the spectrum and fit it in one go.
```python
apogee_id = '2M13080617+1753494'
spec, spec_err = process_spectra.get_combined_spectrum_single_object(apogee_id = apogee_id,
catalog = None, save_local = False)
# fit single-star model
popt_single, pcov, single_spec = fitting.fit_normalized_spectrum_single_star_model(norm_spec = spec,
spec_err = spec_err, NN_coeffs_norm = NN_coeffs_norm, NN_coeffs_flux = NN_coeffs_flux,
p0 = None, num_p0 = 1)
# fit binary model.
popt_binary, pcov, bin_spec = fitting.fit_normalized_spectrum_binary_model(norm_spec = spec,
spec_err = spec_err, NN_coeffs_norm = NN_coeffs_norm, NN_coeffs_flux = NN_coeffs_flux,
NN_coeffs_R = NN_coeffs_R, NN_coeffs_Teff2_logg2 = NN_coeffs_Teff2_logg2,
p0_single = popt_single, num_p0 = 10)
m = (spec_err < 0.1) & (wavelength < lambda_max) & (wavelength > lambda_min)
plt.figure(figsize=(14, 4))
plt.plot(wavelength[m], spec[m], 'k', lw=0.5, label = 'APOGEE spectrum')
plt.plot(wavelength[m], single_spec[m], 'r', lw=0.5, label = 'single-star model')
plt.plot(wavelength[m], bin_spec[m], 'b', lw=0.5, label = 'binary model')
plt.xlim(15350, 15450)
plt.ylim(0.7, 1.1)
plt.legend(loc = 'best', frameon = False, fontsize= 18)
```
<matplotlib.legend.Legend at 0x101e84ec88>

Here, the differences between the best-fit binary and single-star models are more obvious. Since the velocity offset between the two stars in the best-fit binary model appears non-neglible, we should fit the spectra from individual visits, in case the spectrum changes significantly from one visit to the next.
Let's download and plot the spectra from each visit.
```python
all_specs, all_err, all_snr, all_hjd, all_vhelio = \
process_spectra.download_visit_spectra_single_object_and_renormalize(
apogee_id = apogee_id, p0_single_combined = popt_single,
NN_coeffs_norm = NN_coeffs_norm, NN_coeffs_flux = NN_coeffs_flux,
allvisit_cat = None, snr_min = 30)
plt.figure(figsize=(14, 4))
for i, spec in enumerate(all_specs):
m = (all_err[i] < 0.1) & (wavelength < lambda_max) & (wavelength > lambda_min)
plt.plot(wavelength[m], spec[m] + 0.2*i, 'k', lw=0.5)
plt.xlim(lambda_min, lambda_max)
plt.ylim(0.7, 1.6)
```
(0.7, 1.6)

Yup, the spectrum definitely looks like it's changing significantly from one visit to the next (most significantly, from the bottom spectrum to the first two). Let's fit these visit spectra simultaneously using an SB2 model.
```python
sb2_labels, pcov, sb2_models = fitting.fit_visit_spectra_sb2_model(
norm_spectra = all_specs, spec_errs = all_err,
NN_coeffs_norm = NN_coeffs_norm, NN_coeffs_flux = NN_coeffs_flux,
NN_coeffs_R = NN_coeffs_R, NN_coeffs_Teff2_logg2 = NN_coeffs_Teff2_logg2,
v_helios = all_vhelio, p0_combined = popt_binary, num_p0 = 5)
plt.figure(figsize=(14, 4))
for i, spec in enumerate(all_specs):
m = (all_err[i] < 0.1) & (wavelength < lambda_max) & (wavelength > lambda_min)
if i == 0: data_label, model_label = 'data', 'SB2 model'
else: data_label, model_label = None, None
plt.plot(wavelength[m], spec[m] + 0.2*i, 'k', lw=0.5, label = data_label)
plt.plot(wavelength[m], sb2_models[i][m] + 0.2*i, 'r', lw=0.5, label = model_label)
plt.xlim(lambda_min, lambda_max)
plt.ylim(0.7, 1.6)
plt.legend(loc = 'upper left', frameon = False, ncol = 2, fontsize = 18)
```
<matplotlib.legend.Legend at 0x10213747f0>

Looks like a good fit. Let's see what the best-fit labels are. The format of the label vector returned for an SB2 model is [Teff1, logg1, [Fe/H], [Mg/Fe], q_spec, v_macro1, v_macro2, q_dyn, gamma, dv1_i],
where i = 1...N_visits and dv_i is the velocity of the primary at each visit.
If you aren't sure what the labels for a particular model are, you can check in spectral_model.py
```python
print(sb2_labels)
```
[ 5.81209038e+03 4.44233921e+00 1.06612376e-01 -5.44079000e-02
8.31312281e-01 2.37523955e+00 7.17323488e-01 8.36045903e-01
1.05971627e+01 2.79507848e+01 1.68375081e+01 1.58818318e+01]
We note that q_spec and q_dyn are both about 0.83. That's good.
Finally, let's try fitting an SB1. First, we'll download and fit the combined spectrum.
```python
apogee_id = '2M13381097+5620250'
spec, spec_err = process_spectra.get_combined_spectrum_single_object(apogee_id = apogee_id,
catalog = None, save_local = False)
# fit single-star model
popt_single, pcov, single_spec = fitting.fit_normalized_spectrum_single_star_model(norm_spec = spec,
spec_err = spec_err, NN_coeffs_norm = NN_coeffs_norm, NN_coeffs_flux = NN_coeffs_flux,
p0 = None, num_p0 = 1)
# fit binary model.
popt_binary, pcov, bin_spec = fitting.fit_normalized_spectrum_binary_model(norm_spec = spec,
spec_err = spec_err, NN_coeffs_norm = NN_coeffs_norm, NN_coeffs_flux = NN_coeffs_flux,
NN_coeffs_R = NN_coeffs_R, NN_coeffs_Teff2_logg2 = NN_coeffs_Teff2_logg2,
p0_single = popt_single, num_p0 = 10)
m = (spec_err < 0.1) & (wavelength < lambda_max) & (wavelength > lambda_min)
plt.figure(figsize=(14, 4))
plt.plot(wavelength[m], spec[m], 'k', lw=0.5, label = 'APOGEE spectrum')
plt.plot(wavelength[m], single_spec[m], 'r', lw=0.5, label = 'single-star model')
plt.plot(wavelength[m], bin_spec[m], 'b', lw=0.5, label = 'binary model')
plt.xlim(lambda_min, lambda_max)
plt.ylim(0.7, 1.1)
plt.legend(loc = 'best', frameon = False, fontsize= 18)
```
<matplotlib.legend.Legend at 0x101f7756a0>

```python
chi2_diff = utils.get_chi2_difference(norm_spec=spec, spec_err=spec_err,
norm_model_A = single_spec, norm_model_B = bin_spec)
print(chi2_diff)
```
67.59678569121024
Hmmm, for this target, the binary model fit is not obviously better, and the $\chi^2$ difference for the combined spectrum is very small (small enough that it wouldn't pass our model selection criteria to consider it a reliable binary).
However, if we look at the APOGEE-supplied v_helios, we'll find that this target is RV variable.
Therefore, we'll download the individual-visit spectra, and we'll try fitting them with both an SB1 model and a genuine single-star model.
```python
# get the visit spectra
all_specs, all_err, all_snr, all_hjd, all_vhelio = \
process_spectra.download_visit_spectra_single_object_and_renormalize(
apogee_id = apogee_id, p0_single_combined = popt_single,
NN_coeffs_norm = NN_coeffs_norm, NN_coeffs_flux = NN_coeffs_flux,
allvisit_cat = None, snr_min = 30)
# fit them with a single-star model
single_labels, pcov, single_models = fitting.fit_visit_spectra_single_star_model(
norm_spectra = all_specs, spec_errs = all_err, NN_coeffs_norm = NN_coeffs_norm,
NN_coeffs_flux = NN_coeffs_flux, v_helios = all_vhelio, p0 = popt_single, num_p0 = 1)
# fit them with an SB1 model
sb1_labels, pcov, sb1_models = fitting.fit_visit_spectra_sb1_model(
norm_spectra = all_specs, spec_errs = all_err, NN_coeffs_norm = NN_coeffs_norm,
NN_coeffs_flux = NN_coeffs_flux, v_helios = all_vhelio, p0 = popt_single, num_p0 = 5)
plt.figure(figsize=(14, 4))
for i, spec in enumerate(all_specs):
m = (all_err[i] < 0.1) & (wavelength < lambda_max) & (wavelength > lambda_min)
if i == 0: data_label, sb1_label, single_label = 'data', 'SB1 model', 'single-star model'
else: data_label, sb1_label, single_label = None, None, None
plt.plot(wavelength[m], spec[m] + 0.2*i, 'k', lw=0.5, label = data_label)
plt.plot(wavelength[m], single_models[i][m] + 0.2*i, 'r', lw=0.5, label = single_label)
plt.plot(wavelength[m], sb1_models[i][m] + 0.2*i, 'b', lw=0.5, label = sb1_label)
plt.xlim(lambda_min, lambda_max)
plt.ylim(0.75, 1.6)
plt.legend(loc = 'upper left', frameon = False, ncol = 3, fontsize = 18)
```
<matplotlib.legend.Legend at 0x101e79ed30>

It's pretty clear that the spectrum is changing from one visit to the next, so the single-star model (which requires constant v_helio across visits) won't be able to get a good fit. But the SB1 model does achieve a good fit, and if you tried an SB2 model, you'd find that it couldn't do any better.
Let's look at the labels of the best-fit SB1 model, which are in the format [Teff, logg, [Fe/H], [Mg/Fe], v_macro, dv_i], where i = 1..N_visits is the velocity at each visit.
```python
print(sb1_labels)
```
[ 6.03814420e+03 4.21286962e+00 4.08804375e-02 -1.12199395e-01
8.39337302e+00 -1.43412469e+01 1.69912803e+01 -2.59064042e+00]
**One practical note:**
Fitting combined spectra with single/binary models is pretty fast. If you pass the fitting function to a Python multiprocessing Pool, you should be able to comfortably fit 10,000 targets in < 1 day on a single node of a typical cluster.
On the other hand, fitting visit spectra can be fairly slow, because the models get more complicated and each additional visit adds (at least) one more free parameter. A small fraction of targets have 30+ visits, which means that the optimization for a complicated model can entail optimizing in a function of 100+ free parameters. In this case, a single target can keep your node working all day.
Therefore, it makes sense to start out by fitting combined spectra. Afterward, you can switch to fitting visit spectra for targets where it makes sense to; i.e., targets that are RV variable or have combined spectra that appear to be binaries with significant velocity offsets. In cases where many visits need to be fit simultaneously, it can also help to first fit them individually (which is faster) and then use the velocities from this to initialize a good guess for simultaneous fitting.
```python
```
|
tingyuansenREPO_NAMEbinspec_plusPATH_START.@binspec_plus_extracted@binspec_plus-master@tutorial.ipynb@.PATH_END.py
|
{
"filename": "_xhoverformat.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/isosurface/_xhoverformat.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class XhoverformatValidator(_plotly_utils.basevalidators.StringValidator):
    """String validator for the ``isosurface.xhoverformat`` property."""

    def __init__(self, plotly_name="xhoverformat", parent_name="isosurface", **kwargs):
        # Default the edit type to "calc" unless the caller supplies one.
        edit_type = kwargs.pop("edit_type", "calc")
        super().__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@isosurface@_xhoverformat.py@.PATH_END.py
|
{
"filename": "gexceptions.py",
"repo_name": "esheldon/ngmix",
"repo_path": "ngmix_extracted/ngmix-master/ngmix/gexceptions.py",
"type": "Python"
}
|
class NGmixBaseException(Exception):
    """Root of the ngmix exception hierarchy; remembers the offending value."""

    def __init__(self, value):
        super().__init__(value)
        # keep the raw value for programmatic inspection by handlers
        self.value = value

    def __str__(self):
        # report the repr so string values keep their quotes
        return repr(self.value)
class GMixRangeError(NGmixBaseException):
    """Raised when some number falls outside its allowed range."""
class GMixFatalError(NGmixBaseException):
    """Raised for a fatal, unrecoverable error in the Gaussian mixtures."""
class GMixMaxIterEM(NGmixBaseException):
    """Raised when the EM algorithm hits its maximum iteration count."""
class PSFFluxFailure(NGmixBaseException):
    """
    Raised when fitting PSF fluxes fails.

    This usually only happens for all-zero-weight images, so it also means
    other types of fits or measurements cannot proceed.
    """
class BootPSFFailure(NGmixBaseException):
    """Raised when bootstrapping the PSF fails."""
class BootGalFailure(NGmixBaseException):
    """Raised when bootstrapping the galaxy fails."""
class FFTRangeError(NGmixBaseException):
    """Raised when an FFT size is not correct/consistent."""
|
esheldonREPO_NAMEngmixPATH_START.@ngmix_extracted@ngmix-master@ngmix@gexceptions.py@.PATH_END.py
|
{
"filename": "SpectralEnergyDistributionsInSplusDr1.ipynb",
"repo_name": "astro-datalab/notebooks-latest",
"repo_path": "notebooks-latest_extracted/notebooks-latest-master/03_ScienceExamples/SpectralEnergyDistributions/SpectralEnergyDistributionsInSplusDr1.ipynb",
"type": "Jupyter Notebook"
}
|
```python
__nbid__ = '0028'
__author__ = 'David Nidever <david.nidever@noirlab.edu>, Astro Data Lab Team <datalab@noirlab.edu>'
__version__ = '20240606' # yyyymmdd
__datasets__ = ['splus_dr1']
__keywords__ = ['science example', 'plot:cmd', 'plot:color-color', 'image cutout']
```
# Spectral Energy Distributions with S-PLUS DR1
*David Nidever & the Astro Data Lab Team*
### Table of contents
* [Goals & notebook summary](#goals)
* [Disclaimer & Attribution](#attribution)
* [Imports & setup](#import)
* [Query the data](#query)
* [Color Magnitude Diagrams](#cmd)
* [Color-Color Diagrams](#twocd)
* [Spectral Energy Distributions](#sed)
* [Spatial Density Map](#spatial)
* [Image Cutouts](#cutouts)
* [Resources](#resource)
<a class="anchor" id="goals"></a>
# Goals
We will show some examples of what can be done with the S-PLUS DR1 dataset focusing on the large number of photometric bands and spectral energy distributions.
# Summary
We will create a color magnitude diagram, color-color diagram, spectral energy distribution and image cutouts.
<a class="anchor" id="attribution"></a>
# Disclaimer & attribution
Disclaimers
-----------
Note that using the Astro Data Lab constitutes your agreement with our minimal [Disclaimers](https://datalab.noirlab.edu/disclaimers.php).
Acknowledgments
---------------
If you use **Astro Data Lab** in your published research, please include the text in your paper's Acknowledgments section:
_This research uses services or data provided by the Astro Data Lab, which is part of the Community Science and Data Center (CSDC) Program of NSF NOIRLab. NOIRLab is operated by the Association of Universities for Research in Astronomy (AURA), Inc. under a cooperative agreement with the U.S. National Science Foundation._
If you use **SPARCL jointly with the Astro Data Lab platform** (via JupyterLab, command-line, or web interface) in your published research, please include this text below in your paper's Acknowledgments section:
_This research uses services or data provided by the SPectra Analysis and Retrievable Catalog Lab (SPARCL) and the Astro Data Lab, which are both part of the Community Science and Data Center (CSDC) Program of NSF NOIRLab. NOIRLab is operated by the Association of Universities for Research in Astronomy (AURA), Inc. under a cooperative agreement with the U.S. National Science Foundation._
In either case **please cite the following papers**:
* Data Lab concept paper: Fitzpatrick et al., "The NOAO Data Laboratory: a conceptual overview", SPIE, 9149, 2014, https://doi.org/10.1117/12.2057445
* Astro Data Lab overview: Nikutta et al., "Data Lab - A Community Science Platform", Astronomy and Computing, 33, 2020, https://doi.org/10.1016/j.ascom.2020.100411
If you are referring to the Data Lab JupyterLab / Jupyter Notebooks, cite:
* Juneau et al., "Jupyter-Enabled Astrophysical Analysis Using Data-Proximate Computing Platforms", CiSE, 23, 15, 2021, https://doi.org/10.1109/MCSE.2021.3057097
If publishing in a AAS journal, also add the keyword: `\facility{Astro Data Lab}`
And if you are using SPARCL, please also add `\software{SPARCL}` and cite:
* Juneau et al., "SPARCL: SPectra Analysis and Retrievable Catalog Lab", Conference Proceedings for ADASS XXXIII, 2024
https://doi.org/10.48550/arXiv.2401.05576
The NOIRLab Library maintains [lists of proper acknowledgments](https://noirlab.edu/science/about/scientific-acknowledgments) to use when publishing papers using the Lab's facilities, data, or services.
# Imports and setup
```python
# 3rd party
import numpy as np
from astropy import utils, io
from astropy.visualization import make_lupton_rgb
from pyvo.dal import sia
import pylab as plt
%matplotlib inline
# Data Lab
from dl import queryClient as qc
from dl.helpers.utils import convert
# plots default setup
plt.rcParams['font.size'] = 14
```
<a class="anchor" id="query"></a>
# Query the S-PLUS DR1 database
Let's see how we query the S-PLUS DR1 database. With no specific spatial region in mind we'll just take the first 10,000 objects.
## Construct the query string
```python
# Create the query string; SQL keyword capitalized for clarity
query =\
"""SELECT *
FROM splus_dr1.stripe82
LIMIT 10000"""
```
# Submit the query
Running the query in synchronous mode is very easy.
```python
response = qc.query(query) # response is by default a CSV-formatted string
```
We can use a helper function to convert the query result into a data structure. Let's convert to a Pandas dataframe:
```python
R = convert(response,'pandas') # R is a pandas dataframe
print("Number of objects:", R.shape[0])
print(R.head())
```
Number of objects: 10000
field id ra x \
0 STRIPE82-0107 SPLUS.STRIPE82-0107.00299.griz 315.0033 3159.854
1 STRIPE82-0107 SPLUS.STRIPE82-0107.00577.griz 315.0088 3124.340
2 STRIPE82-0107 SPLUS.STRIPE82-0107.00775.griz 315.0110 3109.870
3 STRIPE82-0107 SPLUS.STRIPE82-0107.01015.griz 315.0050 3148.750
4 STRIPE82-0107 SPLUS.STRIPE82-0107.01056.griz 315.0201 3050.336
y isoarea s2ndet photoflag fwhm fwhm_n ... prob_star htm9 \
0 1140.331 13 14.59 0 5.03 2.08 ... 0.0 NaN
1 1175.678 29 72.21 0 2.40 0.99 ... 1.0 NaN
2 1198.093 26 58.98 0 2.51 1.04 ... 1.0 NaN
3 1243.588 51 24.15 0 11.15 4.61 ... 0.0 NaN
4 1224.520 155 453.79 0 2.41 1.00 ... 1.0 NaN
ring256 nest4096 glon glat elon elat random_id dec
0 NaN NaN NaN NaN NaN NaN 96.334790 -1.3661
1 NaN NaN NaN NaN NaN NaN 80.409110 -1.3607
2 NaN NaN NaN NaN NaN NaN 27.586126 -1.3573
3 NaN NaN NaN NaN NaN NaN 60.595924 -1.3503
4 NaN NaN NaN NaN NaN NaN 80.914680 -1.3532
[5 rows x 147 columns]
Let's print out the column names
```python
print(np.array(R.columns))
```
['field' 'id' 'ra' 'x' 'y' 'isoarea' 's2ndet' 'photoflag' 'fwhm' 'fwhm_n'
'mumax' 'a' 'b' 'theta' 'flraddet' 'krraddet' 'ndet_auto' 'ndet_petro'
'ndet_aper' 'ujava_auto' 'eujava_auto' 's2n_ujava_auto' 'ujava_petro'
'eujava_petro' 's2n_ujava_petro' 'ujava_aper' 'eujava_aper'
's2n_ujava_aper' 'f378_auto' 'ef378_auto' 's2n_f378_auto' 'f378_petro'
'ef378_petro' 's2n_f378_petro' 'f378_aper' 'ef378_aper' 's2n_f378_aper'
'f395_auto' 'ef395_auto' 's2n_f395_auto' 'f395_petro' 'ef395_petro'
's2n_f395_petro' 'f395_aper' 'ef395_aper' 's2n_f395_aper' 'f410_auto'
'ef410_auto' 's2n_f410_auto' 'f410_petro' 'ef410_petro' 's2n_f410_petro'
'f410_aper' 'ef410_aper' 's2n_f410_aper' 'f430_auto' 'ef430_auto'
's2n_f430_auto' 'f430_petro' 'ef430_petro' 's2n_f430_petro' 'f430_aper'
'ef430_aper' 's2n_f430_aper' 'g_auto' 'eg_auto' 's2n_g_auto' 'g_petro'
'eg_petro' 's2n_g_petro' 'g_aper' 'eg_aper' 's2n_g_aper' 'f515_auto'
'ef515_auto' 's2n_f515_auto' 'f515_petro' 'ef515_petro' 's2n_f515_petro'
'f515_aper' 'ef515_aper' 's2n_f515_aper' 'r_auto' 'er_auto' 's2n_r_auto'
'r_petro' 'er_petro' 's2n_r_petro' 'r_aper' 'er_aper' 's2n_r_aper'
'f660_auto' 'ef660_auto' 's2n_f660_auto' 'f660_petro' 'ef660_petro'
's2n_f660_petro' 'f660_aper' 'ef660_aper' 's2n_f660_aper' 'i_auto'
'ei_auto' 's2n_i_auto' 'i_petro' 'ei_petro' 's2n_i_petro' 'i_aper'
'ei_aper' 's2n_i_aper' 'f861_auto' 'ef861_auto' 's2n_f861_auto'
'f861_petro' 'ef861_petro' 's2n_f861_petro' 'f861_aper' 'ef861_aper'
's2n_f861_aper' 'z_auto' 'ez_auto' 's2n_z_auto' 'z_petro' 'ez_petro'
's2n_z_petro' 'z_aper' 'ez_aper' 's2n_z_aper' 'zb' 'zb_min' 'zb_max' 'tb'
'odds' 'chi2' 'm_b' 'stell_mass' 'class' 'prob_gal' 'prob_star' 'htm9'
'ring256' 'nest4096' 'glon' 'glat' 'elon' 'elat' 'random_id' 'dec']
<a class="anchor" id="cmd"></a>
# Make a Color Magnitude Diagram
Let us look at what a S-PLUS Color Magnitude Diagram (CMD) looks like.
First a scatter plot and then a density map.
```python
fig = plt.figure(figsize=(7,6))
plt.scatter(R['g_auto']-R['r_auto'], R['g_auto'], s=10)
plt.xlim(-1,4)
plt.ylim(25,10)
plt.xlabel('g-r')
plt.ylabel('g')
plt.show()
```

```python
fig = plt.figure(figsize=(7,6))
plt.hexbin(R['g_auto']-R['r_auto'], R['g_auto'], extent=(-1,4,25,10),gridsize=(150,100))
plt.xlim(-0.2,2)
plt.ylim(25,10)
plt.xlabel('g-r')
plt.ylabel('g')
plt.show()
```

<a class="anchor" id="twocd"></a>
# Make a color-color diagram
The many S-PLUS photometric bands can be used to produce color-color diagrams that are useful for measuring properties such as the metallicity or surface gravity of a star.
(g-F515) vs. (g-r) is sensitive to surface gravity and can be used to separate dwarf stars from giant stars. The main locus of points is the dwarfs, while the red giants live in the upper right. [See Majewski et al. (2000)](http://adsabs.harvard.edu/abs/2000AJ....120.2550M) for more details on this method.
```python
fig = plt.figure(figsize=(7,6))
bright = (R['g_auto']<20)
plt.scatter(R['g_auto'][bright]-R['r_auto'][bright], R['g_auto'][bright]-R['f515_auto'][bright],c=R['g_auto'][bright],s=10)
plt.xlim(-1,2)
plt.ylim(-0.5,0.8)
plt.xlabel('g-r')
plt.ylabel('g-F515')
plt.show()
```

The (F378-F410) vs. (F515-F861) is a similar color-color plot that can be used to derive surface gravity. See [Cenarro et al. (2018)](http://adsabs.harvard.edu/abs/2018arXiv180402667C) for more details.
```python
fig = plt.figure(figsize=(7,6))
bright = (R['g_auto']<19)
plt.scatter(R['f515_auto'][bright]-R['f861_auto'][bright], R['f378_auto'][bright]-R['f410_auto'][bright],c=R['g_auto'][bright],s=10)
plt.xlim(-1,3)
plt.ylim(2.0,0.0)
plt.xlabel('F515-F861')
plt.ylabel('F378-F410')
cbar = plt.colorbar()
cbar.set_label('g magnitude')
```

<a class="anchor" id="sed"></a>
# Creating Spectral Energy Distributions
One of the great advantages of the S-PLUS dataset is the large number of wide and narrow-band filters that can be used
to study the properties of the objects. Here we show how to plot the Spectral Energy Distributions (SEDs) of objects. These can then be compared to spectral models to constrain properties such as Teff, logg and metallicity (for stars) and galaxy type and redshift for galaxies (see [Cenarro et al. 2018](http://adsabs.harvard.edu/abs/2018arXiv180402667C) for some examples).
```python
fig = plt.figure(figsize=(16,16))
font = {'family' : 'DejaVu Sans',
'weight' : 'normal',
'size' : 12}
plt.rc('font',**font)
bands = ['ujava','f378','f395','f410','f430','g','f515','r','f660','i','f861','z']
wave = np.array([3536.,3733.,3941.,4095.,4293.,4780.,5134.,6267.,6614.,7684.,8608.,8956.])
for j in range(9):
ax = fig.add_subplot(3,3,j+1)
mag = []
err = []
for b in bands:
mag.append(R[b+'_auto'][j])
err.append(R['s2n_'+b+'_auto'][j])
mag = np.array(mag)
err = 1.087/np.array(err)
gd = (mag < 50)
ax.plot(wave[gd],mag[gd])
#ax.scatter(wave[gd],mag[gd])
#ax.errorbar(wave[gd],mag[gd],yerr=err[gd], fmt='o', color='black',
# ecolor='lightgray', elinewidth=3, capsize=0);
ax.errorbar(wave[gd],mag[gd],yerr=err[gd], fmt='.k',markersize=10)
ax.set_xlim(3400,9100)
ax.set_ylim(np.max(mag[gd])+1.0,np.min(mag[gd])-1.0)
ax.set_xlabel('Wavelength (Å)')
ax.set_ylabel('Magnitude')
ax.text(4100.0,np.min(mag[gd])-1.0+0.07*(np.max(mag[gd])-np.min(mag[gd])+2.0),R['id'][j])
```

<a class="anchor" id="spatial"></a>
# Make a figure of the spatial distribution
Let's make a spatial density map of the sources.
```python
fig = plt.figure(figsize=(7,6))
plt.hexbin(R['ra'][0:5000], R['dec'][0:5000],gridsize=100)
plt.xlabel('RA (deg)')
plt.ylabel('Dec (deg)')
plt.colorbar(label='number of objects per spatial bin');
```

<a class="anchor" id="cuouts"></a>
# Image Cutouts
Let's get some image cutouts and make a three-color image.
First we define a few helper functions.
```python
# set up SIA
DEF_ACCESS_URL = "https://datalab.noirlab.edu/sia/splus_dr1"
svc = sia.SIAService(DEF_ACCESS_URL)

# a little func to download the deepest stacked images
def download_deepest_images(ra, dec, fov=0.1, bands=list('GRI')):
    """
    Download the deepest stacked image at (ra, dec) for each requested band.

    Parameters: ra/dec in degrees, fov in degrees, bands an iterable of
    band names matching the service's ``obs_bandpass`` values.
    Returns a list of image arrays, one per band (same order as ``bands``).
    """
    # Query the SIA service; the RA extent is widened by 1/cos(dec) so the
    # search box spans `fov` degrees on the sky at this declination.
    imgTable = svc.search((ra,dec), (fov/np.cos(dec*np.pi/180), fov), verbosity=2).to_table()
    print("The full image list contains {:d} entries.".format(len(imgTable)))
    sel0 = (imgTable['proctype'] == 'Stack') & (imgTable['prodtype'] == 'image') # basic selection
    images = []
    for band in bands:
        print("Band {:s}: ".format(band)) #, end='')
        sel = sel0 & (imgTable['obs_bandpass'] == band) # add 'band' to selection
        Table = imgTable[sel] # select
        # NOTE(review): assumes at least one row survives the selection for
        # each band — np.argmax on an empty column would raise. Confirm.
        row = Table[np.argmax(Table['exptime'].data.data.astype('float'))] # pick image with longest exposure time
        url = row['access_url'] # get the download URL
        print('downloading deepest stacked image...')
        # fetch the FITS file (cached locally by astropy) and read its image data
        img = io.fits.getdata(utils.data.download_file(url,cache=True,show_progress=False,timeout=120))
        images.append(img)
    print("Downloaded {:d} images.".format(len(images)))
    return images
# multi panel image plotter
def plot_images(images, geo=None, panelsize=7, titles=list('gri'), cmap=plt.cm.gray_r):
    """
    Show each image in ``images`` in its own panel of a grid figure.

    ``geo`` is (ncols, nrows) of the panel grid (default 2x2); ``panelsize``
    is the size of one panel in inches; ``titles`` supplies one title per image.
    """
    if geo is None:
        geo = (2,2)
    fig = plt.figure(figsize=(geo[0]*panelsize,geo[1]*panelsize))
    for j,img in enumerate(images):
        ax = fig.add_subplot(geo[1],geo[0],j+1)
        # PowerNorm(0.1) compresses the bright end so faint structure shows.
        # NOTE(review): ``plt.mpl`` relies on pylab exposing matplotlib as
        # ``mpl``; confirm against the installed matplotlib version.
        ax.imshow(img,origin='lower',interpolation='none',cmap=cmap,norm=plt.mpl.colors.PowerNorm(0.1))
        ax.set_title('{:s}'.format(titles[j]))
        plt.axis('off')
Now we download the deepest stacked images in three bands and combine them to make a 3-band color image.
```python
bands = ['F515','R','I']
idx = 1
ra = 315.15
dec = -0.8
images = download_deepest_images(ra,dec, fov=0.1, bands=bands) # FOV in deg
```
The full image list contains 25 entries.
Band F515:
downloading deepest stacked image...
Band R:
downloading deepest stacked image...
Band I:
downloading deepest stacked image...
Downloaded 3 images.
```python
images2 = [im-np.median(im) for im in images] # subtract median from all images for better scaling
images2 += [make_lupton_rgb(*images2[::-1],stretch=0.5)] # add a 3-color composite image
plot_images(images2,titles=bands+['False-color 3-band image'])
```

# Some resources
Cenarro et al. (2018) "J-PLUS: The Javalambre Photometric Local Universe Survey":
http://adsabs.harvard.edu/abs/2018arXiv180402667C
Majewski et al. (2000) "Exploring Halo Substructure with Giant Stars. I. Survey Description and Calibration of the Photometric Search Technique": http://adsabs.harvard.edu/abs/2000AJ....120.2550M
|
astro-datalabREPO_NAMEnotebooks-latestPATH_START.@notebooks-latest_extracted@notebooks-latest-master@03_ScienceExamples@SpectralEnergyDistributions@SpectralEnergyDistributionsInSplusDr1.ipynb@.PATH_END.py
|
{
"filename": "pytables.py",
"repo_name": "pandas-dev/pandas",
"repo_path": "pandas_extracted/pandas-main/pandas/io/pytables.py",
"type": "Python"
}
|
"""
High level interface to PyTables for reading and writing pandas data structures
to disk
"""
from __future__ import annotations
from contextlib import suppress
import copy
from datetime import (
date,
tzinfo,
)
import itertools
import os
import re
from textwrap import dedent
from typing import (
TYPE_CHECKING,
Any,
Final,
Literal,
cast,
overload,
)
import warnings
import numpy as np
from pandas._config import (
config,
get_option,
using_string_dtype,
)
from pandas._libs import (
lib,
writers as libwriters,
)
from pandas._libs.lib import is_string_array
from pandas._libs.tslibs import timezones
from pandas.compat._optional import import_optional_dependency
from pandas.compat.pickle_compat import patch_pickle
from pandas.errors import (
AttributeConflictWarning,
ClosedFileError,
IncompatibilityWarning,
PerformanceWarning,
PossibleDataLossError,
)
from pandas.util._decorators import cache_readonly
from pandas.util._exceptions import find_stack_level
from pandas.core.dtypes.common import (
ensure_object,
is_bool_dtype,
is_complex_dtype,
is_list_like,
is_string_dtype,
needs_i8_conversion,
)
from pandas.core.dtypes.dtypes import (
CategoricalDtype,
DatetimeTZDtype,
ExtensionDtype,
PeriodDtype,
)
from pandas.core.dtypes.missing import array_equivalent
from pandas import (
DataFrame,
DatetimeIndex,
Index,
MultiIndex,
PeriodIndex,
RangeIndex,
Series,
StringDtype,
TimedeltaIndex,
concat,
isna,
)
from pandas.core.arrays import (
Categorical,
DatetimeArray,
PeriodArray,
)
from pandas.core.arrays.datetimes import tz_to_dtype
import pandas.core.common as com
from pandas.core.computation.pytables import (
PyTablesExpr,
maybe_expression,
)
from pandas.core.construction import extract_array
from pandas.core.indexes.api import ensure_index
from pandas.io.common import stringify_path
from pandas.io.formats.printing import (
adjoin,
pprint_thing,
)
if TYPE_CHECKING:
from collections.abc import (
Callable,
Hashable,
Iterator,
Sequence,
)
from types import TracebackType
from tables import (
Col,
File,
Node,
)
from pandas._typing import (
AnyArrayLike,
ArrayLike,
AxisInt,
DtypeArg,
FilePath,
Self,
Shape,
npt,
)
from pandas.core.internals import Block
# versioning attribute
# format version stamped into stored files; bump when the on-disk layout changes
_version = "0.15.2"

# encoding
# default text encoding used when (de)serializing strings
_default_encoding = "UTF-8"
def _ensure_encoding(encoding: str | None) -> str:
    """Return *encoding* unchanged, or the module default when it is None."""
    if encoding is not None:
        return encoding
    return _default_encoding
def _ensure_str(name):
    """
    Normalize an index / column name to a plain ``str``.

    Values that are already ``str`` (including np.str_-like subclasses) are
    passed through ``str()``; non-string values are returned unchanged.

    https://github.com/pandas-dev/pandas/issues/13492
    """
    return str(name) if isinstance(name, str) else name
# Public alias kept for backward compatibility with older pandas versions.
Term = PyTablesExpr
def _ensure_term(where, scope_level: int):
    """
    Ensure that the where is a Term or a list of Term.

    This makes sure that we are capturing the scope of variables that are
    passed create the terms here with a frame_level=2 (we are 2 levels down)
    """
    # only consider list/tuple here as an ndarray is automatically a coordinate
    # list
    level = scope_level + 1
    if isinstance(where, (list, tuple)):
        # convert each expression-like element to a Term; None entries are dropped
        where = [
            Term(term, scope_level=level + 1) if maybe_expression(term) else term
            for term in where
            if term is not None
        ]
    elif maybe_expression(where):
        where = Term(where, scope_level=level)
    # an empty (falsy-length) criteria collapses to None, i.e. "no criteria"
    return where if where is None or len(where) else None
# warning-message templates used with the pandas.errors warning classes
incompatibility_doc: Final = """
where criteria is being ignored as this version [%s] is too old (or
not-defined), read the file in and write it out to a new file to upgrade (with
the copy_to method)
"""

attribute_conflict_doc: Final = """
the [%s] attribute of the existing index is [%s] which conflicts with the new
[%s], resetting the attribute to None
"""

performance_doc: Final = """
your performance may suffer as PyTables will pickle object types that it cannot
map directly to c-types [inferred_type->%s,key->%s] [items->%s]
"""

# formats
# map user-facing format aliases to their canonical names
_FORMAT_MAP = {"f": "fixed", "fixed": "fixed", "t": "table", "table": "table"}

# axes map
_AXES_MAP = {DataFrame: [0]}

# register our configuration options
dropna_doc: Final = """
: boolean
    drop ALL nan rows when appending to a table
"""

format_doc: Final = """
: format
    default format writing format, if None, then
    put will default to 'fixed' and append will default to 'table'
"""

with config.config_prefix("io.hdf"):
    config.register_option("dropna_table", False, dropna_doc, validator=config.is_bool)
    config.register_option(
        "default_format",
        None,
        format_doc,
        validator=config.is_one_of_factory(["fixed", "table", None]),
    )

# oh the troubles to reduce import time
# lazily-imported PyTables module handle and its file-open policy; both are
# populated on first call to _tables()
_table_mod = None
_table_file_open_policy_is_strict = False
def _tables():
    """Import PyTables on first use, cache the module, and return it."""
    global _table_mod
    global _table_file_open_policy_is_strict
    if _table_mod is None:
        import tables

        _table_mod = tables

        # set the file open policy
        # return the file open policy; this changes as of pytables 3.1
        # depending on the HDF5 version
        # (older PyTables may lack the attribute, hence the suppress)
        with suppress(AttributeError):
            _table_file_open_policy_is_strict = (
                tables.file._FILE_OPEN_POLICY == "strict"
            )
    return _table_mod
# interface to/from ###


def to_hdf(
    path_or_buf: FilePath | HDFStore,
    key: str,
    value: DataFrame | Series,
    mode: str = "a",
    complevel: int | None = None,
    complib: str | None = None,
    append: bool = False,
    format: str | None = None,
    index: bool = True,
    min_itemsize: int | dict[str, int] | None = None,
    nan_rep=None,
    dropna: bool | None = None,
    data_columns: Literal[True] | list[str] | None = None,
    errors: str = "strict",
    encoding: str = "UTF-8",
) -> None:
    """store this object, close it if we opened it"""

    def writer(store: HDFStore) -> None:
        """Write ``value`` under ``key`` via append or put, as requested."""
        if append:
            store.append(
                key,
                value,
                format=format,
                index=index,
                min_itemsize=min_itemsize,
                nan_rep=nan_rep,
                dropna=dropna,
                data_columns=data_columns,
                errors=errors,
                encoding=encoding,
            )
        else:
            store.put(
                key,
                value,
                format=format,
                index=index,
                min_itemsize=min_itemsize,
                nan_rep=nan_rep,
                data_columns=data_columns,
                errors=errors,
                encoding=encoding,
                dropna=dropna,
            )

    if isinstance(path_or_buf, HDFStore):
        # an already-open store: write into it and leave it open
        writer(path_or_buf)
    else:
        # a path: open a store ourselves and close it when done
        path_or_buf = stringify_path(path_or_buf)
        with HDFStore(
            path_or_buf, mode=mode, complevel=complevel, complib=complib
        ) as store:
            writer(store)
def read_hdf(
    path_or_buf: FilePath | HDFStore,
    key=None,
    mode: str = "r",
    errors: str = "strict",
    where: str | list | None = None,
    start: int | None = None,
    stop: int | None = None,
    columns: list[str] | None = None,
    iterator: bool = False,
    chunksize: int | None = None,
    **kwargs,
):
    """
    Read from the store, close it if we opened it.

    Retrieve pandas object stored in file, optionally based on where
    criteria.

    .. warning::

       Pandas uses PyTables for reading and writing HDF5 files, which allows
       serializing object-dtype data with pickle when using the "fixed" format.
       Loading pickled data received from untrusted sources can be unsafe.

       See: https://docs.python.org/3/library/pickle.html for more.

    Parameters
    ----------
    path_or_buf : str, path object, pandas.HDFStore
        Any valid string path is acceptable. Only supports the local file system,
        remote URLs and file-like objects are not supported.

        If you want to pass in a path object, pandas accepts any
        ``os.PathLike``.

        Alternatively, pandas accepts an open :class:`pandas.HDFStore` object.
    key : object, optional
        The group identifier in the store. Can be omitted if the HDF file
        contains a single pandas object.
    mode : {'r', 'r+', 'a'}, default 'r'
        Mode to use when opening the file. Ignored if path_or_buf is a
        :class:`pandas.HDFStore`. Default is 'r'.
    errors : str, default 'strict'
        Specifies how encoding and decoding errors are to be handled.
        See the errors argument for :func:`open` for a full list
        of options.
    where : list, optional
        A list of Term (or convertible) objects.
    start : int, optional
        Row number to start selection.
    stop : int, optional
        Row number to stop selection.
    columns : list, optional
        A list of columns names to return.
    iterator : bool, optional
        Return an iterator object.
    chunksize : int, optional
        Number of rows to include in an iteration when using an iterator.
    **kwargs
        Additional keyword arguments passed to HDFStore.

    Returns
    -------
    object
        The selected object. Return type depends on the object stored.

    See Also
    --------
    DataFrame.to_hdf : Write a HDF file from a DataFrame.
    HDFStore : Low-level access to HDF files.

    Examples
    --------
    >>> df = pd.DataFrame([[1, 1.0, "a"]], columns=["x", "y", "z"])  # doctest: +SKIP
    >>> df.to_hdf("./store.h5", "data")  # doctest: +SKIP
    >>> reread = pd.read_hdf("./store.h5")  # doctest: +SKIP
    """
    # validate the read mode up front, before touching the file
    if mode not in ["r", "r+", "a"]:
        raise ValueError(
            f"mode {mode} is not allowed while performing a read. "
            f"Allowed modes are r, r+ and a."
        )
    # grab the scope
    if where is not None:
        where = _ensure_term(where, scope_level=1)

    if isinstance(path_or_buf, HDFStore):
        # caller owns the store: never close it here
        if not path_or_buf.is_open:
            raise OSError("The HDFStore must be open for reading.")

        store = path_or_buf
        auto_close = False
    else:
        path_or_buf = stringify_path(path_or_buf)
        if not isinstance(path_or_buf, str):
            raise NotImplementedError(
                "Support for generic buffers has not been implemented."
            )
        try:
            exists = os.path.exists(path_or_buf)

        # if filepath is too long
        except (TypeError, ValueError):
            exists = False

        if not exists:
            raise FileNotFoundError(f"File {path_or_buf} does not exist")

        store = HDFStore(path_or_buf, mode=mode, errors=errors, **kwargs)
        # can't auto open/close if we are using an iterator
        # so delegate to the iterator
        auto_close = True

    try:
        if key is None:
            # no key given: succeed only if the file holds a single dataset
            groups = store.groups()
            if len(groups) == 0:
                raise ValueError(
                    "Dataset(s) incompatible with Pandas data types, "
                    "not table, or no datasets found in HDF5 file."
                )
            candidate_only_group = groups[0]

            # For the HDF file to have only one dataset, all other groups
            # should then be metadata groups for that candidate group. (This
            # assumes that the groups() method enumerates parent groups
            # before their children.)
            for group_to_check in groups[1:]:
                if not _is_metadata_of(group_to_check, candidate_only_group):
                    raise ValueError(
                        "key must be provided when HDF5 "
                        "file contains multiple datasets."
                    )
            key = candidate_only_group._v_pathname
        return store.select(
            key,
            where=where,
            start=start,
            stop=stop,
            columns=columns,
            iterator=iterator,
            chunksize=chunksize,
            auto_close=auto_close,
        )
    except (ValueError, TypeError, LookupError):
        if not isinstance(path_or_buf, HDFStore):
            # if there is an error, close the store if we opened it.
            with suppress(AttributeError):
                store.close()

        raise
def _is_metadata_of(group: Node, parent_group: Node) -> bool:
"""Check if a given group is a metadata group for a given parent_group."""
if group._v_depth <= parent_group._v_depth:
return False
current = group
while current._v_depth > 1:
parent = current._v_parent
if parent == parent_group and current._v_name == "meta":
return True
current = current._v_parent
return False
class HDFStore:
"""
Dict-like IO interface for storing pandas objects in PyTables.
Either Fixed or Table format.
.. warning::
Pandas uses PyTables for reading and writing HDF5 files, which allows
serializing object-dtype data with pickle when using the "fixed" format.
Loading pickled data received from untrusted sources can be unsafe.
See: https://docs.python.org/3/library/pickle.html for more.
Parameters
----------
path : str
File path to HDF5 file.
mode : {'a', 'w', 'r', 'r+'}, default 'a'
``'r'``
Read-only; no data can be modified.
``'w'``
Write; a new file is created (an existing file with the same
name would be deleted).
``'a'``
Append; an existing file is opened for reading and writing,
and if the file does not exist it is created.
``'r+'``
It is similar to ``'a'``, but the file must already exist.
complevel : int, 0-9, default None
Specifies a compression level for data.
A value of 0 or None disables compression.
complib : {'zlib', 'lzo', 'bzip2', 'blosc'}, default 'zlib'
Specifies the compression library to be used.
These additional compressors for Blosc are supported
(default if no compressor specified: 'blosc:blosclz'):
{'blosc:blosclz', 'blosc:lz4', 'blosc:lz4hc', 'blosc:snappy',
'blosc:zlib', 'blosc:zstd'}.
Specifying a compression library which is not available issues
a ValueError.
fletcher32 : bool, default False
If applying compression use the fletcher32 checksum.
**kwargs
These parameters will be passed to the PyTables open_file method.
Examples
--------
>>> bar = pd.DataFrame(np.random.randn(10, 4))
>>> store = pd.HDFStore("test.h5")
>>> store["foo"] = bar # write to HDF5
>>> bar = store["foo"] # retrieve
>>> store.close()
**Create or load HDF5 file in-memory**
When passing the `driver` option to the PyTables open_file method through
**kwargs, the HDF5 file is loaded or created in-memory and will only be
written when closed:
>>> bar = pd.DataFrame(np.random.randn(10, 4))
>>> store = pd.HDFStore("test.h5", driver="H5FD_CORE")
>>> store["foo"] = bar
>>> store.close() # only now, data is written to disk
"""
_handle: File | None
_mode: str
def __init__(
self,
path,
mode: str = "a",
complevel: int | None = None,
complib=None,
fletcher32: bool = False,
**kwargs,
) -> None:
if "format" in kwargs:
raise ValueError("format is not a defined argument for HDFStore")
tables = import_optional_dependency("tables")
if complib is not None and complib not in tables.filters.all_complibs:
raise ValueError(
f"complib only supports {tables.filters.all_complibs} compression."
)
if complib is None and complevel is not None:
complib = tables.filters.default_complib
self._path = stringify_path(path)
if mode is None:
mode = "a"
self._mode = mode
self._handle = None
self._complevel = complevel if complevel else 0
self._complib = complib
self._fletcher32 = fletcher32
self._filters = None
self.open(mode=mode, **kwargs)
def __fspath__(self) -> str:
return self._path
@property
def root(self):
"""return the root node"""
self._check_if_open()
assert self._handle is not None # for mypy
return self._handle.root
@property
def filename(self) -> str:
return self._path
def __getitem__(self, key: str):
return self.get(key)
def __setitem__(self, key: str, value) -> None:
self.put(key, value)
def __delitem__(self, key: str) -> int | None:
return self.remove(key)
def __getattr__(self, name: str):
"""allow attribute access to get stores"""
try:
return self.get(name)
except (KeyError, ClosedFileError):
pass
raise AttributeError(
f"'{type(self).__name__}' object has no attribute '{name}'"
)
def __contains__(self, key: str) -> bool:
"""
check for existence of this key
can match the exact pathname or the pathnm w/o the leading '/'
"""
node = self.get_node(key)
if node is not None:
name = node._v_pathname
if key in (name, name[1:]):
return True
return False
def __len__(self) -> int:
return len(self.groups())
def __repr__(self) -> str:
pstr = pprint_thing(self._path)
return f"{type(self)}\nFile path: {pstr}\n"
def __enter__(self) -> Self:
return self
def __exit__(
self,
exc_type: type[BaseException] | None,
exc_value: BaseException | None,
traceback: TracebackType | None,
) -> None:
self.close()
def keys(self, include: str = "pandas") -> list[str]:
"""
Return a list of keys corresponding to objects stored in HDFStore.
Parameters
----------
include : str, default 'pandas'
When kind equals 'pandas' return pandas objects.
When kind equals 'native' return native HDF5 Table objects.
Returns
-------
list
List of ABSOLUTE path-names (e.g. have the leading '/').
Raises
------
raises ValueError if kind has an illegal value
See Also
--------
HDFStore.info : Prints detailed information on the store.
HDFStore.get_node : Returns the node with the key.
HDFStore.get_storer : Returns the storer object for a key.
Examples
--------
>>> df = pd.DataFrame([[1, 2], [3, 4]], columns=["A", "B"])
>>> store = pd.HDFStore("store.h5", "w") # doctest: +SKIP
>>> store.put("data", df) # doctest: +SKIP
>>> store.get("data") # doctest: +SKIP
>>> print(store.keys()) # doctest: +SKIP
['/data1', '/data2']
>>> store.close() # doctest: +SKIP
"""
if include == "pandas":
return [n._v_pathname for n in self.groups()]
elif include == "native":
assert self._handle is not None # mypy
return [
n._v_pathname for n in self._handle.walk_nodes("/", classname="Table")
]
raise ValueError(
f"`include` should be either 'pandas' or 'native' but is '{include}'"
)
def __iter__(self) -> Iterator[str]:
    """Iterate over the store's keys."""
    return iter(self.keys())
def items(self) -> Iterator[tuple[str, list]]:
    """Yield ``(pathname, group)`` pairs for each top-level group."""
    for group in self.groups():
        yield group._v_pathname, group
def open(self, mode: str = "a", **kwargs) -> None:
    """
    Open the file in the specified mode.

    Parameters
    ----------
    mode : {'a', 'w', 'r', 'r+'}, default 'a'
        See HDFStore docstring or tables.open_file for info about modes.
    **kwargs
        These parameters will be passed to the PyTables open_file method.

    Raises
    ------
    PossibleDataLossError
        If re-opening an already-open file with mode 'w', which would
        truncate (delete) its current contents.
    """
    tables = _tables()
    if self._mode != mode:
        # if we are changing a write mode to read, ok
        if self._mode in ["a", "w"] and mode in ["r", "r+"]:
            pass
        elif mode in ["w"]:
            # this would truncate the existing file, raise here
            if self.is_open:
                raise PossibleDataLossError(
                    f"Re-opening the file [{self._path}] with mode [{self._mode}] "
                    "will delete the current file!"
                )
        self._mode = mode
    # close and reopen the handle
    if self.is_open:
        self.close()
    if self._complevel and self._complevel > 0:
        # rebuild the compression filters from the stored settings
        self._filters = _tables().Filters(
            self._complevel, self._complib, fletcher32=self._fletcher32
        )
    # NOTE(review): after the close() above, is_open is False, so this
    # strict-open-policy guard looks unreachable — confirm intent upstream.
    if _table_file_open_policy_is_strict and self.is_open:
        msg = (
            "Cannot open HDF5 file, which is already opened, "
            "even in read-only mode."
        )
        raise ValueError(msg)
    self._handle = tables.open_file(self._path, self._mode, **kwargs)
def close(self) -> None:
    """Close the PyTables file handle, if one is open."""
    handle = self._handle
    if handle is not None:
        handle.close()
    self._handle = None
@property
def is_open(self) -> bool:
    """True while the underlying PyTables file handle is open."""
    handle = self._handle
    return handle is not None and bool(handle.isopen)
def flush(self, fsync: bool = False) -> None:
    """
    Force all buffered modifications to be written to disk.

    Parameters
    ----------
    fsync : bool, default False
        Also call ``os.fsync()`` on the file handle to force the OS to
        write to disk.

    Notes
    -----
    Without ``fsync=True`` the OS may still buffer the write. With fsync
    the call blocks until the OS claims the file is written, though other
    caching layers may still interfere.
    """
    if self._handle is None:
        return
    self._handle.flush()
    if fsync:
        # fsync can fail on some filesystems; best-effort only
        with suppress(OSError):
            os.fsync(self._handle.fileno())
def get(self, key: str):
    """
    Retrieve a pandas object stored in the file.

    Parameters
    ----------
    key : str
        Key of the object to retrieve.

    Returns
    -------
    object
        Same type as the object stored in the file.

    Raises
    ------
    KeyError
        If no object is stored under *key*.

    See Also
    --------
    HDFStore.get_node : Returns the node with the key.
    HDFStore.get_storer : Returns the storer object for a key.

    Examples
    --------
    >>> df = pd.DataFrame([[1, 2], [3, 4]], columns=["A", "B"])
    >>> store = pd.HDFStore("store.h5", "w")  # doctest: +SKIP
    >>> store.put("data", df)  # doctest: +SKIP
    >>> store.get("data")  # doctest: +SKIP
    >>> store.close()  # doctest: +SKIP
    """
    # patch_pickle teaches pickle to load legacy DateOffset objects
    # (GH#31167) while we read the group
    with patch_pickle():
        node = self.get_node(key)
        if node is None:
            raise KeyError(f"No object named {key} in the file")
        return self._read_group(node)
def select(
    self,
    key: str,
    where=None,
    start=None,
    stop=None,
    columns=None,
    iterator: bool = False,
    chunksize: int | None = None,
    auto_close: bool = False,
):
    """
    Retrieve pandas object stored in file, optionally based on where criteria.

    .. warning::

        Pandas uses PyTables for reading and writing HDF5 files, which allows
        serializing object-dtype data with pickle when using the "fixed" format.
        Loading pickled data received from untrusted sources can be unsafe.

        See: https://docs.python.org/3/library/pickle.html for more.

    Parameters
    ----------
    key : str
        Object being retrieved from file.
    where : list or None
        List of Term (or convertible) objects, optional.
    start : int or None
        Row number to start selection.
    stop : int, default None
        Row number to stop selection.
    columns : list or None
        A list of columns that if not None, will limit the return columns.
    iterator : bool or False
        Returns an iterator.
    chunksize : int or None
        Number or rows to include in iteration, return an iterator.
    auto_close : bool or False
        Should automatically close the store when finished.

    Returns
    -------
    object
        Retrieved object from file.

    Raises
    ------
    KeyError
        If no object is stored under *key*.

    See Also
    --------
    HDFStore.select_as_coordinates : Returns the selection as an index.
    HDFStore.select_column : Returns a single column from the table.
    HDFStore.select_as_multiple : Retrieves pandas objects from multiple tables.

    Examples
    --------
    >>> df = pd.DataFrame([[1, 2], [3, 4]], columns=["A", "B"])
    >>> store = pd.HDFStore("store.h5", "w")  # doctest: +SKIP
    >>> store.put("data", df)  # doctest: +SKIP
    >>> store.get("data")  # doctest: +SKIP
    >>> print(store.keys())  # doctest: +SKIP
    ['/data1', '/data2']
    >>> store.select("/data1")  # doctest: +SKIP
       A  B
    0  1  2
    1  3  4
    >>> store.select("/data1", where="columns == A")  # doctest: +SKIP
       A
    0  1
    1  3
    >>> store.close()  # doctest: +SKIP
    """
    group = self.get_node(key)
    if group is None:
        raise KeyError(f"No object named {key} in the file")

    # create the storer and axes
    # scope_level=1: presumably resolves inline variables from the
    # caller's frame — TODO confirm against _ensure_term
    where = _ensure_term(where, scope_level=1)
    s = self._create_storer(group)
    s.infer_axes()

    # function to call on iteration; closes over the storer and the
    # requested column subset
    def func(_start, _stop, _where):
        return s.read(start=_start, stop=_stop, where=_where, columns=columns)

    # create the iterator; get_result() performs the actual read (or
    # returns an iterator when iterator/chunksize was requested)
    it = TableIterator(
        self,
        s,
        func,
        where=where,
        nrows=s.nrows,
        start=start,
        stop=stop,
        iterator=iterator,
        chunksize=chunksize,
        auto_close=auto_close,
    )

    return it.get_result()
def select_as_coordinates(
    self,
    key: str,
    where=None,
    start: int | None = None,
    stop: int | None = None,
):
    """
    Return the selection for *key* as an Index of row coordinates.

    .. warning::

        Pandas uses PyTables for reading and writing HDF5 files, which
        allows serializing object-dtype data with pickle when using the
        "fixed" format. Loading pickled data received from untrusted
        sources can be unsafe.

        See: https://docs.python.org/3/library/pickle.html for more.

    Parameters
    ----------
    key : str
    where : list of Term (or convertible) objects, optional
    start : int, optional
        Row number to start selection.
    stop : int, optional
        Row number to stop selection.
    """
    where = _ensure_term(where, scope_level=1)
    storer = self.get_storer(key)
    if not isinstance(storer, Table):
        raise TypeError("can only read_coordinates with a table")
    return storer.read_coordinates(where=where, start=start, stop=stop)
def select_column(
    self,
    key: str,
    column: str,
    start: int | None = None,
    stop: int | None = None,
):
    """
    Return a single column from a table. Generally only useful to
    select an indexable column.

    .. warning::

        Pandas uses PyTables for reading and writing HDF5 files, which
        allows serializing object-dtype data with pickle when using the
        "fixed" format. Loading pickled data received from untrusted
        sources can be unsafe.

        See: https://docs.python.org/3/library/pickle.html for more.

    Parameters
    ----------
    key : str
    column : str
        The column of interest.
    start : int or None, default None
    stop : int or None, default None

    Raises
    ------
    KeyError
        If the column is not found (or key is not a valid store).
    ValueError
        If the column cannot be extracted individually (it is part of a
        data block).
    """
    storer = self.get_storer(key)
    if not isinstance(storer, Table):
        raise TypeError("can only read_column with a table")
    return storer.read_column(column=column, start=start, stop=stop)
def select_as_multiple(
    self,
    keys,
    where=None,
    selector=None,
    columns=None,
    start=None,
    stop=None,
    iterator: bool = False,
    chunksize: int | None = None,
    auto_close: bool = False,
):
    """
    Retrieve pandas objects from multiple tables.

    .. warning::

        Pandas uses PyTables for reading and writing HDF5 files, which allows
        serializing object-dtype data with pickle when using the "fixed" format.
        Loading pickled data received from untrusted sources can be unsafe.

        See: https://docs.python.org/3/library/pickle.html for more.

    Parameters
    ----------
    keys : a list of the tables
    selector : the table to apply the where criteria (defaults to keys[0]
        if not supplied)
    columns : the columns I want back
    start : integer (defaults to None), row number to start selection
    stop : integer (defaults to None), row number to stop selection
    iterator : bool, return an iterator, default False
    chunksize : nrows to include in iteration, return an iterator
    auto_close : bool, default False
        Should automatically close the store when finished.

    Raises
    ------
    raises KeyError if keys or selector is not found or keys is empty
    raises TypeError if keys is not a list or tuple
    raises ValueError if the tables are not ALL THE SAME DIMENSIONS
    """
    # default to single select when only one key is given
    where = _ensure_term(where, scope_level=1)
    if isinstance(keys, (list, tuple)) and len(keys) == 1:
        keys = keys[0]
    if isinstance(keys, str):
        return self.select(
            key=keys,
            where=where,
            columns=columns,
            start=start,
            stop=stop,
            iterator=iterator,
            chunksize=chunksize,
            auto_close=auto_close,
        )

    if not isinstance(keys, (list, tuple)):
        raise TypeError("keys must be a list/tuple")

    if not len(keys):
        raise ValueError("keys must have a non-zero length")

    if selector is None:
        selector = keys[0]

    # collect the tables
    tbls = [self.get_storer(k) for k in keys]
    s = self.get_storer(selector)

    # validate rows: every table (including the selector) must exist,
    # be a Table, and have the same number of rows
    nrows = None
    for t, k in itertools.chain([(s, selector)], zip(tbls, keys)):
        if t is None:
            raise KeyError(f"Invalid table [{k}]")
        if not t.is_table:
            raise TypeError(
                f"object [{t.pathname}] is not a table, and cannot be used in all "
                "select as multiple"
            )

        if nrows is None:
            nrows = t.nrows
        elif t.nrows != nrows:
            raise ValueError("all tables must have exactly the same nrows!")

    # The isinstance checks here are redundant with the check above,
    # but necessary for mypy; see GH#29757
    _tbls = [x for x in tbls if isinstance(x, Table)]

    # axis is the concentration axes
    axis = {t.non_index_axes[0][0] for t in _tbls}.pop()

    def func(_start, _stop, _where):
        # retrieve the objs, _where is always passed as a set of
        # coordinates here (selection happened on the selector table)
        objs = [
            t.read(where=_where, columns=columns, start=_start, stop=_stop)
            for t in tbls
        ]

        # concat and return
        return concat(objs, axis=axis, verify_integrity=False)._consolidate()

    # create the iterator; coordinates=True so the where-selection runs
    # once on the selector and is reused for every table
    it = TableIterator(
        self,
        s,
        func,
        where=where,
        nrows=nrows,
        start=start,
        stop=stop,
        iterator=iterator,
        chunksize=chunksize,
        auto_close=auto_close,
    )

    return it.get_result(coordinates=True)
def put(
    self,
    key: str,
    value: DataFrame | Series,
    format=None,
    index: bool = True,
    append: bool = False,
    complib=None,
    complevel: int | None = None,
    min_itemsize: int | dict[str, int] | None = None,
    nan_rep=None,
    data_columns: Literal[True] | list[str] | None = None,
    encoding=None,
    errors: str = "strict",
    track_times: bool = True,
    dropna: bool = False,
) -> None:
    """
    Store an object in the HDFStore.

    Parameters
    ----------
    key : str
        Key of object to store in file.
    value : {Series, DataFrame}
        Value of object to store in file.
    format : 'fixed(f)|table(t)', default is 'fixed'
        ``'fixed'``: fast writing/reading, but not appendable nor
        searchable. ``'table'``: a PyTables Table structure which may
        perform worse but allows searching / selecting subsets of the
        data. When None, falls back to the ``io.hdf.default_format``
        option, then to ``'fixed'``.
    index : bool, default True
        Write DataFrame index as a column.
    append : bool, default False
        Force Table format and append the input data to the existing.
    complib : default None
        This parameter is currently not accepted.
    complevel : int, 0-9, default None
        Compression level for data; 0 or None disables compression.
    min_itemsize : int, dict, or None
        Dict of columns that specify minimum str sizes.
    nan_rep : str
        Str to use as str nan representation.
    data_columns : list of columns or True, default None
        Columns to create as data columns, or True to use all columns.
        See `here
        <https://pandas.pydata.org/pandas-docs/stable/user_guide/io.html#query-via-data-columns>`__.
    encoding : str, default None
        Provide an encoding for strings.
    errors : str, default 'strict'
        The error handling scheme to use for encoding errors ('strict',
        'ignore', 'replace', 'xmlcharrefreplace', or any other name
        registered with codecs.register_error).
    track_times : bool, default True
        Propagated to PyTables' 'create_table'. If False, files with the
        same content get identical hashes regardless of creation time.
    dropna : bool, default False, optional
        Remove missing values.

    See Also
    --------
    HDFStore.info : Prints detailed information on the store.
    HDFStore.get_storer : Returns the storer object for a key.

    Examples
    --------
    >>> df = pd.DataFrame([[1, 2], [3, 4]], columns=["A", "B"])
    >>> store = pd.HDFStore("store.h5", "w")  # doctest: +SKIP
    >>> store.put("data", df)  # doctest: +SKIP
    """
    # resolve the format lazily from the global option, then validate
    if format is None:
        format = get_option("io.hdf.default_format") or "fixed"
    self._write_to_group(
        key,
        value,
        format=self._validate_format(format),
        index=index,
        append=append,
        complib=complib,
        complevel=complevel,
        min_itemsize=min_itemsize,
        nan_rep=nan_rep,
        data_columns=data_columns,
        encoding=encoding,
        errors=errors,
        track_times=track_times,
        dropna=dropna,
    )
def remove(self, key: str, where=None, start=None, stop=None) -> int | None:
    """
    Remove pandas object partially by specifying the where condition.

    Parameters
    ----------
    key : str
        Node to remove or delete rows from.
    where : list of Term (or convertible) objects, optional
    start : integer (defaults to None), row number to start selection
    stop : integer (defaults to None), row number to stop selection

    Returns
    -------
    number of rows removed (or None if not a Table)

    Raises
    ------
    raises KeyError if key is not a valid store
    """
    where = _ensure_term(where, scope_level=1)
    try:
        s = self.get_storer(key)
    except KeyError:
        # the key is not a valid store, re-raising KeyError
        raise
    except AssertionError:
        # surface any assertion errors for e.g. debugging
        raise
    except Exception as err:
        # In tests we get here with ClosedFileError, TypeError, and
        # _table_mod.NoSuchNodeError. TODO: Catch only these?

        if where is not None:
            # a where clause only makes sense against a readable table
            raise ValueError(
                "trying to remove a node with a non-None where clause!"
            ) from err

        # we are actually trying to remove a node (with children):
        # fall back to removing the raw group tree
        node = self.get_node(key)
        if node is not None:
            node._f_remove(recursive=True)
            return None

    # remove the node entirely when no selection criteria are given
    if com.all_none(where, start, stop):
        s.group._f_remove(recursive=True)
        return None

    # delete selected rows from the table; only possible for Table format
    if not s.is_table:
        raise ValueError("can only remove with where on objects written as tables")
    return s.delete(where=where, start=start, stop=stop)
def append(
    self,
    key: str,
    value: DataFrame | Series,
    format=None,
    axes=None,
    index: bool | list[str] = True,
    append: bool = True,
    complib=None,
    complevel: int | None = None,
    columns=None,
    min_itemsize: int | dict[str, int] | None = None,
    nan_rep=None,
    chunksize: int | None = None,
    expectedrows=None,
    dropna: bool | None = None,
    data_columns: Literal[True] | list[str] | None = None,
    encoding=None,
    errors: str = "strict",
) -> None:
    """
    Append to a Table in file.

    The node must already exist and be Table format.

    Parameters
    ----------
    key : str
        Key of object to append.
    value : {Series, DataFrame}
        Value of object to append.
    format : 'table' is the default
        ``'table'``: a PyTables Table structure which may perform worse
        but allows searching / selecting subsets of the data. When None,
        falls back to the ``io.hdf.default_format`` option, then to
        ``'table'``.
    axes : default None
        This parameter is currently not accepted.
    index : bool, default True
        Write DataFrame index as a column.
    append : bool, default True
        Append the input data to the existing.
    complib : default None
        This parameter is currently not accepted.
    complevel : int, 0-9, default None
        Compression level for data; 0 or None disables compression.
    columns : default None
        This parameter is currently not accepted, try data_columns.
    min_itemsize : int, dict, or None
        Dict of columns that specify minimum str sizes.
    nan_rep : str
        Str to use as str nan representation.
    chunksize : int or None
        Size to chunk the writing.
    expectedrows : int
        Expected TOTAL row size of this table.
    dropna : bool, default False, optional
        Do not write an ALL nan row to the store; settable by the option
        'io.hdf.dropna_table'.
    data_columns : list of columns, or True, default None
        Columns to create as indexed data columns for on-disk queries,
        or True to use all columns. By default only the axes of the
        object are indexed. See `here
        <https://pandas.pydata.org/pandas-docs/stable/user_guide/io.html#query-via-data-columns>`__.
    encoding : default None
        Provide an encoding for str.
    errors : str, default 'strict'
        The error handling scheme to use for encoding errors ('strict',
        'ignore', 'replace', 'xmlcharrefreplace', or any other name
        registered with codecs.register_error).

    See Also
    --------
    HDFStore.append_to_multiple : Append to multiple tables.

    Notes
    -----
    Does *not* check if data being appended overlaps with existing
    data in the table, so be careful.

    Examples
    --------
    >>> df1 = pd.DataFrame([[1, 2], [3, 4]], columns=["A", "B"])
    >>> store = pd.HDFStore("store.h5", "w")  # doctest: +SKIP
    >>> store.put("data", df1, format="table")  # doctest: +SKIP
    >>> df2 = pd.DataFrame([[5, 6], [7, 8]], columns=["A", "B"])
    >>> store.append("data", df2)  # doctest: +SKIP
    >>> store.close()  # doctest: +SKIP
       A  B
    0  1  2
    1  3  4
    0  5  6
    1  7  8
    """
    # reject the unsupported alias up front
    if columns is not None:
        raise TypeError(
            "columns is not a supported keyword in append, try data_columns"
        )

    # fall back to the global options for unspecified parameters
    if dropna is None:
        dropna = get_option("io.hdf.dropna_table")
    if format is None:
        format = get_option("io.hdf.default_format") or "table"

    self._write_to_group(
        key,
        value,
        format=self._validate_format(format),
        axes=axes,
        index=index,
        append=append,
        complib=complib,
        complevel=complevel,
        min_itemsize=min_itemsize,
        nan_rep=nan_rep,
        chunksize=chunksize,
        expectedrows=expectedrows,
        dropna=dropna,
        data_columns=data_columns,
        encoding=encoding,
        errors=errors,
    )
def append_to_multiple(
    self,
    d: dict,
    value,
    selector,
    data_columns=None,
    axes=None,
    dropna: bool = False,
    **kwargs,
) -> None:
    """
    Append to multiple tables.

    Parameters
    ----------
    d : a dict of table_name to table_columns, None is acceptable as the
        values of one node (this will get all the remaining columns)
    value : a pandas object
    selector : a string that designates the indexable table; all of its
        columns will be designed as data_columns, unless data_columns is
        passed, in which case these are used
    data_columns : list of columns to create as data columns, or True to
        use all columns
    dropna : if evaluates to True, drop rows from all tables if any single
        row in each table has all NaN. Default False.

    Notes
    -----
    axes parameter is currently not accepted
    """
    if axes is not None:
        raise TypeError(
            "axes is currently not accepted as a parameter to append_to_multiple; "
            "you can create the tables independently instead"
        )

    if not isinstance(d, dict):
        raise ValueError(
            "append_to_multiple must have a dictionary specified as the "
            "way to split the value"
        )

    if selector not in d:
        raise ValueError(
            "append_to_multiple requires a selector that is in passed dict"
        )

    # figure out the splitting axis (the non_index_axis)
    axis = next(iter(set(range(value.ndim)) - set(_AXES_MAP[type(value)])))

    # figure out how to split the value: at most one entry of d may be
    # None, and it receives all columns not claimed by the other entries
    remain_key = None
    remain_values: list = []
    for k, v in d.items():
        if v is None:
            if remain_key is not None:
                raise ValueError(
                    "append_to_multiple can only have one value in d that is None"
                )
            remain_key = k
        else:
            remain_values.extend(v)
    if remain_key is not None:
        # assign the leftover columns, preserving their original order
        ordered = value.axes[axis]
        ordd = ordered.difference(Index(remain_values))
        ordd = sorted(ordered.get_indexer(ordd))
        d[remain_key] = ordered.take(ordd)

    # data_columns: default to all columns of the selector table
    if data_columns is None:
        data_columns = d[selector]

    # ensure rows are synchronized across the tables: keep only rows
    # that are non-all-NaN in every table's column subset
    if dropna:
        idxs = (value[cols].dropna(how="all").index for cols in d.values())
        valid_index = next(idxs)
        for index in idxs:
            valid_index = valid_index.intersection(index)
        value = value.loc[valid_index]

    min_itemsize = kwargs.pop("min_itemsize", None)

    # append each column subset to its table
    for k, v in d.items():
        dc = data_columns if k == selector else None

        # compute the val
        val = value.reindex(v, axis=axis)

        # pass along only the min_itemsize entries relevant to this table
        filtered = (
            {key: value for (key, value) in min_itemsize.items() if key in v}
            if min_itemsize is not None
            else None
        )
        self.append(k, val, data_columns=dc, min_itemsize=filtered, **kwargs)
def create_table_index(
    self,
    key: str,
    columns=None,
    optlevel: int | None = None,
    kind: str | None = None,
) -> None:
    """
    Create a PyTables index on the table stored under *key*.

    Parameters
    ----------
    key : str
    columns : None, bool, or listlike[str]
        Which columns to index: False for none, True or None for all,
        or an explicit list of column names.
    optlevel : int or None, default None
        Optimization level; PyTables defaults to 6 when None.
    kind : str or None, default None
        Kind of index; PyTables defaults to "medium" when None.

    Raises
    ------
    TypeError
        If the node is not a table.
    """
    # version requirements
    _tables()
    storer = self.get_storer(key)
    if storer is None:
        return
    if not isinstance(storer, Table):
        raise TypeError("cannot create table index on a Fixed format store")
    storer.create_index(columns=columns, optlevel=optlevel, kind=kind)
def groups(self) -> list:
    """
    Return a list of all the top-level nodes.

    Each node returned is not a pandas storage object.

    Returns
    -------
    list
        List of objects.

    See Also
    --------
    HDFStore.get_node : Returns the node with the key.

    Examples
    --------
    >>> df = pd.DataFrame([[1, 2], [3, 4]], columns=["A", "B"])
    >>> store = pd.HDFStore("store.h5", "w")  # doctest: +SKIP
    >>> store.put("data", df)  # doctest: +SKIP
    >>> print(store.groups())  # doctest: +SKIP
    >>> store.close()  # doctest: +SKIP
    [/data (Group) ''
      children := ['axis0' (Array), 'axis1' (Array), 'block0_values' (Array),
      'block0_items' (Array)]]
    """
    _tables()
    self._check_if_open()
    assert self._handle is not None  # for mypy
    assert _table_mod is not None  # for mypy

    # keep a node if it is not a soft/external link AND it looks like a
    # stored object: it carries a pandas_type attribute, or has a "table"
    # child, or is itself a Table node not named "table" (a native table)
    return [
        g
        for g in self._handle.walk_groups()
        if (
            not isinstance(g, _table_mod.link.Link)
            and (
                getattr(g._v_attrs, "pandas_type", None)
                or getattr(g, "table", None)
                or (isinstance(g, _table_mod.table.Table) and g._v_name != "table")
            )
        )
    ]
def walk(self, where: str = "/") -> Iterator[tuple[str, list[str], list[str]]]:
    """
    Walk the pytables group hierarchy for pandas objects.

    This generator will yield the group path, subgroups and pandas object
    names for each group.

    Any non-pandas PyTables objects that are not a group will be ignored.

    The `where` group itself is listed first (preorder), then each of its
    child groups (following an alphanumerical order) is also traversed,
    following the same procedure.

    Parameters
    ----------
    where : str, default "/"
        Group where to start walking.

    Yields
    ------
    path : str
        Full path to a group (without trailing '/').
    groups : list
        Names (strings) of the groups contained in `path`.
    leaves : list
        Names (strings) of the pandas objects contained in `path`.

    See Also
    --------
    HDFStore.info : Prints detailed information on the store.

    Examples
    --------
    >>> df1 = pd.DataFrame([[1, 2], [3, 4]], columns=["A", "B"])
    >>> store = pd.HDFStore("store.h5", "w")  # doctest: +SKIP
    >>> store.put("data", df1, format="table")  # doctest: +SKIP
    >>> df2 = pd.DataFrame([[5, 6], [7, 8]], columns=["A", "B"])
    >>> store.append("data", df2)  # doctest: +SKIP
    >>> store.close()  # doctest: +SKIP
    >>> for group in store.walk():  # doctest: +SKIP
    ...     print(group)  # doctest: +SKIP
    >>> store.close()  # doctest: +SKIP
    """
    _tables()
    self._check_if_open()
    assert self._handle is not None  # for mypy
    assert _table_mod is not None  # for mypy

    for g in self._handle.walk_groups(where):
        # a group carrying pandas_type IS a stored pandas object, not a
        # container to descend into — skip it here; its parent already
        # reported it as a leaf
        if getattr(g._v_attrs, "pandas_type", None) is not None:
            continue

        groups = []
        leaves = []
        for child in g._v_children.values():
            pandas_type = getattr(child._v_attrs, "pandas_type", None)
            if pandas_type is None:
                # plain sub-group: recurse target; non-group children
                # without pandas_type are ignored
                if isinstance(child, _table_mod.group.Group):
                    groups.append(child._v_name)
            else:
                leaves.append(child._v_name)

        yield (g._v_pathname.rstrip("/"), groups, leaves)
def get_node(self, key: str) -> Node | None:
    """Return the node for *key*, or None if it does not exist."""
    self._check_if_open()
    path = key if key.startswith("/") else "/" + key

    assert self._handle is not None
    assert _table_mod is not None  # for mypy
    try:
        node = self._handle.get_node(self.root, path)
    except _table_mod.exceptions.NoSuchNodeError:
        return None

    assert isinstance(node, _table_mod.Node), type(node)
    return node
def get_storer(self, key: str) -> GenericFixed | Table:
    """Return the storer object for *key*; raise KeyError if absent."""
    group = self.get_node(key)
    if group is None:
        raise KeyError(f"No object named {key} in the file")
    storer = self._create_storer(group)
    storer.infer_axes()
    return storer
def copy(
    self,
    file,
    mode: str = "w",
    propindexes: bool = True,
    keys=None,
    complib=None,
    complevel: int | None = None,
    fletcher32: bool = False,
    overwrite: bool = True,
) -> HDFStore:
    """
    Copy the existing store to a new file, updating in place.

    Parameters
    ----------
    propindexes : bool, default True
        Restore indexes in copied file.
    keys : list, optional
        List of keys to include in the copy (defaults to all).
    overwrite : bool, default True
        Whether to overwrite (remove and replace) existing nodes in the new store.
    mode, complib, complevel, fletcher32 same as in HDFStore.__init__

    Returns
    -------
    open file handle of the new store
    """
    new_store = HDFStore(
        file, mode=mode, complib=complib, complevel=complevel, fletcher32=fletcher32
    )
    if keys is None:
        keys = list(self.keys())
    if not isinstance(keys, (tuple, list)):
        keys = [keys]
    for k in keys:
        s = self.get_storer(k)
        if s is not None:
            if k in new_store:
                if overwrite:
                    new_store.remove(k)

            data = self.select(k)
            if isinstance(s, Table):
                # table format: optionally re-create the same column
                # indexes that existed on the source table
                index: bool | list[str] = False
                if propindexes:
                    index = [a.name for a in s.axes if a.is_indexed]
                new_store.append(
                    k,
                    data,
                    index=index,
                    data_columns=getattr(s, "data_columns", None),
                    encoding=s.encoding,
                )
            else:
                # fixed format: a plain put preserves the object
                new_store.put(k, data, encoding=s.encoding)

    return new_store
def info(self) -> str:
    """
    Print detailed information on the store.

    Returns
    -------
    str
        A String containing the python pandas class name, filepath to the HDF5
        file and all the object keys along with their respective dataframe shapes.

    See Also
    --------
    HDFStore.get_storer : Returns the storer object for a key.

    Examples
    --------
    >>> df1 = pd.DataFrame([[1, 2], [3, 4]], columns=["A", "B"])
    >>> df2 = pd.DataFrame([[5, 6], [7, 8]], columns=["C", "D"])
    >>> store = pd.HDFStore("store.h5", "w")  # doctest: +SKIP
    >>> store.put("data1", df1)  # doctest: +SKIP
    >>> store.put("data2", df2)  # doctest: +SKIP
    >>> print(store.info())  # doctest: +SKIP
    >>> store.close()  # doctest: +SKIP
    <class 'pandas.io.pytables.HDFStore'>
    File path: store.h5
    /data1            frame        (shape->[2,2])
    /data2            frame        (shape->[2,2])
    """
    path = pprint_thing(self._path)
    output = f"{type(self)}\nFile path: {path}\n"

    if self.is_open:
        lkeys = sorted(self.keys())
        if len(lkeys):
            keys = []
            values = []

            for k in lkeys:
                try:
                    s = self.get_storer(k)
                    if s is not None:
                        keys.append(pprint_thing(s.pathname or k))
                        values.append(pprint_thing(s or "invalid_HDFStore node"))
                except AssertionError:
                    # surface any assertion errors for e.g. debugging
                    raise
                except Exception as detail:
                    # a node we cannot interpret: report it rather than fail
                    keys.append(k)
                    dstr = pprint_thing(detail)
                    values.append(f"[invalid_HDFStore node: {dstr}]")

            # adjoin pads keys/values into aligned columns (12-wide gap)
            output += adjoin(12, keys, values)
        else:
            output += "Empty"
    else:
        output += "File is CLOSED"

    return output
# ------------------------------------------------------------------------
# private methods
def _check_if_open(self) -> None:
    """Raise ClosedFileError unless the file handle is currently open."""
    if self.is_open:
        return
    raise ClosedFileError(f"{self._path} file is not open!")
def _validate_format(self, format: str) -> str:
    """Normalize *format* via _FORMAT_MAP; raise TypeError if unknown."""
    try:
        return _FORMAT_MAP[format.lower()]
    except KeyError as err:
        raise TypeError(f"invalid HDFStore format specified [{format}]") from err
def _create_storer(
    self,
    group,
    format=None,
    value: DataFrame | Series | None = None,
    encoding: str = "UTF-8",
    errors: str = "strict",
) -> GenericFixed | Table:
    """
    Return a suitable storer class instance for *group*.

    The pandas_type ("pt") and table_type ("tt") are read from the
    group's attributes when it already exists, otherwise inferred from
    *value* and *format*.
    """
    cls: type[GenericFixed | Table]

    if value is not None and not isinstance(value, (Series, DataFrame)):
        raise TypeError("value must be None, Series, or DataFrame")

    pt = getattr(group._v_attrs, "pandas_type", None)
    tt = getattr(group._v_attrs, "table_type", None)

    # infer the pt from the passed value
    if pt is None:
        if value is None:
            _tables()
            assert _table_mod is not None  # for mypy
            # an existing node without pandas_type: treat a native
            # PyTables Table as a generic frame table
            if getattr(group, "table", None) or isinstance(
                group, _table_mod.table.Table
            ):
                pt = "frame_table"
                tt = "generic_table"
            else:
                raise TypeError(
                    "cannot create a storer if the object is not existing "
                    "nor a value are passed"
                )
        else:
            if isinstance(value, Series):
                pt = "series"
            else:
                pt = "frame"

            # we are actually a table
            if format == "table":
                pt += "_table"

    # a storer node (fixed format)
    if "table" not in pt:
        _STORER_MAP = {"series": SeriesFixed, "frame": FrameFixed}
        try:
            cls = _STORER_MAP[pt]
        except KeyError as err:
            raise TypeError(
                f"cannot properly create the storer for: [_STORER_MAP] [group->"
                f"{group},value->{type(value)},format->{format}"
            ) from err
        return cls(self, group, encoding=encoding, errors=errors)

    # existing node (and must be a table)
    if tt is None:
        # if we are a writer, determine the tt from the index depth
        if value is not None:
            if pt == "series_table":
                index = getattr(value, "index", None)
                if index is not None:
                    if index.nlevels == 1:
                        tt = "appendable_series"
                    elif index.nlevels > 1:
                        tt = "appendable_multiseries"
            elif pt == "frame_table":
                index = getattr(value, "index", None)
                if index is not None:
                    if index.nlevels == 1:
                        tt = "appendable_frame"
                    elif index.nlevels > 1:
                        tt = "appendable_multiframe"

    _TABLE_MAP = {
        "generic_table": GenericTable,
        "appendable_series": AppendableSeriesTable,
        "appendable_multiseries": AppendableMultiSeriesTable,
        "appendable_frame": AppendableFrameTable,
        "appendable_multiframe": AppendableMultiFrameTable,
        "worm": WORMTable,
    }
    try:
        cls = _TABLE_MAP[tt]  # type: ignore[index]
    except KeyError as err:
        raise TypeError(
            f"cannot properly create the storer for: [_TABLE_MAP] [group->"
            f"{group},value->{type(value)},format->{format}"
        ) from err

    return cls(self, group, encoding=encoding, errors=errors)
def _write_to_group(
    self,
    key: str,
    value: DataFrame | Series,
    format,
    axes=None,
    index: bool | list[str] = True,
    append: bool = False,
    complib=None,
    complevel: int | None = None,
    fletcher32=None,
    min_itemsize: int | dict[str, int] | None = None,
    chunksize: int | None = None,
    expectedrows=None,
    dropna: bool = False,
    nan_rep=None,
    data_columns=None,
    encoding=None,
    errors: str = "strict",
    track_times: bool = True,
) -> None:
    """
    Shared implementation behind put/append: locate or create the HDF5
    group for *key*, build a storer, and write *value* through it.
    """
    # we don't want to store a table node at all if our object is 0-len
    # as there are not dtypes
    if getattr(value, "empty", None) and (format == "table" or append):
        return

    group = self._identify_group(key, append)

    s = self._create_storer(group, format, value, encoding=encoding, errors=errors)
    if append:
        # raise if we are trying to append to a Fixed format,
        # or a table that exists (and we are putting)
        if not s.is_table or (s.is_table and format == "fixed" and s.is_exists):
            raise ValueError("Can only append to Tables")
        if not s.is_exists:
            s.set_object_info()
    else:
        s.set_object_info()

    if not s.is_table and complib:
        raise ValueError("Compression not supported on Fixed format stores")

    # write the object
    s.write(
        obj=value,
        axes=axes,
        append=append,
        complib=complib,
        complevel=complevel,
        fletcher32=fletcher32,
        min_itemsize=min_itemsize,
        chunksize=chunksize,
        expectedrows=expectedrows,
        dropna=dropna,
        nan_rep=nan_rep,
        data_columns=data_columns,
        track_times=track_times,
    )

    # tables get their column indexes (re)built after the write
    if isinstance(s, Table) and index:
        s.create_index(columns=index)
def _read_group(self, group: Node):
    """Build a storer for *group*, infer its axes, and read it back."""
    storer = self._create_storer(group)
    storer.infer_axes()
    return storer.read()
def _identify_group(self, key: str, append: bool) -> Node:
"""Identify HDF5 group based on key, delete/create group if needed."""
group = self.get_node(key)
# we make this assertion for mypy; the get_node call will already
# have raised if this is incorrect
assert self._handle is not None
# remove the node if we are not appending
if group is not None and not append:
self._handle.remove_node(group, recursive=True)
group = None
if group is None:
group = self._create_nodes_and_group(key)
return group
def _create_nodes_and_group(self, key: str) -> Node:
"""Create nodes from key and return group name."""
# assertion for mypy
assert self._handle is not None
paths = key.split("/")
# recursively create the groups
path = "/"
for p in paths:
if not len(p):
continue
new_path = path
if not path.endswith("/"):
new_path += "/"
new_path += p
group = self.get_node(new_path)
if group is None:
group = self._handle.create_group(path, p)
path = new_path
return group
class TableIterator:
    """
    Define the iteration interface on a table

    Parameters
    ----------
    store : HDFStore
    s : the referred storer
    func : the function to execute the query
    where : the where of the query
    nrows : the rows to iterate on
    start : the passed start value (default is None)
    stop : the passed stop value (default is None)
    iterator : bool, default False
        Whether to use the default iterator.
    chunksize : the passed chunking value (default is 100000)
    auto_close : bool, default False
        Whether to automatically close the store at the end of iteration.
    """

    chunksize: int | None
    store: HDFStore
    s: GenericFixed | Table

    def __init__(
        self,
        store: HDFStore,
        s: GenericFixed | Table,
        func,
        where,
        nrows,
        start=None,
        stop=None,
        iterator: bool = False,
        chunksize: int | None = None,
        auto_close: bool = False,
    ) -> None:
        self.store = store
        self.s = s
        self.func = func
        self.where = where

        # set start/stop if they are not set if we are a table
        if self.s.is_table:
            if nrows is None:
                nrows = 0
            if start is None:
                start = 0
            if stop is None:
                stop = nrows
            # clamp so we never iterate past the end of the table
            stop = min(nrows, stop)

        self.nrows = nrows
        self.start = start
        self.stop = stop

        # row coordinates are resolved lazily by get_result()
        self.coordinates = None
        if iterator or chunksize is not None:
            if chunksize is None:
                chunksize = 100000
            self.chunksize = int(chunksize)
        else:
            self.chunksize = None

        self.auto_close = auto_close

    def __iter__(self) -> Iterator:
        # iterate
        current = self.start
        if self.coordinates is None:
            raise ValueError("Cannot iterate until get_result is called.")
        while current < self.stop:
            stop = min(current + self.chunksize, self.stop)
            # read only the rows selected by the pre-computed coordinates
            value = self.func(None, None, self.coordinates[current:stop])
            current = stop
            if value is None or not len(value):
                # skip empty chunks rather than yielding them
                continue
            yield value

        self.close()

    def close(self) -> None:
        if self.auto_close:
            self.store.close()

    def get_result(self, coordinates: bool = False):
        # return the actual iterator
        if self.chunksize is not None:
            if not isinstance(self.s, Table):
                raise TypeError("can only use an iterator or chunksize on a table")

            # resolve the where clause once up-front; __iter__ then slices it
            self.coordinates = self.s.read_coordinates(where=self.where)

            return self

        # if specified read via coordinates (necessary for multiple selections
        if coordinates:
            if not isinstance(self.s, Table):
                raise TypeError("can only read_coordinates on a table")
            where = self.s.read_coordinates(
                where=self.where, start=self.start, stop=self.stop
            )
        else:
            where = self.where

        # directly return the result
        results = self.func(self.start, self.stop, where)
        self.close()
        return results
class IndexCol:
    """
    an index column description class

    Parameters
    ----------
    axis : axis which I reference
    values : the ndarray like converted values
    kind : a string description of this type
    typ : the pytables type
    pos : the position in the pytables
    """

    # class-level flags queried by the Table machinery
    is_an_indexable: bool = True
    is_data_indexable: bool = True
    # metadata keys mirrored into the storer's "info" dict (see update_info)
    _info_fields = ["freq", "tz", "index_name"]

    def __init__(
        self,
        name: str,
        values=None,
        kind=None,
        typ=None,
        cname: str | None = None,
        axis=None,
        pos=None,
        freq=None,
        tz=None,
        index_name=None,
        ordered=None,
        table=None,
        meta=None,
        metadata=None,
    ) -> None:
        if not isinstance(name, str):
            raise ValueError("`name` must be a str.")

        self.values = values
        self.kind = kind
        self.typ = typ
        self.name = name
        # column name inside the pytables table; defaults to the pandas name
        self.cname = cname or name
        self.axis = axis
        self.pos = pos
        self.freq = freq
        self.tz = tz
        self.index_name = index_name
        self.ordered = ordered
        self.table = table
        self.meta = meta
        self.metadata = metadata

        if pos is not None:
            self.set_pos(pos)

        # These are ensured as long as the passed arguments match the
        # constructor annotations.
        assert isinstance(self.name, str)
        assert isinstance(self.cname, str)

    @property
    def itemsize(self) -> int:
        # Assumes self.typ has already been initialized
        return self.typ.itemsize

    @property
    def kind_attr(self) -> str:
        # name of the HDF5 attribute holding this column's kind string
        return f"{self.name}_kind"

    def set_pos(self, pos: int) -> None:
        """set the position of this column in the Table"""
        self.pos = pos
        if pos is not None and self.typ is not None:
            # keep the pytables column descriptor's position in sync
            self.typ._v_pos = pos

    def __repr__(self) -> str:
        temp = tuple(
            map(pprint_thing, (self.name, self.cname, self.axis, self.pos, self.kind))
        )
        return ",".join(
            [
                f"{key}->{value}"
                for key, value in zip(["name", "cname", "axis", "pos", "kind"], temp)
            ]
        )

    def __eq__(self, other: object) -> bool:
        """compare 2 col items"""
        return all(
            getattr(self, a, None) == getattr(other, a, None)
            for a in ["name", "cname", "axis", "pos"]
        )

    def __ne__(self, other) -> bool:
        return not self.__eq__(other)

    @property
    def is_indexed(self) -> bool:
        """return whether I am an indexed column"""
        if not hasattr(self.table, "cols"):
            # e.g. if infer hasn't been called yet, self.table will be None.
            return False
        return getattr(self.table.cols, self.cname).is_indexed

    def convert(
        self, values: np.ndarray, nan_rep, encoding: str, errors: str
    ) -> tuple[np.ndarray, np.ndarray] | tuple[Index, Index]:
        """
        Convert the data from this selection to the appropriate pandas type.

        Returns the resulting Index twice (as both "index" and "values") so
        that callers can treat IndexCol and DataCol uniformly.
        """
        assert isinstance(values, np.ndarray), type(values)

        # values is a recarray
        if values.dtype.fields is not None:
            # Copy, otherwise values will be a view
            # preventing the original recarry from being free'ed
            values = values[self.cname].copy()

        val_kind = self.kind
        values = _maybe_convert(values, val_kind, encoding, errors)
        kwargs = {}
        kwargs["name"] = self.index_name

        if self.freq is not None:
            kwargs["freq"] = self.freq

        factory: type[Index | DatetimeIndex] = Index
        if lib.is_np_dtype(values.dtype, "M") or isinstance(
            values.dtype, DatetimeTZDtype
        ):
            factory = DatetimeIndex
        elif values.dtype == "i8" and "freq" in kwargs:
            # PeriodIndex data is stored as i8
            # error: Incompatible types in assignment (expression has type
            # "Callable[[Any, KwArg(Any)], PeriodIndex]", variable has type
            # "Union[Type[Index], Type[DatetimeIndex]]")
            factory = lambda x, **kwds: PeriodIndex.from_ordinals(  # type: ignore[assignment]
                x, freq=kwds.get("freq", None)
            )._rename(kwds["name"])

        # making an Index instance could throw a number of different errors
        try:
            new_pd_index = factory(values, **kwargs)
        except ValueError:
            # if the output freq is different that what we recorded,
            # it should be None (see also 'doc example part 2')
            if "freq" in kwargs:
                kwargs["freq"] = None
            new_pd_index = factory(values, **kwargs)

        final_pd_index: Index
        if self.tz is not None and isinstance(new_pd_index, DatetimeIndex):
            # stored values are UTC; restore the original timezone
            final_pd_index = new_pd_index.tz_localize("UTC").tz_convert(self.tz)
        else:
            final_pd_index = new_pd_index
        return final_pd_index, final_pd_index

    def take_data(self):
        """return the values"""
        return self.values

    @property
    def attrs(self):
        # HDF5 attribute set of the owning table node
        return self.table._v_attrs

    @property
    def description(self):
        return self.table.description

    @property
    def col(self):
        """return my current col description"""
        return getattr(self.description, self.cname, None)

    @property
    def cvalues(self):
        """return my cython values"""
        return self.values

    def __iter__(self) -> Iterator:
        return iter(self.values)

    def maybe_set_size(self, min_itemsize=None) -> None:
        """
        maybe set a string col itemsize:
        min_itemsize can be an integer or a dict with this columns name
        with an integer size
        """
        if self.kind == "string":
            if isinstance(min_itemsize, dict):
                min_itemsize = min_itemsize.get(self.name)

            if min_itemsize is not None and self.typ.itemsize < min_itemsize:
                # widen the string column to the requested minimum size
                self.typ = _tables().StringCol(itemsize=min_itemsize, pos=self.pos)

    def validate_names(self) -> None:
        # no-op hook; overridden by subclasses that constrain names
        pass

    def validate_and_set(self, handler: AppendableTable, append: bool) -> None:
        # full validation + attribute/metadata write cycle for one column
        self.table = handler.table
        self.validate_col()
        self.validate_attr(append)
        self.validate_metadata(handler)
        self.write_metadata(handler)
        self.set_attr()

    def validate_col(self, itemsize=None):
        """validate this column: return the compared against itemsize"""
        # validate this column for string truncation (or reset to the max size)
        if self.kind == "string":
            c = self.col
            if c is not None:
                if itemsize is None:
                    itemsize = self.itemsize
                if c.itemsize < itemsize:
                    raise ValueError(
                        f"Trying to store a string with len [{itemsize}] in "
                        f"[{self.cname}] column but\nthis column has a limit of "
                        f"[{c.itemsize}]!\nConsider using min_itemsize to "
                        "preset the sizes on these columns"
                    )
                return c.itemsize

        return None

    def validate_attr(self, append: bool) -> None:
        # check for backwards incompatibility
        if append:
            existing_kind = getattr(self.attrs, self.kind_attr, None)
            if existing_kind is not None and existing_kind != self.kind:
                raise TypeError(
                    f"incompatible kind in col [{existing_kind} - {self.kind}]"
                )

    def update_info(self, info) -> None:
        """
        set/update the info for this indexable with the key/value
        if there is a conflict raise/warn as needed
        """
        for key in self._info_fields:
            value = getattr(self, key, None)
            idx = info.setdefault(self.name, {})

            existing_value = idx.get(key)
            if key in idx and value is not None and existing_value != value:
                # frequency/name just warn
                if key in ["freq", "index_name"]:
                    ws = attribute_conflict_doc % (key, existing_value, value)
                    warnings.warn(
                        ws, AttributeConflictWarning, stacklevel=find_stack_level()
                    )

                    # reset
                    idx[key] = None
                    setattr(self, key, None)

                else:
                    # tz (and anything else) conflicting is a hard error
                    raise ValueError(
                        f"invalid info for [{self.name}] for [{key}], "
                        f"existing_value [{existing_value}] conflicts with "
                        f"new value [{value}]"
                    )
            elif value is not None or existing_value is not None:
                idx[key] = value

    def set_info(self, info) -> None:
        """set my state from the passed info"""
        idx = info.get(self.name)
        if idx is not None:
            self.__dict__.update(idx)

    def set_attr(self) -> None:
        """set the kind for this column"""
        setattr(self.attrs, self.kind_attr, self.kind)

    def validate_metadata(self, handler: AppendableTable) -> None:
        """validate that kind=category does not change the categories"""
        if self.meta == "category":
            new_metadata = self.metadata
            cur_metadata = handler.read_metadata(self.cname)
            if (
                new_metadata is not None
                and cur_metadata is not None
                and not array_equivalent(
                    new_metadata, cur_metadata, strict_nan=True, dtype_equal=True
                )
            ):
                raise ValueError(
                    "cannot append a categorical with "
                    "different categories to the existing"
                )

    def write_metadata(self, handler: AppendableTable) -> None:
        """set the meta data"""
        if self.metadata is not None:
            handler.write_metadata(self.cname, self.metadata)
class GenericIndexCol(IndexCol):
    """An index which is not represented in the data of the table."""

    @property
    def is_indexed(self) -> bool:
        # a synthetic index is never backed by a pytables column index
        return False

    def convert(
        self, values: np.ndarray, nan_rep, encoding: str, errors: str
    ) -> tuple[Index, Index]:
        """
        Convert the data from this selection to the appropriate pandas type.

        Parameters
        ----------
        values : np.ndarray
        nan_rep : str
        encoding : str
        errors : str
        """
        assert isinstance(values, np.ndarray), type(values)

        # the "index" is just positional; only the length of values matters
        rng = RangeIndex(len(values))
        return rng, rng

    def set_attr(self) -> None:
        # nothing to persist for a synthetic index
        pass
class DataCol(IndexCol):
    """
    a data holding column, by definition this is not indexable

    Parameters
    ----------
    data : the actual data
    cname : the column name in the table to hold the data (typically
        values)
    meta : a string description of the metadata
    metadata : the actual metadata
    """

    # unlike IndexCol, data columns are not indexable by default
    is_an_indexable = False
    is_data_indexable = False
    _info_fields = ["tz", "ordered"]

    def __init__(
        self,
        name: str,
        values=None,
        kind=None,
        typ=None,
        cname: str | None = None,
        pos=None,
        tz=None,
        ordered=None,
        table=None,
        meta=None,
        metadata=None,
        dtype: DtypeArg | None = None,
        data=None,
    ) -> None:
        super().__init__(
            name=name,
            values=values,
            kind=kind,
            typ=typ,
            pos=pos,
            cname=cname,
            tz=tz,
            ordered=ordered,
            table=table,
            meta=meta,
            metadata=metadata,
        )
        self.dtype = dtype
        self.data = data

    @property
    def dtype_attr(self) -> str:
        # name of the HDF5 attribute holding this column's dtype string
        return f"{self.name}_dtype"

    @property
    def meta_attr(self) -> str:
        # name of the HDF5 attribute holding this column's meta string
        return f"{self.name}_meta"

    def __repr__(self) -> str:
        temp = tuple(
            map(
                pprint_thing, (self.name, self.cname, self.dtype, self.kind, self.shape)
            )
        )
        return ",".join(
            [
                f"{key}->{value}"
                for key, value in zip(["name", "cname", "dtype", "kind", "shape"], temp)
            ]
        )

    def __eq__(self, other: object) -> bool:
        """compare 2 col items"""
        return all(
            getattr(self, a, None) == getattr(other, a, None)
            for a in ["name", "cname", "dtype", "pos"]
        )

    def set_data(self, data: ArrayLike) -> None:
        # record data plus the derived dtype name and kind; only valid once
        assert data is not None
        assert self.dtype is None

        data, dtype_name = _get_data_and_dtype_name(data)

        self.data = data
        self.dtype = dtype_name
        self.kind = _dtype_to_kind(dtype_name)

    def take_data(self):
        """return the data"""
        return self.data

    @classmethod
    def _get_atom(cls, values: ArrayLike) -> Col:
        """
        Get an appropriately typed and shaped pytables.Col object for values.
        """
        dtype = values.dtype
        # error: Item "ExtensionDtype" of "Union[ExtensionDtype, dtype[Any]]" has no
        # attribute "itemsize"
        itemsize = dtype.itemsize  # type: ignore[union-attr]

        shape = values.shape
        if values.ndim == 1:
            # EA, use block shape pretending it is 2D
            # TODO(EA2D): not necessary with 2D EAs
            shape = (1, values.size)

        if isinstance(values, Categorical):
            # categoricals are stored via their integer codes
            codes = values.codes
            atom = cls.get_atom_data(shape, kind=codes.dtype.name)
        elif lib.is_np_dtype(dtype, "M") or isinstance(dtype, DatetimeTZDtype):
            atom = cls.get_atom_datetime64(shape)
        elif lib.is_np_dtype(dtype, "m"):
            atom = cls.get_atom_timedelta64(shape)
        elif is_complex_dtype(dtype):
            atom = _tables().ComplexCol(itemsize=itemsize, shape=shape[0])
        elif is_string_dtype(dtype):
            atom = cls.get_atom_string(shape, itemsize)
        else:
            atom = cls.get_atom_data(shape, kind=dtype.name)

        return atom

    @classmethod
    def get_atom_string(cls, shape, itemsize):
        return _tables().StringCol(itemsize=itemsize, shape=shape[0])

    @classmethod
    def get_atom_coltype(cls, kind: str) -> type[Col]:
        """return the PyTables column class for this column"""
        if kind.startswith("uint"):
            k4 = kind[4:]
            col_name = f"UInt{k4}Col"
        elif kind.startswith("period"):
            # we store as integer
            col_name = "Int64Col"
        else:
            kcap = kind.capitalize()
            col_name = f"{kcap}Col"

        return getattr(_tables(), col_name)

    @classmethod
    def get_atom_data(cls, shape, kind: str) -> Col:
        return cls.get_atom_coltype(kind=kind)(shape=shape[0])

    @classmethod
    def get_atom_datetime64(cls, shape):
        # datetimes are stored as i8 nanoseconds
        return _tables().Int64Col(shape=shape[0])

    @classmethod
    def get_atom_timedelta64(cls, shape):
        # timedeltas are stored as i8 nanoseconds
        return _tables().Int64Col(shape=shape[0])

    @property
    def shape(self):
        return getattr(self.data, "shape", None)

    @property
    def cvalues(self):
        """return my cython values"""
        return self.data

    def validate_attr(self, append) -> None:
        """validate that we have the same order as the existing & same dtype"""
        if append:
            existing_fields = getattr(self.attrs, self.kind_attr, None)
            if existing_fields is not None and existing_fields != list(self.values):
                raise ValueError("appended items do not match existing items in table!")

            existing_dtype = getattr(self.attrs, self.dtype_attr, None)
            if existing_dtype is not None and existing_dtype != self.dtype:
                raise ValueError(
                    "appended items dtype do not match existing items dtype in table!"
                )

    def convert(self, values: np.ndarray, nan_rep, encoding: str, errors: str):
        """
        Convert the data from this selection to the appropriate pandas type.

        Parameters
        ----------
        values : np.ndarray
        nan_rep :
        encoding : str
        errors : str

        Returns
        -------
        index : listlike to become an Index
        data : ndarraylike to become a column
        """
        assert isinstance(values, np.ndarray), type(values)

        # values is a recarray
        if values.dtype.fields is not None:
            values = values[self.cname]

        assert self.typ is not None
        if self.dtype is None:
            # Note: in tests we never have timedelta64 or datetime64,
            # so the _get_data_and_dtype_name may be unnecessary
            converted, dtype_name = _get_data_and_dtype_name(values)
            kind = _dtype_to_kind(dtype_name)
        else:
            converted = values
            dtype_name = self.dtype
            kind = self.kind

        assert isinstance(converted, np.ndarray)  # for mypy

        # use the meta if needed
        meta = self.meta
        metadata = self.metadata
        ordered = self.ordered
        tz = self.tz

        assert dtype_name is not None
        # convert to the correct dtype
        dtype = dtype_name

        # reverse converts
        if dtype.startswith("datetime64"):
            # recreate with tz if indicated
            converted = _set_tz(converted, tz, dtype)

        elif dtype == "timedelta64":
            converted = np.asarray(converted, dtype="m8[ns]")
        elif dtype == "date":
            # stored as ordinals (or, in a legacy fallback, timestamps)
            try:
                converted = np.asarray(
                    [date.fromordinal(v) for v in converted], dtype=object
                )
            except ValueError:
                converted = np.asarray(
                    [date.fromtimestamp(v) for v in converted], dtype=object
                )

        elif meta == "category":
            # we have a categorical
            categories = metadata
            codes = converted.ravel()

            # if we have stored a NaN in the categories
            # then strip it; in theory we could have BOTH
            # -1s in the codes and nulls :<
            if categories is None:
                # Handle case of NaN-only categorical columns in which case
                # the categories are an empty array; when this is stored,
                # pytables cannot write a zero-len array, so on readback
                # the categories would be None and `read_hdf()` would fail.
                categories = Index([], dtype=np.float64)
            else:
                mask = isna(categories)
                if mask.any():
                    categories = categories[~mask]
                    # shift codes down to account for removed NaN categories
                    codes[codes != -1] -= mask.astype(int).cumsum()._values

            converted = Categorical.from_codes(
                codes, categories=categories, ordered=ordered, validate=False
            )

        else:
            try:
                converted = converted.astype(dtype, copy=False)
            except TypeError:
                converted = converted.astype("O", copy=False)

        # convert nans / decode
        if kind == "string":
            converted = _unconvert_string_array(
                converted, nan_rep=nan_rep, encoding=encoding, errors=errors
            )

        return self.values, converted

    def set_attr(self) -> None:
        """set the data for this column"""
        setattr(self.attrs, self.kind_attr, self.values)
        setattr(self.attrs, self.meta_attr, self.meta)
        assert self.dtype is not None
        setattr(self.attrs, self.dtype_attr, self.dtype)
class DataIndexableCol(DataCol):
    """Represent a data column that can be indexed."""

    is_data_indexable = True

    def validate_names(self) -> None:
        dtype = Index(self.values).dtype
        if not is_string_dtype(dtype):
            # TODO: should the message here be more specifically non-str?
            raise ValueError("cannot have non-object label DataIndexableCol")

    @classmethod
    def get_atom_string(cls, shape, itemsize):
        # indexable columns are 1-D, so no shape argument is passed
        return _tables().StringCol(itemsize=itemsize)

    @classmethod
    def get_atom_data(cls, shape, kind: str) -> Col:
        return cls.get_atom_coltype(kind=kind)()

    @classmethod
    def get_atom_datetime64(cls, shape):
        # stored as i8 nanoseconds
        return _tables().Int64Col()

    @classmethod
    def get_atom_timedelta64(cls, shape):
        # stored as i8 nanoseconds
        return _tables().Int64Col()
class GenericDataIndexableCol(DataIndexableCol):
    """represent a generic pytables data column"""

    # Marker subclass used when handling generic (non-pandas-written) tables;
    # all behavior is inherited unchanged from DataIndexableCol.
class Fixed:
    """
    represent an object in my store
    facilitate read/write of various types of objects
    this is an abstract base class

    Parameters
    ----------
    parent : HDFStore
    group : Node
        The group node where the table resides.
    """

    # subclasses set the stored pandas kind (e.g. "series", "frame")
    pandas_kind: str
    format_type: str = "fixed"  # GH#30962 needed by dask
    obj_type: type[DataFrame | Series]
    ndim: int
    parent: HDFStore
    is_table: bool = False

    def __init__(
        self,
        parent: HDFStore,
        group: Node,
        encoding: str | None = "UTF-8",
        errors: str = "strict",
    ) -> None:
        assert isinstance(parent, HDFStore), type(parent)
        assert _table_mod is not None  # needed for mypy
        assert isinstance(group, _table_mod.Node), type(group)
        self.parent = parent
        self.group = group
        self.encoding = _ensure_encoding(encoding)
        self.errors = errors

    @property
    def is_old_version(self) -> bool:
        # versions strictly below 0.10.1 are considered "old"
        return self.version[0] <= 0 and self.version[1] <= 10 and self.version[2] < 1

    @property
    def version(self) -> tuple[int, int, int]:
        """compute and set our version"""
        version = getattr(self.group._v_attrs, "pandas_version", None)
        if isinstance(version, str):
            version_tup = tuple(int(x) for x in version.split("."))
            if len(version_tup) == 2:
                # pad a "major.minor" version to "major.minor.micro"
                version_tup = version_tup + (0,)
            assert len(version_tup) == 3  # needed for mypy
            return version_tup
        else:
            return (0, 0, 0)

    @property
    def pandas_type(self):
        return getattr(self.group._v_attrs, "pandas_type", None)

    def __repr__(self) -> str:
        """return a pretty representation of myself"""
        self.infer_axes()
        s = self.shape
        if s is not None:
            if isinstance(s, (list, tuple)):
                jshape = ",".join([pprint_thing(x) for x in s])
                s = f"[{jshape}]"
            return f"{self.pandas_type:12.12} (shape->{s})"
        return self.pandas_type

    def set_object_info(self) -> None:
        """set my pandas type & version"""
        self.attrs.pandas_type = str(self.pandas_kind)
        self.attrs.pandas_version = str(_version)

    def copy(self) -> Fixed:
        # shallow copy is sufficient; group/parent handles are shared
        new_self = copy.copy(self)
        return new_self

    @property
    def shape(self):
        return self.nrows

    @property
    def pathname(self):
        return self.group._v_pathname

    @property
    def _handle(self):
        return self.parent._handle

    @property
    def _filters(self):
        return self.parent._filters

    @property
    def _complevel(self) -> int:
        return self.parent._complevel

    @property
    def _fletcher32(self) -> bool:
        return self.parent._fletcher32

    @property
    def attrs(self):
        return self.group._v_attrs

    def set_attrs(self) -> None:
        """set our object attributes"""

    def get_attrs(self) -> None:
        """get our object attributes"""

    @property
    def storable(self):
        """return my storable"""
        return self.group

    @property
    def is_exists(self) -> bool:
        # overridden by subclasses that can detect an existing node
        return False

    @property
    def nrows(self):
        return getattr(self.storable, "nrows", None)

    def validate(self, other) -> Literal[True] | None:
        """validate against an existing storable"""
        if other is None:
            return None
        return True

    def validate_version(self, where=None) -> None:
        """are we trying to operate on an old version?"""

    def infer_axes(self) -> bool:
        """
        infer the axes of my storer

        return a boolean indicating if we have a valid storer or not
        """
        s = self.storable
        if s is None:
            return False
        self.get_attrs()
        return True

    def read(
        self,
        where=None,
        columns=None,
        start: int | None = None,
        stop: int | None = None,
    ) -> Series | DataFrame:
        raise NotImplementedError(
            "cannot read on an abstract storer: subclasses should implement"
        )

    def write(self, obj, **kwargs) -> None:
        raise NotImplementedError(
            "cannot write on an abstract storer: subclasses should implement"
        )

    def delete(
        self, where=None, start: int | None = None, stop: int | None = None
    ) -> int | None:
        """
        support fully deleting the node in its entirety (only) - where
        specification must be None
        """
        if com.all_none(where, start, stop):
            self._handle.remove_node(self.group, recursive=True)
            return None

        raise TypeError("cannot delete on an abstract storer")
class GenericFixed(Fixed):
    """a generified fixed version"""

    # maps index classes to the string alias persisted in HDF5 attrs
    _index_type_map = {DatetimeIndex: "datetime", PeriodIndex: "period"}
    _reverse_index_map = {v: k for k, v in _index_type_map.items()}
    attributes: list[str] = []

    # indexer helpers
    def _class_to_alias(self, cls) -> str:
        return self._index_type_map.get(cls, "")

    def _alias_to_class(self, alias):
        if isinstance(alias, type):  # pragma: no cover
            # compat: for a short period of time master stored types
            return alias
        return self._reverse_index_map.get(alias, Index)

    def _get_index_factory(self, attrs):
        """
        Return a (factory, kwargs) pair used to reconstruct a stored index
        from the node attributes.
        """
        index_class = self._alias_to_class(getattr(attrs, "index_class", ""))

        factory: Callable

        if index_class == DatetimeIndex:

            def f(values, freq=None, tz=None):
                # data are already in UTC, localize and convert if tz present
                dta = DatetimeArray._simple_new(
                    values.values, dtype=values.dtype, freq=freq
                )
                result = DatetimeIndex._simple_new(dta, name=None)
                if tz is not None:
                    result = result.tz_localize("UTC").tz_convert(tz)
                return result

            factory = f
        elif index_class == PeriodIndex:

            def f(values, freq=None, tz=None):
                dtype = PeriodDtype(freq)
                parr = PeriodArray._simple_new(values, dtype=dtype)
                return PeriodIndex._simple_new(parr, name=None)

            factory = f
        else:
            factory = index_class

        kwargs = {}
        if "freq" in attrs:
            kwargs["freq"] = attrs["freq"]
            if index_class is Index:
                # DTI/PI would be gotten by _alias_to_class
                factory = TimedeltaIndex

        if "tz" in attrs:
            kwargs["tz"] = attrs["tz"]
            assert index_class is DatetimeIndex  # just checking

        return factory, kwargs

    def validate_read(self, columns, where) -> None:
        """
        raise if any keywords are passed which are not-None
        """
        if columns is not None:
            raise TypeError(
                "cannot pass a column specification when reading "
                "a Fixed format store. this store must be selected in its entirety"
            )
        if where is not None:
            raise TypeError(
                "cannot pass a where specification when reading "
                "from a Fixed format store. this store must be selected in its entirety"
            )

    @property
    def is_exists(self) -> bool:
        return True

    def set_attrs(self) -> None:
        """set our object attributes"""
        self.attrs.encoding = self.encoding
        self.attrs.errors = self.errors

    def get_attrs(self) -> None:
        """retrieve our attributes"""
        self.encoding = _ensure_encoding(getattr(self.attrs, "encoding", None))
        self.errors = getattr(self.attrs, "errors", "strict")
        for n in self.attributes:
            setattr(self, n, getattr(self.attrs, n, None))

    def write(self, obj, **kwargs) -> None:
        self.set_attrs()

    def read_array(self, key: str, start: int | None = None, stop: int | None = None):
        """read an array for the specified node (off of group"""
        import tables

        node = getattr(self.group, key)
        attrs = node._v_attrs

        transposed = getattr(attrs, "transposed", False)

        if isinstance(node, tables.VLArray):
            # variable-length arrays store the payload in a single row
            ret = node[0][start:stop]
        else:
            dtype = getattr(attrs, "value_type", None)
            shape = getattr(attrs, "shape", None)

            if shape is not None:
                # length 0 axis
                ret = np.empty(shape, dtype=dtype)
            else:
                ret = node[start:stop]

            if dtype and dtype.startswith("datetime64"):
                # reconstruct a timezone if indicated
                tz = getattr(attrs, "tz", None)
                ret = _set_tz(ret, tz, dtype)

            elif dtype == "timedelta64":
                ret = np.asarray(ret, dtype="m8[ns]")

        if transposed:
            return ret.T
        else:
            return ret

    def read_index(
        self, key: str, start: int | None = None, stop: int | None = None
    ) -> Index:
        variety = getattr(self.attrs, f"{key}_variety")

        if variety == "multi":
            return self.read_multi_index(key, start=start, stop=stop)
        elif variety == "regular":
            node = getattr(self.group, key)
            index = self.read_index_node(node, start=start, stop=stop)
            return index
        else:  # pragma: no cover
            raise TypeError(f"unrecognized index variety: {variety}")

    def write_index(self, key: str, index: Index) -> None:
        if isinstance(index, MultiIndex):
            setattr(self.attrs, f"{key}_variety", "multi")
            self.write_multi_index(key, index)
        else:
            setattr(self.attrs, f"{key}_variety", "regular")
            converted = _convert_index("index", index, self.encoding, self.errors)

            self.write_array(key, converted.values)

            node = getattr(self.group, key)
            node._v_attrs.kind = converted.kind
            node._v_attrs.name = index.name

            if isinstance(index, (DatetimeIndex, PeriodIndex)):
                node._v_attrs.index_class = self._class_to_alias(type(index))

            if isinstance(index, (DatetimeIndex, PeriodIndex, TimedeltaIndex)):
                node._v_attrs.freq = index.freq

            if isinstance(index, DatetimeIndex) and index.tz is not None:
                node._v_attrs.tz = _get_tz(index.tz)

    def write_multi_index(self, key: str, index: MultiIndex) -> None:
        setattr(self.attrs, f"{key}_nlevels", index.nlevels)

        for i, (lev, level_codes, name) in enumerate(
            zip(index.levels, index.codes, index.names)
        ):
            # write the level
            if isinstance(lev.dtype, ExtensionDtype):
                raise NotImplementedError(
                    "Saving a MultiIndex with an extension dtype is not supported."
                )
            level_key = f"{key}_level{i}"
            conv_level = _convert_index(level_key, lev, self.encoding, self.errors)
            self.write_array(level_key, conv_level.values)
            node = getattr(self.group, level_key)
            node._v_attrs.kind = conv_level.kind
            node._v_attrs.name = name

            # write the name
            setattr(node._v_attrs, f"{key}_name{name}", name)

            # write the labels
            label_key = f"{key}_label{i}"
            self.write_array(label_key, level_codes)

    def read_multi_index(
        self, key: str, start: int | None = None, stop: int | None = None
    ) -> MultiIndex:
        nlevels = getattr(self.attrs, f"{key}_nlevels")

        levels = []
        codes = []
        names: list[Hashable] = []
        for i in range(nlevels):
            level_key = f"{key}_level{i}"
            node = getattr(self.group, level_key)
            lev = self.read_index_node(node, start=start, stop=stop)
            levels.append(lev)
            names.append(lev.name)

            label_key = f"{key}_label{i}"
            level_codes = self.read_array(label_key, start=start, stop=stop)
            codes.append(level_codes)

        return MultiIndex(
            levels=levels, codes=codes, names=names, verify_integrity=True
        )

    def read_index_node(
        self, node: Node, start: int | None = None, stop: int | None = None
    ) -> Index:
        data = node[start:stop]
        # If the index was an empty array write_array_empty() will
        # have written a sentinel. Here we replace it with the original.
        if "shape" in node._v_attrs and np.prod(node._v_attrs.shape) == 0:
            data = np.empty(node._v_attrs.shape, dtype=node._v_attrs.value_type)
        kind = node._v_attrs.kind
        name = None

        if "name" in node._v_attrs:
            name = _ensure_str(node._v_attrs.name)

        attrs = node._v_attrs
        factory, kwargs = self._get_index_factory(attrs)

        if kind in ("date", "object"):
            index = factory(
                _unconvert_index(
                    data, kind, encoding=self.encoding, errors=self.errors
                ),
                dtype=object,
                **kwargs,
            )
        else:
            index = factory(
                _unconvert_index(
                    data, kind, encoding=self.encoding, errors=self.errors
                ),
                **kwargs,
            )

        index.name = name

        return index

    def write_array_empty(self, key: str, value: ArrayLike) -> None:
        """write a 0-len array"""
        # ugly hack for length 0 axes
        arr = np.empty((1,) * value.ndim)
        self._handle.create_array(self.group, key, arr)
        node = getattr(self.group, key)
        # store the real dtype/shape so read_index_node can reconstruct
        node._v_attrs.value_type = str(value.dtype)
        node._v_attrs.shape = value.shape

    def write_array(
        self, key: str, obj: AnyArrayLike, items: Index | None = None
    ) -> None:
        # TODO: we only have a few tests that get here, the only EA
        # that gets passed is DatetimeArray, and we never have
        # both self._filters and EA

        value = extract_array(obj, extract_numpy=True)

        if key in self.group:
            self._handle.remove_node(self.group, key)

        # Transform needed to interface with pytables row/col notation
        empty_array = value.size == 0
        transposed = False

        if isinstance(value.dtype, CategoricalDtype):
            raise NotImplementedError(
                "Cannot store a category dtype in a HDF5 dataset that uses format="
                '"fixed". Use format="table".'
            )
        if not empty_array:
            if hasattr(value, "T"):
                # ExtensionArrays (1d) may not have transpose.
                value = value.T
                transposed = True

        atom = None
        if self._filters is not None:
            with suppress(ValueError):
                # get the atom for this datatype
                atom = _tables().Atom.from_dtype(value.dtype)

        if atom is not None:
            # We only get here if self._filters is non-None and
            # the Atom.from_dtype call succeeded

            # create an empty chunked array and fill it from value
            if not empty_array:
                ca = self._handle.create_carray(
                    self.group, key, atom, value.shape, filters=self._filters
                )
                ca[:] = value

            else:
                self.write_array_empty(key, value)

        elif value.dtype.type == np.object_:
            # infer the type, warn if we have a non-string type here (for
            # performance)
            inferred_type = lib.infer_dtype(value, skipna=False)
            if empty_array:
                pass
            elif inferred_type == "string":
                pass
            elif get_option("performance_warnings"):
                ws = performance_doc % (inferred_type, key, items)
                warnings.warn(ws, PerformanceWarning, stacklevel=find_stack_level())

            vlarr = self._handle.create_vlarray(self.group, key, _tables().ObjectAtom())
            vlarr.append(value)

        elif lib.is_np_dtype(value.dtype, "M"):
            # datetimes stored as raw i8, with the dtype recorded as an attr
            self._handle.create_array(self.group, key, value.view("i8"))
            getattr(self.group, key)._v_attrs.value_type = str(value.dtype)

        elif isinstance(value.dtype, DatetimeTZDtype):
            # store as UTC
            # with a zone

            # error: Item "ExtensionArray" of "Union[Any, ExtensionArray]" has no
            # attribute "asi8"
            self._handle.create_array(
                self.group,
                key,
                value.asi8,  # type: ignore[union-attr]
            )

            node = getattr(self.group, key)
            # error: Item "ExtensionArray" of "Union[Any, ExtensionArray]" has no
            # attribute "tz"
            node._v_attrs.tz = _get_tz(value.tz)  # type: ignore[union-attr]
            node._v_attrs.value_type = f"datetime64[{value.dtype.unit}]"
        elif lib.is_np_dtype(value.dtype, "m"):
            self._handle.create_array(self.group, key, value.view("i8"))
            getattr(self.group, key)._v_attrs.value_type = "timedelta64"
        elif empty_array:
            self.write_array_empty(key, value)
        else:
            self._handle.create_array(self.group, key, value)

        getattr(self.group, key)._v_attrs.transposed = transposed
class SeriesFixed(GenericFixed):
    """Fixed-format storer for a Series: an "index" node plus a "values" node."""

    pandas_kind = "series"
    attributes = ["name"]
    name: Hashable

    @property
    def shape(self) -> tuple[int] | None:
        # Length of the stored values node, or None when it cannot be read.
        try:
            nrows = len(self.group.values)
        except (TypeError, AttributeError):
            return None
        return (nrows,)

    def read(
        self,
        where=None,
        columns=None,
        start: int | None = None,
        stop: int | None = None,
    ) -> Series:
        """Reconstruct the stored Series, optionally sliced by start/stop."""
        self.validate_read(columns, where)
        idx = self.read_index("index", start=start, stop=stop)
        vals = self.read_array("values", start=start, stop=stop)
        ser = Series(vals, index=idx, name=self.name, copy=False)
        # Restore string dtype when the option is enabled and the data allows it.
        if using_string_dtype() and is_string_array(vals, skipna=True):
            ser = ser.astype(StringDtype(na_value=np.nan))
        return ser

    def write(self, obj, **kwargs) -> None:
        """Write the Series ``obj`` as separate index/values arrays plus its name."""
        super().write(obj, **kwargs)
        self.write_index("index", obj.index)
        self.write_array("values", obj)
        self.attrs.name = obj.name
class BlockManagerFixed(GenericFixed):
    """Fixed-format storer for a block-managed (2D) object."""
    attributes = ["ndim", "nblocks"]
    # number of consolidated blocks persisted for this object
    nblocks: int
    @property
    def shape(self) -> Shape | None:
        """Shape of the stored object, or None if it cannot be determined."""
        try:
            ndim = self.ndim
            # items
            # total item (column) count: sum over each per-block items node
            items = 0
            for i in range(self.nblocks):
                node = getattr(self.group, f"block{i}_items")
                shape = getattr(node, "shape", None)
                if shape is not None:
                    items += shape[0]
            # data shape
            # the non-item dimension(s) come from the first values block
            node = self.group.block0_values
            shape = getattr(node, "shape", None)
            if shape is not None:
                shape = list(shape[0 : (ndim - 1)])
            else:
                shape = []
            shape.append(items)
            return shape
        except AttributeError:
            # e.g. the expected child nodes are missing from the group
            return None
    def read(
        self,
        where=None,
        columns=None,
        start: int | None = None,
        stop: int | None = None,
    ) -> DataFrame:
        """Reconstruct the stored DataFrame; start/stop slice the row axis only."""
        # start, stop applied to rows, so 0th axis only
        self.validate_read(columns, where)
        select_axis = self.obj_type()._get_block_manager_axis(0)
        axes = []
        for i in range(self.ndim):
            _start, _stop = (start, stop) if i == select_axis else (None, None)
            ax = self.read_index(f"axis{i}", start=_start, stop=_stop)
            axes.append(ax)
        items = axes[0]
        dfs = []
        for i in range(self.nblocks):
            blk_items = self.read_index(f"block{i}_items")
            # NOTE(review): _start/_stop deliberately carry over from the last
            # iteration of the axes loop above — assumes select_axis is the
            # final axis for 2D objects; confirm before refactoring.
            values = self.read_array(f"block{i}_values", start=_start, stop=_stop)
            columns = items[items.get_indexer(blk_items)]
            df = DataFrame(values.T, columns=columns, index=axes[1], copy=False)
            if using_string_dtype() and is_string_array(values, skipna=True):
                df = df.astype(StringDtype(na_value=np.nan))
            dfs.append(df)
        if len(dfs) > 0:
            out = concat(dfs, axis=1).copy()
            # restore the original column order
            return out.reindex(columns=items)
        return DataFrame(columns=axes[0], index=axes[1])
    def write(self, obj, **kwargs) -> None:
        """Write ``obj`` axis-by-axis and block-by-block under this group."""
        super().write(obj, **kwargs)
        data = obj._mgr
        if not data.is_consolidated():
            data = data.consolidate()
        self.attrs.ndim = data.ndim
        for i, ax in enumerate(data.axes):
            if i == 0 and (not ax.is_unique):
                raise ValueError("Columns index has to be unique for fixed format")
            self.write_index(f"axis{i}", ax)
        # Supporting mixed-type DataFrame objects...nontrivial
        self.attrs.nblocks = len(data.blocks)
        for i, blk in enumerate(data.blocks):
            # I have no idea why, but writing values before items fixed #2299
            blk_items = data.items.take(blk.mgr_locs)
            self.write_array(f"block{i}_values", blk.values, items=blk_items)
            self.write_index(f"block{i}_items", blk_items)
class FrameFixed(BlockManagerFixed):
    """Fixed-format storer specialized for DataFrame objects."""

    pandas_kind = "frame"
    obj_type = DataFrame
class Table(Fixed):
"""
represent a table:
facilitate read/write of various types of tables
Attrs in Table Node
-------------------
These are attributes that are store in the main table node, they are
necessary to recreate these tables when read back in.
index_axes : a list of tuples of the (original indexing axis and
index column)
non_index_axes: a list of tuples of the (original index axis and
columns on a non-indexing axis)
values_axes : a list of the columns which comprise the data of this
table
data_columns : a list of the columns that we are allowing indexing
(these become single columns in values_axes)
nan_rep : the string to use for nan representations for string
objects
levels : the names of levels
metadata : the names of the metadata columns
"""
pandas_kind = "wide_table"
format_type: str = "table" # GH#30962 needed by dask
table_type: str
levels: int | list[Hashable] = 1
is_table = True
metadata: list
def __init__(
self,
parent: HDFStore,
group: Node,
encoding: str | None = None,
errors: str = "strict",
index_axes: list[IndexCol] | None = None,
non_index_axes: list[tuple[AxisInt, Any]] | None = None,
values_axes: list[DataCol] | None = None,
data_columns: list | None = None,
info: dict | None = None,
nan_rep=None,
) -> None:
super().__init__(parent, group, encoding=encoding, errors=errors)
self.index_axes = index_axes or []
self.non_index_axes = non_index_axes or []
self.values_axes = values_axes or []
self.data_columns = data_columns or []
self.info = info or {}
self.nan_rep = nan_rep
@property
def table_type_short(self) -> str:
return self.table_type.split("_")[0]
def __repr__(self) -> str:
"""return a pretty representation of myself"""
self.infer_axes()
jdc = ",".join(self.data_columns) if len(self.data_columns) else ""
dc = f",dc->[{jdc}]"
ver = ""
if self.is_old_version:
jver = ".".join([str(x) for x in self.version])
ver = f"[{jver}]"
jindex_axes = ",".join([a.name for a in self.index_axes])
return (
f"{self.pandas_type:12.12}{ver} "
f"(typ->{self.table_type_short},nrows->{self.nrows},"
f"ncols->{self.ncols},indexers->[{jindex_axes}]{dc})"
)
def __getitem__(self, c: str):
"""return the axis for c"""
for a in self.axes:
if c == a.name:
return a
return None
    def validate(self, other) -> None:
        """validate against an existing table"""
        if other is None:
            return
        if other.table_type != self.table_type:
            raise TypeError(
                "incompatible table_type with existing "
                f"[{other.table_type} - {self.table_type}]"
            )
        # compare each axis collection element-wise so the error can name
        # the first mismatching axis
        for c in ["index_axes", "non_index_axes", "values_axes"]:
            sv = getattr(self, c, None)
            ov = getattr(other, c, None)
            if sv != ov:
                # show the error for the specific axes
                # Argument 1 to "enumerate" has incompatible type
                # "Optional[Any]"; expected "Iterable[Any]" [arg-type]
                for i, sax in enumerate(sv):  # type: ignore[arg-type]
                    # Value of type "Optional[Any]" is not indexable [index]
                    oax = ov[i]  # type: ignore[index]
                    if sax != oax:
                        raise ValueError(
                            f"invalid combination of [{c}] on appending data "
                            f"[{sax}] vs current table [{oax}]"
                        )
                # should never get here
                raise Exception(
                    f"invalid combination of [{c}] on appending data [{sv}] vs "
                    f"current table [{ov}]"
                )
    @property
    def is_multi_index(self) -> bool:
        """the levels attribute is 1 or a list in the case of a multi-index"""
        return isinstance(self.levels, list)
    def validate_multiindex(
        self, obj: DataFrame | Series
    ) -> tuple[DataFrame, list[Hashable]]:
        """
        validate that we can store the multi-index; reset and return the
        new object
        """
        # unnamed levels get placeholder names so they can round-trip as columns
        levels = com.fill_missing_names(obj.index.names)
        try:
            reset_obj = obj.reset_index()
        except ValueError as err:
            # reset_index raises when level names collide with column names
            raise ValueError(
                "duplicate names/columns in the multi-index when storing as a table"
            ) from err
        assert isinstance(reset_obj, DataFrame)  # for mypy
        return reset_obj, levels
@property
def nrows_expected(self) -> int:
"""based on our axes, compute the expected nrows"""
return np.prod([i.cvalues.shape[0] for i in self.index_axes])
    @property
    def is_exists(self) -> bool:
        """has this table been created"""
        return "table" in self.group
    @property
    def storable(self):
        # the "table" child node of our group, or None if not yet created
        return getattr(self.group, "table", None)
    @property
    def table(self):
        """return the table group (this is my storable)"""
        return self.storable
    @property
    def dtype(self):
        # dtype of the underlying table node
        return self.table.dtype
    @property
    def description(self):
        # description object of the underlying table node
        return self.table.description
    @property
    def axes(self) -> itertools.chain[IndexCol]:
        # all axes: index axes first, then values axes
        return itertools.chain(self.index_axes, self.values_axes)
    @property
    def ncols(self) -> int:
        """the number of total columns in the values axes"""
        return sum(len(a.values) for a in self.values_axes)
    @property
    def is_transposed(self) -> bool:
        # table format is never transposed
        return False
    @property
    def data_orientation(self) -> tuple[int, ...]:
        """return a tuple of my permutated axes, non_indexable at the front"""
        return tuple(
            itertools.chain(
                [int(a[0]) for a in self.non_index_axes],
                [int(a.axis) for a in self.index_axes],
            )
        )
    def queryables(self) -> dict[str, Any]:
        """return a dict of the kinds allowable columns for this object"""
        # mypy doesn't recognize DataFrame._AXIS_NAMES, so we re-write it here
        axis_names = {0: "index", 1: "columns"}
        # compute the values_axes queryables
        d1 = [(a.cname, a) for a in self.index_axes]
        # non-index axes are queryable by axis name only (no column object)
        d2 = [(axis_names[axis], None) for axis, values in self.non_index_axes]
        # among the values axes, only data columns are individually queryable
        d3 = [
            (v.cname, v) for v in self.values_axes if v.name in set(self.data_columns)
        ]
        return dict(d1 + d2 + d3)
    def index_cols(self) -> list[tuple[Any, Any]]:
        """return a list of my index cols"""
        # Note: each `i.cname` below is assured to be a str.
        return [(i.axis, i.cname) for i in self.index_axes]
    def values_cols(self) -> list[str]:
        """return a list of my values cols"""
        return [i.cname for i in self.values_axes]
    def _get_metadata_path(self, key: str) -> str:
        """return the metadata pathname for this key"""
        group = self.group._v_pathname
        return f"{group}/meta/{key}/meta"
    def write_metadata(self, key: str, values: np.ndarray) -> None:
        """
        Write out a metadata array to the key as a fixed-format Series.
        Parameters
        ----------
        key : str
        values : ndarray
        """
        # stored via the parent HDFStore so it shares encoding/error settings
        self.parent.put(
            self._get_metadata_path(key),
            Series(values, copy=False),
            format="table",
            encoding=self.encoding,
            errors=self.errors,
            nan_rep=self.nan_rep,
        )
    def read_metadata(self, key: str):
        """return the meta data array for this key"""
        # present only when a `meta` child group with this key was written
        if getattr(getattr(self.group, "meta", None), key, None) is not None:
            return self.parent.select(self._get_metadata_path(key))
        return None
    def set_attrs(self) -> None:
        """set our table type & indexables"""
        self.attrs.table_type = str(self.table_type)
        self.attrs.index_cols = self.index_cols()
        self.attrs.values_cols = self.values_cols()
        self.attrs.non_index_axes = self.non_index_axes
        self.attrs.data_columns = self.data_columns
        self.attrs.nan_rep = self.nan_rep
        self.attrs.encoding = self.encoding
        self.attrs.errors = self.errors
        self.attrs.levels = self.levels
        self.attrs.info = self.info
    def get_attrs(self) -> None:
        """retrieve our attributes"""
        # missing attributes fall back to empty containers / defaults
        self.non_index_axes = getattr(self.attrs, "non_index_axes", None) or []
        self.data_columns = getattr(self.attrs, "data_columns", None) or []
        self.info = getattr(self.attrs, "info", None) or {}
        self.nan_rep = getattr(self.attrs, "nan_rep", None)
        self.encoding = _ensure_encoding(getattr(self.attrs, "encoding", None))
        self.errors = getattr(self.attrs, "errors", "strict")
        self.levels: list[Hashable] = getattr(self.attrs, "levels", None) or []
        # partition the recreated indexables into index vs values axes
        self.index_axes = [a for a in self.indexables if a.is_an_indexable]
        self.values_axes = [a for a in self.indexables if not a.is_an_indexable]
    def validate_version(self, where=None) -> None:
        """are we trying to operate on an old version?"""
        # only warn when an actual selection is being attempted
        if where is not None:
            if self.is_old_version:
                ws = incompatibility_doc % ".".join([str(x) for x in self.version])
                warnings.warn(
                    ws,
                    IncompatibilityWarning,
                    stacklevel=find_stack_level(),
                )
    def validate_min_itemsize(self, min_itemsize) -> None:
        """
        validate the min_itemsize doesn't contain items that are not in the
        axes this needs data_columns to be defined
        """
        if min_itemsize is None:
            return
        if not isinstance(min_itemsize, dict):
            # a scalar min_itemsize applies globally; nothing to validate
            return
        q = self.queryables()
        for k in min_itemsize:
            # ok, apply generally
            if k == "values":
                continue
            if k not in q:
                raise ValueError(
                    f"min_itemsize has the key [{k}] which is not an axis or "
                    "data_column"
                )
    @cache_readonly
    def indexables(self):
        """create/cache the indexables if they don't exist"""
        _indexables = []
        desc = self.description
        table_attrs = self.table.attrs
        # Note: each of the `name` kwargs below are str, ensured
        # by the definition in index_cols.
        # index columns
        for i, (axis, name) in enumerate(self.attrs.index_cols):
            atom = getattr(desc, name)
            md = self.read_metadata(name)
            # metadata is present for categorical columns (see _create_axes)
            meta = "category" if md is not None else None
            kind_attr = f"{name}_kind"
            kind = getattr(table_attrs, kind_attr, None)
            index_col = IndexCol(
                name=name,
                axis=axis,
                pos=i,
                kind=kind,
                typ=atom,
                table=self.table,
                meta=meta,
                metadata=md,
            )
            _indexables.append(index_col)
        # values columns
        dc = set(self.data_columns)
        base_pos = len(_indexables)
        def f(i, c: str) -> DataCol:
            # build the values column at position base_pos + i for stored
            # column name `c`; data columns get the indexable subclass
            assert isinstance(c, str)
            klass = DataCol
            if c in dc:
                klass = DataIndexableCol
            atom = getattr(desc, c)
            adj_name = _maybe_adjust_name(c, self.version)
            # TODO: why kind_attr here?
            values = getattr(table_attrs, f"{adj_name}_kind", None)
            dtype = getattr(table_attrs, f"{adj_name}_dtype", None)
            # Argument 1 to "_dtype_to_kind" has incompatible type
            # "Optional[Any]"; expected "str" [arg-type]
            kind = _dtype_to_kind(dtype)  # type: ignore[arg-type]
            md = self.read_metadata(c)
            # TODO: figure out why these two versions of `meta` dont always match.
            # meta = "category" if md is not None else None
            meta = getattr(table_attrs, f"{adj_name}_meta", None)
            obj = klass(
                name=adj_name,
                cname=c,
                values=values,
                kind=kind,
                pos=base_pos + i,
                typ=atom,
                table=self.table,
                meta=meta,
                metadata=md,
                dtype=dtype,
            )
            return obj
        # Note: the definition of `values_cols` ensures that each
        # `c` below is a str.
        _indexables.extend([f(i, c) for i, c in enumerate(self.attrs.values_cols)])
        return _indexables
    def create_index(
        self, columns=None, optlevel=None, kind: str | None = None
    ) -> None:
        """
        Create a pytables index on the specified columns.
        Parameters
        ----------
        columns : None, bool, or listlike[str]
            Indicate which columns to create an index on.
            * False : Do not create any indexes.
            * True : Create indexes on all columns.
            * None : Create indexes on all columns.
            * listlike : Create indexes on the given columns.
        optlevel : int or None, default None
            Optimization level, if None, pytables defaults to 6.
        kind : str or None, default None
            Kind of index, if None, pytables defaults to "medium".
        Raises
        ------
        TypeError if trying to create an index on a complex-type column.
        Notes
        -----
        Cannot index Time64Col or ComplexCol.
        Pytables must be >= 3.0.
        """
        if not self.infer_axes():
            # nothing has been written yet
            return
        if columns is False:
            return
        # index all indexables and data_columns
        if columns is None or columns is True:
            columns = [a.cname for a in self.axes if a.is_data_indexable]
        if not isinstance(columns, (tuple, list)):
            columns = [columns]
        kw = {}
        if optlevel is not None:
            kw["optlevel"] = optlevel
        if kind is not None:
            kw["kind"] = kind
        table = self.table
        for c in columns:
            v = getattr(table.cols, c, None)
            if v is not None:
                # remove the index if the kind/optlevel have changed
                if v.is_indexed:
                    index = v.index
                    cur_optlevel = index.optlevel
                    cur_kind = index.kind
                    if kind is not None and cur_kind != kind:
                        v.remove_index()
                    else:
                        # keep the existing kind when re-creating below
                        kw["kind"] = cur_kind
                    if optlevel is not None and cur_optlevel != optlevel:
                        v.remove_index()
                    else:
                        # keep the existing optlevel when re-creating below
                        kw["optlevel"] = cur_optlevel
                # create the index
                if not v.is_indexed:
                    if v.type.startswith("complex"):
                        raise TypeError(
                            "Columns containing complex values can be stored but "
                            "cannot be indexed when using table format. Either use "
                            "fixed format, set index=False, or do not include "
                            "the columns containing complex values to "
                            "data_columns when initializing the table."
                        )
                    v.create_index(**kw)
            elif c in self.non_index_axes[0][1]:
                # GH 28156
                raise AttributeError(
                    f"column {c} is not a data_column.\n"
                    f"In order to read column {c} you must reload the dataframe \n"
                    f"into HDFStore and include {c} with the data_columns argument."
                )
    def _read_axes(
        self, where, start: int | None = None, stop: int | None = None
    ) -> list[tuple[np.ndarray, np.ndarray] | tuple[Index, Index]]:
        """
        Create the axes sniffed from the table.
        Parameters
        ----------
        where : ???
        start : int or None, default None
        stop : int or None, default None
        Returns
        -------
        List[Tuple[index_values, column_values]]
        """
        # create the selection
        selection = Selection(self, where=where, start=start, stop=stop)
        values = selection.select()
        results = []
        # convert the data
        # each axis converts its own slice of the selected record values
        for a in self.axes:
            a.set_info(self.info)
            res = a.convert(
                values,
                nan_rep=self.nan_rep,
                encoding=self.encoding,
                errors=self.errors,
            )
            results.append(res)
        return results
    @classmethod
    def get_object(cls, obj, transposed: bool):
        """return the data for this obj"""
        # identity here; subclasses may override
        return obj
    def validate_data_columns(self, data_columns, min_itemsize, non_index_axes) -> list:
        """
        take the input data_columns and min_itemize and create a data
        columns spec
        """
        if not len(non_index_axes):
            return []
        axis, axis_labels = non_index_axes[0]
        info = self.info.get(axis, {})
        # data columns cannot be used with a MultiIndex on the non-index axis
        if info.get("type") == "MultiIndex" and data_columns:
            raise ValueError(
                f"cannot use a multi-index on axis [{axis}] with "
                f"data_columns {data_columns}"
            )
        # evaluate the passed data_columns, True == use all columns
        # take only valid axis labels
        if data_columns is True:
            data_columns = list(axis_labels)
        elif data_columns is None:
            data_columns = []
        # if min_itemsize is a dict, add the keys (exclude 'values')
        if isinstance(min_itemsize, dict):
            existing_data_columns = set(data_columns)
            data_columns = list(data_columns)  # ensure we do not modify
            data_columns.extend(
                [
                    k
                    for k in min_itemsize.keys()
                    if k != "values" and k not in existing_data_columns
                ]
            )
        # return valid columns in the order of our axis
        return [c for c in data_columns if c in axis_labels]
    def _create_axes(
        self,
        axes,
        obj: DataFrame,
        validate: bool = True,
        nan_rep=None,
        data_columns=None,
        min_itemsize=None,
    ):
        """
        Create and return the axes.
        Parameters
        ----------
        axes: list or None
            The names or numbers of the axes to create.
        obj : DataFrame
            The object to create axes on.
        validate: bool, default True
            Whether to validate the obj against an existing object already written.
        nan_rep :
            A value to use for string column nan_rep.
        data_columns : List[str], True, or None, default None
            Specify the columns that we want to create to allow indexing on.
            * True : Use all available columns.
            * None : Use no columns.
            * List[str] : Use the specified columns.
        min_itemsize: Dict[str, int] or None, default None
            The min itemsize for a column in bytes.
        Returns
        -------
        A new storer of type(self) populated with the computed axes/columns.
        """
        if not isinstance(obj, DataFrame):
            group = self.group._v_name
            raise TypeError(
                f"cannot properly create the storer for: [group->{group},"
                f"value->{type(obj)}]"
            )
        # set the default axes if needed
        if axes is None:
            axes = [0]
        # map axes to numbers
        axes = [obj._get_axis_number(a) for a in axes]
        # do we have an existing table (if so, use its axes & data_columns)
        if self.infer_axes():
            table_exists = True
            axes = [a.axis for a in self.index_axes]
            data_columns = list(self.data_columns)
            nan_rep = self.nan_rep
            # TODO: do we always have validate=True here?
        else:
            table_exists = False
        new_info = self.info
        assert self.ndim == 2  # with next check, we must have len(axes) == 1
        # currently support on ndim-1 axes
        if len(axes) != self.ndim - 1:
            raise ValueError(
                "currently only support ndim-1 indexers in an AppendableTable"
            )
        # create according to the new data
        new_non_index_axes: list = []
        # nan_representation
        if nan_rep is None:
            nan_rep = "nan"
        # We construct the non-index-axis first, since that alters new_info
        idx = next(x for x in [0, 1] if x not in axes)
        a = obj.axes[idx]
        # we might be able to change the axes on the appending data if necessary
        append_axis = list(a)
        if table_exists:
            indexer = len(new_non_index_axes)  # i.e. 0
            exist_axis = self.non_index_axes[indexer][1]
            if not array_equivalent(
                np.array(append_axis),
                np.array(exist_axis),
                strict_nan=True,
                dtype_equal=True,
            ):
                # ahah! -> reindex
                # same labels in a different order: adopt the existing order
                if array_equivalent(
                    np.array(sorted(append_axis)),
                    np.array(sorted(exist_axis)),
                    strict_nan=True,
                    dtype_equal=True,
                ):
                    append_axis = exist_axis
        # the non_index_axes info
        info = new_info.setdefault(idx, {})
        info["names"] = list(a.names)
        info["type"] = type(a).__name__
        new_non_index_axes.append((idx, append_axis))
        # Now we can construct our new index axis
        idx = axes[0]
        a = obj.axes[idx]
        axis_name = obj._get_axis_name(idx)
        new_index = _convert_index(axis_name, a, self.encoding, self.errors)
        new_index.axis = idx
        # Because we are always 2D, there is only one new_index, so
        # we know it will have pos=0
        new_index.set_pos(0)
        new_index.update_info(new_info)
        new_index.maybe_set_size(min_itemsize)  # check for column conflicts
        new_index_axes = [new_index]
        j = len(new_index_axes)  # i.e. 1
        assert j == 1
        # reindex by our non_index_axes & compute data_columns
        assert len(new_non_index_axes) == 1
        for a in new_non_index_axes:
            obj = _reindex_axis(obj, a[0], a[1])
        transposed = new_index.axis == 1
        # figure out data_columns and get out blocks
        data_columns = self.validate_data_columns(
            data_columns, min_itemsize, new_non_index_axes
        )
        frame = self.get_object(obj, transposed)._consolidate()
        blocks, blk_items = self._get_blocks_and_items(
            frame, table_exists, new_non_index_axes, self.values_axes, data_columns
        )
        # add my values
        vaxes = []
        for i, (blk, b_items) in enumerate(zip(blocks, blk_items)):
            # shape of the data column are the indexable axes
            klass = DataCol
            name = None
            # we have a data_column
            if data_columns and len(b_items) == 1 and b_items[0] in data_columns:
                klass = DataIndexableCol
                name = b_items[0]
            if not (name is None or isinstance(name, str)):
                # TODO: should the message here be more specifically non-str?
                raise ValueError("cannot have non-object label DataIndexableCol")
            # make sure that we match up the existing columns
            # if we have an existing table
            existing_col: DataCol | None
            if table_exists and validate:
                try:
                    existing_col = self.values_axes[i]
                except (IndexError, KeyError) as err:
                    raise ValueError(
                        f"Incompatible appended table [{blocks}]"
                        f"with existing table [{self.values_axes}]"
                    ) from err
            else:
                existing_col = None
            new_name = name or f"values_block_{i}"
            data_converted = _maybe_convert_for_string_atom(
                new_name,
                blk.values,
                existing_col=existing_col,
                min_itemsize=min_itemsize,
                nan_rep=nan_rep,
                encoding=self.encoding,
                errors=self.errors,
                columns=b_items,
            )
            adj_name = _maybe_adjust_name(new_name, self.version)
            typ = klass._get_atom(data_converted)
            kind = _dtype_to_kind(data_converted.dtype.name)
            tz = None
            if getattr(data_converted, "tz", None) is not None:
                tz = _get_tz(data_converted.tz)
            # categoricals additionally record orderedness and categories
            meta = metadata = ordered = None
            if isinstance(data_converted.dtype, CategoricalDtype):
                ordered = data_converted.ordered
                meta = "category"
                metadata = np.asarray(data_converted.categories).ravel()
            data, dtype_name = _get_data_and_dtype_name(data_converted)
            col = klass(
                name=adj_name,
                cname=new_name,
                values=list(b_items),
                typ=typ,
                pos=j,
                kind=kind,
                tz=tz,
                ordered=ordered,
                meta=meta,
                metadata=metadata,
                dtype=dtype_name,
                data=data,
            )
            col.update_info(new_info)
            vaxes.append(col)
            j += 1
        dcs = [col.name for col in vaxes if col.is_data_indexable]
        new_table = type(self)(
            parent=self.parent,
            group=self.group,
            encoding=self.encoding,
            errors=self.errors,
            index_axes=new_index_axes,
            non_index_axes=new_non_index_axes,
            values_axes=vaxes,
            data_columns=dcs,
            info=new_info,
            nan_rep=nan_rep,
        )
        if hasattr(self, "levels"):
            # TODO: get this into constructor, only for appropriate subclass
            new_table.levels = self.levels
        new_table.validate_min_itemsize(min_itemsize)
        if validate and table_exists:
            new_table.validate(self)
        return new_table
    @staticmethod
    def _get_blocks_and_items(
        frame: DataFrame,
        table_exists: bool,
        new_non_index_axes,
        values_axes,
        data_columns,
    ):
        # Helper to clarify non-state-altering parts of _create_axes
        def get_blk_items(mgr):
            # one Index of column labels per block, aligned with mgr.blocks
            return [mgr.items.take(blk.mgr_locs) for blk in mgr.blocks]
        mgr = frame._mgr
        blocks: list[Block] = list(mgr.blocks)
        blk_items: list[Index] = get_blk_items(mgr)
        if len(data_columns):
            # TODO: prove that we only get here with axis == 1?
            # It is the case in all extant tests, but NOT the case
            # outside this `if len(data_columns)` check.
            # split out each data column into its own single-column block,
            # keeping the remaining columns together
            axis, axis_labels = new_non_index_axes[0]
            new_labels = Index(axis_labels).difference(Index(data_columns))
            mgr = frame.reindex(new_labels, axis=axis)._mgr
            blocks = list(mgr.blocks)
            blk_items = get_blk_items(mgr)
            for c in data_columns:
                # This reindex would raise ValueError if we had a duplicate
                # index, so we can infer that (as long as axis==1) we
                # get a single column back, so a single block.
                mgr = frame.reindex([c], axis=axis)._mgr
                blocks.extend(mgr.blocks)
                blk_items.extend(get_blk_items(mgr))
        # reorder the blocks in the same order as the existing table if we can
        if table_exists:
            by_items = {
                tuple(b_items.tolist()): (b, b_items)
                for b, b_items in zip(blocks, blk_items)
            }
            new_blocks: list[Block] = []
            new_blk_items = []
            for ea in values_axes:
                items = tuple(ea.values)
                try:
                    b, b_items = by_items.pop(items)
                    new_blocks.append(b)
                    new_blk_items.append(b_items)
                except (IndexError, KeyError) as err:
                    jitems = ",".join([pprint_thing(item) for item in items])
                    raise ValueError(
                        f"cannot match existing table structure for [{jitems}] "
                        "on appending data"
                    ) from err
            blocks = new_blocks
            blk_items = new_blk_items
        return blocks, blk_items
    def process_axes(self, obj, selection: Selection, columns=None) -> DataFrame:
        """process axes filters"""
        # make a copy to avoid side effects
        if columns is not None:
            columns = list(columns)
        # make sure to include levels if we have them
        if columns is not None and self.is_multi_index:
            assert isinstance(self.levels, list)  # assured by is_multi_index
            for n in self.levels:
                if n not in columns:
                    columns.insert(0, n)
        # reorder by any non_index_axes & limit to the select columns
        for axis, labels in self.non_index_axes:
            obj = _reindex_axis(obj, axis, labels, columns)
        def process_filter(field, filt, op):
            # apply one filter term, matching `field` against either an axis
            # name or a label found within an axis
            for axis_name in obj._AXIS_ORDERS:
                axis_number = obj._get_axis_number(axis_name)
                axis_values = obj._get_axis(axis_name)
                assert axis_number is not None
                # see if the field is the name of an axis
                if field == axis_name:
                    # if we have a multi-index, then need to include
                    # the levels
                    if self.is_multi_index:
                        filt = filt.union(Index(self.levels))
                    takers = op(axis_values, filt)
                    return obj.loc(axis=axis_number)[takers]
                # this might be the name of a file IN an axis
                elif field in axis_values:
                    # we need to filter on this dimension
                    values = ensure_index(getattr(obj, field).values)
                    filt = ensure_index(filt)
                    # hack until we support reversed dim flags
                    if isinstance(obj, DataFrame):
                        axis_number = 1 - axis_number
                    takers = op(values, filt)
                    return obj.loc(axis=axis_number)[takers]
            raise ValueError(f"cannot find the field [{field}] for filtering!")
        # apply the selection filters (but keep in the same order)
        if selection.filter is not None:
            for field, op, filt in selection.filter.format():
                obj = process_filter(field, filt, op)
        return obj
def create_description(
self,
complib,
complevel: int | None,
fletcher32: bool,
expectedrows: int | None,
) -> dict[str, Any]:
"""create the description of the table from the axes & values"""
# provided expected rows if its passed
if expectedrows is None:
expectedrows = max(self.nrows_expected, 10000)
d = {"name": "table", "expectedrows": expectedrows}
# description from the axes & values
d["description"] = {a.cname: a.typ for a in self.axes}
if complib:
if complevel is None:
complevel = self._complevel or 9
filters = _tables().Filters(
complevel=complevel,
complib=complib,
fletcher32=fletcher32 or self._fletcher32,
)
d["filters"] = filters
elif self._filters is not None:
d["filters"] = self._filters
return d
    def read_coordinates(
        self, where=None, start: int | None = None, stop: int | None = None
    ):
        """
        select coordinates (row numbers) from a table; return the
        coordinates object
        """
        # validate the version
        self.validate_version(where)
        # infer the data kind
        if not self.infer_axes():
            return False
        # create the selection
        selection = Selection(self, where=where, start=start, stop=stop)
        coords = selection.select_coords()
        if selection.filter is not None:
            # narrow the coordinates by applying each filter term against
            # the relevant column, read only over the coord range
            for field, op, filt in selection.filter.format():
                data = self.read_column(
                    field, start=coords.min(), stop=coords.max() + 1
                )
                coords = coords[op(data.iloc[coords - coords.min()], filt).values]
        return Index(coords)
    def read_column(
        self,
        column: str,
        where=None,
        start: int | None = None,
        stop: int | None = None,
    ):
        """
        return a single column from the table, generally only indexables
        are interesting
        """
        # validate the version
        self.validate_version()
        # infer the data kind
        if not self.infer_axes():
            return False
        if where is not None:
            raise TypeError("read_column does not currently accept a where clause")
        # find the axes
        for a in self.axes:
            if column == a.name:
                if not a.is_data_indexable:
                    raise ValueError(
                        f"column [{column}] can not be extracted individually; "
                        "it is not data indexable"
                    )
                # column must be an indexable or a data column
                c = getattr(self.table.cols, column)
                a.set_info(self.info)
                col_values = a.convert(
                    c[start:stop],
                    nan_rep=self.nan_rep,
                    encoding=self.encoding,
                    errors=self.errors,
                )
                # convert() returns a pair; index 1 holds the column values
                cvs = col_values[1]
                return Series(cvs, name=column, copy=False)
        raise KeyError(f"column [{column}] not found in the table")
class WORMTable(Table):
    """
    A write-once read-many table: appending is NOT allowed. Writing is a
    one-time operation; the data are stored in a format that allows
    searching the data on disk.
    """

    table_type = "worm"

    def read(
        self,
        where=None,
        columns=None,
        start: int | None = None,
        stop: int | None = None,
    ):
        """Read the indices and the indexing array, calculate offset rows and return."""
        raise NotImplementedError("WORMTable needs to implement read")

    def write(self, obj, **kwargs) -> None:
        """
        Write in a searchable format (cannot append afterwards): write out the
        indices and the values using _write_array (e.g. a CArray) and create an
        indexing table so that we can search.
        """
        raise NotImplementedError("WORMTable needs to implement write")
class AppendableTable(Table):
"""support the new appendable table formats"""
table_type = "appendable"
    # error: Signature of "write" incompatible with supertype "Fixed"
    def write(  # type: ignore[override]
        self,
        obj,
        axes=None,
        append: bool = False,
        complib=None,
        complevel=None,
        fletcher32=None,
        min_itemsize=None,
        chunksize: int | None = None,
        expectedrows=None,
        dropna: bool = False,
        nan_rep=None,
        data_columns=None,
        track_times: bool = True,
    ) -> None:
        """Write (or, with ``append=True``, append) ``obj`` to the table."""
        if not append and self.is_exists:
            # overwrite: drop the existing table node first
            self._handle.remove_node(self.group, "table")
        # create the axes
        table = self._create_axes(
            axes=axes,
            obj=obj,
            validate=append,
            min_itemsize=min_itemsize,
            nan_rep=nan_rep,
            data_columns=data_columns,
        )
        for a in table.axes:
            a.validate_names()
        if not table.is_exists:
            # create the table
            options = table.create_description(
                complib=complib,
                complevel=complevel,
                fletcher32=fletcher32,
                expectedrows=expectedrows,
            )
            # set the table attributes
            table.set_attrs()
            options["track_times"] = track_times
            # create the table
            table._handle.create_table(table.group, **options)
        # update my info
        table.attrs.info = table.info
        # validate the axes and set the kinds
        for a in table.axes:
            a.validate_and_set(table, append)
        # add the rows
        table.write_data(chunksize, dropna=dropna)
    def write_data(self, chunksize: int | None, dropna: bool = False) -> None:
        """
        we form the data into a 2-d including indexes,values,mask write chunk-by-chunk
        """
        names = self.dtype.names
        nrows = self.nrows_expected
        # if dropna==True, then drop ALL nan rows
        masks = []
        if dropna:
            for a in self.values_axes:
                # figure the mask: only do if we can successfully process this
                # column, otherwise ignore the mask
                mask = isna(a.data).all(axis=0)
                if isinstance(mask, np.ndarray):
                    masks.append(mask.astype("u1", copy=False))
        # consolidate masks
        # a row is dropped only when every values axis marks it all-NaN
        if len(masks):
            mask = masks[0]
            for m in masks[1:]:
                mask = mask & m
            mask = mask.ravel()
        else:
            mask = None
        # broadcast the indexes if needed
        indexes = [a.cvalues for a in self.index_axes]
        nindexes = len(indexes)
        assert nindexes == 1, nindexes  # ensures we dont need to broadcast
        # transpose the values so first dimension is last
        # reshape the values if needed
        values = [a.take_data() for a in self.values_axes]
        values = [v.transpose(np.roll(np.arange(v.ndim), v.ndim - 1)) for v in values]
        bvalues = []
        for i, v in enumerate(values):
            # reshape each values array to match its record-dtype field shape
            new_shape = (nrows,) + self.dtype[names[nindexes + i]].shape
            bvalues.append(v.reshape(new_shape))
        # write the chunks
        if chunksize is None:
            chunksize = 100000
        # reusable row buffer for all full-size chunks
        rows = np.empty(min(chunksize, nrows), dtype=self.dtype)
        chunks = nrows // chunksize + 1
        for i in range(chunks):
            start_i = i * chunksize
            end_i = min((i + 1) * chunksize, nrows)
            if start_i >= end_i:
                break
            self.write_data_chunk(
                rows,
                indexes=[a[start_i:end_i] for a in indexes],
                mask=mask[start_i:end_i] if mask is not None else None,
                values=[v[start_i:end_i] for v in bvalues],
            )
def write_data_chunk(
    self,
    rows: np.ndarray,
    indexes: list[np.ndarray],
    mask: npt.NDArray[np.bool_] | None,
    values: list[np.ndarray],
) -> None:
    """
    Fill ``rows`` with one chunk of data and append it to the table.

    Parameters
    ----------
    rows : an empty memory space where we are putting the chunk
    indexes : an array of the indexes
    mask : an array of the masks
    values : an array of the values
    """
    # 0 len
    for v in values:
        if not np.prod(v.shape):
            # an empty values block means there is nothing to write
            return

    nrows = indexes[0].shape[0]
    if nrows != len(rows):
        # the final chunk is usually shorter; allocate a right-sized buffer
        rows = np.empty(nrows, dtype=self.dtype)

    names = self.dtype.names
    nindexes = len(indexes)

    # indexes occupy the first ``nindexes`` fields of the compound dtype
    for i, idx in enumerate(indexes):
        rows[names[i]] = idx

    # values
    for i, v in enumerate(values):
        rows[names[i + nindexes]] = v

    # mask
    if mask is not None:
        # mask marks the all-NaN rows; keep only the rows where it is unset
        m = ~mask.ravel().astype(bool, copy=False)
        if not m.all():
            rows = rows[m]

    if len(rows):
        self.table.append(rows)
        self.table.flush()
def delete(
    self, where=None, start: int | None = None, stop: int | None = None
) -> int | None:
    """
    Delete rows matching ``where`` (or the start/stop range).

    Returns the number of rows removed, or None when the table's axes
    cannot be inferred (nothing stored yet).
    """
    # delete all rows (and return the nrows)
    if where is None or not len(where):
        if start is None and stop is None:
            # no bounds either: remove the entire node in one shot
            nrows = self.nrows
            self._handle.remove_node(self.group, recursive=True)
        else:
            # pytables<3.0 would remove a single row with stop=None
            if stop is None:
                stop = self.nrows
            nrows = self.table.remove_rows(start=start, stop=stop)
            self.table.flush()
        return nrows

    # infer the data kind
    if not self.infer_axes():
        return None

    # create the selection
    table = self.table
    selection = Selection(self, where, start=start, stop=stop)
    values = selection.select_coords()

    # delete the rows in reverse order
    sorted_series = Series(values, copy=False).sort_values()
    ln = len(sorted_series)

    if ln:
        # construct groups of consecutive rows: each gap > 1 in the sorted
        # coordinates starts a new group
        diff = sorted_series.diff()
        groups = list(diff[diff > 1].index)

        # 1 group
        if not len(groups):
            groups = [0]

        # final element
        if groups[-1] != ln:
            groups.append(ln)

        # initial element
        if groups[0] != 0:
            groups.insert(0, 0)

        # we must remove in reverse order so earlier coordinates stay valid!
        pg = groups.pop()
        for g in reversed(groups):
            rows = sorted_series.take(range(g, pg))
            table.remove_rows(
                start=rows[rows.index[0]], stop=rows[rows.index[-1]] + 1
            )
            pg = g

        self.table.flush()

    # return the number of rows removed
    return ln
class AppendableFrameTable(AppendableTable):
    """support the new appendable table formats"""

    pandas_kind = "frame_table"
    table_type = "appendable_frame"
    ndim = 2
    obj_type: type[DataFrame | Series] = DataFrame

    @property
    def is_transposed(self) -> bool:
        # frames are stored transposed iff the (single) index axis is axis 1
        return self.index_axes[0].axis == 1

    @classmethod
    def get_object(cls, obj, transposed: bool):
        """these are written transposed"""
        if transposed:
            obj = obj.T
        return obj

    def read(
        self,
        where=None,
        columns=None,
        start: int | None = None,
        stop: int | None = None,
    ):
        # Reassemble a DataFrame from the stored axes, honoring any
        # ``where`` selection and row bounds.

        # validate the version
        self.validate_version(where)

        # infer the data kind; bail out if nothing has been stored
        if not self.infer_axes():
            return None

        result = self._read_axes(where=where, start=start, stop=stop)

        # metadata (e.g. MultiIndex type/names) for the non-index axis, if any
        info = (
            self.info.get(self.non_index_axes[0][0], {})
            if len(self.non_index_axes)
            else {}
        )

        inds = [i for i, ax in enumerate(self.axes) if ax is self.index_axes[0]]
        assert len(inds) == 1
        ind = inds[0]

        index = result[ind][0]

        frames = []
        for i, a in enumerate(self.axes):
            if a not in self.values_axes:
                continue
            index_vals, cvalues = result[i]

            # we could have a multi-index constructor here
            # ensure_index doesn't recognized our list-of-tuples here
            if info.get("type") != "MultiIndex":
                cols = Index(index_vals)
            else:
                cols = MultiIndex.from_tuples(index_vals)

            names = info.get("names")
            if names is not None:
                cols.set_names(names, inplace=True)

            # orient values/index/columns depending on how the frame was stored
            if self.is_transposed:
                values = cvalues
                index_ = cols
                cols_ = Index(index, name=getattr(index, "name", None))
            else:
                values = cvalues.T
                index_ = Index(index, name=getattr(index, "name", None))
                cols_ = cols

            # if we have a DataIndexableCol, its shape will only be 1 dim
            if values.ndim == 1 and isinstance(values, np.ndarray):
                values = values.reshape((1, values.shape[0]))

            if isinstance(values, (np.ndarray, DatetimeArray)):
                df = DataFrame(values.T, columns=cols_, index=index_, copy=False)
            elif isinstance(values, Index):
                df = DataFrame(values, columns=cols_, index=index_)
            else:
                # Categorical
                df = DataFrame._from_arrays([values], columns=cols_, index=index_)
            if not (using_string_dtype() and values.dtype.kind == "O"):
                assert (df.dtypes == values.dtype).all(), (df.dtypes, values.dtype)

            # restore the string dtype for object data written as strings
            if using_string_dtype() and is_string_array(
                values,  # type: ignore[arg-type]
                skipna=True,
            ):
                df = df.astype(StringDtype(na_value=np.nan))
            frames.append(df)

        if len(frames) == 1:
            df = frames[0]
        else:
            df = concat(frames, axis=1)

        selection = Selection(self, where=where, start=start, stop=stop)
        # apply the selection filters & axis orderings
        df = self.process_axes(df, selection=selection, columns=columns)
        return df
class AppendableSeriesTable(AppendableFrameTable):
    """Appendable-format table that stores a Series as a one-column frame."""

    pandas_kind = "series_table"
    table_type = "appendable_series"
    ndim = 2
    obj_type = Series

    @property
    def is_transposed(self) -> bool:
        # Series tables are never stored transposed.
        return False

    @classmethod
    def get_object(cls, obj, transposed: bool):
        # nothing to un-transpose for a Series
        return obj

    # error: Signature of "write" incompatible with supertype "Fixed"
    def write(self, obj, data_columns=None, **kwargs) -> None:  # type: ignore[override]
        """Write the Series by first converting it to a one-column frame."""
        if not isinstance(obj, DataFrame):
            obj = obj.to_frame(obj.name or "values")
        super().write(obj=obj, data_columns=obj.columns.tolist(), **kwargs)

    def read(
        self,
        where=None,
        columns=None,
        start: int | None = None,
        stop: int | None = None,
    ) -> Series:
        """Read back as a Series, restoring any MultiIndex levels."""
        is_multi_index = self.is_multi_index
        if columns is not None and is_multi_index:
            assert isinstance(self.levels, list)  # needed for mypy
            for level_name in self.levels:
                if level_name not in columns:
                    columns.insert(0, level_name)
        frame = super().read(where=where, columns=columns, start=start, stop=stop)
        if is_multi_index:
            frame.set_index(self.levels, inplace=True)

        result = frame.iloc[:, 0]

        # remove the default name
        if result.name == "values":
            result.name = None
        return result
class AppendableMultiSeriesTable(AppendableSeriesTable):
    """Appendable series table whose index is a MultiIndex."""

    pandas_kind = "series_table"
    table_type = "appendable_multiseries"

    # error: Signature of "write" incompatible with supertype "Fixed"
    def write(self, obj, **kwargs) -> None:  # type: ignore[override]
        """Flatten the MultiIndex into columns and write as a frame table."""
        value_name = obj.name or "values"
        newobj, self.levels = self.validate_multiindex(obj)
        assert isinstance(self.levels, list)  # for mypy
        # the level columns come first, the values column last
        newobj.columns = Index(list(self.levels) + [value_name])
        super().write(obj=newobj, **kwargs)
class GenericTable(AppendableFrameTable):
    """a table that read/writes the generic pytables table format"""

    pandas_kind = "frame_table"
    table_type = "generic_table"
    ndim = 2
    obj_type = DataFrame
    # names of restored index levels
    levels: list[Hashable]

    @property
    def pandas_type(self) -> str:
        return self.pandas_kind

    @property
    def storable(self):
        # generic tables may store the table node directly on the group
        return getattr(self.group, "table", None) or self.group

    def get_attrs(self) -> None:
        """retrieve our attributes"""
        self.non_index_axes = []
        self.nan_rep = None
        self.levels = []

        self.index_axes = [a for a in self.indexables if a.is_an_indexable]
        self.values_axes = [a for a in self.indexables if not a.is_an_indexable]
        self.data_columns = [a.name for a in self.values_axes]

    @cache_readonly
    def indexables(self):
        """create the indexables from the table description"""
        d = self.description

        # TODO: can we get a typ for this? AFAICT it is the only place
        # where we aren't passing one
        # the index columns is just a simple index
        md = self.read_metadata("index")
        meta = "category" if md is not None else None
        index_col = GenericIndexCol(
            name="index", axis=0, table=self.table, meta=meta, metadata=md
        )

        _indexables: list[GenericIndexCol | GenericDataIndexableCol] = [index_col]

        # every described column becomes an indexable data column
        for i, n in enumerate(d._v_names):
            assert isinstance(n, str)

            atom = getattr(d, n)
            md = self.read_metadata(n)
            meta = "category" if md is not None else None
            dc = GenericDataIndexableCol(
                name=n,
                pos=i,
                values=[n],
                typ=atom,
                table=self.table,
                meta=meta,
                metadata=md,
            )
            _indexables.append(dc)

        return _indexables

    # error: Signature of "write" incompatible with supertype "AppendableTable"
    def write(self, **kwargs) -> None:  # type: ignore[override]
        # generic tables are read-only from pandas' point of view
        raise NotImplementedError("cannot write on an generic table")
class AppendableMultiFrameTable(AppendableFrameTable):
    """a frame with a multi-index"""

    table_type = "appendable_multiframe"
    obj_type = DataFrame
    ndim = 2
    _re_levels = re.compile(r"^level_\d+$")

    @property
    def table_type_short(self) -> str:
        return "appendable_multi"

    # error: Signature of "write" incompatible with supertype "Fixed"
    def write(self, obj, data_columns=None, **kwargs) -> None:  # type: ignore[override]
        """Flatten the MultiIndex into data columns and write the frame."""
        if data_columns is None:
            data_columns = []
        elif data_columns is True:
            data_columns = obj.columns.tolist()
        obj, self.levels = self.validate_multiindex(obj)
        assert isinstance(self.levels, list)  # for mypy
        # every level must be a data column so it can be restored on read
        for level_name in self.levels:
            if level_name not in data_columns:
                data_columns.insert(0, level_name)
        super().write(obj=obj, data_columns=data_columns, **kwargs)

    def read(
        self,
        where=None,
        columns=None,
        start: int | None = None,
        stop: int | None = None,
    ) -> DataFrame:
        """Read back and rebuild the MultiIndex from its level columns."""
        frame = super().read(where=where, columns=columns, start=start, stop=stop)
        frame = frame.set_index(self.levels)

        # remove names for 'level_%d'
        restored_names = [
            None if self._re_levels.search(name) else name
            for name in frame.index.names
        ]
        frame.index = frame.index.set_names(restored_names)

        return frame
def _reindex_axis(
    obj: DataFrame, axis: AxisInt, labels: Index, other=None
) -> DataFrame:
    """
    Reindex ``obj`` along ``axis`` to ``labels`` (optionally intersected
    with ``other``), skipping the reindex entirely when it is a no-op.
    """
    current = obj._get_axis(axis)
    labels = ensure_index(labels)

    # try not to reindex even if other is provided
    # if it equals our current index
    if other is not None:
        other = ensure_index(other)
    if (other is None or labels.equals(other)) and labels.equals(current):
        return obj

    labels = ensure_index(labels.unique())
    if other is not None:
        labels = ensure_index(other.unique()).intersection(labels, sort=False)
    if not labels.equals(current):
        slicer: list[slice | Index] = [slice(None, None)] * obj.ndim
        slicer[axis] = labels
        obj = obj.loc[tuple(slicer)]
    return obj
# tz to/from coercion
def _get_tz(tz: tzinfo) -> str | tzinfo:
    """for a tz-aware type, return an encoded zone"""
    return timezones.get_timezone(tz)
def _set_tz(
    values: npt.NDArray[np.int64], tz: str | tzinfo | None, datetime64_dtype: str
) -> DatetimeArray:
    """
    Coerce the values to a DatetimeArray with appropriate tz.

    Parameters
    ----------
    values : ndarray[int64]
        Epoch values in the unit encoded by ``datetime64_dtype``.
    tz : str, tzinfo, or None
    datetime64_dtype : str, e.g. "datetime64[ns]", "datetime64[25s]"
    """
    assert values.dtype == "i8", values.dtype
    # Argument "tz" to "tz_to_dtype" has incompatible type "str | tzinfo | None";
    # expected "tzinfo"
    unit, _ = np.datetime_data(datetime64_dtype)  # parsing dtype: unit, count
    dtype = tz_to_dtype(tz=tz, unit=unit)  # type: ignore[arg-type]
    dta = DatetimeArray._from_sequence(values, dtype=dtype)
    return dta
def _convert_index(name: str, index: Index, encoding: str, errors: str) -> IndexCol:
    """
    Convert an Index into an IndexCol suitable for storage in a table,
    choosing the storage atom from the index's dtype.
    """
    assert isinstance(name, str)

    index_name = index.name
    # error: Argument 1 to "_get_data_and_dtype_name" has incompatible type "Index";
    # expected "Union[ExtensionArray, ndarray]"
    converted, dtype_name = _get_data_and_dtype_name(index)  # type: ignore[arg-type]
    kind = _dtype_to_kind(dtype_name)
    atom = DataIndexableCol._get_atom(converted)

    if (
        lib.is_np_dtype(index.dtype, "iu")
        or needs_i8_conversion(index.dtype)
        or is_bool_dtype(index.dtype)
    ):
        # Includes Index, RangeIndex, DatetimeIndex, TimedeltaIndex, PeriodIndex,
        # in which case "kind" is "integer", "integer", "datetime64",
        # "timedelta64", and "integer", respectively.
        return IndexCol(
            name,
            values=converted,
            kind=kind,
            typ=atom,
            freq=getattr(index, "freq", None),
            tz=getattr(index, "tz", None),
            index_name=index_name,
        )

    if isinstance(index, MultiIndex):
        raise TypeError("MultiIndex not supported here!")

    inferred_type = lib.infer_dtype(index, skipna=False)
    # we won't get inferred_type of "datetime64" or "timedelta64" as these
    # would go through the DatetimeIndex/TimedeltaIndex paths above

    values = np.asarray(index)

    if inferred_type == "date":
        # dates are stored as their proleptic-Gregorian ordinals
        converted = np.asarray([v.toordinal() for v in values], dtype=np.int32)
        return IndexCol(
            name, converted, "date", _tables().Time32Col(), index_name=index_name
        )
    elif inferred_type == "string":
        # fixed-width bytes; itemsize is the longest encoded string
        converted = _convert_string_array(values, encoding, errors)
        itemsize = converted.dtype.itemsize
        return IndexCol(
            name,
            converted,
            "string",
            _tables().StringCol(itemsize),
            index_name=index_name,
        )

    elif inferred_type in ["integer", "floating"]:
        return IndexCol(
            name, values=converted, kind=kind, typ=atom, index_name=index_name
        )
    else:
        assert isinstance(converted, np.ndarray) and converted.dtype == object
        assert kind == "object", kind
        atom = _tables().ObjectAtom()
        return IndexCol(name, converted, kind, atom, index_name=index_name)
def _unconvert_index(data, kind: str, encoding: str, errors: str) -> np.ndarray | Index:
index: Index | np.ndarray
if kind.startswith("datetime64"):
if kind == "datetime64":
# created before we stored resolution information
index = DatetimeIndex(data)
else:
index = DatetimeIndex(data.view(kind))
elif kind == "timedelta64":
index = TimedeltaIndex(data)
elif kind == "date":
try:
index = np.asarray([date.fromordinal(v) for v in data], dtype=object)
except ValueError:
index = np.asarray([date.fromtimestamp(v) for v in data], dtype=object)
elif kind in ("integer", "float", "bool"):
index = np.asarray(data)
elif kind in ("string"):
index = _unconvert_string_array(
data, nan_rep=None, encoding=encoding, errors=errors
)
elif kind == "object":
index = np.asarray(data[0])
else: # pragma: no cover
raise ValueError(f"unrecognized index type {kind}")
return index
def _maybe_convert_for_string_atom(
    name: str,
    bvalues: ArrayLike,
    existing_col,
    min_itemsize,
    nan_rep,
    encoding,
    errors,
    columns: list[str],
):
    """
    Coerce an object-dtype block to fixed-width bytes for a StringCol,
    or return it unchanged when it is not string-like.

    Raises TypeError for object blocks that cannot be serialized as
    strings (dates, mixed timezones, non-string objects).
    """
    if bvalues.dtype != object:
        return bvalues

    bvalues = cast(np.ndarray, bvalues)

    dtype_name = bvalues.dtype.name
    inferred_type = lib.infer_dtype(bvalues, skipna=False)

    if inferred_type == "date":
        raise TypeError("[date] is not implemented as a table column")
    if inferred_type == "datetime":
        # after GH#8260
        # this only would be hit for a multi-timezone dtype which is an error
        raise TypeError(
            "too many timezones in this block, create separate data columns"
        )

    if not (inferred_type == "string" or dtype_name == "object"):
        return bvalues

    mask = isna(bvalues)
    data = bvalues.copy()
    # replace NaNs with the storable representation before encoding
    data[mask] = nan_rep

    # see if we have a valid string type
    inferred_type = lib.infer_dtype(data, skipna=False)
    if inferred_type != "string":
        # we cannot serialize this data, so report an exception on a column
        # by column basis

        # expected behaviour:
        # search block for a non-string object column by column
        for i in range(data.shape[0]):
            col = data[i]
            inferred_type = lib.infer_dtype(col, skipna=False)
            if inferred_type != "string":
                error_column_label = columns[i] if len(columns) > i else f"No.{i}"
                raise TypeError(
                    f"Cannot serialize the column [{error_column_label}]\n"
                    f"because its data contents are not [string] but "
                    f"[{inferred_type}] object dtype"
                )

    # itemsize is the maximum length of a string (along any dimension)
    data_converted = _convert_string_array(data, encoding, errors).reshape(data.shape)
    itemsize = data_converted.itemsize

    # specified min_itemsize?
    if isinstance(min_itemsize, dict):
        min_itemsize = int(min_itemsize.get(name) or min_itemsize.get("values") or 0)
    itemsize = max(min_itemsize or 0, itemsize)

    # check for column in the values conflicts: an existing column's wider
    # itemsize wins so appends remain compatible
    if existing_col is not None:
        eci = existing_col.validate_col(itemsize)
        if eci is not None and eci > itemsize:
            itemsize = eci

    data_converted = data_converted.astype(f"|S{itemsize}", copy=False)
    return data_converted
def _convert_string_array(data: np.ndarray, encoding: str, errors: str) -> np.ndarray:
    """
    Take a string-like that is object dtype and coerce to a fixed size string type.

    Parameters
    ----------
    data : np.ndarray[object]
    encoding : str
    errors : str
        Handler for encoding errors.

    Returns
    -------
    np.ndarray[fixed-length-string]
    """
    # encode if needed
    if len(data):
        data = (
            Series(data.ravel(), copy=False)
            .str.encode(encoding, errors)
            ._values.reshape(data.shape)
        )

    # create the sized dtype: wide enough for the longest encoded string,
    # never zero-width
    ensured = ensure_object(data.ravel())
    itemsize = max(1, libwriters.max_len_string_array(ensured))

    data = np.asarray(data, dtype=f"S{itemsize}")
    return data
def _unconvert_string_array(
    data: np.ndarray, nan_rep, encoding: str, errors: str
) -> np.ndarray:
    """
    Inverse of _convert_string_array.

    Parameters
    ----------
    data : np.ndarray[fixed-length-string]
    nan_rep : the storage repr of NaN
    encoding : str
    errors : str
        Handler for encoding errors.

    Returns
    -------
    np.ndarray[object]
        Decoded data.
    """
    shape = data.shape
    data = np.asarray(data.ravel(), dtype=object)

    if len(data):
        itemsize = libwriters.max_len_string_array(ensure_object(data))
        dtype = f"U{itemsize}"

        if isinstance(data[0], bytes):
            # stored as bytes: decode back to str
            data = Series(data, copy=False).str.decode(encoding, errors=errors)._values
        else:
            data = data.astype(dtype, copy=False).astype(object, copy=False)

    if nan_rep is None:
        nan_rep = "nan"

    # restore NaN for every occurrence of the storage representation
    libwriters.string_array_replace_from_nan_rep(data, nan_rep)
    return data.reshape(shape)
def _maybe_convert(values: np.ndarray, val_kind: str, encoding: str, errors: str):
    """Convert raw stored values back to their in-memory form when needed."""
    assert isinstance(val_kind, str), type(val_kind)
    if not _need_convert(val_kind):
        return values
    converter = _get_converter(val_kind, encoding, errors)
    return converter(values)
def _get_converter(kind: str, encoding: str, errors: str):
if kind == "datetime64":
return lambda x: np.asarray(x, dtype="M8[ns]")
elif "datetime64" in kind:
return lambda x: np.asarray(x, dtype=kind)
elif kind == "string":
return lambda x: _unconvert_string_array(
x, nan_rep=None, encoding=encoding, errors=errors
)
else: # pragma: no cover
raise ValueError(f"invalid kind {kind}")
def _need_convert(kind: str) -> bool:
if kind in ("datetime64", "string") or "datetime64" in kind:
return True
return False
def _maybe_adjust_name(name: str, version: Sequence[int]) -> str:
"""
Prior to 0.10.1, we named values blocks like: values_block_0 an the
name values_0, adjust the given name if necessary.
Parameters
----------
name : str
version : Tuple[int, int, int]
Returns
-------
str
"""
if isinstance(version, str) or len(version) < 3:
raise ValueError("Version is incorrect, expected sequence of 3 integers.")
if version[0] == 0 and version[1] <= 10 and version[2] == 0:
m = re.search(r"values_block_(\d+)", name)
if m:
grp = m.groups()[0]
name = f"values_{grp}"
return name
def _dtype_to_kind(dtype_str: str) -> str:
"""
Find the "kind" string describing the given dtype name.
"""
if dtype_str.startswith(("string", "bytes")):
kind = "string"
elif dtype_str.startswith("float"):
kind = "float"
elif dtype_str.startswith("complex"):
kind = "complex"
elif dtype_str.startswith(("int", "uint")):
kind = "integer"
elif dtype_str.startswith("datetime64"):
kind = dtype_str
elif dtype_str.startswith("timedelta"):
kind = "timedelta64"
elif dtype_str.startswith("bool"):
kind = "bool"
elif dtype_str.startswith("category"):
kind = "category"
elif dtype_str.startswith("period"):
# We store the `freq` attr so we can restore from integers
kind = "integer"
elif dtype_str == "object":
kind = "object"
else:
raise ValueError(f"cannot interpret dtype of [{dtype_str}]")
return kind
def _get_data_and_dtype_name(data: ArrayLike):
    """
    Convert the passed data into a storable form and a dtype string.
    """
    if isinstance(data, Categorical):
        # store the integer codes; categories are kept as metadata elsewhere
        data = data.codes

    if isinstance(data.dtype, DatetimeTZDtype):
        # For datetime64tz we need to drop the TZ in tests TODO: why?
        dtype_name = f"datetime64[{data.dtype.unit}]"
    else:
        dtype_name = data.dtype.name

    if data.dtype.kind in "mM":
        # datetimes/timedeltas are stored as their int64 epoch values
        data = np.asarray(data.view("i8"))
        # TODO: we used to reshape for the dt64tz case, but no longer
        #  doing that doesn't seem to break anything.  why?

    elif isinstance(data, PeriodIndex):
        data = data.asi8

    data = np.asarray(data)
    return data, dtype_name
class Selection:
    """
    Carries out a selection operation on a tables.Table object.

    Parameters
    ----------
    table : a Table object
    where : list of Terms (or convertible to)
    start, stop: indices to start and/or stop selection
    """

    def __init__(
        self,
        table: Table,
        where=None,
        start: int | None = None,
        stop: int | None = None,
    ) -> None:
        self.table = table
        self.where = where
        self.start = start
        self.stop = stop
        self.condition = None
        self.filter = None
        self.terms = None
        self.coordinates = None

        if is_list_like(where):
            # see if we have a passed coordinate like
            with suppress(ValueError):
                inferred = lib.infer_dtype(where, skipna=False)
                if inferred in ("integer", "boolean"):
                    where = np.asarray(where)
                    if where.dtype == np.bool_:
                        # boolean mask: translate to absolute row numbers
                        # within the [start, stop) window
                        start, stop = self.start, self.stop
                        if start is None:
                            start = 0
                        if stop is None:
                            stop = self.table.nrows
                        self.coordinates = np.arange(start, stop)[where]
                    elif issubclass(where.dtype.type, np.integer):
                        # explicit row locations: validate against the bounds
                        if (self.start is not None and (where < self.start).any()) or (
                            self.stop is not None and (where >= self.stop).any()
                        ):
                            raise ValueError(
                                "where must have index locations >= start and < stop"
                            )
                        self.coordinates = where

        if self.coordinates is None:
            # not coordinate-like: treat ``where`` as a query expression
            self.terms = self.generate(where)

            # create the numexpr & the filter
            if self.terms is not None:
                self.condition, self.filter = self.terms.evaluate()

    @overload
    def generate(self, where: dict | list | tuple | str) -> PyTablesExpr: ...

    @overload
    def generate(self, where: None) -> None: ...

    def generate(self, where: dict | list | tuple | str | None) -> PyTablesExpr | None:
        """where can be a : dict,list,tuple,string"""
        if where is None:
            return None

        q = self.table.queryables()
        try:
            return PyTablesExpr(where, queryables=q, encoding=self.table.encoding)
        except NameError as err:
            # raise a nice message, suggesting that the user should use
            # data_columns
            qkeys = ",".join(q.keys())
            msg = dedent(
                f"""\
                The passed where expression: {where}
                contains an invalid variable reference
                all of the variable references must be a reference to
                an axis (e.g. 'index' or 'columns'), or a data_column
                The currently defined references are: {qkeys}
                """
            )
            raise ValueError(msg) from err

    def select(self):
        """
        generate the selection
        """
        if self.condition is not None:
            # push the numexpr condition down into PyTables
            return self.table.table.read_where(
                self.condition.format(), start=self.start, stop=self.stop
            )
        elif self.coordinates is not None:
            return self.table.table.read_coordinates(self.coordinates)
        return self.table.table.read(start=self.start, stop=self.stop)

    def select_coords(self):
        """
        generate the selection
        """
        start, stop = self.start, self.stop
        nrows = self.table.nrows
        # normalize bounds: negative values count from the end, as in slicing
        if start is None:
            start = 0
        elif start < 0:
            start += nrows
        if stop is None:
            stop = nrows
        elif stop < 0:
            stop += nrows

        if self.condition is not None:
            return self.table.table.get_where_list(
                self.condition.format(), start=start, stop=stop, sort=True
            )
        elif self.coordinates is not None:
            return self.coordinates

        return np.arange(start, stop)
|
pandas-devREPO_NAMEpandasPATH_START.@pandas_extracted@pandas-main@pandas@io@pytables.py@.PATH_END.py
|
{
"filename": "_showocean.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/layout/geo/_showocean.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class ShowoceanValidator(_plotly_utils.basevalidators.BooleanValidator):
    """Validator for the boolean ``layout.geo.showocean`` property."""

    def __init__(self, plotly_name="showocean", parent_name="layout.geo", **kwargs):
        # resolve defaults before delegating to the base validator
        edit_type = kwargs.pop("edit_type", "plot")
        role = kwargs.pop("role", "info")
        super(ShowoceanValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            role=role,
            **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@layout@geo@_showocean.py@.PATH_END.py
|
{
"filename": "_showtickprefix.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/contourcarpet/colorbar/_showtickprefix.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class ShowtickprefixValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    """Validator for the enumerated ``contourcarpet.colorbar.showtickprefix``
    property."""

    def __init__(
        self,
        plotly_name="showtickprefix",
        parent_name="contourcarpet.colorbar",
        **kwargs
    ):
        # resolve defaults before delegating to the base validator
        edit_type = kwargs.pop("edit_type", "colorbars")
        role = kwargs.pop("role", "style")
        values = kwargs.pop("values", ["all", "first", "last", "none"])
        super(ShowtickprefixValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            role=role,
            values=values,
            **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@contourcarpet@colorbar@_showtickprefix.py@.PATH_END.py
|
{
"filename": "_thickness.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/surface/colorbar/_thickness.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class ThicknessValidator(_plotly_utils.basevalidators.NumberValidator):
    """Validator for the non-negative numeric ``surface.colorbar.thickness``
    property."""

    def __init__(
        self, plotly_name="thickness", parent_name="surface.colorbar", **kwargs
    ):
        # resolve defaults before delegating to the base validator
        edit_type = kwargs.pop("edit_type", "calc")
        minimum = kwargs.pop("min", 0)
        role = kwargs.pop("role", "style")
        super(ThicknessValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            min=minimum,
            role=role,
            **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@surface@colorbar@_thickness.py@.PATH_END.py
|
{
"filename": "test_setup.py",
"repo_name": "ucberkeleyseti/blimpy",
"repo_path": "blimpy_extracted/blimpy-master/tests/test_setup.py",
"type": "Python"
}
|
r""" Testspectra_gen functions"""
def test_setup():
    """Run ``setup.py check`` and fail if it exits with a non-zero status."""
    import os

    cmd = "python3 setup.py check"
    # os.system returns the command's exit status; previously the return
    # value was discarded, so this test could never fail.
    assert os.system(cmd) == 0
|
ucberkeleysetiREPO_NAMEblimpyPATH_START.@blimpy_extracted@blimpy-master@tests@test_setup.py@.PATH_END.py
|
{
"filename": "_name.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/densitymap/_name.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class NameValidator(_plotly_utils.basevalidators.StringValidator):
    """Validator for the string ``densitymap.name`` property."""

    def __init__(self, plotly_name="name", parent_name="densitymap", **kwargs):
        # resolve the default edit type before delegating
        edit_type = kwargs.pop("edit_type", "style")
        super(NameValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@densitymap@_name.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "maho3/ltu-ili",
"repo_path": "ltu-ili_extracted/ltu-ili-main/ili/__init__.py",
"type": "Python"
}
|
# Re-export the public API of the core subpackages at the package top level.
from .dataloaders import *
from .inference import *
from .utils import *
from .validation import *

# The embedding subpackage is optional: skip it silently when its
# dependencies are not installed.
try:
    from .embedding import *
except ModuleNotFoundError:
    pass
|
maho3REPO_NAMEltu-iliPATH_START.@ltu-ili_extracted@ltu-ili-main@ili@__init__.py@.PATH_END.py
|
{
"filename": "utils.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/pure-eval/pure_eval/utils.py",
"type": "Python"
}
|
from collections import OrderedDict, deque
from datetime import date, time, datetime
from decimal import Decimal
from fractions import Fraction
import ast
import enum
import typing
class CannotEval(Exception):
    """Raised when a value cannot be safely evaluated."""

    def __repr__(self):
        # the class name alone is the whole message
        return type(self).__name__

    __str__ = __repr__
def is_any(x, *args):
    """True if ``x`` is (identity, not equality) any of ``args``."""
    for candidate in args:
        if x is candidate:
            return True
    return False
def of_type(x, *types):
    """Return ``x`` if its exact type is one of ``types``, else raise CannotEval."""
    if not is_any(type(x), *types):
        raise CannotEval
    return x
def of_standard_types(x, *, check_dict_values: bool, deep: bool):
    """Return ``x`` if it passes ``is_standard_types``, else raise CannotEval."""
    if not is_standard_types(x, check_dict_values=check_dict_values, deep=deep):
        raise CannotEval
    return x
def is_standard_types(x, *, check_dict_values: bool, deep: bool):
    """Safe wrapper around ``_is_standard_types_deep``; deeply recursive
    structures yield False rather than raising RecursionError."""
    try:
        ok, _length = _is_standard_types_deep(x, check_dict_values, deep)
    except RecursionError:
        return False
    return ok
def _is_standard_types_deep(x, check_dict_values: bool, deep: bool):
    """
    Return a ``(is_standard, size)`` pair: whether ``x`` consists only of
    standard immutable/container types, and the total number of contained
    items visited (used to cap the traversal).
    """
    typ = type(x)
    # exact-type check (via is_any) so subclasses do not qualify
    if is_any(
        typ,
        str,
        int,
        bool,
        float,
        bytes,
        complex,
        date,
        time,
        datetime,
        Fraction,
        Decimal,
        type(None),
        object,
    ):
        return True, 0

    if is_any(typ, tuple, frozenset, list, set, dict, OrderedDict, deque, slice):
        if typ in [slice]:
            # slices have no len(); their three fields are checked below
            length = 0
        else:
            length = len(x)
        assert isinstance(deep, bool)
        if not deep:
            return True, length

        if check_dict_values and typ in (dict, OrderedDict):
            # flatten keys and values into a single stream
            items = (v for pair in x.items() for v in pair)
        elif typ is slice:
            items = [x.start, x.stop, x.step]
        else:
            items = x
        for item in items:
            if length > 100000:
                # containers this large are not worth (or safe) traversing
                return False, length
            is_standard, item_length = _is_standard_types_deep(
                item, check_dict_values, deep
            )
            if not is_standard:
                return False, length
            length += item_length
        return True, length

    return False, 0
class _E(enum.Enum):
    # Sample Enum subclass; used only to harvest the metaclass type for
    # ``safe_name_types`` below.
    pass
class _C:
    # Sample class; its methods are never called — they exist only so the
    # types of plain/bound/class/static methods can be harvested for
    # ``safe_name_types`` below.
    def foo(self): pass  # pragma: nocover

    def bar(self): pass  # pragma: nocover

    @classmethod
    def cm(cls): pass  # pragma: nocover

    @staticmethod
    def sm(): pass  # pragma: nocover
# Concrete samples of every kind of object whose ``__name__`` can be
# trusted: builtins, methods, descriptors, modules, classes, enums.
# Only the *types* of these values are used (see ``safe_name_types``).
safe_name_samples = {
    "len": len,
    "append": list.append,
    "__add__": list.__add__,
    "insert": [].insert,
    "__mul__": [].__mul__,
    "fromkeys": dict.__dict__['fromkeys'],
    "is_any": is_any,
    "__repr__": CannotEval.__repr__,
    "foo": _C().foo,
    "bar": _C.bar,
    "cm": _C.cm,
    "sm": _C.sm,
    "ast": ast,
    "CannotEval": CannotEval,
    "_E": _E,
}

# Representative typing constructs whose display name comes from
# ``__name__`` / ``_name``.
typing_annotation_samples = {
    name: getattr(typing, name)
    for name in "List Dict Tuple Set Callable Mapping".split()
}

# Deduplicated types harvested from the samples above.
safe_name_types = tuple({
    type(f)
    for f in safe_name_samples.values()
})

typing_annotation_types = tuple({
    type(f)
    for f in typing_annotation_samples.values()
})
def eq_checking_types(a, b):
    """Equality that also requires the two operands to share an exact type."""
    if type(a) is not type(b):
        return False
    return a == b
def ast_name(node):
    """Return the source-level name of an ast.Name/ast.Attribute node, else None."""
    if isinstance(node, ast.Attribute):
        return node.attr
    if isinstance(node, ast.Name):
        return node.id
    return None
def safe_name(value):
    """Return the canonical display name of ``value`` when it can be
    determined safely, otherwise None."""
    typ = type(value)
    if is_any(typ, *safe_name_types):
        return value.__name__
    if value is typing.Optional:
        return "Optional"
    if value is typing.Union:
        return "Union"
    if is_any(typ, *typing_annotation_types):
        # typing constructs expose either __name__ or _name
        return getattr(value, "__name__", None) or getattr(value, "_name", None)
    return None
def has_ast_name(value, node):
    """True if the AST node's name matches ``value``'s safe display name."""
    expected = safe_name(value)
    if type(expected) is not str:
        return False
    return eq_checking_types(ast_name(node), expected)
def copy_ast_without_context(x):
    """Recursively copy an AST, dropping every ``ctx`` field so that
    structurally equal Load/Store nodes compare the same."""
    if isinstance(x, ast.AST):
        kwargs = {}
        for field in x._fields:
            if field == 'ctx' or not hasattr(x, field):
                continue
            kwargs[field] = copy_ast_without_context(getattr(x, field))
        node = type(x)(**kwargs)
        if hasattr(node, 'ctx'):
            # Python 3.13.0b2+ defaults to Load when we don't pass ctx
            # https://github.com/python/cpython/pull/118871
            del node.ctx
        return node
    if isinstance(x, list):
        return [copy_ast_without_context(item) for item in x]
    return x
def ensure_dict(x):
    """Coerce *x* to a dict, returning an empty dict when conversion fails.

    Best-effort by design: any exception raised by dict(x) (non-iterable,
    wrong element shape, etc.) yields {} instead of propagating.
    """
    try:
        result = dict(x)
    except Exception:
        result = {}
    return result
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@pure-eval@pure_eval@utils.py@.PATH_END.py
|
{
"filename": "ContentEnd.md",
"repo_name": "tensorflow/tensorflow",
"repo_path": "tensorflow_extracted/tensorflow-master/tensorflow/lite/g3doc/api_docs/python/tflite_support/metadata_schema_py_generated/ContentEnd.md",
"type": "Markdown"
}
|
page_type: reference
<link rel="stylesheet" href="/site-assets/css/style.css">
<!-- DO NOT EDIT! Automatically generated file. -->
<div itemscope itemtype="http://developers.google.com/ReferenceObject">
<meta itemprop="name" content="tflite_support.metadata_schema_py_generated.ContentEnd" />
<meta itemprop="path" content="Stable" />
</div>
# tflite_support.metadata_schema_py_generated.ContentEnd
<!-- Insert buttons and diff -->
<table class="tfo-notebook-buttons tfo-api nocontent" align="left">
<td>
<a target="_blank" href="https://github.com/tensorflow/tflite-support/blob/v0.4.4/tensorflow_lite_support/metadata/metadata_schema_py_generated.py#L877-L878">
<img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />
View source on GitHub
</a>
</td>
</table>
<pre class="devsite-click-to-copy prettyprint lang-py tfo-signature-link">
<code>tflite_support.metadata_schema_py_generated.ContentEnd(
builder
)
</code></pre>
<!-- Placeholder for "Used in" -->
|
tensorflowREPO_NAMEtensorflowPATH_START.@tensorflow_extracted@tensorflow-master@tensorflow@lite@g3doc@api_docs@python@tflite_support@metadata_schema_py_generated@ContentEnd.md@.PATH_END.py
|
{
"filename": "table_wide.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/wcwidth/py2/wcwidth/table_wide.py",
"type": "Python"
}
|
"""
Exports WIDE_EASTASIAN table keyed by supporting unicode version level.
This code was generated by wcwidth/bin/update-tables.py on 2024-01-06 01:39:49 UTC.
"""
WIDE_EASTASIAN = {
'4.1.0': (
# Source: EastAsianWidth-4.1.0.txt
# Date: 2005-03-17, 15:21:00 PST [KW]
#
(0x01100, 0x01159,), # Hangul Choseong Kiyeok ..Hangul Choseong Yeorinhi
(0x0115f, 0x0115f,), # Hangul Choseong Filler
(0x02329, 0x0232a,), # Left-pointing Angle Brac..Right-pointing Angle Bra
(0x02e80, 0x02e99,), # Cjk Radical Repeat ..Cjk Radical Rap
(0x02e9b, 0x02ef3,), # Cjk Radical Choke ..Cjk Radical C-simplified
(0x02f00, 0x02fd5,), # Kangxi Radical One ..Kangxi Radical Flute
(0x02ff0, 0x02ffb,), # Ideographic Description ..Ideographic Description
(0x03000, 0x03029,), # Ideographic Space ..Hangzhou Numeral Nine
(0x03030, 0x0303e,), # Wavy Dash ..Ideographic Variation In
(0x03041, 0x03096,), # Hiragana Letter Small A ..Hiragana Letter Small Ke
(0x0309b, 0x030ff,), # Katakana-hiragana Voiced..Katakana Digraph Koto
(0x03105, 0x0312c,), # Bopomofo Letter B ..Bopomofo Letter Gn
(0x03131, 0x0318e,), # Hangul Letter Kiyeok ..Hangul Letter Araeae
(0x03190, 0x031b7,), # Ideographic Annotation L..Bopomofo Final Letter H
(0x031c0, 0x031cf,), # Cjk Stroke T ..Cjk Stroke N
(0x031f0, 0x0321e,), # Katakana Letter Small Ku..Parenthesized Korean Cha
(0x03220, 0x03243,), # Parenthesized Ideograph ..Parenthesized Ideograph
(0x03250, 0x032fe,), # Partnership Sign ..Circled Katakana Wo
(0x03300, 0x04db5,), # Square Apaato ..Cjk Unified Ideograph-4d
(0x04e00, 0x09fbb,), # Cjk Unified Ideograph-4e..Cjk Unified Ideograph-9f
(0x0a000, 0x0a48c,), # Yi Syllable It ..Yi Syllable Yyr
(0x0a490, 0x0a4c6,), # Yi Radical Qot ..Yi Radical Ke
(0x0ac00, 0x0d7a3,), # Hangul Syllable Ga ..Hangul Syllable Hih
(0x0f900, 0x0fa2d,), # Cjk Compatibility Ideogr..Cjk Compatibility Ideogr
(0x0fa30, 0x0fa6a,), # Cjk Compatibility Ideogr..Cjk Compatibility Ideogr
(0x0fa70, 0x0fad9,), # Cjk Compatibility Ideogr..Cjk Compatibility Ideogr
(0x0fe10, 0x0fe19,), # Presentation Form For Ve..Presentation Form For Ve
(0x0fe30, 0x0fe52,), # Presentation Form For Ve..Small Full Stop
(0x0fe54, 0x0fe66,), # Small Semicolon ..Small Equals Sign
(0x0fe68, 0x0fe6b,), # Small Reverse Solidus ..Small Commercial At
(0x0ff01, 0x0ff60,), # Fullwidth Exclamation Ma..Fullwidth Right White Pa
(0x0ffe0, 0x0ffe6,), # Fullwidth Cent Sign ..Fullwidth Won Sign
(0x20000, 0x2fffd,), # Cjk Unified Ideograph-20..(nil)
(0x30000, 0x3fffd,), # Cjk Unified Ideograph-30..(nil)
),
'5.0.0': (
# Source: EastAsianWidth-5.0.0.txt
# Date: 2006-02-15, 14:39:00 PST [KW]
#
(0x01100, 0x01159,), # Hangul Choseong Kiyeok ..Hangul Choseong Yeorinhi
(0x0115f, 0x0115f,), # Hangul Choseong Filler
(0x02329, 0x0232a,), # Left-pointing Angle Brac..Right-pointing Angle Bra
(0x02e80, 0x02e99,), # Cjk Radical Repeat ..Cjk Radical Rap
(0x02e9b, 0x02ef3,), # Cjk Radical Choke ..Cjk Radical C-simplified
(0x02f00, 0x02fd5,), # Kangxi Radical One ..Kangxi Radical Flute
(0x02ff0, 0x02ffb,), # Ideographic Description ..Ideographic Description
(0x03000, 0x03029,), # Ideographic Space ..Hangzhou Numeral Nine
(0x03030, 0x0303e,), # Wavy Dash ..Ideographic Variation In
(0x03041, 0x03096,), # Hiragana Letter Small A ..Hiragana Letter Small Ke
(0x0309b, 0x030ff,), # Katakana-hiragana Voiced..Katakana Digraph Koto
(0x03105, 0x0312c,), # Bopomofo Letter B ..Bopomofo Letter Gn
(0x03131, 0x0318e,), # Hangul Letter Kiyeok ..Hangul Letter Araeae
(0x03190, 0x031b7,), # Ideographic Annotation L..Bopomofo Final Letter H
(0x031c0, 0x031cf,), # Cjk Stroke T ..Cjk Stroke N
(0x031f0, 0x0321e,), # Katakana Letter Small Ku..Parenthesized Korean Cha
(0x03220, 0x03243,), # Parenthesized Ideograph ..Parenthesized Ideograph
(0x03250, 0x032fe,), # Partnership Sign ..Circled Katakana Wo
(0x03300, 0x04db5,), # Square Apaato ..Cjk Unified Ideograph-4d
(0x04e00, 0x09fbb,), # Cjk Unified Ideograph-4e..Cjk Unified Ideograph-9f
(0x0a000, 0x0a48c,), # Yi Syllable It ..Yi Syllable Yyr
(0x0a490, 0x0a4c6,), # Yi Radical Qot ..Yi Radical Ke
(0x0ac00, 0x0d7a3,), # Hangul Syllable Ga ..Hangul Syllable Hih
(0x0f900, 0x0fa2d,), # Cjk Compatibility Ideogr..Cjk Compatibility Ideogr
(0x0fa30, 0x0fa6a,), # Cjk Compatibility Ideogr..Cjk Compatibility Ideogr
(0x0fa70, 0x0fad9,), # Cjk Compatibility Ideogr..Cjk Compatibility Ideogr
(0x0fe10, 0x0fe19,), # Presentation Form For Ve..Presentation Form For Ve
(0x0fe30, 0x0fe52,), # Presentation Form For Ve..Small Full Stop
(0x0fe54, 0x0fe66,), # Small Semicolon ..Small Equals Sign
(0x0fe68, 0x0fe6b,), # Small Reverse Solidus ..Small Commercial At
(0x0ff01, 0x0ff60,), # Fullwidth Exclamation Ma..Fullwidth Right White Pa
(0x0ffe0, 0x0ffe6,), # Fullwidth Cent Sign ..Fullwidth Won Sign
(0x20000, 0x2fffd,), # Cjk Unified Ideograph-20..(nil)
(0x30000, 0x3fffd,), # Cjk Unified Ideograph-30..(nil)
),
'5.1.0': (
# Source: EastAsianWidth-5.1.0.txt
# Date: 2008-03-20, 17:42:00 PDT [KW]
#
(0x01100, 0x01159,), # Hangul Choseong Kiyeok ..Hangul Choseong Yeorinhi
(0x0115f, 0x0115f,), # Hangul Choseong Filler
(0x02329, 0x0232a,), # Left-pointing Angle Brac..Right-pointing Angle Bra
(0x02e80, 0x02e99,), # Cjk Radical Repeat ..Cjk Radical Rap
(0x02e9b, 0x02ef3,), # Cjk Radical Choke ..Cjk Radical C-simplified
(0x02f00, 0x02fd5,), # Kangxi Radical One ..Kangxi Radical Flute
(0x02ff0, 0x02ffb,), # Ideographic Description ..Ideographic Description
(0x03000, 0x03029,), # Ideographic Space ..Hangzhou Numeral Nine
(0x03030, 0x0303e,), # Wavy Dash ..Ideographic Variation In
(0x03041, 0x03096,), # Hiragana Letter Small A ..Hiragana Letter Small Ke
(0x0309b, 0x030ff,), # Katakana-hiragana Voiced..Katakana Digraph Koto
(0x03105, 0x0312d,), # Bopomofo Letter B ..Bopomofo Letter Ih
(0x03131, 0x0318e,), # Hangul Letter Kiyeok ..Hangul Letter Araeae
(0x03190, 0x031b7,), # Ideographic Annotation L..Bopomofo Final Letter H
(0x031c0, 0x031e3,), # Cjk Stroke T ..Cjk Stroke Q
(0x031f0, 0x0321e,), # Katakana Letter Small Ku..Parenthesized Korean Cha
(0x03220, 0x03243,), # Parenthesized Ideograph ..Parenthesized Ideograph
(0x03250, 0x032fe,), # Partnership Sign ..Circled Katakana Wo
(0x03300, 0x04db5,), # Square Apaato ..Cjk Unified Ideograph-4d
(0x04e00, 0x09fc3,), # Cjk Unified Ideograph-4e..Cjk Unified Ideograph-9f
(0x0a000, 0x0a48c,), # Yi Syllable It ..Yi Syllable Yyr
(0x0a490, 0x0a4c6,), # Yi Radical Qot ..Yi Radical Ke
(0x0ac00, 0x0d7a3,), # Hangul Syllable Ga ..Hangul Syllable Hih
(0x0f900, 0x0fa2d,), # Cjk Compatibility Ideogr..Cjk Compatibility Ideogr
(0x0fa30, 0x0fa6a,), # Cjk Compatibility Ideogr..Cjk Compatibility Ideogr
(0x0fa70, 0x0fad9,), # Cjk Compatibility Ideogr..Cjk Compatibility Ideogr
(0x0fe10, 0x0fe19,), # Presentation Form For Ve..Presentation Form For Ve
(0x0fe30, 0x0fe52,), # Presentation Form For Ve..Small Full Stop
(0x0fe54, 0x0fe66,), # Small Semicolon ..Small Equals Sign
(0x0fe68, 0x0fe6b,), # Small Reverse Solidus ..Small Commercial At
(0x0ff01, 0x0ff60,), # Fullwidth Exclamation Ma..Fullwidth Right White Pa
(0x0ffe0, 0x0ffe6,), # Fullwidth Cent Sign ..Fullwidth Won Sign
(0x20000, 0x2fffd,), # Cjk Unified Ideograph-20..(nil)
(0x30000, 0x3fffd,), # Cjk Unified Ideograph-30..(nil)
),
'5.2.0': (
# Source: EastAsianWidth-5.2.0.txt
# Date: 2009-06-09, 17:47:00 PDT [KW]
#
(0x01100, 0x0115f,), # Hangul Choseong Kiyeok ..Hangul Choseong Filler
(0x02329, 0x0232a,), # Left-pointing Angle Brac..Right-pointing Angle Bra
(0x02e80, 0x02e99,), # Cjk Radical Repeat ..Cjk Radical Rap
(0x02e9b, 0x02ef3,), # Cjk Radical Choke ..Cjk Radical C-simplified
(0x02f00, 0x02fd5,), # Kangxi Radical One ..Kangxi Radical Flute
(0x02ff0, 0x02ffb,), # Ideographic Description ..Ideographic Description
(0x03000, 0x03029,), # Ideographic Space ..Hangzhou Numeral Nine
(0x03030, 0x0303e,), # Wavy Dash ..Ideographic Variation In
(0x03041, 0x03096,), # Hiragana Letter Small A ..Hiragana Letter Small Ke
(0x0309b, 0x030ff,), # Katakana-hiragana Voiced..Katakana Digraph Koto
(0x03105, 0x0312d,), # Bopomofo Letter B ..Bopomofo Letter Ih
(0x03131, 0x0318e,), # Hangul Letter Kiyeok ..Hangul Letter Araeae
(0x03190, 0x031b7,), # Ideographic Annotation L..Bopomofo Final Letter H
(0x031c0, 0x031e3,), # Cjk Stroke T ..Cjk Stroke Q
(0x031f0, 0x0321e,), # Katakana Letter Small Ku..Parenthesized Korean Cha
(0x03220, 0x03247,), # Parenthesized Ideograph ..Circled Ideograph Koto
(0x03250, 0x032fe,), # Partnership Sign ..Circled Katakana Wo
(0x03300, 0x04dbf,), # Square Apaato ..Cjk Unified Ideograph-4d
(0x04e00, 0x0a48c,), # Cjk Unified Ideograph-4e..Yi Syllable Yyr
(0x0a490, 0x0a4c6,), # Yi Radical Qot ..Yi Radical Ke
(0x0a960, 0x0a97c,), # Hangul Choseong Tikeut-m..Hangul Choseong Ssangyeo
(0x0ac00, 0x0d7a3,), # Hangul Syllable Ga ..Hangul Syllable Hih
(0x0f900, 0x0faff,), # Cjk Compatibility Ideogr..(nil)
(0x0fe10, 0x0fe19,), # Presentation Form For Ve..Presentation Form For Ve
(0x0fe30, 0x0fe52,), # Presentation Form For Ve..Small Full Stop
(0x0fe54, 0x0fe66,), # Small Semicolon ..Small Equals Sign
(0x0fe68, 0x0fe6b,), # Small Reverse Solidus ..Small Commercial At
(0x0ff01, 0x0ff60,), # Fullwidth Exclamation Ma..Fullwidth Right White Pa
(0x0ffe0, 0x0ffe6,), # Fullwidth Cent Sign ..Fullwidth Won Sign
(0x1f200, 0x1f200,), # Square Hiragana Hoka
(0x1f210, 0x1f231,), # Squared Cjk Unified Ideo..Squared Cjk Unified Ideo
(0x1f240, 0x1f248,), # Tortoise Shell Bracketed..Tortoise Shell Bracketed
(0x20000, 0x2fffd,), # Cjk Unified Ideograph-20..(nil)
(0x30000, 0x3fffd,), # Cjk Unified Ideograph-30..(nil)
),
'6.0.0': (
# Source: EastAsianWidth-6.0.0.txt
# Date: 2010-08-17, 12:17:00 PDT [KW]
#
(0x01100, 0x0115f,), # Hangul Choseong Kiyeok ..Hangul Choseong Filler
(0x02329, 0x0232a,), # Left-pointing Angle Brac..Right-pointing Angle Bra
(0x02e80, 0x02e99,), # Cjk Radical Repeat ..Cjk Radical Rap
(0x02e9b, 0x02ef3,), # Cjk Radical Choke ..Cjk Radical C-simplified
(0x02f00, 0x02fd5,), # Kangxi Radical One ..Kangxi Radical Flute
(0x02ff0, 0x02ffb,), # Ideographic Description ..Ideographic Description
(0x03000, 0x03029,), # Ideographic Space ..Hangzhou Numeral Nine
(0x03030, 0x0303e,), # Wavy Dash ..Ideographic Variation In
(0x03041, 0x03096,), # Hiragana Letter Small A ..Hiragana Letter Small Ke
(0x0309b, 0x030ff,), # Katakana-hiragana Voiced..Katakana Digraph Koto
(0x03105, 0x0312d,), # Bopomofo Letter B ..Bopomofo Letter Ih
(0x03131, 0x0318e,), # Hangul Letter Kiyeok ..Hangul Letter Araeae
(0x03190, 0x031ba,), # Ideographic Annotation L..Bopomofo Letter Zy
(0x031c0, 0x031e3,), # Cjk Stroke T ..Cjk Stroke Q
(0x031f0, 0x0321e,), # Katakana Letter Small Ku..Parenthesized Korean Cha
(0x03220, 0x03247,), # Parenthesized Ideograph ..Circled Ideograph Koto
(0x03250, 0x032fe,), # Partnership Sign ..Circled Katakana Wo
(0x03300, 0x04dbf,), # Square Apaato ..Cjk Unified Ideograph-4d
(0x04e00, 0x0a48c,), # Cjk Unified Ideograph-4e..Yi Syllable Yyr
(0x0a490, 0x0a4c6,), # Yi Radical Qot ..Yi Radical Ke
(0x0a960, 0x0a97c,), # Hangul Choseong Tikeut-m..Hangul Choseong Ssangyeo
(0x0ac00, 0x0d7a3,), # Hangul Syllable Ga ..Hangul Syllable Hih
(0x0f900, 0x0faff,), # Cjk Compatibility Ideogr..(nil)
(0x0fe10, 0x0fe19,), # Presentation Form For Ve..Presentation Form For Ve
(0x0fe30, 0x0fe52,), # Presentation Form For Ve..Small Full Stop
(0x0fe54, 0x0fe66,), # Small Semicolon ..Small Equals Sign
(0x0fe68, 0x0fe6b,), # Small Reverse Solidus ..Small Commercial At
(0x0ff01, 0x0ff60,), # Fullwidth Exclamation Ma..Fullwidth Right White Pa
(0x0ffe0, 0x0ffe6,), # Fullwidth Cent Sign ..Fullwidth Won Sign
(0x1b000, 0x1b001,), # Katakana Letter Archaic ..Hiragana Letter Archaic
(0x1f200, 0x1f202,), # Square Hiragana Hoka ..Squared Katakana Sa
(0x1f210, 0x1f23a,), # Squared Cjk Unified Ideo..Squared Cjk Unified Ideo
(0x1f240, 0x1f248,), # Tortoise Shell Bracketed..Tortoise Shell Bracketed
(0x1f250, 0x1f251,), # Circled Ideograph Advant..Circled Ideograph Accept
(0x20000, 0x2fffd,), # Cjk Unified Ideograph-20..(nil)
(0x30000, 0x3fffd,), # Cjk Unified Ideograph-30..(nil)
),
'6.1.0': (
# Source: EastAsianWidth-6.1.0.txt
# Date: 2011-09-19, 18:46:00 GMT [KW]
#
(0x01100, 0x0115f,), # Hangul Choseong Kiyeok ..Hangul Choseong Filler
(0x02329, 0x0232a,), # Left-pointing Angle Brac..Right-pointing Angle Bra
(0x02e80, 0x02e99,), # Cjk Radical Repeat ..Cjk Radical Rap
(0x02e9b, 0x02ef3,), # Cjk Radical Choke ..Cjk Radical C-simplified
(0x02f00, 0x02fd5,), # Kangxi Radical One ..Kangxi Radical Flute
(0x02ff0, 0x02ffb,), # Ideographic Description ..Ideographic Description
(0x03000, 0x03029,), # Ideographic Space ..Hangzhou Numeral Nine
(0x03030, 0x0303e,), # Wavy Dash ..Ideographic Variation In
(0x03041, 0x03096,), # Hiragana Letter Small A ..Hiragana Letter Small Ke
(0x0309b, 0x030ff,), # Katakana-hiragana Voiced..Katakana Digraph Koto
(0x03105, 0x0312d,), # Bopomofo Letter B ..Bopomofo Letter Ih
(0x03131, 0x0318e,), # Hangul Letter Kiyeok ..Hangul Letter Araeae
(0x03190, 0x031ba,), # Ideographic Annotation L..Bopomofo Letter Zy
(0x031c0, 0x031e3,), # Cjk Stroke T ..Cjk Stroke Q
(0x031f0, 0x0321e,), # Katakana Letter Small Ku..Parenthesized Korean Cha
(0x03220, 0x03247,), # Parenthesized Ideograph ..Circled Ideograph Koto
(0x03250, 0x032fe,), # Partnership Sign ..Circled Katakana Wo
(0x03300, 0x04dbf,), # Square Apaato ..Cjk Unified Ideograph-4d
(0x04e00, 0x0a48c,), # Cjk Unified Ideograph-4e..Yi Syllable Yyr
(0x0a490, 0x0a4c6,), # Yi Radical Qot ..Yi Radical Ke
(0x0a960, 0x0a97c,), # Hangul Choseong Tikeut-m..Hangul Choseong Ssangyeo
(0x0ac00, 0x0d7a3,), # Hangul Syllable Ga ..Hangul Syllable Hih
(0x0f900, 0x0faff,), # Cjk Compatibility Ideogr..(nil)
(0x0fe10, 0x0fe19,), # Presentation Form For Ve..Presentation Form For Ve
(0x0fe30, 0x0fe52,), # Presentation Form For Ve..Small Full Stop
(0x0fe54, 0x0fe66,), # Small Semicolon ..Small Equals Sign
(0x0fe68, 0x0fe6b,), # Small Reverse Solidus ..Small Commercial At
(0x0ff01, 0x0ff60,), # Fullwidth Exclamation Ma..Fullwidth Right White Pa
(0x0ffe0, 0x0ffe6,), # Fullwidth Cent Sign ..Fullwidth Won Sign
(0x1b000, 0x1b001,), # Katakana Letter Archaic ..Hiragana Letter Archaic
(0x1f200, 0x1f202,), # Square Hiragana Hoka ..Squared Katakana Sa
(0x1f210, 0x1f23a,), # Squared Cjk Unified Ideo..Squared Cjk Unified Ideo
(0x1f240, 0x1f248,), # Tortoise Shell Bracketed..Tortoise Shell Bracketed
(0x1f250, 0x1f251,), # Circled Ideograph Advant..Circled Ideograph Accept
(0x20000, 0x2fffd,), # Cjk Unified Ideograph-20..(nil)
(0x30000, 0x3fffd,), # Cjk Unified Ideograph-30..(nil)
),
'6.2.0': (
# Source: EastAsianWidth-6.2.0.txt
# Date: 2012-05-15, 18:30:00 GMT [KW]
#
(0x01100, 0x0115f,), # Hangul Choseong Kiyeok ..Hangul Choseong Filler
(0x02329, 0x0232a,), # Left-pointing Angle Brac..Right-pointing Angle Bra
(0x02e80, 0x02e99,), # Cjk Radical Repeat ..Cjk Radical Rap
(0x02e9b, 0x02ef3,), # Cjk Radical Choke ..Cjk Radical C-simplified
(0x02f00, 0x02fd5,), # Kangxi Radical One ..Kangxi Radical Flute
(0x02ff0, 0x02ffb,), # Ideographic Description ..Ideographic Description
(0x03000, 0x03029,), # Ideographic Space ..Hangzhou Numeral Nine
(0x03030, 0x0303e,), # Wavy Dash ..Ideographic Variation In
(0x03041, 0x03096,), # Hiragana Letter Small A ..Hiragana Letter Small Ke
(0x0309b, 0x030ff,), # Katakana-hiragana Voiced..Katakana Digraph Koto
(0x03105, 0x0312d,), # Bopomofo Letter B ..Bopomofo Letter Ih
(0x03131, 0x0318e,), # Hangul Letter Kiyeok ..Hangul Letter Araeae
(0x03190, 0x031ba,), # Ideographic Annotation L..Bopomofo Letter Zy
(0x031c0, 0x031e3,), # Cjk Stroke T ..Cjk Stroke Q
(0x031f0, 0x0321e,), # Katakana Letter Small Ku..Parenthesized Korean Cha
(0x03220, 0x03247,), # Parenthesized Ideograph ..Circled Ideograph Koto
(0x03250, 0x032fe,), # Partnership Sign ..Circled Katakana Wo
(0x03300, 0x04dbf,), # Square Apaato ..Cjk Unified Ideograph-4d
(0x04e00, 0x0a48c,), # Cjk Unified Ideograph-4e..Yi Syllable Yyr
(0x0a490, 0x0a4c6,), # Yi Radical Qot ..Yi Radical Ke
(0x0a960, 0x0a97c,), # Hangul Choseong Tikeut-m..Hangul Choseong Ssangyeo
(0x0ac00, 0x0d7a3,), # Hangul Syllable Ga ..Hangul Syllable Hih
(0x0f900, 0x0faff,), # Cjk Compatibility Ideogr..(nil)
(0x0fe10, 0x0fe19,), # Presentation Form For Ve..Presentation Form For Ve
(0x0fe30, 0x0fe52,), # Presentation Form For Ve..Small Full Stop
(0x0fe54, 0x0fe66,), # Small Semicolon ..Small Equals Sign
(0x0fe68, 0x0fe6b,), # Small Reverse Solidus ..Small Commercial At
(0x0ff01, 0x0ff60,), # Fullwidth Exclamation Ma..Fullwidth Right White Pa
(0x0ffe0, 0x0ffe6,), # Fullwidth Cent Sign ..Fullwidth Won Sign
(0x1b000, 0x1b001,), # Katakana Letter Archaic ..Hiragana Letter Archaic
(0x1f200, 0x1f202,), # Square Hiragana Hoka ..Squared Katakana Sa
(0x1f210, 0x1f23a,), # Squared Cjk Unified Ideo..Squared Cjk Unified Ideo
(0x1f240, 0x1f248,), # Tortoise Shell Bracketed..Tortoise Shell Bracketed
(0x1f250, 0x1f251,), # Circled Ideograph Advant..Circled Ideograph Accept
(0x20000, 0x2fffd,), # Cjk Unified Ideograph-20..(nil)
(0x30000, 0x3fffd,), # Cjk Unified Ideograph-30..(nil)
),
'6.3.0': (
# Source: EastAsianWidth-6.3.0.txt
# Date: 2013-02-05, 20:09:00 GMT [KW, LI]
#
(0x01100, 0x0115f,), # Hangul Choseong Kiyeok ..Hangul Choseong Filler
(0x02329, 0x0232a,), # Left-pointing Angle Brac..Right-pointing Angle Bra
(0x02e80, 0x02e99,), # Cjk Radical Repeat ..Cjk Radical Rap
(0x02e9b, 0x02ef3,), # Cjk Radical Choke ..Cjk Radical C-simplified
(0x02f00, 0x02fd5,), # Kangxi Radical One ..Kangxi Radical Flute
(0x02ff0, 0x02ffb,), # Ideographic Description ..Ideographic Description
(0x03000, 0x03029,), # Ideographic Space ..Hangzhou Numeral Nine
(0x03030, 0x0303e,), # Wavy Dash ..Ideographic Variation In
(0x03041, 0x03096,), # Hiragana Letter Small A ..Hiragana Letter Small Ke
(0x0309b, 0x030ff,), # Katakana-hiragana Voiced..Katakana Digraph Koto
(0x03105, 0x0312d,), # Bopomofo Letter B ..Bopomofo Letter Ih
(0x03131, 0x0318e,), # Hangul Letter Kiyeok ..Hangul Letter Araeae
(0x03190, 0x031ba,), # Ideographic Annotation L..Bopomofo Letter Zy
(0x031c0, 0x031e3,), # Cjk Stroke T ..Cjk Stroke Q
(0x031f0, 0x0321e,), # Katakana Letter Small Ku..Parenthesized Korean Cha
(0x03220, 0x03247,), # Parenthesized Ideograph ..Circled Ideograph Koto
(0x03250, 0x032fe,), # Partnership Sign ..Circled Katakana Wo
(0x03300, 0x04dbf,), # Square Apaato ..Cjk Unified Ideograph-4d
(0x04e00, 0x0a48c,), # Cjk Unified Ideograph-4e..Yi Syllable Yyr
(0x0a490, 0x0a4c6,), # Yi Radical Qot ..Yi Radical Ke
(0x0a960, 0x0a97c,), # Hangul Choseong Tikeut-m..Hangul Choseong Ssangyeo
(0x0ac00, 0x0d7a3,), # Hangul Syllable Ga ..Hangul Syllable Hih
(0x0f900, 0x0faff,), # Cjk Compatibility Ideogr..(nil)
(0x0fe10, 0x0fe19,), # Presentation Form For Ve..Presentation Form For Ve
(0x0fe30, 0x0fe52,), # Presentation Form For Ve..Small Full Stop
(0x0fe54, 0x0fe66,), # Small Semicolon ..Small Equals Sign
(0x0fe68, 0x0fe6b,), # Small Reverse Solidus ..Small Commercial At
(0x0ff01, 0x0ff60,), # Fullwidth Exclamation Ma..Fullwidth Right White Pa
(0x0ffe0, 0x0ffe6,), # Fullwidth Cent Sign ..Fullwidth Won Sign
(0x1b000, 0x1b001,), # Katakana Letter Archaic ..Hiragana Letter Archaic
(0x1f200, 0x1f202,), # Square Hiragana Hoka ..Squared Katakana Sa
(0x1f210, 0x1f23a,), # Squared Cjk Unified Ideo..Squared Cjk Unified Ideo
(0x1f240, 0x1f248,), # Tortoise Shell Bracketed..Tortoise Shell Bracketed
(0x1f250, 0x1f251,), # Circled Ideograph Advant..Circled Ideograph Accept
(0x20000, 0x2fffd,), # Cjk Unified Ideograph-20..(nil)
(0x30000, 0x3fffd,), # Cjk Unified Ideograph-30..(nil)
),
'7.0.0': (
# Source: EastAsianWidth-7.0.0.txt
# Date: 2014-02-28, 23:15:00 GMT [KW, LI]
#
(0x01100, 0x0115f,), # Hangul Choseong Kiyeok ..Hangul Choseong Filler
(0x02329, 0x0232a,), # Left-pointing Angle Brac..Right-pointing Angle Bra
(0x02e80, 0x02e99,), # Cjk Radical Repeat ..Cjk Radical Rap
(0x02e9b, 0x02ef3,), # Cjk Radical Choke ..Cjk Radical C-simplified
(0x02f00, 0x02fd5,), # Kangxi Radical One ..Kangxi Radical Flute
(0x02ff0, 0x02ffb,), # Ideographic Description ..Ideographic Description
(0x03000, 0x03029,), # Ideographic Space ..Hangzhou Numeral Nine
(0x03030, 0x0303e,), # Wavy Dash ..Ideographic Variation In
(0x03041, 0x03096,), # Hiragana Letter Small A ..Hiragana Letter Small Ke
(0x0309b, 0x030ff,), # Katakana-hiragana Voiced..Katakana Digraph Koto
(0x03105, 0x0312d,), # Bopomofo Letter B ..Bopomofo Letter Ih
(0x03131, 0x0318e,), # Hangul Letter Kiyeok ..Hangul Letter Araeae
(0x03190, 0x031ba,), # Ideographic Annotation L..Bopomofo Letter Zy
(0x031c0, 0x031e3,), # Cjk Stroke T ..Cjk Stroke Q
(0x031f0, 0x0321e,), # Katakana Letter Small Ku..Parenthesized Korean Cha
(0x03220, 0x03247,), # Parenthesized Ideograph ..Circled Ideograph Koto
(0x03250, 0x032fe,), # Partnership Sign ..Circled Katakana Wo
(0x03300, 0x04dbf,), # Square Apaato ..Cjk Unified Ideograph-4d
(0x04e00, 0x0a48c,), # Cjk Unified Ideograph-4e..Yi Syllable Yyr
(0x0a490, 0x0a4c6,), # Yi Radical Qot ..Yi Radical Ke
(0x0a960, 0x0a97c,), # Hangul Choseong Tikeut-m..Hangul Choseong Ssangyeo
(0x0ac00, 0x0d7a3,), # Hangul Syllable Ga ..Hangul Syllable Hih
(0x0f900, 0x0faff,), # Cjk Compatibility Ideogr..(nil)
(0x0fe10, 0x0fe19,), # Presentation Form For Ve..Presentation Form For Ve
(0x0fe30, 0x0fe52,), # Presentation Form For Ve..Small Full Stop
(0x0fe54, 0x0fe66,), # Small Semicolon ..Small Equals Sign
(0x0fe68, 0x0fe6b,), # Small Reverse Solidus ..Small Commercial At
(0x0ff01, 0x0ff60,), # Fullwidth Exclamation Ma..Fullwidth Right White Pa
(0x0ffe0, 0x0ffe6,), # Fullwidth Cent Sign ..Fullwidth Won Sign
(0x1b000, 0x1b001,), # Katakana Letter Archaic ..Hiragana Letter Archaic
(0x1f200, 0x1f202,), # Square Hiragana Hoka ..Squared Katakana Sa
(0x1f210, 0x1f23a,), # Squared Cjk Unified Ideo..Squared Cjk Unified Ideo
(0x1f240, 0x1f248,), # Tortoise Shell Bracketed..Tortoise Shell Bracketed
(0x1f250, 0x1f251,), # Circled Ideograph Advant..Circled Ideograph Accept
(0x20000, 0x2fffd,), # Cjk Unified Ideograph-20..(nil)
(0x30000, 0x3fffd,), # Cjk Unified Ideograph-30..(nil)
),
'8.0.0': (
# Source: EastAsianWidth-8.0.0.txt
# Date: 2015-02-10, 21:00:00 GMT [KW, LI]
#
(0x01100, 0x0115f,), # Hangul Choseong Kiyeok ..Hangul Choseong Filler
(0x02329, 0x0232a,), # Left-pointing Angle Brac..Right-pointing Angle Bra
(0x02e80, 0x02e99,), # Cjk Radical Repeat ..Cjk Radical Rap
(0x02e9b, 0x02ef3,), # Cjk Radical Choke ..Cjk Radical C-simplified
(0x02f00, 0x02fd5,), # Kangxi Radical One ..Kangxi Radical Flute
(0x02ff0, 0x02ffb,), # Ideographic Description ..Ideographic Description
(0x03000, 0x03029,), # Ideographic Space ..Hangzhou Numeral Nine
(0x03030, 0x0303e,), # Wavy Dash ..Ideographic Variation In
(0x03041, 0x03096,), # Hiragana Letter Small A ..Hiragana Letter Small Ke
(0x0309b, 0x030ff,), # Katakana-hiragana Voiced..Katakana Digraph Koto
(0x03105, 0x0312d,), # Bopomofo Letter B ..Bopomofo Letter Ih
(0x03131, 0x0318e,), # Hangul Letter Kiyeok ..Hangul Letter Araeae
(0x03190, 0x031ba,), # Ideographic Annotation L..Bopomofo Letter Zy
(0x031c0, 0x031e3,), # Cjk Stroke T ..Cjk Stroke Q
(0x031f0, 0x0321e,), # Katakana Letter Small Ku..Parenthesized Korean Cha
(0x03220, 0x03247,), # Parenthesized Ideograph ..Circled Ideograph Koto
(0x03250, 0x032fe,), # Partnership Sign ..Circled Katakana Wo
(0x03300, 0x04dbf,), # Square Apaato ..Cjk Unified Ideograph-4d
(0x04e00, 0x0a48c,), # Cjk Unified Ideograph-4e..Yi Syllable Yyr
(0x0a490, 0x0a4c6,), # Yi Radical Qot ..Yi Radical Ke
(0x0a960, 0x0a97c,), # Hangul Choseong Tikeut-m..Hangul Choseong Ssangyeo
(0x0ac00, 0x0d7a3,), # Hangul Syllable Ga ..Hangul Syllable Hih
(0x0f900, 0x0faff,), # Cjk Compatibility Ideogr..(nil)
(0x0fe10, 0x0fe19,), # Presentation Form For Ve..Presentation Form For Ve
(0x0fe30, 0x0fe52,), # Presentation Form For Ve..Small Full Stop
(0x0fe54, 0x0fe66,), # Small Semicolon ..Small Equals Sign
(0x0fe68, 0x0fe6b,), # Small Reverse Solidus ..Small Commercial At
(0x0ff01, 0x0ff60,), # Fullwidth Exclamation Ma..Fullwidth Right White Pa
(0x0ffe0, 0x0ffe6,), # Fullwidth Cent Sign ..Fullwidth Won Sign
(0x1b000, 0x1b001,), # Katakana Letter Archaic ..Hiragana Letter Archaic
(0x1f200, 0x1f202,), # Square Hiragana Hoka ..Squared Katakana Sa
(0x1f210, 0x1f23a,), # Squared Cjk Unified Ideo..Squared Cjk Unified Ideo
(0x1f240, 0x1f248,), # Tortoise Shell Bracketed..Tortoise Shell Bracketed
(0x1f250, 0x1f251,), # Circled Ideograph Advant..Circled Ideograph Accept
(0x20000, 0x2fffd,), # Cjk Unified Ideograph-20..(nil)
(0x30000, 0x3fffd,), # Cjk Unified Ideograph-30..(nil)
),
'9.0.0': (
# Source: EastAsianWidth-9.0.0.txt
# Date: 2016-05-27, 17:00:00 GMT [KW, LI]
#
(0x01100, 0x0115f,), # Hangul Choseong Kiyeok ..Hangul Choseong Filler
(0x0231a, 0x0231b,), # Watch ..Hourglass
(0x02329, 0x0232a,), # Left-pointing Angle Brac..Right-pointing Angle Bra
(0x023e9, 0x023ec,), # Black Right-pointing Dou..Black Down-pointing Doub
(0x023f0, 0x023f0,), # Alarm Clock
(0x023f3, 0x023f3,), # Hourglass With Flowing Sand
(0x025fd, 0x025fe,), # White Medium Small Squar..Black Medium Small Squar
(0x02614, 0x02615,), # Umbrella With Rain Drops..Hot Beverage
(0x02648, 0x02653,), # Aries ..Pisces
(0x0267f, 0x0267f,), # Wheelchair Symbol
(0x02693, 0x02693,), # Anchor
(0x026a1, 0x026a1,), # High Voltage Sign
(0x026aa, 0x026ab,), # Medium White Circle ..Medium Black Circle
(0x026bd, 0x026be,), # Soccer Ball ..Baseball
(0x026c4, 0x026c5,), # Snowman Without Snow ..Sun Behind Cloud
(0x026ce, 0x026ce,), # Ophiuchus
(0x026d4, 0x026d4,), # No Entry
(0x026ea, 0x026ea,), # Church
(0x026f2, 0x026f3,), # Fountain ..Flag In Hole
(0x026f5, 0x026f5,), # Sailboat
(0x026fa, 0x026fa,), # Tent
(0x026fd, 0x026fd,), # Fuel Pump
(0x02705, 0x02705,), # White Heavy Check Mark
(0x0270a, 0x0270b,), # Raised Fist ..Raised Hand
(0x02728, 0x02728,), # Sparkles
(0x0274c, 0x0274c,), # Cross Mark
(0x0274e, 0x0274e,), # Negative Squared Cross Mark
(0x02753, 0x02755,), # Black Question Mark Orna..White Exclamation Mark O
(0x02757, 0x02757,), # Heavy Exclamation Mark Symbol
(0x02795, 0x02797,), # Heavy Plus Sign ..Heavy Division Sign
(0x027b0, 0x027b0,), # Curly Loop
(0x027bf, 0x027bf,), # Double Curly Loop
(0x02b1b, 0x02b1c,), # Black Large Square ..White Large Square
(0x02b50, 0x02b50,), # White Medium Star
(0x02b55, 0x02b55,), # Heavy Large Circle
(0x02e80, 0x02e99,), # Cjk Radical Repeat ..Cjk Radical Rap
(0x02e9b, 0x02ef3,), # Cjk Radical Choke ..Cjk Radical C-simplified
(0x02f00, 0x02fd5,), # Kangxi Radical One ..Kangxi Radical Flute
(0x02ff0, 0x02ffb,), # Ideographic Description ..Ideographic Description
(0x03000, 0x03029,), # Ideographic Space ..Hangzhou Numeral Nine
(0x03030, 0x0303e,), # Wavy Dash ..Ideographic Variation In
(0x03041, 0x03096,), # Hiragana Letter Small A ..Hiragana Letter Small Ke
(0x0309b, 0x030ff,), # Katakana-hiragana Voiced..Katakana Digraph Koto
(0x03105, 0x0312d,), # Bopomofo Letter B ..Bopomofo Letter Ih
(0x03131, 0x0318e,), # Hangul Letter Kiyeok ..Hangul Letter Araeae
(0x03190, 0x031ba,), # Ideographic Annotation L..Bopomofo Letter Zy
(0x031c0, 0x031e3,), # Cjk Stroke T ..Cjk Stroke Q
(0x031f0, 0x0321e,), # Katakana Letter Small Ku..Parenthesized Korean Cha
(0x03220, 0x03247,), # Parenthesized Ideograph ..Circled Ideograph Koto
(0x03250, 0x032fe,), # Partnership Sign ..Circled Katakana Wo
(0x03300, 0x04dbf,), # Square Apaato ..Cjk Unified Ideograph-4d
(0x04e00, 0x0a48c,), # Cjk Unified Ideograph-4e..Yi Syllable Yyr
(0x0a490, 0x0a4c6,), # Yi Radical Qot ..Yi Radical Ke
(0x0a960, 0x0a97c,), # Hangul Choseong Tikeut-m..Hangul Choseong Ssangyeo
(0x0ac00, 0x0d7a3,), # Hangul Syllable Ga ..Hangul Syllable Hih
(0x0f900, 0x0faff,), # Cjk Compatibility Ideogr..(nil)
(0x0fe10, 0x0fe19,), # Presentation Form For Ve..Presentation Form For Ve
(0x0fe30, 0x0fe52,), # Presentation Form For Ve..Small Full Stop
(0x0fe54, 0x0fe66,), # Small Semicolon ..Small Equals Sign
(0x0fe68, 0x0fe6b,), # Small Reverse Solidus ..Small Commercial At
(0x0ff01, 0x0ff60,), # Fullwidth Exclamation Ma..Fullwidth Right White Pa
(0x0ffe0, 0x0ffe6,), # Fullwidth Cent Sign ..Fullwidth Won Sign
(0x16fe0, 0x16fe0,), # Tangut Iteration Mark
(0x17000, 0x187ec,), # (nil)
(0x18800, 0x18af2,), # Tangut Component-001 ..Tangut Component-755
(0x1b000, 0x1b001,), # Katakana Letter Archaic ..Hiragana Letter Archaic
(0x1f004, 0x1f004,), # Mahjong Tile Red Dragon
(0x1f0cf, 0x1f0cf,), # Playing Card Black Joker
(0x1f18e, 0x1f18e,), # Negative Squared Ab
(0x1f191, 0x1f19a,), # Squared Cl ..Squared Vs
(0x1f200, 0x1f202,), # Square Hiragana Hoka ..Squared Katakana Sa
(0x1f210, 0x1f23b,), # Squared Cjk Unified Ideo..Squared Cjk Unified Ideo
(0x1f240, 0x1f248,), # Tortoise Shell Bracketed..Tortoise Shell Bracketed
(0x1f250, 0x1f251,), # Circled Ideograph Advant..Circled Ideograph Accept
(0x1f300, 0x1f320,), # Cyclone ..Shooting Star
(0x1f32d, 0x1f335,), # Hot Dog ..Cactus
(0x1f337, 0x1f37c,), # Tulip ..Baby Bottle
(0x1f37e, 0x1f393,), # Bottle With Popping Cork..Graduation Cap
(0x1f3a0, 0x1f3ca,), # Carousel Horse ..Swimmer
(0x1f3cf, 0x1f3d3,), # Cricket Bat And Ball ..Table Tennis Paddle And
(0x1f3e0, 0x1f3f0,), # House Building ..European Castle
(0x1f3f4, 0x1f3f4,), # Waving Black Flag
(0x1f3f8, 0x1f3fa,), # Badminton Racquet And Sh..Amphora
(0x1f400, 0x1f43e,), # Rat ..Paw Prints
(0x1f440, 0x1f440,), # Eyes
(0x1f442, 0x1f4fc,), # Ear ..Videocassette
(0x1f4ff, 0x1f53d,), # Prayer Beads ..Down-pointing Small Red
(0x1f54b, 0x1f54e,), # Kaaba ..Menorah With Nine Branch
(0x1f550, 0x1f567,), # Clock Face One Oclock ..Clock Face Twelve-thirty
(0x1f57a, 0x1f57a,), # Man Dancing
(0x1f595, 0x1f596,), # Reversed Hand With Middl..Raised Hand With Part Be
(0x1f5a4, 0x1f5a4,), # Black Heart
(0x1f5fb, 0x1f64f,), # Mount Fuji ..Person With Folded Hands
(0x1f680, 0x1f6c5,), # Rocket ..Left Luggage
(0x1f6cc, 0x1f6cc,), # Sleeping Accommodation
(0x1f6d0, 0x1f6d2,), # Place Of Worship ..Shopping Trolley
(0x1f6eb, 0x1f6ec,), # Airplane Departure ..Airplane Arriving
(0x1f6f4, 0x1f6f6,), # Scooter ..Canoe
(0x1f910, 0x1f91e,), # Zipper-mouth Face ..Hand With Index And Midd
(0x1f920, 0x1f927,), # Face With Cowboy Hat ..Sneezing Face
(0x1f930, 0x1f930,), # Pregnant Woman
(0x1f933, 0x1f93e,), # Selfie ..Handball
(0x1f940, 0x1f94b,), # Wilted Flower ..Martial Arts Uniform
(0x1f950, 0x1f95e,), # Croissant ..Pancakes
(0x1f980, 0x1f991,), # Crab ..Squid
(0x1f9c0, 0x1f9c0,), # Cheese Wedge
(0x20000, 0x2fffd,), # Cjk Unified Ideograph-20..(nil)
(0x30000, 0x3fffd,), # Cjk Unified Ideograph-30..(nil)
),
'10.0.0': (
# Source: EastAsianWidth-10.0.0.txt
# Date: 2017-03-08, 02:00:00 GMT [KW, LI]
#
(0x01100, 0x0115f,), # Hangul Choseong Kiyeok ..Hangul Choseong Filler
(0x0231a, 0x0231b,), # Watch ..Hourglass
(0x02329, 0x0232a,), # Left-pointing Angle Brac..Right-pointing Angle Bra
(0x023e9, 0x023ec,), # Black Right-pointing Dou..Black Down-pointing Doub
(0x023f0, 0x023f0,), # Alarm Clock
(0x023f3, 0x023f3,), # Hourglass With Flowing Sand
(0x025fd, 0x025fe,), # White Medium Small Squar..Black Medium Small Squar
(0x02614, 0x02615,), # Umbrella With Rain Drops..Hot Beverage
(0x02648, 0x02653,), # Aries ..Pisces
(0x0267f, 0x0267f,), # Wheelchair Symbol
(0x02693, 0x02693,), # Anchor
(0x026a1, 0x026a1,), # High Voltage Sign
(0x026aa, 0x026ab,), # Medium White Circle ..Medium Black Circle
(0x026bd, 0x026be,), # Soccer Ball ..Baseball
(0x026c4, 0x026c5,), # Snowman Without Snow ..Sun Behind Cloud
(0x026ce, 0x026ce,), # Ophiuchus
(0x026d4, 0x026d4,), # No Entry
(0x026ea, 0x026ea,), # Church
(0x026f2, 0x026f3,), # Fountain ..Flag In Hole
(0x026f5, 0x026f5,), # Sailboat
(0x026fa, 0x026fa,), # Tent
(0x026fd, 0x026fd,), # Fuel Pump
(0x02705, 0x02705,), # White Heavy Check Mark
(0x0270a, 0x0270b,), # Raised Fist ..Raised Hand
(0x02728, 0x02728,), # Sparkles
(0x0274c, 0x0274c,), # Cross Mark
(0x0274e, 0x0274e,), # Negative Squared Cross Mark
(0x02753, 0x02755,), # Black Question Mark Orna..White Exclamation Mark O
(0x02757, 0x02757,), # Heavy Exclamation Mark Symbol
(0x02795, 0x02797,), # Heavy Plus Sign ..Heavy Division Sign
(0x027b0, 0x027b0,), # Curly Loop
(0x027bf, 0x027bf,), # Double Curly Loop
(0x02b1b, 0x02b1c,), # Black Large Square ..White Large Square
(0x02b50, 0x02b50,), # White Medium Star
(0x02b55, 0x02b55,), # Heavy Large Circle
(0x02e80, 0x02e99,), # Cjk Radical Repeat ..Cjk Radical Rap
(0x02e9b, 0x02ef3,), # Cjk Radical Choke ..Cjk Radical C-simplified
(0x02f00, 0x02fd5,), # Kangxi Radical One ..Kangxi Radical Flute
(0x02ff0, 0x02ffb,), # Ideographic Description ..Ideographic Description
(0x03000, 0x03029,), # Ideographic Space ..Hangzhou Numeral Nine
(0x03030, 0x0303e,), # Wavy Dash ..Ideographic Variation In
(0x03041, 0x03096,), # Hiragana Letter Small A ..Hiragana Letter Small Ke
(0x0309b, 0x030ff,), # Katakana-hiragana Voiced..Katakana Digraph Koto
(0x03105, 0x0312e,), # Bopomofo Letter B ..Bopomofo Letter O With D
(0x03131, 0x0318e,), # Hangul Letter Kiyeok ..Hangul Letter Araeae
(0x03190, 0x031ba,), # Ideographic Annotation L..Bopomofo Letter Zy
(0x031c0, 0x031e3,), # Cjk Stroke T ..Cjk Stroke Q
(0x031f0, 0x0321e,), # Katakana Letter Small Ku..Parenthesized Korean Cha
(0x03220, 0x03247,), # Parenthesized Ideograph ..Circled Ideograph Koto
(0x03250, 0x032fe,), # Partnership Sign ..Circled Katakana Wo
(0x03300, 0x04dbf,), # Square Apaato ..Cjk Unified Ideograph-4d
(0x04e00, 0x0a48c,), # Cjk Unified Ideograph-4e..Yi Syllable Yyr
(0x0a490, 0x0a4c6,), # Yi Radical Qot ..Yi Radical Ke
(0x0a960, 0x0a97c,), # Hangul Choseong Tikeut-m..Hangul Choseong Ssangyeo
(0x0ac00, 0x0d7a3,), # Hangul Syllable Ga ..Hangul Syllable Hih
(0x0f900, 0x0faff,), # Cjk Compatibility Ideogr..(nil)
(0x0fe10, 0x0fe19,), # Presentation Form For Ve..Presentation Form For Ve
(0x0fe30, 0x0fe52,), # Presentation Form For Ve..Small Full Stop
(0x0fe54, 0x0fe66,), # Small Semicolon ..Small Equals Sign
(0x0fe68, 0x0fe6b,), # Small Reverse Solidus ..Small Commercial At
(0x0ff01, 0x0ff60,), # Fullwidth Exclamation Ma..Fullwidth Right White Pa
(0x0ffe0, 0x0ffe6,), # Fullwidth Cent Sign ..Fullwidth Won Sign
(0x16fe0, 0x16fe1,), # Tangut Iteration Mark ..Nushu Iteration Mark
(0x17000, 0x187ec,), # (nil)
(0x18800, 0x18af2,), # Tangut Component-001 ..Tangut Component-755
(0x1b000, 0x1b11e,), # Katakana Letter Archaic ..Hentaigana Letter N-mu-m
(0x1b170, 0x1b2fb,), # Nushu Character-1b170 ..Nushu Character-1b2fb
(0x1f004, 0x1f004,), # Mahjong Tile Red Dragon
(0x1f0cf, 0x1f0cf,), # Playing Card Black Joker
(0x1f18e, 0x1f18e,), # Negative Squared Ab
(0x1f191, 0x1f19a,), # Squared Cl ..Squared Vs
(0x1f200, 0x1f202,), # Square Hiragana Hoka ..Squared Katakana Sa
(0x1f210, 0x1f23b,), # Squared Cjk Unified Ideo..Squared Cjk Unified Ideo
(0x1f240, 0x1f248,), # Tortoise Shell Bracketed..Tortoise Shell Bracketed
(0x1f250, 0x1f251,), # Circled Ideograph Advant..Circled Ideograph Accept
(0x1f260, 0x1f265,), # Rounded Symbol For Fu ..Rounded Symbol For Cai
(0x1f300, 0x1f320,), # Cyclone ..Shooting Star
(0x1f32d, 0x1f335,), # Hot Dog ..Cactus
(0x1f337, 0x1f37c,), # Tulip ..Baby Bottle
(0x1f37e, 0x1f393,), # Bottle With Popping Cork..Graduation Cap
(0x1f3a0, 0x1f3ca,), # Carousel Horse ..Swimmer
(0x1f3cf, 0x1f3d3,), # Cricket Bat And Ball ..Table Tennis Paddle And
(0x1f3e0, 0x1f3f0,), # House Building ..European Castle
(0x1f3f4, 0x1f3f4,), # Waving Black Flag
(0x1f3f8, 0x1f3fa,), # Badminton Racquet And Sh..Amphora
(0x1f400, 0x1f43e,), # Rat ..Paw Prints
(0x1f440, 0x1f440,), # Eyes
(0x1f442, 0x1f4fc,), # Ear ..Videocassette
(0x1f4ff, 0x1f53d,), # Prayer Beads ..Down-pointing Small Red
(0x1f54b, 0x1f54e,), # Kaaba ..Menorah With Nine Branch
(0x1f550, 0x1f567,), # Clock Face One Oclock ..Clock Face Twelve-thirty
(0x1f57a, 0x1f57a,), # Man Dancing
(0x1f595, 0x1f596,), # Reversed Hand With Middl..Raised Hand With Part Be
(0x1f5a4, 0x1f5a4,), # Black Heart
(0x1f5fb, 0x1f64f,), # Mount Fuji ..Person With Folded Hands
(0x1f680, 0x1f6c5,), # Rocket ..Left Luggage
(0x1f6cc, 0x1f6cc,), # Sleeping Accommodation
(0x1f6d0, 0x1f6d2,), # Place Of Worship ..Shopping Trolley
(0x1f6eb, 0x1f6ec,), # Airplane Departure ..Airplane Arriving
(0x1f6f4, 0x1f6f8,), # Scooter ..Flying Saucer
(0x1f910, 0x1f93e,), # Zipper-mouth Face ..Handball
(0x1f940, 0x1f94c,), # Wilted Flower ..Curling Stone
(0x1f950, 0x1f96b,), # Croissant ..Canned Food
(0x1f980, 0x1f997,), # Crab ..Cricket
(0x1f9c0, 0x1f9c0,), # Cheese Wedge
(0x1f9d0, 0x1f9e6,), # Face With Monocle ..Socks
(0x20000, 0x2fffd,), # Cjk Unified Ideograph-20..(nil)
(0x30000, 0x3fffd,), # Cjk Unified Ideograph-30..(nil)
),
'11.0.0': (
# Source: EastAsianWidth-11.0.0.txt
# Date: 2018-05-14, 09:41:59 GMT [KW, LI]
#
(0x01100, 0x0115f,), # Hangul Choseong Kiyeok ..Hangul Choseong Filler
(0x0231a, 0x0231b,), # Watch ..Hourglass
(0x02329, 0x0232a,), # Left-pointing Angle Brac..Right-pointing Angle Bra
(0x023e9, 0x023ec,), # Black Right-pointing Dou..Black Down-pointing Doub
(0x023f0, 0x023f0,), # Alarm Clock
(0x023f3, 0x023f3,), # Hourglass With Flowing Sand
(0x025fd, 0x025fe,), # White Medium Small Squar..Black Medium Small Squar
(0x02614, 0x02615,), # Umbrella With Rain Drops..Hot Beverage
(0x02648, 0x02653,), # Aries ..Pisces
(0x0267f, 0x0267f,), # Wheelchair Symbol
(0x02693, 0x02693,), # Anchor
(0x026a1, 0x026a1,), # High Voltage Sign
(0x026aa, 0x026ab,), # Medium White Circle ..Medium Black Circle
(0x026bd, 0x026be,), # Soccer Ball ..Baseball
(0x026c4, 0x026c5,), # Snowman Without Snow ..Sun Behind Cloud
(0x026ce, 0x026ce,), # Ophiuchus
(0x026d4, 0x026d4,), # No Entry
(0x026ea, 0x026ea,), # Church
(0x026f2, 0x026f3,), # Fountain ..Flag In Hole
(0x026f5, 0x026f5,), # Sailboat
(0x026fa, 0x026fa,), # Tent
(0x026fd, 0x026fd,), # Fuel Pump
(0x02705, 0x02705,), # White Heavy Check Mark
(0x0270a, 0x0270b,), # Raised Fist ..Raised Hand
(0x02728, 0x02728,), # Sparkles
(0x0274c, 0x0274c,), # Cross Mark
(0x0274e, 0x0274e,), # Negative Squared Cross Mark
(0x02753, 0x02755,), # Black Question Mark Orna..White Exclamation Mark O
(0x02757, 0x02757,), # Heavy Exclamation Mark Symbol
(0x02795, 0x02797,), # Heavy Plus Sign ..Heavy Division Sign
(0x027b0, 0x027b0,), # Curly Loop
(0x027bf, 0x027bf,), # Double Curly Loop
(0x02b1b, 0x02b1c,), # Black Large Square ..White Large Square
(0x02b50, 0x02b50,), # White Medium Star
(0x02b55, 0x02b55,), # Heavy Large Circle
(0x02e80, 0x02e99,), # Cjk Radical Repeat ..Cjk Radical Rap
(0x02e9b, 0x02ef3,), # Cjk Radical Choke ..Cjk Radical C-simplified
(0x02f00, 0x02fd5,), # Kangxi Radical One ..Kangxi Radical Flute
(0x02ff0, 0x02ffb,), # Ideographic Description ..Ideographic Description
(0x03000, 0x03029,), # Ideographic Space ..Hangzhou Numeral Nine
(0x03030, 0x0303e,), # Wavy Dash ..Ideographic Variation In
(0x03041, 0x03096,), # Hiragana Letter Small A ..Hiragana Letter Small Ke
(0x0309b, 0x030ff,), # Katakana-hiragana Voiced..Katakana Digraph Koto
(0x03105, 0x0312f,), # Bopomofo Letter B ..Bopomofo Letter Nn
(0x03131, 0x0318e,), # Hangul Letter Kiyeok ..Hangul Letter Araeae
(0x03190, 0x031ba,), # Ideographic Annotation L..Bopomofo Letter Zy
(0x031c0, 0x031e3,), # Cjk Stroke T ..Cjk Stroke Q
(0x031f0, 0x0321e,), # Katakana Letter Small Ku..Parenthesized Korean Cha
(0x03220, 0x03247,), # Parenthesized Ideograph ..Circled Ideograph Koto
(0x03250, 0x032fe,), # Partnership Sign ..Circled Katakana Wo
(0x03300, 0x04dbf,), # Square Apaato ..Cjk Unified Ideograph-4d
(0x04e00, 0x0a48c,), # Cjk Unified Ideograph-4e..Yi Syllable Yyr
(0x0a490, 0x0a4c6,), # Yi Radical Qot ..Yi Radical Ke
(0x0a960, 0x0a97c,), # Hangul Choseong Tikeut-m..Hangul Choseong Ssangyeo
(0x0ac00, 0x0d7a3,), # Hangul Syllable Ga ..Hangul Syllable Hih
(0x0f900, 0x0faff,), # Cjk Compatibility Ideogr..(nil)
(0x0fe10, 0x0fe19,), # Presentation Form For Ve..Presentation Form For Ve
(0x0fe30, 0x0fe52,), # Presentation Form For Ve..Small Full Stop
(0x0fe54, 0x0fe66,), # Small Semicolon ..Small Equals Sign
(0x0fe68, 0x0fe6b,), # Small Reverse Solidus ..Small Commercial At
(0x0ff01, 0x0ff60,), # Fullwidth Exclamation Ma..Fullwidth Right White Pa
(0x0ffe0, 0x0ffe6,), # Fullwidth Cent Sign ..Fullwidth Won Sign
(0x16fe0, 0x16fe1,), # Tangut Iteration Mark ..Nushu Iteration Mark
(0x17000, 0x187f1,), # (nil)
(0x18800, 0x18af2,), # Tangut Component-001 ..Tangut Component-755
(0x1b000, 0x1b11e,), # Katakana Letter Archaic ..Hentaigana Letter N-mu-m
(0x1b170, 0x1b2fb,), # Nushu Character-1b170 ..Nushu Character-1b2fb
(0x1f004, 0x1f004,), # Mahjong Tile Red Dragon
(0x1f0cf, 0x1f0cf,), # Playing Card Black Joker
(0x1f18e, 0x1f18e,), # Negative Squared Ab
(0x1f191, 0x1f19a,), # Squared Cl ..Squared Vs
(0x1f200, 0x1f202,), # Square Hiragana Hoka ..Squared Katakana Sa
(0x1f210, 0x1f23b,), # Squared Cjk Unified Ideo..Squared Cjk Unified Ideo
(0x1f240, 0x1f248,), # Tortoise Shell Bracketed..Tortoise Shell Bracketed
(0x1f250, 0x1f251,), # Circled Ideograph Advant..Circled Ideograph Accept
(0x1f260, 0x1f265,), # Rounded Symbol For Fu ..Rounded Symbol For Cai
(0x1f300, 0x1f320,), # Cyclone ..Shooting Star
(0x1f32d, 0x1f335,), # Hot Dog ..Cactus
(0x1f337, 0x1f37c,), # Tulip ..Baby Bottle
(0x1f37e, 0x1f393,), # Bottle With Popping Cork..Graduation Cap
(0x1f3a0, 0x1f3ca,), # Carousel Horse ..Swimmer
(0x1f3cf, 0x1f3d3,), # Cricket Bat And Ball ..Table Tennis Paddle And
(0x1f3e0, 0x1f3f0,), # House Building ..European Castle
(0x1f3f4, 0x1f3f4,), # Waving Black Flag
(0x1f3f8, 0x1f3fa,), # Badminton Racquet And Sh..Amphora
(0x1f400, 0x1f43e,), # Rat ..Paw Prints
(0x1f440, 0x1f440,), # Eyes
(0x1f442, 0x1f4fc,), # Ear ..Videocassette
(0x1f4ff, 0x1f53d,), # Prayer Beads ..Down-pointing Small Red
(0x1f54b, 0x1f54e,), # Kaaba ..Menorah With Nine Branch
(0x1f550, 0x1f567,), # Clock Face One Oclock ..Clock Face Twelve-thirty
(0x1f57a, 0x1f57a,), # Man Dancing
(0x1f595, 0x1f596,), # Reversed Hand With Middl..Raised Hand With Part Be
(0x1f5a4, 0x1f5a4,), # Black Heart
(0x1f5fb, 0x1f64f,), # Mount Fuji ..Person With Folded Hands
(0x1f680, 0x1f6c5,), # Rocket ..Left Luggage
(0x1f6cc, 0x1f6cc,), # Sleeping Accommodation
(0x1f6d0, 0x1f6d2,), # Place Of Worship ..Shopping Trolley
(0x1f6eb, 0x1f6ec,), # Airplane Departure ..Airplane Arriving
(0x1f6f4, 0x1f6f9,), # Scooter ..Skateboard
(0x1f910, 0x1f93e,), # Zipper-mouth Face ..Handball
(0x1f940, 0x1f970,), # Wilted Flower ..Smiling Face With Smilin
(0x1f973, 0x1f976,), # Face With Party Horn And..Freezing Face
(0x1f97a, 0x1f97a,), # Face With Pleading Eyes
(0x1f97c, 0x1f9a2,), # Lab Coat ..Swan
(0x1f9b0, 0x1f9b9,), # Emoji Component Red Hair..Supervillain
(0x1f9c0, 0x1f9c2,), # Cheese Wedge ..Salt Shaker
(0x1f9d0, 0x1f9ff,), # Face With Monocle ..Nazar Amulet
(0x20000, 0x2fffd,), # Cjk Unified Ideograph-20..(nil)
(0x30000, 0x3fffd,), # Cjk Unified Ideograph-30..(nil)
),
'12.0.0': (
# Source: EastAsianWidth-12.0.0.txt
# Date: 2019-01-21, 14:12:58 GMT [KW, LI]
#
(0x01100, 0x0115f,), # Hangul Choseong Kiyeok ..Hangul Choseong Filler
(0x0231a, 0x0231b,), # Watch ..Hourglass
(0x02329, 0x0232a,), # Left-pointing Angle Brac..Right-pointing Angle Bra
(0x023e9, 0x023ec,), # Black Right-pointing Dou..Black Down-pointing Doub
(0x023f0, 0x023f0,), # Alarm Clock
(0x023f3, 0x023f3,), # Hourglass With Flowing Sand
(0x025fd, 0x025fe,), # White Medium Small Squar..Black Medium Small Squar
(0x02614, 0x02615,), # Umbrella With Rain Drops..Hot Beverage
(0x02648, 0x02653,), # Aries ..Pisces
(0x0267f, 0x0267f,), # Wheelchair Symbol
(0x02693, 0x02693,), # Anchor
(0x026a1, 0x026a1,), # High Voltage Sign
(0x026aa, 0x026ab,), # Medium White Circle ..Medium Black Circle
(0x026bd, 0x026be,), # Soccer Ball ..Baseball
(0x026c4, 0x026c5,), # Snowman Without Snow ..Sun Behind Cloud
(0x026ce, 0x026ce,), # Ophiuchus
(0x026d4, 0x026d4,), # No Entry
(0x026ea, 0x026ea,), # Church
(0x026f2, 0x026f3,), # Fountain ..Flag In Hole
(0x026f5, 0x026f5,), # Sailboat
(0x026fa, 0x026fa,), # Tent
(0x026fd, 0x026fd,), # Fuel Pump
(0x02705, 0x02705,), # White Heavy Check Mark
(0x0270a, 0x0270b,), # Raised Fist ..Raised Hand
(0x02728, 0x02728,), # Sparkles
(0x0274c, 0x0274c,), # Cross Mark
(0x0274e, 0x0274e,), # Negative Squared Cross Mark
(0x02753, 0x02755,), # Black Question Mark Orna..White Exclamation Mark O
(0x02757, 0x02757,), # Heavy Exclamation Mark Symbol
(0x02795, 0x02797,), # Heavy Plus Sign ..Heavy Division Sign
(0x027b0, 0x027b0,), # Curly Loop
(0x027bf, 0x027bf,), # Double Curly Loop
(0x02b1b, 0x02b1c,), # Black Large Square ..White Large Square
(0x02b50, 0x02b50,), # White Medium Star
(0x02b55, 0x02b55,), # Heavy Large Circle
(0x02e80, 0x02e99,), # Cjk Radical Repeat ..Cjk Radical Rap
(0x02e9b, 0x02ef3,), # Cjk Radical Choke ..Cjk Radical C-simplified
(0x02f00, 0x02fd5,), # Kangxi Radical One ..Kangxi Radical Flute
(0x02ff0, 0x02ffb,), # Ideographic Description ..Ideographic Description
(0x03000, 0x03029,), # Ideographic Space ..Hangzhou Numeral Nine
(0x03030, 0x0303e,), # Wavy Dash ..Ideographic Variation In
(0x03041, 0x03096,), # Hiragana Letter Small A ..Hiragana Letter Small Ke
(0x0309b, 0x030ff,), # Katakana-hiragana Voiced..Katakana Digraph Koto
(0x03105, 0x0312f,), # Bopomofo Letter B ..Bopomofo Letter Nn
(0x03131, 0x0318e,), # Hangul Letter Kiyeok ..Hangul Letter Araeae
(0x03190, 0x031ba,), # Ideographic Annotation L..Bopomofo Letter Zy
(0x031c0, 0x031e3,), # Cjk Stroke T ..Cjk Stroke Q
(0x031f0, 0x0321e,), # Katakana Letter Small Ku..Parenthesized Korean Cha
(0x03220, 0x03247,), # Parenthesized Ideograph ..Circled Ideograph Koto
(0x03250, 0x032fe,), # Partnership Sign ..Circled Katakana Wo
(0x03300, 0x04dbf,), # Square Apaato ..Cjk Unified Ideograph-4d
(0x04e00, 0x0a48c,), # Cjk Unified Ideograph-4e..Yi Syllable Yyr
(0x0a490, 0x0a4c6,), # Yi Radical Qot ..Yi Radical Ke
(0x0a960, 0x0a97c,), # Hangul Choseong Tikeut-m..Hangul Choseong Ssangyeo
(0x0ac00, 0x0d7a3,), # Hangul Syllable Ga ..Hangul Syllable Hih
(0x0f900, 0x0faff,), # Cjk Compatibility Ideogr..(nil)
(0x0fe10, 0x0fe19,), # Presentation Form For Ve..Presentation Form For Ve
(0x0fe30, 0x0fe52,), # Presentation Form For Ve..Small Full Stop
(0x0fe54, 0x0fe66,), # Small Semicolon ..Small Equals Sign
(0x0fe68, 0x0fe6b,), # Small Reverse Solidus ..Small Commercial At
(0x0ff01, 0x0ff60,), # Fullwidth Exclamation Ma..Fullwidth Right White Pa
(0x0ffe0, 0x0ffe6,), # Fullwidth Cent Sign ..Fullwidth Won Sign
(0x16fe0, 0x16fe3,), # Tangut Iteration Mark ..Old Chinese Iteration Ma
(0x17000, 0x187f7,), # (nil)
(0x18800, 0x18af2,), # Tangut Component-001 ..Tangut Component-755
(0x1b000, 0x1b11e,), # Katakana Letter Archaic ..Hentaigana Letter N-mu-m
(0x1b150, 0x1b152,), # Hiragana Letter Small Wi..Hiragana Letter Small Wo
(0x1b164, 0x1b167,), # Katakana Letter Small Wi..Katakana Letter Small N
(0x1b170, 0x1b2fb,), # Nushu Character-1b170 ..Nushu Character-1b2fb
(0x1f004, 0x1f004,), # Mahjong Tile Red Dragon
(0x1f0cf, 0x1f0cf,), # Playing Card Black Joker
(0x1f18e, 0x1f18e,), # Negative Squared Ab
(0x1f191, 0x1f19a,), # Squared Cl ..Squared Vs
(0x1f200, 0x1f202,), # Square Hiragana Hoka ..Squared Katakana Sa
(0x1f210, 0x1f23b,), # Squared Cjk Unified Ideo..Squared Cjk Unified Ideo
(0x1f240, 0x1f248,), # Tortoise Shell Bracketed..Tortoise Shell Bracketed
(0x1f250, 0x1f251,), # Circled Ideograph Advant..Circled Ideograph Accept
(0x1f260, 0x1f265,), # Rounded Symbol For Fu ..Rounded Symbol For Cai
(0x1f300, 0x1f320,), # Cyclone ..Shooting Star
(0x1f32d, 0x1f335,), # Hot Dog ..Cactus
(0x1f337, 0x1f37c,), # Tulip ..Baby Bottle
(0x1f37e, 0x1f393,), # Bottle With Popping Cork..Graduation Cap
(0x1f3a0, 0x1f3ca,), # Carousel Horse ..Swimmer
(0x1f3cf, 0x1f3d3,), # Cricket Bat And Ball ..Table Tennis Paddle And
(0x1f3e0, 0x1f3f0,), # House Building ..European Castle
(0x1f3f4, 0x1f3f4,), # Waving Black Flag
(0x1f3f8, 0x1f3fa,), # Badminton Racquet And Sh..Amphora
(0x1f400, 0x1f43e,), # Rat ..Paw Prints
(0x1f440, 0x1f440,), # Eyes
(0x1f442, 0x1f4fc,), # Ear ..Videocassette
(0x1f4ff, 0x1f53d,), # Prayer Beads ..Down-pointing Small Red
(0x1f54b, 0x1f54e,), # Kaaba ..Menorah With Nine Branch
(0x1f550, 0x1f567,), # Clock Face One Oclock ..Clock Face Twelve-thirty
(0x1f57a, 0x1f57a,), # Man Dancing
(0x1f595, 0x1f596,), # Reversed Hand With Middl..Raised Hand With Part Be
(0x1f5a4, 0x1f5a4,), # Black Heart
(0x1f5fb, 0x1f64f,), # Mount Fuji ..Person With Folded Hands
(0x1f680, 0x1f6c5,), # Rocket ..Left Luggage
(0x1f6cc, 0x1f6cc,), # Sleeping Accommodation
(0x1f6d0, 0x1f6d2,), # Place Of Worship ..Shopping Trolley
(0x1f6d5, 0x1f6d5,), # Hindu Temple
(0x1f6eb, 0x1f6ec,), # Airplane Departure ..Airplane Arriving
(0x1f6f4, 0x1f6fa,), # Scooter ..Auto Rickshaw
(0x1f7e0, 0x1f7eb,), # Large Orange Circle ..Large Brown Square
(0x1f90d, 0x1f971,), # White Heart ..Yawning Face
(0x1f973, 0x1f976,), # Face With Party Horn And..Freezing Face
(0x1f97a, 0x1f9a2,), # Face With Pleading Eyes ..Swan
(0x1f9a5, 0x1f9aa,), # Sloth ..Oyster
(0x1f9ae, 0x1f9ca,), # Guide Dog ..Ice Cube
(0x1f9cd, 0x1f9ff,), # Standing Person ..Nazar Amulet
(0x1fa70, 0x1fa73,), # Ballet Shoes ..Shorts
(0x1fa78, 0x1fa7a,), # Drop Of Blood ..Stethoscope
(0x1fa80, 0x1fa82,), # Yo-yo ..Parachute
(0x1fa90, 0x1fa95,), # Ringed Planet ..Banjo
(0x20000, 0x2fffd,), # Cjk Unified Ideograph-20..(nil)
(0x30000, 0x3fffd,), # Cjk Unified Ideograph-30..(nil)
),
'12.1.0': (
# Source: EastAsianWidth-12.1.0.txt
# Date: 2019-03-31, 22:01:58 GMT [KW, LI]
#
(0x01100, 0x0115f,), # Hangul Choseong Kiyeok ..Hangul Choseong Filler
(0x0231a, 0x0231b,), # Watch ..Hourglass
(0x02329, 0x0232a,), # Left-pointing Angle Brac..Right-pointing Angle Bra
(0x023e9, 0x023ec,), # Black Right-pointing Dou..Black Down-pointing Doub
(0x023f0, 0x023f0,), # Alarm Clock
(0x023f3, 0x023f3,), # Hourglass With Flowing Sand
(0x025fd, 0x025fe,), # White Medium Small Squar..Black Medium Small Squar
(0x02614, 0x02615,), # Umbrella With Rain Drops..Hot Beverage
(0x02648, 0x02653,), # Aries ..Pisces
(0x0267f, 0x0267f,), # Wheelchair Symbol
(0x02693, 0x02693,), # Anchor
(0x026a1, 0x026a1,), # High Voltage Sign
(0x026aa, 0x026ab,), # Medium White Circle ..Medium Black Circle
(0x026bd, 0x026be,), # Soccer Ball ..Baseball
(0x026c4, 0x026c5,), # Snowman Without Snow ..Sun Behind Cloud
(0x026ce, 0x026ce,), # Ophiuchus
(0x026d4, 0x026d4,), # No Entry
(0x026ea, 0x026ea,), # Church
(0x026f2, 0x026f3,), # Fountain ..Flag In Hole
(0x026f5, 0x026f5,), # Sailboat
(0x026fa, 0x026fa,), # Tent
(0x026fd, 0x026fd,), # Fuel Pump
(0x02705, 0x02705,), # White Heavy Check Mark
(0x0270a, 0x0270b,), # Raised Fist ..Raised Hand
(0x02728, 0x02728,), # Sparkles
(0x0274c, 0x0274c,), # Cross Mark
(0x0274e, 0x0274e,), # Negative Squared Cross Mark
(0x02753, 0x02755,), # Black Question Mark Orna..White Exclamation Mark O
(0x02757, 0x02757,), # Heavy Exclamation Mark Symbol
(0x02795, 0x02797,), # Heavy Plus Sign ..Heavy Division Sign
(0x027b0, 0x027b0,), # Curly Loop
(0x027bf, 0x027bf,), # Double Curly Loop
(0x02b1b, 0x02b1c,), # Black Large Square ..White Large Square
(0x02b50, 0x02b50,), # White Medium Star
(0x02b55, 0x02b55,), # Heavy Large Circle
(0x02e80, 0x02e99,), # Cjk Radical Repeat ..Cjk Radical Rap
(0x02e9b, 0x02ef3,), # Cjk Radical Choke ..Cjk Radical C-simplified
(0x02f00, 0x02fd5,), # Kangxi Radical One ..Kangxi Radical Flute
(0x02ff0, 0x02ffb,), # Ideographic Description ..Ideographic Description
(0x03000, 0x03029,), # Ideographic Space ..Hangzhou Numeral Nine
(0x03030, 0x0303e,), # Wavy Dash ..Ideographic Variation In
(0x03041, 0x03096,), # Hiragana Letter Small A ..Hiragana Letter Small Ke
(0x0309b, 0x030ff,), # Katakana-hiragana Voiced..Katakana Digraph Koto
(0x03105, 0x0312f,), # Bopomofo Letter B ..Bopomofo Letter Nn
(0x03131, 0x0318e,), # Hangul Letter Kiyeok ..Hangul Letter Araeae
(0x03190, 0x031ba,), # Ideographic Annotation L..Bopomofo Letter Zy
(0x031c0, 0x031e3,), # Cjk Stroke T ..Cjk Stroke Q
(0x031f0, 0x0321e,), # Katakana Letter Small Ku..Parenthesized Korean Cha
(0x03220, 0x03247,), # Parenthesized Ideograph ..Circled Ideograph Koto
(0x03250, 0x04dbf,), # Partnership Sign ..Cjk Unified Ideograph-4d
(0x04e00, 0x0a48c,), # Cjk Unified Ideograph-4e..Yi Syllable Yyr
(0x0a490, 0x0a4c6,), # Yi Radical Qot ..Yi Radical Ke
(0x0a960, 0x0a97c,), # Hangul Choseong Tikeut-m..Hangul Choseong Ssangyeo
(0x0ac00, 0x0d7a3,), # Hangul Syllable Ga ..Hangul Syllable Hih
(0x0f900, 0x0faff,), # Cjk Compatibility Ideogr..(nil)
(0x0fe10, 0x0fe19,), # Presentation Form For Ve..Presentation Form For Ve
(0x0fe30, 0x0fe52,), # Presentation Form For Ve..Small Full Stop
(0x0fe54, 0x0fe66,), # Small Semicolon ..Small Equals Sign
(0x0fe68, 0x0fe6b,), # Small Reverse Solidus ..Small Commercial At
(0x0ff01, 0x0ff60,), # Fullwidth Exclamation Ma..Fullwidth Right White Pa
(0x0ffe0, 0x0ffe6,), # Fullwidth Cent Sign ..Fullwidth Won Sign
(0x16fe0, 0x16fe3,), # Tangut Iteration Mark ..Old Chinese Iteration Ma
(0x17000, 0x187f7,), # (nil)
(0x18800, 0x18af2,), # Tangut Component-001 ..Tangut Component-755
(0x1b000, 0x1b11e,), # Katakana Letter Archaic ..Hentaigana Letter N-mu-m
(0x1b150, 0x1b152,), # Hiragana Letter Small Wi..Hiragana Letter Small Wo
(0x1b164, 0x1b167,), # Katakana Letter Small Wi..Katakana Letter Small N
(0x1b170, 0x1b2fb,), # Nushu Character-1b170 ..Nushu Character-1b2fb
(0x1f004, 0x1f004,), # Mahjong Tile Red Dragon
(0x1f0cf, 0x1f0cf,), # Playing Card Black Joker
(0x1f18e, 0x1f18e,), # Negative Squared Ab
(0x1f191, 0x1f19a,), # Squared Cl ..Squared Vs
(0x1f200, 0x1f202,), # Square Hiragana Hoka ..Squared Katakana Sa
(0x1f210, 0x1f23b,), # Squared Cjk Unified Ideo..Squared Cjk Unified Ideo
(0x1f240, 0x1f248,), # Tortoise Shell Bracketed..Tortoise Shell Bracketed
(0x1f250, 0x1f251,), # Circled Ideograph Advant..Circled Ideograph Accept
(0x1f260, 0x1f265,), # Rounded Symbol For Fu ..Rounded Symbol For Cai
(0x1f300, 0x1f320,), # Cyclone ..Shooting Star
(0x1f32d, 0x1f335,), # Hot Dog ..Cactus
(0x1f337, 0x1f37c,), # Tulip ..Baby Bottle
(0x1f37e, 0x1f393,), # Bottle With Popping Cork..Graduation Cap
(0x1f3a0, 0x1f3ca,), # Carousel Horse ..Swimmer
(0x1f3cf, 0x1f3d3,), # Cricket Bat And Ball ..Table Tennis Paddle And
(0x1f3e0, 0x1f3f0,), # House Building ..European Castle
(0x1f3f4, 0x1f3f4,), # Waving Black Flag
(0x1f3f8, 0x1f3fa,), # Badminton Racquet And Sh..Amphora
(0x1f400, 0x1f43e,), # Rat ..Paw Prints
(0x1f440, 0x1f440,), # Eyes
(0x1f442, 0x1f4fc,), # Ear ..Videocassette
(0x1f4ff, 0x1f53d,), # Prayer Beads ..Down-pointing Small Red
(0x1f54b, 0x1f54e,), # Kaaba ..Menorah With Nine Branch
(0x1f550, 0x1f567,), # Clock Face One Oclock ..Clock Face Twelve-thirty
(0x1f57a, 0x1f57a,), # Man Dancing
(0x1f595, 0x1f596,), # Reversed Hand With Middl..Raised Hand With Part Be
(0x1f5a4, 0x1f5a4,), # Black Heart
(0x1f5fb, 0x1f64f,), # Mount Fuji ..Person With Folded Hands
(0x1f680, 0x1f6c5,), # Rocket ..Left Luggage
(0x1f6cc, 0x1f6cc,), # Sleeping Accommodation
(0x1f6d0, 0x1f6d2,), # Place Of Worship ..Shopping Trolley
(0x1f6d5, 0x1f6d5,), # Hindu Temple
(0x1f6eb, 0x1f6ec,), # Airplane Departure ..Airplane Arriving
(0x1f6f4, 0x1f6fa,), # Scooter ..Auto Rickshaw
(0x1f7e0, 0x1f7eb,), # Large Orange Circle ..Large Brown Square
(0x1f90d, 0x1f971,), # White Heart ..Yawning Face
(0x1f973, 0x1f976,), # Face With Party Horn And..Freezing Face
(0x1f97a, 0x1f9a2,), # Face With Pleading Eyes ..Swan
(0x1f9a5, 0x1f9aa,), # Sloth ..Oyster
(0x1f9ae, 0x1f9ca,), # Guide Dog ..Ice Cube
(0x1f9cd, 0x1f9ff,), # Standing Person ..Nazar Amulet
(0x1fa70, 0x1fa73,), # Ballet Shoes ..Shorts
(0x1fa78, 0x1fa7a,), # Drop Of Blood ..Stethoscope
(0x1fa80, 0x1fa82,), # Yo-yo ..Parachute
(0x1fa90, 0x1fa95,), # Ringed Planet ..Banjo
(0x20000, 0x2fffd,), # Cjk Unified Ideograph-20..(nil)
(0x30000, 0x3fffd,), # Cjk Unified Ideograph-30..(nil)
),
'13.0.0': (
# Source: EastAsianWidth-13.0.0.txt
        # Date: 2029-01-21, 18:14:00 GMT [KW, LI]
        # (sic: the year "2029" is a typo present in the upstream
        # EastAsianWidth-13.0.0.txt header itself; kept verbatim.)
#
(0x01100, 0x0115f,), # Hangul Choseong Kiyeok ..Hangul Choseong Filler
(0x0231a, 0x0231b,), # Watch ..Hourglass
(0x02329, 0x0232a,), # Left-pointing Angle Brac..Right-pointing Angle Bra
(0x023e9, 0x023ec,), # Black Right-pointing Dou..Black Down-pointing Doub
(0x023f0, 0x023f0,), # Alarm Clock
(0x023f3, 0x023f3,), # Hourglass With Flowing Sand
(0x025fd, 0x025fe,), # White Medium Small Squar..Black Medium Small Squar
(0x02614, 0x02615,), # Umbrella With Rain Drops..Hot Beverage
(0x02648, 0x02653,), # Aries ..Pisces
(0x0267f, 0x0267f,), # Wheelchair Symbol
(0x02693, 0x02693,), # Anchor
(0x026a1, 0x026a1,), # High Voltage Sign
(0x026aa, 0x026ab,), # Medium White Circle ..Medium Black Circle
(0x026bd, 0x026be,), # Soccer Ball ..Baseball
(0x026c4, 0x026c5,), # Snowman Without Snow ..Sun Behind Cloud
(0x026ce, 0x026ce,), # Ophiuchus
(0x026d4, 0x026d4,), # No Entry
(0x026ea, 0x026ea,), # Church
(0x026f2, 0x026f3,), # Fountain ..Flag In Hole
(0x026f5, 0x026f5,), # Sailboat
(0x026fa, 0x026fa,), # Tent
(0x026fd, 0x026fd,), # Fuel Pump
(0x02705, 0x02705,), # White Heavy Check Mark
(0x0270a, 0x0270b,), # Raised Fist ..Raised Hand
(0x02728, 0x02728,), # Sparkles
(0x0274c, 0x0274c,), # Cross Mark
(0x0274e, 0x0274e,), # Negative Squared Cross Mark
(0x02753, 0x02755,), # Black Question Mark Orna..White Exclamation Mark O
(0x02757, 0x02757,), # Heavy Exclamation Mark Symbol
(0x02795, 0x02797,), # Heavy Plus Sign ..Heavy Division Sign
(0x027b0, 0x027b0,), # Curly Loop
(0x027bf, 0x027bf,), # Double Curly Loop
(0x02b1b, 0x02b1c,), # Black Large Square ..White Large Square
(0x02b50, 0x02b50,), # White Medium Star
(0x02b55, 0x02b55,), # Heavy Large Circle
(0x02e80, 0x02e99,), # Cjk Radical Repeat ..Cjk Radical Rap
(0x02e9b, 0x02ef3,), # Cjk Radical Choke ..Cjk Radical C-simplified
(0x02f00, 0x02fd5,), # Kangxi Radical One ..Kangxi Radical Flute
(0x02ff0, 0x02ffb,), # Ideographic Description ..Ideographic Description
(0x03000, 0x03029,), # Ideographic Space ..Hangzhou Numeral Nine
(0x03030, 0x0303e,), # Wavy Dash ..Ideographic Variation In
(0x03041, 0x03096,), # Hiragana Letter Small A ..Hiragana Letter Small Ke
(0x0309b, 0x030ff,), # Katakana-hiragana Voiced..Katakana Digraph Koto
(0x03105, 0x0312f,), # Bopomofo Letter B ..Bopomofo Letter Nn
(0x03131, 0x0318e,), # Hangul Letter Kiyeok ..Hangul Letter Araeae
(0x03190, 0x031e3,), # Ideographic Annotation L..Cjk Stroke Q
(0x031f0, 0x0321e,), # Katakana Letter Small Ku..Parenthesized Korean Cha
(0x03220, 0x03247,), # Parenthesized Ideograph ..Circled Ideograph Koto
(0x03250, 0x04dbf,), # Partnership Sign ..Cjk Unified Ideograph-4d
(0x04e00, 0x0a48c,), # Cjk Unified Ideograph-4e..Yi Syllable Yyr
(0x0a490, 0x0a4c6,), # Yi Radical Qot ..Yi Radical Ke
(0x0a960, 0x0a97c,), # Hangul Choseong Tikeut-m..Hangul Choseong Ssangyeo
(0x0ac00, 0x0d7a3,), # Hangul Syllable Ga ..Hangul Syllable Hih
(0x0f900, 0x0faff,), # Cjk Compatibility Ideogr..(nil)
(0x0fe10, 0x0fe19,), # Presentation Form For Ve..Presentation Form For Ve
(0x0fe30, 0x0fe52,), # Presentation Form For Ve..Small Full Stop
(0x0fe54, 0x0fe66,), # Small Semicolon ..Small Equals Sign
(0x0fe68, 0x0fe6b,), # Small Reverse Solidus ..Small Commercial At
(0x0ff01, 0x0ff60,), # Fullwidth Exclamation Ma..Fullwidth Right White Pa
(0x0ffe0, 0x0ffe6,), # Fullwidth Cent Sign ..Fullwidth Won Sign
(0x16fe0, 0x16fe3,), # Tangut Iteration Mark ..Old Chinese Iteration Ma
(0x17000, 0x187f7,), # (nil)
(0x18800, 0x18cd5,), # Tangut Component-001 ..Khitan Small Script Char
(0x18d00, 0x18d08,), # (nil)
(0x1b000, 0x1b11e,), # Katakana Letter Archaic ..Hentaigana Letter N-mu-m
(0x1b150, 0x1b152,), # Hiragana Letter Small Wi..Hiragana Letter Small Wo
(0x1b164, 0x1b167,), # Katakana Letter Small Wi..Katakana Letter Small N
(0x1b170, 0x1b2fb,), # Nushu Character-1b170 ..Nushu Character-1b2fb
(0x1f004, 0x1f004,), # Mahjong Tile Red Dragon
(0x1f0cf, 0x1f0cf,), # Playing Card Black Joker
(0x1f18e, 0x1f18e,), # Negative Squared Ab
(0x1f191, 0x1f19a,), # Squared Cl ..Squared Vs
(0x1f200, 0x1f202,), # Square Hiragana Hoka ..Squared Katakana Sa
(0x1f210, 0x1f23b,), # Squared Cjk Unified Ideo..Squared Cjk Unified Ideo
(0x1f240, 0x1f248,), # Tortoise Shell Bracketed..Tortoise Shell Bracketed
(0x1f250, 0x1f251,), # Circled Ideograph Advant..Circled Ideograph Accept
(0x1f260, 0x1f265,), # Rounded Symbol For Fu ..Rounded Symbol For Cai
(0x1f300, 0x1f320,), # Cyclone ..Shooting Star
(0x1f32d, 0x1f335,), # Hot Dog ..Cactus
(0x1f337, 0x1f37c,), # Tulip ..Baby Bottle
(0x1f37e, 0x1f393,), # Bottle With Popping Cork..Graduation Cap
(0x1f3a0, 0x1f3ca,), # Carousel Horse ..Swimmer
(0x1f3cf, 0x1f3d3,), # Cricket Bat And Ball ..Table Tennis Paddle And
(0x1f3e0, 0x1f3f0,), # House Building ..European Castle
(0x1f3f4, 0x1f3f4,), # Waving Black Flag
(0x1f3f8, 0x1f3fa,), # Badminton Racquet And Sh..Amphora
(0x1f400, 0x1f43e,), # Rat ..Paw Prints
(0x1f440, 0x1f440,), # Eyes
(0x1f442, 0x1f4fc,), # Ear ..Videocassette
(0x1f4ff, 0x1f53d,), # Prayer Beads ..Down-pointing Small Red
(0x1f54b, 0x1f54e,), # Kaaba ..Menorah With Nine Branch
(0x1f550, 0x1f567,), # Clock Face One Oclock ..Clock Face Twelve-thirty
(0x1f57a, 0x1f57a,), # Man Dancing
(0x1f595, 0x1f596,), # Reversed Hand With Middl..Raised Hand With Part Be
(0x1f5a4, 0x1f5a4,), # Black Heart
(0x1f5fb, 0x1f64f,), # Mount Fuji ..Person With Folded Hands
(0x1f680, 0x1f6c5,), # Rocket ..Left Luggage
(0x1f6cc, 0x1f6cc,), # Sleeping Accommodation
(0x1f6d0, 0x1f6d2,), # Place Of Worship ..Shopping Trolley
(0x1f6d5, 0x1f6d7,), # Hindu Temple ..Elevator
(0x1f6eb, 0x1f6ec,), # Airplane Departure ..Airplane Arriving
(0x1f6f4, 0x1f6fc,), # Scooter ..Roller Skate
(0x1f7e0, 0x1f7eb,), # Large Orange Circle ..Large Brown Square
(0x1f90c, 0x1f93a,), # Pinched Fingers ..Fencer
(0x1f93c, 0x1f945,), # Wrestlers ..Goal Net
(0x1f947, 0x1f978,), # First Place Medal ..Disguised Face
(0x1f97a, 0x1f9cb,), # Face With Pleading Eyes ..Bubble Tea
(0x1f9cd, 0x1f9ff,), # Standing Person ..Nazar Amulet
(0x1fa70, 0x1fa74,), # Ballet Shoes ..Thong Sandal
(0x1fa78, 0x1fa7a,), # Drop Of Blood ..Stethoscope
(0x1fa80, 0x1fa86,), # Yo-yo ..Nesting Dolls
(0x1fa90, 0x1faa8,), # Ringed Planet ..Rock
(0x1fab0, 0x1fab6,), # Fly ..Feather
(0x1fac0, 0x1fac2,), # Anatomical Heart ..People Hugging
(0x1fad0, 0x1fad6,), # Blueberries ..Teapot
(0x20000, 0x2fffd,), # Cjk Unified Ideograph-20..(nil)
(0x30000, 0x3fffd,), # Cjk Unified Ideograph-30..(nil)
),
'14.0.0': (
# Source: EastAsianWidth-14.0.0.txt
# Date: 2021-07-06, 09:58:53 GMT [KW, LI]
#
(0x01100, 0x0115f,), # Hangul Choseong Kiyeok ..Hangul Choseong Filler
(0x0231a, 0x0231b,), # Watch ..Hourglass
(0x02329, 0x0232a,), # Left-pointing Angle Brac..Right-pointing Angle Bra
(0x023e9, 0x023ec,), # Black Right-pointing Dou..Black Down-pointing Doub
(0x023f0, 0x023f0,), # Alarm Clock
(0x023f3, 0x023f3,), # Hourglass With Flowing Sand
(0x025fd, 0x025fe,), # White Medium Small Squar..Black Medium Small Squar
(0x02614, 0x02615,), # Umbrella With Rain Drops..Hot Beverage
(0x02648, 0x02653,), # Aries ..Pisces
(0x0267f, 0x0267f,), # Wheelchair Symbol
(0x02693, 0x02693,), # Anchor
(0x026a1, 0x026a1,), # High Voltage Sign
(0x026aa, 0x026ab,), # Medium White Circle ..Medium Black Circle
(0x026bd, 0x026be,), # Soccer Ball ..Baseball
(0x026c4, 0x026c5,), # Snowman Without Snow ..Sun Behind Cloud
(0x026ce, 0x026ce,), # Ophiuchus
(0x026d4, 0x026d4,), # No Entry
(0x026ea, 0x026ea,), # Church
(0x026f2, 0x026f3,), # Fountain ..Flag In Hole
(0x026f5, 0x026f5,), # Sailboat
(0x026fa, 0x026fa,), # Tent
(0x026fd, 0x026fd,), # Fuel Pump
(0x02705, 0x02705,), # White Heavy Check Mark
(0x0270a, 0x0270b,), # Raised Fist ..Raised Hand
(0x02728, 0x02728,), # Sparkles
(0x0274c, 0x0274c,), # Cross Mark
(0x0274e, 0x0274e,), # Negative Squared Cross Mark
(0x02753, 0x02755,), # Black Question Mark Orna..White Exclamation Mark O
(0x02757, 0x02757,), # Heavy Exclamation Mark Symbol
(0x02795, 0x02797,), # Heavy Plus Sign ..Heavy Division Sign
(0x027b0, 0x027b0,), # Curly Loop
(0x027bf, 0x027bf,), # Double Curly Loop
(0x02b1b, 0x02b1c,), # Black Large Square ..White Large Square
(0x02b50, 0x02b50,), # White Medium Star
(0x02b55, 0x02b55,), # Heavy Large Circle
(0x02e80, 0x02e99,), # Cjk Radical Repeat ..Cjk Radical Rap
(0x02e9b, 0x02ef3,), # Cjk Radical Choke ..Cjk Radical C-simplified
(0x02f00, 0x02fd5,), # Kangxi Radical One ..Kangxi Radical Flute
(0x02ff0, 0x02ffb,), # Ideographic Description ..Ideographic Description
(0x03000, 0x03029,), # Ideographic Space ..Hangzhou Numeral Nine
(0x03030, 0x0303e,), # Wavy Dash ..Ideographic Variation In
(0x03041, 0x03096,), # Hiragana Letter Small A ..Hiragana Letter Small Ke
(0x0309b, 0x030ff,), # Katakana-hiragana Voiced..Katakana Digraph Koto
(0x03105, 0x0312f,), # Bopomofo Letter B ..Bopomofo Letter Nn
(0x03131, 0x0318e,), # Hangul Letter Kiyeok ..Hangul Letter Araeae
(0x03190, 0x031e3,), # Ideographic Annotation L..Cjk Stroke Q
(0x031f0, 0x0321e,), # Katakana Letter Small Ku..Parenthesized Korean Cha
(0x03220, 0x03247,), # Parenthesized Ideograph ..Circled Ideograph Koto
(0x03250, 0x04dbf,), # Partnership Sign ..Cjk Unified Ideograph-4d
(0x04e00, 0x0a48c,), # Cjk Unified Ideograph-4e..Yi Syllable Yyr
(0x0a490, 0x0a4c6,), # Yi Radical Qot ..Yi Radical Ke
(0x0a960, 0x0a97c,), # Hangul Choseong Tikeut-m..Hangul Choseong Ssangyeo
(0x0ac00, 0x0d7a3,), # Hangul Syllable Ga ..Hangul Syllable Hih
(0x0f900, 0x0faff,), # Cjk Compatibility Ideogr..(nil)
(0x0fe10, 0x0fe19,), # Presentation Form For Ve..Presentation Form For Ve
(0x0fe30, 0x0fe52,), # Presentation Form For Ve..Small Full Stop
(0x0fe54, 0x0fe66,), # Small Semicolon ..Small Equals Sign
(0x0fe68, 0x0fe6b,), # Small Reverse Solidus ..Small Commercial At
(0x0ff01, 0x0ff60,), # Fullwidth Exclamation Ma..Fullwidth Right White Pa
(0x0ffe0, 0x0ffe6,), # Fullwidth Cent Sign ..Fullwidth Won Sign
(0x16fe0, 0x16fe3,), # Tangut Iteration Mark ..Old Chinese Iteration Ma
(0x17000, 0x187f7,), # (nil)
(0x18800, 0x18cd5,), # Tangut Component-001 ..Khitan Small Script Char
(0x18d00, 0x18d08,), # (nil)
(0x1aff0, 0x1aff3,), # Katakana Letter Minnan T..Katakana Letter Minnan T
(0x1aff5, 0x1affb,), # Katakana Letter Minnan T..Katakana Letter Minnan N
(0x1affd, 0x1affe,), # Katakana Letter Minnan N..Katakana Letter Minnan N
(0x1b000, 0x1b122,), # Katakana Letter Archaic ..Katakana Letter Archaic
(0x1b150, 0x1b152,), # Hiragana Letter Small Wi..Hiragana Letter Small Wo
(0x1b164, 0x1b167,), # Katakana Letter Small Wi..Katakana Letter Small N
(0x1b170, 0x1b2fb,), # Nushu Character-1b170 ..Nushu Character-1b2fb
(0x1f004, 0x1f004,), # Mahjong Tile Red Dragon
(0x1f0cf, 0x1f0cf,), # Playing Card Black Joker
(0x1f18e, 0x1f18e,), # Negative Squared Ab
(0x1f191, 0x1f19a,), # Squared Cl ..Squared Vs
(0x1f200, 0x1f202,), # Square Hiragana Hoka ..Squared Katakana Sa
(0x1f210, 0x1f23b,), # Squared Cjk Unified Ideo..Squared Cjk Unified Ideo
(0x1f240, 0x1f248,), # Tortoise Shell Bracketed..Tortoise Shell Bracketed
(0x1f250, 0x1f251,), # Circled Ideograph Advant..Circled Ideograph Accept
(0x1f260, 0x1f265,), # Rounded Symbol For Fu ..Rounded Symbol For Cai
(0x1f300, 0x1f320,), # Cyclone ..Shooting Star
(0x1f32d, 0x1f335,), # Hot Dog ..Cactus
(0x1f337, 0x1f37c,), # Tulip ..Baby Bottle
(0x1f37e, 0x1f393,), # Bottle With Popping Cork..Graduation Cap
(0x1f3a0, 0x1f3ca,), # Carousel Horse ..Swimmer
(0x1f3cf, 0x1f3d3,), # Cricket Bat And Ball ..Table Tennis Paddle And
(0x1f3e0, 0x1f3f0,), # House Building ..European Castle
(0x1f3f4, 0x1f3f4,), # Waving Black Flag
(0x1f3f8, 0x1f3fa,), # Badminton Racquet And Sh..Amphora
(0x1f400, 0x1f43e,), # Rat ..Paw Prints
(0x1f440, 0x1f440,), # Eyes
(0x1f442, 0x1f4fc,), # Ear ..Videocassette
(0x1f4ff, 0x1f53d,), # Prayer Beads ..Down-pointing Small Red
(0x1f54b, 0x1f54e,), # Kaaba ..Menorah With Nine Branch
(0x1f550, 0x1f567,), # Clock Face One Oclock ..Clock Face Twelve-thirty
(0x1f57a, 0x1f57a,), # Man Dancing
(0x1f595, 0x1f596,), # Reversed Hand With Middl..Raised Hand With Part Be
(0x1f5a4, 0x1f5a4,), # Black Heart
(0x1f5fb, 0x1f64f,), # Mount Fuji ..Person With Folded Hands
(0x1f680, 0x1f6c5,), # Rocket ..Left Luggage
(0x1f6cc, 0x1f6cc,), # Sleeping Accommodation
(0x1f6d0, 0x1f6d2,), # Place Of Worship ..Shopping Trolley
(0x1f6d5, 0x1f6d7,), # Hindu Temple ..Elevator
(0x1f6dd, 0x1f6df,), # Playground Slide ..Ring Buoy
(0x1f6eb, 0x1f6ec,), # Airplane Departure ..Airplane Arriving
(0x1f6f4, 0x1f6fc,), # Scooter ..Roller Skate
(0x1f7e0, 0x1f7eb,), # Large Orange Circle ..Large Brown Square
(0x1f7f0, 0x1f7f0,), # Heavy Equals Sign
(0x1f90c, 0x1f93a,), # Pinched Fingers ..Fencer
(0x1f93c, 0x1f945,), # Wrestlers ..Goal Net
(0x1f947, 0x1f9ff,), # First Place Medal ..Nazar Amulet
(0x1fa70, 0x1fa74,), # Ballet Shoes ..Thong Sandal
(0x1fa78, 0x1fa7c,), # Drop Of Blood ..Crutch
(0x1fa80, 0x1fa86,), # Yo-yo ..Nesting Dolls
(0x1fa90, 0x1faac,), # Ringed Planet ..Hamsa
(0x1fab0, 0x1faba,), # Fly ..Nest With Eggs
(0x1fac0, 0x1fac5,), # Anatomical Heart ..Person With Crown
(0x1fad0, 0x1fad9,), # Blueberries ..Jar
(0x1fae0, 0x1fae7,), # Melting Face ..Bubbles
(0x1faf0, 0x1faf6,), # Hand With Index Finger A..Heart Hands
(0x20000, 0x2fffd,), # Cjk Unified Ideograph-20..(nil)
(0x30000, 0x3fffd,), # Cjk Unified Ideograph-30..(nil)
),
'15.0.0': (
# Source: EastAsianWidth-15.0.0.txt
# Date: 2022-05-24, 17:40:20 GMT [KW, LI]
#
(0x01100, 0x0115f,), # Hangul Choseong Kiyeok ..Hangul Choseong Filler
(0x0231a, 0x0231b,), # Watch ..Hourglass
(0x02329, 0x0232a,), # Left-pointing Angle Brac..Right-pointing Angle Bra
(0x023e9, 0x023ec,), # Black Right-pointing Dou..Black Down-pointing Doub
(0x023f0, 0x023f0,), # Alarm Clock
(0x023f3, 0x023f3,), # Hourglass With Flowing Sand
(0x025fd, 0x025fe,), # White Medium Small Squar..Black Medium Small Squar
(0x02614, 0x02615,), # Umbrella With Rain Drops..Hot Beverage
(0x02648, 0x02653,), # Aries ..Pisces
(0x0267f, 0x0267f,), # Wheelchair Symbol
(0x02693, 0x02693,), # Anchor
(0x026a1, 0x026a1,), # High Voltage Sign
(0x026aa, 0x026ab,), # Medium White Circle ..Medium Black Circle
(0x026bd, 0x026be,), # Soccer Ball ..Baseball
(0x026c4, 0x026c5,), # Snowman Without Snow ..Sun Behind Cloud
(0x026ce, 0x026ce,), # Ophiuchus
(0x026d4, 0x026d4,), # No Entry
(0x026ea, 0x026ea,), # Church
(0x026f2, 0x026f3,), # Fountain ..Flag In Hole
(0x026f5, 0x026f5,), # Sailboat
(0x026fa, 0x026fa,), # Tent
(0x026fd, 0x026fd,), # Fuel Pump
(0x02705, 0x02705,), # White Heavy Check Mark
(0x0270a, 0x0270b,), # Raised Fist ..Raised Hand
(0x02728, 0x02728,), # Sparkles
(0x0274c, 0x0274c,), # Cross Mark
(0x0274e, 0x0274e,), # Negative Squared Cross Mark
(0x02753, 0x02755,), # Black Question Mark Orna..White Exclamation Mark O
(0x02757, 0x02757,), # Heavy Exclamation Mark Symbol
(0x02795, 0x02797,), # Heavy Plus Sign ..Heavy Division Sign
(0x027b0, 0x027b0,), # Curly Loop
(0x027bf, 0x027bf,), # Double Curly Loop
(0x02b1b, 0x02b1c,), # Black Large Square ..White Large Square
(0x02b50, 0x02b50,), # White Medium Star
(0x02b55, 0x02b55,), # Heavy Large Circle
(0x02e80, 0x02e99,), # Cjk Radical Repeat ..Cjk Radical Rap
(0x02e9b, 0x02ef3,), # Cjk Radical Choke ..Cjk Radical C-simplified
(0x02f00, 0x02fd5,), # Kangxi Radical One ..Kangxi Radical Flute
(0x02ff0, 0x02ffb,), # Ideographic Description ..Ideographic Description
(0x03000, 0x03029,), # Ideographic Space ..Hangzhou Numeral Nine
(0x03030, 0x0303e,), # Wavy Dash ..Ideographic Variation In
(0x03041, 0x03096,), # Hiragana Letter Small A ..Hiragana Letter Small Ke
(0x0309b, 0x030ff,), # Katakana-hiragana Voiced..Katakana Digraph Koto
(0x03105, 0x0312f,), # Bopomofo Letter B ..Bopomofo Letter Nn
(0x03131, 0x0318e,), # Hangul Letter Kiyeok ..Hangul Letter Araeae
(0x03190, 0x031e3,), # Ideographic Annotation L..Cjk Stroke Q
(0x031f0, 0x0321e,), # Katakana Letter Small Ku..Parenthesized Korean Cha
(0x03220, 0x03247,), # Parenthesized Ideograph ..Circled Ideograph Koto
(0x03250, 0x04dbf,), # Partnership Sign ..Cjk Unified Ideograph-4d
(0x04e00, 0x0a48c,), # Cjk Unified Ideograph-4e..Yi Syllable Yyr
(0x0a490, 0x0a4c6,), # Yi Radical Qot ..Yi Radical Ke
(0x0a960, 0x0a97c,), # Hangul Choseong Tikeut-m..Hangul Choseong Ssangyeo
(0x0ac00, 0x0d7a3,), # Hangul Syllable Ga ..Hangul Syllable Hih
(0x0f900, 0x0faff,), # Cjk Compatibility Ideogr..(nil)
(0x0fe10, 0x0fe19,), # Presentation Form For Ve..Presentation Form For Ve
(0x0fe30, 0x0fe52,), # Presentation Form For Ve..Small Full Stop
(0x0fe54, 0x0fe66,), # Small Semicolon ..Small Equals Sign
(0x0fe68, 0x0fe6b,), # Small Reverse Solidus ..Small Commercial At
(0x0ff01, 0x0ff60,), # Fullwidth Exclamation Ma..Fullwidth Right White Pa
(0x0ffe0, 0x0ffe6,), # Fullwidth Cent Sign ..Fullwidth Won Sign
(0x16fe0, 0x16fe3,), # Tangut Iteration Mark ..Old Chinese Iteration Ma
(0x17000, 0x187f7,), # (nil)
(0x18800, 0x18cd5,), # Tangut Component-001 ..Khitan Small Script Char
(0x18d00, 0x18d08,), # (nil)
(0x1aff0, 0x1aff3,), # Katakana Letter Minnan T..Katakana Letter Minnan T
(0x1aff5, 0x1affb,), # Katakana Letter Minnan T..Katakana Letter Minnan N
(0x1affd, 0x1affe,), # Katakana Letter Minnan N..Katakana Letter Minnan N
(0x1b000, 0x1b122,), # Katakana Letter Archaic ..Katakana Letter Archaic
(0x1b132, 0x1b132,), # Hiragana Letter Small Ko
(0x1b150, 0x1b152,), # Hiragana Letter Small Wi..Hiragana Letter Small Wo
(0x1b155, 0x1b155,), # Katakana Letter Small Ko
(0x1b164, 0x1b167,), # Katakana Letter Small Wi..Katakana Letter Small N
(0x1b170, 0x1b2fb,), # Nushu Character-1b170 ..Nushu Character-1b2fb
(0x1f004, 0x1f004,), # Mahjong Tile Red Dragon
(0x1f0cf, 0x1f0cf,), # Playing Card Black Joker
(0x1f18e, 0x1f18e,), # Negative Squared Ab
(0x1f191, 0x1f19a,), # Squared Cl ..Squared Vs
(0x1f200, 0x1f202,), # Square Hiragana Hoka ..Squared Katakana Sa
(0x1f210, 0x1f23b,), # Squared Cjk Unified Ideo..Squared Cjk Unified Ideo
(0x1f240, 0x1f248,), # Tortoise Shell Bracketed..Tortoise Shell Bracketed
(0x1f250, 0x1f251,), # Circled Ideograph Advant..Circled Ideograph Accept
(0x1f260, 0x1f265,), # Rounded Symbol For Fu ..Rounded Symbol For Cai
(0x1f300, 0x1f320,), # Cyclone ..Shooting Star
(0x1f32d, 0x1f335,), # Hot Dog ..Cactus
(0x1f337, 0x1f37c,), # Tulip ..Baby Bottle
(0x1f37e, 0x1f393,), # Bottle With Popping Cork..Graduation Cap
(0x1f3a0, 0x1f3ca,), # Carousel Horse ..Swimmer
(0x1f3cf, 0x1f3d3,), # Cricket Bat And Ball ..Table Tennis Paddle And
(0x1f3e0, 0x1f3f0,), # House Building ..European Castle
(0x1f3f4, 0x1f3f4,), # Waving Black Flag
(0x1f3f8, 0x1f3fa,), # Badminton Racquet And Sh..Amphora
(0x1f400, 0x1f43e,), # Rat ..Paw Prints
(0x1f440, 0x1f440,), # Eyes
(0x1f442, 0x1f4fc,), # Ear ..Videocassette
(0x1f4ff, 0x1f53d,), # Prayer Beads ..Down-pointing Small Red
(0x1f54b, 0x1f54e,), # Kaaba ..Menorah With Nine Branch
(0x1f550, 0x1f567,), # Clock Face One Oclock ..Clock Face Twelve-thirty
(0x1f57a, 0x1f57a,), # Man Dancing
(0x1f595, 0x1f596,), # Reversed Hand With Middl..Raised Hand With Part Be
(0x1f5a4, 0x1f5a4,), # Black Heart
(0x1f5fb, 0x1f64f,), # Mount Fuji ..Person With Folded Hands
(0x1f680, 0x1f6c5,), # Rocket ..Left Luggage
(0x1f6cc, 0x1f6cc,), # Sleeping Accommodation
(0x1f6d0, 0x1f6d2,), # Place Of Worship ..Shopping Trolley
(0x1f6d5, 0x1f6d7,), # Hindu Temple ..Elevator
(0x1f6dc, 0x1f6df,), # Wireless ..Ring Buoy
(0x1f6eb, 0x1f6ec,), # Airplane Departure ..Airplane Arriving
(0x1f6f4, 0x1f6fc,), # Scooter ..Roller Skate
(0x1f7e0, 0x1f7eb,), # Large Orange Circle ..Large Brown Square
(0x1f7f0, 0x1f7f0,), # Heavy Equals Sign
(0x1f90c, 0x1f93a,), # Pinched Fingers ..Fencer
(0x1f93c, 0x1f945,), # Wrestlers ..Goal Net
(0x1f947, 0x1f9ff,), # First Place Medal ..Nazar Amulet
(0x1fa70, 0x1fa7c,), # Ballet Shoes ..Crutch
(0x1fa80, 0x1fa88,), # Yo-yo ..Flute
(0x1fa90, 0x1fabd,), # Ringed Planet ..Wing
(0x1fabf, 0x1fac5,), # Goose ..Person With Crown
(0x1face, 0x1fadb,), # Moose ..Pea Pod
(0x1fae0, 0x1fae8,), # Melting Face ..Shaking Face
(0x1faf0, 0x1faf8,), # Hand With Index Finger A..Rightwards Pushing Hand
(0x20000, 0x2fffd,), # Cjk Unified Ideograph-20..(nil)
(0x30000, 0x3fffd,), # Cjk Unified Ideograph-30..(nil)
),
'15.1.0': (
# Source: EastAsianWidth-15.1.0.txt
# Date: 2023-07-28, 23:34:08 GMT
#
(0x01100, 0x0115f,), # Hangul Choseong Kiyeok ..Hangul Choseong Filler
(0x0231a, 0x0231b,), # Watch ..Hourglass
(0x02329, 0x0232a,), # Left-pointing Angle Brac..Right-pointing Angle Bra
(0x023e9, 0x023ec,), # Black Right-pointing Dou..Black Down-pointing Doub
(0x023f0, 0x023f0,), # Alarm Clock
(0x023f3, 0x023f3,), # Hourglass With Flowing Sand
(0x025fd, 0x025fe,), # White Medium Small Squar..Black Medium Small Squar
(0x02614, 0x02615,), # Umbrella With Rain Drops..Hot Beverage
(0x02648, 0x02653,), # Aries ..Pisces
(0x0267f, 0x0267f,), # Wheelchair Symbol
(0x02693, 0x02693,), # Anchor
(0x026a1, 0x026a1,), # High Voltage Sign
(0x026aa, 0x026ab,), # Medium White Circle ..Medium Black Circle
(0x026bd, 0x026be,), # Soccer Ball ..Baseball
(0x026c4, 0x026c5,), # Snowman Without Snow ..Sun Behind Cloud
(0x026ce, 0x026ce,), # Ophiuchus
(0x026d4, 0x026d4,), # No Entry
(0x026ea, 0x026ea,), # Church
(0x026f2, 0x026f3,), # Fountain ..Flag In Hole
(0x026f5, 0x026f5,), # Sailboat
(0x026fa, 0x026fa,), # Tent
(0x026fd, 0x026fd,), # Fuel Pump
(0x02705, 0x02705,), # White Heavy Check Mark
(0x0270a, 0x0270b,), # Raised Fist ..Raised Hand
(0x02728, 0x02728,), # Sparkles
(0x0274c, 0x0274c,), # Cross Mark
(0x0274e, 0x0274e,), # Negative Squared Cross Mark
(0x02753, 0x02755,), # Black Question Mark Orna..White Exclamation Mark O
(0x02757, 0x02757,), # Heavy Exclamation Mark Symbol
(0x02795, 0x02797,), # Heavy Plus Sign ..Heavy Division Sign
(0x027b0, 0x027b0,), # Curly Loop
(0x027bf, 0x027bf,), # Double Curly Loop
(0x02b1b, 0x02b1c,), # Black Large Square ..White Large Square
(0x02b50, 0x02b50,), # White Medium Star
(0x02b55, 0x02b55,), # Heavy Large Circle
(0x02e80, 0x02e99,), # Cjk Radical Repeat ..Cjk Radical Rap
(0x02e9b, 0x02ef3,), # Cjk Radical Choke ..Cjk Radical C-simplified
(0x02f00, 0x02fd5,), # Kangxi Radical One ..Kangxi Radical Flute
(0x02ff0, 0x03029,), # Ideographic Description ..Hangzhou Numeral Nine
(0x03030, 0x0303e,), # Wavy Dash ..Ideographic Variation In
(0x03041, 0x03096,), # Hiragana Letter Small A ..Hiragana Letter Small Ke
(0x0309b, 0x030ff,), # Katakana-hiragana Voiced..Katakana Digraph Koto
(0x03105, 0x0312f,), # Bopomofo Letter B ..Bopomofo Letter Nn
(0x03131, 0x0318e,), # Hangul Letter Kiyeok ..Hangul Letter Araeae
(0x03190, 0x031e3,), # Ideographic Annotation L..Cjk Stroke Q
(0x031ef, 0x0321e,), # (nil) ..Parenthesized Korean Cha
(0x03220, 0x03247,), # Parenthesized Ideograph ..Circled Ideograph Koto
(0x03250, 0x04dbf,), # Partnership Sign ..Cjk Unified Ideograph-4d
(0x04e00, 0x0a48c,), # Cjk Unified Ideograph-4e..Yi Syllable Yyr
(0x0a490, 0x0a4c6,), # Yi Radical Qot ..Yi Radical Ke
(0x0a960, 0x0a97c,), # Hangul Choseong Tikeut-m..Hangul Choseong Ssangyeo
(0x0ac00, 0x0d7a3,), # Hangul Syllable Ga ..Hangul Syllable Hih
(0x0f900, 0x0faff,), # Cjk Compatibility Ideogr..(nil)
(0x0fe10, 0x0fe19,), # Presentation Form For Ve..Presentation Form For Ve
(0x0fe30, 0x0fe52,), # Presentation Form For Ve..Small Full Stop
(0x0fe54, 0x0fe66,), # Small Semicolon ..Small Equals Sign
(0x0fe68, 0x0fe6b,), # Small Reverse Solidus ..Small Commercial At
(0x0ff01, 0x0ff60,), # Fullwidth Exclamation Ma..Fullwidth Right White Pa
(0x0ffe0, 0x0ffe6,), # Fullwidth Cent Sign ..Fullwidth Won Sign
(0x16fe0, 0x16fe3,), # Tangut Iteration Mark ..Old Chinese Iteration Ma
(0x17000, 0x187f7,), # (nil)
(0x18800, 0x18cd5,), # Tangut Component-001 ..Khitan Small Script Char
(0x18d00, 0x18d08,), # (nil)
(0x1aff0, 0x1aff3,), # Katakana Letter Minnan T..Katakana Letter Minnan T
(0x1aff5, 0x1affb,), # Katakana Letter Minnan T..Katakana Letter Minnan N
(0x1affd, 0x1affe,), # Katakana Letter Minnan N..Katakana Letter Minnan N
(0x1b000, 0x1b122,), # Katakana Letter Archaic ..Katakana Letter Archaic
(0x1b132, 0x1b132,), # Hiragana Letter Small Ko
(0x1b150, 0x1b152,), # Hiragana Letter Small Wi..Hiragana Letter Small Wo
(0x1b155, 0x1b155,), # Katakana Letter Small Ko
(0x1b164, 0x1b167,), # Katakana Letter Small Wi..Katakana Letter Small N
(0x1b170, 0x1b2fb,), # Nushu Character-1b170 ..Nushu Character-1b2fb
(0x1f004, 0x1f004,), # Mahjong Tile Red Dragon
(0x1f0cf, 0x1f0cf,), # Playing Card Black Joker
(0x1f18e, 0x1f18e,), # Negative Squared Ab
(0x1f191, 0x1f19a,), # Squared Cl ..Squared Vs
(0x1f200, 0x1f202,), # Square Hiragana Hoka ..Squared Katakana Sa
(0x1f210, 0x1f23b,), # Squared Cjk Unified Ideo..Squared Cjk Unified Ideo
(0x1f240, 0x1f248,), # Tortoise Shell Bracketed..Tortoise Shell Bracketed
(0x1f250, 0x1f251,), # Circled Ideograph Advant..Circled Ideograph Accept
(0x1f260, 0x1f265,), # Rounded Symbol For Fu ..Rounded Symbol For Cai
(0x1f300, 0x1f320,), # Cyclone ..Shooting Star
(0x1f32d, 0x1f335,), # Hot Dog ..Cactus
(0x1f337, 0x1f37c,), # Tulip ..Baby Bottle
(0x1f37e, 0x1f393,), # Bottle With Popping Cork..Graduation Cap
(0x1f3a0, 0x1f3ca,), # Carousel Horse ..Swimmer
(0x1f3cf, 0x1f3d3,), # Cricket Bat And Ball ..Table Tennis Paddle And
(0x1f3e0, 0x1f3f0,), # House Building ..European Castle
(0x1f3f4, 0x1f3f4,), # Waving Black Flag
(0x1f3f8, 0x1f3fa,), # Badminton Racquet And Sh..Amphora
(0x1f400, 0x1f43e,), # Rat ..Paw Prints
(0x1f440, 0x1f440,), # Eyes
(0x1f442, 0x1f4fc,), # Ear ..Videocassette
(0x1f4ff, 0x1f53d,), # Prayer Beads ..Down-pointing Small Red
(0x1f54b, 0x1f54e,), # Kaaba ..Menorah With Nine Branch
(0x1f550, 0x1f567,), # Clock Face One Oclock ..Clock Face Twelve-thirty
(0x1f57a, 0x1f57a,), # Man Dancing
(0x1f595, 0x1f596,), # Reversed Hand With Middl..Raised Hand With Part Be
(0x1f5a4, 0x1f5a4,), # Black Heart
(0x1f5fb, 0x1f64f,), # Mount Fuji ..Person With Folded Hands
(0x1f680, 0x1f6c5,), # Rocket ..Left Luggage
(0x1f6cc, 0x1f6cc,), # Sleeping Accommodation
(0x1f6d0, 0x1f6d2,), # Place Of Worship ..Shopping Trolley
(0x1f6d5, 0x1f6d7,), # Hindu Temple ..Elevator
(0x1f6dc, 0x1f6df,), # Wireless ..Ring Buoy
(0x1f6eb, 0x1f6ec,), # Airplane Departure ..Airplane Arriving
(0x1f6f4, 0x1f6fc,), # Scooter ..Roller Skate
(0x1f7e0, 0x1f7eb,), # Large Orange Circle ..Large Brown Square
(0x1f7f0, 0x1f7f0,), # Heavy Equals Sign
(0x1f90c, 0x1f93a,), # Pinched Fingers ..Fencer
(0x1f93c, 0x1f945,), # Wrestlers ..Goal Net
(0x1f947, 0x1f9ff,), # First Place Medal ..Nazar Amulet
(0x1fa70, 0x1fa7c,), # Ballet Shoes ..Crutch
(0x1fa80, 0x1fa88,), # Yo-yo ..Flute
(0x1fa90, 0x1fabd,), # Ringed Planet ..Wing
(0x1fabf, 0x1fac5,), # Goose ..Person With Crown
(0x1face, 0x1fadb,), # Moose ..Pea Pod
(0x1fae0, 0x1fae8,), # Melting Face ..Shaking Face
(0x1faf0, 0x1faf8,), # Hand With Index Finger A..Rightwards Pushing Hand
(0x20000, 0x2fffd,), # Cjk Unified Ideograph-20..(nil)
(0x30000, 0x3fffd,), # Cjk Unified Ideograph-30..(nil)
),
}
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@wcwidth@py2@wcwidth@table_wide.py@.PATH_END.py
|
{
"filename": "InstrumentDescription.py",
"repo_name": "cta-observatory/ctapipe",
"repo_path": "ctapipe_extracted/ctapipe-main/examples/core/InstrumentDescription.py",
"type": "Python"
}
|
"""
Working with Instrumental Descriptions
======================================
the instrumental description is loaded by the event source, and consists
of a hierarchy of classes in the ctapipe.instrument module, the base of
which is the ``SubarrayDescription``
"""
from astropy.coordinates import SkyCoord
from ctapipe.io import EventSource
from ctapipe.utils.datasets import get_dataset_path
from ctapipe.visualization import CameraDisplay
filename = get_dataset_path("gamma_prod5.simtel.zst")
with EventSource(filename, max_events=1) as source:
subarray = source.subarray
######################################################################
# the SubarrayDescription:
# ------------------------
#
subarray.info()
######################################################################
subarray.to_table()
######################################################################
# You can also get a table of just the ``OpticsDescriptions``
# (``CameraGeometry`` is more complex and can’t be stored on a single
# table row, so each one can be converted to a table separately)
#
subarray.to_table(kind="optics")
######################################################################
# Make a sub-array with only SC-type telescopes:
#
sc_tels = [tel_id for tel_id, tel in subarray.tel.items() if tel.optics.n_mirrors == 2]
newsub = subarray.select_subarray(sc_tels, name="SCTels")
newsub.info()
######################################################################
# can also do this by using ``Table.group_by``
#
######################################################################
# Explore some of the details of the telescopes
# ---------------------------------------------
#
tel = subarray.tel[1]
tel
######################################################################
tel.optics.mirror_area
######################################################################
tel.optics.n_mirror_tiles
######################################################################
tel.optics.equivalent_focal_length
######################################################################
tel.camera
######################################################################
tel.camera.geometry.pix_x
######################################################################
# %matplotlib inline
CameraDisplay(tel.camera.geometry)
######################################################################
CameraDisplay(subarray.tel[98].camera.geometry)
######################################################################
# Plot the subarray
# -----------------
#
# We’ll make a subarray by telescope type and plot each separately, so
# they appear in different colors. We also calculate the radius using the
# mirror area (and exaggerate it a bit).
#
# This is just for debugging and info, for any “real” use, a
# ``visualization.ArrayDisplay`` should be used
#
subarray.peek()
######################################################################
subarray.footprint
######################################################################
# Get info about the subarray in general
# --------------------------------------
#
subarray.telescope_types
######################################################################
subarray.camera_types
######################################################################
subarray.optics_types
######################################################################
center = SkyCoord("10.0 m", "2.0 m", "0.0 m", frame="groundframe")
coords = subarray.tel_coords # a flat list of coordinates by tel_index
coords.separation(center)
######################################################################
# Telescope IDs vs Indices
# ------------------------
#
# Note that ``subarray.tel`` is a dict mapped by ``tel_id`` (the
# identifying number of a telescope). It is possible to have telescope
# IDs that do not start at 0, are not contiguouous (e.g. if a subarray is
# selected). Some functions and properties like ``tel_coords`` are numpy
# arrays (not dicts) so they are not mapped to the telescope ID, but
# rather the *index* within this SubarrayDescription. To convert between
# the two concepts you can do:
#
subarray.tel_ids_to_indices([1, 5, 23])
######################################################################
# or you can get the indexing array directly in numpy or dict form:
#
subarray.tel_index_array
######################################################################
subarray.tel_index_array[[1, 5, 23]]
######################################################################
subarray.tel_indices[
1
] # this is a dict of tel_id -> tel_index, so we can only do one at once
ids = subarray.get_tel_ids_for_type(subarray.telescope_types[0])
ids
######################################################################
idx = subarray.tel_ids_to_indices(ids)
idx
######################################################################
subarray.tel_coords[idx]
######################################################################
# so, with that method you can quickly get many telescope positions at
# once (the alternative is to use the dict ``positions`` which maps
# ``tel_id`` to a position on the ground
#
subarray.positions[1]
|
cta-observatoryREPO_NAMEctapipePATH_START.@ctapipe_extracted@ctapipe-main@examples@core@InstrumentDescription.py@.PATH_END.py
|
{
"filename": "rawfibers.py",
"repo_name": "danielrd6/ifscube",
"repo_path": "ifscube_extracted/ifscube-master/ifscube/rawfibers.py",
"type": "Python"
}
|
from astropy.io import fits
import numpy as np
import matplotlib.pyplot as plt
class Cube:
    """Container for a raw (row-stacked) fiber spectra file.

    Reads the science extension and the fiber definition table (MDF) from
    a FITS file and exposes masks separating sky and object fibers.
    Presumably follows the Gemini MDF convention (``apid``/``beam``
    columns) -- confirm against the instrument documentation.
    """

    def __init__(self, fitsfile):
        """Load spectra and fiber table from *fitsfile*.

        Parameters
        ----------
        fitsfile : str
            Path to the FITS file with ``sci`` and ``mdf`` extensions.
        """
        # NOTE(review): the HDUList is never closed here; astropy keeps the
        # file handle open for lazy loading -- verify this is intentional.
        hdulist = fits.open(fitsfile)
        self.data = hdulist['sci'].data
        self._full_mdf = hdulist['mdf'].data

        # Keep only rows with a non-zero aperture id; these must match the
        # rows of the data array one-to-one.
        valid_rows = self._full_mdf['apid'] != 0
        self.mdf = self._full_mdf[valid_rows]

        # beam == 0 marks sky fibers, beam == 1 marks object fibers.
        self.sky_mask = self.mdf['beam'] == 0
        self.obj_mask = self.mdf['beam'] == 1

        self._filename = fitsfile

    def imshow(self, data=None, ax=None, **kwargs):
        """Scatter-plot the object fibers at their instrument positions.

        Parameters
        ----------
        data : array-like, optional
            One value per object fiber used for the color scale; defaults
            to the wavelength-summed flux of each fiber.
        ax : matplotlib.axes.Axes, optional
            Axes to draw on; a new figure is created when omitted.
        **kwargs
            Extra keyword arguments forwarded to ``Axes.scatter``.
        """
        if ax is None:
            figure = plt.figure(1)
            plt.clf()
            ax = figure.add_subplot(111)

        selected = self.obj_mask
        x = self.mdf['xinst'][selected]
        y = self.mdf['yinst'][selected]

        # Recenter coordinates on the midpoint of the fiber footprint.
        x = x - (x.max() + x.min()) / 2.
        y = y - (y.max() + y.min()) / 2.

        if data is None:
            data = self.data[selected].sum(axis=1)

        ax.scatter(x, y, c=data, cmap='inferno', s=250,
                   marker='H', edgecolor='none', **kwargs)
        ax.set_aspect('equal')
        plt.show()
|
danielrd6REPO_NAMEifscubePATH_START.@ifscube_extracted@ifscube-master@ifscube@rawfibers.py@.PATH_END.py
|
{
"filename": "test_argcomplete.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/traitlets/py3/tests/config/test_argcomplete.py",
"type": "Python"
}
|
"""
Tests for argcomplete handling by traitlets.config.application.Application
"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
from __future__ import annotations
import io
import os
import typing as t
import pytest
argcomplete = pytest.importorskip("argcomplete")
from traitlets import Unicode
from traitlets.config.application import Application
from traitlets.config.configurable import Configurable
from traitlets.config.loader import KVArgParseConfigLoader
class ArgcompleteApp(Application):
    """Application subclass whose loader forwards kwargs to argcomplete.

    The tests inject ``argcomplete_kwargs`` (output stream, exit hook) so
    completion results can be captured instead of written to a shell.
    """

    # Keyword arguments passed through to argcomplete during parsing.
    argcomplete_kwargs: t.Dict[str, t.Any]

    def __init__(self, *args, **kwargs):
        # Subcommand apps must reuse the completion kwargs of the app that
        # spawned them, otherwise their output would go to the real stream.
        parent_app = kwargs.get("parent")
        super().__init__(*args, **kwargs)
        inherited = getattr(parent_app, "argcomplete_kwargs", None) if parent_app else None
        if inherited:
            self.argcomplete_kwargs = inherited

    def _create_loader(self, argv, aliases, flags, classes):
        # Build the standard KV loader, then attach the completion kwargs so
        # the loader hands them to argcomplete.
        new_loader = KVArgParseConfigLoader(
            argv, aliases, flags, classes=classes, log=self.log, subcommands=self.subcommands
        )
        new_loader._argcomplete_kwargs = self.argcomplete_kwargs  # type: ignore[attr-defined]
        return new_loader
class SubApp1(ArgcompleteApp):
    # Minimal subcommand app; exists only so the tests can complete its
    # trait names (e.g. --SubApp1.log_level).
    pass
class SubApp2(ArgcompleteApp):
    # Subcommand app instantiated through a factory classmethod, to test
    # the non-class (callable) form of subcommand entries.
    @classmethod
    def get_subapp_instance(cls, app: Application) -> Application:
        """Return the singleton SubApp2, parented to *app*."""
        app.clear_instance()  # since Application is singleton, need to clear main app
        return cls.instance(parent=app)  # type: ignore[no-any-return]
class MainApp(ArgcompleteApp):
    # Maps subcommand name -> (app class or factory callable, help text);
    # subapp2 exercises the factory-callable form.
    subcommands = {
        "subapp1": (SubApp1, "First subapp"),
        "subapp2": (SubApp2.get_subapp_instance, "Second subapp"),
    }
class CustomError(Exception):
    """Stand-in for interpreter exit during argcomplete tests.

    Passed as argcomplete's ``exit_method`` so that, instead of terminating
    the process, completion raises an exception carrying the exit code.
    """

    @classmethod
    def exit(cls, code):
        # Surface the exit code as the exception message so callers can
        # check it with str(exc).
        message = str(code)
        raise cls(message)
class TestArgcomplete:
IFS = "\013"
COMP_WORDBREAKS = " \t\n\"'><=;|&(:"
@pytest.fixture()
def argcomplete_on(self, mocker):
"""Mostly borrowed from argcomplete's unit test fixtures
Set up environment variables to mimic those passed by argcomplete
"""
_old_environ = os.environ
os.environ = os.environ.copy() # type: ignore[assignment]
os.environ["_ARGCOMPLETE"] = "1"
os.environ["_ARC_DEBUG"] = "yes"
os.environ["IFS"] = self.IFS
os.environ["_ARGCOMPLETE_COMP_WORDBREAKS"] = self.COMP_WORDBREAKS
# argcomplete==2.0.0 always calls fdopen(9, "w") to open a debug stream,
# however this could conflict with file descriptors used by pytest
# and lead to obscure errors. Since we are not looking at debug stream
# in these tests, just mock this fdopen call out.
mocker.patch("os.fdopen")
try:
yield
finally:
os.environ = _old_environ
def run_completer(
self,
app: ArgcompleteApp,
command: str,
point: t.Union[str, int, None] = None,
**kwargs: t.Any,
) -> t.List[str]:
"""Mostly borrowed from argcomplete's unit tests
Modified to take an application instead of an ArgumentParser
Command is the current command being completed and point is the index
into the command where the completion is triggered.
"""
if point is None:
point = str(len(command))
# Flushing tempfile was leading to CI failures with Bad file descriptor, not sure why.
# Fortunately we can just write to a StringIO instead.
# print("Writing completions to temp file with mode=", write_mode)
# from tempfile import TemporaryFile
# with TemporaryFile(mode=write_mode) as t:
strio = io.StringIO()
os.environ["COMP_LINE"] = command
os.environ["COMP_POINT"] = str(point)
with pytest.raises(CustomError) as cm: # noqa: PT012
app.argcomplete_kwargs = dict(
output_stream=strio, exit_method=CustomError.exit, **kwargs
)
app.initialize()
if str(cm.value) != "0":
raise RuntimeError(f"Unexpected exit code {cm.value}")
out = strio.getvalue()
return out.split(self.IFS)
def test_complete_simple_app(self, argcomplete_on):
app = ArgcompleteApp()
expected = [
"--help",
"--debug",
"--show-config",
"--show-config-json",
"--log-level",
"--Application.",
"--ArgcompleteApp.",
]
assert set(self.run_completer(app, "app --")) == set(expected)
# completing class traits
assert set(self.run_completer(app, "app --App")) > {
"--Application.show_config",
"--Application.log_level",
"--Application.log_format",
}
def test_complete_custom_completers(self, argcomplete_on):
app = ArgcompleteApp()
# test pre-defined completers for Bool/Enum
assert set(self.run_completer(app, "app --Application.log_level=")) > {"DEBUG", "INFO"}
assert set(self.run_completer(app, "app --ArgcompleteApp.show_config ")) == {
"0",
"1",
"true",
"false",
}
# test custom completer and mid-command completions
class CustomCls(Configurable):
val = Unicode().tag(
config=True, argcompleter=argcomplete.completers.ChoicesCompleter(["foo", "bar"])
)
class CustomApp(ArgcompleteApp):
classes = [CustomCls]
aliases = {("v", "val"): "CustomCls.val"}
app = CustomApp()
assert self.run_completer(app, "app --val ") == ["foo", "bar"]
assert self.run_completer(app, "app --val=") == ["foo", "bar"]
assert self.run_completer(app, "app -v ") == ["foo", "bar"]
assert self.run_completer(app, "app -v=") == ["foo", "bar"]
assert self.run_completer(app, "app --CustomCls.val ") == ["foo", "bar"]
assert self.run_completer(app, "app --CustomCls.val=") == ["foo", "bar"]
completions = self.run_completer(app, "app --val= abc xyz", point=10)
# fixed in argcomplete >= 2.0 to return latter below
assert completions == ["--val=foo", "--val=bar"] or completions == ["foo", "bar"]
assert self.run_completer(app, "app --val --log-level=", point=10) == ["foo", "bar"]
def test_complete_subcommands(self, argcomplete_on):
app = MainApp()
assert set(self.run_completer(app, "app ")) >= {"subapp1", "subapp2"}
assert set(self.run_completer(app, "app sub")) == {"subapp1", "subapp2"}
assert set(self.run_completer(app, "app subapp1")) == {"subapp1"}
    def test_complete_subcommands_subapp1(self, argcomplete_on):
        """After selecting ``subapp1``, the subapp's own trait prefixes are offered."""
        # subcommand handling modifies _ARGCOMPLETE env var global state, so
        # only can test one completion per unit test
        app = MainApp()
        try:
            assert set(self.run_completer(app, "app subapp1 --Sub")) > {
                "--SubApp1.show_config",
                "--SubApp1.log_level",
                "--SubApp1.log_format",
            }
        finally:
            # Drop the singleton so later tests see a fresh SubApp1.
            SubApp1.clear_instance()
    def test_complete_subcommands_subapp2(self, argcomplete_on):
        """After selecting ``subapp2``, both base and subapp class prefixes appear."""
        app = MainApp()
        try:
            assert set(self.run_completer(app, "app subapp2 --")) > {
                "--Application.",
                "--SubApp2.",
            }
        finally:
            # Drop the singleton so later tests see a fresh SubApp2.
            SubApp2.clear_instance()
def test_complete_subcommands_main(self, argcomplete_on):
app = MainApp()
completions = set(self.run_completer(app, "app --"))
assert completions > {"--Application.", "--MainApp."}
assert "--SubApp1." not in completions
assert "--SubApp2." not in completions
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@traitlets@py3@tests@config@test_argcomplete.py@.PATH_END.py
|
{
"filename": "setup.py",
"repo_name": "amusecode/amuse",
"repo_path": "amuse_extracted/amuse-main/packages/amuse-kepler/setup.py",
"type": "Python"
}
|
#!/usr/bin/env python3
"""Setuptools configuration for the amuse-kepler community-code package."""
from support.classifiers import classifiers
from setuptools import setup
import support
# Must run before importing setup_codes: selects the "system" build support mode.
support.use("system")
from support.setup_codes import setup_commands
# Package metadata.
name = 'amuse-kepler'
author = 'The AMUSE team'
author_email = 'info@amusecode.org'
license_ = "Apache License 2.0"
url = 'http://www.amusecode.org/'
install_requires = [
    'amuse-framework',
]
description = 'The Astrophysical Multipurpose Software Environment - Kepler'
# Long description is taken verbatim from the package README.
with open("README.md", "r") as fh:
    long_description = fh.read()
long_description_content_type = "text/markdown"
extensions = []
all_data_files = []
packages = [
    'amuse.community.kepler',
]
package_data = {
}
# Custom build/install commands provided by the AMUSE support package.
mapping_from_command_name_to_command_class = setup_commands()
setup_requires = ['setuptools_scm']
# Version is derived from the monorepo's git metadata two levels up.
use_scm_version = {
    "root": "../..",
    "relative_to": __file__,
    "version_file": "src/amuse/community/kepler/_version.py",
}
setup(
    name=name,
    use_scm_version=use_scm_version,
    setup_requires=setup_requires,
    classifiers=classifiers,
    url=url,
    author_email=author_email,
    author=author,
    license=license_,
    description=description,
    long_description=long_description,
    long_description_content_type=long_description_content_type,
    install_requires=install_requires,
    python_requires=">=3.7",
    cmdclass=mapping_from_command_name_to_command_class,
    ext_modules=extensions,
    package_dir={
        'amuse.community.kepler': 'src/amuse/community/kepler',
    },
    packages=packages,
    package_data=package_data,
    data_files=all_data_files,
)
|
amusecodeREPO_NAMEamusePATH_START.@amuse_extracted@amuse-main@packages@amuse-kepler@setup.py@.PATH_END.py
|
{
"filename": "_tickfont.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/graph_objs/bar/marker/colorbar/_tickfont.py",
"type": "Python"
}
|
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Tickfont(_BaseTraceHierarchyType):
    """Auto-generated plotly graph object for ``bar.marker.colorbar.tickfont``.

    Holds the font settings (color, family, size, weight, ...) applied to the
    color bar's tick labels. Each property below is a validated accessor into
    the underlying plotly JSON structure.
    """
    # class properties
    # --------------------
    _parent_path_str = "bar.marker.colorbar"
    _path_str = "bar.marker.colorbar.tickfont"
    _valid_props = {
        "color",
        "family",
        "lineposition",
        "shadow",
        "size",
        "style",
        "textcase",
        "variant",
        "weight",
    }
    # color
    # -----
    @property
    def color(self):
        """
        The 'color' property is a color and may be specified as:
          - A hex string (e.g. '#ff0000')
          - An rgb/rgba string (e.g. 'rgb(255,0,0)')
          - An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
          - An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
          - A named CSS color:
                aliceblue, antiquewhite, aqua, aquamarine, azure,
                beige, bisque, black, blanchedalmond, blue,
                blueviolet, brown, burlywood, cadetblue,
                chartreuse, chocolate, coral, cornflowerblue,
                cornsilk, crimson, cyan, darkblue, darkcyan,
                darkgoldenrod, darkgray, darkgrey, darkgreen,
                darkkhaki, darkmagenta, darkolivegreen, darkorange,
                darkorchid, darkred, darksalmon, darkseagreen,
                darkslateblue, darkslategray, darkslategrey,
                darkturquoise, darkviolet, deeppink, deepskyblue,
                dimgray, dimgrey, dodgerblue, firebrick,
                floralwhite, forestgreen, fuchsia, gainsboro,
                ghostwhite, gold, goldenrod, gray, grey, green,
                greenyellow, honeydew, hotpink, indianred, indigo,
                ivory, khaki, lavender, lavenderblush, lawngreen,
                lemonchiffon, lightblue, lightcoral, lightcyan,
                lightgoldenrodyellow, lightgray, lightgrey,
                lightgreen, lightpink, lightsalmon, lightseagreen,
                lightskyblue, lightslategray, lightslategrey,
                lightsteelblue, lightyellow, lime, limegreen,
                linen, magenta, maroon, mediumaquamarine,
                mediumblue, mediumorchid, mediumpurple,
                mediumseagreen, mediumslateblue, mediumspringgreen,
                mediumturquoise, mediumvioletred, midnightblue,
                mintcream, mistyrose, moccasin, navajowhite, navy,
                oldlace, olive, olivedrab, orange, orangered,
                orchid, palegoldenrod, palegreen, paleturquoise,
                palevioletred, papayawhip, peachpuff, peru, pink,
                plum, powderblue, purple, red, rosybrown,
                royalblue, rebeccapurple, saddlebrown, salmon,
                sandybrown, seagreen, seashell, sienna, silver,
                skyblue, slateblue, slategray, slategrey, snow,
                springgreen, steelblue, tan, teal, thistle, tomato,
                turquoise, violet, wheat, white, whitesmoke,
                yellow, yellowgreen
        Returns
        -------
        str
        """
        return self["color"]
    @color.setter
    def color(self, val):
        self["color"] = val
    # family
    # ------
    @property
    def family(self):
        """
        HTML font family - the typeface that will be applied by the web
        browser. The web browser will only be able to apply a font if
        it is available on the system which it operates. Provide
        multiple font families, separated by commas, to indicate the
        preference in which to apply fonts if they aren't available on
        the system. The Chart Studio Cloud (at https://chart-
        studio.plotly.com or on-premise) generates images on a server,
        where only a select number of fonts are installed and
        supported. These include "Arial", "Balto", "Courier New",
        "Droid Sans", "Droid Serif", "Droid Sans Mono", "Gravitas One",
        "Old Standard TT", "Open Sans", "Overpass", "PT Sans Narrow",
        "Raleway", "Times New Roman".
        The 'family' property is a string and must be specified as:
          - A non-empty string
        Returns
        -------
        str
        """
        return self["family"]
    @family.setter
    def family(self, val):
        self["family"] = val
    # lineposition
    # ------------
    @property
    def lineposition(self):
        """
        Sets the kind of decoration line(s) with text, such as an
        "under", "over" or "through" as well as combinations e.g.
        "under+over", etc.
        The 'lineposition' property is a flaglist and may be specified
        as a string containing:
          - Any combination of ['under', 'over', 'through'] joined with '+' characters
            (e.g. 'under+over')
            OR exactly one of ['none'] (e.g. 'none')
        Returns
        -------
        Any
        """
        return self["lineposition"]
    @lineposition.setter
    def lineposition(self, val):
        self["lineposition"] = val
    # shadow
    # ------
    @property
    def shadow(self):
        """
        Sets the shape and color of the shadow behind text. "auto"
        places minimal shadow and applies contrast text font color. See
        https://developer.mozilla.org/en-US/docs/Web/CSS/text-shadow
        for additional options.
        The 'shadow' property is a string and must be specified as:
          - A string
          - A number that will be converted to a string
        Returns
        -------
        str
        """
        return self["shadow"]
    @shadow.setter
    def shadow(self, val):
        self["shadow"] = val
    # size
    # ----
    @property
    def size(self):
        """
        The 'size' property is a number and may be specified as:
          - An int or float in the interval [1, inf]
        Returns
        -------
        int|float
        """
        return self["size"]
    @size.setter
    def size(self, val):
        self["size"] = val
    # style
    # -----
    @property
    def style(self):
        """
        Sets whether a font should be styled with a normal or italic
        face from its family.
        The 'style' property is an enumeration that may be specified as:
          - One of the following enumeration values:
                ['normal', 'italic']
        Returns
        -------
        Any
        """
        return self["style"]
    @style.setter
    def style(self, val):
        self["style"] = val
    # textcase
    # --------
    @property
    def textcase(self):
        """
        Sets capitalization of text. It can be used to make text appear
        in all-uppercase or all-lowercase, or with each word
        capitalized.
        The 'textcase' property is an enumeration that may be specified as:
          - One of the following enumeration values:
                ['normal', 'word caps', 'upper', 'lower']
        Returns
        -------
        Any
        """
        return self["textcase"]
    @textcase.setter
    def textcase(self, val):
        self["textcase"] = val
    # variant
    # -------
    @property
    def variant(self):
        """
        Sets the variant of the font.
        The 'variant' property is an enumeration that may be specified as:
          - One of the following enumeration values:
                ['normal', 'small-caps', 'all-small-caps',
                'all-petite-caps', 'petite-caps', 'unicase']
        Returns
        -------
        Any
        """
        return self["variant"]
    @variant.setter
    def variant(self, val):
        self["variant"] = val
    # weight
    # ------
    @property
    def weight(self):
        """
        Sets the weight (or boldness) of the font.
        The 'weight' property is a integer and may be specified as:
          - An int (or float that will be cast to an int)
            in the interval [1, 1000]
            OR exactly one of ['normal', 'bold'] (e.g. 'bold')
        Returns
        -------
        int
        """
        return self["weight"]
    @weight.setter
    def weight(self, val):
        self["weight"] = val
    # Self properties description
    # ---------------------------
    @property
    def _prop_descriptions(self):
        return """\
        color
        family
            HTML font family - the typeface that will be applied by
            the web browser. The web browser will only be able to
            apply a font if it is available on the system which it
            operates. Provide multiple font families, separated by
            commas, to indicate the preference in which to apply
            fonts if they aren't available on the system. The Chart
            Studio Cloud (at https://chart-studio.plotly.com or on-
            premise) generates images on a server, where only a
            select number of fonts are installed and supported.
            These include "Arial", "Balto", "Courier New", "Droid
            Sans", "Droid Serif", "Droid Sans Mono", "Gravitas
            One", "Old Standard TT", "Open Sans", "Overpass", "PT
            Sans Narrow", "Raleway", "Times New Roman".
        lineposition
            Sets the kind of decoration line(s) with text, such as
            an "under", "over" or "through" as well as combinations
            e.g. "under+over", etc.
        shadow
            Sets the shape and color of the shadow behind text.
            "auto" places minimal shadow and applies contrast text
            font color. See https://developer.mozilla.org/en-
            US/docs/Web/CSS/text-shadow for additional options.
        size
        style
            Sets whether a font should be styled with a normal or
            italic face from its family.
        textcase
            Sets capitalization of text. It can be used to make
            text appear in all-uppercase or all-lowercase, or with
            each word capitalized.
        variant
            Sets the variant of the font.
        weight
            Sets the weight (or boldness) of the font.
        """
    def __init__(
        self,
        arg=None,
        color=None,
        family=None,
        lineposition=None,
        shadow=None,
        size=None,
        style=None,
        textcase=None,
        variant=None,
        weight=None,
        **kwargs,
    ):
        """
        Construct a new Tickfont object
        Sets the color bar's tick label font
        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of
            :class:`plotly.graph_objs.bar.marker.colorbar.Tickfont`
        color
        family
            HTML font family - the typeface that will be applied by
            the web browser. The web browser will only be able to
            apply a font if it is available on the system which it
            operates. Provide multiple font families, separated by
            commas, to indicate the preference in which to apply
            fonts if they aren't available on the system. The Chart
            Studio Cloud (at https://chart-studio.plotly.com or on-
            premise) generates images on a server, where only a
            select number of fonts are installed and supported.
            These include "Arial", "Balto", "Courier New", "Droid
            Sans", "Droid Serif", "Droid Sans Mono", "Gravitas
            One", "Old Standard TT", "Open Sans", "Overpass", "PT
            Sans Narrow", "Raleway", "Times New Roman".
        lineposition
            Sets the kind of decoration line(s) with text, such as
            an "under", "over" or "through" as well as combinations
            e.g. "under+over", etc.
        shadow
            Sets the shape and color of the shadow behind text.
            "auto" places minimal shadow and applies contrast text
            font color. See https://developer.mozilla.org/en-
            US/docs/Web/CSS/text-shadow for additional options.
        size
        style
            Sets whether a font should be styled with a normal or
            italic face from its family.
        textcase
            Sets capitalization of text. It can be used to make
            text appear in all-uppercase or all-lowercase, or with
            each word capitalized.
        variant
            Sets the variant of the font.
        weight
            Sets the weight (or boldness) of the font.
        Returns
        -------
        Tickfont
        """
        super(Tickfont, self).__init__("tickfont")
        # Internal fast path: adopt the parent reference and skip validation.
        if "_parent" in kwargs:
            self._parent = kwargs["_parent"]
            return
        # Validate arg
        # ------------
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            arg = _copy.copy(arg)
        else:
            raise ValueError(
                """\
The first argument to the plotly.graph_objs.bar.marker.colorbar.Tickfont
constructor must be a dict or
an instance of :class:`plotly.graph_objs.bar.marker.colorbar.Tickfont`"""
            )
        # Handle skip_invalid
        # -------------------
        self._skip_invalid = kwargs.pop("skip_invalid", False)
        self._validate = kwargs.pop("_validate", True)
        # Populate data dict with properties
        # ----------------------------------
        # Explicit keyword arguments take precedence over values in `arg`.
        _v = arg.pop("color", None)
        _v = color if color is not None else _v
        if _v is not None:
            self["color"] = _v
        _v = arg.pop("family", None)
        _v = family if family is not None else _v
        if _v is not None:
            self["family"] = _v
        _v = arg.pop("lineposition", None)
        _v = lineposition if lineposition is not None else _v
        if _v is not None:
            self["lineposition"] = _v
        _v = arg.pop("shadow", None)
        _v = shadow if shadow is not None else _v
        if _v is not None:
            self["shadow"] = _v
        _v = arg.pop("size", None)
        _v = size if size is not None else _v
        if _v is not None:
            self["size"] = _v
        _v = arg.pop("style", None)
        _v = style if style is not None else _v
        if _v is not None:
            self["style"] = _v
        _v = arg.pop("textcase", None)
        _v = textcase if textcase is not None else _v
        if _v is not None:
            self["textcase"] = _v
        _v = arg.pop("variant", None)
        _v = variant if variant is not None else _v
        if _v is not None:
            self["variant"] = _v
        _v = arg.pop("weight", None)
        _v = weight if weight is not None else _v
        if _v is not None:
            self["weight"] = _v
        # Process unknown kwargs
        # ----------------------
        self._process_kwargs(**dict(arg, **kwargs))
        # Reset skip_invalid
        # ------------------
        self._skip_invalid = False
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@graph_objs@bar@marker@colorbar@_tickfont.py@.PATH_END.py
|
{
"filename": "setup.py",
"repo_name": "jlustigy/coronagraph",
"repo_path": "coronagraph_extracted/coronagraph-master/setup.py",
"type": "Python"
}
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Setuptools configuration for the coronagraph noise-model package."""
from __future__ import division, print_function, absolute_import
from setuptools import setup
# Hackishly inject a constant into builtins to enable importing of the
# module in "setup" mode. Stolen from `kplr`
import sys
if sys.version_info[0] < 3:
    import __builtin__ as builtins
else:
    import builtins
builtins.__CORONAGRAPH_SETUP__ = True
# Safe to import now: the flag above lets the package skip heavy init.
import coronagraph
long_description = \
    """Coronagraph noise model for directly imaging exoplanets."""
# Setup!
setup(name='coronagraph',
      # Single-source the version from the package itself.
      version=coronagraph.__version__,
      description='Coronagraph noise model for directly imaging exoplanets.',
      long_description=long_description,
      classifiers=[
          'Development Status :: 3 - Alpha',
          'Programming Language :: Python',
          'Programming Language :: Python :: 3',
          'License :: OSI Approved :: MIT License',
          'Topic :: Scientific/Engineering :: Astronomy',
      ],
      url='http://github.com/jlustigy/coronagraph',
      author='Jacob Lustig-Yaeger',
      author_email='jlustigy@uw.edu',
      license='MIT',
      packages=['coronagraph'],
      install_requires=[
          'numpy',
          'scipy',
          'matplotlib',
          'numba',
          'astropy'
      ],
      dependency_links=[],
      scripts=[],
      include_package_data=True,
      zip_safe=False,
      # Bundled planet spectra shipped alongside the code.
      data_files=["coronagraph/planets/ArcheanEarth_geo_albedo.txt",
                  "coronagraph/planets/EarlyMars_geo_albedo.txt",
                  "coronagraph/planets/EarlyVenus_geo_albedo.txt",
                  "coronagraph/planets/earth_avg_hitran2012_300_100000cm.trnst",
                  "coronagraph/planets/earth_avg_hitran2012_300_100000cm_toa.rad",
                  "coronagraph/planets/Hazy_ArcheanEarth_geo_albedo.txt",
                  "coronagraph/planets/Jupiter_geo_albedo.txt",
                  "coronagraph/planets/Mars_geo_albedo.txt",
                  "coronagraph/planets/Neptune_geo_albedo.txt",
                  "coronagraph/planets/Saturn_geo_albedo.txt",
                  "coronagraph/planets/Uranus_geo_albedo.txt",
                  "coronagraph/planets/Venus_geo_albedo.txt",
                  "coronagraph/planets/earth_quadrature_radiance_refl.dat"
                  ]
      )
|
jlustigyREPO_NAMEcoronagraphPATH_START.@coronagraph_extracted@coronagraph-master@setup.py@.PATH_END.py
|
{
"filename": "_textfont.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/heatmap/_textfont.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class TextfontValidator(_plotly_utils.basevalidators.CompoundValidator):
    """Auto-generated validator for the ``heatmap.textfont`` compound property.

    Delegates validation to the ``Textfont`` data class; ``data_docs`` is the
    runtime help text shown for each sub-property.
    """
    def __init__(self, plotly_name="textfont", parent_name="heatmap", **kwargs):
        super(TextfontValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            data_class_str=kwargs.pop("data_class_str", "Textfont"),
            data_docs=kwargs.pop(
                "data_docs",
                """
            color
            family
                HTML font family - the typeface that will be
                applied by the web browser. The web browser
                will only be able to apply a font if it is
                available on the system which it operates.
                Provide multiple font families, separated by
                commas, to indicate the preference in which to
                apply fonts if they aren't available on the
                system. The Chart Studio Cloud (at
                https://chart-studio.plotly.com or on-premise)
                generates images on a server, where only a
                select number of fonts are installed and
                supported. These include "Arial", "Balto",
                "Courier New", "Droid Sans", "Droid Serif",
                "Droid Sans Mono", "Gravitas One", "Old
                Standard TT", "Open Sans", "Overpass", "PT Sans
                Narrow", "Raleway", "Times New Roman".
            lineposition
                Sets the kind of decoration line(s) with text,
                such as an "under", "over" or "through" as well
                as combinations e.g. "under+over", etc.
            shadow
                Sets the shape and color of the shadow behind
                text. "auto" places minimal shadow and applies
                contrast text font color. See
                https://developer.mozilla.org/en-
                US/docs/Web/CSS/text-shadow for additional
                options.
            size
            style
                Sets whether a font should be styled with a
                normal or italic face from its family.
            textcase
                Sets capitalization of text. It can be used to
                make text appear in all-uppercase or all-
                lowercase, or with each word capitalized.
            variant
                Sets the variant of the font.
            weight
                Sets the weight (or boldness) of the font.
""",
            ),
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@heatmap@_textfont.py@.PATH_END.py
|
{
"filename": "custom_instrument_example.py",
"repo_name": "AWehrhahn/PyReduce",
"repo_path": "PyReduce_extracted/PyReduce-master/examples/custom_instrument_example.py",
"type": "Python"
}
|
# -*- coding: utf-8 -*-
"""
Simple usage example for PyReduce
Loads a sample UVES dataset, and runs the full extraction
"""
import os.path
import pyreduce
from pyreduce.configuration import get_configuration_for_instrument
from pyreduce.instruments.common import create_custom_instrument
from pyreduce.reduce import Reducer
from pyreduce.util import start_logging
# Define the path to support files if possible
# otherwise set them to None
# Obviously they are necessary for their respective steps
bpm_mask = "path/to/bpm_mask.fits"
wavecal_file = "path/to/wavecal_file"
# create our custom instrument
instrument = create_custom_instrument(
    "custom", extension=1, mask_file=bpm_mask, wavecal_file=wavecal_file
)
# Override default values
# those can either be fixed values or refer to FITS header keywords
instrument.info["readnoise"] = 1
instrument.info["prescan_x"] = "PRESCAN X"
# For loading the config we specify pyreduce as the source, since this is the default
config = get_configuration_for_instrument("pyreduce", plot=1)
# Define your own configuration
config["orders"]["degree"] = 5
# Since we can't find the files ourselves (at least not without defining the criteria we are looking for)
# We need to manually define which files go where
files = {"bias": ["file1", "file2"], "flat": ["file3"]}
# We define the path to the output directory
output_dir = "path/to/output"
# (optional) We need to define the log file
log_file = "path/to/log_file.txt"
start_logging(log_file)
# Define other parameter for PyReduce
target = ""
night = "2019-07-21"
mode = ""
# Reduction steps to run, in order; commented steps are skipped here
# because this example has no wavelength-calibration input files.
steps = (
    "bias",
    "flat",
    "orders",
    "curvature",
    "scatter",
    "norm_flat",
    # "wavecal",
    # "freq_comb",
    "science",
    # "continuum",
    # "finalize",
)
# Call the PyReduce algorithm
reducer = Reducer(
    files,
    output_dir,
    target,
    instrument,
    mode,
    night,
    config,
    # order_range=order_range,
    # skip_existing=False,
)
# Runs the selected steps and returns their results keyed by step name.
data = reducer.run_steps(steps=steps)
|
AWehrhahnREPO_NAMEPyReducePATH_START.@PyReduce_extracted@PyReduce-master@examples@custom_instrument_example.py@.PATH_END.py
|
{
"filename": "geometry.ipynb",
"repo_name": "rtazaki1205/AggScatVIR",
"repo_path": "AggScatVIR_extracted/AggScatVIR-master/docs/geometry.ipynb",
"type": "Jupyter Notebook"
}
|
# Visualization of Particles
This database already contains visualization images of particles rendered using POV-RAY. However, you may wish to change rendering parameters, such as particle and background colors, reference scale, particle orientation ... etc. To meet these needs, the `aggscatpy` package implements a simple interface that can be used to run POV-RAY and then generate a new rendering image of particles.
## Prerequisites
Since the rendering will be performed using POV-RAY, it has to be installed in advance. Also, to run the command, a povray script (either aggregate.pov or irregular.pov) has to be placed in your working directory. You can find these scripts in aggscatvir/python/povray/, so copy and paste them to the working directory.
## Basic usage
First, we need to import the package:
```python
import aggscatpy
```
To generate a particle image, we can use the ``particle_rendering`` function:
```python
aggscatpy.particle_rendering(partype='CAHP',size='8',amon='100nm',ireal='1',fn='geom_demo1',path='./imgs/')
```
writing ... ./imgs/geom_demo1.png
where ``partype``, ``size``, ``amon`` are the particle type, particle size, and the monomer radius, respectively. These arguments are the same as those introduced in the previous section. ``ireal`` is a realization number. The command will generate a povray readable file and pass it to POV-RAY automatically. After running POV-RAY, a .png image will be saved in `./imgs/`.
To check out the produced image, let's define a simple function:
```python
import matplotlib
import matplotlib.pyplot as plt
def show_image(filename):
try:
im = plt.imread(filename)
plt.imshow(im)
plt.axis('off')
except FileNotFoundError:
        print('No such file. Unable to read a particle image.')
```
The image produced by the above command is
```python
show_image('./imgs/geom_demo1.png')
```

You can also produce images for irregular grains via
```python
aggscatpy.particle_rendering(partype='grs',size='1_6000',ireal='4',fn='geom_demo2',path='./imgs/')
show_image('./imgs/geom_demo2.png')
```
writing ... ./imgs/geom_demo2.png

## Particle and background colors
We can specify the color of particles by adding ``particle_color='rgb<value,value,value>'``. The color can be chosen from basic colors (e.g., Red, Green, Blue, Yellow, Cyan, Magenta, Clear, White) or specified with RGB mixing. RGB is a mixture of colors based on the primary colors of light, allowing for fine-tuning of the particle color. The color can be specified in the form of 'rgb<value,value,value>', where each value ranges from 0 to 1. For example, black is 'rgb<0,0,0>', and white is 'rgb<1,1,1>'. When all values of R, G, and B are the same, the color can also be specified simply as 'rgb value'.
As an example, let's make a yellowish particle:
```python
aggscatpy.particle_rendering(partype='CAHP',size='8',amon='100nm',ireal='1',fn='geom_demo3',path='./imgs/',\
particle_color='rgb<0.4,0.4,0.15>')
show_image('./imgs/geom_demo3.png')
```
writing ... ./imgs/geom_demo3.png

By default, the background color is set to transparent. To change the background color, set ``background=True`` and specify its color by adding ``bg_color='rgb<value,value,value>'``. ``bgcolor`` can be specified in the same way as the particle color, either by RGB values or by basic colors (e.g., White, Black, Red, Green, Blue, Yellow, Cyan, Magenta, Gray). For example, to have a gray background, the command looks like this:
```python
aggscatpy.particle_rendering(partype='CAHP',size='8',amon='100nm',ireal='1',fn='geom_demo4',path='./imgs/',\
particle_color='rgb<0.4,0.4,0.15>', background=True, bg_color='Gray20')
show_image('./imgs/geom_demo4.png')
```
writing ... ./imgs/geom_demo4.png

## Reference bar: position and reference scale
You can modify/adjust a reference-scale bar in the image. The default bar length is set to the characteristic radius and the volume-equivalent radius for aggregates and irregular grains, respectively. The physical length of the bar (in units of $\mu\mathrm{m}$) can be directly changed by setting ``reference_length``. The color of the reference bar can also be changed with ``reference_color`` (similar to the particle and background colors). If you want larger text fonts, you can set a magnification rate by ``ref_fontsize`` (to make it large, the value has to be >1.0. Conversely, to make it small, the value should be <1.0).
```python
aggscatpy.particle_rendering(partype='CAHP',size='8',amon='100nm',ireal='1',fn='geom_demo5',path='./imgs/',\
particle_color='rgb<0.4,0.4,0.15>', background=True, bg_color='Gray20',\
ref_color='White', ref_length=0.5, ref_fontsize=1.5)
show_image('./imgs/geom_demo5.png')
```
writing ... ./imgs/geom_demo5.png

The reference bar is a cylindrical object placed on the $y$-$z$ plane ($x=0$) in the rendering coordinate system (*Note:* The system is left-handed). You can change the position of the reference bar, using ``ref_dist`` and ``ref_posang`` (see also the image below). The former and latter set the distance from the origin to the center of the bar (in units of the characteristic radius for aggregates and the volume-equivalent radius for irregular grains) and angle (in degrees) measured from the $z$ axis, respectively.
<img src="reference_bar.png" width="300">
For example, if you would like to place the bar at the top of the image, set ``ref_posang=90``:
```python
aggscatpy.particle_rendering(partype='CAHP',size='8',amon='100nm',ireal='1',fn='geom_demo6',path='./imgs/',\
particle_color='rgb<0.4,0.4,0.15>', background=True, bg_color='Gray20',\
ref_color='White', ref_length=0.5, ref_fontsize=1.5, ref_dist=1.1,ref_posang=90)
show_image('./imgs/geom_demo6.png')
```
writing ... ./imgs/geom_demo6.png

You can hide the reference bar by setting ``reference=False``:
```python
aggscatpy.particle_rendering(partype='CAHP',size='8',amon='100nm',ireal='1',fn='geom_demo7',path='./imgs/',\
particle_color='rgb<0.4,0.4,0.15>', background=True, bg_color='Gray20',\
reference=False)
show_image('./imgs/geom_demo7.png')
```
writing ... ./imgs/geom_demo7.png

## Camera position and orientation of a particle
You can change the camera's location (observer) and the particle's orientation. The camera is fixed to the $x$ axis of the coordinate system ($x_\mathrm{camera},0,0$) and you can change the distance from the origin to the camera position via ``xcamera`` (in units of the characteristic radius and the volume-equivalent radius for aggregates and irregular grains, respectively). By default, ``xcamera=3.5``.
You can also rotate the coordinate system about each axis: $x$, $y$, and $z$ by using ``rotx``, ``roty``, and ``rotz``, respectively (in units of degrees) (see the image below), while the particle is fixed to the space (Note that POV-RAY adopts the left-handed system).
<img src="camera.png" width="300">
Here is an example. Let's start with a default parameter:
```python
aggscatpy.particle_rendering(partype='FA11',size='32',amon='100nm',ireal='1',fn='geom_demo8',path='./imgs/')
show_image('./imgs/geom_demo8.png')
```
writing ... ./imgs/geom_demo8.png

By setting the ``xcamera`` less than 3.5, the camera will get closer to the particle than in the default position, and therefore, you will get a closeup image of the particle (I changed the reference scale accordingly):
```python
aggscatpy.particle_rendering(partype='FA11',size='32',amon='100nm',ireal='1',fn='geom_demo9',path='./imgs/',\
xcamera=0.5, ref_length=0.1, ref_fontsize=1.0, ref_dist=0.2)
show_image('./imgs/geom_demo9.png')
```
writing ... ./imgs/geom_demo9.png

Let's get the camera back to the default position, and next set ``rotx=45``:
```python
aggscatpy.particle_rendering(partype='FA11',size='32',amon='100nm',ireal='1',fn='geom_demo10',path='./imgs/',\
rotx=45)
show_image('./imgs/geom_demo10.png')
```
writing ... ./imgs/geom_demo10.png

Since the coordinate system was rotated 45 degrees around the $x$ axis (counterclockwise), the aggregate is apparently rotated by the same angle, but clockwise.
|
rtazaki1205REPO_NAMEAggScatVIRPATH_START.@AggScatVIR_extracted@AggScatVIR-master@docs@geometry.ipynb@.PATH_END.py
|
{
"filename": "_ypad.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/scattercarpet/marker/colorbar/_ypad.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class YpadValidator(_plotly_utils.basevalidators.NumberValidator):
    """Auto-generated validator for ``scattercarpet.marker.colorbar.ypad``.

    A non-negative number controlling the color bar's vertical padding.
    """
    def __init__(
        self, plotly_name="ypad", parent_name="scattercarpet.marker.colorbar", **kwargs
    ):
        super(YpadValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=kwargs.pop("edit_type", "colorbars"),
            min=kwargs.pop("min", 0),
            role=kwargs.pop("role", "style"),
            **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@scattercarpet@marker@colorbar@_ypad.py@.PATH_END.py
|
{
"filename": "test_abscal.py",
"repo_name": "HERA-Team/hera_cal",
"repo_path": "hera_cal_extracted/hera_cal-main/hera_cal/tests/test_abscal.py",
"type": "Python"
}
|
# -*- coding: utf-8 -*-
# Copyright 2019 the HERA Project
# Licensed under the MIT License
import pytest
import os
from scipy import constants
import numpy as np
import sys
from collections import OrderedDict as odict
import copy
import glob
from pyuvdata import UVCal, UVData
import warnings
from hera_sim.antpos import hex_array, linear_array
from .. import io, abscal, redcal, utils, apply_cal
from ..data import DATA_PATH
from ..datacontainer import DataContainer
from ..utils import split_pol, reverse_bl, split_bl
from ..apply_cal import calibrate_in_place
from ..flag_utils import synthesize_ant_flags
@pytest.mark.filterwarnings("ignore:The default for the `center` keyword has changed")
@pytest.mark.filterwarnings("ignore:invalid value encountered in true_divide")
class Test_AbsCal_Funcs(object):
    def setup_method(self):
        """Load the sample miriad file and build mock model visibilities.

        Populates ``self.data`` (observed vis), ``self.model`` (data scaled by
        a known absolute gain and phase gradient), ``self.bls`` (baseline
        vectors) and ``self.wgts`` (weights derived from flags) for the tests.
        """
        np.random.seed(0)  # deterministic mock data across tests
        # load into pyuvdata object
        self.data_file = os.path.join(DATA_PATH, "zen.2458043.12552.xx.HH.uvORA")
        self.uvd = UVData()
        self.uvd.read_miriad(self.data_file)
        self.freq_array = np.unique(self.uvd.freq_array)
        self.antpos, self.ants = self.uvd.get_enu_data_ants()
        self.antpos = odict(zip(self.ants, self.antpos))
        self.time_array = np.unique(self.uvd.time_array)
        # configure data into dictionaries
        data, flgs = io.load_vis(self.uvd, pop_autos=True)
        wgts = odict()
        for k in flgs.keys():
            # weights are the complement of the flags (1 = good, 0 = flagged)
            wgts[k] = (~flgs[k]).astype(float)
        wgts = DataContainer(wgts)
        # configure baselines
        bls = odict([(x, self.antpos[x[0]] - self.antpos[x[1]]) for x in data.keys()])
        # make mock data
        abs_gain = 0.5
        # per-baseline phase gradient coefficients (one per ENU axis)
        TT_phi = np.array([-0.004, 0.006, 0])
        model = DataContainer({})
        for i, k in enumerate(data.keys()):
            # model = data * exp(gain + i * phi . baseline_vector)
            model[k] = data[k] * np.exp(abs_gain + 1j * np.dot(TT_phi, bls[k]))
        # assign data
        self.data = data
        self.bls = bls
        self.model = model
        self.wgts = wgts
@pytest.mark.parametrize("divide_gains", [True, False])
def test_multiply_gains(self, tmpdir, divide_gains):
tmp_path = tmpdir.strpath
gain_1_path = os.path.join(tmp_path, 'gain_1.calfits')
gain_2_path = os.path.join(tmp_path, 'gain_2.calfits')
output_path = os.path.join(tmp_path, 'output.calfits')
uvc1 = UVCal()
uvc1 = uvc1.initialize_from_uvdata(self.uvd, gain_convention='divide',
cal_style='redundant', metadata_only=False)
uvc2 = UVCal()
uvc2 = uvc2.initialize_from_uvdata(self.uvd, gain_convention='divide',
cal_style='redundant', metadata_only=False)
uvc1.gain_array[:] = np.random.rand(*uvc1.gain_array.shape) + 1j * np.random.rand(*uvc1.gain_array.shape)
uvc2.gain_array[:] = np.random.rand(*uvc2.gain_array.shape) + 1j * np.random.rand(*uvc2.gain_array.shape)
flag_times_1 = np.random.randint(low=0, high=self.uvd.Ntimes, size=self.uvd.Ntimes // 4)
uvc1.flag_array[:, :, flag_times_1] = True
flag_times_2 = np.random.randint(low=0, high=self.uvd.Ntimes, size=self.uvd.Ntimes // 4)
uvc2.flag_array[:, :, flag_times_2] = True
uvc1.quality_array = np.zeros_like(uvc1.gain_array, dtype=float) + 1.
uvc2.quality_array = np.zeros_like(uvc1.quality_array) + 2.
uvc1.write_calfits(gain_1_path, clobber=True)
uvc2.write_calfits(gain_2_path, clobber=True)
abscal.multiply_gains(gain_1_path, gain_2_path, output_path,
clobber=True, divide_gains=divide_gains)
uvc3 = UVCal()
uvc3.read_calfits(output_path)
if divide_gains:
np.testing.assert_array_almost_equal(uvc1.gain_array / uvc2.gain_array, uvc3.gain_array)
else:
np.testing.assert_array_almost_equal(uvc1.gain_array * uvc2.gain_array, uvc3.gain_array)
np.testing.assert_array_almost_equal(uvc1.flag_array | uvc2.flag_array, uvc3.flag_array)
assert np.all(np.isnan(uvc3.quality_array))
assert uvc3.total_quality_array is None
def test_data_key_to_array_axis(self):
m, pk = abscal.data_key_to_array_axis(self.model, 2)
assert m[(24, 25)].shape == (60, 64, 1)
assert 'ee' in pk
# test w/ avg_dict
m, ad, pk = abscal.data_key_to_array_axis(self.model, 2, avg_dict=self.bls)
assert m[(24, 25)].shape == (60, 64, 1)
assert ad[(24, 25)].shape == (3,)
assert 'ee' in pk
def test_array_axis_to_data_key(self):
m, pk = abscal.data_key_to_array_axis(self.model, 2)
m2 = abscal.array_axis_to_data_key(m, 2, ['ee'])
assert m2[(24, 25, 'ee')].shape == (60, 64)
# copy dict
m, ad, pk = abscal.data_key_to_array_axis(self.model, 2, avg_dict=self.bls)
m2, cd = abscal.array_axis_to_data_key(m, 2, ['ee'], copy_dict=ad)
assert m2[(24, 25, 'ee')].shape == (60, 64)
assert cd[(24, 25, 'ee')].shape == (3,)
def test_interp2d(self):
# test interpolation w/ warning
m, mf = abscal.interp2d_vis(self.data, self.time_array, self.freq_array,
self.time_array, self.freq_array, flags=self.wgts, medfilt_flagged=False)
assert m[(24, 25, 'ee')].shape == (60, 64)
# downsampling w/ no flags
m, mf = abscal.interp2d_vis(self.data, self.time_array, self.freq_array,
self.time_array[::2], self.freq_array[::2])
assert m[(24, 25, 'ee')].shape == (30, 32)
# test flag propagation
m, mf = abscal.interp2d_vis(self.data, self.time_array, self.freq_array,
self.time_array, self.freq_array, flags=self.wgts, medfilt_flagged=True)
assert np.all(mf[(24, 25, 'ee')][10, 0])
# test flag extrapolation
m, mf = abscal.interp2d_vis(self.data, self.time_array, self.freq_array,
self.time_array + .0001, self.freq_array, flags=self.wgts, flag_extrapolate=True)
assert np.all(mf[(24, 25, 'ee')][-1].min())
def test_wiener(self):
# test smoothing
d = abscal.wiener(self.data, window=(5, 15), noise=None, medfilt=True, medfilt_kernel=(1, 13))
assert d[(24, 37, 'ee')].shape == (60, 64)
assert d[(24, 37, 'ee')].dtype == complex
# test w/ noise
d = abscal.wiener(self.data, window=(5, 15), noise=0.1, medfilt=True, medfilt_kernel=(1, 13))
assert d[(24, 37, 'ee')].shape == (60, 64)
# test w/o medfilt
d = abscal.wiener(self.data, window=(5, 15), medfilt=False)
assert d[(24, 37, 'ee')].shape == (60, 64)
# test as array
d = abscal.wiener(self.data[(24, 37, 'ee')], window=(5, 15), medfilt=False, array=True)
assert d.shape == (60, 64)
assert d.dtype == complex
def test_Baseline(self):
# test basic execution
keys = list(self.data.keys())
k1 = (24, 25, 'ee') # 14.6 m E-W
i1 = keys.index(k1)
k2 = (24, 37, 'ee') # different
i2 = keys.index(k2)
k3 = (52, 53, 'ee') # 14.6 m E-W
i3 = keys.index(k3)
bls = [abscal.Baseline(self.antpos[k[1]] - self.antpos[k[0]], tol=2.0) for k in keys]
bls_conj = [abscal.Baseline(self.antpos[k[0]] - self.antpos[k[1]], tol=2.0) for k in keys]
assert bls[i1] == bls[i1]
assert bls[i1] != bls[i2]
assert (bls[i1] == bls_conj[i1]) == 'conjugated'
# test different yet redundant baselines still agree
assert bls[i1] == bls[i3]
# test tolerance works as expected
bls = [abscal.Baseline(self.antpos[k[1]] - self.antpos[k[0]], tol=1e-4) for k in keys]
assert bls[i1] != bls[i3]
def test_match_red_baselines(self):
model = copy.deepcopy(self.data)
model = DataContainer(odict([((k[0] + 1, k[1] + 1, k[2]), model[k]) for i, k in enumerate(model.keys())]))
del model[(25, 54, 'ee')]
model_antpos = odict([(k + 1, self.antpos[k]) for i, k in enumerate(self.antpos.keys())])
new_model = abscal.match_red_baselines(model, model_antpos, self.data, self.antpos, tol=2.0, verbose=False)
assert len(new_model.keys()) == 8
assert (24, 37, 'ee') in new_model
assert (24, 53, 'ee') not in new_model
def test_mirror_data_to_red_bls(self):
# make fake data
reds = redcal.get_reds(self.antpos, pols=['ee'])
data = DataContainer(odict([(k[0], self.data[k[0]]) for k in reds[:5]]))
# test execuation
d = abscal.mirror_data_to_red_bls(data, self.antpos)
assert len(d.keys()) == 16
assert (24, 25, 'ee') in d
# test correct value is propagated
assert np.allclose(data[(24, 25, 'ee')][30, 30], d[(38, 39, 'ee')][30, 30])
# test reweighting
w = abscal.mirror_data_to_red_bls(self.wgts, self.antpos, weights=True)
assert w[(24, 25, 'ee')].dtype == float
assert np.allclose(w[(24, 25, 'ee')].max(), 16.0)
def test_flatten(self):
li = abscal.flatten([['hi']])
assert np.array(li).ndim == 1
@pytest.mark.filterwarnings("ignore:Casting complex values to real discards the imaginary part")
@pytest.mark.filterwarnings("ignore:This function will be deprecated")
def test_avg_data_across_red_bls(self):
# test basic execution
wgts = copy.deepcopy(self.wgts)
wgts[(24, 25, 'ee')][45, 45] = 0.0
data, flags, antpos, ants, freqs, times, lsts, pols = io.load_vis(self.data_file, return_meta=True)
rd, rf, rk = abscal.avg_data_across_red_bls(data, antpos, wgts=wgts, tol=2.0, broadcast_wgts=False)
assert rd[(24, 25, 'ee')].shape == (60, 64)
assert rf[(24, 25, 'ee')][45, 45] > 0.0
# test various kwargs
wgts[(24, 25, 'ee')][45, 45] = 0.0
rd, rf, rk = abscal.avg_data_across_red_bls(data, antpos, tol=2.0, wgts=wgts, broadcast_wgts=True)
assert len(rd.keys()) == 9
assert len(rf.keys()) == 9
assert np.allclose(rf[(24, 25, 'ee')][45, 45], 0.0)
# test averaging worked
rd, rf, rk = abscal.avg_data_across_red_bls(data, antpos, tol=2.0, broadcast_wgts=False)
v = np.mean([data[(52, 53, 'ee')], data[(37, 38, 'ee')], data[(24, 25, 'ee')], data[(38, 39, 'ee')]], axis=0)
assert np.allclose(rd[(24, 25, 'ee')], v)
# test mirror_red_data
rd, rf, rk = abscal.avg_data_across_red_bls(data, antpos, wgts=self.wgts, tol=2.0, mirror_red_data=True)
assert len(rd.keys()) == 21
assert len(rf.keys()) == 21
def test_match_times(self):
dfiles = [os.path.join(DATA_PATH, f'zen.2458043.{f}.xx.HH.uvORA') for f in (12552, 13298)]
mfiles = [os.path.join(DATA_PATH, f'zen.2458042.{f}.xx.HH.uvXA') for f in (12552, 13298)]
# test basic execution
relevant_mfiles = abscal.match_times(dfiles[0], mfiles, filetype='miriad')
assert len(relevant_mfiles) == 2
# test basic execution
relevant_mfiles = abscal.match_times(dfiles[1], mfiles, filetype='miriad')
assert len(relevant_mfiles) == 1
# test no overlap
mfiles = sorted(glob.glob(os.path.join(DATA_PATH, 'zen.2457698.40355.xx.HH.uvcA')))
relevant_mfiles = abscal.match_times(dfiles[0], mfiles, filetype='miriad')
assert len(relevant_mfiles) == 0
def test_rephase_vis(self):
dfile = os.path.join(DATA_PATH, 'zen.2458043.12552.xx.HH.uvORA')
mfiles = [os.path.join(DATA_PATH, 'zen.2458042.12552.xx.HH.uvXA')]
m, mf, mantp, mant, mfr, mt, ml, mp = io.load_vis(mfiles, return_meta=True)
d, df, dantp, dant, dfr, dt, dl, dp = io.load_vis(dfile, return_meta=True)
bls = odict([(k, dantp[k[0]] - dantp[k[1]]) for k in d.keys()])
# basic execution
new_m, new_f = abscal.rephase_vis(m, ml, dl, bls, dfr)
k = list(new_m.keys())[0]
assert new_m[k].shape == d[k].shape
assert np.all(new_f[k][-1])
assert not np.any(new_f[k][0])
def test_cut_bl(self):
Nbls = len(self.data)
_data = abscal.cut_bls(self.data, bls=self.bls, min_bl_cut=20.0, inplace=False)
assert Nbls == 21
assert len(_data) == 9
_data2 = copy.deepcopy(self.data)
abscal.cut_bls(_data2, bls=self.bls, min_bl_cut=20.0, inplace=True)
assert len(_data2) == 9
_data = abscal.cut_bls(self.data, bls=self.bls, min_bl_cut=20.0, inplace=False)
abscal.cut_bls(_data2, min_bl_cut=20.0, inplace=True)
assert len(_data2) == 9
def test_dft_phase_slope_solver(self):
np.random.seed(21)
# build a perturbed grid
xs = np.zeros(100)
ys = np.zeros(100)
i = 0
for x in np.arange(0, 100, 10):
for y in np.arange(0, 100, 10):
xs[i] = x + 5 * (.5 - np.random.rand())
ys[i] = y + 5 * (.5 - np.random.rand())
i += 1
phase_slopes_x = (.2 * np.random.rand(5, 2) - .1) # not too many phase wraps over the array
phase_slopes_y = (.2 * np.random.rand(5, 2) - .1) # (i.e. avoid undersampling of very fast slopes)
data = np.array([np.exp(1.0j * x * phase_slopes_x
+ 1.0j * y * phase_slopes_y) for x, y in zip(xs, ys)])
x_slope_est, y_slope_est = abscal.dft_phase_slope_solver(xs, ys, data)
np.testing.assert_array_almost_equal(phase_slopes_x - x_slope_est, 0, decimal=7)
np.testing.assert_array_almost_equal(phase_slopes_y - y_slope_est, 0, decimal=7)
def test_put_transformed_array_on_integer_grid(self):
# Create a set of points that are not on an integer grid
np.random.seed(42)
antvec = np.random.uniform(0, 10, size=(5))
antpos = {i: np.array([antvec[i]]) for i in range(5)}
# Check that the function raises an error if the points are not on an integer grid
with pytest.raises(AssertionError):
abscal._put_transformed_array_on_integer_grid(antpos)
# Create a set of points that can be put on an integer grid
antvec = np.arange(0, 5, 0.5)
antpos = {i: np.array([antvec[i]]) for i in range(antvec.shape[0])}
abscal._put_transformed_array_on_integer_grid(antpos)
# Check that the points are now on an integer grid
for i in range(antvec.shape[0]):
assert np.isclose(antpos[i], i)
def test_grad_and_hess(self):
# Generate a set of baseline vectors
np.random.seed(42)
blvecs = np.column_stack([np.linspace(0, 5, 10), np.linspace(0, 2, 10)])
data = np.ones(10)
x = np.random.normal(2, 0.25, size=(2))
data = data * np.exp(-1j * np.dot(x, blvecs.T))
# Compute gradient and hessian
grad, _ = abscal._grad_and_hess(x, blvecs, data)
# At global minimum, gradient should be zero
assert np.allclose(grad, np.zeros(2))
def test_eval_Z(self):
# Generate a simple set of data
blvecs = np.column_stack([np.linspace(0, 5, 10), np.linspace(0, 2, 10)])
data = np.ones(10)
x = np.random.normal(2, 0.25, size=(2))
data = data * np.exp(-1j * np.dot(x, blvecs.T))
# Compute Z
Z = abscal._eval_Z(x, blvecs, data)
# At solution point Z should be 1 + 0j
np.testing.assert_array_almost_equal(Z.real, 1)
np.testing.assert_array_almost_equal(Z.imag, 0)
@pytest.mark.filterwarnings("ignore:invalid value encountered in true_divide")
@pytest.mark.filterwarnings("ignore:divide by zero encountered in true_divide")
@pytest.mark.filterwarnings("ignore:divide by zero encountered in divide")
@pytest.mark.filterwarnings("ignore:divide by zero encountered in log")
class Test_Abscal_Solvers:
def test_abs_amp_lincal_1pol(self):
antpos = hex_array(2, split_core=False, outriggers=0)
reds = redcal.get_reds(antpos, pols=['ee'], pol_mode='1pol')
model = {bl: np.ones((10, 5)) for red in reds for bl in red}
data = {bl: 4.0 * np.ones((10, 5)) for red in reds for bl in red}
data[0, 1, 'ee'][0, 0] = np.nan
data[0, 1, 'ee'][0, 1] = np.inf
model[0, 1, 'ee'][0, 0] = np.nan
model[0, 1, 'ee'][0, 1] = np.inf
fit = abscal.abs_amp_lincal(model, data)
np.testing.assert_array_equal(fit['A_Jee'], 2.0)
ants = list(set([ant for bl in data for ant in utils.split_bl(bl)]))
gains = abscal.abs_amp_lincal(model, data, return_gains=True, gain_ants=ants)
for ant in ants:
np.testing.assert_array_equal(gains[ant], 2.0)
def test_abs_amp_lincal_4pol(self):
antpos = hex_array(2, split_core=False, outriggers=0)
reds = redcal.get_reds(antpos, pols=['ee', 'en', 'ne', 'nn'], pol_mode='4pol')
model = {bl: np.ones((10, 5)) for red in reds for bl in red}
gain_products = {'ee': 4.0, 'en': 6.0, 'ne': 6.0, 'nn': 9.0}
data = {bl: gain_products[bl[2]] * np.ones((10, 5)) for red in reds for bl in red}
data[0, 1, 'ee'][0, 0] = np.nan
data[0, 1, 'ee'][0, 1] = np.inf
model[0, 1, 'ee'][0, 0] = np.nan
model[0, 1, 'ee'][0, 1] = np.inf
fit = abscal.abs_amp_lincal(model, data)
np.testing.assert_array_equal(fit['A_Jee'], 2.0)
np.testing.assert_array_equal(fit['A_Jnn'], 3.0)
ants = list(set([ant for bl in data for ant in utils.split_bl(bl)]))
gains = abscal.abs_amp_lincal(model, data, return_gains=True, gain_ants=ants)
for ant in ants:
if ant[1] == 'Jee':
np.testing.assert_array_equal(gains[ant], 2.0)
elif ant[1] == 'Jnn':
np.testing.assert_array_equal(gains[ant], 3.0)
def test_TT_phs_logcal_1pol_assume_2D(self):
antpos = hex_array(2, split_core=False, outriggers=0)
reds = redcal.get_reds(antpos, pols=['ee'], pol_mode='1pol')
model = {bl: np.ones((10, 5), dtype=complex) for red in reds for bl in red}
data = {bl: np.ones((10, 5), dtype=complex) for red in reds for bl in red}
bl_vecs = {bl: antpos[bl[0]] - antpos[bl[1]] for bl in data}
for bl in data:
data[bl] *= np.exp(1.0j * np.dot(bl_vecs[bl], [.01, .02, 0]))
data[0, 1, 'ee'][0, 0] = np.nan
data[0, 1, 'ee'][0, 1] = np.inf
model[0, 1, 'ee'][0, 0] = np.nan
model[0, 1, 'ee'][0, 1] = np.inf
fit = abscal.TT_phs_logcal(model, data, antpos, assume_2D=True)
np.testing.assert_array_almost_equal(fit['Phi_ew_Jee'], .01)
np.testing.assert_array_almost_equal(fit['Phi_ns_Jee'], .02)
ants = list(set([ant for bl in data for ant in utils.split_bl(bl)]))
gains = abscal.TT_phs_logcal(model, data, antpos, assume_2D=True, return_gains=True, gain_ants=ants)
rephased_gains = {ant: gains[ant] / gains[ants[0]] * np.abs(gains[ants[0]]) for ant in ants}
true_gains = {ant: np.exp(1.0j * np.dot(antpos[ant[0]], [.01, .02, 0])) for ant in ants}
rephased_true_gains = {ant: true_gains[ant] / true_gains[ants[0]] * np.abs(true_gains[ants[0]]) for ant in ants}
for ant in ants:
np.testing.assert_array_almost_equal(rephased_gains[ant], rephased_true_gains[ant])
def test_TT_phs_logcal_4pol_assume_2D(self):
antpos = hex_array(2, split_core=False, outriggers=0)
reds = redcal.get_reds(antpos, pols=['ee', 'en', 'ne', 'nn'], pol_mode='4pol')
model = {bl: np.ones((10, 5), dtype=complex) for red in reds for bl in red}
data = {bl: np.ones((10, 5), dtype=complex) for red in reds for bl in red}
bl_vecs = {bl: antpos[bl[0]] - antpos[bl[1]] for bl in data}
for bl in data:
data[bl] *= np.exp(1.0j * np.dot(bl_vecs[bl], [.01, .02, 0]))
data[0, 1, 'ee'][0, 0] = np.nan
data[0, 1, 'ee'][0, 1] = np.inf
model[0, 1, 'ee'][0, 0] = np.nan
model[0, 1, 'ee'][0, 1] = np.inf
fit = abscal.TT_phs_logcal(model, data, antpos, assume_2D=True, four_pol=True)
np.testing.assert_array_almost_equal(fit['Phi_ew'], .01)
np.testing.assert_array_almost_equal(fit['Phi_ns'], .02)
ants = list(set([ant for bl in data for ant in utils.split_bl(bl)]))
gains = abscal.TT_phs_logcal(model, data, antpos, assume_2D=True, four_pol=True, return_gains=True, gain_ants=ants)
rephased_gains = {ant: gains[ant] / gains[ants[0]] * np.abs(gains[ants[0]]) for ant in ants}
true_gains = {ant: np.exp(1.0j * np.dot(antpos[ant[0]], [.01, .02, 0])) for ant in ants}
rephased_true_gains = {ant: true_gains[ant] / true_gains[ants[0]] * np.abs(true_gains[ants[0]]) for ant in ants}
for ant in ants:
np.testing.assert_array_almost_equal(rephased_gains[ant], rephased_true_gains[ant])
def test_TT_phs_logcal_1pol_nDim(self):
# test assume_2D=False by introducing another 6 element hex 100 m away
antpos = hex_array(2, split_core=False, outriggers=0)
antpos2 = hex_array(2, split_core=False, outriggers=0)
antpos.update({len(antpos) + ant: antpos2[ant] + np.array([100, 0, 0]) for ant in antpos2})
reds = redcal.get_reds(antpos, pols=['ee'], pol_mode='1pol')
antpos = redcal.reds_to_antpos(reds)
reds = redcal.get_reds(antpos, pols=['ee'], pol_mode='1pol', bl_error_tol=1e-10)
model = {bl: np.ones((10, 5), dtype=complex) for red in reds for bl in red}
data = {bl: np.ones((10, 5), dtype=complex) for red in reds for bl in red}
bl_vecs = {bl: antpos[bl[0]] - antpos[bl[1]] for bl in data}
for bl in data:
data[bl] *= np.exp(1.0j * np.dot(bl_vecs[bl], [.01, .02, .03]))
data[0, 1, 'ee'][0, 0] = np.nan
data[0, 1, 'ee'][0, 1] = np.inf
model[0, 1, 'ee'][0, 0] = np.nan
model[0, 1, 'ee'][0, 1] = np.inf
fit = abscal.TT_phs_logcal(model, data, antpos, assume_2D=False)
np.testing.assert_array_almost_equal(fit['Phi_0_Jee'], .01)
np.testing.assert_array_almost_equal(fit['Phi_1_Jee'], .02)
np.testing.assert_array_almost_equal(fit['Phi_2_Jee'], .03)
ants = list(set([ant for bl in data for ant in utils.split_bl(bl)]))
gains = abscal.TT_phs_logcal(model, data, antpos, assume_2D=False, return_gains=True, gain_ants=ants)
rephased_gains = {ant: gains[ant] / gains[ants[0]] * np.abs(gains[ants[0]]) for ant in ants}
true_gains = {ant: np.exp(1.0j * np.dot(antpos[ant[0]], [.01, .02, .03])) for ant in ants}
rephased_true_gains = {ant: true_gains[ant] / true_gains[ants[0]] * np.abs(true_gains[ants[0]]) for ant in ants}
for ant in ants:
np.testing.assert_array_almost_equal(rephased_gains[ant], rephased_true_gains[ant])
def test_delay_slope_lincal_1pol_assume_2D(self):
antpos = hex_array(2, split_core=False, outriggers=0)
reds = redcal.get_reds(antpos, pols=['ee'], pol_mode='1pol')
model = {bl: np.ones((2, 1024), dtype=complex) for red in reds for bl in red}
data = {bl: np.ones((2, 1024), dtype=complex) for red in reds for bl in red}
freqs = np.linspace(100e6, 200e6, 1024)
df = np.median(np.diff(freqs))
ants = sorted(list(set([ant for bl in data for ant in utils.split_bl(bl)])))
true_dlys = {ant: np.dot([1e-9, 2e-9, 0], antpos[ant[0]]) for ant in ants}
true_gains = {ant: np.outer(np.ones(2), np.exp(2.0j * np.pi * true_dlys[ant] * (freqs))) for ant in ants}
for bl in data:
ant0, ant1 = utils.split_bl(bl)
data[bl] *= true_gains[ant0] * np.conj(true_gains[ant1])
fit = abscal.delay_slope_lincal(model, data, antpos, df=df, assume_2D=True, time_avg=True)
np.testing.assert_array_almost_equal(1e9 * fit['T_ew_Jee'], 1.0, decimal=3)
np.testing.assert_array_almost_equal(1e9 * fit['T_ns_Jee'], 2.0, decimal=3)
gains = abscal.delay_slope_lincal(model, data, antpos, df=df, f0=freqs[0], assume_2D=True, time_avg=True, return_gains=True, gain_ants=ants)
rephased_gains = {ant: gains[ant] / gains[ants[0]] * np.abs(gains[ants[0]]) for ant in ants}
rephased_true_gains = {ant: true_gains[ant] / true_gains[ants[0]] * np.abs(true_gains[ants[0]]) for ant in ants}
for ant in ants:
np.testing.assert_array_almost_equal(rephased_gains[ant], rephased_true_gains[ant], decimal=3)
def test_delay_slope_lincal_4pol_assume_2D(self):
antpos = hex_array(2, split_core=False, outriggers=0)
reds = redcal.get_reds(antpos, pols=['ee', 'en', 'ne', 'nn'], pol_mode='4pol')
model = {bl: np.ones((2, 1024), dtype=complex) for red in reds for bl in red}
data = {bl: np.ones((2, 1024), dtype=complex) for red in reds for bl in red}
freqs = np.linspace(100e6, 200e6, 1024)
df = np.median(np.diff(freqs))
ants = sorted(list(set([ant for bl in data for ant in utils.split_bl(bl)])))
true_dlys = {ant: np.dot([1e-9, 2e-9, 0], antpos[ant[0]]) for ant in ants}
true_gains = {ant: np.outer(np.ones(2), np.exp(2.0j * np.pi * true_dlys[ant] * (freqs))) for ant in ants}
for bl in data:
ant0, ant1 = utils.split_bl(bl)
data[bl] *= true_gains[ant0] * np.conj(true_gains[ant1])
fit = abscal.delay_slope_lincal(model, data, antpos, df=df, assume_2D=True, four_pol=True)
np.testing.assert_array_almost_equal(1e9 * fit['T_ew'], 1.0, decimal=3)
np.testing.assert_array_almost_equal(1e9 * fit['T_ns'], 2.0, decimal=3)
gains = abscal.delay_slope_lincal(model, data, antpos, df=df, f0=freqs[0], assume_2D=True, four_pol=True, return_gains=True, gain_ants=ants)
rephased_gains = {ant: gains[ant] / gains[ants[0]] * np.abs(gains[ants[0]]) for ant in ants}
rephased_true_gains = {ant: true_gains[ant] / true_gains[ants[0]] * np.abs(true_gains[ants[0]]) for ant in ants}
for ant in ants:
np.testing.assert_array_almost_equal(rephased_gains[ant], rephased_true_gains[ant], decimal=3)
def test_delay_slope_lincal_1pol_nDim(self):
antpos = hex_array(2, split_core=False, outriggers=0)
antpos2 = hex_array(2, split_core=False, outriggers=0)
antpos.update({len(antpos) + ant: antpos2[ant] + np.array([100, 0, 0]) for ant in antpos2})
reds = redcal.get_reds(antpos, pols=['ee'], pol_mode='1pol')
antpos = redcal.reds_to_antpos(reds)
reds = redcal.get_reds(antpos, pols=['ee'], pol_mode='1pol', bl_error_tol=1e-10)
model = {bl: np.ones((2, 1024), dtype=complex) for red in reds for bl in red}
data = {bl: np.ones((2, 1024), dtype=complex) for red in reds for bl in red}
freqs = np.linspace(100e6, 200e6, 1024)
df = np.median(np.diff(freqs))
ants = sorted(list(set([ant for bl in data for ant in utils.split_bl(bl)])))
true_dlys = {ant: np.dot([1e-9, 2e-9, 3e-9], antpos[ant[0]]) for ant in ants}
true_gains = {ant: np.outer(np.ones(2), np.exp(2.0j * np.pi * true_dlys[ant] * (freqs))) for ant in ants}
for bl in data:
ant0, ant1 = utils.split_bl(bl)
data[bl] *= true_gains[ant0] * np.conj(true_gains[ant1])
fit = abscal.delay_slope_lincal(model, data, antpos, df=df, assume_2D=False)
np.testing.assert_array_almost_equal(1e9 * fit['T_0_Jee'], 1.0, decimal=3)
np.testing.assert_array_almost_equal(1e9 * fit['T_1_Jee'], 2.0, decimal=3)
np.testing.assert_array_almost_equal(1e9 * fit['T_2_Jee'], 3.0, decimal=3)
gains = abscal.delay_slope_lincal(model, data, antpos, df=df, f0=freqs[0], assume_2D=False, return_gains=True, gain_ants=ants)
rephased_gains = {ant: gains[ant] / gains[ants[0]] * np.abs(gains[ants[0]]) for ant in ants}
rephased_true_gains = {ant: true_gains[ant] / true_gains[ants[0]] * np.abs(true_gains[ants[0]]) for ant in ants}
for ant in ants:
np.testing.assert_array_almost_equal(rephased_gains[ant], rephased_true_gains[ant], decimal=3)
def test_RFI_delay_slope_cal(self):
# build array
antpos = hex_array(3, split_core=False, outriggers=0)
antpos[19] = np.array([101, 102, 0])
reds = redcal.get_reds(antpos, pols=['ee', 'nn'])
red_data = DataContainer({red[0]: np.ones((5, 128), dtype=complex) for red in reds})
freqs = np.linspace(50e6, 250e6, 128)
unique_blvecs = {red[0]: np.mean([antpos[bl[1]] - antpos[bl[0]] for bl in red], axis=0) for red in reds}
idealized_antpos = redcal.reds_to_antpos(reds)
idealized_blvecs = {red[0]: idealized_antpos[red[0][1]] - idealized_antpos[red[0][0]] for red in reds}
# Invent RFI stations and delay slopes
rfi_chans = [7, 9, 12, 13, 22, 31, 33]
rfi_angles = [0.7853981, 0.7853981, 0.7853981, 6.0632738, 6.0632738, 0.7853981, 6.0632738]
rfi_headings = np.array([np.cos(rfi_angles), np.sin(rfi_angles), np.zeros_like(rfi_angles)])
rfi_wgts = np.array([1, 2, 1, 3, 1, 5, 1])
true_delay_slopes = {'T_ee_0': 1e-9, 'T_ee_1': -2e-9, 'T_ee_2': 1.5e-9,
'T_nn_0': 1.8e-9, 'T_nn_1': -5e-9, 'T_nn_2': 3.5e-9}
# Add RFI and uncalibrate
for bl in red_data:
for chan, heading in zip(rfi_chans, rfi_headings.T):
red_data[bl][:, chan] = 100 * np.exp(2j * np.pi * np.dot(unique_blvecs[bl], heading) * freqs[chan] / constants.c)
for key, slope in true_delay_slopes.items():
if key[2:4] == bl[2]:
red_data[bl] *= np.exp(-2j * np.pi * idealized_blvecs[bl][int(key[-1])] * slope * freqs)
# Solve for delay slopes
solved_dly_slopes = abscal.RFI_delay_slope_cal(reds, antpos, red_data, freqs, rfi_chans, rfi_headings, rfi_wgts=rfi_wgts)
for key, slope in solved_dly_slopes.items():
assert np.all(np.abs((slope - true_delay_slopes[key]) / true_delay_slopes[key]) < 1e-10)
# test converting slopes to gains
ants_in_reds = set([ant for red in reds for bl in red for ant in split_bl(bl)])
gains = abscal.RFI_delay_slope_cal(reds, antpos, red_data, freqs, rfi_chans, rfi_headings, rfi_wgts=rfi_wgts,
return_gains=True, gain_ants=ants_in_reds)
# test showing that non-RFI contaminated channels have been returned to 1s
calibrate_in_place(red_data, gains)
not_rfi_chans = [i for i in range(128) if i not in rfi_chans]
for bl in red_data:
np.testing.assert_almost_equal(red_data[bl][:, not_rfi_chans], 1.0, decimal=10)
with pytest.raises(NotImplementedError):
reds = redcal.get_reds(antpos, pols=['ee', 'nn', 'en', 'ne'])
solved_dly_slopes = abscal.RFI_delay_slope_cal(reds, antpos, red_data, freqs, rfi_chans, rfi_headings, rfi_wgts=rfi_wgts)
def test_ndim_fft_phase_slope_solver_1D_ideal_antpos(self):
antpos = linear_array(50)
reds = redcal.get_reds(antpos, pols=['ee'], pol_mode='1pol')
model = {red[0]: np.ones((2, 3), dtype=complex) for red in reds}
data = {red[0]: np.ones((2, 3), dtype=complex) for red in reds}
antpos = redcal.reds_to_antpos(reds)
bl_vecs = {bl: (antpos[bl[0]] - antpos[bl[1]]) for bl in data}
for bl in data:
data[bl] *= np.exp(1.0j * np.dot(bl_vecs[bl], [-1.2]))
phase_slopes = abscal.ndim_fft_phase_slope_solver(data, bl_vecs, assume_2D=False, zero_pad=3, bl_error_tol=1e-8)
for ps, answer in zip(phase_slopes, [-1.2]):
assert ps.shape == (2, 3)
np.testing.assert_array_less(np.abs(ps - answer), .1)
def test_ndim_fft_phase_slope_solver_2D_ideal_antpos(self):
antpos = hex_array(6, split_core=False, outriggers=0)
reds = redcal.get_reds(antpos, pols=['ee'], pol_mode='1pol')
model = {red[0]: np.ones((2, 3), dtype=complex) for red in reds}
data = {red[0]: np.ones((2, 3), dtype=complex) for red in reds}
antpos = redcal.reds_to_antpos(reds)
bl_vecs = {bl: (antpos[bl[0]] - antpos[bl[1]]) for bl in data}
for bl in data:
data[bl] *= np.exp(1.0j * np.dot(bl_vecs[bl], [-1, .1]))
phase_slopes = abscal.ndim_fft_phase_slope_solver(data, bl_vecs, assume_2D=False, zero_pad=3, bl_error_tol=1e-8)
for ps, answer in zip(phase_slopes, [-1, .1]):
assert ps.shape == (2, 3)
np.testing.assert_array_less(np.abs(ps - answer), .2)
def test_ndim_fft_phase_slope_solver_3D_ideal_antpos(self):
antpos = hex_array(4, split_core=False, outriggers=0)
antpos2 = hex_array(4, split_core=False, outriggers=0)
for d in [100.0, 200.0, 300.0, 400.0]:
antpos.update({len(antpos) + ant: antpos2[ant] + np.array([d, 0, 0]) for ant in antpos2})
reds = redcal.get_reds(antpos, pols=['ee'], pol_mode='1pol')
model = {red[0]: np.ones((2, 3), dtype=complex) for red in reds}
data = {red[0]: np.ones((2, 3), dtype=complex) for red in reds}
antpos = redcal.reds_to_antpos(reds)
bl_vecs = {bl: (antpos[bl[0]] - antpos[bl[1]]) for bl in data}
for bl in data:
data[bl] *= np.exp(1.0j * np.dot(bl_vecs[bl], [-1, -.1, 2.5]))
phase_slopes = abscal.ndim_fft_phase_slope_solver(data, bl_vecs, assume_2D=False, zero_pad=3, bl_error_tol=1e-8)
for ps, answer in zip(phase_slopes, [-1, -.1, 2.5]):
assert ps.shape == (2, 3)
np.testing.assert_array_less(np.abs(ps - answer), .2)
def test_ndim_fft_phase_slope_solver_assume_2D_real_antpos(self):
antpos = hex_array(8, split_core=False, outriggers=0)
reds = redcal.get_reds(antpos, pols=['ee'], pol_mode='1pol')
model = {red[0]: np.ones((2, 3), dtype=complex) for red in reds}
data = {red[0]: np.ones((2, 3), dtype=complex) for red in reds}
bl_vecs = {bl: (antpos[bl[0]] - antpos[bl[1]]) for bl in data}
for bl in data:
data[bl] *= np.exp(1.0j * np.dot(bl_vecs[bl], [-.02, .03, 0]))
phase_slopes = abscal.ndim_fft_phase_slope_solver(data, bl_vecs, assume_2D=True, zero_pad=3, bl_error_tol=1)
for ps, answer in zip(phase_slopes, [-.02, .03]):
assert ps.shape == (2, 3)
np.testing.assert_array_less(np.abs(ps - answer), .003)
def test_global_phase_slope_logcal_2D(self):
antpos = hex_array(5, split_core=False, outriggers=0)
reds = redcal.get_reds(antpos, pols=['ee'], pol_mode='1pol')
model = DataContainer({bl: np.ones((2, 3), dtype=complex) for red in reds for bl in red})
uncal_data = DataContainer({bl: np.ones((2, 3), dtype=complex) for red in reds for bl in red})
antpos = redcal.reds_to_antpos(reds)
bl_vecs = {bl: (antpos[bl[0]] - antpos[bl[1]]) for bl in uncal_data}
for bl in uncal_data:
uncal_data[bl] *= np.exp(1.0j * np.dot(bl_vecs[bl], [-.2, 1]))
# test results when fit is returned
fit = abscal.global_phase_slope_logcal(model, uncal_data, antpos, solver='ndim_fft', assume_2D=False, verbose=False)
fit2 = abscal.global_phase_slope_logcal(model, uncal_data, antpos, solver='ndim_fft', assume_2D=True, verbose=False)
np.testing.assert_array_equal(fit['Phi_0_Jee'], fit2['Phi_ew_Jee'])
np.testing.assert_array_equal(fit['Phi_1_Jee'], fit2['Phi_ns_Jee'])
for f, answer in zip(fit.values(), [-.2, 1]):
assert f.shape == (2, 1)
np.testing.assert_array_less(np.abs(f - answer), .2)
ants = sorted(list(set([ant for bl in uncal_data for ant in utils.split_bl(bl)])))
# try doing the first iteration with either dft or ndim_fft
for solver in ['dft', 'ndim_fft']:
data = copy.deepcopy(uncal_data)
for i in range(8):
if i == 0:
gains = abscal.global_phase_slope_logcal(model, data, antpos, solver=solver, assume_2D=True,
time_avg=True, return_gains=True, gain_ants=ants, verbose=False)
else:
gains = abscal.global_phase_slope_logcal(model, data, antpos, solver='linfit', assume_2D=False,
time_avg=True, return_gains=True, gain_ants=ants, verbose=False)
calibrate_in_place(data, gains)
np.testing.assert_array_almost_equal(np.linalg.norm([data[bl] - model[bl] for bl in data]), 0)
def test_global_phase_slope_logcal_3D(self):
antpos = hex_array(3, split_core=False, outriggers=0)
antpos2 = hex_array(3, split_core=False, outriggers=0)
for d in [100.0, 200.0, 300.0]:
antpos.update({len(antpos) + ant: antpos2[ant] + np.array([d, 0, 0]) for ant in antpos2})
reds = redcal.get_reds(antpos, pols=['ee'], pol_mode='1pol')
model = DataContainer({bl: np.ones((2, 3), dtype=complex) for red in reds for bl in red})
data = DataContainer({bl: np.ones((2, 3), dtype=complex) for red in reds for bl in red})
antpos = redcal.reds_to_antpos(reds)
bl_vecs = {bl: (antpos[bl[0]] - antpos[bl[1]]) for bl in data}
for bl in data:
data[bl] *= np.exp(1.0j * np.dot(bl_vecs[bl], [-.8, -.1, .5]))
ants = sorted(list(set([ant for bl in data for ant in utils.split_bl(bl)])))
for i in range(10):
if i == 0:
gains = abscal.global_phase_slope_logcal(model, data, antpos, solver='ndim_fft', assume_2D=False,
time_avg=True, return_gains=True, gain_ants=ants, verbose=False)
else:
gains = abscal.global_phase_slope_logcal(model, data, antpos, solver='linfit', assume_2D=False,
time_avg=True, return_gains=True, gain_ants=ants, verbose=False)
calibrate_in_place(data, gains)
np.testing.assert_array_almost_equal(np.linalg.norm([data[bl] - model[bl] for bl in data]), 0, 5)
def test_complex_phase_abscal(self):
# with split_core=True this array will have 4 degenerate phase parameters to solve for
antpos = hex_array(3, split_core=True, outriggers=0)
reds = redcal.get_reds(antpos)
transformed_antpos = redcal.reds_to_antpos(reds)
abscal._put_transformed_array_on_integer_grid(transformed_antpos)
data_bls = model_bls = [group[0] for group in reds]
transformed_b_vecs = np.rint([transformed_antpos[jj] - transformed_antpos[ii] for (ii, jj, pol) in data_bls]).astype(int)
model, data = {}, {}
ntimes, nfreqs = 1, 2
# Test that the data is calibrated properly after being moved in phase
phase_deg = np.random.normal(0, 1, (ntimes, nfreqs, transformed_b_vecs.shape[-1]))
for bi, bls in enumerate(data_bls):
model[bls] = np.ones((ntimes, nfreqs))
data[bls] = model[bls] * np.exp(-1j * np.sum(transformed_b_vecs[bi][None, None, :] * phase_deg, axis=-1))
# Solve for the phase degeneracy
meta, delta_gains = abscal.complex_phase_abscal(data, model, reds, data_bls, model_bls)
# Apply calibration with new gains
apply_cal.calibrate_in_place(data, delta_gains)
# Test that the data is calibrated properly after being moved in phase
for k in data:
np.testing.assert_array_almost_equal(data[k], model[k])
# Test that function errors when polarizations are not the same
model, data = {}, {}
model_bls = [group[0] for group in reds]
data_bls = [group[0][:2] + ('ee', ) for group in reds]
# Test that the data is calibrated properly after being moved in phase
phase_deg = np.random.normal(0, 1, (ntimes, nfreqs, transformed_b_vecs.shape[-1]))
for bi, bls in enumerate(data_bls):
model[bls] = np.ones((ntimes, nfreqs))
data[bls[:2] + ('ee',)] = model[bls]
with pytest.raises(AssertionError):
meta, delta_gains = abscal.complex_phase_abscal(data, model, reds, data_bls, model_bls)
    def test_cross_pol_phase_cal(self):
        """Test abscal.cross_pol_phase_cal: recover the cross-polarized phase
        degeneracy (delta) from mock 'en'/'ne' data and check that the
        returned gains are unity for the reference polarization."""
        rng = np.random.default_rng(42)
        antpos = hex_array(3, split_core=True, outriggers=0)
        model, data = {}, {}
        ntimes, nfreqs = 1, 2
        reds = redcal.get_reds(antpos, pols=['en', 'ne'])
        data_bls = model_bls = [group[0] for group in reds]
        delta = rng.uniform(-1, 1, (ntimes, nfreqs))
        for bi, bls in enumerate(data_bls):
            model[bls] = np.ones((ntimes, nfreqs))
            # 'en' baselines pick up exp(+1j * delta); 'ne' baselines the conjugate
            if bls[2] == 'en':
                gain = np.exp(1j * delta)
            else:
                gain = np.exp(-1j * delta)
            data[bls] = model[bls] * gain
        # Solve for the phase degeneracy
        solved_delta = abscal.cross_pol_phase_cal(model, data, model_bls, data_bls, refpol='Jnn')
        # Check that the phase degeneracy was solved for correctly
        np.testing.assert_array_almost_equal(solved_delta, delta, decimal=5)
        gain_ants = set()
        for bl in data_bls:
            gain_ants.update(set(utils.split_bl(bl)))
        gain_ants = list(gain_ants)
        gains = abscal.cross_pol_phase_cal(model, data, model_bls, data_bls, return_gains=True, gain_ants=gain_ants, refpol='Jnn')
        for k in gains:
            # Check that the gains are correct: reference pol 'Jnn' should be unity
            if k[-1] == 'Jnn':
                np.testing.assert_array_almost_equal(gains[k], 1.0 + 0.0j, decimal=5)
@pytest.mark.filterwarnings("ignore:The default for the `center` keyword has changed")
@pytest.mark.filterwarnings("ignore:invalid value encountered in true_divide")
@pytest.mark.filterwarnings("ignore:divide by zero encountered in true_divide")
@pytest.mark.filterwarnings("ignore:divide by zero encountered in log")
@pytest.mark.filterwarnings("ignore:Fixing auto-correlations to be be real-only")
@pytest.mark.filterwarnings("ignore:Selected frequencies are not evenly spaced")
class Test_AbsCal:
    """Tests for the abscal.AbsCal class: per-antenna and array-wide amplitude,
    phase, delay, and delay-slope calibration solvers, plus gain merging and
    the end-to-end model-based calibration driver."""

    def setup_method(self):
        """Load the test data/model files from DATA_PATH and build an AbsCal
        object plus metadata (antenna positions, frequencies, gain keys)."""
        np.random.seed(0)
        # load into pyuvdata object
        self.data_fname = os.path.join(DATA_PATH, "zen.2458043.12552.xx.HH.uvORA")
        self.model_fname = os.path.join(DATA_PATH, "zen.2458042.12552.xx.HH.uvXA")
        self.AC = abscal.AbsCal(self.data_fname, self.model_fname, refant=24)
        self.input_cal = os.path.join(DATA_PATH, "zen.2458043.12552.xx.HH.uvORA.abs.calfits")
        # make custom gain keys
        d, fl, ap, a, f, _, _, p = io.load_vis(self.data_fname, return_meta=True, pick_data_ants=False)
        self.freq_array = f
        self.antpos = ap
        gain_pols = np.unique([split_pol(pp) for pp in p])
        self.ap = ap
        self.gk = abscal.flatten([[(k, p) for k in a] for p in gain_pols])
        self.freqs = f

    def test_init(self):
        """Test AbsCal construction: with/without metadata, from filenames,
        refant handling, baseline cut/taper, and input_cal application."""
        # init with no meta
        AC = abscal.AbsCal(self.AC.model, self.AC.data)
        assert AC.bls is None
        # init with meta
        AC = abscal.AbsCal(self.AC.model, self.AC.data, antpos=self.AC.antpos, freqs=self.AC.freqs)
        assert np.allclose(AC.bls[(24, 25, 'ee')][0], -14.607842046642745)
        # init with meta
        AC = abscal.AbsCal(self.AC.model, self.AC.data)
        # test feeding file and refant and bl_cut and bl_taper
        AC = abscal.AbsCal(self.model_fname, self.data_fname, refant=24, antpos=self.AC.antpos,
                           max_bl_cut=26.0, bl_taper_fwhm=15.0)
        # test ref ant
        assert AC.refant == 24
        assert np.allclose(np.linalg.norm(AC.antpos[24]), 0.0)
        # test bl cut
        assert not np.any(np.array([np.linalg.norm(AC.bls[k]) for k in AC.bls.keys()]) > 26.0)
        # test bl taper: shorter baselines should be weighted more heavily
        assert np.median(AC.wgts[(24, 25, 'ee')]) > np.median(AC.wgts[(24, 39, 'ee')])
        # test with input cal
        bl = (24, 25, 'ee')
        uvc = UVCal()
        with warnings.catch_warnings():
            warnings.filterwarnings(
                "ignore",
                message="telescope_location is not set. Using known values for HERA"
            )
            warnings.filterwarnings(
                "ignore",
                message="antenna_positions are not set or are being overwritten."
            )
            warnings.filterwarnings(
                "ignore",
                message="antenna_diameters are not set or are being overwritten."
            )
            uvc.read_calfits(self.input_cal)
        aa = uvc.ant_array.tolist()
        # baseline gain g_i * conj(g_j) and combined flags for bl
        g = (uvc.gain_array[aa.index(bl[0])] * uvc.gain_array[aa.index(bl[1])].conj()).squeeze().T
        gf = (uvc.flag_array[aa.index(bl[0])] + uvc.flag_array[aa.index(bl[1])]).squeeze().T
        w = self.AC.wgts[bl] * ~gf
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore", message="antenna_diameters are not set")
            AC2 = abscal.AbsCal(copy.deepcopy(self.AC.model), copy.deepcopy(self.AC.data), wgts=copy.deepcopy(self.AC.wgts), refant=24, input_cal=self.input_cal)
        # input_cal should have divided the data by the baseline gain
        np.testing.assert_array_almost_equal(self.AC.data[bl] / g * w, AC2.data[bl] * w)

    def test_abs_amp_logcal(self):
        """Test abs_amp_logcal: solution shapes, None defaults before running,
        propagation to gain arrays, custom gains, and running without wgts."""
        # test execution and variable assignments
        self.AC.abs_amp_logcal(verbose=False)
        assert self.AC.abs_eta[(24, 'Jee')].shape == (60, 64)
        assert self.AC.abs_eta_gain[(24, 'Jee')].shape == (60, 64)
        assert self.AC.abs_eta_arr.shape == (7, 60, 64, 1)
        assert self.AC.abs_eta_gain_arr.shape == (7, 60, 64, 1)
        # test Nones
        AC = abscal.AbsCal(self.AC.model, self.AC.data)
        assert AC.abs_eta is None
        assert AC.abs_eta_arr is None
        assert AC.abs_eta_gain is None
        assert AC.abs_eta_gain_arr is None
        # test propagation to gain_arr: zeroed eta -> unit-amplitude gain
        AC.abs_amp_logcal(verbose=False)
        AC._abs_eta_arr *= 0
        assert np.allclose(np.abs(AC.abs_eta_gain_arr[0, 0, 0, 0]), 1.0)
        # test custom gain
        g = self.AC.custom_abs_eta_gain(self.gk)
        assert len(g) == 47
        # test w/ no wgts
        AC.wgts = None
        AC.abs_amp_logcal(verbose=False)

    def test_TT_phs_logcal(self):
        """Test TT_phs_logcal (Tilt/Translation phase solver): solution shapes,
        four_pol mode, None defaults, custom gains, and no-wgts execution."""
        # test execution
        self.AC.TT_phs_logcal(verbose=False)
        assert self.AC.TT_Phi_arr.shape == (7, 2, 60, 64, 1)
        assert self.AC.TT_Phi_gain_arr.shape == (7, 60, 64, 1)
        assert self.AC.abs_psi_arr.shape == (7, 60, 64, 1)
        assert self.AC.abs_psi_gain_arr.shape == (7, 60, 64, 1)
        assert self.AC.abs_psi[(24, 'Jee')].shape == (60, 64)
        assert self.AC.abs_psi_gain[(24, 'Jee')].shape == (60, 64)
        assert self.AC.TT_Phi[(24, 'Jee')].shape == (2, 60, 64)
        assert self.AC.TT_Phi_gain[(24, 'Jee')].shape == (60, 64)
        # refant (24) gain phase should be zero
        assert np.allclose(np.angle(self.AC.TT_Phi_gain[(24, 'Jee')]), 0.0)
        # test merge pols
        self.AC.TT_phs_logcal(verbose=False, four_pol=True)
        assert self.AC.TT_Phi_arr.shape == (7, 2, 60, 64, 1)
        assert self.AC.abs_psi_arr.shape == (7, 60, 64, 1)
        # test Nones
        AC = abscal.AbsCal(self.AC.model, self.AC.data, antpos=self.antpos)
        assert AC.abs_psi_arr is None
        assert AC.abs_psi_gain_arr is None
        assert AC.TT_Phi_arr is None
        assert AC.TT_Phi_gain_arr is None
        assert AC.abs_psi is None
        assert AC.abs_psi_gain is None
        assert AC.TT_Phi is None
        assert AC.TT_Phi_gain is None
        # test custom gain
        g = self.AC.custom_TT_Phi_gain(self.gk, self.ap)
        assert len(g) == 47
        g = self.AC.custom_abs_psi_gain(self.gk)
        assert g[(0, 'Jee')].shape == (60, 64)
        # test w/ no wgts
        AC.wgts = None
        AC.TT_phs_logcal(verbose=False)

    def test_amp_logcal(self):
        """Test per-antenna amp_logcal: shapes/dtypes, None defaults, no wgts."""
        self.AC.amp_logcal(verbose=False)
        assert self.AC.ant_eta[(24, 'Jee')].shape == (60, 64)
        assert self.AC.ant_eta_gain[(24, 'Jee')].shape == (60, 64)
        assert self.AC.ant_eta_arr.shape == (7, 60, 64, 1)
        assert self.AC.ant_eta_arr.dtype == float
        assert self.AC.ant_eta_gain_arr.shape == (7, 60, 64, 1)
        assert self.AC.ant_eta_gain_arr.dtype == complex
        # test Nones
        AC = abscal.AbsCal(self.AC.model, self.AC.data)
        assert AC.ant_eta is None
        assert AC.ant_eta_gain is None
        assert AC.ant_eta_arr is None
        assert AC.ant_eta_gain_arr is None
        # test w/ no wgts
        AC.wgts = None
        AC.amp_logcal(verbose=False)

    def test_phs_logcal(self):
        """Test per-antenna phs_logcal: shapes/dtypes, refant phase is zero,
        avg mode, None defaults, and no-wgts execution."""
        self.AC.phs_logcal(verbose=False)
        assert self.AC.ant_phi[(24, 'Jee')].shape == (60, 64)
        assert self.AC.ant_phi_gain[(24, 'Jee')].shape == (60, 64)
        assert self.AC.ant_phi_arr.shape == (7, 60, 64, 1)
        assert self.AC.ant_phi_arr.dtype == float
        assert self.AC.ant_phi_gain_arr.shape == (7, 60, 64, 1)
        assert self.AC.ant_phi_gain_arr.dtype == complex
        # refant (24) gain phase should be zero
        assert np.allclose(np.angle(self.AC.ant_phi_gain[(24, 'Jee')]), 0.0)
        self.AC.phs_logcal(verbose=False, avg=True)
        AC = abscal.AbsCal(self.AC.model, self.AC.data)
        assert AC.ant_phi is None
        assert AC.ant_phi_gain is None
        assert AC.ant_phi_arr is None
        assert AC.ant_phi_gain_arr is None
        # test w/ no wgts
        AC.wgts = None
        AC.phs_logcal(verbose=False)

    def test_delay_lincal(self):
        """Test per-antenna delay_lincal: shapes with and without phase
        offsets, missing-freqs exception, None defaults, flag handling,
        medfilt/time_avg options, and no-wgts execution."""
        # test w/o offsets
        self.AC.delay_lincal(verbose=False, kernel=(1, 3), medfilt=False)
        assert self.AC.ant_dly[(24, 'Jee')].shape == (60, 1)
        assert self.AC.ant_dly_gain[(24, 'Jee')].shape == (60, 64)
        assert self.AC.ant_dly_arr.shape == (7, 60, 1, 1)
        assert self.AC.ant_dly_gain_arr.shape == (7, 60, 64, 1)
        # test w/ offsets
        self.AC.delay_lincal(verbose=False, kernel=(1, 3), medfilt=False)
        assert self.AC.ant_dly_phi[(24, 'Jee')].shape == (60, 1)
        assert self.AC.ant_dly_phi_gain[(24, 'Jee')].shape == (60, 64)
        assert self.AC.ant_dly_phi_arr.shape == (7, 60, 1, 1)
        assert self.AC.ant_dly_phi_gain_arr.shape == (7, 60, 64, 1)
        assert self.AC.ant_dly_arr.shape == (7, 60, 1, 1)
        assert self.AC.ant_dly_arr.dtype == float
        assert self.AC.ant_dly_gain_arr.shape == (7, 60, 64, 1)
        assert self.AC.ant_dly_gain_arr.dtype == complex
        # refant (24) gain phases should be zero
        assert np.allclose(np.angle(self.AC.ant_dly_gain[(24, 'Jee')]), 0.0)
        assert np.allclose(np.angle(self.AC.ant_dly_phi_gain[(24, 'Jee')]), 0.0)
        # test exception: delay_lincal requires freqs metadata
        AC = abscal.AbsCal(self.AC.model, self.AC.data)
        pytest.raises(AttributeError, AC.delay_lincal)
        # test Nones
        AC = abscal.AbsCal(self.AC.model, self.AC.data, freqs=self.freq_array)
        assert AC.ant_dly is None
        assert AC.ant_dly_gain is None
        assert AC.ant_dly_arr is None
        assert AC.ant_dly_gain_arr is None
        assert AC.ant_dly_phi is None
        assert AC.ant_dly_phi_gain is None
        assert AC.ant_dly_phi_arr is None
        assert AC.ant_dly_phi_gain_arr is None
        # test flags handling: fully-flagged baseline should not crash
        AC = abscal.AbsCal(self.AC.model, self.AC.data, freqs=self.freqs)
        AC.wgts[(24, 25, 'ee')] *= 0
        AC.delay_lincal(verbose=False)
        # test medfilt
        self.AC.delay_lincal(verbose=False, medfilt=False)
        self.AC.delay_lincal(verbose=False, time_avg=True)
        # test w/ no wgts
        AC.wgts = None
        AC.delay_lincal(verbose=False)

    def test_delay_slope_lincal(self):
        """Test array-wide delay_slope_lincal: shapes, refant phase zero,
        custom gains, missing-antpos exception, None defaults, medfilt/
        time_avg/four_pol modes, flag handling, and no-wgts execution."""
        # test w/o offsets
        self.AC.delay_slope_lincal(verbose=False, kernel=(1, 3), medfilt=False)
        assert self.AC.dly_slope[(24, 'Jee')].shape == (2, 60, 1)
        assert self.AC.dly_slope_gain[(24, 'Jee')].shape == (60, 64)
        assert self.AC.dly_slope_arr.shape == (7, 2, 60, 1, 1)
        assert self.AC.dly_slope_gain_arr.shape == (7, 60, 64, 1)
        assert self.AC.dly_slope_ant_dly_arr.shape == (7, 60, 1, 1)
        # refant (24) gain phase should be zero
        assert np.allclose(np.angle(self.AC.dly_slope_gain[(24, 'Jee')]), 0.0)
        g = self.AC.custom_dly_slope_gain(self.gk, self.ap)
        assert g[(0, 'Jee')].shape == (60, 64)
        # test exception: delay_slope_lincal requires antpos/freqs metadata
        AC = abscal.AbsCal(self.AC.model, self.AC.data)
        pytest.raises(AttributeError, AC.delay_slope_lincal)
        # test Nones
        AC = abscal.AbsCal(self.AC.model, self.AC.data, antpos=self.antpos, freqs=self.freq_array)
        assert AC.dly_slope is None
        assert AC.dly_slope_gain is None
        assert AC.dly_slope_arr is None
        assert AC.dly_slope_gain_arr is None
        assert AC.dly_slope_ant_dly_arr is None
        # test medfilt and time_avg
        self.AC.delay_slope_lincal(verbose=False, medfilt=False)
        self.AC.delay_slope_lincal(verbose=False, time_avg=True)
        # test four pol
        self.AC.delay_slope_lincal(verbose=False, four_pol=True)
        assert self.AC.dly_slope[(24, 'Jee')].shape == (2, 60, 1)
        assert self.AC.dly_slope_gain[(24, 'Jee')].shape == (60, 64)
        assert self.AC.dly_slope_arr.shape == (7, 2, 60, 1, 1)
        assert self.AC.dly_slope_gain_arr.shape == (7, 60, 64, 1)
        # test flags handling: fully-flagged baseline should not crash
        AC = abscal.AbsCal(self.AC.model, self.AC.data, antpos=self.ap, freqs=self.freqs)
        AC.wgts[(24, 25, 'ee')] *= 0
        AC.delay_slope_lincal(verbose=False)
        # test w/ no wgts
        AC.wgts = None
        AC.delay_slope_lincal(verbose=False)

    def test_global_phase_slope_logcal(self):
        """Test global_phase_slope_logcal with both 'dft' and 'linfit'
        solvers: shapes, refant phase zero, custom gains, None defaults,
        flag handling, and no-wgts execution."""
        for solver in ['dft', 'linfit']:
            # test w/o offsets
            self.AC.global_phase_slope_logcal(verbose=False, edge_cut=31, solver=solver)
            assert self.AC.phs_slope[(24, 'Jee')].shape == (2, 60, 1)
            assert self.AC.phs_slope_gain[(24, 'Jee')].shape == (60, 64)
            assert self.AC.phs_slope_arr.shape == (7, 2, 60, 1, 1)
            assert self.AC.phs_slope_gain_arr.shape == (7, 60, 64, 1)
            assert self.AC.phs_slope_ant_phs_arr.shape == (7, 60, 1, 1)
            # refant (24) gain phase should be zero
            assert np.allclose(np.angle(self.AC.phs_slope_gain[(24, 'Jee')]), 0.0)
            g = self.AC.custom_phs_slope_gain(self.gk, self.ap)
            assert g[(0, 'Jee')].shape == (60, 64)
            # test Nones
            AC = abscal.AbsCal(self.AC.model, self.AC.data, antpos=self.antpos, freqs=self.freq_array)
            assert AC.phs_slope is None
            assert AC.phs_slope_gain is None
            assert AC.phs_slope_arr is None
            assert AC.phs_slope_gain_arr is None
            assert AC.phs_slope_ant_phs_arr is None
            # fully-flagged baseline should not crash
            AC = abscal.AbsCal(self.AC.model, self.AC.data, antpos=self.ap, freqs=self.freqs)
            AC.wgts[(24, 25, 'ee')] *= 0
            AC.global_phase_slope_logcal(verbose=False, solver=solver)
            # test w/ no wgts
            AC.wgts = None
            AC.global_phase_slope_logcal(verbose=False, solver=solver)

    def test_merge_gains(self):
        """Test abscal.merge_gains: merge_shared key handling, multiplication
        of amplitudes/phases across gain dicts, and OR-merging of flag dicts."""
        self.AC.abs_amp_logcal(verbose=False)
        self.AC.TT_phs_logcal(verbose=False)
        self.AC.delay_lincal(verbose=False)
        self.AC.phs_logcal(verbose=False)
        self.AC.amp_logcal(verbose=False)
        gains = [self.AC.abs_eta_gain, self.AC.TT_Phi_gain, self.AC.abs_psi_gain,
                 self.AC.ant_dly_gain, self.AC.ant_eta_gain, self.AC.ant_phi_gain]
        # key present in only one dict, to test merge_shared behavior
        gains[0][(99, 'Jee')] = 1.0
        # merge shared keys
        mgains = abscal.merge_gains(gains, merge_shared=True)
        assert (99, 'Jee') not in mgains
        # merge all keys
        mgains = abscal.merge_gains(gains, merge_shared=False)
        assert (99, 'Jee') in mgains
        # test merge: merged gain is the product of the individual gains
        k = (53, 'Jee')
        assert mgains[k].shape == (60, 64)
        assert mgains[k].dtype == complex
        assert np.allclose(np.abs(mgains[k][0, 0]), np.abs(self.AC.abs_eta_gain[k] * self.AC.ant_eta_gain[k])[0, 0])
        assert np.allclose(np.angle(mgains[k][0, 0]), np.angle(self.AC.TT_Phi_gain[k] * self.AC.abs_psi_gain[k]
                                                               * self.AC.ant_dly_gain[k] * self.AC.ant_phi_gain[k])[0, 0])
        # test merge of flag dictionaries (boolean arrays are OR-ed)
        f1 = {(1, 'Jee'): np.zeros(5, bool)}
        f2 = {(1, 'Jee'): np.zeros(5, bool)}
        f3 = abscal.merge_gains([f1, f2])
        assert f3[(1, 'Jee')].dtype == np.bool_
        assert not np.any(f3[(1, 'Jee')])
        f2[(1, 'Jee')][:] = True
        f3 = abscal.merge_gains([f1, f2])
        assert np.all(f3[(1, 'Jee')])

    def test_fill_dict_nans(self):
        """Test abscal.fill_dict_nans: NaN/inf replacement and weight zeroing,
        both for a full dict and for a single array (array=True)."""
        data = copy.deepcopy(self.AC.data)
        wgts = copy.deepcopy(self.AC.wgts)
        data[(25, 38, 'ee')][15, 20] *= np.nan
        data[(25, 38, 'ee')][20, 15] *= np.inf
        abscal.fill_dict_nans(data, wgts=wgts, nan_fill=-1, inf_fill=-2)
        assert data[(25, 38, 'ee')][15, 20].real == -1
        assert data[(25, 38, 'ee')][20, 15].real == -2
        assert np.allclose(wgts[(25, 38, 'ee')][15, 20], 0)
        assert np.allclose(wgts[(25, 38, 'ee')][20, 15], 0)
        data = copy.deepcopy(self.AC.data)
        wgts = copy.deepcopy(self.AC.wgts)
        data[(25, 38, 'ee')][15, 20] *= np.nan
        data[(25, 38, 'ee')][20, 15] *= np.inf
        abscal.fill_dict_nans(data[(25, 38, 'ee')], wgts=wgts[(25, 38, 'ee')], nan_fill=-1, inf_fill=-2, array=True)
        assert data[(25, 38, 'ee')][15, 20].real == -1
        assert data[(25, 38, 'ee')][20, 15].real == -2
        assert np.allclose(wgts[(25, 38, 'ee')][15, 20], 0)
        assert np.allclose(wgts[(25, 38, 'ee')][20, 15], 0)

    def test_mock_data(self):
        """End-to-end recovery test on mock data: inject a known delay slope,
        absolute amplitude, and Tilt/Translation phase into a model built from
        real data, then verify AbsCal recovers (the negative of) each."""
        # load into pyuvdata object
        data_file = os.path.join(DATA_PATH, "zen.2458043.12552.xx.HH.uvORA")
        data, flgs, ap, a, f, t, _, p = io.load_vis(data_file, return_meta=True)
        wgts = odict()
        for k in flgs.keys():
            wgts[k] = (~flgs[k]).astype(float)
        wgts = DataContainer(wgts)
        # make mock data with a known delay slope across the array
        dly_slope = np.array([-1e-9, 2e-9, 0])
        model = odict()
        for i, k in enumerate(data.keys()):
            bl = np.around(ap[k[0]] - ap[k[1]], 0)
            model[k] = data[k] * np.exp(2j * np.pi * f * np.dot(dly_slope, bl))
        model = DataContainer(model)
        # setup AbsCal
        AC = abscal.AbsCal(model, data, antpos=ap, wgts=wgts, freqs=f)
        # run delay_slope_cal
        AC.delay_slope_lincal(time_avg=True, verbose=False)
        # test recovery: accuracy only checked at 10% level
        assert np.allclose(AC.dly_slope_arr[0, 0, 0, 0, 0], 1e-9, atol=1e-10)
        assert np.allclose(AC.dly_slope_arr[0, 1, 0, 0, 0], -2e-9, atol=1e-10)
        # make mock data with known amplitude and phase-gradient offsets
        abs_gain = 0.02
        TT_phi = np.array([1e-3, -1e-3, 0])
        model = odict()
        for i, k in enumerate(data.keys()):
            bl = np.around(ap[k[0]] - ap[k[1]], 0)
            model[k] = data[k] * np.exp(abs_gain + 1j * np.dot(TT_phi, bl))
        model = DataContainer(model)
        # setup AbsCal
        AC = abscal.AbsCal(model, data, antpos=ap, wgts=wgts, freqs=f)
        # run abs_amp cal
        AC.abs_amp_logcal(verbose=False)
        # run TT_phs_logcal
        AC.TT_phs_logcal(verbose=False)
        assert np.allclose(np.median(AC.abs_eta_arr[0, :, :, 0][AC.wgts[(24, 25, 'ee')].astype(bool)]),
                           -0.01, atol=1e-3)
        assert np.allclose(np.median(AC.TT_Phi_arr[0, 0, :, :, 0][AC.wgts[(24, 25, 'ee')].astype(bool)]),
                           -1e-3, atol=1e-4)
        assert np.allclose(np.median(AC.TT_Phi_arr[0, 1, :, :, 0][AC.wgts[(24, 25, 'ee')].astype(bool)]),
                           1e-3, atol=1e-4)

    def test_run_model_based_calibration(self, tmpdir):
        """Test abscal.run_model_based_calibration end-to-end: scale the model
        by a random factor and check the solved gains equal
        1/sqrt(scale_factor), with/without dly_lincal, with an auto file and
        explicit refant, and with UVData objects instead of filenames."""
        data_file = os.path.join(DATA_PATH, 'test_input/zen.2458098.45361.HH.uvh5_downselected')
        tmppath = tmpdir.strpath
        hd = io.HERAData(data_file)
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore", message="Fixing auto-correlations to be be real-only")
            data, flags, nsamples = hd.read()
        antpairs = hd.get_antpairs()
        hdm = io.HERAData(data_file)
        model_data, model_flags, model_nsamples = hdm.read()
        precal_fname = os.path.join(tmppath, 'test_precal.calfits')
        # precalibration test gain (with unity gains).
        uvc_precal = UVCal()
        uvc_precal = uvc_precal.initialize_from_uvdata(uvdata=hd, gain_convention='divide', cal_style='sky',
                                                       ref_antenna_name='Amadeus', sky_catalog='The Library of Congress.',
                                                       metadata_only=False, cal_type='gain')
        uvc_precal.gain_array[:] = 1. + 0j
        uvc_precal.write_calfits(precal_fname)
        # include a model random scale factor times the amplitude of the data.
        scale_factor = np.random.rand() * 0.8 + 0.1
        hdm.data_array *= scale_factor
        # there are integrations and channels that need to be flagged.
        hdm.flag_array[np.isclose(hdm.data_array, 0.)] = True
        hd.flag_array[np.isclose(hd.data_array, 0.)] = True
        model_fname = os.path.join(tmppath, 'test_model.uvh5')
        data_fname = os.path.join(tmppath, 'test_data.uvh5')
        hdm.write_uvh5(model_fname)
        hd.write_uvh5(data_fname)
        # Now run abscal run
        cal_fname = os.path.join(tmppath, 'test_cal.calfits')
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore", message="Mean of empty slice")
            abscal.run_model_based_calibration(
                data_file=data_fname, model_file=model_fname,
                output_filename=cal_fname, clobber=True, precalibration_gain_file=precal_fname
            )
        # check that gains equal to 1/sqrt(scale_factor)
        hc = io.HERACal(cal_fname)
        gains, gain_flags, _, _ = hc.read()
        for k in gains:
            np.testing.assert_array_almost_equal(gains[k][~gain_flags[k]], scale_factor ** -.5)
        # Now run abscal run with dly_lincal
        cal_fname = os.path.join(tmppath, 'test_cal.calfits')
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore", message="Mean of empty slice")
            abscal.run_model_based_calibration(
                data_file=data_fname, model_file=model_fname, dly_lincal=True,
                output_filename=cal_fname, clobber=True, precalibration_gain_file=precal_fname
            )
        # check that gains equal to 1/sqrt(scale_factor)
        hc = io.HERACal(cal_fname)
        gains, gain_flags, _, _ = hc.read()
        for k in gains:
            np.testing.assert_array_almost_equal(gains[k][~gain_flags[k]], scale_factor ** -.5)
        # include auto_file and specify reference antenna.
        abscal.run_model_based_calibration(data_file=data_fname, model_file=model_fname, auto_file=data_fname,
                                           output_filename=cal_fname, clobber=True, refant=(0, 'Jnn'), precalibration_gain_file=precal_fname)
        # check that gains equal to 1/sqrt(scale_factor)
        hc = io.HERACal(cal_fname)
        gains, gain_flags, _, _ = hc.read()
        for k in gains:
            np.testing.assert_array_almost_equal(gains[k][~gain_flags[k]], scale_factor ** -.5)
        hd = UVData()
        hdm = UVData()
        hd.read(data_fname)
        hdm.read(model_fname)
        # test feeding UVData objects instead.
        abscal.run_model_based_calibration(data_file=hd, model_file=hdm, auto_file=hd,
                                           output_filename=cal_fname, clobber=True, refant=(0, 'Jnn'), precalibration_gain_file=precal_fname)
        # check that gains equal to 1/sqrt(scale_factor)
        hc = io.HERACal(cal_fname)
        gains, gain_flags, _, _ = hc.read()
        for k in gains:
            np.testing.assert_array_almost_equal(gains[k][~gain_flags[k]], scale_factor ** -.5)

    def test_run_model_based_calibration_flagged_gains(self, tmpdir):
        """
        Test case when all gains are flagged: the solved gains should be
        unity and every gain flag should be set.
        """
        data_file = os.path.join(DATA_PATH, 'test_input/zen.2458098.45361.HH.uvh5_downselected')
        tmppath = tmpdir.strpath
        hd = io.HERAData(data_file)
        data, flags, nsamples = hd.read()
        antpairs = hd.get_antpairs()
        hdm = io.HERAData(data_file)
        model_data, model_flags, model_nsamples = hdm.read()
        precal_fname = os.path.join(tmppath, 'test_precal.calfits')
        # include a model random scale factor times the amplitude of the data.
        scale_factor = np.random.rand() * 0.8 + 0.1
        hdm.data_array *= scale_factor
        # there are integrations and channels that need to be flagged.
        hdm.flag_array[np.isclose(hdm.data_array, 0.)] = True
        hd.flag_array[np.isclose(hd.data_array, 0.)] = True
        model_fname = os.path.join(tmppath, 'test_model.uvh5')
        data_fname = os.path.join(tmppath, 'test_data.uvh5')
        # flag everything in the data
        hd.flag_array[:] = True
        hdm.write_uvh5(model_fname)
        hd.write_uvh5(data_fname)
        cal_fname = os.path.join(tmppath, 'test_cal.calfits')
        # test feeding UVData objects instead.
        abscal.run_model_based_calibration(data_file=data_fname, model_file=model_fname, auto_file=data_fname,
                                           output_filename=cal_fname, clobber=True,
                                           refant=(0, 'Jnn'),
                                           spoof_missing_channels=True)
        # assert all flags and gains equal 1.
        hc = io.HERACal(cal_fname)
        gains, gain_flags, _, _ = hc.read()
        for k in gains:
            np.testing.assert_array_almost_equal(gains[k][~gain_flags[k]], 1.)
            np.testing.assert_array_almost_equal(gain_flags[k], True)

    def test_run_model_based_calibration_nonuniform_channels(self, tmpdir):
        """Test run_model_based_calibration on non-uniformly spaced frequency
        channels with spoof_missing_channels=True."""
        # unevenly spaced selection of channels
        include_chans = np.hstack([np.arange(10), np.arange(12, 15), np.arange(64 - 10, 64)])
        data_file = os.path.join(DATA_PATH, 'test_input/zen.2458098.45361.HH.uvh5_downselected')
        tmppath = tmpdir.strpath
        hd = io.HERAData(data_file)
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore", message="Selected frequencies are not evenly spaced")
            data, flags, nsamples = hd.read(freq_chans=include_chans)
        antpairs = hd.get_antpairs()
        hdm = io.HERAData(data_file)
        model_data, model_flags, model_nsamples = hdm.read(freq_chans=include_chans)
        # include a model random scale factor times the amplitude of the data.
        scale_factor = np.random.rand() * 0.8 + 0.1
        hdm.data_array *= scale_factor
        # there are integrations and channels that need to be flagged.
        hdm.flag_array[np.isclose(hdm.data_array, 0.)] = True
        hd.flag_array[np.isclose(hd.data_array, 0.)] = True
        model_fname = os.path.join(tmppath, 'test_model.uvh5')
        data_fname = os.path.join(tmppath, 'test_data.uvh5')
        hdm.write_uvh5(model_fname)
        hd.write_uvh5(data_fname)
        cal_fname = os.path.join(tmppath, 'test_cal.calfits')
        # test feeding UVData objects instead.
        abscal.run_model_based_calibration(data_file=data_fname, model_file=model_fname, auto_file=data_fname,
                                           output_filename=cal_fname, clobber=True,
                                           refant=(0, 'Jnn'),
                                           spoof_missing_channels=True)
        # check that gains equal to 1/sqrt(scale_factor)
        hc = io.HERACal(cal_fname)
        gains, gain_flags, _, _ = hc.read()
        for k in gains:
            np.testing.assert_array_almost_equal(gains[k][~gain_flags[k]], scale_factor ** -.5)

    def test_run_model_based_calibration_redundant(self, tmpdir):
        """Test run_model_based_calibration with a redundantly-averaged model
        that must be inflated by redundancy to match the data."""
        data_file = os.path.join(DATA_PATH, 'test_input/zen.2458098.45361.HH.uvh5_downselected')
        tmppath = tmpdir.strpath
        hd = io.HERAData(data_file)
        data, flags, nsamples = hd.read()
        antpairs = hd.get_antpairs()
        hdm = io.HERAData(data_file)
        model_data, model_flags, model_nsamples = hdm.read()
        precal_fname = os.path.join(tmppath, 'test_precal.calfits')
        # precalibration gains (unity)
        uvc_precal = UVCal()
        uvc_precal = uvc_precal.initialize_from_uvdata(
            uvdata=hd, gain_convention='divide', cal_style='sky',
            ref_antenna_name='Amadeus', sky_catalog='The Library of Congress.',
            metadata_only=False, cal_type='gain'
        )
        uvc_precal.gain_array[:] = 1. + 0j
        uvc_precal.write_calfits(precal_fname)
        # include a model random scale factor times the amplitude of the data.
        scale_factor = np.random.rand() * 0.8 + 0.1
        hdm.data_array *= scale_factor
        # there are integrations and channels that need to be flagged.
        hdm.flag_array[np.isclose(hdm.data_array, 0.)] = True
        hd.flag_array[np.isclose(hd.data_array, 0.)] = True
        model_fname = os.path.join(tmppath, 'test_model.uvh5')
        data_fname = os.path.join(tmppath, 'test_data.uvh5')
        hdm.write_uvh5(model_fname)
        hd.write_uvh5(data_fname)
        cal_fname = os.path.join(tmppath, 'test_cal.calfits')
        # data file where all data in redundant group are equal to redundantly averaged data
        # (inflated by redundancy)
        red_data_fname = os.path.join(tmppath, 'test_data_red.uvh5')
        # model file that is redundantly averaged
        red_model_fname = os.path.join(tmppath, 'test_model_red.uvh5')
        # create a redundantly averaged model file.
        reds = redcal.get_pos_reds(hdm.antpos, include_autos=True)
        reds = [[bl for bl in redgrp if bl in antpairs or reverse_bl(bl) in antpairs] for redgrp in reds]
        reds = [redgrp for redgrp in reds if len(redgrp) > 0]
        utils.red_average(model_data, reds=reds, flags=model_flags,
                          nsamples=model_nsamples, inplace=True)
        hdm.select(bls=list(model_data.keys()))
        hdm.update(data=model_data, flags=model_flags, nsamples=model_nsamples)
        hdm.flag_array[np.isclose(hdm.data_array, 0.)] = True
        hdm.write_uvh5(red_model_fname)
        # generate a new data file that is inflated by redundancy from redundant model file.
        hdm.select(antenna_nums=np.unique(np.hstack([hd.ant_1_array, hd.ant_2_array])),
                   keep_all_metadata=False)
        hdm.inflate_by_redundancy()
        hdm.select(bls=hd.bls)
        hdm.data_array /= scale_factor
        hdm.write_uvh5(red_data_fname)
        # use inflated redundant model.
        abscal.run_model_based_calibration(data_file=red_data_fname, model_file=red_model_fname,
                                           auto_file=red_data_fname,
                                           output_filename=cal_fname,
                                           clobber=True, refant=(0, 'Jnn'),
                                           constrain_model_to_data_ants=True, max_iter=1,
                                           inflate_model_by_redundancy=True, precalibration_gain_file=precal_fname)
        # check that gains equal to 1/sqrt(scale_factor)
        hc = io.HERACal(cal_fname)
        gains, gain_flags, _, _ = hc.read()
        for k in gains:
            np.testing.assert_array_almost_equal(gains[k][~gain_flags[k]], scale_factor ** -.5)

    def test_model_calibration_argparser(self):
        """Test that model_calibration_argparser parses positional and
        optional arguments with the expected defaults."""
        sys.argv = [sys.argv[0], 'a', 'b', 'c', '--auto_file', 'd']
        ap = abscal.model_calibration_argparser()
        args = ap.parse_args()
        assert args.data_file == 'a'
        assert args.model_file == 'b'
        assert args.output_filename == 'c'
        assert args.auto_file == 'd'
        assert args.tol == 1e-6
@pytest.mark.filterwarnings("ignore:The default for the `center` keyword has changed")
@pytest.mark.filterwarnings("ignore:divide by zero encountered in reciprocal")
@pytest.mark.filterwarnings("ignore:telescope_location is not set")
@pytest.mark.filterwarnings("ignore:invalid value encountered in multiply")
@pytest.mark.filterwarnings("ignore:antenna_positions are not set or are being overwritten")
@pytest.mark.filterwarnings("ignore:Fixing auto-correlations to be be real-only")
class Test_Post_Redcal_Abscal_Run(object):
    def setup_method(self):
        """Define paths to the test data, redcal calfits, and model files
        (including redundantly-averaged and missing-integration variants)."""
        self.data_file = os.path.join(DATA_PATH, 'test_input/zen.2458098.45361.HH.uvh5_downselected')
        self.redcal_file = os.path.join(DATA_PATH, 'test_input/zen.2458098.45361.HH.omni.calfits_downselected')
        self.model_files = [os.path.join(DATA_PATH, 'test_input/zen.2458042.60288.HH.uvRXLS.uvh5_downselected'),
                            os.path.join(DATA_PATH, 'test_input/zen.2458042.61034.HH.uvRXLS.uvh5_downselected')]
        # same models, but the second file is missing its first integration
        self.model_files_missing_one_int = [os.path.join(DATA_PATH, 'test_input/zen.2458042.60288.HH.uvRXLS.uvh5_downselected'),
                                            os.path.join(DATA_PATH, 'test_input/zen.2458042.61034.HH.uvRXLS.uvh5_downselected_missing_first_integration')]
        self.red_data_file = os.path.join(DATA_PATH, 'test_input/zen.2458098.45361.HH.uvh5_downselected_redavg')
        self.red_model_files = [os.path.join(DATA_PATH, 'test_input/zen.2458042.60288.HH.uvRXLS.uvh5_downselected_redavg'),
                                os.path.join(DATA_PATH, 'test_input/zen.2458042.61034.HH.uvRXLS.uvh5_downselected_redavg')]
    def test_get_all_times_and_lsts(self):
        """Test abscal.get_all_times_and_lsts: sorted outputs, LST unwrapping
        past 2*pi, and the solar_horizon cut removing daytime integrations."""
        hd = io.HERAData(self.model_files)
        all_times, all_lsts = abscal.get_all_times_and_lsts(hd)
        assert len(all_times) == 120
        assert len(all_lsts) == 120
        np.testing.assert_array_equal(all_times, sorted(all_times))
        # shift LSTs so they wrap past 2*pi, then test unwrap=True
        for f in hd.lsts.keys():
            hd.lsts[f] += 4.75
        all_times, all_lsts = abscal.get_all_times_and_lsts(hd, unwrap=True)
        assert all_lsts[-1] > 2 * np.pi
        np.testing.assert_array_equal(all_lsts, sorted(all_lsts))
        c = abscal.get_all_times_and_lsts(hd)
        assert all_lsts[0] < all_lsts[-1]
        # solar_horizon=0.0 should exclude all daytime integrations
        hd = io.HERAData(self.data_file)
        hd.times = hd.times[0:4] + .5
        hd.lsts = hd.lsts[0:4] + np.pi
        all_times, all_lsts = abscal.get_all_times_and_lsts(hd, solar_horizon=0.0)
        assert len(all_times) == 0
        assert len(all_lsts) == 0
    def test_get_d2m_time_map(self):
        """Test abscal.get_d2m_time_map: each data time maps to the model
        time with the nearest LST (or None when no model time is closest),
        and mismatched input lengths raise ValueError."""
        hd = io.HERAData(self.data_file)
        hdm = io.HERAData(self.model_files)
        all_data_times, all_data_lsts = abscal.get_all_times_and_lsts(hd)
        all_model_times, all_model_lsts = abscal.get_all_times_and_lsts(hdm)
        d2m_time_map = abscal.get_d2m_time_map(all_data_times, all_data_lsts, all_model_times, all_model_lsts)
        for dtime, mtime in d2m_time_map.items():
            dlst = all_data_lsts[np.argwhere(all_data_times == dtime)[0][0]]
            mlst = all_model_lsts[np.argwhere(all_model_times == mtime)[0][0]]
            # mapped model LST must be within one data integration of the data LST
            assert np.abs(dlst - mlst) < np.median(np.ediff1d(all_data_lsts))
            # and this data time must be the closest one to that model LST
            assert np.min(np.abs(all_data_lsts - mlst)) == np.abs(dlst - mlst)
        # with only one model file, some data times have no matching model time
        hd = io.HERAData(self.data_file)
        hdm = io.HERAData(self.model_files[0])
        all_data_times, all_data_lsts = abscal.get_all_times_and_lsts(hd)
        all_model_times, all_model_lsts = abscal.get_all_times_and_lsts(hdm)
        d2m_time_map = abscal.get_d2m_time_map(all_data_times, all_data_lsts, all_model_times, all_model_lsts)
        for dtime, mtime in d2m_time_map.items():
            dlst = all_data_lsts[np.argwhere(all_data_times == dtime)[0][0]]
            if mtime is None:
                # unmapped: every model LST is closer to some other data LST
                for mlst in all_model_lsts:
                    assert np.min(np.abs(all_data_lsts - mlst)) < np.abs(dlst - mlst)
            else:
                mlst = all_model_lsts[np.argwhere(all_model_times == mtime)[0][0]]
                assert np.abs(dlst - mlst) < np.median(np.ediff1d(all_data_lsts))
                assert np.min(np.abs(all_data_lsts - mlst)) == np.abs(dlst - mlst)
        # Test errors for when times/lsts don't match lengths
        with pytest.raises(ValueError):
            abscal.get_d2m_time_map(all_data_times[1:], all_data_lsts, all_model_times, all_model_lsts)
        with pytest.raises(ValueError):
            abscal.get_d2m_time_map(all_data_times, all_data_lsts, all_model_times[1:], all_model_lsts)
    def test_match_baselines(self):
        """Test abscal.match_baselines: identity matching, redundant-model
        matching (including reversed baselines and different antenna
        numbering), baseline-length cuts, and the redsol+redundant case."""
        with pytest.raises(NotImplementedError):
            abscal.match_baselines(None, None, None, model_is_redundant=False, data_is_redsol=True)
        # try with data files:
        hd = io.HERAData(self.data_file)
        hdm = io.HERAData(self.model_files[0])
        data_bl_to_load, model_bl_to_load, data_to_model_bl_map = abscal.match_baselines(hd.bls, hdm.bls, hd.antpos)
        # non-redundant case: data and model baselines map one-to-one
        for bl in data_bl_to_load:
            assert bl in model_bl_to_load
            assert data_to_model_bl_map[bl] == bl
        for bl in model_bl_to_load:
            assert bl in data_bl_to_load
        # try with redundant model
        with pytest.raises(AssertionError):
            abscal.match_baselines(hd.bls, hdm.bls, hd.antpos, model_is_redundant=True)
        antpos = {0: np.array([0, 0, 0]), 1: np.array([10, 0, 0]), 2: np.array([20, 0, 0]), 3: np.array([100, 100, 0])}
        data_bls = [(0, 1, 'ee'), (0, 2, 'ee'), (1, 2, 'ee'), (0, 3, 'ee')]
        model_bls = [(0, 1, 'ee'), (0, 2, 'ee'), (1, 3, 'ee')]
        data_bl_to_load, model_bl_to_load, data_to_model_bl_map = abscal.match_baselines(data_bls, model_bls, antpos, model_is_redundant=True)
        assert len(data_bl_to_load) == 3
        assert len(model_bl_to_load) == 2
        # (0,1) and (1,2) are redundant, so both map to model baseline (0,1)
        assert data_to_model_bl_map[(0, 1, 'ee')] == (0, 1, 'ee')
        assert data_to_model_bl_map[(1, 2, 'ee')] == (0, 1, 'ee')
        assert data_to_model_bl_map[(0, 2, 'ee')] == (0, 2, 'ee')
        # try with cutting on baseline length
        with pytest.raises(AssertionError):
            abscal.match_baselines(hd.bls, hdm.bls, hd.antpos, model_is_redundant=True)
        antpos = {0: np.array([0, 0, 0]), 1: np.array([10, 0, 0]), 2: np.array([20, 0, 0]), 3: np.array([100, 100, 0])}
        data_bls = [(0, 1, 'ee'), (0, 2, 'ee'), (1, 2, 'ee'), (0, 3, 'ee')]
        model_bls = [(0, 1, 'ee'), (0, 2, 'ee'), (1, 3, 'ee'), (0, 3, 'ee')]
        data_bl_to_load, model_bl_to_load, data_to_model_bl_map = abscal.match_baselines(data_bls, model_bls, antpos, model_is_redundant=True, min_bl_cut=15, max_bl_cut=50)
        # only the 20 m baseline survives the 15-50 m cut
        assert len(data_bl_to_load) == 1
        assert len(model_bl_to_load) == 1
        assert data_to_model_bl_map[(0, 2, 'ee')] == (0, 2, 'ee')
        # try with redundant model and some reversed baselines
        with pytest.raises(AssertionError):
            abscal.match_baselines(hd.bls, hdm.bls, hd.antpos, model_is_redundant=True)
        antpos = {0: np.array([0, 0, 0]), 1: np.array([10, 0, 0]), 2: np.array([20, 0, 0]), 3: np.array([100, 100, 0])}
        data_bls = [(0, 1, 'ee'), (0, 2, 'ee'), (2, 1, 'ee'), (0, 3, 'ee')]
        model_bls = [(0, 1, 'ee'), (2, 0, 'ee'), (1, 3, 'ee')]
        data_bl_to_load, model_bl_to_load, data_to_model_bl_map = abscal.match_baselines(data_bls, model_bls, antpos, model_is_redundant=True)
        assert len(data_bl_to_load) == 3
        assert len(model_bl_to_load) == 2
        assert data_to_model_bl_map[(0, 1, 'ee')] == (0, 1, 'ee')
        # (2,1) is the reverse of a (0,1)-redundant baseline, so it maps to (1,0)
        assert data_to_model_bl_map[(2, 1, 'ee')] == (1, 0, 'ee')
        assert data_to_model_bl_map[(0, 2, 'ee')] == (0, 2, 'ee')
        # try with different antenna numbering in model
        antpos = {0: np.array([0, 0, 0]), 1: np.array([10, 0, 0]), 2: np.array([20, 0, 0]), 3: np.array([100, 100, 0])}
        model_antpos = {100: np.array([0, 0, 0]), 101: np.array([10, 0, 0]), 102: np.array([20, 0, 0]), 103: np.array([100, 100, 0])}
        data_bls = [(0, 1, 'ee'), (0, 2, 'ee'), (1, 2, 'ee'), (0, 3, 'ee')]
        model_bls = [(100, 101, 'ee'), (100, 102, 'ee'), (101, 103, 'ee')]
        data_bl_to_load, model_bl_to_load, data_to_model_bl_map = abscal.match_baselines(data_bls, model_bls, antpos, model_antpos=model_antpos, model_is_redundant=True)
        assert len(data_bl_to_load) == 3
        assert len(model_bl_to_load) == 2
        assert data_to_model_bl_map[(0, 1, 'ee')] == (100, 101, 'ee')
        assert data_to_model_bl_map[(1, 2, 'ee')] == (100, 101, 'ee')
        assert data_to_model_bl_map[(0, 2, 'ee')] == (100, 102, 'ee')
        # try with both redundant
        with pytest.raises(AssertionError):
            abscal.match_baselines(data_bls, model_bls, antpos, model_antpos=model_antpos, model_is_redundant=True, data_is_redsol=True)
        antpos = {0: np.array([0, 0, 0]), 1: np.array([10, 0, 0]), 2: np.array([20, 0, 0]), 3: np.array([100, 100, 0])}
        model_antpos = {100: np.array([0, 0, 0]), 101: np.array([10, 0, 0]), 102: np.array([20, 0, 0]), 103: np.array([100, 100, 0])}
        data_bls = [(0, 2, 'ee'), (1, 2, 'ee'), (0, 3, 'ee')]
        model_bls = [(100, 101, 'ee'), (100, 102, 'ee'), (101, 103, 'ee')]
        data_bl_to_load, model_bl_to_load, data_to_model_bl_map = abscal.match_baselines(data_bls, model_bls, antpos, model_antpos=model_antpos,
                                                                                         data_is_redsol=True, model_is_redundant=True)
        assert len(data_bl_to_load) == 2
        assert len(model_bl_to_load) == 2
        assert data_to_model_bl_map[(1, 2, 'ee')] == (100, 101, 'ee')
        assert data_to_model_bl_map[(0, 2, 'ee')] == (100, 102, 'ee')
def test_build_data_wgts(self):
    """Test abscal.build_data_wgts in both non-redundant and redundant modes.

    Expected behavior checked here: weights scale linearly with nsamples,
    go to zero where data/model are flagged, and are reduced where the
    autocorrelations (used as a noise proxy) are elevated.
    """
    # test non-redundant version
    bls = [(0, 1, 'ee'), (0, 2, 'ee'), (1, 2, 'ee')]
    auto_bls = [(0, 0, 'ee'), (1, 1, 'ee'), (2, 2, 'ee')]
    data_flags = DataContainer({bl: np.zeros((3, 4), dtype=bool) for bl in bls})
    data_flags[(0, 1, 'ee')][0, 0] = True  # flag a single pixel on one baseline
    data_flags.times_by_bl = {bl[:2]: np.arange(3) / 86400 for bl in bls}
    data_flags.freqs = np.arange(4)
    data_flags.antpos = {0: np.array([0, 0, 0]), 1: np.array([10, 0, 0]), 2: np.array([20, 0, 0])}
    data_flags.data_antpos = {0: np.array([0, 0, 0]), 1: np.array([10, 0, 0]), 2: np.array([20, 0, 0])}
    data_nsamples = DataContainer({bl: np.ones((3, 4), dtype=float) for bl in bls})
    data_nsamples[(0, 1, 'ee')][1, 1] = 2  # double nsamples at one pixel
    model_flags = data_flags
    autocorrs = DataContainer({bl: np.ones((3, 4), dtype=complex) for bl in auto_bls})
    autocorrs[(1, 1, 'ee')][2, 2] = 3  # raise the noise proxy for antenna 1 at pixel (2, 2)
    auto_flags = DataContainer({bl: np.zeros((3, 4), dtype=bool) for bl in auto_bls})
    wgts = abscal.build_data_wgts(data_flags, data_nsamples, model_flags, autocorrs, auto_flags)
    for bl in wgts:
        for t in range(3):
            for f in range(4):
                if 1 in bl and t == 2 and f == 2:
                    # baselines involving antenna 1 are down-weighted by its tripled autocorrelation
                    assert wgts[bl][t, f] == 1 / 3
                elif bl == (0, 1, 'ee'):
                    if t == 0 and f == 0:
                        assert wgts[bl][t, f] == 0  # flagged pixel gets zero weight
                    elif t == 1 and f == 1:
                        assert wgts[bl][t, f] == 2  # weight scales with nsamples
                    else:
                        assert wgts[bl][t, f] == 1
                else:
                    assert wgts[bl][t, f] == 1
    # test redundant version
    bls = [(0, 1, 'ee'), (0, 2, 'ee')]
    data_flags = DataContainer({bl: np.zeros((3, 4), dtype=bool) for bl in bls})
    data_flags.times_by_bl = {bl[:2]: np.arange(3) / 86400 for bl in bls}
    data_flags.freqs = np.arange(4)
    data_flags.antpos = {0: np.array([0, 0, 0]), 1: np.array([10, 0, 0]), 2: np.array([20, 0, 0]), 3: np.array([30, 0, 0])}
    data_flags.data_antpos = {0: np.array([0, 0, 0]), 1: np.array([10, 0, 0]), 2: np.array([20, 0, 0]), 3: np.array([30, 0, 0])}
    data_nsamples = DataContainer({bl: np.ones((3, 4), dtype=float) for bl in bls})
    data_nsamples[(0, 1, 'ee')] *= 3
    data_nsamples[(0, 2, 'ee')] *= 2
    model_flags = data_flags
    autocorrs = DataContainer({bl: np.ones((3, 4), dtype=complex) for bl in auto_bls})
    autocorrs[(2, 2, 'ee')][2, 2] = 3
    auto_flags = DataContainer({bl: np.zeros((3, 4), dtype=bool) for bl in auto_bls})
    auto_flags[(0, 0, 'ee')][1, 1] = True
    gain_flags = {ant: np.zeros((3, 4), dtype=bool) for ant in [(0, 'Jee'), (1, 'Jee'), (2, 'Jee'), (-1, 'Jee')]}
    gain_flags[(0, 'Jee')] += True  # flag antenna 0 entirely
    wgts = abscal.build_data_wgts(data_flags, data_nsamples, model_flags, autocorrs, auto_flags,
                                  data_is_redsol=True, gain_flags=gain_flags, tol=1.0)
    for bl in wgts:
        for t in range(3):
            # NOTE(review): only 3 of the 4 frequency channels are checked here — confirm intentional
            for f in range(3):
                if bl == (0, 1, 'ee'):
                    if t == 2 and f == 2:
                        # down-weighted by the inverse-variance-combined autocorrelations of the group
                        assert wgts[bl][t, f] == 3 / (((1 / 3) + (1 / 1))**-1 * 2)
                    else:
                        assert wgts[bl][t, f] == 3
                elif bl == (0, 2, 'ee'):
                    if t == 2 and f == 2:
                        assert wgts[bl][t, f] == 2 / (((1 / 3))**-1 * 1)
                    elif t == 1 and f == 1:
                        assert wgts[bl][t, f] == 0  # flagged autocorrelation zeroes the weight
                    else:
                        assert wgts[bl][t, f] == 2
def test_get_idealized_antpos(self):
    """Test abscal._get_idealized_antpos, which maps antennas into the
    "idealized" coordinate basis defined by the redundant-calibration
    degeneracies (one spatial dimension per tip-tilt degeneracy)."""
    # build 7 element hex with 1 outrigger. If all antennas are unflagged, the outrigger
    # is not redundant with the hex, so it introduces an extra degeneracy. That corresponds
    # to an extra dimension in an idealized antenna position.
    antpos = hex_array(2, split_core=False, outriggers=0)
    antpos[7] = np.array([100, 0, 0])
    reds = redcal.get_reds(antpos, pols=['ee'])
    # test with no flagged antennas
    cal_flags = {(ant, 'Jee'): np.array([False]) for ant in antpos}
    iap = abscal._get_idealized_antpos(cal_flags, antpos, ['ee'], keep_flagged_ants=True)
    assert len(iap) == 8  # all antennas are included
    assert len(iap[0]) == 3  # 3 degeneracies ==> 3 dimensions
    # check that the results are the same as in redcal.reds_to_antpos
    r2a = redcal.reds_to_antpos(reds)
    for ant in r2a:
        np.testing.assert_array_equal(iap[ant], r2a[ant])
    # test with flagged outrigger, which lowers the number of degeneracies
    cal_flags = {(ant, 'Jee'): np.array([False]) for ant in antpos}
    cal_flags[(7, 'Jee')] = True
    iap = abscal._get_idealized_antpos(cal_flags, antpos, ['ee'], keep_flagged_ants=True)
    # because keep_flagged_ants is True, the flagged antenna is still in the antpos dict
    assert len(iap) == 8
    # because the only antenna necessitating a 3rd tip-tilt degeneracy is flagged,
    # get_idealized_antpos enforces that all remaining antenna positions are expressed in 2D
    assert len(iap[0]) == 2
    r2a = redcal.reds_to_antpos(redcal.filter_reds(reds, ex_ants=[7]))
    for ant in r2a:
        np.testing.assert_array_equal(iap[ant], r2a[ant])
    # because there's no sensible way to describe the antenna's position in this basis, set it to 0
    assert np.all(iap[7] == 0)
    # test with flagged grid ant, which does not affect the number of degeneracies
    cal_flags = {(ant, 'Jee'): np.array([False]) for ant in antpos}
    cal_flags[(1, 'Jee')] = True
    iap = abscal._get_idealized_antpos(cal_flags, antpos, ['ee'], keep_flagged_ants=True)
    assert len(iap) == 8  # all antennas included
    # removing an on-grid antenna but keeping the outrigger doesn't change the number of degeneracies
    assert len(iap[0]) == 3
    # test that the flagged antenna has the position it would have had it if weren't flagged
    r2a = redcal.reds_to_antpos(reds)
    for ant in r2a:
        np.testing.assert_array_equal(iap[ant], r2a[ant])
    # test keep_flagged_ants=False
    cal_flags = {(ant, 'Jee'): np.array([False]) for ant in antpos}
    cal_flags[(1, 'Jee')] = True
    iap = abscal._get_idealized_antpos(cal_flags, antpos, ['ee'], keep_flagged_ants=False)
    assert 1 not in iap  # flagged antenna dropped entirely
    assert len(iap) == 7
    # test error when an antenna is somehow in the cal_flags (unflagged) but not antpos or the derived reds
    antpos2 = hex_array(2, split_core=False, outriggers=0)
    cal_flags = {(ant, 'Jee'): np.array([False]) for ant in antpos2}
    # remove antenna 0
    del antpos2[0]
    with pytest.raises(ValueError):
        iap = abscal._get_idealized_antpos(cal_flags, antpos2, ['ee'])
    # test error where an antenna has non-zero weight, but doesn't appear in cal_flags
    data_wgts = {bl: np.array([1]) for red in reds for bl in red}
    cal_flags = {(ant, 'Jee'): np.array([False]) for ant in antpos}
    cal_flags[(7, 'Jee')] = True
    with pytest.raises(ValueError):
        iap = abscal._get_idealized_antpos(cal_flags, antpos, ['ee'], data_wgts=data_wgts)
    # test error where antenna with non-zero weight is getting placed at position 0
    # NOTE(review): cal_flags here is built from antpos2 (which no longer contains
    # antenna 0) while the full antpos is passed to the call — presumably intentional
    # to trigger this error path; confirm against _get_idealized_antpos's checks
    cal_flags = {(ant, 'Jee'): np.array([False]) for ant in antpos2}
    cal_flags[7, 'Jee'] = True
    with pytest.raises(ValueError):
        iap = abscal._get_idealized_antpos(cal_flags, antpos, ['ee'], data_wgts=data_wgts)
def test_post_redcal_abscal(self):
    """Test abscal.post_redcal_abscal on real data/model files.

    Builds the data/model baseline match, rephases the model to the data
    LSTs, applies the redcal gains, then checks the shape/dtype of the
    returned delta gains and that disabling the amplitude solvers yields
    unit-amplitude (phase-only) gains.
    """
    # setup
    hd = io.HERAData(self.data_file)
    hdm = io.HERAData(self.model_files)
    hc = io.HERACal(self.redcal_file)
    # flatten per-file baseline lists / antenna positions from the multi-file model
    model_bls = list(set([bl for bls in list(hdm.bls.values()) for bl in bls]))
    model_antpos = {ant: pos for antpos in hdm.antpos.values() for ant, pos in antpos.items()}
    (data_bl_to_load,
     model_bl_to_load,
     data_to_model_bl_map) = abscal.match_baselines(hd.bls, model_bls, hd.antpos, model_antpos=model_antpos, pols=['ee', 'nn'], min_bl_cut=1.0)
    rc_gains, rc_flags, rc_quals, rc_tot_qual = hc.read()
    # map each data time to the closest-in-LST model time
    all_data_times, all_data_lsts = abscal.get_all_times_and_lsts(hd)
    all_model_times, all_model_lsts = abscal.get_all_times_and_lsts(hdm)
    d2m_time_map = abscal.get_d2m_time_map(all_data_times, all_data_lsts, all_model_times, all_model_lsts)
    tinds = [0, 1, 2]  # only test the first three integrations
    data, flags, nsamples = hd.read(times=hd.times[tinds], bls=data_bl_to_load)
    model_times_to_load = [d2m_time_map[time] for time in hd.times[tinds]]
    model, model_flags, _ = io.partial_time_io(hdm, model_times_to_load, bls=model_bl_to_load)
    model_bls = {bl: model.antpos[bl[0]] - model.antpos[bl[1]] for bl in model.keys()}
    # rephase the model to the data LSTs before comparing
    utils.lst_rephase(model, model_bls, model.freqs, data.lsts - model.lsts,
                      lat=hdm.telescope.location.lat.deg, inplace=True)
    # flags are the union of data and model flags
    for k in flags.keys():
        if k in model_flags:
            flags[k] += model_flags[k]
    data_ants = set([ant for bl in data.keys() for ant in utils.split_bl(bl)])
    # restrict redcal gains/flags to the loaded times and antennas
    rc_gains_subset = {k: rc_gains[k][tinds, :] for k in data_ants}
    rc_flags_subset = {k: rc_flags[k][tinds, :] for k in data_ants}
    calibrate_in_place(data, rc_gains_subset, data_flags=flags,
                       cal_flags=rc_flags_subset, gain_convention=hc.gain_convention)
    wgts = DataContainer({k: (~flags[k]).astype(float) for k in flags.keys()})
    # run function
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        delta_gains = abscal.post_redcal_abscal(model, copy.deepcopy(data), wgts, rc_flags_subset, verbose=False)
    # use returned gains to calibrate data
    calibrate_in_place(data, delta_gains, data_flags=flags,
                       cal_flags=rc_flags_subset, gain_convention=hc.gain_convention)
    # basic shape & type checks
    for k in rc_gains.keys():
        assert k in delta_gains
        assert delta_gains[k].shape == (3, rc_gains[k].shape[1])
        assert delta_gains[k].dtype == complex
    # try running without amplitude solvers
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        delta_gains = abscal.post_redcal_abscal(model, copy.deepcopy(data), wgts, rc_flags_subset, verbose=False,
                                                use_abs_amp_logcal=False, use_abs_amp_lincal=False)
    # with amplitude solvers disabled, the returned gains should be phase-only
    for k in delta_gains:
        np.testing.assert_array_almost_equal(np.abs(delta_gains[k]), 1)
@pytest.mark.filterwarnings("ignore:not set or are being overwritten")
def test_post_redcal_abscal_run_units_warning(self, tmpdir):
    """Check that a redcal gain_scale that disagrees with the model's
    vis_units triggers a RuntimeWarning and that the model's units win."""
    outdir = tmpdir.strpath
    units_calfile = os.path.join(outdir, 'redcal_units.calfits')
    units_model_file = os.path.join(outdir, 'model_file_units.uvh5')
    # write a model file whose visibilities are labeled as Jy
    model_hd = io.HERAData(self.model_files[0])
    model_hd.read()
    model_hd.vis_units = 'Jy'
    model_hd.write_uvh5(units_model_file)
    # write a redcal calibration whose gains carry a conflicting unit label
    redcal_hc = io.HERACal(self.redcal_file)
    redcal_hc.read()
    redcal_hc.gain_scale = 'k str'
    redcal_hc.write_calfits(units_calfile)
    # running abscal should warn about the clash and adopt the model's units
    with pytest.warns(RuntimeWarning, match='Overwriting redcal gain_scale of k str with model gain_scale of Jy'), \
            warnings.catch_warnings():
        warnings.filterwarnings("ignore", message="All-NaN slice encountered")
        hca = abscal.post_redcal_abscal_run(self.data_file, units_calfile, [units_model_file], phs_conv_crit=1e-4,
                                            nInt_to_load=30, verbose=False, add_to_history='testing')
    assert hca.gain_scale == 'Jy'
def test_post_redcal_abscal_run(self, tmpdir):
    """End-to-end test of abscal.post_redcal_abscal_run covering four cases:
    (1) no data/model LST overlap (output fully flagged, gains unchanged),
    (2) normal operation with one missing model integration,
    (3) redundant model with full data, and
    (4) redundant model with redundant data, including delta-gain output.
    """
    tmp_path = tmpdir.strpath
    output_file_delta = os.path.join(tmp_path, 'delta_gains.calfits')
    # test no model overlap
    hcr = io.HERACal(self.redcal_file)
    rc_gains, rc_flags, rc_quals, rc_total_qual = hcr.read()
    hd = io.HERAData(self.model_files[0])
    hd.read(return_data=False)
    hd.lst_array += 1  # shift LSTs so the model no longer overlaps the data
    temp_outfile = os.path.join(DATA_PATH, 'test_output/temp.uvh5')
    with warnings.catch_warnings():
        warnings.filterwarnings("ignore", message="The lst_array is not self-consistent with the time_array")
        warnings.filterwarnings("ignore", message="The uvw_array does not match")
        hd.write_uvh5(temp_outfile, clobber=True)
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        hca = abscal.post_redcal_abscal_run(self.data_file, self.redcal_file, [temp_outfile], phs_conv_crit=1e-4,
                                            nInt_to_load=30, verbose=False, add_to_history='testing')
    assert os.path.exists(self.redcal_file.replace('.omni.', '.abs.'))
    # with no overlap, everything is flagged and the redcal gains pass through unchanged
    np.testing.assert_array_equal(hca.total_quality_array, 0.0)
    np.testing.assert_array_equal(hca.gain_array, hcr.gain_array)
    np.testing.assert_array_equal(hca.flag_array, True)
    np.testing.assert_array_equal(hca.quality_array, 0.0)
    os.remove(self.redcal_file.replace('.omni.', '.abs.'))
    os.remove(temp_outfile)
    # test normal operation of abscal (with one missing integration, to test assigning multiple data times to one model time and then rephasing)
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        hca = abscal.post_redcal_abscal_run(self.data_file, self.redcal_file, self.model_files_missing_one_int, extrap_limit=1.0,
                                            phs_conv_crit=1e-4, nInt_to_load=30, verbose=False, add_to_history='testing')
    # rerunning without clobber should fail because the output already exists
    pytest.raises(IOError, abscal.post_redcal_abscal_run, self.data_file, self.redcal_file, self.model_files, clobber=False)
    assert os.path.exists(self.redcal_file.replace('.omni.', '.abs.'))
    os.remove(self.redcal_file.replace('.omni.', '.abs.'))
    ac_gains, ac_flags, ac_quals, ac_total_qual = hca.build_calcontainers()
    hdm = io.HERAData(self.model_files_missing_one_int)
    assert hca.gain_scale == hdm.vis_units  # gain units inherited from the model
    assert hcr.history.replace('\n', '').replace(' ', '') in hca.history.replace('\n', '').replace(' ', '')
    assert 'testing' in hca.history.replace('\n', '').replace(' ', '')
    for k in rc_gains:
        assert k in ac_gains
        assert ac_gains[k].shape == rc_gains[k].shape
        assert ac_gains[k].dtype == complex
    hd = io.HERAData(self.data_file)
    _, data_flags, _ = hd.read()
    # expected flags: per-antenna data flags + redcal flags + fully-flagged waterfall pixels
    ac_flags_expected = synthesize_ant_flags(data_flags)
    ac_flags_waterfall = np.all([f for f in ac_flags.values()], axis=0)
    for ant in ac_flags_expected:
        ac_flags_expected[ant] += rc_flags[ant]
        ac_flags_expected[ant] += ac_flags_waterfall
    for k in rc_flags:
        assert k in ac_flags
        assert ac_flags[k].shape == rc_flags[k].shape
        assert ac_flags[k].dtype == bool
        np.testing.assert_array_equal(ac_flags[k], ac_flags_expected[k])
    assert not np.all(list(ac_flags.values()))  # at least some data survives unflagged
    for pol in ['Jee', 'Jnn']:
        assert pol in ac_total_qual
        assert ac_total_qual[pol].shape == rc_total_qual[pol].shape
        assert np.issubdtype(ac_total_qual[pol].dtype, np.floating)
    # test redundant model and full data
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        hca_red = abscal.post_redcal_abscal_run(self.data_file, self.redcal_file, self.red_model_files, phs_conv_crit=1e-4,
                                                nInt_to_load=10, verbose=False, add_to_history='testing2', model_is_redundant=True)
    assert os.path.exists(self.redcal_file.replace('.omni.', '.abs.'))
    os.remove(self.redcal_file.replace('.omni.', '.abs.'))
    ac_gains, ac_flags, ac_quals, ac_total_qual = hca_red.build_calcontainers()
    hcr = io.HERACal(self.redcal_file)
    rc_gains, rc_flags, rc_quals, rc_total_qual = hcr.read()
    assert hcr.history.replace('\n', '').replace(' ', '') in hca_red.history.replace('\n', '').replace(' ', '')
    assert 'testing2' in hca_red.history.replace('\n', '').replace(' ', '')
    for k in rc_gains:
        assert k in ac_gains
        assert ac_gains[k].shape == rc_gains[k].shape
        assert ac_gains[k].dtype == complex
    hd = io.HERAData(self.data_file)
    _, data_flags, _ = hd.read()
    ac_flags_expected = synthesize_ant_flags(data_flags)
    ac_flags_waterfall = np.all([f for f in ac_flags.values()], axis=0)
    for ant in ac_flags_expected:
        ac_flags_expected[ant] += rc_flags[ant]
        ac_flags_expected[ant] += ac_flags_waterfall
    for k in rc_flags:
        assert k in ac_flags
        assert ac_flags[k].shape == rc_flags[k].shape
        assert ac_flags[k].dtype == bool
        np.testing.assert_array_equal(ac_flags[k], ac_flags_expected[k])
    assert not np.all(list(ac_flags.values()))
    for pol in ['Jee', 'Jnn']:
        assert pol in ac_total_qual
        assert ac_total_qual[pol].shape == rc_total_qual[pol].shape
        assert np.issubdtype(ac_total_qual[pol].dtype, np.floating)
    # test redundant model and redundant data
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        hca_red_red = abscal.post_redcal_abscal_run(self.red_data_file, self.redcal_file, self.red_model_files, phs_conv_crit=1e-4,
                                                    nInt_to_load=10, verbose=False, add_to_history='testing3', model_is_redundant=True,
                                                    data_is_redsol=True, raw_auto_file=self.data_file,
                                                    write_delta_gains=True, output_file_delta=output_file_delta)
    hdm = io.HERAData(self.red_model_files)
    assert hca_red_red.gain_scale == hdm.vis_units
    assert os.path.exists(self.redcal_file.replace('.omni.', '.abs.'))
    hcat = io.HERACal(self.redcal_file.replace('.omni.', '.abs.'))
    hcat.read()
    os.remove(self.redcal_file.replace('.omni.', '.abs.'))
    ac_gains, ac_flags, ac_quals, ac_total_qual = hca_red_red.build_calcontainers()
    hcr = io.HERACal(self.redcal_file)
    rc_gains, rc_flags, rc_quals, rc_total_qual = hcr.read()
    assert os.path.exists(output_file_delta)
    hcg = io.HERACal(output_file_delta)
    hcg.read()
    # ensure that unflagged redundant gains times degenerate gains equal
    # abscal gains.
    assert np.allclose(hcat.gain_array[~hcat.flag_array],
                       hcr.gain_array[~hcat.flag_array] * hcg.gain_array[~hcat.flag_array])
    assert hcr.history.replace('\n', '').replace(' ', '') in hca_red_red.history.replace('\n', '').replace(' ', '')
    assert 'testing3' in hca_red_red.history.replace('\n', '').replace(' ', '')
    for k in rc_gains:
        assert k in ac_gains
        assert ac_gains[k].shape == rc_gains[k].shape
        assert ac_gains[k].dtype == complex
    hd = io.HERAData(self.data_file)
    _, data_flags, _ = hd.read()
    ac_flags_expected = synthesize_ant_flags(data_flags)
    ac_flags_waterfall = np.all([f for f in ac_flags.values()], axis=0)
    for ant in ac_flags_expected:
        ac_flags_expected[ant] += rc_flags[ant]
        ac_flags_expected[ant] += ac_flags_waterfall
    for k in rc_flags:
        assert k in ac_flags
        assert ac_flags[k].shape == rc_flags[k].shape
        assert ac_flags[k].dtype == bool
        np.testing.assert_array_equal(ac_flags[k], ac_flags_expected[k])
    assert not np.all(list(ac_flags.values()))
    for pol in ['Jee', 'Jnn']:
        assert pol in ac_total_qual
        assert ac_total_qual[pol].shape == rc_total_qual[pol].shape
        assert np.issubdtype(ac_total_qual[pol].dtype, np.floating)
    # compare all 3 versions
    g1, f1, q1, tq1 = hca.build_calcontainers()
    g2, f2, q2, tq2 = hca_red.build_calcontainers()
    g3, f3, q3, tq3 = hca_red_red.build_calcontainers()
    for ant in f1:
        np.testing.assert_array_equal(f1[ant], f2[ant])
        np.testing.assert_array_equal(f1[ant], f3[ant])
    # unflagged gain amplitudes should agree across versions to ~10%
    for ant in g1:
        if not np.all(f1[ant]):
            assert np.abs(np.median(np.abs(g1[ant][~f1[ant]] / g2[ant][~f2[ant]])) - 1) < .1
            assert np.abs(np.median(np.abs(g1[ant][~f1[ant]] / g3[ant][~f3[ant]])) - 1) < .1
    for ant in q1:
        np.testing.assert_array_equal(q1[ant], 0.0)
        np.testing.assert_array_equal(q2[ant], 0.0)
        np.testing.assert_array_equal(q3[ant], 0.0)
def test_post_redcal_abscal_argparser(self):
    """Exercise the post_redcal_abscal argparser on a synthetic sys.argv."""
    sys.argv = [sys.argv[0], 'a', 'b', 'c', 'd', '--nInt_to_load', '6', '--verbose']
    parser = abscal.post_redcal_abscal_argparser()
    parsed = parser.parse_args()
    # positionals: data file, redcal file, then one or more model files
    assert parsed.data_file == 'a'
    assert parsed.redcal_file == 'b'
    assert parsed.model_files[0] == 'c'
    assert parsed.model_files[1] == 'd'
    assert len(parsed.model_files) == 2
    assert isinstance(parsed.model_files, list)
    # optional flags
    assert parsed.nInt_to_load == 6
    assert parsed.verbose is True
def test_multiply_gains_argparser(self):
    """Exercise the multiply_gains argparser on a synthetic sys.argv."""
    sys.argv = [sys.argv[0], 'a', 'b', 'c', '--clobber']
    parsed = abscal.multiply_gains_argparser().parse_args()
    # positionals: two input gain files and the output file
    assert parsed.gain_file_1 == 'a'
    assert parsed.gain_file_2 == 'b'
    assert parsed.output_file == 'c'
    # optional flags
    assert parsed.clobber
    assert parsed.divide_gains is False
|
HERA-TeamREPO_NAMEhera_calPATH_START.@hera_cal_extracted@hera_cal-main@hera_cal@tests@test_abscal.py@.PATH_END.py
|
{
"filename": "authurls.py",
"repo_name": "astropy/pyvo",
"repo_path": "pyvo_extracted/pyvo-main/pyvo/auth/authurls.py",
"type": "Python"
}
|
import collections
import logging
from . import securitymethods
__all__ = ["AuthURLs"]
class AuthURLs():
    """
    Tracks which security method applies to which URL.

    Mappings are learned from VOSI capabilities via
    update_from_capabilities, or registered directly with
    add_security_method_for_url, and queried with allowed_auth_methods.
    """

    def __init__(self):
        # URL -> set of security-method URIs, split by match mode:
        # full_urls require an exact match, base_urls match as prefixes.
        self.full_urls = collections.defaultdict(set)
        self.base_urls = collections.defaultdict(set)

    def update_from_capabilities(self, capabilities):
        """
        Learn URL-to-security-method mappings from VOSI capabilities.

        Parameters
        ----------
        capabilities : object
            List of `~pyvo.io.vosi.voresource.Capability`
        """
        for capability in capabilities:
            for interface in capability.interfaces:
                for accessurl in interface.accessurls:
                    url = accessurl.content
                    is_exact = accessurl.use == 'full'
                    # no declared security method means anonymous access
                    if not interface.securitymethods:
                        self.add_security_method_for_url(url, securitymethods.ANONYMOUS, is_exact)
                    for sm in interface.securitymethods:
                        method = sm.standardid or securitymethods.ANONYMOUS
                        self.add_security_method_for_url(url, method, is_exact)

    def add_security_method_for_url(self, url, security_method, exact=False):
        """
        Register a security method for a URL.

        Additive with update_from_capabilities; useful for security
        methods that the capabilities do not declare.

        Parameters
        ----------
        url : str
            URL to set a security method for
        security_method : str
            URI of the security method to set
        exact : bool
            If True, match only this URL. If false, match all URLs that
            match this as a base URL.
        """
        registry = self.full_urls if exact else self.base_urls
        registry[url].add(security_method)

    def allowed_auth_methods(self, url):
        """
        Return the security-method URIs allowed for a particular URL.

        Exact matches take precedence, then the longest matching base
        URL; with no match at all, anonymous access is assumed.

        Parameters
        ----------
        url : str
            the URL to determine authentication methods
        """
        logging.debug('Determining auth method for %s', url)
        if url in self.full_urls:
            exact_methods = self.full_urls[url]
            logging.debug('Matching full url %s, methods %s', url, exact_methods)
            return exact_methods
        for base_url, methods in self._iterate_base_urls():
            if url.startswith(base_url):
                logging.debug('Matching base url %s, methods %s', base_url, methods)
                return methods
        logging.debug('No match, using anonymous auth')
        return {securitymethods.ANONYMOUS}

    def _iterate_base_urls(self):
        """
        Yield (base_url, methods) pairs sorted longest URL first, so the
        most specific base URL wins when several prefixes match.
        """
        by_descending_length = sorted(self.base_urls.items(),
                                      key=lambda entry: len(entry[0]),
                                      reverse=True)
        yield from by_descending_length

    def __repr__(self):
        lines = ['Full match:' + url + ':' + str(methods)
                 for url, methods in self.full_urls.items()]
        lines.extend('Base match:' + url + ':' + str(methods)
                     for url, methods in self._iterate_base_urls())
        return '\n'.join(lines)
|
astropyREPO_NAMEpyvoPATH_START.@pyvo_extracted@pyvo-main@pyvo@auth@authurls.py@.PATH_END.py
|
{
"filename": "test_synthetic_data.py",
"repo_name": "lightkurve/lightkurve",
"repo_path": "lightkurve_extracted/lightkurve-main/tests/test_synthetic_data.py",
"type": "Python"
}
|
"""Use synthetic data to verify lightkurve detrending and signal recovery.
"""
from __future__ import division, print_function
from astropy.utils.data import get_pkg_data_filename
from astropy.timeseries import BoxLeastSquares
import numpy as np
import pytest
from scipy import stats
from lightkurve.targetpixelfile import KeplerTargetPixelFile
from lightkurve.correctors import SFFCorrector, PLDCorrector
# See `data/synthetic/README.md` for details about these synthetic test files.
# Each file bundles a synthetic K2 target pixel file with its injected signal.
filename_synthetic_sine = get_pkg_data_filename("data/synthetic/synthetic-k2-sinusoid.targ.fits.gz")
filename_synthetic_transit = get_pkg_data_filename("data/synthetic/synthetic-k2-planet.targ.fits.gz")
filename_synthetic_flat = get_pkg_data_filename("data/synthetic/synthetic-k2-flat.targ.fits.gz")
def test_sine_sff():
    """Can we recover a synthetic sine curve using SFF and LombScargle?"""
    # The injected signal parameters are stored in the synthetic file's header
    tpf = KeplerTargetPixelFile(filename_synthetic_sine)
    true_period = float(tpf.hdu[3].header["PERIOD"])
    true_amplitude = float(tpf.hdu[3].header["SINE_AMP"])
    # Detrend with SFF, keeping the astrophysical trend in the output
    sff = SFFCorrector(tpf.to_lightcurve())
    corrected = sff.correct(
        tpf.pos_corr2,
        tpf.pos_corr1,
        niters=4,
        windows=1,
        bins=7,
        restore_trend=True,
        timescale=0.5,
    )
    # The Lomb-Scargle period should land within ~20% of the truth
    periodogram = corrected.to_periodogram(
        method="lombscargle", minimum_period=1, maximum_period=10, oversample_factor=10
    )
    recovered_period = periodogram.period_at_max_power.value
    rtol = 0.2
    assert true_period * (1 - rtol) < recovered_period < true_period * (1 + rtol)
    # Weighted least-squares fit of a sinusoid at the recovered period;
    # the fractional amplitude should match the injected value to within 10%
    n_cad = len(tpf.time)
    phase = 2.0 * np.pi * corrected.time.value / recovered_period
    design_matrix = np.column_stack([np.ones(n_cad), np.sin(phase), np.cos(phase)])
    ATA = np.dot(design_matrix.T, design_matrix / corrected.flux_err[:, None] ** 2)
    coeffs = np.linalg.solve(
        ATA, np.dot(design_matrix.T, corrected.flux / corrected.flux_err ** 2)
    )
    const, sin_weight, cos_weight = coeffs
    fractional_amplitude = (sin_weight ** 2 + cos_weight ** 2) ** 0.5 / const
    assert true_amplitude / 1.1 < fractional_amplitude < true_amplitude * 1.1
def test_transit_sff():
    """Can we recover a synthetic exoplanet signal using SFF and BLS?"""
    # The injected signal parameters live in the synthetic file's header
    tpf = KeplerTargetPixelFile(filename_synthetic_transit)
    true_period = float(tpf.hdu[3].header["PERIOD"])
    true_rprs = float(tpf.hdu[3].header["RPRS"])
    max_depth = 1 - np.min(tpf.hdu[3].data["NOISELESS_INPUT"])
    # Detrend with SFF, this time removing the trend before the BLS search
    sff = SFFCorrector(tpf.to_lightcurve().normalize())
    corrected = sff.correct(
        tpf.pos_corr2,
        tpf.pos_corr1,
        niters=4,
        windows=1,
        bins=7,
        restore_trend=False,
        timescale=0.5,
    )
    # The BLS period should land within 5% of the truth
    periodogram = corrected.to_periodogram(
        method="bls",
        minimum_period=1,
        maximum_period=9,
        frequency_factor=0.05,
        duration=np.arange(0.1, 0.6, 0.1),
    )
    recovered_period = periodogram.period_at_max_power.value
    rtol = 0.05
    assert true_period * (1 - rtol) < recovered_period < true_period * (1 + rtol)
    # Recovered depth should lie between (Rp/Rs)^2 and the maximum injected depth
    assert true_rprs ** 2 <= periodogram.depth_at_max_power < max_depth
def test_transit_pld():
    """Can we recover a synthetic exoplanet signal using PLD and BLS?"""
    # The injected signal parameters live in the synthetic file's header
    tpf = KeplerTargetPixelFile(filename_synthetic_transit)
    true_period = float(tpf.hdu[3].header["PERIOD"])
    true_rprs = float(tpf.hdu[3].header["RPRS"])
    max_depth = 1 - np.min(tpf.hdu[3].data["NOISELESS_INPUT"])
    # Both BLS searches below use identical settings
    bls_kwargs = dict(
        method="bls",
        minimum_period=1,
        maximum_period=9,
        frequency_factor=0.05,
        duration=np.arange(0.1, 0.6, 0.1),
    )
    # First PLD pass: locate candidate transits
    corrector = PLDCorrector(tpf)
    corrected = corrector.correct()
    periodogram = corrected.to_periodogram(**bls_kwargs)
    # Second PLD pass: mask the suspected transits so they don't bias the fit
    corrected = corrector.correct(cadence_mask=~periodogram.get_transit_mask()).normalize()
    periodogram = corrected.to_periodogram(**bls_kwargs)
    # The BLS period should land within ~5% of the truth
    recovered_period = periodogram.period_at_max_power.value
    rtol = 0.05
    assert true_period * (1 - rtol) < recovered_period < true_period * (1 + rtol)
    # Recovered depth should lie between (Rp/Rs)^2 and the maximum injected depth
    assert true_rprs ** 2 <= periodogram.depth_at_max_power < max_depth
def test_sine_pld():
    """Can we recover a synthetic sine wave using PLD and LombScargle?"""
    # The injected signal parameters are stored in the synthetic file's header
    tpf = KeplerTargetPixelFile(filename_synthetic_sine)
    true_period = float(tpf.hdu[3].header["PERIOD"])
    true_amplitude = float(tpf.hdu[3].header["SINE_AMP"])
    # Detrend with PLD
    corrected = tpf.to_corrector("pld").correct()
    # The Lomb-Scargle period should land within ~20% of the truth
    periodogram = corrected.to_periodogram(
        method="lombscargle", minimum_period=1, maximum_period=10, oversample_factor=10
    )
    recovered_period = periodogram.period_at_max_power.value
    rtol = 0.2
    assert true_period * (1 - rtol) < recovered_period < true_period * (1 + rtol)
    # Weighted least-squares fit of a sinusoid at the recovered period;
    # the fractional amplitude should match the injected value to within 10%
    n_cad = len(tpf.time)
    phase = 2.0 * np.pi * corrected.time.value / recovered_period
    design_matrix = np.column_stack([np.ones(n_cad), np.sin(phase), np.cos(phase)])
    ATA = np.dot(design_matrix.T, design_matrix / corrected.flux_err[:, None] ** 2)
    coeffs = np.linalg.solve(
        ATA, np.dot(design_matrix.T, corrected.flux / corrected.flux_err ** 2)
    )
    const, sin_weight, cos_weight = coeffs
    fractional_amplitude = (sin_weight ** 2 + cos_weight ** 2) ** 0.5 / const
    assert true_amplitude / 1.1 < fractional_amplitude < true_amplitude * 1.1
def test_detrending_residuals():
    """Test the detrending residual distributions"""
    tpf = KeplerTargetPixelFile(filename_synthetic_flat)
    raw_lc = tpf.to_lightcurve()
    # Table 4.1 of Ivezic, Connolly, Vanderplas, Gray 2014
    anderson_threshold = 1.57

    def _check_corrected(corrected):
        # Expect a large reduction in noise (CDPP) relative to the raw light curve
        assert raw_lc.estimate_cdpp() / corrected.estimate_cdpp() > 10.0
        # ...and Gaussian-"ish" residuals with scatter below 2 sigma
        resid_n_sigmas = (corrected.flux - np.mean(corrected.flux)) / corrected.flux_err
        A_value, _, _ = stats.anderson(resid_n_sigmas)
        assert A_value ** 2 < anderson_threshold
        assert np.std(resid_n_sigmas) < 2.0

    # Run the SFF algorithm
    sff = SFFCorrector(raw_lc)
    _check_corrected(sff.correct(
        tpf.pos_corr2, tpf.pos_corr1, niters=10, windows=5, bins=7, restore_trend=True
    ))
    # Run the PLD algorithm
    _check_corrected(tpf.to_corrector("pld").correct(restore_trend=False))
def test_centroids():
    """Test the estimate centroid method."""
    for filename in (
        filename_synthetic_sine,
        filename_synthetic_transit,
        filename_synthetic_flat,
    ):
        tpf = KeplerTargetPixelFile(filename)
        cen_x, cen_y = tpf.estimate_centroids()
        # Compare median-subtracted centroids against the POS_CORR columns
        dx = cen_x - np.median(cen_x)
        dy = cen_y - np.median(cen_y)
        ref_x = tpf.pos_corr2 - np.median(tpf.pos_corr2)
        ref_y = tpf.pos_corr1 - np.median(tpf.pos_corr1)
        separation = np.sqrt((dx.value - ref_x) ** 2 + (dy.value - ref_y) ** 2)
        # The centroids should agree to within a hundredth of a pixel.
        assert np.max(separation) < 0.01
|
lightkurveREPO_NAMElightkurvePATH_START.@lightkurve_extracted@lightkurve-main@tests@test_synthetic_data.py@.PATH_END.py
|
{
"filename": "classification.py",
"repo_name": "samuelperezdi/umlcaxs",
"repo_path": "umlcaxs_extracted/umlcaxs-main/classification.py",
"type": "Python"
}
|
"""Classify CSC detections by Mahalanobis distance to known source classes.

Reads the cluster/SIMBAD crossmatch table, log-normalizes the selected
features, runs the Mahalanobis-distance classifier over the labeled types,
and writes the per-detection classification table.
"""
from umlcaxs_lib import lognorm, mahal_classifier_all
import pandas as pd

# Feature definitions
features = ['hard_hm', 'hard_hs', 'hard_ms', 'powlaw_gamma', 'bb_kt',
            'var_prob_b', 'var_ratio_b', 'var_prob_h', 'var_ratio_h',
            'var_prob_s', 'var_ratio_s', 'var_newq_b']
# Subsets of the features that get log-normalized vs. plainly normalized
features_lognorm = ['bb_kt', 'var_ratio_h', 'var_ratio_b', 'var_ratio_s', 'var_newq_b']
features_norm = ['powlaw_gamma']
# Labeled SIMBAD classes used for classification, and ambiguous/"unknown" types
ltypes = ['QSO', 'AGN', 'Seyfert_1', 'Seyfert_2', 'HMXB', 'LMXB', 'XB', 'YSO', 'TTau*', 'Orion_V*']
uks = ['Star', 'X', 'Radio', 'IR', 'Blue', 'UV', 'gamma', 'PartofG', '**']

# Load data
df_csc_simbad = pd.read_csv('out_data/cluster_csc_simbad.csv', index_col=0)
# Missing SIMBAD types become the literal string 'NaN' — presumably a sentinel
# value the classifier keys on; confirm against umlcaxs_lib.
df_csc_simbad.fillna({'main_type': 'NaN'}, inplace=True)

# Preprocess data: drop rows with missing features, then normalize
df_csc_out = df_csc_simbad.dropna(subset=features)
df_csc_lognorm = lognorm(df_csc_out, features, features_norm, features_lognorm)

# Classification
# (removed a leftover `classified_df.head(10)` whose result was discarded —
# a no-op statement from a notebook conversion)
classified_df = mahal_classifier_all(df_csc_lognorm, df_csc_out, features, ltypes, uks=uks)

# Save results
classified_df.to_csv('./out_data/detection_level_classification.csv')
print('Detections classified.')
|
samuelperezdiREPO_NAMEumlcaxsPATH_START.@umlcaxs_extracted@umlcaxs-main@classification.py@.PATH_END.py
|
{
"filename": "los_pvd_vs_rp.py",
"repo_name": "astropy/halotools",
"repo_path": "halotools_extracted/halotools-master/halotools/mock_observables/pairwise_velocities/los_pvd_vs_rp.py",
"type": "Python"
}
|
r"""
Module containing the `~halotools.mock_observables.los_pvd_vs_rp` function
used to calculate the pairwise line-of-sight velocity dispersion
as a function of projected distance between the pairs.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import numpy as np
from .pairwise_velocities_helpers import (_pairwise_velocity_stats_process_args,
_process_rp_bins)
from .velocity_marked_npairs_xy_z import velocity_marked_npairs_xy_z
__all__ = ('los_pvd_vs_rp', )
__author__ = ['Duncan Campbell']
np.seterr(divide='ignore', invalid='ignore') # ignore divide by zero
def los_pvd_vs_rp(sample1, velocities1, rp_bins, pi_max, sample2=None,
        velocities2=None, period=None, do_auto=True, do_cross=True,
        num_threads=1, approx_cell1_size=None, approx_cell2_size=None):
    r"""
    Calculate the pairwise line-of-sight (LOS) velocity dispersion (PVD),
    as a function of radial distance from ``sample1`` :math:`\sigma_{z12}(r_p)`.
    Example calls to this function appear in the documentation below.
    Parameters
    ----------
    sample1 : array_like
        Npts1 x 3 numpy array containing 3-D positions of points.
    velocities1 : array_like
        Npts1 x 3 array containing the 3-D components of the velocities.
    rp_bins : array_like
        array of boundaries defining the radial bins perpendicular to the LOS in which
        pairs are counted.
        Length units are comoving and assumed to be in Mpc/h, here and throughout Halotools.
    pi_max : float
        maximum LOS separation
        Length units are comoving and assumed to be in Mpc/h, here and throughout Halotools.
    sample2 : array_like, optional
        Npts2 x 3 array containing 3-D positions of points.
    velocities2 : array_like, optional
        Npts2 x 3 array containing the 3-D components of the velocities.
    period : array_like, optional
        length 3 array defining periodic boundary conditions. If only
        one number, Lbox, is specified, period is assumed to be [Lbox, Lbox, Lbox].
    do_auto : boolean, optional
        calculate the auto-pairwise velocities?
    do_cross : boolean, optional
        calculate the cross-pairwise velocities?
    num_threads : int, optional
        number of threads to use in calculation. Default is 1. A string 'max' may be used
        to indicate that the pair counters should use all available cores on the machine.
    approx_cell1_size : array_like, optional
        Length-3 performance hint forwarded to the underlying pair counter
        (controls tree subdivision only; does not affect the results).
    approx_cell2_size : array_like, optional
        Analogous to ``approx_cell1_size``, but for ``sample2``.
    Returns
    -------
    sigma : numpy.array or tuple(numpy.arrays)
        Each numpy.array is a *len(rbins)-1* length array containing the dispersion
        of the pairwise velocity, :math:`\sigma_{12}(r)`, computed in each of the bins
        defined by ``rbins``.
        If sample2 is None, returns :math:`\sigma_{11}(r)`
        If ``do_auto`` and ``do_cross`` are True, returns (:math:`\sigma_{11}(r)`, :math:`\sigma_{12}(r)`, :math:`\sigma_{22}(r)`)
        If only ``do_auto`` is True, returns (:math:`\sigma_{11}(r)`, :math:`\sigma_{22}(r)`)
        If only ``do_cross`` is True, returns :math:`\sigma_{12}(r)`
    Notes
    -----
    The pairwise LOS velocity, :math:`v_{z12}(r)`, is defined as:
    .. math::
        v_{z12} = |\vec{v}_{\rm 1, pec}\cdot \hat{z}-\vec{v}_{\rm 2, pec}\cdot\hat{z}|
    where :math:`\vec{v}_{\rm 1, pec}` is the peculiar velocity of object 1, and
    :math:`\hat{z}` is the unit-z vector.
    :math:`\sigma_{z12}(r_p)` is the standard deviation of this quantity in
    projected radial bins.
    Pairs and radial velocities are calculated using
    `~halotools.mock_observables.pair_counters.velocity_marked_npairs_xy_z`.
    Examples
    --------
    For demonstration purposes we create a randomly distributed set of points within a
    periodic unit cube.
    >>> from halotools.mock_observables import los_pvd_vs_rp
    >>> Npts = 1000
    >>> Lbox = 1.0
    >>> period = np.array([Lbox,Lbox,Lbox])
    >>> x = np.random.random(Npts)
    >>> y = np.random.random(Npts)
    >>> z = np.random.random(Npts)
    We transform our *x, y, z* points into the array shape used by the pair-counter by
    taking the transpose of the result of `numpy.vstack`. This boilerplate transformation
    is used throughout the `~halotools.mock_observables` sub-package:
    >>> coords = np.vstack((x,y,z)).T
    We will do the same to get a random set of peculiar velocities.
    >>> vx = np.random.random(Npts)
    >>> vy = np.random.random(Npts)
    >>> vz = np.random.random(Npts)
    >>> velocities = np.vstack((vx,vy,vz)).T
    >>> rp_bins = np.logspace(-2,-1,10)
    >>> pi_max = 0.3
    >>> sigmaz_12 = los_pvd_vs_rp(coords, velocities, rp_bins, pi_max, period=period)
    >>> x2 = np.random.random(Npts)
    >>> y2 = np.random.random(Npts)
    >>> z2 = np.random.random(Npts)
    >>> coords2 = np.vstack((x2,y2,z2)).T
    >>> vx2 = np.random.random(Npts)
    >>> vy2 = np.random.random(Npts)
    >>> vz2 = np.random.random(Npts)
    >>> velocities2 = np.vstack((vx2,vy2,vz2)).T
    >>> sigmaz_12 = los_pvd_vs_rp(coords, velocities, rp_bins, pi_max, period=period, sample2=coords2, velocities2=velocities2)
    """
    # process input arguments
    # The trailing None is the marks slot of the shared argument-processing
    # helper (marks are built locally below instead).
    function_args = (sample1, velocities1, sample2, velocities2, period,
        do_auto, do_cross, num_threads,
        approx_cell1_size, approx_cell2_size, None)
    sample1, velocities1, sample2, velocities2,\
        period, do_auto, do_cross,\
        num_threads, _sample1_is_sample2, PBCs =\
        _pairwise_velocity_stats_process_args(*function_args)
    rp_bins, pi_max = _process_rp_bins(rp_bins, pi_max, period, PBCs)
    # A single LOS bin [0, pi_max]; the np.diff(..., axis=1) calls below
    # collapse this axis so results are per-rp-bin only.
    pi_bins = np.array([0.0, pi_max])
    # calculate velocity difference scale
    # NOTE(review): ``velocities1[2, :]`` selects the 3-vector of the *third
    # point*, not the z-components of all points (that would be
    # ``velocities1[:, 2]``); since this value is only used as a constant
    # shift for numerical stability in _shifted_std below, any finite value
    # is mathematically valid — but confirm the intended index order.
    std_v1 = np.sqrt(np.std(velocities1[2, :]))
    std_v2 = np.sqrt(np.std(velocities2[2, :]))
    # build the marks.
    # Each mark row packs (x, y, z, vx, vy, vz, shift) for one point; the
    # pair counter consumes these via weight_func_id below.
    shift1 = np.repeat(std_v1, len(sample1))
    shift2 = np.repeat(std_v2, len(sample2))
    marks1 = np.vstack((sample1.T, velocities1.T, shift1)).T
    marks2 = np.vstack((sample2.T, velocities2.T, shift2)).T
    def marked_pair_counts(sample1, sample2, rp_bins, pi_bins, period, num_threads,
            do_auto, do_cross, marks1, marks2,
            weight_func_id, _sample1_is_sample2, approx_cell1_size, approx_cell2_size):
        """
        Count velocity weighted data pairs.
        Returns, for the 1-1, 1-2, and 2-2 combinations requested, the
        per-rp-bin weighted sums D (velocity sums), S (squared sums) and
        raw pair counts N; entries not requested are None.
        """
        if do_auto is True:
            D1D1, S1S1, N1N1 = velocity_marked_npairs_xy_z(
                sample1, sample1, rp_bins, pi_bins,
                weights1=marks1, weights2=marks1, weight_func_id=weight_func_id,
                period=period, num_threads=num_threads,
                approx_cell1_size=approx_cell1_size,
                approx_cell2_size=approx_cell1_size)
            # Collapse the (single) pi bin, then difference cumulative rp
            # counts into per-bin counts.
            D1D1 = np.diff(D1D1, axis=1)[:, 0]
            D1D1 = np.diff(D1D1)
            S1S1 = np.diff(S1S1, axis=1)[:, 0]
            S1S1 = np.diff(S1S1)
            N1N1 = np.diff(N1N1, axis=1)[:, 0]
            N1N1 = np.diff(N1N1)
        else:
            D1D1 = None
            D2D2 = None
            N1N1 = None
            N2N2 = None
            S1S1 = None
            S2S2 = None
        if _sample1_is_sample2:
            # Auto-correlation case: every combination equals the 1-1 result.
            D1D2 = D1D1
            D2D2 = D1D1
            N1N2 = N1N1
            N2N2 = N1N1
            S1S2 = S1S1
            S2S2 = S1S1
        else:
            if do_cross is True:
                D1D2, S1S2, N1N2 = velocity_marked_npairs_xy_z(
                    sample1, sample2, rp_bins, pi_bins,
                    weights1=marks1, weights2=marks2,
                    weight_func_id=weight_func_id, period=period, num_threads=num_threads,
                    approx_cell1_size=approx_cell1_size,
                    approx_cell2_size=approx_cell2_size)
                D1D2 = np.diff(D1D2, axis=1)[:, 0]
                D1D2 = np.diff(D1D2)
                S1S2 = np.diff(S1S2, axis=1)[:, 0]
                S1S2 = np.diff(S1S2)
                N1N2 = np.diff(N1N2, axis=1)[:, 0]
                N1N2 = np.diff(N1N2)
            else:
                D1D2 = None
                N1N2 = None
                S1S2 = None
            if do_auto is True:
                D2D2, S2S2, N2N2 = velocity_marked_npairs_xy_z(
                    sample2, sample2, rp_bins, pi_bins,
                    weights1=marks2, weights2=marks2,
                    weight_func_id=weight_func_id, period=period, num_threads=num_threads,
                    approx_cell1_size=approx_cell2_size,
                    approx_cell2_size=approx_cell2_size)
                D2D2 = np.diff(D2D2, axis=1)[:, 0]
                D2D2 = np.diff(D2D2)
                S2S2 = np.diff(S2S2, axis=1)[:, 0]
                S2S2 = np.diff(S2S2)
                N2N2 = np.diff(N2N2, axis=1)[:, 0]
                N2N2 = np.diff(N2N2)
            else:
                D2D2 = None
                N2N2 = None
        return D1D1, D1D2, D2D2, S1S1, S1S2, S2S2, N1N1, N1N2, N2N2
    # Weighting-function id understood by velocity_marked_npairs_xy_z;
    # presumably selects the shifted LOS-velocity sum/sum-of-squares
    # accumulation — see that function's documentation to confirm.
    weight_func_id = 4
    V1V1, V1V2, V2V2, S1S1, S1S2, S2S2, N1N1, N1N2, N2N2 = marked_pair_counts(
        sample1, sample2, rp_bins, pi_bins, period,
        num_threads, do_auto, do_cross,
        marks1, marks2, weight_func_id,
        _sample1_is_sample2,
        approx_cell1_size, approx_cell2_size)
    def _shifted_std(N, sum_x, sum_x_sqr):
        """
        calculate the variance
        Uses the shifted-data (sum / sum-of-squares) formula with Bessel's
        correction (N - 1); the constant shift applied in the marks cancels
        out of the variance.
        """
        variance = (sum_x_sqr - (sum_x * sum_x)/N)/(N - 1)
        return np.sqrt(variance)
    # return results
    # Bins with no pairs give NaN/inf dispersions; these are mapped to 0.
    if _sample1_is_sample2:
        sigma_11 = _shifted_std(N1N1, V1V1, S1S1)
        return np.where(np.isfinite(sigma_11), sigma_11, 0.)
    else:
        if (do_auto is True) & (do_cross is True):
            sigma_11 = _shifted_std(N1N1, V1V1, S1S1)
            sigma_12 = _shifted_std(N1N2, V1V2, S1S2)
            sigma_22 = _shifted_std(N2N2, V2V2, S2S2)
            return (np.where(np.isfinite(sigma_11), sigma_11, 0.),
                np.where(np.isfinite(sigma_12), sigma_12, 0.),
                np.where(np.isfinite(sigma_22), sigma_22, 0.))
        elif (do_cross is True):
            sigma_12 = _shifted_std(N1N2, V1V2, S1S2)
            return np.where(np.isfinite(sigma_12), sigma_12, 0.)
        elif (do_auto is True):
            sigma_11 = _shifted_std(N1N1, V1V1, S1S1)
            sigma_22 = _shifted_std(N2N2, V2V2, S2S2)
            return (np.where(np.isfinite(sigma_11), sigma_11, 0.),
                np.where(np.isfinite(sigma_22), sigma_22, 0.))
|
astropyREPO_NAMEhalotoolsPATH_START.@halotools_extracted@halotools-master@halotools@mock_observables@pairwise_velocities@los_pvd_vs_rp.py@.PATH_END.py
|
{
"filename": "utils.py",
"repo_name": "ThomasHelfer/multimodal-supernovae",
"repo_path": "multimodal-supernovae_extracted/multimodal-supernovae-main/src/utils.py",
"type": "Python"
}
|
import os
from typing import List, Optional
from pytorch_lightning import LightningModule, Trainer
from pytorch_lightning.callbacks import Callback
import torch
import numpy as np
from torch.utils.data import DataLoader
from typing import Tuple, List, Dict, Any
from matplotlib import pyplot as plt
from ruamel.yaml import YAML
from sklearn.linear_model import LinearRegression
from sklearn.svm import LinearSVC
from sklearn.neighbors import KNeighborsRegressor, KNeighborsClassifier
from sklearn.metrics import (
f1_score,
precision_score,
accuracy_score,
recall_score,
balanced_accuracy_score,
)
from torch.nn import Module
import pandas as pd
from sklearn.metrics import confusion_matrix
import seaborn as sns
import matplotlib.ticker as ticker
def filter_files(filenames_avail, filenames_to_filter, data_to_filter=None):
    """
    Restrict ``filenames_to_filter`` (and optional parallel data arrays) to
    the entries that also occur in ``filenames_avail``.
    Args:
        filenames_avail (list): Filenames that are available.
        filenames_to_filter (list): Filenames to be filtered.
        data_to_filter (List[np.ndarray]): Arrays aligned with
            ``filenames_to_filter`` to filter with the same mask; the list
            is modified in place.
    Returns:
        inds_filt (np.ndarray): Boolean mask over ``filenames_to_filter``.
        filenames_to_filter (np.ndarray): The surviving filenames.
        data_to_filter: The same list object with each array masked,
            or the original value if no data was given.
    """
    # Boolean mask: True where the filename is among the available ones.
    keep_mask = np.isin(filenames_to_filter, filenames_avail)
    if data_to_filter:
        # Mask each parallel array in place so the caller's list is updated.
        for pos, arr in enumerate(data_to_filter):
            data_to_filter[pos] = arr[keep_mask]
    kept_names = np.array(filenames_to_filter)[keep_mask]
    return keep_mask, kept_names, data_to_filter
def find_indices_in_arrays(st1, st2):
    """
    Find indices of where elements of st1 appear in st2 and indices in st1 of those elements.
    Parameters:
    - st1 (list or array): The list of strings to find in st2.
    - st2 (list or array): The list of strings to search within.
    Returns:
    - tuple of two lists:
        - The first list contains indices indicating where each element of st1 is found in st2.
        - The second list contains the indices in st1 for elements that were found in st2.
    """
    # Map each element of st2 to the index of its FIRST occurrence.  This
    # matches list.index semantics but replaces the previous O(len(st2))
    # scan per element of st1 with an O(1) dict lookup (O(n+m) overall).
    first_pos = {}
    for j, item in enumerate(st2):
        if item not in first_pos:
            first_pos[item] = j
    indices_in_st2 = []
    indices_in_st1 = []
    for idx, item in enumerate(st1):
        j = first_pos.get(item)
        if j is not None:
            indices_in_st2.append(j)
            indices_in_st1.append(idx)
        # items absent from st2 are simply skipped, as before
    return indices_in_st2, indices_in_st1
def get_savedir(args) -> Tuple[str, dict]:
    """
    Return the path to save new plots and models plus the loaded config
    dict, depending on whether training resumes from a checkpoint; the
    config is dumped as ``config.yaml`` inside the chosen save directory.
    Args:
        args: argparse namespace with ``ckpt_path``, ``config_path`` and
            ``runname`` attributes.
    Returns:
        str: path to save new plots and models
        cfg: dict: configuration dictionary
    """
    # Create the run directory tree (idempotent; replaces the previous
    # racy exists()/makedirs() pairs and the redundant local `import os`).
    os.makedirs("analysis/runs", exist_ok=True)
    # Round-trip-capable YAML handler.  It must be bound in BOTH branches:
    # previously it was only created in the non-checkpoint branch, so
    # resuming from a checkpoint raised NameError at the dump below.
    yaml = YAML(typ="rt")
    # save in checkpoint directory if resuming from checkpoint
    # else save in numbered directory if not given runname
    if args.ckpt_path:
        ckpt_dir = os.path.dirname(args.ckpt_path)
        with open(os.path.join(ckpt_dir, "config.yaml")) as cfg_file:
            cfg = YAML(typ="safe").load(cfg_file)
        # Keep the original run untouched: outputs go to a resume/ subdir.
        save_dir = os.path.join(ckpt_dir, "resume/")
        os.makedirs(save_dir, exist_ok=True)
    else:
        with open(args.config_path) as cfg_file:
            cfg = yaml.load(cfg_file)
        if args.runname:
            save_dir = f"./analysis/runs/{args.runname}/"
        else:
            # Pick the next free integer-named run directory.
            dirlist = [
                int(item)
                for item in os.listdir("./analysis/runs/")
                if os.path.isdir(os.path.join("./analysis/runs/", item))
                and item.isnumeric()
            ]
            dirname = str(max(dirlist) + 1) if len(dirlist) > 0 else "0"
            save_dir = os.path.join("./analysis/runs/", dirname)
        os.makedirs(save_dir, exist_ok=True)
    # Persist the effective config alongside the run outputs.
    with open(os.path.join(save_dir, "config.yaml"), "w") as outfile:
        yaml.dump(cfg, outfile)
    return save_dir, cfg
def set_seed(seed: int = 0) -> None:
    """
    Seed every random number generator we rely on (numpy, torch CPU/CUDA,
    Python hashing) so that results are fully reproducible.
    """
    for seeder in (np.random.seed, torch.manual_seed, torch.cuda.manual_seed):
        seeder(seed)
    # Deterministic cuDNN kernels; disable the autotuning heuristic.
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    os.environ["PYTHONHASHSEED"] = str(seed)
    print(f"Random seed: {seed}")
def get_valid_dir(data_dirs: List[str]) -> str:
    """
    Return the first path in ``data_dirs`` that is an existing directory.
    Args:
        data_dirs (List[str]): Candidate directory paths, in priority order.
    Returns:
        str: The first existing directory.
    Raises:
        ValueError: If none of the candidates exists.
    """
    existing = (d for d in data_dirs if os.path.isdir(d))
    try:
        return next(existing)
    except StopIteration:
        raise ValueError("No valid data directory found") from None
class LossTrackingCallback(Callback):
    """Lightning callback that records per-epoch training/validation losses
    and auxiliary metrics (validation AUC, train/val R2) for later plotting."""

    def __init__(self):
        self.train_loss_history = []
        self.val_loss_history = []
        self.epoch_train_loss = []
        self.auc_val_history = []
        self.R2_val_history = []
        self.R2_train_history = []

    def on_train_batch_end(self, trainer, pl_module, outputs, batch, batch_idx):
        # Lightning may hand back the raw loss tensor or a dict holding it.
        batch_loss = outputs["loss"] if isinstance(outputs, dict) else outputs
        self.epoch_train_loss.append(batch_loss.detach().item())

    def on_train_epoch_end(self, trainer, pl_module):
        # Record the mean batch loss for this epoch, then reset the buffer.
        losses = self.epoch_train_loss
        self.train_loss_history.append(sum(losses) / len(losses))
        self.R2_train_history.append(trainer.callback_metrics.get("R2_train"))
        self.epoch_train_loss = []

    def on_validation_epoch_end(self, trainer, pl_module):
        val_loss = trainer.callback_metrics.get("val_loss")
        if val_loss is not None:
            self.val_loss_history.append(val_loss.detach().item())

    def on_validation_end(self, trainer: Trainer, pl_module: LightningModule) -> None:
        metrics = trainer.callback_metrics
        self.R2_val_history.append(metrics.get("R2_val"))
        auc_val = metrics.get("AUC_val")
        auc_val1 = metrics.get("AUC_val1")
        # Truthiness check on purpose: skip when neither metric was logged.
        if not (auc_val or auc_val1):
            return
        if auc_val is None:
            # Only per-head AUCs were logged: average AUC_val1..AUC_val3.
            auc_val = sum(
                metrics.get(f"AUC_val{i}").detach().item() for i in range(1, 4)
            ) / 3
        else:
            auc_val = auc_val.detach().item()
        self.auc_val_history.append(auc_val)
def plot_loss_history(train_loss_history, val_loss_history, path_base="./"):
    """
    Plot the training and validation loss histories and save the figure
    as ``loss_history.png`` under ``path_base``.
    Args:
        train_loss_history (list): Training loss per epoch.
        val_loss_history (list): Validation loss per epoch.
        path_base (str): Directory where the figure is written.
    """
    plt.figure(figsize=(10, 6))
    # (series, label, color, marker) for the two curves.
    curves = [
        (train_loss_history, "Training Loss", "blue", "o"),
        (val_loss_history, "Validation Loss", "red", "x"),
    ]
    for series, label, color, marker in curves:
        plt.plot(series, label=label, color=color, linestyle="-", marker=marker)
    plt.title("Training and Validation Loss Over Epochs")
    plt.xlabel("Epochs")
    plt.ylabel("Loss")
    plt.legend()
    plt.grid(True)
    plt.savefig(os.path.join(path_base, "loss_history.png"))
def cosine_similarity(a, b, temperature=1):
    """
    Cosine similarity between the rows of two tensors, multiplied by
    ``temperature`` and squeezed to drop singleton dimensions.
    Args:
        a (torch.Tensor): First tensor.
        b (torch.Tensor): Second tensor.
        temperature (float): Multiplicative scale applied to the similarity.
    Returns:
        torch.Tensor: Scaled cosine similarities.
    """
    # Normalize rows to unit length, then take all pairwise dot products.
    unit_a = a / a.norm(dim=-1, keepdim=True)
    unit_b = b / b.norm(dim=-1, keepdim=True)
    return (unit_a @ unit_b.T * temperature).squeeze()
def get_embs(
    clip_model: torch.nn.Module,
    dataloader: DataLoader,
    combinations: List[str],
    ret_combs: bool = False,
) -> Tuple[torch.Tensor, torch.Tensor]:
    """
    Computes and concatenates embeddings for different data modalities (images, light curves, spectra)
    from a DataLoader using a specified model. This function allows selection of modalities via a list of combinations.
    Args:
        clip_model (torch.nn.Module): The model used for generating embeddings. It should have methods
            to compute embeddings for the specified modalities.
        dataloader (DataLoader): DataLoader that provides batches of data. Each batch should include data
            for images, magnitudes, times, and masks for light curves and spectral data.
        combinations (List[str]): List of strings specifying which data modalities to compute embeddings for.
            Possible options include 'host_galaxy' for images, 'lightcurve' for light curves,
            and 'spectral' for spectral data.
        ret_combs (bool, optional): If True, returns a tuple of the embeddings and the names of the modalities
            processed. Defaults to False.
    Returns:
        Tuple[torch.Tensor, ...] or Tuple[List[torch.Tensor], np.ndarray]:
            - If ret_combs is False, returns a list of torch.Tensor, each tensor represents concatenated embeddings
              for each modality specified in combinations.
            - If ret_combs is True, returns a tuple containing the list of concatenated embeddings and an array of
              modality names that were included in the combinations and processed.
    """
    # Inference mode: disable dropout/batch-norm updates.
    clip_model.eval()
    # getting device of model
    device = next(clip_model.parameters()).device
    embs_list = [[] for i in range(len(combinations))]
    # gives combination names corresponding each emb in embs_list
    # (the append order below follows this canonical modality order, so
    # embs_list[i] corresponds to combs[i])
    combs_all = ["host_galaxy", "lightcurve", "spectral", "meta"]
    combs = np.array(combs_all)[np.isin(combs_all, combinations)]
    # Iterate through the DataLoader
    for batch in dataloader:
        # Fixed 9-tuple batch layout produced by the project's dataset.
        (
            x_img,
            x_lc,
            t_lc,
            mask_lc,
            x_sp,
            t_sp,
            mask_sp,
            redshift,
            classification,
        ) = batch
        # Move only the tensors actually needed to the model's device.
        if "host_galaxy" in combinations:
            x_img = x_img.to(device)
        if "lightcurve" in combinations:
            x_lc = x_lc.to(device)
            t_lc = t_lc.to(device)
            mask_lc = mask_lc.to(device)
        if "spectral" in combinations:
            x_sp = x_sp.to(device)
            t_sp = t_sp.to(device)
            mask_sp = mask_sp.to(device)
        # Compute embeddings and detach from the computation graph
        with torch.no_grad():
            x = []
            if "host_galaxy" in combinations:
                x.append(clip_model.image_embeddings_with_projection(x_img))
            if "lightcurve" in combinations:
                x.append(
                    clip_model.lightcurve_embeddings_with_projection(
                        x_lc, t_lc, mask_lc
                    )
                )
            if "spectral" in combinations:
                x.append(
                    clip_model.spectral_embeddings_with_projection(x_sp, t_sp, mask_sp)
                )
            if "meta" in combinations:
                # half of the input is the class embedding, the other half is the redshift
                x_meta = torch.concat(
                    [
                        clip_model.class_emb(classification.to(device)).to(device),
                        redshift.unsqueeze(1)
                        .repeat(1, clip_model.len_meta_input // 2)
                        .to(device),
                    ],
                    dim=-1,
                ).to(device)
                x_meta = clip_model.meta_encoder(x_meta)
                x.append(x_meta)
        # Append the results to the lists
        for i in range(len(x)):
            embs_list[i].append(x[i].detach())
    # Concatenate all embeddings into single tensors
    for i in range(len(embs_list)):
        embs_list[i] = torch.cat(embs_list[i], dim=0)
    if not ret_combs:
        return embs_list
    return embs_list, combs
def get_ROC_data(
    embs1: torch.Tensor, embs2: torch.Tensor
) -> Tuple[np.ndarray, np.ndarray]:
    """
    Build an ROC-like retrieval curve from two sets of embeddings.
    For each embedding in ``embs2`` we rank all of ``embs1`` by cosine
    similarity and, per threshold, check whether the matching index falls
    in the top ``threshold`` fraction of the ranking.
    Args:
        embs1 (torch.Tensor): First set of embeddings.
        embs2 (torch.Tensor): Second set of embeddings.
    Returns:
        Tuple[np.ndarray, np.ndarray]: Thresholds in [0, 1] and the
        fraction of correct retrievals at each threshold.
    """
    thresholds = np.linspace(0, 1, 100)
    hits = []
    for src_idx, src_emb in enumerate(embs2):
        sims = cosine_similarity(embs1, src_emb)
        ranking = torch.argsort(sims, descending=True)
        # For each threshold: is the true match within the top fraction?
        hits.append(
            [src_idx in ranking[: int(th * len(ranking))] for th in thresholds]
        )
    fraction_correct = np.sum(hits, axis=0) / len(embs2)
    return thresholds, fraction_correct
def get_AUC(
    embs1: torch.Tensor,
    embs2: torch.Tensor,
) -> Tuple[float, float]:
    """
    Area under the retrieval-ROC curve produced by ``get_ROC_data``.
    Args:
        embs1 (torch.Tensor): Embeddings for first modality.
        embs2 (torch.Tensor): Embeddings for second modality.
    """
    xs, ys = get_ROC_data(embs1, embs2)
    # Trapezoidal integration over the threshold axis.
    return np.trapz(ys, xs)
def plot_ROC_curves(
    embs_train: List[torch.Tensor],
    embs_val: List[torch.Tensor],
    combinations: List[str],
    path_base: str = "./",
) -> None:
    """
    Plots ROC-like curves for training and validation datasets based on embeddings.
    One curve is drawn per unordered pair of modalities; the figure is saved
    as ``ROC_curves.png`` under ``path_base``.
    Args:
        embs_train (List[torch.Tensor]): List of embeddings for training data.
        embs_val (List[torch.Tensor]): List of embeddings for validation data.
        combinations (List[str]): List of combinations of modalities to use for embeddings.
            NOTE(review): labels assume embs_train/embs_val are ordered like
            ``sorted(combinations)`` — confirm against the caller.
        path_base (str) : path to save the plot
    """
    combinations = sorted(combinations)
    fractions_train, fractions_val, labels = [], [], []
    # All unordered pairs (i, j), i < j, of the available modalities.
    for i in range(len(embs_train) - 1):
        for j in range(i + 1, len(embs_train)):
            thresholds, fraction_correct_train = get_ROC_data(
                embs_train[i], embs_train[j]
            )
            thresholds, fraction_correct_val = get_ROC_data(embs_val[i], embs_val[j])
            fractions_train.append(fraction_correct_train)
            fractions_val.append(fraction_correct_val)
            labels.append(f"{combinations[i]} and {combinations[j]}")
    # Set overall figure size and title
    plt.figure(figsize=(12, 6))
    plt.suptitle("Fraction of Correct Predictions vs. Threshold")
    # Plot for validation data
    plt.subplot(1, 2, 1)
    for i, f_val in enumerate(fractions_val):
        plt.plot(thresholds, f_val, lw=2, label=labels[i])
    # Diagonal: expected performance of a random ranking.
    plt.plot(thresholds, thresholds, linestyle="--", color="gray", label="Random")
    plt.title("Validation Data")
    plt.xlabel("Threshold")
    plt.ylabel("Fraction Correct")
    plt.legend()
    plt.grid(True, linestyle="--", alpha=0.7)
    # Plot for training data
    plt.subplot(1, 2, 2)
    for i, f_train in enumerate(fractions_train):
        plt.plot(thresholds, f_train, lw=2, label=labels[i])
    plt.plot(thresholds, thresholds, linestyle="--", color="gray", label="Random")
    plt.title("Training Data")
    plt.xlabel("Threshold")
    plt.ylabel("Fraction Correct")
    plt.legend()
    plt.grid(True, linestyle="--", alpha=0.7)
    # Adjust layout to prevent overlapping
    plt.tight_layout(rect=[0, 0.03, 1, 0.95])
    plt.savefig(os.path.join(path_base, "ROC_curves.png"))
def get_linear_predictions(
    X: torch.Tensor,
    Y: torch.Tensor,
    X_val: Optional[torch.Tensor] = None,
    Y_val: Optional[torch.Tensor] = None,
    task: str = "regression",
) -> torch.Tensor:
    """
    Fit a linear model on (X, Y) and return its predictions as a flat tensor.
    Uses LinearRegression for 'regression' and a linear-kernel SVM
    (LinearSVC) for 'classification'.  Predictions are made on the
    validation inputs when both X_val and Y_val are provided, otherwise on
    the training inputs.
    Parameters:
        X (torch.Tensor): The input features for training.
        Y (torch.Tensor): The target values for training.
        X_val (Optional[torch.Tensor]): The input features for validation.
        Y_val (Optional[torch.Tensor]): The target values for validation.
        task (str): The downstream task ('regression' or 'classification').
    Returns:
        torch.Tensor: 1-D tensor of predictions.
    Raises:
        ValueError: If ``task`` is neither 'regression' nor 'classification'.
    """
    # sklearn expects 2-D targets.
    if len(Y.shape) == 1:
        Y = Y[:, np.newaxis]
    # Move features off the GPU and into numpy for sklearn.
    train_feats = X.cpu().detach().numpy()
    val_feats = X_val.cpu().detach().numpy() if X_val is not None else None
    task_name = task.lower()
    if task_name == "regression":
        model = LinearRegression().fit(train_feats, Y)
    elif task_name == "classification":
        model = LinearSVC().fit(train_feats, Y)
    else:
        raise ValueError("Invalid task")
    # Predict on validation data when fully provided, else on training data.
    use_val = val_feats is not None and Y_val is not None
    raw_preds = model.predict(val_feats if use_val else train_feats)
    return torch.from_numpy(raw_preds).flatten()
def get_knn_predictions(
    X: torch.Tensor,
    Y: torch.Tensor,
    X_val: Optional[torch.Tensor] = None,
    Y_val: Optional[torch.Tensor] = None,
    k: int = 5,
    task: str = "regression",
) -> torch.Tensor:
    """
    Fit a k-nearest-neighbors model on (X, Y) and return predictions.
    Uses KNeighborsRegressor for 'regression' and KNeighborsClassifier for
    'classification'.  Predictions are made on the validation inputs when
    both X_val and Y_val are provided, otherwise on the training inputs.
    Parameters:
        X (torch.Tensor): The input features for training.
        Y (torch.Tensor): The target values for training.
        X_val (Optional[torch.Tensor]): The input features for validation.
        Y_val (Optional[torch.Tensor]): The target values for validation.
        k (int): The number of neighbors to use.
        task (str): The downstream task ('regression' or 'classification').
    Returns:
        torch.Tensor: 1-D tensor of predictions.
    Raises:
        ValueError: If ``task`` is neither 'regression' nor 'classification'.
    """
    # sklearn expects 2-D targets.
    if len(Y.shape) == 1:
        Y = Y[:, np.newaxis]
    # Move features off the GPU and into numpy for sklearn.
    train_feats = X.cpu().detach().numpy()
    val_feats = X_val.cpu().detach().numpy() if X_val is not None else None
    task_name = task.lower()
    if task_name == "regression":
        model = KNeighborsRegressor(n_neighbors=k).fit(train_feats, Y)
    elif task_name == "classification":
        model = KNeighborsClassifier(n_neighbors=k).fit(train_feats, Y)
    else:
        raise ValueError("Invalid task")
    # Predict on validation data when fully provided, else on training data.
    use_val = val_feats is not None and Y_val is not None
    raw_preds = model.predict(val_feats if use_val else train_feats)
    return torch.from_numpy(raw_preds).flatten()
def is_subset(subset: List[str], superset: List[str]) -> bool:
    """
    Check whether every filename in ``subset`` also occurs in ``superset``.
    Args:
        subset (List[str]): Filenames expected to be contained in the superset.
        superset (List[str]): Filenames expected to contain the subset.
    Returns:
        bool: True if all elements of ``subset`` appear in ``superset``.
    """
    # Set inequality performs the containment test in O(n + m).
    return set(subset) <= set(superset)
def process_data_loader(
    loader: DataLoader,
    regression: bool,
    classification: bool,
    device: str,
    model: Module,
    combinations: List[str],
) -> Tuple[torch.Tensor, torch.Tensor, Any, Optional[Dict[str, torch.Tensor]]]:
    """
    Processes batches from a DataLoader to generate model predictions and true labels for regression or classification.
    Args:
        loader (DataLoader): The DataLoader from which data batches are loaded.
        regression (bool): Indicates whether the processing is for regression tasks.
        classification (bool): Indicates whether the processing is for classification tasks.
        device (str): The device (e.g., 'cuda', 'cpu') to which tensors are sent for model computation.
        model (Module): The neural network model that processes the input data.
        combinations (List[str]): Specifies which types of data (e.g., 'host_galaxy', 'lightcurve', 'spectral') are included in the input batches.
    Returns:
        Tuple of four items:
            - y_true: concatenated redshift targets from all batches.
            - y_true_val_label: concatenated classification labels.
            - y_pred_val: concatenated predictions when ``regression`` or
              ``classification`` is True, otherwise the (empty) accumulator list.
            - lc_data: dict with keys 'x_lc', 't_lc', 'mask_lc' holding the
              concatenated light-curve tensors, or None when absent.
    """
    y_true_val = []
    y_pred_val = []
    y_true_val_label = []
    # Light-curve tensors are accumulated across batches for later checks.
    lc_datas = []
    time_lc_datas = []
    masked_lc_datas = []
    for batch in loader:
        # Send them all existing tensors to the device
        # Fixed 9-tuple batch layout produced by the project's dataset.
        (
            x_img,
            x_lc,
            t_lc,
            mask_lc,
            x_sp,
            t_sp,
            mask_sp,
            redshift,
            labels,
        ) = batch
        # tracking lc data for later checks
        lc_datas.append(x_lc)
        time_lc_datas.append(t_lc)
        masked_lc_datas.append(mask_lc)
        if regression or classification:
            # Move only the modalities the model consumes onto the device.
            if "host_galaxy" in combinations:
                x_img = x_img.to(device)
            if "lightcurve" in combinations:
                x_lc = x_lc.to(device)
                t_lc = t_lc.to(device)
                mask_lc = mask_lc.to(device)
            if "spectral" in combinations:
                x_sp = x_sp.to(device)
                t_sp = t_sp.to(device)
                mask_sp = mask_sp.to(device)
            x = model(x_img, x_lc, t_lc, mask_lc, x_sp, t_sp, mask_sp)
            if regression:
                y_pred_val.append(x.detach().cpu().flatten())
            elif classification:
                # argmax over class logits -> predicted class indices
                _, predicted_classes = torch.max(x, dim=1)
                y_pred_val.append(predicted_classes.detach().cpu().flatten())
        y_true_val.append(redshift)
        y_true_val_label.append(labels)
    y_true = torch.cat(y_true_val, dim=0)
    y_true_val_label = torch.cat(y_true_val_label, dim=0)
    if regression or classification:
        y_pred_val = torch.cat(y_pred_val, dim=0)
    if len(lc_datas) > 0 and lc_datas[0] is not None:
        x_lc = torch.cat(lc_datas, dim=0)
        t_lc = torch.cat(time_lc_datas, dim=0)
        mask_lc = torch.cat(masked_lc_datas, dim=0)
        lc_data = {"x_lc": x_lc, "t_lc": t_lc, "mask_lc": mask_lc}
    else:
        lc_data = None
    return y_true, y_true_val_label, y_pred_val, lc_data
def print_metrics_in_latex(
metrics_list: List[Dict[str, float]], drop=None, sort=None
) -> None:
"""
Generates LaTeX code from a list of metric dictionaries and prints it.
This function takes a list of dictionaries where each dictionary represents
performance metrics for a particular model and data combination. It converts
this list into a DataFrame, formats numerical values to three decimal places,
and converts the DataFrame to LaTeX format which it then prints.
Args:
metrics_list (List[Dict[str, float]]): A list of dictionaries with keys as metric names
and values as their respective numerical values.
drop: List of columns to drop from the table
sort: string of column to sort from the table
Output:
None: This function directly prints the LaTeX formatted table to the console.
"""
"""
Generates a LaTeX table from a list of dictionaries containing model metrics,
formatting the metrics as mean ± standard deviation for each combination and model.
Parameters:
data (list of dicts): Each dictionary should contain metrics and descriptors such as Model, Combination, and id.
Returns:
str: A LaTeX formatted table as a string.
"""
# Convert the list of dictionaries to a DataFrame
df = pd.DataFrame(metrics_list)
# Select numeric columns
numeric_cols = df.select_dtypes(include=[float]).columns
# Ensure that no more than 4 numeric columns are in one table
max_cols_per_table = 4
# Calculate mean and standard deviation
grouped_df = df.groupby(["id", "Model", "Combination"])[numeric_cols]
mean_df = grouped_df.mean()
std_df = grouped_df.std()
# Generate tables
num_tables = (
len(numeric_cols) + max_cols_per_table - 1
) // max_cols_per_table # Calculate how many tables are needed
tables = []
for i in range(num_tables):
# Select subset of columns for the current table
cols_subset = numeric_cols[
i * max_cols_per_table : (i + 1) * max_cols_per_table
]
summary_df = mean_df[cols_subset].copy()
# Format 'mean ± std' for each metric in the subset
for col in cols_subset:
summary_df[col] = (
mean_df[col].apply("{:.3f}".format)
+ " ± "
+ std_df[col].apply("{:.3f}".format)
)
# Reset the index and drop 'id'
summary_df.reset_index(inplace=True)
summary_df.drop(columns="id", inplace=True)
if drop is not None:
summary_df.drop(columns=drop, inplace=True)
if sort is not None:
if sort in summary_df.columns:
summary_df.sort_values(by=sort, inplace=True, ascending=False)
# Generate LaTeX table for the current subset of columns
latex_table = summary_df.to_latex(
escape=False,
column_format="|c" * (len(summary_df.columns)) + "|",
index=False,
header=True,
)
tables.append(latex_table)
print(latex_table)
def get_checkpoint_paths(
    root_dir: str, name: str, id: int
) -> Tuple[List[str], List[str], List[int]]:
    """
    Walk ``root_dir`` and, per directory, select the ``.ckpt`` file whose
    filename encodes the smallest epoch number (``...epoch=N-...``).
    Parameters:
        root_dir (str): The root directory containing the sweep directories.
        name (str): Label replicated once per selected checkpoint.
        id (int): Identifier replicated once per selected checkpoint.
    Returns:
        Tuple[List[str], List[str], List[int]]: Selected checkpoint paths,
        plus parallel lists repeating ``name`` and ``id``.
    """
    ckpt_paths = []
    for dirpath, _dirnames, filenames in os.walk(root_dir):
        best_epoch, best_path = float("inf"), None
        for fname in filenames:
            if not fname.endswith(".ckpt"):
                continue
            # Parse the epoch number out of names like "epoch=3-step=10.ckpt";
            # unparsable names are skipped.
            try:
                epoch = int(fname.split("=")[1].split("-")[0])
            except (IndexError, ValueError):
                continue
            if epoch < best_epoch:
                best_epoch, best_path = epoch, os.path.join(dirpath, fname)
        if best_path:
            ckpt_paths.append(best_path)
    count = len(ckpt_paths)
    return ckpt_paths, [name] * count, [id] * count
def calculate_metrics(
    y_true: torch.Tensor,
    y_true_label: torch.Tensor,
    y_pred: torch.Tensor,
    lc_data: List[Tuple[torch.Tensor, torch.Tensor, torch.Tensor]],
    label: str,
    combination: str,
    id: int,
    task: str = "regression",
) -> Tuple[dict, dict]:
    """
    Calculate performance metrics for redshift regression or classification.

    Parameters:
    - y_true (torch.Tensor): True (continuous) target values.
    - y_true_label (torch.Tensor): True class labels (used by the
      classification branch; carried through unchanged for regression).
    - y_pred (torch.Tensor): Predicted values (continuous for regression,
      class indices for classification).
    - lc_data (List[Tuple[torch.Tensor, torch.Tensor, torch.Tensor]]):
      Light curve data (x_lc, t_lc, mask_lc); stored untouched in the
      returned results (may be None).
    - label (str): Label describing the model or configuration being evaluated.
    - combination (str): Description of the data or feature combination used.
    - id (int): A unique identifier to distinguish different k-fold runs.
    - task (str): Either 'regression' or 'classification'.

    Returns:
    - tuple(dict, dict):
        metrics: for regression the keys are 'L1' (mean absolute error),
          'L2' (root mean squared error), 'R2' (coefficient of
          determination) and 'OLF' (outlier fraction); for classification
          they are 'mic-f1', 'mic-p', 'mic-r', 'mic-acc' (micro-averaged,
          NOT balanced across classes) and 'mac-f1', 'mac-p', 'mac-r',
          'mac-acc' (macro-averaged, balanced across classes). Both also
          carry 'Model', 'Combination' and 'id'.
        results: the raw predictions/targets plus 'lc_data', keyed by
          'Model', 'Combination' and 'id', for later k-fold merging.

    Raises:
        ValueError: If ``task`` is neither 'regression' nor 'classification'.
    """
    if task == "regression":
        # L1 = mean absolute error, L2 = root mean squared error.
        l1 = torch.mean(torch.abs(y_true - y_pred)).item()
        l2 = torch.sqrt(torch.mean((y_true - y_pred) ** 2)).item()
        # Coefficient of determination: 1 - SS_res / SS_tot.
        R2 = (
            1
            - (
                torch.sum((y_true - y_pred) ** 2)
                / torch.sum((y_true - torch.mean(y_true)) ** 2)
            ).item()
        )
        # A point is an outlier when |dz| / (1 + z_true) > 0.15.
        delta_z = y_true - y_pred
        outliers = torch.abs(delta_z) / (1.0 + y_true) > 0.15
        # Outlier fraction = fraction of points flagged above.
        OLF = torch.mean(outliers.float()).item()
        metrics = {
            "Model": label,
            "Combination": combination,
            "L1": l1,
            "L2": l2,
            "R2": R2,
            "OLF": OLF,
            "id": id,
        }
    elif task == "classification":
        # sklearn metrics expect numpy arrays of class indices.
        y_true_label = y_true_label.cpu().numpy()
        y_pred = y_pred.cpu().numpy()
        # Micro averages weight every sample equally (NOT balanced across
        # classes); macro averages weight every class equally.
        micF1 = f1_score(y_true_label, y_pred, average="micro")
        micPrec = precision_score(y_true_label, y_pred, average="micro")
        micRec = recall_score(y_true_label, y_pred, average="micro")
        micAcc = accuracy_score(y_true_label, y_pred, normalize=True)
        macF1 = f1_score(y_true_label, y_pred, average="macro")
        macPrec = precision_score(y_true_label, y_pred, average="macro")
        macRec = recall_score(y_true_label, y_pred, average="macro")
        macAcc = balanced_accuracy_score(y_true_label, y_pred)
        metrics = {
            "Model": label,
            "Combination": combination,
            "mic-f1": micF1,
            "mic-p": micPrec,
            "mic-r": micRec,
            "mic-acc": micAcc,
            "mac-f1": macF1,
            "mac-p": macPrec,
            "mac-r": macRec,
            "mac-acc": macAcc,
            "id": id,
        }
    else:
        # BUG FIX: the message previously said 'redshift' although the code
        # checks for 'regression'.
        raise ValueError(
            "Could not understand the task! Please set task to 'regression' or 'classification'."
        )
    # Raw outputs are returned alongside the metrics so that k-fold runs can
    # later be merged (see mergekfold_results).
    results = {
        "Model": label,
        "Combination": combination,
        "id": id,
        "y_pred": y_pred,
        "y_true": y_true,
        "y_true_label": y_true_label,
        "lc_data": lc_data,
    }
    return metrics, results
def mergekfold_results(results: List[Dict[str, Any]]) -> pd.DataFrame:
    """
    Merge per-fold result dicts by concatenating their prediction, target and
    light-curve arrays within each (Model, Combination, id) group.
    Each result entry should contain 'Model', 'Combination', 'id', 'y_pred',
    'y_true', 'y_true_label', and 'lc_data' keys.
    Args:
        results (List[Dict[str, Any]]): A list of dictionaries, each containing classification data.
    Returns:
        pd.DataFrame: A DataFrame with one row per ('Model', 'Combination',
        'id') group and the group's arrays concatenated along the sample axis.
    """
    # Convert the list of dictionaries to a DataFrame
    df = pd.DataFrame(results)
    # Accumulators: one entry per (Model, Combination, id) group.
    concatenated_results = {
        "Model": [],
        "Combination": [],
        "id": [],
        "y_pred": [],
        "y_true": [],
        "y_true_label": [],
        "lc_data": [],
    }
    # Group by 'Model', 'Combination', 'id'
    grouped = df.groupby(["Model", "Combination", "id"])
    # Iterate through each group and concatenate the results
    for (model, combination, id_), group in grouped:
        concatenated_results["Model"].append(model)
        concatenated_results["Combination"].append(combination)
        concatenated_results["id"].append(id_)
        # dropna() skips entries whose array is missing (None/NaN) before
        # concatenating along the sample axis.
        concatenated_results["y_pred"].append(
            np.concatenate(group["y_pred"].dropna().values)
        )
        concatenated_results["y_true"].append(
            np.concatenate(group["y_true"].dropna().values)
        )
        concatenated_results["y_true_label"].append(
            np.concatenate(group["y_true_label"].dropna().values)
        )
        # lc_data entries are dicts of arrays; concatenate them key-wise.
        # NOTE(review): assumes every non-null lc_data dict in a group has the
        # same keys as the first one -- confirm against the producer.
        if group["lc_data"].notna().any():
            lc_data_concat = {
                key: np.concatenate([d[key] for d in group["lc_data"].dropna().values])
                for key in group["lc_data"].dropna().values[0].keys()
            }
        else:
            lc_data_concat = None
        concatenated_results["lc_data"].append(lc_data_concat)
    # Convert the concatenated results to a DataFrame
    concatenated_df = pd.DataFrame(concatenated_results)
    return concatenated_df
def save_normalized_conf_matrices(
    df: pd.DataFrame, class_names: dict, output_dir: str = "confusion_matrices"
) -> None:
    """
    Calculate and save a row-normalized confusion matrix for each entry in *df*.

    Each matrix is rendered as a seaborn heatmap labeled with class names and
    saved as a PNG file named after the model and combination identifiers.

    Args:
        df (pd.DataFrame): DataFrame containing the columns 'Model',
            'Combination', 'y_true_label', and 'y_pred'.
        class_names (dict): Dictionary mapping class labels (int) to tuples
            whose first element is the class name (str).
        output_dir (str): Directory where the confusion matrix plots will be
            saved. Defaults to 'confusion_matrices'; created if missing.
    """
    # Ensure the output directory exists
    os.makedirs(output_dir, exist_ok=True)

    # Render and save the confusion matrix for a single DataFrame row.
    def save_conf_matrix(row):
        # Row-normalize so each true-class row sums to 1.
        cm = confusion_matrix(row["y_true_label"], row["y_pred"])
        cm_normalized = cm.astype("float") / cm.sum(axis=1)[:, np.newaxis]
        plt.figure(figsize=(10, 8))
        sns.heatmap(
            cm_normalized,
            annot=True,
            fmt=".2f",
            cmap="Blues",
            xticklabels=[class_names[label][0] for label in sorted(class_names)],
            yticklabels=[class_names[label][0] for label in sorted(class_names)],
        )
        plt.title(f'Normalized Confusion Matrix: {row["Model"]}, {row["Combination"]}')
        plt.xlabel("Predicted Label")
        plt.ylabel("True Label")
        # Strip spaces so the filename is shell-friendly.
        filename = f"{output_dir}/{row['Model']}_{row['Combination']}.png".replace(
            " ", ""
        )
        plt.savefig(filename)
        plt.close()  # Close the plot to free memory
        # BUG FIX: previously printed the literal "(unknown)" instead of the path.
        print(f"Saved plot to {filename}")

    # Apply save_conf_matrix to each row in the DataFrame
    df.apply(save_conf_matrix, axis=1)
def plot_pred_vs_true(df, folder_name, class_names):
    """
    Creates and saves a predicted-vs-true redshift plot for each row in the
    DataFrame; each subplot within a plot highlights one class against the rest.

    Parameters:
    - df (pandas.DataFrame): DataFrame containing the data for plots. Expected to have columns:
                             'y_pred', 'y_true', 'y_true_label', 'Model', and 'Combination'.
    - folder_name (str): Directory name where the plots will be saved.
    - class_names (dict): Dictionary mapping class labels (int) to tuples of (class name, color).
    Each plot is saved with the filename format "Model_Combination.png", where spaces are removed.
    """
    # Ensure the directory exists where plots will be saved
    os.makedirs(folder_name, exist_ok=True)
    # Iterate over each row in the DataFrame to create plots
    for index, row in df.iterrows():
        y_pred = np.array(row["y_pred"])
        y_true = np.array(row["y_true"])
        y_true_label = np.array(row["y_true_label"])
        # Shared axis limits so all subplots are comparable; never above 0.
        x_min, x_max = y_true.min(), y_true.max()
        y_min, y_max = y_pred.min(), y_pred.max()
        x_min = min(0, x_min)
        y_min = min(0, y_min)
        # One stacked subplot per class present in this row.
        plt.figure(figsize=(15, 30))
        unique_labels = np.unique(y_true_label)
        n_classes = len(unique_labels)
        # Diagonal reference line (perfect prediction).
        red_line = np.linspace(-1, 1, 100)
        for i, label in enumerate(unique_labels, 1):
            ax = plt.subplot(n_classes, 1, i)
            # Plot all classes in gray as the background
            ax.scatter(y_true, y_pred, color="gray", alpha=0.2, label="Other Classes")
            # Highlight the current class
            idx = y_true_label == label
            class_color = class_names[label][1]  # Color corresponding to the label
            ax.scatter(
                y_true[idx],
                y_pred[idx],
                color=class_color,
                label=f"{class_names[label][0]}",
            )
            ax.plot(
                red_line, red_line, linewidth=3, alpha=0.4, linestyle="--", color="red"
            )
            # Set tick parameters
            ax.xaxis.set_major_locator(
                ticker.MultipleLocator(0.05)
            )  # Adjust tick spacing as needed
            ax.yaxis.set_major_locator(
                ticker.MultipleLocator(0.05)
            )  # Adjust tick spacing as needed
            ax.tick_params(direction="in", length=6, width=2)
            ax.set_title(f"{class_names[label][0]}")
            ax.set_xlabel("True Redshift")
            ax.set_ylabel("Predicted Redshift")
            ax.set_xlim(x_min, x_max)
            ax.set_ylim(y_min, y_max)
            ax.legend()
            ax.grid(True)
        # Format and save the file
        filename = f"{folder_name}/{row['Model']}_{row['Combination']}.png".replace(
            " ", ""
        )
        plt.savefig(filename)
        # BUG FIX: previously printed the literal "(unknown)" instead of the path.
        print(f"Saved plot to {filename}")
        plt.close()  # Close the plot to free up memory
def get_class_dependent_predictions(
    inputs: List[Dict[str, Any]], class_names: Dict[int, Tuple[str, str]]
) -> List[Dict[str, Any]]:
    """
    Compute regression metrics separately for each class within every
    model/combination entry of *inputs*.

    Args:
        inputs (List[Dict[str, Any]]): Model output dicts holding 'y_pred',
            'y_true', 'y_true_label', 'Model', 'Combination', and 'id'.
        class_names (Dict[int, Tuple[str, str]]): Maps class labels (int) to
            tuples whose first element is the class name (the color in the
            second slot is unused here).

    Returns:
        List[Dict[str, Any]]: Metric dicts (as produced by calculate_metrics)
        each augmented with a 'class' key naming the class it was computed
        for. Classes absent from an entry are skipped.
    """
    records: List[Dict[str, Any]] = []
    frame = pd.DataFrame(inputs)
    for _, entry in frame.iterrows():
        preds = torch.tensor(entry["y_pred"])
        truths = torch.tensor(entry["y_true"])
        labels = torch.tensor(entry["y_true_label"])
        for class_label, name_color in class_names.items():
            selection = labels == class_label
            preds_for_class = preds[selection]
            truths_for_class = truths[selection]
            # Skip classes with no samples in this entry.
            if len(preds_for_class) == 0 or len(truths_for_class) == 0:
                continue
            class_metrics, _ = calculate_metrics(
                y_true=truths_for_class,
                y_pred=preds_for_class,
                y_true_label=labels[selection],
                lc_data=None,
                label=entry["Model"],
                combination=entry["Combination"],
                id=entry["id"],
                task="regression",
            )
            class_metrics["class"] = name_color[0]
            records.append(class_metrics)
    return records
def make_spider(
    df: pd.DataFrame,
    title: str,
    metric: str,
    output_dir: str,
    Range: Optional[Tuple[float, float]] = None,
) -> None:
    """
    Draw a radar (spider) plot of one metric across classes and save it as a
    PNG file.

    Args:
        df (pd.DataFrame): Metrics table with a 'class' column and a column
            named after *metric*.
        title (str): Plot title, typically model + combination + metric.
        metric (str): The column of *df* to plot radially.
        output_dir (str): Destination directory (created if missing).
        Range (Optional[Tuple[float, float]]): (min, max) limits for the
            radial axis; when None the plot autoscales to the data.
    """
    class_labels = df["class"].tolist()
    n_axes = len(class_labels)
    # One spoke per class, evenly spaced around the circle; the first
    # angle/value is repeated at the end so the polygon closes.
    angles = np.linspace(0, 2 * np.pi, n_axes, endpoint=False).tolist()
    angles.append(angles[0])
    radial_values = df[metric].tolist()
    radial_values.append(radial_values[0])
    # Circular (not polygonal) polar axes.
    fig, ax = plt.subplots(figsize=(6, 6), subplot_kw=dict(polar=True))
    ax.fill(angles, radial_values, color="blue", alpha=0.25)
    ax.plot(angles, radial_values, color="blue", linewidth=2)
    # Fix the radial scale only when an explicit range was requested.
    if Range is not None:
        ax.set_ylim(Range[0], Range[1])
    ax.set_xticks(angles[:-1])
    ax.set_xticklabels(class_labels, fontsize=13)
    plt.title(f"{title} - {metric}", size=15, color="blue", y=1.1)
    os.makedirs(output_dir, exist_ok=True)
    output_path = os.path.join(output_dir, f"{title}_{metric}.png").replace(" ", "_")
    plt.savefig(output_path)
    print(f"Created radar plot in {output_path}")
    plt.close(fig)  # release the figure's memory
def generate_radar_plots(
    df: pd.DataFrame,
    output_dir: str,
    range_dict: Dict[str, Optional[Tuple[float, float]]],
) -> None:
    """
    Create one radar plot per metric for every (Model, Combination) grouping
    of *df* and save them to *output_dir*.

    Args:
        df (pd.DataFrame): Per-class metrics with 'Model' and 'Combination'
            identifier columns.
        output_dir (str): Directory that receives the PNG files.
        range_dict (Dict[str, Optional[Tuple[float, float]]]): Optional
            (min, max) radial-axis range per metric; a missing or None entry
            lets the plot autoscale.
    """
    metrics_to_plot = ["L1", "L2", "R2", "OLF"]
    for (model, combination), group in df.groupby(["Model", "Combination"]):
        plot_title = f"{model} - {combination}"
        for metric in metrics_to_plot:
            # Missing range entries fall back to autoscaling (None).
            radial_range = range_dict.get(metric, None)
            make_spider(group, plot_title, metric, output_dir, radial_range)
def filter_classes(
    X_list: List[torch.Tensor],
    y: torch.Tensor,
    lc_data: Dict[str, torch.Tensor],
    target_classes: torch.Tensor,
) -> Tuple[List[torch.Tensor], torch.Tensor, Optional[Dict[str, torch.Tensor]]]:
    """
    Filter a list of datasets down to the rows whose label is in
    ``target_classes`` and remap the kept labels to 0..len(target_classes)-1
    (in the order given by ``target_classes``).

    Parameters:
    - X_list (list of torch.Tensor): List of feature matrices (first dim = samples).
    - y (torch.Tensor): The label vector (flattened before use).
    - lc_data (Dict[str, Tensor] or None): Per-sample light-curve tensors,
      filtered with the same row mask; passed through as None when None.
    - target_classes (torch.Tensor): A tensor of the original class labels to keep.

    Returns:
    - list of torch.Tensor: List of filtered feature matrices.
    - torch.Tensor: Remapped label vector, consistent across all feature matrices.
    - Dict[str, torch.Tensor] or None: Filtered lc_data (None if none given).
    """
    # Flatten y to ensure it is a 1D tensor
    y_flat = y.flatten()
    # Row mask: True where the label matches any of the target classes.
    mask = y_flat == target_classes[:, None]
    mask = mask.any(dim=0)
    # Filter each X in the list based on the mask
    filtered_X_list = [X[mask] for X in X_list]
    if lc_data is not None:
        filtered_lc_data = {key: value[mask] for key, value in lc_data.items()}
    else:
        filtered_lc_data = None
    filtered_y = y_flat[mask]
    # Remap each kept label to its position within target_classes (0-based).
    remapped_y = torch.empty_like(filtered_y)
    for i, class_val in enumerate(target_classes):
        remapped_y[filtered_y == class_val] = i
    return filtered_X_list, remapped_y, filtered_lc_data
def assert_sorted_lc(loader: Any, bands: List[Any]) -> None:
    """
    Check if the time sequences in each batch of the loader are sorted within each band.
    Parameters:
    loader (Any): A data loader that provides batches of data. Each batch must
        unpack into a 9-tuple; only the magnitudes (2nd element) and time
        sequences (3rd element) are used here -- the padding mask, spectrum,
        frequency, masked spectrum and redshift entries are ignored.
    bands (List[Any]): A list representing the bands for which the time sequences need to be checked.
    Raises:
        AssertionError: If any time sequence within a band is found to be unsorted.
    """
    nbands = len(bands)
    for batch in loader:
        _, mag_test, time_test, padding_mask, spec, freq, maskspec, redshift, _ = batch
        check = True
        for i in range(len(mag_test)):
            N = len(time_test[i])
            for k in range(nbands):
                # Each sample's time axis is assumed to hold the per-band
                # sequences concatenated in nbands equal-length segments --
                # TODO confirm against the dataset builder.
                test = (
                    time_test[i][(N // nbands) * k : (N // nbands) * (k + 1)]
                ).numpy()
                # Zero entries are treated as padding and ignored.
                test = test[test != 0]
                check = check and (sorted(test) == test).all()
        # One assertion per batch: fails if any band of any sample is unsorted.
        assert check
|
ThomasHelferREPO_NAMEmultimodal-supernovaePATH_START.@multimodal-supernovae_extracted@multimodal-supernovae-main@src@utils.py@.PATH_END.py
|
{
"filename": "flagging.py",
"repo_name": "realfastvla/rfpipe",
"repo_path": "rfpipe_extracted/rfpipe-main/rfpipe/flagging.py",
"type": "Python"
}
|
from __future__ import print_function, division, absolute_import, unicode_literals
from builtins import bytes, dict, object, range, map, input, str
from future.utils import itervalues, viewitems, iteritems, listvalues, listitems
import numpy as np
from numba import jit
from rfpipe import util
import logging
logger = logging.getLogger(__name__)
def flag_data(st, data):
    """ Identifies bad data and flags it to 0.
    Converts to masked array for flagging, but returns zeroed numpy array.

    Parameters
    ----------
    st : state-like object providing ``prefs.flaglist`` (a list of
        (mode, arg0[, arg1]) tuples) and ``spw_chan_select``.
    data : 4d complex visibility array; zeros mark already-flagged samples.
    """
    # Mask zeros so the flaggers' statistics ignore already-flagged data.
    datam = np.ma.masked_values(data, 0j, copy=False, shrink=False)
    spwchans = st.spw_chan_select
    for flagparams in st.prefs.flaglist:
        # Entries are either (mode, arg0) or (mode, arg0, arg1).
        if len(flagparams) == 3:
            mode, arg0, arg1 = flagparams
        else:
            mode, arg0 = flagparams
        if mode == 'blstd':
            flag_blstd(datam, arg0, arg1)
        elif mode == 'badchtslide':
            flag_badchtslide(datam, spwchans, arg0, arg1)
        elif mode == 'badspw':
            flag_badspw(datam, spwchans, arg0)
        else:
            # Typo fix: "Flaging" -> "Flagging".
            logger.warning("Flagging mode {0} not available.".format(mode))
    # Return a plain ndarray with all flagged samples zeroed.
    return datam.filled(0)
def flag_blstd(data, sigma, convergence):
    """ Use data (4d) to calculate (int, chan, pol) to be flagged.
    Masked arrays assumed as input.

    Cells whose standard deviation over the baseline axis is an outlier are
    masked in place across all baselines. Robust median/std estimates of the
    per-cell baseline std are obtained by iterative sigma-clipping.

    Parameters
    ----------
    data : 4d masked array (int, bl, chan, pol); flagged in place via mask.
    sigma : outlier threshold in units of the clipped std.
    convergence : stop iterating once the relative change of the clipped std
        drops below this fraction.
    """
    sh = data.shape
    blstd = util.blstd(data.data, data.mask) # uses mask
    # Zero std means an empty/degenerate cell; exclude it from statistics.
    blstd = np.ma.masked_equal(blstd, 0)
    # blstd = np.ma.std(data, axis=1)
    # iterate to good median and std values
    blstdmednew = np.ma.median(blstd)
    blstdstdnew = np.ma.std(blstd)
    blstdstd = blstdstdnew*2 # TODO: is this initialization used?
    while (blstdstd-blstdstdnew)/blstdstd > convergence:
        blstdstd = blstdstdnew
        blstdmed = blstdmednew
        # Clip the current outliers, then re-estimate median/std without them.
        blstd = np.ma.masked_where(blstd > blstdmed + sigma*blstdstd, blstd, copy=False)
        blstdmednew = np.ma.median(blstd)
        blstdstdnew = np.ma.std(blstd)
    # flag blstd too high
    badt, badch, badpol = np.where(blstd > blstdmednew + sigma*blstdstdnew)
    logger.info("flagged by blstd: {0} of {1} total channel/time/pol cells."
                .format(len(badt), sh[0]*sh[2]*sh[3]))
    for i in range(len(badt)):
        # Mask the entire baseline axis of each flagged (time, chan, pol) cell.
        data.mask[badt[i], :, badch[i], badpol[i]] = True
def flag_badchtslide(data, spwchans, sigma, win):
    """ Use data (4d) to calculate (int, chan, pol) to be flagged

    Flags outlier channels and integrations (per pol) by their mean-amplitude
    deviation from a median: channels are compared to the median within their
    own spw, integrations to a sliding median of width ``win`` (see slidedev).
    Flags are applied in place via the mask.
    """
    sh = data.shape
    # Mean amplitude over the baseline axis -> shape (int, chan, pol).
    meanamp = np.abs(data).mean(axis=1)
    # calc badch as deviation from median of window
    spec = meanamp.mean(axis=0)
    # specmed = slidedev(spec, win)
    # Per-channel deviation from the median of its own spw (per pol).
    specmed = np.concatenate([spec[chans] - np.ma.median(spec[chans]) for chans in spwchans])
    badch = np.where(specmed > sigma*np.ma.std(specmed, axis=0))
    # calc badt as deviation from median of window
    lc = meanamp.mean(axis=1)
    lcmed = slidedev(lc, win)
    badt = np.where(lcmed > sigma*np.ma.std(lcmed, axis=0))
    badtcnt = len(np.ma.unique(badt))
    badchcnt = len(np.ma.unique(badch))
    logger.info("flagged by badchtslide: {0}/{1} pol-times and {2}/{3} pol-chans."
                .format(badtcnt, sh[0]*sh[3], badchcnt, sh[2]*sh[3]))
    # Mask bad channels over all times/baselines (per pol) ...
    for i in range(len(badch[0])):
        data.mask[:, :, badch[0][i], badch[1][i]] = True
    # ... and bad integrations over all baselines/channels (per pol).
    for i in range(len(badt[0])):
        data.mask[badt[0][i], :, :, badt[1][i]] = True
def flag_badspw(data, spwchans, sigma):
    """ Use data median variance between spw-pols to flag outliers.
    Also flags spw-pols with fewer than 5 channels.
    Best to use this after flagging bad channels.

    Parameters
    ----------
    data : 4d masked array (int, bl, chan, pol); flagged in place via mask.
    spwchans : sequence of channel-index arrays, one per spw.
    sigma : flag spw-pols whose channel std exceeds the good-spw median by
        more than sigma times the good-spw std.
    """
    nspw = len(spwchans)*2  # 2 pols assumed
    if nspw >= 4:
        # Mean spectrum per channel, keeping pols separate -> (chan, pol).
        spec = np.abs(data.mean(axis=0)).mean(axis=0)
        # Channel std per spw-pol; spw-pols with fewer than 5 unflagged
        # channels get 0, which is masked below and therefore flagged.
        deviations = []
        for chans in spwchans:
            for pol in [0, 1]:
                if spec[chans, pol].count() >= 5:
                    deviations.append(np.ma.std(spec[chans, pol]))
                else:
                    deviations.append(0)
        deviations = np.ma.masked_equal(np.nan_to_num(deviations), 0).flatten()
        logger.info("badspw flagging finds deviations per spw-pol: {0}"
                    .format(deviations))
        # Iteratively grow the bad set: recompute median/std over the
        # remaining good spw-pols until no new outliers appear.
        # (A dead first assignment without the std term was removed here.)
        badspw = []
        badspwnew = np.where(deviations > sigma*np.ma.std(deviations) + np.ma.median(deviations))[0]
        while len(badspwnew) > len(badspw):
            badspw = badspwnew
            goodspw = [spwpol for spwpol in range(nspw) if spwpol not in badspw]
            badspwnew = np.where(deviations > sigma*np.ma.std(deviations.take(goodspw)) + np.ma.median(deviations.take(goodspw)))[0]
        # Mark the final outlier set in the deviations mask; the mask now
        # covers both statistical outliers and the too-few-channels spw-pols.
        deviations[badspw] = np.ma.masked
        logger.info("flagged {0}/{1} spw-pol ({2})"
                    .format(len(np.where(deviations.mask)[0]), nspw, np.where(deviations.mask)[0]))
        # Apply the flags: mask every channel of each bad spw-pol.
        for i in range(nspw//2):
            for j in range(2):
                if deviations.mask.reshape(nspw//2, 2)[i, j]:
                    data.mask[:, :, spwchans[i], j] = np.ma.masked
    else:
        # Typo fix: "detetion" -> "detection".
        logger.warning("Fewer than 4 spw. Not performing badspw detection.")
def slidedev(arr, win):
    """ Deviation of each row of *arr* (len x 2, per pol) from a local median.

    The median is taken over a window of roughly ``win`` rows centred on each
    index, excluding the index itself.
    """
    med = np.zeros_like(arr)
    half = win//2
    for idx in range(len(arr)):
        # Neighbours before and after idx, clipped to the array bounds.
        neighbours = [j for j in range(max(0, idx-half), idx)]
        neighbours += [j for j in range(idx+1, min(idx+half, len(arr)))]
        med[idx] = np.ma.median(arr.take(neighbours, axis=0), axis=0)
    return arr-med
def getonlineflags(st, segment):
    """ Gets antenna flags for a given segment from either sdm or mcaf server.
    Returns an array of flags (1: good, 0: bad) for each baseline.

    Parameters
    ----------
    st : state-like object with ``segmenttimes`` and ``metadata``
        (``metadata.datasource`` must be 'sdm' or 'vys').
    segment : index into ``st.segmenttimes`` selecting the (t0, t1) range.
    """
    t0, t1 = st.segmenttimes[segment]
    if st.metadata.datasource == 'sdm':
        sdm = util.getsdm(st.metadata.filename, bdfdir=st.metadata.bdfdir)
        scan = sdm.scan(st.metadata.scan)
        # Map each current baseline to its index in the original baseline list.
        takebls = [st.metadata.blarr_orig.tolist().index(list(bl)) for bl in st.blarr]
        # A baseline is good only if unflagged over the whole time range.
        flags = scan.flags([t0, t1])[:, takebls].all(axis=0)
    elif st.metadata.datasource == 'vys':
        try:
            from realfast.mcaf_servers import getblflags
            flags = getblflags(st.metadata.datasetId, st.blarr,
                               startTime=t0, endTime=t1)
        # NOTE(review): (ImportError, Exception) is equivalent to catching any
        # Exception; narrow this if specific server errors are expected.
        except (ImportError, Exception):
            logger.warning("No mcaf antenna flag server flags available")
            # Fall back to treating every baseline as good.
            flags = np.ones(st.nbl)
    # NOTE(review): 'flags' is unbound if datasource is neither 'sdm' nor
    # 'vys' -- confirm callers guarantee one of the two.
    if not flags.all():
        logger.info('Found antennas to flag in time range {0}-{1} '
                    .format(t0, t1))
    else:
        logger.info('No flagged antennas in time range {0}-{1} '
                    .format(t0, t1))
    return flags
def flag_data_rtpipe(st, data):
    """ Flagging data in single process
    Deprecated.

    Legacy path that delegates per-spw/pol flagging to rtlib (rtpipe) and
    optionally zeroes whole spw/pol combinations showing excess noise.
    Returns the (possibly modified in place) data array.
    """
    try:
        import rtlib_cython as rtlib
    except ImportError:
        # NOTE(review): execution continues and will raise NameError on the
        # first rtlib use below if the import failed.
        logger.error("rtpipe not installed. Cannot import rtlib for flagging.")
    # **hack!**
    # Minimal dict standing in for the rtpipe state object expected by rtlib.
    d = {'dataformat': 'sdm', 'ants': [int(ant.lstrip('ea')) for ant in st.ants], 'excludeants': st.prefs.excludeants, 'nants': len(st.ants)}
    for flag in st.prefs.flaglist:
        mode, sig, conv = flag
        for spw in st.spw:
            # Contiguous channel range assumed for each spw.
            chans = np.arange(st.metadata.spw_nchan[spw]*spw, st.metadata.spw_nchan[spw]*(1+spw))
            for pol in range(st.npol):
                status = rtlib.dataflag(data, chans, pol, d, sig, mode, conv)
                logger.info(status)
    # hack to get rid of bad spw/pol combos whacked by rfi
    if st.prefs.badspwpol:
        logger.info('Comparing overall power between spw/pol. Removing those with {0} times typical value'.format(st.prefs.badspwpol))
        # Std of the amplitude per (spw, pol) block.
        spwpol = {}
        for spw in st.spw:
            chans = np.arange(st.metadata.spw_nchan[spw]*spw, st.metadata.spw_nchan[spw]*(1+spw))
            for pol in range(st.npol):
                spwpol[(spw, pol)] = np.abs(data[:, :, chans, pol]).std()
        meanstd = np.mean(list(spwpol.values()))
        for (spw, pol) in spwpol:
            if spwpol[(spw, pol)] > st.prefs.badspwpol*meanstd:
                logger.info('Flagging all of (spw %d, pol %d) for excess noise.' % (spw, pol))
                chans = np.arange(st.metadata.spw_nchan[spw]*spw, st.metadata.spw_nchan[spw]*(1+spw))
                # Zero (rather than mask) the whole spw/pol block.
                data[:, :, chans, pol] = 0j
    return data
|
realfastvlaREPO_NAMErfpipePATH_START.@rfpipe_extracted@rfpipe-main@rfpipe@flagging.py@.PATH_END.py
|
{
"filename": "mcmc_kernel.py",
"repo_name": "pyro-ppl/pyro",
"repo_path": "pyro_extracted/pyro-master/pyro/infer/mcmc/mcmc_kernel.py",
"type": "Python"
}
|
# Copyright (c) 2017-2019 Uber Technologies, Inc.
# SPDX-License-Identifier: Apache-2.0
from abc import ABCMeta, abstractmethod
class MCMCKernel(object, metaclass=ABCMeta):
    """
    Abstract base class for MCMC transition kernels.

    Subclasses must implement :meth:`sample`; the remaining hooks are
    optional and default to no-ops.
    """

    def setup(self, warmup_steps, *args, **kwargs):
        r"""
        Optional hook run once to set up any state required at the start of
        the simulation run.

        :param int warmup_steps: Number of warmup iterations.
        :param \*args: Algorithm specific positional arguments.
        :param \*\*kwargs: Algorithm specific keyword arguments.
        """

    def cleanup(self):
        """
        Optional hook to clean up any residual state on termination.
        """

    def logging(self):
        """
        Relevant logging information to be printed at regular intervals
        of the MCMC run (e.g. acceptance rate). Returns `None` by default.

        :return: String containing the diagnostic summary.
        :rtype: string
        """
        return None

    def diagnostics(self):
        """
        Returns a dict of useful diagnostics after finishing sampling process.
        """
        # Must not be None so multiprocessing result passing works.
        return {}

    def end_warmup(self):
        """
        Optional hook to tell the kernel that the warm-up phase has finished.
        """

    @property
    def initial_params(self):
        """
        Dict of initial parameter values (by default, from the prior) used to
        initiate the MCMC run, keyed by parameter name.
        """
        raise NotImplementedError

    @initial_params.setter
    def initial_params(self, params):
        """
        Set the parameters that initiate the MCMC run. Note that the
        parameters must have unconstrained support.
        """
        raise NotImplementedError

    @abstractmethod
    def sample(self, params):
        """
        Draw new parameters from the posterior distribution given existing ones.

        :param dict params: Current parameter values.
        :return: New parameters from the posterior distribution.
        """
        raise NotImplementedError

    def __call__(self, params):
        """
        Alias for :meth:`sample`.
        """
        return self.sample(params)
|
pyro-pplREPO_NAMEpyroPATH_START.@pyro_extracted@pyro-master@pyro@infer@mcmc@mcmc_kernel.py@.PATH_END.py
|
{
"filename": "_stream.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/surface/_stream.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class StreamValidator(_plotly_utils.basevalidators.CompoundValidator):
    """Compound validator for the `stream` property of `surface` traces."""

    def __init__(self, plotly_name="stream", parent_name="surface", **kwargs):
        # Pop the overridable arguments first so explicit kwargs win over the
        # defaults before forwarding everything to the compound validator.
        data_class_str = kwargs.pop("data_class_str", "Stream")
        data_docs = kwargs.pop(
            "data_docs",
            """
            maxpoints
                Sets the maximum number of points to keep on
                the plots from an incoming stream. If
                `maxpoints` is set to 50, only the newest 50
                points will be displayed on the plot.
            token
                The stream id number links a data trace on a
                plot with a stream. See https://chart-
                studio.plotly.com/settings for more details.
""",
        )
        super(StreamValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            data_class_str=data_class_str,
            data_docs=data_docs,
            **kwargs,
        )
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@surface@_stream.py@.PATH_END.py
|
{
"filename": "download_regression_models.py",
"repo_name": "sxs-collaboration/gwsurrogate",
"repo_path": "gwsurrogate_extracted/gwsurrogate-master/test/download_regression_models.py",
"type": "Python"
}
|
""" download all models to be tested in test_model_regression.py
This is useful to do when using continuous integration. """
import gwsurrogate as gws
import hashlib, os
def md5(fname):
    """ Return the MD5 hex digest of the file at *fname*.

    The file is read in 4 KiB chunks so arbitrarily large files can be
    hashed without loading them into memory. Adapted from
    https://stackoverflow.com/questions/3431825/generating-an-md5-checksum-of-a-file
    """
    digest = hashlib.md5()
    with open(fname, "rb") as stream:
        while True:
            chunk = stream.read(4096)
            if not chunk:  # EOF
                break
            digest.update(chunk)
    return digest.hexdigest()
# Models exercised by test_model_regression.py; each is downloaded up front so
# the regression tests can run without network access (e.g. in CI).
models = ['SpEC_q1_10_NoSpin_linear_alt',
'NRHybSur3dq8',
'NRHybSur3dq8_CCE',
'SpEC_q1_10_NoSpin',
'SpEC_q1_10_NoSpin_linear',
'NRSur7dq4',
'NRHybSur2dq15'
]
for model in models:
    print("Downloading model %s ..."%model)
    # Fetch the surrogate data file into the local catalog cache.
    gws.catalog.pull(model)
    # Locate the downloaded file and report its hash for the regression log.
    surr_url = gws.catalog._surrogate_world[model].url
    path_to_model = gws.catalog.download_path()+os.path.basename(surr_url)
    print("md5 Hash of %s is %s"%(model,md5(path_to_model)))
    # Fail loudly if the downloaded file does not look freshly written.
    if not gws.catalog.is_file_recent(path_to_model):
        print("File download failed!")
        assert(False)
|
sxs-collaborationREPO_NAMEgwsurrogatePATH_START.@gwsurrogate_extracted@gwsurrogate-master@test@download_regression_models.py@.PATH_END.py
|
{
"filename": "clickup.py",
"repo_name": "langchain-ai/langchain",
"repo_path": "langchain_extracted/langchain-master/libs/community/langchain_community/utilities/clickup.py",
"type": "Python"
}
|
"""Util that calls clickup."""
import json
import warnings
from dataclasses import asdict, dataclass, fields
from typing import Any, Dict, List, Mapping, Optional, Tuple, Type, Union
import requests
from langchain_core.utils import get_from_dict_or_env
from pydantic import BaseModel, ConfigDict, model_validator
DEFAULT_URL = "https://api.clickup.com/api/v2"
@dataclass
class Component:
    """Base class for all components."""

    @classmethod
    def from_data(cls, data: Dict[str, Any]) -> "Component":
        # Subclasses construct themselves from a raw ClickUp API payload.
        raise NotImplementedError()
@dataclass
class Task(Component):
    """Class for a task."""

    id: int
    name: str
    text_content: str
    description: str
    status: str
    creator_id: int
    creator_username: str
    creator_email: str
    assignees: List[Dict[str, Any]]
    watchers: List[Dict[str, Any]]
    priority: Optional[str]
    due_date: Optional[str]
    start_date: Optional[str]
    points: int
    team_id: int
    project_id: int

    @classmethod
    def from_data(cls, data: Dict[str, Any]) -> "Task":
        """Build a Task from a raw ClickUp API payload."""
        creator = data["creator"]
        raw_priority = data["priority"]
        return cls(
            id=data["id"],
            name=data["name"],
            text_content=data["text_content"],
            description=data["description"],
            status=data["status"]["status"],
            creator_id=creator["id"],
            creator_username=creator["username"],
            creator_email=creator["email"],
            assignees=data["assignees"],
            watchers=data["watchers"],
            # Priority is nested one level down when present.
            priority=None if raw_priority is None else raw_priority["priority"],
            due_date=data["due_date"],
            start_date=data["start_date"],
            points=data["points"],
            team_id=data["team_id"],
            project_id=data["project"]["id"],
        )
@dataclass
class CUList(Component):
    """Schema for a ClickUp list."""

    folder_id: float
    name: str
    content: Optional[str] = None
    due_date: Optional[int] = None
    due_date_time: Optional[bool] = None
    priority: Optional[int] = None
    assignee: Optional[int] = None
    status: Optional[str] = None

    @classmethod
    def from_data(cls, data: dict) -> "CUList":
        """Build a CUList from a raw payload; missing optional keys become None."""
        optional_keys = ("content", "due_date", "due_date_time",
                         "priority", "assignee", "status")
        extras = {key: data.get(key) for key in optional_keys}
        return cls(folder_id=data["folder_id"], name=data["name"], **extras)
@dataclass
class Member(Component):
    """Schema for a ClickUp team member."""

    id: int
    username: str
    email: str
    initials: str

    @classmethod
    def from_data(cls, data: Dict) -> "Member":
        """Build a Member from the nested ``user`` record of a raw payload."""
        user = data["user"]
        return cls(
            id=user["id"],
            username=user["username"],
            email=user["email"],
            initials=user["initials"],
        )
@dataclass
class Team(Component):
    """Schema for a ClickUp team."""

    id: int
    name: str
    members: List[Member]

    @classmethod
    def from_data(cls, data: Dict) -> "Team":
        """Build a Team, parsing every raw member entry into a Member."""
        roster = [Member.from_data(entry) for entry in data["members"]]
        return cls(id=data["id"], name=data["name"], members=roster)
@dataclass
class Space(Component):
    """Schema for a ClickUp space."""

    id: int
    name: str
    private: bool
    enabled_features: Dict[str, Any]

    @classmethod
    def from_data(cls, data: Dict[str, Any]) -> "Space":
        """Build a Space from the first entry of a raw ``spaces`` payload."""
        raw_space = data["spaces"][0]
        # Keep only the features whose configuration is flagged as enabled.
        active_features = {
            feature_name: feature_conf
            for feature_name, feature_conf in raw_space["features"].items()
            if feature_conf["enabled"]
        }
        return cls(
            id=raw_space["id"],
            name=raw_space["name"],
            private=raw_space["private"],
            enabled_features=active_features,
        )
def parse_dict_through_component(
    data: dict, component: Type[Component], fault_tolerant: bool = False
) -> Dict:
    """Parse a dictionary by creating
    a component and then turning it back into a dictionary.

    This helps with two things
    1. Extract and format data from a dictionary according to schema
    2. Provide a central place to do this in a fault-tolerant way

    :param data: raw API payload to normalize.
    :param component: Component subclass whose ``from_data`` defines the schema.
    :param fault_tolerant: when True, a parse failure emits a warning and the
        original ``data`` is returned unchanged instead of raising.
    """
    try:
        # Round-trip through the dataclass to extract/normalize known fields.
        return asdict(component.from_data(data))
    except Exception as e:
        if fault_tolerant:
            warning_str = f"""Error encountered while trying to parse
            {str(data)}: {str(e)}\n Falling back to returning input data."""
            warnings.warn(warning_str)
            return data
        else:
            raise e
def extract_dict_elements_from_component_fields(
    data: dict, component: Type[Component]
) -> dict:
    """Extract elements from a dictionary.

    Args:
        data: The dictionary to extract elements from.
        component: The component to extract elements from.

    Returns:
        A dictionary containing the elements from the input dictionary that
        are also in the component.
    """
    # Keep only the keys that correspond to declared dataclass fields.
    return {
        attr.name: data[attr.name]
        for attr in fields(component)
        if attr.name in data
    }
def load_query(
    query: str, fault_tolerant: bool = False
) -> Tuple[Optional[Dict], Optional[str]]:
    """Parse a JSON string and return the parsed object.

    If parsing fails and ``fault_tolerant`` is True, returns ``(None, message)``
    instead of raising.

    :param query: The JSON string to parse.
    :param fault_tolerant: when True, swallow parse errors and report them via
        the returned message instead of raising.
    :return: A tuple containing the parsed object or None and an error message
        or None.

    Exceptions:
        json.JSONDecodeError: If the input is not a valid JSON string and
            ``fault_tolerant`` is False.
    """
    try:
        return json.loads(query), None
    except json.JSONDecodeError as e:
        if not fault_tolerant:
            raise
        # Bug fix: the previous message embedded a stray '"' and raw source
        # indentation/newline; emit a clean single-line message instead.
        return (
            None,
            f"Input must be a valid JSON. Got the following error: {str(e)}. "
            "Please reformat and try again.",
        )
def fetch_first_id(data: dict, key: str) -> Optional[int]:
    """Return the id of the first entry under ``key``, or None if absent/empty.

    Warns when several entries are present, since only the first one is used.
    """
    if key not in data or len(data[key]) == 0:
        return None
    entries = data[key]
    if len(entries) > 1:
        warnings.warn(f"Found multiple {key}: {data[key]}. Defaulting to first.")
    return entries[0]["id"]
def fetch_data(url: str, access_token: str, query: Optional[dict] = None) -> dict:
    """GET ``url`` (authorized via ``access_token``) and return the JSON body.

    Raises ``requests.HTTPError`` for non-2xx responses.
    """
    response = requests.get(
        url, headers={"Authorization": access_token}, params=query
    )
    response.raise_for_status()
    return response.json()
def fetch_team_id(access_token: str) -> Optional[int]:
    """Fetch the id of the user's first team (None when there is none)."""
    data = fetch_data(f"{DEFAULT_URL}/team", access_token)
    return fetch_first_id(data, "teams")
def fetch_space_id(team_id: int, access_token: str) -> Optional[int]:
    """Fetch the id of the team's first non-archived space."""
    data = fetch_data(
        f"{DEFAULT_URL}/team/{team_id}/space",
        access_token,
        query={"archived": "false"},
    )
    return fetch_first_id(data, "spaces")
def fetch_folder_id(space_id: int, access_token: str) -> Optional[int]:
    """Fetch the id of the space's first non-archived folder."""
    data = fetch_data(
        f"{DEFAULT_URL}/space/{space_id}/folder",
        access_token,
        query={"archived": "false"},
    )
    return fetch_first_id(data, "folders")
def fetch_list_id(space_id: int, folder_id: int, access_token: str) -> Optional[int]:
    """Fetch the id of the first list, searching the folder when one is given."""
    endpoint = (
        f"{DEFAULT_URL}/folder/{folder_id}/list"
        if folder_id
        else f"{DEFAULT_URL}/space/{space_id}/list"
    )
    data = fetch_data(endpoint, access_token, query={"archived": "false"})
    # The structure to fetch list id differs based if its folderless:
    # folder responses may expose "id" at the top level, folderless responses
    # nest lists under the "lists" key.
    if folder_id and "id" in data:
        return data["id"]
    return fetch_first_id(data, "lists")
class ClickupAPIWrapper(BaseModel):
    """Wrapper for Clickup API.

    Stores an OAuth ``access_token`` plus the default ``team_id``,
    ``space_id``, ``folder_id`` and ``list_id`` that operations target.  All
    of these are filled in automatically by :meth:`validate_environment`
    when the model is constructed.
    """

    access_token: Optional[str] = None  # read from CLICKUP_ACCESS_TOKEN if unset
    team_id: Optional[str] = None  # resolved to the user's first team
    space_id: Optional[str] = None  # first space of the team
    folder_id: Optional[str] = None  # first folder of the space (may be None)
    list_id: Optional[str] = None  # first list of the folder (or space)
    model_config = ConfigDict(
        extra="forbid",
    )
    @classmethod
    def get_access_code_url(
        cls, oauth_client_id: str, redirect_uri: str = "https://google.com"
    ) -> str:
        """Get the URL to get an access code.

        :param oauth_client_id: the OAuth app's client id.
        :param redirect_uri: where ClickUp should redirect after authorization.
        :return: the authorization URL the user must visit to obtain a code.
        """
        url = f"https://app.clickup.com/api?client_id={oauth_client_id}"
        return f"{url}&redirect_uri={redirect_uri}"
    @classmethod
    def get_access_token(
        cls, oauth_client_id: str, oauth_client_secret: str, code: str
    ) -> Optional[str]:
        """Get the access token.

        Exchanges an OAuth authorization ``code`` for an access token.
        Returns None (after printing a hint) when the exchange fails.
        """
        url = f"{DEFAULT_URL}/oauth/token"
        params = {
            "client_id": oauth_client_id,
            "client_secret": oauth_client_secret,
            "code": code,
        }
        response = requests.post(url, params=params)
        data = response.json()
        if "access_token" not in data:
            print(f"Error: {data}")  # noqa: T201
            # ECODE OAUTH_014 is returned when the one-time code was already
            # redeemed, so point the user at a fresh authorization URL.
            if "ECODE" in data and data["ECODE"] == "OAUTH_014":
                url = ClickupAPIWrapper.get_access_code_url(oauth_client_id)
                print(  # noqa: T201
                    "You already used this code once. Generate a new one.",
                    f"Our best guess for the url to get a new code is:\n{url}",
                )
            return None
        return data["access_token"]
    @model_validator(mode="before")
    @classmethod
    def validate_environment(cls, values: Dict) -> Any:
        """Validate that api key and python package exists in environment.

        Resolves the access token (from ``values`` or the
        ``CLICKUP_ACCESS_TOKEN`` environment variable) and then discovers the
        default team/space/folder/list ids via live API calls.
        """
        values["access_token"] = get_from_dict_or_env(
            values, "access_token", "CLICKUP_ACCESS_TOKEN"
        )
        # Each lookup below performs a network request and keeps the first id.
        values["team_id"] = fetch_team_id(values["access_token"])
        values["space_id"] = fetch_space_id(values["team_id"], values["access_token"])
        values["folder_id"] = fetch_folder_id(
            values["space_id"], values["access_token"]
        )
        values["list_id"] = fetch_list_id(
            values["space_id"], values["folder_id"], values["access_token"]
        )
        return values
    def attempt_parse_teams(self, input_dict: dict) -> Dict[str, List[dict]]:
        """Parse appropriate content from the list of teams.

        Teams that fail to parse are skipped with a warning rather than
        aborting the whole call.
        """
        parsed_teams: Dict[str, List[dict]] = {"teams": []}
        for team in input_dict["teams"]:
            try:
                team = parse_dict_through_component(team, Team, fault_tolerant=False)
                parsed_teams["teams"].append(team)
            except Exception as e:
                warnings.warn(f"Error parsing a team {e}")
        return parsed_teams
    def get_headers(
        self,
    ) -> Mapping[str, Union[str, bytes]]:
        """Get the headers for the request.

        :raises TypeError: when no string access token is available.
        """
        if not isinstance(self.access_token, str):
            raise TypeError(f"Access Token: {self.access_token}, must be str.")
        headers = {
            "Authorization": str(self.access_token),
            "Content-Type": "application/json",
        }
        return headers
    def get_default_params(self) -> Dict:
        # Shared query params: exclude archived resources by default.
        return {"archived": "false"}
    def get_authorized_teams(self) -> Dict[Any, Any]:
        """Get all teams for the user."""
        url = f"{DEFAULT_URL}/team"
        response = requests.get(url, headers=self.get_headers())
        data = response.json()
        parsed_teams = self.attempt_parse_teams(data)
        return parsed_teams
    def get_folders(self) -> Dict:
        """
        Get all the folders for the team.

        NOTE(review): this requests the ``/team/{id}/space`` endpoint, so the
        raw response appears to contain spaces, not folders — confirm against
        the ClickUp API whether ``/space/{id}/folder`` was intended.
        """
        url = f"{DEFAULT_URL}/team/" + str(self.team_id) + "/space"
        params = self.get_default_params()
        response = requests.get(url, headers=self.get_headers(), params=params)
        return {"response": response}
    def get_task(self, query: str, fault_tolerant: bool = True) -> Dict:
        """
        Retrieve a specific task.

        ``query`` is a JSON string that must contain ``task_id``.
        """
        params, error = load_query(query, fault_tolerant=True)
        if params is None:
            return {"Error": error}
        url = f"{DEFAULT_URL}/task/{params['task_id']}"
        # ``params`` is repurposed below as the HTTP query string.
        params = {
            "custom_task_ids": "true",
            "team_id": self.team_id,
            "include_subtasks": "true",
        }
        response = requests.get(url, headers=self.get_headers(), params=params)
        data = response.json()
        parsed_task = parse_dict_through_component(
            data, Task, fault_tolerant=fault_tolerant
        )
        return parsed_task
    def get_lists(self) -> Dict:
        """
        Get all available lists (in the default folder).
        """
        url = f"{DEFAULT_URL}/folder/{self.folder_id}/list"
        params = self.get_default_params()
        response = requests.get(url, headers=self.get_headers(), params=params)
        return {"response": response}
    def query_tasks(self, query: str) -> Dict:
        """
        Query tasks that match certain fields.

        ``query`` is a JSON string that must contain ``list_id``.
        """
        params, error = load_query(query, fault_tolerant=True)
        if params is None:
            return {"Error": error}
        url = f"{DEFAULT_URL}/list/{params['list_id']}/task"
        params = self.get_default_params()
        response = requests.get(url, headers=self.get_headers(), params=params)
        return {"response": response}
    def get_spaces(self) -> Dict:
        """
        Get all spaces for the team.

        Note: only the first space of the response is parsed by ``Space``.
        """
        url = f"{DEFAULT_URL}/team/{self.team_id}/space"
        response = requests.get(
            url, headers=self.get_headers(), params=self.get_default_params()
        )
        data = response.json()
        parsed_spaces = parse_dict_through_component(data, Space, fault_tolerant=True)
        return parsed_spaces
    def get_task_attribute(self, query: str) -> Dict:
        """
        Update an attribute of a specified task.

        NOTE(review): despite the summary above, this method only *reads* the
        attribute named in ``query`` — it performs no update.
        """
        task = self.get_task(query, fault_tolerant=True)
        params, error = load_query(query, fault_tolerant=True)
        if not isinstance(params, dict):
            return {"Error": error}
        if params["attribute_name"] not in task:
            return {
                "Error": f"""attribute_name = {params['attribute_name']} was not
            found in task keys {task.keys()}. Please call again with one of the key names."""
            }
        return {params["attribute_name"]: task[params["attribute_name"]]}
    def update_task(self, query: str) -> Dict:
        """
        Update an attribute of a specified task.

        ``query`` must contain ``task_id``, ``attribute_name`` and ``value``.
        """
        query_dict, error = load_query(query, fault_tolerant=True)
        if query_dict is None:
            return {"Error": error}
        url = f"{DEFAULT_URL}/task/{query_dict['task_id']}"
        params = {
            "custom_task_ids": "true",
            "team_id": self.team_id,
            "include_subtasks": "true",
        }
        headers = self.get_headers()
        payload = {query_dict["attribute_name"]: query_dict["value"]}
        response = requests.put(url, headers=headers, params=params, json=payload)
        return {"response": response}
    def update_task_assignees(self, query: str) -> Dict:
        """
        Add or remove assignees of a specified task.

        ``query`` must contain ``task_id``, ``operation`` ('add' or 'rem') and
        ``users`` (a list of integer user ids).
        """
        query_dict, error = load_query(query, fault_tolerant=True)
        if query_dict is None:
            return {"Error": error}
        for user in query_dict["users"]:
            if not isinstance(user, int):
                return {
                    "Error": f"""All users must be integers, not strings!
                    "Got user {user} if type {type(user)}"""
                }
        url = f"{DEFAULT_URL}/task/{query_dict['task_id']}"
        headers = self.get_headers()
        if query_dict["operation"] == "add":
            assigne_payload = {"add": query_dict["users"], "rem": []}
        elif query_dict["operation"] == "rem":
            assigne_payload = {"add": [], "rem": query_dict["users"]}
        else:
            raise ValueError(
                f"Invalid operation ({query_dict['operation']}). ",
                "Valid options ['add', 'rem'].",
            )
        params = {
            "custom_task_ids": "true",
            "team_id": self.team_id,
            "include_subtasks": "true",
        }
        payload = {"assignees": assigne_payload}
        response = requests.put(url, headers=headers, params=params, json=payload)
        return {"response": response}
    def create_task(self, query: str) -> Dict:
        """
        Creates a new task.

        Only the keys of ``query`` that match ``Task`` dataclass fields are
        forwarded to the API.
        """
        query_dict, error = load_query(query, fault_tolerant=True)
        if query_dict is None:
            return {"Error": error}
        list_id = self.list_id
        url = f"{DEFAULT_URL}/list/{list_id}/task"
        params = {"custom_task_ids": "true", "team_id": self.team_id}
        payload = extract_dict_elements_from_component_fields(query_dict, Task)
        headers = self.get_headers()
        response = requests.post(url, json=payload, headers=headers, params=params)
        data: Dict = response.json()
        return parse_dict_through_component(data, Task, fault_tolerant=True)
    def create_list(self, query: str) -> Dict:
        """
        Creates a new list.

        On success the wrapper's default ``list_id`` is switched to the new
        list.
        """
        query_dict, error = load_query(query, fault_tolerant=True)
        if query_dict is None:
            return {"Error": error}
        # Default to using folder as location if it exists.
        # If not, fall back to using the space.
        location = self.folder_id if self.folder_id else self.space_id
        # NOTE(review): the folderless branch still posts to ``/folder/...`` —
        # confirm whether a ``/space/{id}/list`` URL is intended in that case.
        url = f"{DEFAULT_URL}/folder/{location}/list"
        payload = extract_dict_elements_from_component_fields(query_dict, Task)
        headers = self.get_headers()
        response = requests.post(url, json=payload, headers=headers)
        data = response.json()
        parsed_list = parse_dict_through_component(data, CUList, fault_tolerant=True)
        # set list id to new list
        if "id" in parsed_list:
            self.list_id = parsed_list["id"]
        return parsed_list
    def create_folder(self, query: str) -> Dict:
        """
        Creates a new folder.

        ``query`` must contain ``name``.
        """
        query_dict, error = load_query(query, fault_tolerant=True)
        if query_dict is None:
            return {"Error": error}
        space_id = self.space_id
        url = f"{DEFAULT_URL}/space/{space_id}/folder"
        payload = {
            "name": query_dict["name"],
        }
        headers = self.get_headers()
        response = requests.post(url, json=payload, headers=headers)
        data = response.json()
        # NOTE(review): the new *folder* id is stored into ``self.list_id`` —
        # verify that ``self.folder_id`` was not intended here.
        if "id" in data:
            self.list_id = data["id"]
        return data
    def run(self, mode: str, query: str) -> str:
        """Run the API.

        Dispatches ``mode`` to the matching method and JSON-encodes the
        result (falling back to ``str`` when it is not JSON-serializable).
        """
        if mode == "get_task":
            output = self.get_task(query)
        elif mode == "get_task_attribute":
            output = self.get_task_attribute(query)
        elif mode == "get_teams":
            output = self.get_authorized_teams()
        elif mode == "create_task":
            output = self.create_task(query)
        elif mode == "create_list":
            output = self.create_list(query)
        elif mode == "create_folder":
            output = self.create_folder(query)
        elif mode == "get_lists":
            output = self.get_lists()
        elif mode == "get_folders":
            output = self.get_folders()
        elif mode == "get_spaces":
            output = self.get_spaces()
        elif mode == "update_task":
            output = self.update_task(query)
        elif mode == "update_task_assignees":
            output = self.update_task_assignees(query)
        else:
            output = {"ModeError": f"Got unexpected mode {mode}."}
        try:
            return json.dumps(output)
        except Exception:
            return str(output)
|
langchain-aiREPO_NAMElangchainPATH_START.@langchain_extracted@langchain-master@libs@community@langchain_community@utilities@clickup.py@.PATH_END.py
|
{
"filename": "OConnor_2013.ipynb",
"repo_name": "SNEWS2/snewpy",
"repo_path": "snewpy_extracted/snewpy-main/doc/nb/ccsn/OConnor_2013.ipynb",
"type": "Jupyter Notebook"
}
|
# O'Connor 2013 Models
Data from O'Connor & Ott 2013, 32 progenitors (Woosley and Heger 2007) and 2 EOS (LS220 and HShen) for 500 ms post bounce in spherical symmetry (no explosions)
Reference: O'Connor and Ott ApJ 762 126 2013
- [doi:10.1088/0004-637X/762/2/126](https://doi.org/10.1088/0004-637X/762/2/126)
- [arXiv:1207.1100](https://arxiv.org/abs/1207.1100)
```python
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
from astropy import units as u
from snewpy.neutrino import Flavor, MassHierarchy
from snewpy.models.ccsn import OConnor_2013
from snewpy.flavor_transformation import NoTransformation, AdiabaticMSW, ThreeFlavorDecoherence
mpl.rc('font', size=16)
%matplotlib inline
```
## Initialize Models
To start, let’s see what progenitors are available for the `OConnor_2013` model. We can use the `param` property to view all physics parameters and their possible values:
```python
OConnor_2013.param
```
Quite a lot of choice there! Let’s initialise all of these progenitors and compare their $\nu_e$ luminosities. If this is the first time you’re using a progenitor, snewpy will automatically download the required data files for you.
```python
models = {}
for mass in OConnor_2013.param['progenitor_mass']:
models[int(mass.value)] = OConnor_2013(progenitor_mass=mass, eos='LS220')
for model in models.values():
plt.plot(model.time, model.luminosity[Flavor.NU_E]/1e51, 'C0', lw=1)
plt.xlabel(r'$t$ [s]')
plt.ylabel(r'luminosity [foe s$^{-1}$]');
```
Finally, let’s plot the luminosity of different neutrino flavors for two of these progenitors. (Note that the `OConnor_2013` simulations didn’t distinguish between $\nu_x$ and $\bar{\nu}_x$, so both flavors have the same luminosity.)
```python
fig, axes = plt.subplots(1, 2, figsize=(12, 5), sharex=True, sharey=True, tight_layout=True)
for i, model in enumerate([models[12], models[20]]):
ax = axes[i]
for flavor in Flavor:
ax.plot(model.time, model.luminosity[flavor]/1e51, # Report luminosity in units foe/s
label=flavor.to_tex(),
color='C0' if flavor.is_electron else 'C1',
ls='-' if flavor.is_neutrino else ':',
lw=2)
ax.set(xlim=(-0.05, 0.51),
xlabel=r'$t-t_{\rm bounce}$ [s]',
title=r'{}: {} $M_\odot$'.format(model.metadata['EOS'], model.metadata['Progenitor mass'].value))
ax.grid()
ax.legend(loc='upper right', ncol=2, fontsize=18)
axes[0].set(ylabel=r'luminosity [foe s$^{-1}$]');
```
## Initial and Oscillated Spectra
Plot the neutrino spectra at the source and after the requested flavor transformation has been applied.
### Adiabatic MSW Flavor Transformation: Normal mass ordering
```python
# Adiabatic MSW effect. NMO is used by default.
xform_nmo = AdiabaticMSW()
# Energy array and time to compute spectra.
# Note that any convenient units can be used and the calculation will remain internally consistent.
E = np.linspace(0,100,201) * u.MeV
t = 400*u.ms
ispec = model.get_initial_spectra(t, E)
ospec_nmo = model.get_transformed_spectra(t, E, xform_nmo)
```
```python
fig, axes = plt.subplots(1,2, figsize=(12,5), sharex=True, sharey=True, tight_layout=True)
for i, spec in enumerate([ispec, ospec_nmo]):
ax = axes[i]
for flavor in Flavor:
ax.plot(E, spec[flavor],
label=flavor.to_tex(),
color='C0' if flavor.is_electron else 'C1',
ls='-' if flavor.is_neutrino else ':', lw=2,
alpha=0.7)
ax.set(xlabel=r'$E$ [{}]'.format(E.unit),
title='Initial Spectra: $t = ${:.1f}'.format(t) if i==0 else 'Oscillated Spectra: $t = ${:.1f}'.format(t))
ax.grid()
ax.legend(loc='upper right', ncol=2, fontsize=16)
ax = axes[0]
ax.set(ylabel=r'flux [erg$^{-1}$ s$^{-1}$]')
fig.tight_layout();
```
|
SNEWS2REPO_NAMEsnewpyPATH_START.@snewpy_extracted@snewpy-main@doc@nb@ccsn@OConnor_2013.ipynb@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "fchollet/keras",
"repo_path": "keras_extracted/keras-master/keras/api/_tf_keras/keras/preprocessing/image/__init__.py",
"type": "Python"
}
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.legacy.preprocessing.image import DirectoryIterator
from keras.src.legacy.preprocessing.image import ImageDataGenerator
from keras.src.legacy.preprocessing.image import Iterator
from keras.src.legacy.preprocessing.image import NumpyArrayIterator
from keras.src.legacy.preprocessing.image import apply_affine_transform
from keras.src.legacy.preprocessing.image import apply_brightness_shift
from keras.src.legacy.preprocessing.image import apply_channel_shift
from keras.src.legacy.preprocessing.image import random_brightness
from keras.src.legacy.preprocessing.image import random_channel_shift
from keras.src.legacy.preprocessing.image import random_rotation
from keras.src.legacy.preprocessing.image import random_shear
from keras.src.legacy.preprocessing.image import random_shift
from keras.src.legacy.preprocessing.image import random_zoom
from keras.src.utils.image_utils import array_to_img
from keras.src.utils.image_utils import img_to_array
from keras.src.utils.image_utils import load_img
from keras.src.utils.image_utils import save_img
from keras.src.utils.image_utils import smart_resize
|
fcholletREPO_NAMEkerasPATH_START.@keras_extracted@keras-master@keras@api@_tf_keras@keras@preprocessing@image@__init__.py@.PATH_END.py
|
{
"filename": "lbl_wrap.py",
"repo_name": "njcuk9999/lbl",
"repo_path": "lbl_extracted/lbl-main/lbl/recipes/lbl_wrap.py",
"type": "Python"
}
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
# CODE NAME HERE
# CODE DESCRIPTION HERE
Created on 2021-03-19
@author: cook
"""
import sys
from lbl.core import base
from lbl.core import base_classes
from lbl.core import io
from lbl.recipes import lbl_compile
from lbl.recipes import lbl_compute
from lbl.recipes import lbl_mask
from lbl.recipes import lbl_telluclean
from lbl.recipes import lbl_template
from lbl.recipes import lbl_reset
from lbl.resources import lbl_misc
# =============================================================================
# Define variables
# =============================================================================
__NAME__ = 'lbl_wrap.py'
__STRNAME__ = 'LBL Wrapper'
__version__ = base.__version__
__date__ = base.__date__
__authors__ = base.__authors__
# Description of recipe
DESCRIPTION_MASK = 'Use this code to wrap around lbl'
# get the logger
log = io.log
# define keys to remove from run params
# (these keys are consumed directly by main() below and must not be passed
#  through to the individual recipes as extra keyword arguments)
REMOVE_KEYS = [ # core
               'INSTRUMENT', 'DATA_DIR', 'DATA_TYPES', 'DATA_SOURCE',
               # science keys
               'OBJECT_SCIENCE', 'OBJECT_TEMPLATE', 'OBJECT_TEFF',
               'BLAZE_CORRECTED', 'BLAZE_FILE',
               # run keys
               'RUN_LBL_RESET',
               'RUN_LBL_TELLUCLEAN', 'RUN_LBL_TEMPLATE', 'RUN_LBL_MASK',
               'RUN_LBL_COMPUTE', 'RUN_LBL_COMPILE',
               # skip keys
               'SKIP_LBL_TELLUCLEAN', 'SKIP_LBL_TEMPLATE', 'SKIP_LBL_MASK',
               'SKIP_LBL_COMPUTE', 'SKIP_LBL_COMPILE',
               # general keys already used
               'SKIP_DONE', 'OVERWRITE', 'TELLUCLEAN_USE_TEMPLATE']
# Define the default values (every RUN_*/SKIP_* switch is off by default)
# NOTE(review): 'SKIP_LBL_TELLUCLEAN' appears in REMOVE_KEYS above but has no
# default here — confirm whether a default was intended.
DEFAULTS = dict()
DEFAULTS['RUN_LBL_RESET'] = False
DEFAULTS['RUN_LBL_TELLUCLEAN'] = False
DEFAULTS['RUN_LBL_TEMPLATE'] = False
DEFAULTS['RUN_LBL_MASK'] = False
DEFAULTS['RUN_LBL_COMPUTE'] = False
DEFAULTS['RUN_LBL_COMPILE'] = False
DEFAULTS['SKIP_LBL_TEMPLATE'] = False
DEFAULTS['SKIP_LBL_MASK'] = False
DEFAULTS['SKIP_LBL_COMPUTE'] = False
DEFAULTS['SKIP_LBL_COMPILE'] = False
# =============================================================================
# Define functions
# =============================================================================
def main(runparams: dict) -> None:
    """
    Wrapper around __main__ recipe code (deals with errors and loads instrument
    profile)

    For each science target this runs, in order and as enabled by the
    RUN_LBL_* switches in ``runparams``: reset, telluric cleaning (with a
    bootstrap template built from pre-cleaned data), template creation,
    mask creation, lbl_compute and lbl_compile.

    :param runparams: dict, parameters to pass to lbl recipes

    :return: None
    """
    # reset the sys.argv (arguments from command line aren't used)
    sys.argv = [__NAME__]
    # get key parameters
    instrument = lbl_misc.check_runparams(runparams, 'INSTRUMENT')
    data_dir = lbl_misc.check_runparams(runparams, 'DATA_DIR')
    data_source = lbl_misc.check_runparams(runparams, 'DATA_SOURCE')
    data_types = lbl_misc.check_runparams(runparams, 'DATA_TYPES')
    object_sciences = lbl_misc.check_runparams(runparams, 'OBJECT_SCIENCE')
    object_templates = lbl_misc.check_runparams(runparams, 'OBJECT_TEMPLATE')
    object_teffs = lbl_misc.check_runparams(runparams, 'OBJECT_TEFF')
    # blaze keys are optional and may be absent
    blaze_corrs = lbl_misc.check_runparams(runparams, 'BLAZE_CORRECTED',
                                           required=False)
    blaze_files = lbl_misc.check_runparams(runparams, 'BLAZE_FILE',
                                           required=False)
    # -------------------------------------------------------------------------
    # push other keyword arguments into keyword arguments dictionary
    # (everything not in REMOVE_KEYS is forwarded verbatim to each recipe)
    keyword_args = dict()
    for key in runparams:
        if key not in REMOVE_KEYS:
            keyword_args[key] = runparams[key]
    # make sure we have defaults if key not in runparams
    for key in DEFAULTS:
        if key not in runparams:
            runparams[key] = DEFAULTS[key]
    # -------------------------------------------------------------------------
    # sanity checks on runparams (certain things should not be set together)
    if runparams['RUN_LBL_MASK'] and 'MASK_FILE' in runparams:
        if runparams['MASK_FILE'] not in [None, 'None', '', 'Null']:
            emsg = ('LBL_WRAP ERROR: Cannot have RUN_LBL_MASK=True and '
                    'MASK_FILE={0} (Must be unset)')
            raise base_classes.LblException(emsg.format(runparams['MASK_FILE']))
    # -------------------------------------------------------------------------
    # mark the expected length if a list
    olen = len(object_sciences)
    # loop around all files
    for num in range(olen):
        # get the science target
        object_science = object_sciences[num]
        # print wrapper splash
        lbl_misc.splash(name=__STRNAME__, instrument=instrument,
                        plogger=log)
        # print iteration we are running
        msg = 'Running [{0}] iteration {1}/{2}'
        margs = [object_science, num + 1, olen]
        log.info(msg.format(*margs))
        # wrap check args
        wkargs = dict(iteration=num, length=olen)
        # get this iterations values (and check if they are a list of matching
        # length to object_sciences) or just a single value
        data_type = lbl_misc.wraplistcheck(data_types,
                                           'DATA_TYPES', **wkargs)
        object_template = lbl_misc.wraplistcheck(object_templates,
                                                 'OBJECT_TEMPLATE', **wkargs)
        object_teff = lbl_misc.wraplistcheck(object_teffs,
                                             'OBJECT_TEFF', **wkargs)
        blaze_corr = lbl_misc.wraplistcheck(blaze_corrs,
                                            'BLAZE_CORRECTED', **wkargs)
        blaze_file = lbl_misc.wraplistcheck(blaze_files,
                                            'BLAZE_FILE', **wkargs)
        # ---------------------------------------------------------------------
        # optionally wipe previous products before anything else runs
        if runparams['RUN_LBL_RESET']:
            lbl_reset.main(instrument=instrument, data_dir=data_dir,
                           data_source=data_source, **keyword_args)
        # ---------------------------------------------------------------------
        # run all pre-cleaning steps (two-pass telluric cleaning: clean
        # without a template, build a template from the cleaned data, then
        # re-clean the raw data using that template)
        if runparams['RUN_LBL_TELLUCLEAN'] and data_type == 'SCIENCE':
            # run telluric cleaning (without template)
            lbl_telluclean.main(instrument=instrument, data_dir=data_dir,
                                data_source=data_source,
                                data_type=data_type,
                                object_science=object_science,
                                object_template=object_template,
                                skip_done=False,
                                telluclean_use_template=False,
                                blaze_corrected=blaze_corr,
                                blaze_file=blaze_file,
                                **keyword_args)
            # update template name (telluric-cleaned data gets a '_tc' suffix)
            if not object_template.endswith('_tc'):
                object_template = object_template + '_tc'
            # make the template (if not present)
            lbl_template.main(instrument=instrument, data_dir=data_dir,
                              data_source=data_source,
                              data_type=data_type,
                              object_science=object_science + '_tc',
                              object_template=object_template,
                              blaze_corrected=blaze_corr,
                              blaze_file=blaze_file,
                              overwrite=True,
                              **keyword_args)
            # re-run tellu clean with uncorrected science data now using our
            # template (made from cleaned science data)
            lbl_telluclean.main(instrument=instrument, data_dir=data_dir,
                                data_source=data_source,
                                data_type=data_type,
                                object_science=object_science,
                                object_template=object_template,
                                skip_done=False,
                                telluclean_use_template=True,
                                blaze_corrected=blaze_corr,
                                blaze_file=blaze_file,
                                **keyword_args)
            # update object name (all later steps use the cleaned products)
            if not object_science.endswith('_tc'):
                object_science = object_science + '_tc'
        # ---------------------------------------------------------------------
        # make the template (if not present)
        if runparams['RUN_LBL_TEMPLATE']:
            # Must produce the template for the science data and the template
            # we use a set to do this (only runs once if they are the same)
            for _obj_template in {object_science, object_template}:
                lbl_template.main(instrument=instrument, data_dir=data_dir,
                                  data_source=data_source,
                                  data_type=data_type,
                                  object_science=object_science,
                                  object_template=_obj_template,
                                  blaze_corrected=blaze_corr,
                                  blaze_file=blaze_file,
                                  overwrite=not runparams['SKIP_LBL_TEMPLATE'],
                                  **keyword_args)
        # ---------------------------------------------------------------------
        # make the mask (if not present)
        if runparams['RUN_LBL_MASK']:
            # Must produce the mask for the science data and the template
            # we use a set to do this (only runs once if they are the same)
            for _obj_template in {object_science, object_template}:
                lbl_mask.main(instrument=instrument, data_dir=data_dir,
                              data_source=data_source,
                              data_type=data_type,
                              object_science=object_science,
                              object_template=_obj_template,
                              object_teff=object_teff,
                              overwrite=not runparams['SKIP_LBL_MASK'],
                              **keyword_args)
        # ---------------------------------------------------------------------
        # # make the noise model (if not present)
        # if runparams['RUN_LBL_NOISE']:
        #     lbl_noise(instrument=instrument, data_dir=data_dir,
        #               object_science=object_science,
        #               object_template=object_template,
        #               **keyword_args)
        # ---------------------------------------------------------------------
        # run the compute code (per-line velocity measurements)
        if runparams['RUN_LBL_COMPUTE']:
            lbl_compute.main(instrument=instrument, data_dir=data_dir,
                             data_source=data_source,
                             data_type=data_type,
                             object_science=object_science,
                             object_template=object_template,
                             blaze_corrected=blaze_corr,
                             blaze_file=blaze_file,
                             skip_done=runparams['SKIP_LBL_COMPUTE'],
                             **keyword_args)
        # ---------------------------------------------------------------------
        # run the compile code (combine per-line results into final products)
        if runparams['RUN_LBL_COMPILE']:
            lbl_compile.main(instrument=instrument, data_dir=data_dir,
                             data_source=data_source,
                             data_type=data_type,
                             object_science=object_science,
                             object_template=object_template,
                             skip_done=runparams['SKIP_LBL_COMPILE'],
                             **keyword_args)
# =============================================================================
# Start of code
# =============================================================================
if __name__ == "__main__":
# set up parameters
rparams = dict()
rparams['INSTRUMENT'] = 'SPIROU'
rparams['DATA_DIR'] = '/data/spirou/data/lbl/'
# science criteria
rparams['DATA_TYPES'] = ['FP', 'SCIENCE']
rparams['OBJECT_SCIENCE'] = ['FP', 'GL699']
rparams['OBJECT_TEMPLATE'] = ['FP', 'GL699']
rparams['OBJECT_TEFF'] = [300, 3224]
# what to run
rparams['RUN_LBL_TELLUCLEAN'] = True
rparams['RUN_LBL_TEMPLATE'] = True
rparams['RUN_LBL_MASK'] = True
rparams['RUN_LBL_COMPUTE'] = True
rparams['RUN_LBL_COMPILE'] = True
# whether to skip done files
rparams['SKIP_LBL_TEMPLATE'] = True
rparams['SKIP_LBL_MASK'] = True
rparams['SKIP_LBL_COMPUTE'] = True
rparams['SKIP_LBL_COMPILE'] = True
# run main
main(rparams)
# =============================================================================
# End of code
# =============================================================================
|
njcuk9999REPO_NAMElblPATH_START.@lbl_extracted@lbl-main@lbl@recipes@lbl_wrap.py@.PATH_END.py
|
{
"filename": "style_widget.py",
"repo_name": "spacetelescope/jdaviz",
"repo_path": "jdaviz_extracted/jdaviz-main/jdaviz/core/style_widget.py",
"type": "Python"
}
|
from ipyvuetify import VuetifyTemplate
__all__ = ['StyleWidget']
class StyleWidget(VuetifyTemplate):
    """Vuetify template widget whose template is supplied via ``template_file``.

    ``template_file`` is presumably consumed by the VuetifyTemplate machinery
    during initialization — confirm against ipyvuetify's docs.
    """

    def __init__(self, template_file, *args, **kwargs):
        # Store the template path before the underlying widget initializes.
        self.template_file = template_file
        super().__init__(*args, **kwargs)
|
spacetelescopeREPO_NAMEjdavizPATH_START.@jdaviz_extracted@jdaviz-main@jdaviz@core@style_widget.py@.PATH_END.py
|
{
"filename": "_legendgrouptitle.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/box/_legendgrouptitle.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class LegendgrouptitleValidator(_plotly_utils.basevalidators.CompoundValidator):
    """Validator for the ``box.legendgrouptitle`` compound property.

    Auto-generated plotly validator: delegates to ``CompoundValidator`` with
    the ``Legendgrouptitle`` data class and its property documentation.
    """

    def __init__(self, plotly_name="legendgrouptitle", parent_name="box", **kwargs):
        super(LegendgrouptitleValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            # Name of the generated data class this validator coerces to.
            data_class_str=kwargs.pop("data_class_str", "Legendgrouptitle"),
            data_docs=kwargs.pop(
                "data_docs",
                """
            font
                Sets this legend group's title font.
            text
                Sets the title of the legend group.
""",
            ),
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@box@_legendgrouptitle.py@.PATH_END.py
|
{
"filename": "tuningOptimization.py",
"repo_name": "tjlcbakx/redshift-search-graphs",
"repo_path": "redshift-search-graphs_extracted/redshift-search-graphs-master/tuningOptimization.py",
"type": "Python"
}
|
##-----------------------------------------
## Code written and edited by Tom Bakx
## tjlcbakx@gmail.com
##-----------------------------------------
##-----------------------------------------
## Header imports, colours
##-----------------------------------------
import numpy as np
import matplotlib.pyplot as plt
import os
import os.path
from random import random
import glob
import RSG
from tqdm import tqdm
# Plot colour palette (iOS-style hex colours).
orange = '#ff9500'#(1,0.584,0)
blue = '#007aff' #(0,.478,1) blue
green = '#4cd964'
red = '#ff3b30'
grey = '#8e8e93' #(142./255,142./255,147./255)
# Based on the photometric redshifts from Bakx et al. 2020.
z_herbs = np.array([2.19186859, 2.79758652, 3.68950118, 3.00452574, 3.67224612,2.08343975, 2.88648255, 2.53047932, 3.81421492, 2.39080641,2.36160175, 2.2931653 , 4.42502717, 4.04977082, 2.53546913,4.13125973, 3.13316123, 2.3795508 , 3.69238066, 1.86626077,3.52627478, 3.3827414 , 3.99953851, 2.55417985, 3.57625569,2.46946344, 4.89886738, 4.50395695, 2.6880802 , 3.25011581,3.22604547, 2.96665631, 3.11239943, 2.81692217, 2.72178991,3.43768265, 2.44590522, 3.77647762, 2.96976767, 2.94884464,4.27115722, 3.04732141, 3.94939743, 2.11996678, 1.91303853,2.46927962, 2.50622804, 2.31568987, 3.69342457, 3.45367575,2.2264977 , 4.65660711, 2.04674319, 3.76789787, 2.76520355,3.57155072, 3.10078595, 3.18320996, 2.92288049, 3.81079303,4.26121343, 2.3989643 , 2.41466162, 4.61230835, 2.73367044,2.43845409, 3.82073557, 2.19379705, 2.13303289, 2.24386634,3.29546989, 3.77100117, 2.90334917, 2.83503374, 2.38901629,2.49421993, 2.90944992, 3.42570579, 2.84975621, 2.13924181,3.30741637, 2.39342267, 4.67566701, 2.63146934, 2.75155332,3.50481908, 2.32732294, 2.89352825, 4.14979873, 3.98621298,1.96003978, 5.0961295 , 3.08676225, 2.66082447, 4.28589441,2.62855066, 2.67423963, 3.65753391, 2.59634375, 2.47594027,2.25725126, 3.60591005, 2.36000645, 1.95944828, 3.16654878,2.10909085, 2.50682954, 3.18667822, 2.32348022, 3.73948054,2.38252379, 3.70236025, 3.2406187 , 3.00140975, 2.35153775,4.30719179, 3.54916876, 3.57168274, 3.09897795, 3.54828021,3.24261403, 2.99892422, 2.38770728, 2.01236366, 3.17459574,3.22992794, 2.42104138, 2.4550749 , 4.0395271 , 2.02067151,2.88503158, 2.87131708, 3.12520377, 3.7905346 , 3.09877938,3.6669109 , 2.87604818, 2.07527395, 3.32532197, 2.46835884,2.04360009, 3.1394963 , 2.78864932, 2.45022839, 3.78356115,2.10124175, 2.56819065, 2.05691913, 2.33645675, 4.39242384,3.36807314, 3.80526693, 2.17773557, 3.31405298, 3.87342847,2.38403916, 3.21439101, 3.45789572, 2.74976995, 4.40158064,3.53196084, 3.23785403, 2.41019031, 2.82897604, 2.91388164,4.40869455, 3.62370346, 
3.96261118, 2.70547747, 4.17878825,3.57781063, 2.54957505, 3.08914272, 2.28816871, 4.01362479,3.25938825, 4.7555208 , 2.58757075, 4.07211012, 2.6189511 ,2.58379855, 2.91412126, 2.92064276, 2.71343349, 3.6898582 ,3.95477604, 2.19844396, 2.96683813, 2.53070078, 2.97237641,3.66703447, 2.3223793 , 2.86341401, 2.75122254, 3.02624635,2.77075485, 2.96771817, 3.35284708, 1.97130142, 2.09816312,3.80344085, 2.07315629, 1.92196618, 4.09351606, 2.97212521,2.81690854, 2.58692162, 3.54605495, 2.87971117])
# Smooth the redshift sample: replicate it scaleFactor times with Gaussian
# scatter of width blendFactor, approximating the photo-z uncertainty.
blendFactor = 0.13*3
scaleFactor = 20
z_herbs_smooth = np.zeros([len(z_herbs)*scaleFactor])
for i in range(scaleFactor):
    z_herbs_smooth[i*len(z_herbs):(i+1)*len(z_herbs)] = z_herbs + blendFactor*np.random.normal(size=len(z_herbs))
# Extreme (lowest/highest) tunings of ALMA bands 3 and 4.
lower_tuning3 = RSG.giveALMA(3,0)
upper_tuning3 = RSG.giveALMA(3,1)
lower_tuning4 = RSG.giveALMA(4,0)
upper_tuning4 = RSG.giveALMA(4,1)
# We will want to generate two ladders,
# so we calculate the dynamic range
# leaving 3.75 x 2 at the top-frequency part
dFreq3 = upper_tuning3[0][0]-lower_tuning3[0][0]
dynamicRange3 = 1 - (3.75 * 2 / dFreq3)
dFreq4 = upper_tuning4[0][0]-lower_tuning4[0][0]
dynamicRange4 = 1 - (3.75 * 2 / dFreq4)
# Grid search over band-3 x band-4 tuning positions.
nrOfIterations = 100
robustFraction = 0  # best figure of merit found so far
qualityArray = np.zeros([nrOfIterations,nrOfIterations])
multipleLines = np.zeros([nrOfIterations,nrOfIterations])
overlapFraction = 0.05  # GHz overlap removed between adjacent spectral windows
# print('(no_lines,one_line,two_lines,more_lines,robust_single_lines,non_robust_double_lines)')
for i in (range(nrOfIterations)):
    print(i)
    # Band-3 tuning shifted by a fraction of its dynamic range; extend each
    # sideband list with four extra 3.75 GHz spectral windows (minus the
    # small overlap) to build the second frequency ladder.
    tuning3 = RSG.giveALMA(3,dynamicRange3*i/nrOfIterations)
    tuning3[0].append(tuning3[0][-2]+3.75-overlapFraction)
    tuning3[0].append(tuning3[0][-2]+3.75-overlapFraction)
    tuning3[0].append(tuning3[0][-2]+3.75-overlapFraction)
    tuning3[0].append(tuning3[0][-2]+3.75-overlapFraction)
    tuning3[1].append(tuning3[1][-2]+3.75-overlapFraction)
    tuning3[1].append(tuning3[1][-2]+3.75-overlapFraction)
    tuning3[1].append(tuning3[1][-2]+3.75-overlapFraction)
    tuning3[1].append(tuning3[1][-2]+3.75-overlapFraction)
    for j in range(nrOfIterations):
        # Same construction for band 4.
        tuning4 = RSG.giveALMA(4,dynamicRange4*j/nrOfIterations)
        tuning4[0].append(tuning4[0][-2]+3.75-overlapFraction)
        tuning4[0].append(tuning4[0][-2]+3.75-overlapFraction)
        tuning4[0].append(tuning4[0][-2]+3.75-overlapFraction)
        tuning4[0].append(tuning4[0][-2]+3.75-overlapFraction)
        tuning4[1].append(tuning4[1][-2]+3.75-overlapFraction)
        tuning4[1].append(tuning4[1][-2]+3.75-overlapFraction)
        tuning4[1].append(tuning4[1][-2]+3.75-overlapFraction)
        tuning4[1].append(tuning4[1][-2]+3.75-overlapFraction)
        # Flatten both bands into single lower/upper frequency lists and
        # score this combined tuning against the smoothed redshift sample.
        loFreq = [item for sublist in [tuning3[0],tuning4[0]] for item in sublist]
        upFreq = [item for sublist in [tuning3[1],tuning4[1]] for item in sublist]
        tuningQuality = RSG.RSGquality(loFreq,upFreq,z_herbs_smooth,sigma_threshold=5,dzUncertainty=0.07,lin_arr_size=1000,includeCI=False)
        # Figure of merit: robust multi-line detections at full weight,
        # non-robust single/double detections at half weight.
        robustness = tuningQuality[2] + tuningQuality[3] - tuningQuality[5]*0.5 + tuningQuality[4] + (tuningQuality[1]-tuningQuality[4])*0.5
        qualityArray[i,j] = robustness
        multipleLines[i,j] = tuningQuality[2] + tuningQuality[3]
        if robustness > robustFraction:
            # New best tuning: remember it and report progress.
            robustFraction = robustness
            loFreqSave = loFreq
            upFreqSave = upFreq
            print(str(np.round(robustness*100,1))+'% robust + 0.5 x non-robust')
            print(loFreq)
            print(upFreq)
# Visualize the figure of merit over the 2-D tuning grid and mark the optimum.
plt.figure(figsize=(4.2,3.5))
c = plt.imshow(qualityArray.transpose(), cmap ='coolwarm', #vmin = qualityArray.min(), vmax = qualityArray.max(),
               extent =[lower_tuning3[0][0], upper_tuning3[0][0]-3.75*2 ,lower_tuning4[0][0], upper_tuning4[0][0]-3.75*2],
               aspect='auto',interpolation ='nearest', origin ='lower')
# plt.contour(multipleLines.transpose(),extent =[lower_tuning3[0][0], upper_tuning3[0][0]-3.75*2 ,lower_tuning4[0][0], upper_tuning4[0][0]-3.75*2],
#             colors=grey,levels=np.linspace(0,1,int(1/0.05)+1))
cbar = plt.colorbar(c)
cbar.set_ticks([0.85,0.9])
cbar.set_ticklabels(["0.85", "0.90"])
# cbar.set_label('Figure of merit')
# NOTE(review): index 6 presumably selects the first band-4 window of the
# saved best tuning — confirm against the length of RSG.giveALMA's output.
plt.scatter(loFreqSave[0],loFreqSave[6],marker='X',color='k',s=100,zorder=200)
plt.scatter(loFreqSave[0],loFreqSave[6],marker='X',color=plt.get_cmap('coolwarm')(255),s=20,zorder=200)
plt.ylabel('Band 4 Frequency [GHz]',fontsize=14)
plt.xlabel('Band 3 Frequency [GHz]',fontsize=14)
plt.yticks([125,130,135,140])
plt.xticks([84,87,90,93])
plt.tight_layout()
plt.savefig('band34_tuning.pdf')
plt.show()
|
tjlcbakxREPO_NAMEredshift-search-graphsPATH_START.@redshift-search-graphs_extracted@redshift-search-graphs-master@tuningOptimization.py@.PATH_END.py
|
{
"filename": "functional.py",
"repo_name": "halomod/halomod",
"repo_path": "halomod_extracted/halomod-main/src/halomod/functional.py",
"type": "Python"
}
|
r"""Module defining functional approaches to generating halo model quantities."""
from __future__ import annotations
from hmf import Framework, get_hmf
from .halo_model import HaloModel
def get_halomodel(
    required_attrs, get_label=True, kls=HaloModel, fast_kwargs: dict | None = None, **kwargs
) -> list[Framework]:
    r"""
    Yield framework instances for all combinations of parameters supplied.

    Returns a :func:`~hmf.helpers.functional.get_hmf`, with `framework =`
    :class:`~halomod.halo_model.HaloModel`. See
    :func:`~hmf.helpers.functional.get_hmf` for input parameters and yields.
    """
    # Use an identity check rather than ``fast_kwargs or {...}``: the latter
    # would silently replace a caller's explicitly-passed empty dict with the
    # default fast-evaluation parameters below.
    if fast_kwargs is None:
        # Deliberately coarse settings that make the framework cheap to
        # evaluate (few k/m/r points, simplest model choices).
        fast_kwargs = {
            "transfer_fit": "BBKS",
            "lnk_min": -4,
            "lnk_max": 2,
            "dlnk": 1,
            "Mmin": 13,
            "dlog10m": 0.5,
            "rmin": 10,
            "rmax": 20,
            "rnum": 4,
            "halo_exclusion": "None",
            "nonlinear": False,
            "scale_dependent_bias": False,
            "hod_model": "Zehavi05",
        }
    return get_hmf(required_attrs, get_label, kls, fast_kwargs, **kwargs)
|
halomodREPO_NAMEhalomodPATH_START.@halomod_extracted@halomod-main@src@halomod@functional.py@.PATH_END.py
|
{
"filename": "test_ivp.py",
"repo_name": "scipy/scipy",
"repo_path": "scipy_extracted/scipy-main/scipy/integrate/_ivp/tests/test_ivp.py",
"type": "Python"
}
|
from itertools import product
from numpy.testing import (assert_, assert_allclose, assert_array_less,
assert_equal, assert_no_warnings, suppress_warnings)
import pytest
from pytest import raises as assert_raises
import numpy as np
from scipy.optimize._numdiff import group_columns
from scipy.integrate import solve_ivp, RK23, RK45, DOP853, Radau, BDF, LSODA
from scipy.integrate import OdeSolution
from scipy.integrate._ivp.common import num_jac, select_initial_step
from scipy.integrate._ivp.base import ConstantDenseOutput
from scipy.sparse import coo_matrix, csc_matrix
def fun_zero(t, y):
    """Right-hand side of the trivial ODE ``y' = 0`` (constant solution)."""
    return np.full_like(y, 0)
def fun_linear(t, y):
    """RHS of a 2-D linear system whose eigenvalues are purely imaginary."""
    first, second = y[0], y[1]
    return np.array([-first - 5 * second, first + second])
def jac_linear():
    """Constant Jacobian matrix of :func:`fun_linear`."""
    rows = [[-1, -5],
            [1, 1]]
    return np.array(rows)
def sol_linear(t):
    """Exact solution of the linear test system, stacked row-wise per component."""
    first = -5 * np.sin(2 * t)
    second = np.sin(2 * t) + 2 * np.cos(2 * t)
    return np.vstack((first, second))
def fun_rational(t, y):
    """RHS of a rational ODE system; its exact solution is :func:`sol_rational`."""
    d0 = y[1] / t
    d1 = y[1] * (y[0] + 2 * y[1] - 1) / (t * (y[0] - 1))
    return np.array([d0, d1])
def fun_rational_vectorized(t, y):
    """Vectorized variant of :func:`fun_rational` (operates column-wise)."""
    top = y[1] / t
    bottom = y[1] * (y[0] + 2 * y[1] - 1) / (t * (y[0] - 1))
    return np.vstack((top, bottom))
def jac_rational(t, y):
    """Analytic dense Jacobian of :func:`fun_rational`."""
    denom = t * (y[0] - 1)
    row0 = [0, 1 / t]
    row1 = [-2 * y[1] ** 2 / (t * (y[0] - 1) ** 2),
            (y[0] + 4 * y[1] - 1) / denom]
    return np.array([row0, row1])
def jac_rational_sparse(t, y):
    """Sparse (CSC) version of :func:`jac_rational`."""
    dense = [
        [0, 1 / t],
        [-2 * y[1] ** 2 / (t * (y[0] - 1) ** 2),
         (y[0] + 4 * y[1] - 1) / (t * (y[0] - 1))],
    ]
    return csc_matrix(dense)
def sol_rational(t):
    """Exact solution of the rational system: ``(t/(t+10), 10 t/(t+10)^2)``."""
    shifted = t + 10
    return np.asarray((t / shifted, 10 * t / shifted ** 2))
def fun_medazko(t, y):
    # Semi-discretized reaction-diffusion PDE test problem (presumably the
    # "Medakzo" benchmark, per the name — confirm against the test-set
    # reference). ``y`` interleaves two species on an n-point spatial grid.
    n = y.shape[0] // 2
    k = 100
    c = 4
    # Boundary forcing is switched off after t = 5.
    phi = 2 if t <= 5 else 0
    # Pad with boundary values: (phi, 0) on the left, a copy of the
    # second-to-last entry on the right.
    y = np.hstack((phi, 0, y, y[-2]))
    d = 1 / n
    j = np.arange(n) + 1
    # Spatially varying transport coefficients.
    alpha = 2 * (j * d - 1) ** 3 / c ** 2
    beta = (j * d - 1) ** 4 / c ** 2
    # Index helpers into the padded vector for neighbouring grid points.
    j_2_p1 = 2 * j + 2
    j_2_m3 = 2 * j - 2
    j_2_m1 = 2 * j
    j_2 = 2 * j + 1
    f = np.empty(2 * n)
    # First species: central-difference advection + diffusion + reaction.
    f[::2] = (alpha * (y[j_2_p1] - y[j_2_m3]) / (2 * d) +
              beta * (y[j_2_m3] - 2 * y[j_2_m1] + y[j_2_p1]) / d ** 2 -
              k * y[j_2_m1] * y[j_2])
    # Second species: pure reaction term.
    f[1::2] = -k * y[j_2] * y[j_2_m1]
    return f
def medazko_sparsity(n):
    # Jacobian sparsity pattern of ``fun_medazko`` as a COO matrix: each even
    # (first-species) row couples to its two spatial neighbours and to the odd
    # (second-species) entry at the same grid point, and vice versa.
    cols = []
    rows = []

    # Even rows: coupling to previous/current/next grid points.
    i = np.arange(n) * 2

    cols.append(i[1:])
    rows.append(i[1:] - 2)

    cols.append(i)
    rows.append(i)

    cols.append(i)
    rows.append(i + 1)

    cols.append(i[:-1])
    rows.append(i[:-1] + 2)

    # Odd rows: coupling to themselves and the paired even entry.
    i = np.arange(n) * 2 + 1

    cols.append(i)
    rows.append(i)

    cols.append(i)
    rows.append(i - 1)

    cols = np.hstack(cols)
    rows = np.hstack(rows)

    return coo_matrix((np.ones_like(cols), (cols, rows)))
def fun_complex(t, y):
return -y
def jac_complex(t, y):
return -np.eye(y.shape[0])
def jac_complex_sparse(t, y):
return csc_matrix(jac_complex(t, y))
def sol_complex(t):
y = (0.5 + 1j) * np.exp(-t)
return y.reshape((1, -1))
def fun_event_dense_output_LSODA(t, y):
    """RHS ``y' = y (t - 2)`` used by the LSODA dense-output event test."""
    return (t - 2) * y


def jac_event_dense_output_LSODA(t, y):
    """Scalar Jacobian of the LSODA event test problem."""
    return t - 2


def sol_event_dense_output_LSODA(t):
    """Exact solution of the LSODA event test problem (y(-2) = 0.05)."""
    exponent = t ** 2 / 2 - 2 * t + np.log(0.05) - 6
    return np.exp(exponent)
def compute_error(y, y_true, rtol, atol):
    """Tolerance-scaled RMS error of a solution, per time point (column)."""
    scaled = (y - y_true) / (atol + rtol * np.abs(y_true))
    return np.linalg.norm(scaled, axis=0) / np.sqrt(scaled.shape[0])
@pytest.mark.thread_unsafe
def test_integration():
    # Integrate the rational test problem with every solver, forward and
    # backward in time, with/without vectorization and analytic Jacobians,
    # then check accuracy, work counters, and the dense output.
    rtol = 1e-3
    atol = 1e-6
    y0 = [1/3, 2/9]

    for vectorized, method, t_span, jac in product(
            [False, True],
            ['RK23', 'RK45', 'DOP853', 'Radau', 'BDF', 'LSODA'],
            [[5, 9], [5, 1]],
            [None, jac_rational, jac_rational_sparse]):

        if vectorized:
            fun = fun_rational_vectorized
        else:
            fun = fun_rational

        with suppress_warnings() as sup:
            # Explicit-method solvers warn that `jac` is ignored.
            sup.filter(UserWarning,
                       "The following arguments have no effect for a chosen "
                       "solver: `jac`")
            res = solve_ivp(fun, t_span, y0, rtol=rtol,
                            atol=atol, method=method, dense_output=True,
                            jac=jac, vectorized=vectorized)
        assert_equal(res.t[0], t_span[0])
        assert_(res.t_events is None)
        assert_(res.y_events is None)
        assert_(res.success)
        assert_equal(res.status, 0)

        if method == 'DOP853':
            # DOP853 spends more functions evaluation because it doesn't
            # have enough time to develop big enough step size.
            assert_(res.nfev < 50)
        else:
            assert_(res.nfev < 40)

        if method in ['RK23', 'RK45', 'DOP853', 'LSODA']:
            # These solvers never factorize or evaluate the Jacobian here.
            assert_equal(res.njev, 0)
            assert_equal(res.nlu, 0)
        else:
            assert_(0 < res.njev < 3)
            assert_(0 < res.nlu < 10)

        # Error at the solver's own time points.
        y_true = sol_rational(res.t)
        e = compute_error(res.y, y_true, rtol, atol)
        assert_(np.all(e < 5))

        # Error of the dense output on a fine grid.
        tc = np.linspace(*t_span)
        yc_true = sol_rational(tc)
        yc = res.sol(tc)
        e = compute_error(yc, yc_true, rtol, atol)
        assert_(np.all(e < 5))

        # Dense output queried at a single scalar time.
        tc = (t_span[0] + t_span[-1]) / 2
        yc_true = sol_rational(tc)
        yc = res.sol(tc)
        e = compute_error(yc, yc_true, rtol, atol)
        assert_(np.all(e < 5))

        # Interpolant must reproduce the solver's own points exactly.
        assert_allclose(res.sol(res.t), res.y, rtol=1e-15, atol=1e-15)
@pytest.mark.thread_unsafe
def test_integration_complex():
rtol = 1e-3
atol = 1e-6
y0 = [0.5 + 1j]
t_span = [0, 1]
tc = np.linspace(t_span[0], t_span[1])
for method, jac in product(['RK23', 'RK45', 'DOP853', 'BDF'],
[None, jac_complex, jac_complex_sparse]):
with suppress_warnings() as sup:
sup.filter(UserWarning,
"The following arguments have no effect for a chosen "
"solver: `jac`")
res = solve_ivp(fun_complex, t_span, y0, method=method,
dense_output=True, rtol=rtol, atol=atol, jac=jac)
assert_equal(res.t[0], t_span[0])
assert_(res.t_events is None)
assert_(res.y_events is None)
assert_(res.success)
assert_equal(res.status, 0)
if method == 'DOP853':
assert res.nfev < 35
else:
assert res.nfev < 25
if method == 'BDF':
assert_equal(res.njev, 1)
assert res.nlu < 6
else:
assert res.njev == 0
assert res.nlu == 0
y_true = sol_complex(res.t)
e = compute_error(res.y, y_true, rtol, atol)
assert np.all(e < 5)
yc_true = sol_complex(tc)
yc = res.sol(tc)
e = compute_error(yc, yc_true, rtol, atol)
assert np.all(e < 5)
@pytest.mark.fail_slow(5)
def test_integration_sparse_difference():
n = 200
t_span = [0, 20]
y0 = np.zeros(2 * n)
y0[1::2] = 1
sparsity = medazko_sparsity(n)
for method in ['BDF', 'Radau']:
res = solve_ivp(fun_medazko, t_span, y0, method=method,
jac_sparsity=sparsity)
assert_equal(res.t[0], t_span[0])
assert_(res.t_events is None)
assert_(res.y_events is None)
assert_(res.success)
assert_equal(res.status, 0)
assert_allclose(res.y[78, -1], 0.233994e-3, rtol=1e-2)
assert_allclose(res.y[79, -1], 0, atol=1e-3)
assert_allclose(res.y[148, -1], 0.359561e-3, rtol=1e-2)
assert_allclose(res.y[149, -1], 0, atol=1e-3)
assert_allclose(res.y[198, -1], 0.117374129e-3, rtol=1e-2)
assert_allclose(res.y[199, -1], 0.6190807e-5, atol=1e-3)
assert_allclose(res.y[238, -1], 0, atol=1e-3)
assert_allclose(res.y[239, -1], 0.9999997, rtol=1e-2)
def test_integration_const_jac():
rtol = 1e-3
atol = 1e-6
y0 = [0, 2]
t_span = [0, 2]
J = jac_linear()
J_sparse = csc_matrix(J)
for method, jac in product(['Radau', 'BDF'], [J, J_sparse]):
res = solve_ivp(fun_linear, t_span, y0, rtol=rtol, atol=atol,
method=method, dense_output=True, jac=jac)
assert_equal(res.t[0], t_span[0])
assert_(res.t_events is None)
assert_(res.y_events is None)
assert_(res.success)
assert_equal(res.status, 0)
assert_(res.nfev < 100)
assert_equal(res.njev, 0)
assert_(0 < res.nlu < 15)
y_true = sol_linear(res.t)
e = compute_error(res.y, y_true, rtol, atol)
assert_(np.all(e < 10))
tc = np.linspace(*t_span)
yc_true = sol_linear(tc)
yc = res.sol(tc)
e = compute_error(yc, yc_true, rtol, atol)
assert_(np.all(e < 15))
assert_allclose(res.sol(res.t), res.y, rtol=1e-14, atol=1e-14)
@pytest.mark.slow
@pytest.mark.parametrize('method', ['Radau', 'BDF', 'LSODA'])
def test_integration_stiff(method, num_parallel_threads):
    # Robertson chemical-kinetics problem: verify that the stiff solvers keep
    # the evaluation counts low (i.e. stiff handling is actually active).
    rtol = 1e-6
    atol = 1e-6
    y0 = [1e4, 0, 0]
    tspan = [0, 1e8]

    if method == 'LSODA' and num_parallel_threads > 1:
        pytest.skip(reason='LSODA does not allow for concurrent calls')

    def fun_robertson(t, state):
        # Classic stiff three-species reaction system.
        x, y, z = state
        return [
            -0.04 * x + 1e4 * y * z,
            0.04 * x - 1e4 * y * z - 3e7 * y * y,
            3e7 * y * y,
        ]

    res = solve_ivp(fun_robertson, tspan, y0, rtol=rtol,
                    atol=atol, method=method)

    # If the stiff mode is not activated correctly, these numbers will be much bigger
    assert res.nfev < 5000
    assert res.njev < 200
def test_events(num_parallel_threads):
def event_rational_1(t, y):
return y[0] - y[1] ** 0.7
def event_rational_2(t, y):
return y[1] ** 0.6 - y[0]
def event_rational_3(t, y):
return t - 7.4
event_rational_3.terminal = True
for method in ['RK23', 'RK45', 'DOP853', 'Radau', 'BDF', 'LSODA']:
if method == 'LSODA' and num_parallel_threads > 1:
continue
res = solve_ivp(fun_rational, [5, 8], [1/3, 2/9], method=method,
events=(event_rational_1, event_rational_2))
assert_equal(res.status, 0)
assert_equal(res.t_events[0].size, 1)
assert_equal(res.t_events[1].size, 1)
assert_(5.3 < res.t_events[0][0] < 5.7)
assert_(7.3 < res.t_events[1][0] < 7.7)
assert_equal(res.y_events[0].shape, (1, 2))
assert_equal(res.y_events[1].shape, (1, 2))
assert np.isclose(
event_rational_1(res.t_events[0][0], res.y_events[0][0]), 0)
assert np.isclose(
event_rational_2(res.t_events[1][0], res.y_events[1][0]), 0)
event_rational_1.direction = 1
event_rational_2.direction = 1
res = solve_ivp(fun_rational, [5, 8], [1 / 3, 2 / 9], method=method,
events=(event_rational_1, event_rational_2))
assert_equal(res.status, 0)
assert_equal(res.t_events[0].size, 1)
assert_equal(res.t_events[1].size, 0)
assert_(5.3 < res.t_events[0][0] < 5.7)
assert_equal(res.y_events[0].shape, (1, 2))
assert_equal(res.y_events[1].shape, (0,))
assert np.isclose(
event_rational_1(res.t_events[0][0], res.y_events[0][0]), 0)
event_rational_1.direction = -1
event_rational_2.direction = -1
res = solve_ivp(fun_rational, [5, 8], [1 / 3, 2 / 9], method=method,
events=(event_rational_1, event_rational_2))
assert_equal(res.status, 0)
assert_equal(res.t_events[0].size, 0)
assert_equal(res.t_events[1].size, 1)
assert_(7.3 < res.t_events[1][0] < 7.7)
assert_equal(res.y_events[0].shape, (0,))
assert_equal(res.y_events[1].shape, (1, 2))
assert np.isclose(
event_rational_2(res.t_events[1][0], res.y_events[1][0]), 0)
event_rational_1.direction = 0
event_rational_2.direction = 0
res = solve_ivp(fun_rational, [5, 8], [1 / 3, 2 / 9], method=method,
events=(event_rational_1, event_rational_2,
event_rational_3), dense_output=True)
assert_equal(res.status, 1)
assert_equal(res.t_events[0].size, 1)
assert_equal(res.t_events[1].size, 0)
assert_equal(res.t_events[2].size, 1)
assert_(5.3 < res.t_events[0][0] < 5.7)
assert_(7.3 < res.t_events[2][0] < 7.5)
assert_equal(res.y_events[0].shape, (1, 2))
assert_equal(res.y_events[1].shape, (0,))
assert_equal(res.y_events[2].shape, (1, 2))
assert np.isclose(
event_rational_1(res.t_events[0][0], res.y_events[0][0]), 0)
assert np.isclose(
event_rational_3(res.t_events[2][0], res.y_events[2][0]), 0)
res = solve_ivp(fun_rational, [5, 8], [1 / 3, 2 / 9], method=method,
events=event_rational_1, dense_output=True)
assert_equal(res.status, 0)
assert_equal(res.t_events[0].size, 1)
assert_(5.3 < res.t_events[0][0] < 5.7)
assert_equal(res.y_events[0].shape, (1, 2))
assert np.isclose(
event_rational_1(res.t_events[0][0], res.y_events[0][0]), 0)
# Also test that termination by event doesn't break interpolants.
tc = np.linspace(res.t[0], res.t[-1])
yc_true = sol_rational(tc)
yc = res.sol(tc)
e = compute_error(yc, yc_true, 1e-3, 1e-6)
assert_(np.all(e < 5))
# Test that the y_event matches solution
assert np.allclose(sol_rational(res.t_events[0][0]), res.y_events[0][0],
rtol=1e-3, atol=1e-6)
# Test in backward direction.
event_rational_1.direction = 0
event_rational_2.direction = 0
for method in ['RK23', 'RK45', 'DOP853', 'Radau', 'BDF', 'LSODA']:
if method == 'LSODA' and num_parallel_threads > 1:
continue
res = solve_ivp(fun_rational, [8, 5], [4/9, 20/81], method=method,
events=(event_rational_1, event_rational_2))
assert_equal(res.status, 0)
assert_equal(res.t_events[0].size, 1)
assert_equal(res.t_events[1].size, 1)
assert_(5.3 < res.t_events[0][0] < 5.7)
assert_(7.3 < res.t_events[1][0] < 7.7)
assert_equal(res.y_events[0].shape, (1, 2))
assert_equal(res.y_events[1].shape, (1, 2))
assert np.isclose(
event_rational_1(res.t_events[0][0], res.y_events[0][0]), 0)
assert np.isclose(
event_rational_2(res.t_events[1][0], res.y_events[1][0]), 0)
event_rational_1.direction = -1
event_rational_2.direction = -1
res = solve_ivp(fun_rational, [8, 5], [4/9, 20/81], method=method,
events=(event_rational_1, event_rational_2))
assert_equal(res.status, 0)
assert_equal(res.t_events[0].size, 1)
assert_equal(res.t_events[1].size, 0)
assert_(5.3 < res.t_events[0][0] < 5.7)
assert_equal(res.y_events[0].shape, (1, 2))
assert_equal(res.y_events[1].shape, (0,))
assert np.isclose(
event_rational_1(res.t_events[0][0], res.y_events[0][0]), 0)
event_rational_1.direction = 1
event_rational_2.direction = 1
res = solve_ivp(fun_rational, [8, 5], [4/9, 20/81], method=method,
events=(event_rational_1, event_rational_2))
assert_equal(res.status, 0)
assert_equal(res.t_events[0].size, 0)
assert_equal(res.t_events[1].size, 1)
assert_(7.3 < res.t_events[1][0] < 7.7)
assert_equal(res.y_events[0].shape, (0,))
assert_equal(res.y_events[1].shape, (1, 2))
assert np.isclose(
event_rational_2(res.t_events[1][0], res.y_events[1][0]), 0)
event_rational_1.direction = 0
event_rational_2.direction = 0
res = solve_ivp(fun_rational, [8, 5], [4/9, 20/81], method=method,
events=(event_rational_1, event_rational_2,
event_rational_3), dense_output=True)
assert_equal(res.status, 1)
assert_equal(res.t_events[0].size, 0)
assert_equal(res.t_events[1].size, 1)
assert_equal(res.t_events[2].size, 1)
assert_(7.3 < res.t_events[1][0] < 7.7)
assert_(7.3 < res.t_events[2][0] < 7.5)
assert_equal(res.y_events[0].shape, (0,))
assert_equal(res.y_events[1].shape, (1, 2))
assert_equal(res.y_events[2].shape, (1, 2))
assert np.isclose(
event_rational_2(res.t_events[1][0], res.y_events[1][0]), 0)
assert np.isclose(
event_rational_3(res.t_events[2][0], res.y_events[2][0]), 0)
# Also test that termination by event doesn't break interpolants.
tc = np.linspace(res.t[-1], res.t[0])
yc_true = sol_rational(tc)
yc = res.sol(tc)
e = compute_error(yc, yc_true, 1e-3, 1e-6)
assert_(np.all(e < 5))
assert np.allclose(sol_rational(res.t_events[1][0]), res.y_events[1][0],
rtol=1e-3, atol=1e-6)
assert np.allclose(sol_rational(res.t_events[2][0]), res.y_events[2][0],
rtol=1e-3, atol=1e-6)
def _get_harmonic_oscillator():
def f(t, y):
return [y[1], -y[0]]
def event(t, y):
return y[0]
return f, event
@pytest.mark.parametrize('n_events', [3, 4])
def test_event_terminal_integer(n_events):
    # An integer `terminal` attribute stops integration after that many
    # occurrences of the event, and every recorded event is a true zero.
    f, event = _get_harmonic_oscillator()
    event.terminal = n_events
    res = solve_ivp(f, (0, 100), [1, 0], events=event)
    assert len(res.t_events[0]) == n_events
    assert len(res.y_events[0]) == n_events
    assert_allclose(res.y_events[0][:, 0], 0, atol=1e-14)
def test_event_terminal_iv():
    # Validate accepted and rejected values of the event `terminal`
    # attribute: None and 0 behave as non-terminal; negative or fractional
    # values raise.
    f, event = _get_harmonic_oscillator()
    args = (f, (0, 100), [1, 0])

    event.terminal = None
    res = solve_ivp(*args, events=event)
    event.terminal = 0
    ref = solve_ivp(*args, events=event)
    assert_allclose(res.t_events, ref.t_events)

    message = "The `terminal` attribute..."
    event.terminal = -1
    with pytest.raises(ValueError, match=message):
        solve_ivp(*args, events=event)
    event.terminal = 3.5
    with pytest.raises(ValueError, match=message):
        solve_ivp(*args, events=event)
def test_max_step(num_parallel_threads):
rtol = 1e-3
atol = 1e-6
y0 = [1/3, 2/9]
for method in [RK23, RK45, DOP853, Radau, BDF, LSODA]:
if method is LSODA and num_parallel_threads > 1:
continue
for t_span in ([5, 9], [5, 1]):
res = solve_ivp(fun_rational, t_span, y0, rtol=rtol,
max_step=0.5, atol=atol, method=method,
dense_output=True)
assert_equal(res.t[0], t_span[0])
assert_equal(res.t[-1], t_span[-1])
assert_(np.all(np.abs(np.diff(res.t)) <= 0.5 + 1e-15))
assert_(res.t_events is None)
assert_(res.success)
assert_equal(res.status, 0)
y_true = sol_rational(res.t)
e = compute_error(res.y, y_true, rtol, atol)
assert_(np.all(e < 5))
tc = np.linspace(*t_span)
yc_true = sol_rational(tc)
yc = res.sol(tc)
e = compute_error(yc, yc_true, rtol, atol)
assert_(np.all(e < 5))
assert_allclose(res.sol(res.t), res.y, rtol=1e-15, atol=1e-15)
assert_raises(ValueError, method, fun_rational, t_span[0], y0,
t_span[1], max_step=-1)
if method is not LSODA:
solver = method(fun_rational, t_span[0], y0, t_span[1],
rtol=rtol, atol=atol, max_step=1e-20)
message = solver.step()
message = solver.step() # First step succeeds but second step fails.
assert_equal(solver.status, 'failed')
assert_("step size is less" in message)
assert_raises(RuntimeError, solver.step)
def test_first_step(num_parallel_threads):
rtol = 1e-3
atol = 1e-6
y0 = [1/3, 2/9]
first_step = 0.1
for method in [RK23, RK45, DOP853, Radau, BDF, LSODA]:
if method is LSODA and num_parallel_threads > 1:
continue
for t_span in ([5, 9], [5, 1]):
res = solve_ivp(fun_rational, t_span, y0, rtol=rtol,
max_step=0.5, atol=atol, method=method,
dense_output=True, first_step=first_step)
assert_equal(res.t[0], t_span[0])
assert_equal(res.t[-1], t_span[-1])
assert_allclose(first_step, np.abs(res.t[1] - 5))
assert_(res.t_events is None)
assert_(res.success)
assert_equal(res.status, 0)
y_true = sol_rational(res.t)
e = compute_error(res.y, y_true, rtol, atol)
assert_(np.all(e < 5))
tc = np.linspace(*t_span)
yc_true = sol_rational(tc)
yc = res.sol(tc)
e = compute_error(yc, yc_true, rtol, atol)
assert_(np.all(e < 5))
assert_allclose(res.sol(res.t), res.y, rtol=1e-15, atol=1e-15)
assert_raises(ValueError, method, fun_rational, t_span[0], y0,
t_span[1], first_step=-1)
assert_raises(ValueError, method, fun_rational, t_span[0], y0,
t_span[1], first_step=5)
def test_t_eval():
rtol = 1e-3
atol = 1e-6
y0 = [1/3, 2/9]
for t_span in ([5, 9], [5, 1]):
t_eval = np.linspace(t_span[0], t_span[1], 10)
res = solve_ivp(fun_rational, t_span, y0, rtol=rtol, atol=atol,
t_eval=t_eval)
assert_equal(res.t, t_eval)
assert_(res.t_events is None)
assert_(res.success)
assert_equal(res.status, 0)
y_true = sol_rational(res.t)
e = compute_error(res.y, y_true, rtol, atol)
assert_(np.all(e < 5))
t_eval = [5, 5.01, 7, 8, 8.01, 9]
res = solve_ivp(fun_rational, [5, 9], y0, rtol=rtol, atol=atol,
t_eval=t_eval)
assert_equal(res.t, t_eval)
assert_(res.t_events is None)
assert_(res.success)
assert_equal(res.status, 0)
y_true = sol_rational(res.t)
e = compute_error(res.y, y_true, rtol, atol)
assert_(np.all(e < 5))
t_eval = [5, 4.99, 3, 1.5, 1.1, 1.01, 1]
res = solve_ivp(fun_rational, [5, 1], y0, rtol=rtol, atol=atol,
t_eval=t_eval)
assert_equal(res.t, t_eval)
assert_(res.t_events is None)
assert_(res.success)
assert_equal(res.status, 0)
t_eval = [5.01, 7, 8, 8.01]
res = solve_ivp(fun_rational, [5, 9], y0, rtol=rtol, atol=atol,
t_eval=t_eval)
assert_equal(res.t, t_eval)
assert_(res.t_events is None)
assert_(res.success)
assert_equal(res.status, 0)
y_true = sol_rational(res.t)
e = compute_error(res.y, y_true, rtol, atol)
assert_(np.all(e < 5))
t_eval = [4.99, 3, 1.5, 1.1, 1.01]
res = solve_ivp(fun_rational, [5, 1], y0, rtol=rtol, atol=atol,
t_eval=t_eval)
assert_equal(res.t, t_eval)
assert_(res.t_events is None)
assert_(res.success)
assert_equal(res.status, 0)
t_eval = [4, 6]
assert_raises(ValueError, solve_ivp, fun_rational, [5, 9], y0,
rtol=rtol, atol=atol, t_eval=t_eval)
def test_t_eval_dense_output():
rtol = 1e-3
atol = 1e-6
y0 = [1/3, 2/9]
t_span = [5, 9]
t_eval = np.linspace(t_span[0], t_span[1], 10)
res = solve_ivp(fun_rational, t_span, y0, rtol=rtol, atol=atol,
t_eval=t_eval)
res_d = solve_ivp(fun_rational, t_span, y0, rtol=rtol, atol=atol,
t_eval=t_eval, dense_output=True)
assert_equal(res.t, t_eval)
assert_(res.t_events is None)
assert_(res.success)
assert_equal(res.status, 0)
assert_equal(res.t, res_d.t)
assert_equal(res.y, res_d.y)
assert_(res_d.t_events is None)
assert_(res_d.success)
assert_equal(res_d.status, 0)
# if t and y are equal only test values for one case
y_true = sol_rational(res.t)
e = compute_error(res.y, y_true, rtol, atol)
assert_(np.all(e < 5))
@pytest.mark.thread_unsafe
def test_t_eval_early_event():
def early_event(t, y):
return t - 7
early_event.terminal = True
rtol = 1e-3
atol = 1e-6
y0 = [1/3, 2/9]
t_span = [5, 9]
t_eval = np.linspace(7.5, 9, 16)
for method in ['RK23', 'RK45', 'DOP853', 'Radau', 'BDF', 'LSODA']:
with suppress_warnings() as sup:
sup.filter(UserWarning,
"The following arguments have no effect for a chosen "
"solver: `jac`")
res = solve_ivp(fun_rational, t_span, y0, rtol=rtol, atol=atol,
method=method, t_eval=t_eval, events=early_event,
jac=jac_rational)
assert res.success
assert res.message == 'A termination event occurred.'
assert res.status == 1
assert not res.t and not res.y
assert len(res.t_events) == 1
assert res.t_events[0].size == 1
assert res.t_events[0][0] == 7
def test_event_dense_output_LSODA(num_parallel_threads):
if num_parallel_threads > 1:
pytest.skip('LSODA does not allow for concurrent execution')
def event_lsoda(t, y):
return y[0] - 2.02e-5
rtol = 1e-3
atol = 1e-6
y0 = [0.05]
t_span = [-2, 2]
first_step = 1e-3
res = solve_ivp(
fun_event_dense_output_LSODA,
t_span,
y0,
method="LSODA",
dense_output=True,
events=event_lsoda,
first_step=first_step,
max_step=1,
rtol=rtol,
atol=atol,
jac=jac_event_dense_output_LSODA,
)
assert_equal(res.t[0], t_span[0])
assert_equal(res.t[-1], t_span[-1])
assert_allclose(first_step, np.abs(res.t[1] - t_span[0]))
assert res.success
assert_equal(res.status, 0)
y_true = sol_event_dense_output_LSODA(res.t)
e = compute_error(res.y, y_true, rtol, atol)
assert_array_less(e, 5)
tc = np.linspace(*t_span)
yc_true = sol_event_dense_output_LSODA(tc)
yc = res.sol(tc)
e = compute_error(yc, yc_true, rtol, atol)
assert_array_less(e, 5)
assert_allclose(res.sol(res.t), res.y, rtol=1e-15, atol=1e-15)
def test_no_integration():
    """A zero-width time span must return the initial state for any query."""
    methods = ('RK23', 'RK45', 'DOP853', 'Radau', 'BDF', 'LSODA')
    for solver_name in methods:
        result = solve_ivp(lambda t, y: -y, [4, 4], [2, 3],
                           method=solver_name, dense_output=True)
        assert_equal(result.sol(4), [2, 3])
        assert_equal(result.sol([4, 5, 6]), [[2, 2, 2], [3, 3, 3]])
def test_no_integration_class():
    """Stepping a solver whose t0 equals t_bound finishes immediately."""
    for cls in (RK23, RK45, DOP853, Radau, BDF, LSODA):
        solver = cls(lambda t, y: -y, 0.0, [10.0, 0.0], 0.0)
        solver.step()
        assert_equal(solver.status, 'finished')
        interp = solver.dense_output()
        assert_equal(interp(0.0), [10.0, 0.0])
        assert_equal(interp([0, 1, 2]), [[10, 10, 10], [0, 0, 0]])

        # A zero-dimensional system with an infinite bound also finishes
        # in a single step.
        solver = cls(lambda t, y: -y, 0.0, [], np.inf)
        solver.step()
        assert_equal(solver.status, 'finished')
        interp = solver.dense_output()
        assert_equal(interp(100.0), [])
        assert_equal(interp([0, 1, 2]), np.empty((0, 3)))
def test_empty():
    """All solvers must handle a zero-dimensional system without warnings."""
    def rhs(t, y):
        return np.zeros((0,))

    y0 = np.zeros((0,))

    # Finite and infinite integration bounds are exercised separately.
    for endpoint in (10, np.inf):
        for method in ['RK23', 'RK45', 'DOP853', 'Radau', 'BDF', 'LSODA']:
            sol = assert_no_warnings(solve_ivp, rhs, [0, endpoint], y0,
                                     method=method, dense_output=True)
            assert_equal(sol.sol(10), np.zeros((0,)))
            assert_equal(sol.sol([1, 2, 3]), np.zeros((0, 3)))
def test_ConstantDenseOutput():
    """ConstantDenseOutput returns its stored value at any query point."""
    interp = ConstantDenseOutput(0, 1, np.array([1, 2]))
    assert_allclose(interp(1.5), [1, 2])
    assert_allclose(interp([1, 1.5, 2]), [[1, 1, 1], [2, 2, 2]])

    # Degenerate case: an empty state vector.
    empty = ConstantDenseOutput(0, 1, np.array([]))
    assert_allclose(empty(1.5), np.empty(0))
    assert_allclose(empty([1, 1.5, 2]), np.empty((0, 3)))
def test_classes():
y0 = [1 / 3, 2 / 9]
for cls in [RK23, RK45, DOP853, Radau, BDF, LSODA]:
solver = cls(fun_rational, 5, y0, np.inf)
assert_equal(solver.n, 2)
assert_equal(solver.status, 'running')
assert_equal(solver.t_bound, np.inf)
assert_equal(solver.direction, 1)
assert_equal(solver.t, 5)
assert_equal(solver.y, y0)
assert_(solver.step_size is None)
if cls is not LSODA:
assert_(solver.nfev > 0)
assert_(solver.njev >= 0)
assert_equal(solver.nlu, 0)
else:
assert_equal(solver.nfev, 0)
assert_equal(solver.njev, 0)
assert_equal(solver.nlu, 0)
assert_raises(RuntimeError, solver.dense_output)
message = solver.step()
assert_equal(solver.status, 'running')
assert_equal(message, None)
assert_equal(solver.n, 2)
assert_equal(solver.t_bound, np.inf)
assert_equal(solver.direction, 1)
assert_(solver.t > 5)
assert_(not np.all(np.equal(solver.y, y0)))
assert_(solver.step_size > 0)
assert_(solver.nfev > 0)
assert_(solver.njev >= 0)
assert_(solver.nlu >= 0)
sol = solver.dense_output()
assert_allclose(sol(5), y0, rtol=1e-15, atol=0)
def test_OdeSolution():
    """OdeSolution dispatches queries to the correct interpolant segment,
    including extrapolation, reversed time direction, and a degenerate
    single-point interval."""
    ts = np.array([0, 2, 5], dtype=float)
    s1 = ConstantDenseOutput(ts[0], ts[1], np.array([-1]))
    s2 = ConstantDenseOutput(ts[1], ts[2], np.array([1]))
    sol = OdeSolution(ts, [s1, s2])
    assert_equal(sol(-1), [-1])
    assert_equal(sol(1), [-1])
    assert_equal(sol(2), [-1])
    assert_equal(sol(3), [1])
    assert_equal(sol(5), [1])
    assert_equal(sol(6), [1])
    assert_equal(sol([0, 6, -2, 1.5, 4.5, 2.5, 5, 5.5, 2]),
                 np.array([[-1, 1, -1, -1, 1, 1, 1, 1, -1]]))
    # Decreasing ts: integration performed backwards in time.
    ts = np.array([10, 4, -3])
    s1 = ConstantDenseOutput(ts[0], ts[1], np.array([-1]))
    s2 = ConstantDenseOutput(ts[1], ts[2], np.array([1]))
    sol = OdeSolution(ts, [s1, s2])
    assert_equal(sol(11), [-1])
    assert_equal(sol(10), [-1])
    assert_equal(sol(5), [-1])
    assert_equal(sol(4), [-1])
    assert_equal(sol(0), [1])
    assert_equal(sol(-3), [1])
    assert_equal(sol(-4), [1])
    assert_equal(sol([12, -5, 10, -3, 6, 1, 4]),
                 np.array([[-1, 1, -1, 1, -1, 1, -1]]))
    # Degenerate interval of zero length: every query hits the one segment.
    ts = np.array([1, 1])
    s = ConstantDenseOutput(1, 1, np.array([10]))
    sol = OdeSolution(ts, [s])
    assert_equal(sol(0), [10])
    assert_equal(sol(1), [10])
    assert_equal(sol(2), [10])
    assert_equal(sol([2, 1, 0]), np.array([[10, 10, 10]]))
def test_num_jac():
    """num_jac approximates the analytic Jacobian of the Robertson chemical
    kinetics problem, both with and without a pre-computed step factor."""
    def fun(t, y):
        return np.vstack([
            -0.04 * y[0] + 1e4 * y[1] * y[2],
            0.04 * y[0] - 1e4 * y[1] * y[2] - 3e7 * y[1] ** 2,
            3e7 * y[1] ** 2
        ])
    def jac(t, y):
        # Analytic Jacobian used as the reference.
        return np.array([
            [-0.04, 1e4 * y[2], 1e4 * y[1]],
            [0.04, -1e4 * y[2] - 6e7 * y[1], -1e4 * y[1]],
            [0, 6e7 * y[1], 0]
        ])
    t = 1
    y = np.array([1, 0, 0])
    J_true = jac(t, y)
    threshold = 1e-5
    f = fun(t, y).ravel()
    J_num, factor = num_jac(fun, t, y, f, threshold, None)
    assert_allclose(J_num, J_true, rtol=1e-5, atol=1e-5)
    # Second call reuses the factor returned by the first.
    J_num, factor = num_jac(fun, t, y, f, threshold, factor)
    assert_allclose(J_num, J_true, rtol=1e-5, atol=1e-5)
def test_num_jac_sparse():
    """The sparse (grouped-columns) num_jac path must agree with the dense
    path on a tridiagonal-structured problem."""
    def fun(t, y):
        e = y[1:]**3 - y[:-1]**2
        z = np.zeros(y.shape[1])
        return np.vstack((z, 3 * e)) + np.vstack((2 * e, z))
    def structure(n):
        # Tridiagonal sparsity pattern of the Jacobian.
        A = np.zeros((n, n), dtype=int)
        A[0, 0] = 1
        A[0, 1] = 1
        for i in range(1, n - 1):
            A[i, i - 1: i + 2] = 1
        A[-1, -1] = 1
        A[-1, -2] = 1
        return A
    np.random.seed(0)
    n = 20
    y = np.random.randn(n)
    A = structure(n)
    groups = group_columns(A)
    f = fun(0, y[:, None]).ravel()
    # Compare dense and sparse results, assuming that dense implementation
    # is correct (as it is straightforward).
    J_num_sparse, factor_sparse = num_jac(fun, 0, y.ravel(), f, 1e-8, None,
                                          sparsity=(A, groups))
    J_num_dense, factor_dense = num_jac(fun, 0, y.ravel(), f, 1e-8, None)
    assert_allclose(J_num_dense, J_num_sparse.toarray(),
                    rtol=1e-12, atol=1e-14)
    assert_allclose(factor_dense, factor_sparse, rtol=1e-12, atol=1e-14)
    # Take small factors to trigger their recomputing inside.
    factor = np.random.uniform(0, 1e-12, size=n)
    J_num_sparse, factor_sparse = num_jac(fun, 0, y.ravel(), f, 1e-8, factor,
                                          sparsity=(A, groups))
    J_num_dense, factor_dense = num_jac(fun, 0, y.ravel(), f, 1e-8, factor)
    assert_allclose(J_num_dense, J_num_sparse.toarray(),
                    rtol=1e-12, atol=1e-14)
    assert_allclose(factor_dense, factor_sparse, rtol=1e-12, atol=1e-14)
def test_args():
    """Integration test: `args` must be forwarded to the RHS, the Jacobian,
    and every event function; events (including a terminal one) fire at the
    analytically known times and the dense output matches the exact solution."""
    # sys3 is actually two decoupled systems. (x, y) form a
    # linear oscillator, while z is a nonlinear first order
    # system with equilibria at z=0 and z=1. If k > 0, z=1
    # is stable and z=0 is unstable.
    def sys3(t, w, omega, k, zfinal):
        x, y, z = w
        return [-omega*y, omega*x, k*z*(1 - z)]
    def sys3_jac(t, w, omega, k, zfinal):
        x, y, z = w
        J = np.array([[0, -omega, 0],
                      [omega, 0, 0],
                      [0, 0, k*(1 - 2*z)]])
        return J
    def sys3_x0decreasing(t, w, omega, k, zfinal):
        x, y, z = w
        return x
    def sys3_y0increasing(t, w, omega, k, zfinal):
        x, y, z = w
        return y
    def sys3_zfinal(t, w, omega, k, zfinal):
        x, y, z = w
        return z - zfinal
    # Set the event flags for the event functions.
    sys3_x0decreasing.direction = -1
    sys3_y0increasing.direction = 1
    sys3_zfinal.terminal = True
    omega = 2
    k = 4
    tfinal = 5
    zfinal = 0.99
    # Find z0 such that when z(0) = z0, z(tfinal) = zfinal.
    # The condition z(tfinal) = zfinal is the terminal event.
    z0 = np.exp(-k*tfinal)/((1 - zfinal)/zfinal + np.exp(-k*tfinal))
    w0 = [0, -1, z0]
    # Provide the jac argument and use the Radau method to ensure that the use
    # of the Jacobian function is exercised.
    # If event handling is working, the solution will stop at tfinal, not tend.
    tend = 2*tfinal
    sol = solve_ivp(sys3, [0, tend], w0,
                    events=[sys3_x0decreasing, sys3_y0increasing, sys3_zfinal],
                    dense_output=True, args=(omega, k, zfinal),
                    method='Radau', jac=sys3_jac,
                    rtol=1e-10, atol=1e-13)
    # Check that we got the expected events at the expected times.
    x0events_t = sol.t_events[0]
    y0events_t = sol.t_events[1]
    zfinalevents_t = sol.t_events[2]
    assert_allclose(x0events_t, [0.5*np.pi, 1.5*np.pi])
    assert_allclose(y0events_t, [0.25*np.pi, 1.25*np.pi])
    assert_allclose(zfinalevents_t, [tfinal])
    # Check that the solution agrees with the known exact solution.
    t = np.linspace(0, zfinalevents_t[0], 250)
    w = sol.sol(t)
    assert_allclose(w[0], np.sin(omega*t), rtol=1e-9, atol=1e-12)
    assert_allclose(w[1], -np.cos(omega*t), rtol=1e-9, atol=1e-12)
    assert_allclose(w[2], 1/(((1 - z0)/z0)*np.exp(-k*t) + 1),
                    rtol=1e-9, atol=1e-12)
    # Check that the state variables have the expected values at the events.
    x0events = sol.sol(x0events_t)
    y0events = sol.sol(y0events_t)
    zfinalevents = sol.sol(zfinalevents_t)
    assert_allclose(x0events[0], np.zeros_like(x0events[0]), atol=5e-14)
    assert_allclose(x0events[1], np.ones_like(x0events[1]))
    assert_allclose(y0events[0], np.ones_like(y0events[0]))
    assert_allclose(y0events[1], np.zeros_like(y0events[1]), atol=5e-14)
    assert_allclose(zfinalevents[2], [zfinal])
@pytest.mark.thread_unsafe
def test_array_rtol():
    """Array-like `rtol` must be accepted; a too-small element triggers a
    warning and is clipped, and the effective tighter rtol reduces error."""
    # solve_ivp had a bug with array_like `rtol`; see gh-15482
    # check that it's fixed
    def f(t, y):
        return y[0], y[1]
    # no warning (or error) when `rtol` is array_like
    sol = solve_ivp(f, (0, 1), [1., 1.], rtol=[1e-1, 1e-1])
    err1 = np.abs(np.linalg.norm(sol.y[:, -1] - np.exp(1)))
    # warning when an element of `rtol` is too small
    with pytest.warns(UserWarning, match="At least one element..."):
        sol = solve_ivp(f, (0, 1), [1., 1.], rtol=[1e-1, 1e-16])
        err2 = np.abs(np.linalg.norm(sol.y[:, -1] - np.exp(1)))
    # tighter rtol improves the error
    assert err2 < err1
@pytest.mark.parametrize('method', ['RK23', 'RK45', 'DOP853', 'Radau', 'BDF', 'LSODA'])
def test_integration_zero_rhs(method, num_parallel_threads):
    """With a zero RHS the solution stays exactly at the initial state."""
    if method == 'LSODA' and num_parallel_threads > 1:
        pytest.skip(reason='LSODA does not allow for concurrent execution')
    result = solve_ivp(fun_zero, [0, 10], np.ones(3), method=method)
    assert_(result.success)
    assert_equal(result.status, 0)
    assert_allclose(result.y, 1.0, rtol=1e-15)
def test_args_single_value():
    """A non-iterable `args` raises a helpful TypeError; wrapping the same
    value in a 1-tuple works."""
    def fun_with_arg(t, y, a):
        return a*y
    message = "Supplied 'args' cannot be unpacked."
    with pytest.raises(TypeError, match=message):
        solve_ivp(fun_with_arg, (0, 0.1), [1], args=-1)
    sol = solve_ivp(fun_with_arg, (0, 0.1), [1], args=(-1,))
    # y' = -y  =>  y(0.1) = exp(-0.1)
    assert_allclose(sol.y[0, -1], np.exp(-0.1))
@pytest.mark.parametrize("f0_fill", [np.nan, np.inf])
def test_initial_state_finiteness(f0_fill):
# regression test for gh-17846
msg = "All components of the initial state `y0` must be finite."
with pytest.raises(ValueError, match=msg):
solve_ivp(fun_zero, [0, 10], np.full(3, f0_fill))
@pytest.mark.parametrize('method', ['RK23', 'RK45', 'DOP853', 'Radau', 'BDF'])
def test_zero_interval(method):
    """Integrating over a zero-length interval succeeds and returns y0.

    The RHS is exponential growth, f[y(t)] = 2y(t), but no step should
    actually be taken because the limits coincide.
    """
    def rhs(t, y):
        return 2 * y

    result = solve_ivp(rhs, (0.0, 0.0), np.array([1.0]), method=method)
    assert result.success
    assert_allclose(result.y[0, -1], 1.0)
@pytest.mark.parametrize('method', ['RK23', 'RK45', 'DOP853', 'Radau', 'BDF'])
def test_tbound_respected_small_interval(method):
    """Regression test for gh-17341"""
    SMALL = 1e-4
    # f[y(t)] = 2y(t) on t in [0,SMALL]
    # undefined otherwise
    def f(t, y):
        # Raising here proves the solver never samples t beyond t_bound.
        if t > SMALL:
            raise ValueError("Function was evaluated outside interval")
        return 2 * y
    res = solve_ivp(f, (0.0, SMALL), np.array([1]), method=method)
    assert res.success
@pytest.mark.parametrize('method', ['RK23', 'RK45', 'DOP853', 'Radau', 'BDF'])
def test_tbound_respected_larger_interval(method):
    """Regression test for gh-8848"""
    def V(r):
        # Model potential from the original bug report.
        return -11/r + 10 * r / (0.05 + r**2)
    def func(t, p):
        # Raising outside [-17, 2] proves the solver respects t_bound.
        if t < -17 or t > 2:
            raise ValueError("Function was evaluated outside interval")
        P = p[0]
        Q = p[1]
        r = np.exp(t)
        dPdr = r * Q
        dQdr = -2.0 * r * ((-0.2 - V(r)) * P + 1 / r * Q)
        return np.array([dPdr, dQdr])
    result = solve_ivp(func,
                       (-17, 2),
                       y0=np.array([1, -11]),
                       max_step=0.03,
                       vectorized=False,
                       t_eval=None,
                       atol=1e-8,
                       rtol=1e-5)
    assert result.success
@pytest.mark.parametrize('method', ['RK23', 'RK45', 'DOP853', 'Radau', 'BDF'])
def test_tbound_respected_oscillator(method):
    "Regression test for gh-9198"
    def reactions_func(t, y):
        # Evaluation past t=205 (beyond t1=200 plus tolerance) means the
        # solver overshot t_bound; fail loudly.
        if (t > 205):
            raise ValueError("Called outside interval")
        yprime = np.array([1.73307544e-02,
                           6.49376470e-06,
                           0.00000000e+00,
                           0.00000000e+00])
        return yprime
    def run_sim2(t_end, n_timepoints=10, shortest_delay_line=10000000):
        # NOTE(review): the parameters are unused; the signature is kept
        # verbatim from the original gh-9198 reproduction script.
        init_state = np.array([134.08298555, 138.82348612, 100., 0.])
        t0 = 100.0
        t1 = 200.0
        return solve_ivp(reactions_func,
                         (t0, t1),
                         init_state.copy(),
                         dense_output=True,
                         max_step=t1 - t0)
    result = run_sim2(1000, 100, 100)
    assert result.success
def test_inital_maxstep():
    """Verify that select_initial_step respects max_step.

    (The function name's "inital" typo is preserved for test-ID stability.)
    For each method order and both integration directions, an unconstrained
    initial step halved and passed back as max_step must be returned exactly.
    """
    rtol = 1e-3
    atol = 1e-6
    y0 = np.array([1/3, 2/9])
    # (t0, t_bound) pairs cover forward and backward integration.
    for (t0, t_bound) in ((5, 9), (5, 1)):
        for method_order in [RK23.error_estimator_order,
                             RK45.error_estimator_order,
                             DOP853.error_estimator_order,
                             3, #RADAU
                             1 #BDF
                             ]:
            step_no_max = select_initial_step(fun_rational, t0, y0, t_bound,
                                              np.inf,
                                              fun_rational(t0,y0),
                                              np.sign(t_bound - t0),
                                              method_order,
                                              rtol, atol)
            max_step = step_no_max/2
            step_with_max = select_initial_step(fun_rational, t0, y0, t_bound,
                                                max_step,
                                                fun_rational(t0, y0),
                                                np.sign(t_bound - t0),
                                                method_order,
                                                rtol, atol)
            # The cap must be hit exactly, not merely approached.
            assert_equal(max_step, step_with_max)
|
scipyREPO_NAMEscipyPATH_START.@scipy_extracted@scipy-main@scipy@integrate@_ivp@tests@test_ivp.py@.PATH_END.py
|
{
"filename": "core_correlation_pytorch.py",
"repo_name": "leungcalvin/pyfx-public",
"repo_path": "pyfx-public_extracted/pyfx-public-main/src/pyfx/core_correlation_pytorch.py",
"type": "Python"
}
|
"""
Identical to core_correlation, but uses gpus. Still W.I.P.
Fringestops station B to station A and cross correlates baseband data from station A and B.
Written by Shion Andrew
"""
import numpy as np
from astropy.time import Time, TimeDelta
from decimal import Decimal
import astropy.units as un
import time
from pyfx.core_math_torch import fft_corr_gpu
from pyfx.core_math import max_lag_slice
from baseband_analysis.core.bbdata import BBData
import torch
from pycalc11 import Calc
import logging
#enable type hints for static tools
from typing import Optional, Tuple, Union
K_DM = 1 / 2.41e-4 # in s MHz^2 / (pc cm^-3)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
def autocorr_core_gpu(
    DM: float,
    bbdata_A: BBData,
    t_a: np.ndarray,
    window: Union[np.ndarray, int],
    R: Union[np.ndarray, float],
    max_lag: int=None,
    n_pol: int=2):
    ## assumes window is constant and R varies vs time
    ## this is not yet properly vectorized for variable t_a
    """Correlates and downselects over lag (potentially more lags at shorter integration times

    DM - the DM with which we de-smear the data before the final gating. for steady sources, set dispersion measure to 0.
         NOTE(review): DM is currently unused in this function — de-smearing is not applied here.
    bbdata_A - baseband data
    t_a[i,j] - start times at ith frequency, for jth time chunk, for telescope A
    window[j] - integer or np.array of size (nscan) holding length of time chunk window (us)
    R[i,j] - integer or np.array of size (nfreq,nscan). Fraction of time chunk (defines pulse window). Variable name should be more descriptive
    max_lag - maximum (absolute value) lag (in frames) for auto-correlation (useful for very long time series data)
    n_pol - number of polarizations in data

    Returns
    -------
    auto_vis : np.ndarray of shape (n_freq, n_pointings, n_pol, n_pol, n_scan, 2*max_lag+1)
        Auto-/cross-polarization visibilities per frequency, pointing and scan.
    """
    n_freq = len(bbdata_A.freq)
    n_scan = np.size(t_a, axis=-1)
    # SA: basing this off of how the data is arranged now, may want to change
    n_pointings = bbdata_A["tiedbeam_baseband"].shape[1] // 2
    if max_lag is None:
        max_lag = np.max(
            window
        )  # in order to hold all autocorrelations, there must be one max lag for all frequencies and times.
    vis_shape = (n_freq, n_pointings, n_pol, n_pol, n_scan, 2 * max_lag + 1)
    auto_vis = np.zeros(vis_shape, dtype=bbdata_A['tiedbeam_baseband'].dtype)
    # will compress this horrific number of for loops later
    for pointing in range(n_pointings):
        for iipol in range(n_pol):
            for jjpol in range(n_pol):
                for jjscan in range(n_scan):
                    # Window length may be shared across scans (int) or per-scan.
                    if type(window)==int:
                        window_jjscan=window
                    else:
                        window_jjscan=window[jjscan]
                    t_a_indices = t_a[:, jjscan]  # array of length 1024
                    # Resolve the on-pulse fraction R for this scan; collapse to a
                    # scalar when it is the same at every frequency.
                    if type(R)==int:  # should be 1 for steady sources
                        r_jjscan=R
                    elif len(np.unique(R[:,jjscan]))==1:
                        r_jjscan=R[0,jjscan]
                    else:  # "on" window varies as a function of frequency (e.g. pulsar)
                        r_jjscan=R[:,jjscan]  # np array of size (nfreq)
                        if len(np.unique(r_jjscan))==1:
                            r_jjscan=r_jjscan[0]
                    if (type(r_jjscan)==float or type(r_jjscan)==int) and len(np.unique(t_a_indices))==1:
                        # Fast path: identical start frame and on-pulse fraction at
                        # every frequency -> correlate all channels at once.
                        # NOTE(review): indexing by iipol/jjpol alone ignores the
                        # pointing offset in the beam axis — presumably only valid
                        # for single-pointing data; confirm against callers.
                        r_ij=r_jjscan
                        start = int((window_jjscan - window_jjscan*r_ij) // 2)+t_a_indices[0]
                        stop = int((window_jjscan + window_jjscan*r_ij) // 2)+t_a_indices[0]
                        # BUGFIX: this previously called the undefined name
                        # `fft_corr`; only `fft_corr_gpu` is imported here.
                        _vis = fft_corr_gpu(
                            bbdata_A['tiedbeam_baseband'][
                                :,
                                iipol,
                                start: stop,
                            ],
                            bbdata_A['tiedbeam_baseband'][
                                :,
                                jjpol,
                                start: stop,
                            ])
                        # Keep only lags in [-max_lag, +max_lag], matching the
                        # per-frequency branch below.
                        auto_vis[:, pointing, iipol, jjpol, jjscan, :] = torch.concat(
                            (_vis[:, :max_lag+1], _vis[:, -max_lag:]), dim=-1)
                    else:
                        for iifreq,r_ij in enumerate(r_jjscan):
                            start = int((window_jjscan - window_jjscan*r_ij) // 2)+t_a_indices[iifreq]
                            stop = int((window_jjscan + window_jjscan*r_ij) // 2)+t_a_indices[iifreq]
                            _vis = fft_corr_gpu(
                                bbdata_A['tiedbeam_baseband'][
                                    iifreq,
                                    iipol,
                                    start: stop,
                                ],
                                bbdata_A['tiedbeam_baseband'][
                                    iifreq,
                                    jjpol,
                                    start: stop,
                                ])
                            auto_vis[iifreq, pointing, iipol, jjpol, jjscan, :] = torch.concat(
                                (_vis[:max_lag+1], _vis[-max_lag:]))
    return auto_vis
def crosscorr_core_vectorized_gpu(
    bbdata_A: BBData,
    bbdata_B: BBData,
    t_a: np.ndarray,
    window: Union[np.ndarray, int],
    R: Union[np.ndarray, float],
    calc_results,  # BUGFIX: was annotated `IMReader`, a name never imported -> NameError on module import
    DM: float,
    index_A: int=0,
    index_B: int=1,
    sample_rate: float=2.56,
    max_lag: Optional[int]=None,
    n_pol: int=2,
    complex_conjugate_convention: int=-1,
    intra_channel_sign: int=1,
    ):
    """
    **only works for sources with a constant window as a function of frequency (can still vary with pointing and scan)**
    ** also currently only tested for steady sources with constant R.

    inputs:
    bbdata_A - telescope A data (sliced into a frequency chunk)
    bbdata_B - telescope B data (sliced into a frequency chunk)
    t_a[i,j] - starting frames at ith frequency, for jth time chunk, for telescope A
    window - np.array of size (nscans) containing integer numbers, each element is the length of scan window in frames
    R[i,j] - fraction of time chunk (defines pulse window). For steady sources, R=1 ("on" window = full window)
    DM - the DM with which we de-smear the data before the final gating. for steady sources, set dispersion measure to 0.
    calc_results - difxcalc object containing delay model (e.g. pycalc11 Calc results wrapper)
    index_A - where telescope A corresponds to in calc_results: CL: ideally, you should figure out the telescope index from the BBData object.
    index_B - where telescope B corresponds to in calc_results: CL: ideally, you should figure out the telescope index from the BBData object.
    sample_rate - rate at which data is sampled in microseconds
    max_lag - maximum (absolute value) lag (in frames) for correlations (useful for very long time series data)

    Outputs:
    cross - array of autocorrelations and cross correlations with shape (pointing,freq, timechunk, pol, pol, delay)
    """
    #assert (type(R)==float or type(R)==int or R.shape=(n_freq,n_scan)), 'R needs to either be a number (1 for steady sources) or a numpy array of size (nfreq, nscan)'
    n_freq = len(bbdata_A.freq)
    n_scan = np.size(t_a, axis=-1)
    # SA: basing this off of how the data is arranged now, may want to change
    n_pointings = bbdata_A["tiedbeam_baseband"].shape[1] // 2
    # Sub-second clock offset between stations A and B (seconds).
    ctime_diff=bbdata_A["time0"]["ctime"]-bbdata_B["time0"]["ctime"]
    ctime_offset_diff=bbdata_A["time0"]["ctime_offset"]-bbdata_B["time0"]["ctime_offset"]
    delta_A_B=ctime_diff-ctime_offset_diff
    #assert f0==bbdata_B.index_map["freq"]["centre"], "frequency values need to be same"
    # initialize output autocorrelations and cross correlations
    if max_lag is None:
        # in order to hold all autocorrelations, there must be one max lag for all frequencies and times.
        max_lag = 100
    vis_shape = (n_freq, n_pointings, n_pol, n_pol, n_scan, 2 * max_lag + 1)
    cross = torch.zeros(vis_shape, dtype=torch.complex64)
    for pointing in range(n_pointings):
        # require user to have "well-ordered" bbdata in frequency (iifreqA=iifreqB)
        # frequency centers in MHz # array of length 1024
        #shape is (nfreq)
        start = time.time()
        # Move both stations' baseband data and the channel centres to the GPU.
        baseband_a=torch.as_tensor(np.array(bbdata_A['tiedbeam_baseband']))
        baseband_a=baseband_a.to(device)
        baseband_b=torch.as_tensor(np.array(bbdata_B['tiedbeam_baseband']))
        baseband_b=baseband_b.to(device)
        f0=torch.as_tensor(np.array(bbdata_B.index_map["freq"]["centre"]))
        f0=f0.to(device)
        end = time.time()
        print(f"convert to torch: {end-start}")
        for jjscan in range(n_scan):
            if type(window)==int:
                window_jjscan=window
            else:
                window_jjscan=window[jjscan]
            t_a_indices = t_a[:, jjscan]  # array of length 1024
            # Absolute UNIX start times at station A for this scan.
            t0_a = bbdata_A["time0"]["ctime"][:] + t_a_indices * (sample_rate*1e-6)  # array of length 1024
            start_times = Time(
                t0_a,
                val2=bbdata_A["time0"]["ctime_offset"][:],
                format="unix",
                precision=9,
            )
            start_times._set_scale('tai')
            # Per-frame evaluation offsets (seconds) within the scan window.
            dt_vals=(sample_rate * 1e-6 * (t_a_indices[:, np.newaxis] + 1 + np.arange(window_jjscan)))
            geodelays_flattened = calc_results.retarded_baseline_delay(
                ant1=index_A, ant2=index_B, time=start_times, src=pointing, delay_sign=0, self_consistent=False,
                frame_dt=dt_vals
            )
            geodelays = geodelays_flattened.reshape(dt_vals.shape)
            # Fringestopping B -> A
            scan_a_cd, scan_b_fs_cd = get_aligned_scans_gpu(
                baseband_a, baseband_b, f0,t_a_indices, window_jjscan, geodelays,delta_A_B,
                complex_conjugate_convention=complex_conjugate_convention, intra_channel_sign=intra_channel_sign, sample_rate=sample_rate
            )
            #freeing up vram: want to delete original rather than deleting a view of the tensor (i.e. a reference, which is what is seen in external function calls)
            del baseband_a
            del baseband_b
            del f0
            print("scans are aligned")
            #######################################################
            ######### intrachannel de-dispersion ##################
            if DM!=0:  # save computation time
                print("not yet implemented for pulses")
            #######################################################
            # Now that the pulses are centered at zero, calculate
            ### the start and stop time indices for on-signal ######
            if type(R)==int:  # should be 1 for steady sources
                r_jjscan=R
            elif len(np.unique(R[:,jjscan]))==1:
                r_jjscan=R[0,jjscan]
            else:  # "on" window varies as a function of frequency (e.g. pulsar)
                r_jjscan=R[:,jjscan]  # np array of size (nfreq)
                if len(np.unique(r_jjscan))==1:
                    r_jjscan=r_jjscan[0]
            if type(r_jjscan)==int or type(r_jjscan)==float:
                r_ij=r_jjscan
                start = int((window_jjscan - window_jjscan*r_ij) // 2)
                stop = int((window_jjscan + window_jjscan*r_ij) // 2)
                #######################################################
                ########## cross-correlate the on-signal ##############
                # NOTE(review): only pol_0 == pol_1 products are computed;
                # cross-hand terms are skipped — confirm this is intended.
                for pol_0 in range(n_pol):
                    for pol_1 in range(n_pol):
                        if pol_0 == pol_1:
                            print("FFT CORR")
                            _vis = fft_corr_gpu(
                                scan_a_cd[:, pol_0, start:stop],
                                scan_b_fs_cd[:, pol_1, start:stop])
                            cross[:, pointing, pol_0, pol_1, jjscan, :] = torch.concat(
                                (_vis[:,:max_lag+1], _vis[:,-max_lag:]),dim=-1)
            else:
                for r_ij in r_jjscan:
                    start = int((window_jjscan - window_jjscan*r_ij) // 2)
                    stop = int((window_jjscan + window_jjscan*r_ij) // 2)
                    #######################################################
                    ########## cross-correlate the on-signal ##############
                    for pol_0 in range(n_pol):
                        for pol_1 in range(n_pol):
                            if pol_0 == pol_1:
                                _vis = fft_corr_gpu(
                                    scan_a_cd[:, pol_0, start:stop],
                                    scan_b_fs_cd[:, pol_1, start:stop])
                                cross[:, pointing, pol_0, pol_1, jjscan, :] = torch.concat(
                                    (_vis[:,:max_lag+1], _vis[:,-max_lag:]),dim=-1)
    return cross.cpu().numpy()
def get_aligned_scans_gpu(
    baseband_a: torch.Tensor,
    baseband_b: torch.Tensor,
    f0: torch.Tensor,
    t_a_index: np.ndarray,
    wij:np.ndarray,
    tau:np.ndarray,
    delta_A_B: float,
    complex_conjugate_convention: int=-1,
    intra_channel_sign:int=1,
    sample_rate:float=2.56):
    """For a single frequency corresponding to a given FPGA freq_id, returns aligned scans of data for that freq_id out of two provided BBData objects.

    baseband_a : torch.tensor
        BBData['tiedbeam_baseband'] tensor, with arbitrary frequency coverage.
    baseband_b : torch.tensor
        A BBData['tiedbeam_baseband], with arbitrary frequency coverage. We apply a sub-frame phase rotation with fractional sample correction to data extracted out of bbdata_b.
    t_a_index : np.array of shape (1024)
        An array of indices corresponding to the start frames for telescope A
    w_ij : window length. Should be an integer, and brownie points for a good FFT length.
    tau : np.array (nfreq, n_frame) of dtype np.float
        A delay in microseconds to apply to BBData_b, corresponding to the geometric delay.
        The first index is the delay evaluated at time t_ij_a
    freq_index : int

    Returns
    -------
    aligned_a : np.array
        A dual-pol scan of shape (2,w_ij)
    aligned_b : np.array
        A dual-pol scan of shape (2,w_ij)
    newstart: int
        Number of frames by which we need to shift t_a_ij in order to ensure t_a_ij+geodelay is contained within bbdata_B. Note that in the event that geodelay is positive, newstart will always be 0 (assuming the user has chosen t_a_ij such that the unix time is in both datasets)

    Super technical note on floor vs round: it doesn't matter AS LONG AS you do a sub-sample rotation (or better yet, a frac samp correction)! Suppose your total delay is 10.6 frames.
    - You can round to 11 frames. You should keep track that you rounded to 11, and then do frac samp -0.4.
    - You can floor to 10 frames, you should keep track that you floored to 10, and then do frac samp +0.6.
    Answer should be the same either way -- as long as you do the frac samp correction!

    After doing the integer part (shift by either 10 or 11 frames), we need to apply a phase rotation. Note that exp(2j*np.pi*channel_center * -0.4/(2.56us) = exp(2j*np.pi*channel_center * +0.6/(2.56us), for the exact frequency corresponding to channel center, but not for any of the other frequencies that do not satisfy f = 800 - (N * 0.390625 MHz) for integers N -- this is the narrowband approximation. We experience some de-correlation near the band edges, which is why we use fractional sample correction in this code.
    """
    time_we_want_at_b = tau[:, 0]  # us
    a_shape = list(baseband_a.shape)
    a_shape[-1] = wij
    start = time.time()
    aligned_a = torch.zeros(a_shape, dtype=baseband_a.dtype)
    # TODO vectorize
    if len(np.unique(t_a_index))==1:
        aligned_a[:, ...] = baseband_a[:, ...,
                                       t_a_index[0]:t_a_index[0] + wij]
    else:
        for i in range(len(t_a_index)):
            # BUGFIX: this branch previously indexed `bbdata_A['tiedbeam_baseband']`,
            # a name that does not exist in this function (NameError whenever
            # start frames differ across frequencies); the source is baseband_a.
            aligned_a[i, ...] = baseband_a[i, ...,
                                           t_a_index[i]:t_a_index[i] + wij]
    end = time.time()
    print(f"Creating aligned_a: {end-start}")
    # initialize aligned B array
    start=time.time()
    aligned_b = torch.zeros(
        baseband_a.shape, dtype=baseband_a.dtype)
    aligned_b=aligned_b.to(device)
    end = time.time()
    print(f"moving torch b: {end-start}")
    # calculate the additional offset between A and B in the event that the (samples points of) A and B are misaligned in absolute time by < 1 frame
    # i.e. to correctly fringestop, we must also account for a case such as:
    ## A: |----|----|----|----|----| ##
    ## B:   |----|----|----|----|----| ##
    # TODO vectorize
    int_delay = np.array([int(np.round((timeb*1e-6 - delta) / (sample_rate*1e-6)))
                          for timeb, delta, in zip(time_we_want_at_b, delta_A_B)])
    # frame number closest to start time
    start_index_we_want_at_b = t_a_index+int_delay
    # account for case where t_a_index+geodelay < 0 (i.e. signal arrives at telescope B before start of data acquision)
    start_index_we_have_at_b = np.array(
        [np.max([start, 0]) for start in start_index_we_want_at_b])
    # if index_we_have_at_b is negative, this will be the amount we need to cushion our output data by
    pad_index_b = start_index_we_have_at_b-start_index_we_want_at_b
    start = time.time()
    w_pad = wij - pad_index_b
    ntime_start = baseband_b.size()[-1] - start_index_we_have_at_b
    new_wij = np.minimum(w_pad, ntime_start)
    end = time.time()
    print(f"Creating new_wij: {end-start}")
    # if you are missing half the data, multiply by 2.
    correction_factor = wij / new_wij
    # BUGFIX: was `correction_factor.any() > 2`, which compares a bool to 2 and
    # is always False, so the warning never fired.
    if (correction_factor > 2).any():
        # warn the user that the boundary conditions are sketch if we are missing e.g. more than half the data.
        print("warning: based on specified start time and scan length, over half the data is missing from telescope XX.")
    start = time.time()
    # Copy the available B-station frames into place, scaled so a steady source
    # keeps the correct flux even when part of the window is missing.
    for i in range(len(pad_index_b)):
        aligned_b[i, ..., pad_index_b[i]:pad_index_b[i]+new_wij[i]] = \
            baseband_b[i, ...,start_index_we_have_at_b[i]:start_index_we_have_at_b[i]+new_wij[i]] * correction_factor[i]
    end = time.time()
    print(f"updating aligned_b: {end-start}")
    aligned_b = aligned_b[..., :wij]
    # Integer-frame part of the delay actually applied (seconds); the residual
    # below is removed by fractional-sample correction.
    time_we_have_at_b = (delta_A_B+int_delay*sample_rate*1e-6)  # s
    sub_frame_tau = np.array([tau[i, :wij] - time_b*1e6 for time_b, i in zip(
        time_we_have_at_b, range(len(tau)))])  # sub-frame delay at start time in mircoseconds
    start = time.time()
    sub_frame_tau=torch.as_tensor(sub_frame_tau)
    sub_frame_tau=sub_frame_tau.to(device)
    end = time.time()
    print(f"creating sub_frame_tau: {end-start}")
    start = time.time()
    aligned_b = frac_samp_shift_gpu(aligned_b,
                                    f0=f0,
                                    sub_frame_tau=sub_frame_tau,
                                    complex_conjugate_convention=complex_conjugate_convention,
                                    intra_channel_sign=intra_channel_sign,
                                    sample_rate=sample_rate)
    end = time.time()
    print(f"Running frac_samp_shift_vectorized: {end-start}")
    aligned_a=aligned_a.to(device)
    print("moving aligned")
    return aligned_a, aligned_b
def frac_samp_shift_gpu(data, f0, sub_frame_tau, complex_conjugate_convention, intra_channel_sign, sample_rate=2.56):
    """Apply a sub-frame (fractional-sample) delay to baseband data on the GPU.

    data : torch.Tensor, time on the last axis (assumed shape (nfreq, npol, ntime) — TODO confirm).
    f0 : torch.Tensor of channel-centre frequencies, one per frequency channel.
    sub_frame_tau : torch.Tensor (nfreq, ntime) of residual delays in microseconds.
    complex_conjugate_convention, intra_channel_sign : +/-1 sign conventions for the
        channel-centre and intra-channel phase terms respectively.
    sample_rate : sample spacing in microseconds (default 2.56).
    """
    n = data.shape[-1]
    f = torch.fft.fftfreq(n, sample_rate)
    f=f.to(device)
    # Intra-channel slope term exp(i*2*pi*f*tau); the median delay over the scan
    # is used as a single representative value per channel.
    transfer_func = torch.exp(intra_channel_sign*2j * np.pi * f[np.newaxis,:] * torch.median(sub_frame_tau,dim=-1).values[:,np.newaxis])  # apply dphi/dfreq
    del f  # free up vram
    # Shift in the Fourier domain, then apply the channel-centre phase rotation
    # exp(i*2*pi*f0*tau) per time sample.
    output= torch.fft.ifft(
        torch.fft.fft(data) * transfer_func[:,np.newaxis]) * (torch.exp(complex_conjugate_convention*2j * np.pi * f0[:,np.newaxis] * sub_frame_tau))[:, np.newaxis, :]  # apply phi
    del transfer_func
    return output
|
leungcalvinREPO_NAMEpyfx-publicPATH_START.@pyfx-public_extracted@pyfx-public-main@src@pyfx@core_correlation_pytorch.py@.PATH_END.py
|
{
"filename": "do_signal_llhs_quick.py",
"repo_name": "Swift-BAT/NITRATES",
"repo_path": "NITRATES_extracted/NITRATES-main/nitrates/archive/do_signal_llhs_quick.py",
"type": "Python"
}
|
import numpy as np
from astropy.io import fits
import os
import argparse
import logging, traceback
import time
from ..analysis_seeds.bkg_rate_estimation import rate_obj_from_sqltab
from ..lib.sqlite_funcs import get_conn, write_result
from ..lib.dbread_funcs import (
get_rate_fits_tab,
guess_dbfname,
get_seeds_tab,
get_info_tab,
get_files_tab,
)
from ..config import EBINS0, EBINS1
from ..models.flux_models import Plaw_Flux
from ..llh_analysis.minimizers import (
NLLH_ScipyMinimize_Wjacob,
imxy_grid_miner,
NLLH_ScipyMinimize,
)
from ..lib.drm_funcs import DRMs
from ..response.ray_trace_funcs import RayTraces
from ..llh_analysis.LLH import LLH_webins
from ..models.models import Bkg_Model, Point_Source_Model, CompoundModel
# need to read rate fits from DB
# and read twinds
# and read/get event, dmask, and ebins
# then get bkg_llh_obj and a minimizer
# then loop over all time windows
# minimizing nllh and recording bf params
def cli():
    """Parse the command-line options for this analysis job.

    Returns
    -------
    argparse.Namespace
        Parsed options: evfname, dmask, job_id, dbfname.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--evfname", type=str, default=None,
                        help="Event data file")
    parser.add_argument("--dmask", type=str, default=None,
                        help="Detmask fname")
    parser.add_argument("--job_id", type=int, default=-1,
                        help="ID to tell it what seeds to do")
    parser.add_argument("--dbfname", type=str, default=None,
                        help="Name to save the database to")
    return parser.parse_args()
def do_analysis(
    seed_tab,
    pl_flux,
    drm_obj,
    rt_obj,
    bkg_llh_obj,
    sig_llh_obj,
    bkg_rate_obj,
    conn,
    db_fname,
    grid_width=4e-3,
    grid_step=1e-3,
):
    """For each time window in seed_tab, fit the background-only NLLH, then for
    each seed position minimize the signal NLLH over a small imx/imy grid and
    write the best-fit parameters and TS to the results database.

    seed_tab : pandas.DataFrame of seeds; must contain columns used below
        (timeID, time, duration, blipID, imx, imy).
    pl_flux / drm_obj / rt_obj : flux model, DRM and ray-trace providers for
        building the point-source model.
    bkg_llh_obj, sig_llh_obj : LLH_webins objects for background and signal fits.
    bkg_rate_obj : background rate estimate used by Bkg_Model.
    conn, db_fname : open sqlite connection and its filename (for reconnects).
    grid_width, grid_step : size and spacing (image units) of the imx/imy grid
        searched around each seed position.
    """
    seed_t_gs = seed_tab.groupby("timeID")
    N_twinds = seed_t_gs.ngroups
    ebins0 = sig_llh_obj.ebins0
    ebins1 = sig_llh_obj.ebins1
    bl_dmask = sig_llh_obj.bl_dmask
    bkg_miner = NLLH_ScipyMinimize("")
    sig_miner = NLLH_ScipyMinimize_Wjacob("")
    for seed_g in seed_t_gs:
        timeID = seed_g[0]
        seed_tab = seed_g[1]
        Nseeds = len(seed_tab)
        # One shared time window per timeID group.
        t0 = np.nanmean(seed_tab["time"])
        dt = np.nanmean(seed_tab["duration"])
        t1 = t0 + dt
        tmid = (t0 + t1) / 2.0
        bkg_llh_obj.set_time(t0, dt)
        sig_llh_obj.set_time(t0, dt)
        bkg_mod = Bkg_Model(
            bkg_rate_obj, bl_dmask, t=tmid, bkg_err_fact=2.0, use_prior=False
        )
        logging.debug("bkg exp rates, errors")
        logging.debug(bkg_mod._rates)
        logging.debug(bkg_mod._errs)
        bkg_llh_obj.set_model(bkg_mod)
        bkg_miner.set_llh(bkg_llh_obj)
        # All background parameters are held fixed at the model's values; the
        # background NLLH is evaluated, not minimized.
        bkg_miner.set_fixed_params(bkg_llh_obj.model.param_names)
        bkg_params = {
            pname: bkg_llh_obj.model.param_dict[pname]["val"]
            for pname in bkg_llh_obj.model.param_names
        }
        bkg_nllh = -bkg_llh_obj.get_llh(bkg_params)
        for row in seed_tab.itertuples():
            try:
                blipID = row.blipID
                sig_mod = Point_Source_Model(
                    row.imx,
                    row.imy,
                    grid_width,
                    pl_flux,
                    drm_obj,
                    [ebins0, ebins1],
                    rt_obj,
                    bl_dmask,
                    use_deriv=True,
                )
                comp_mod = CompoundModel([bkg_mod, sig_mod])
                sig_llh_obj.set_model(comp_mod)
                sig_miner.set_llh(sig_llh_obj)
                # NOTE(review): with `or`, this matches every pname that lacks
                # either "A" or "gamma" (i.e. nearly all of them); the next
                # call then re-frees Signal_A/Signal_gamma explicitly, so the
                # net effect is that only those two float — confirm intended.
                fixed_pars = [
                    pname
                    for pname in sig_miner.param_names
                    if ("A" not in pname) or ("gamma" not in pname)
                ]
                sig_miner.set_fixed_params(fixed_pars)
                sig_miner.set_fixed_params(["Signal_A", "Signal_gamma"], fixed=False)
                # Grid search in imx/imy around the seed position.
                param_list, nllhs, imxs, imys = imxy_grid_miner(
                    sig_miner,
                    row.imx - grid_width / 2.0,
                    row.imy - grid_width / 2.0,
                    row.imx + grid_width / 2.0,
                    row.imy + grid_width / 2.0,
                    dimxy=grid_step,
                )
                best_ind = np.argmin(nllhs)
                bf_params = param_list[best_ind]
                sig_nllh = nllhs[best_ind]
                imx = imxs[best_ind]
                imy = imys[best_ind]
                # Assemble the full parameter dict: fixed params take their
                # stored values (with imx/imy replaced by the grid best fit),
                # free params take the minimizer's best-fit values in order.
                sig_param_dict = {}
                i = 0
                for pname in sig_miner.param_names:
                    if pname in sig_miner.fixed_params:
                        if "imx" in pname:
                            sig_param_dict[pname] = imx
                        elif "imy" in pname:
                            sig_param_dict[pname] = imy
                        else:
                            sig_param_dict[pname] = sig_miner.param_info_dict[pname][
                                "val"
                            ]
                    else:
                        sig_param_dict[pname] = bf_params[i]
                        i += 1
                logging.info("sig_param_dict: ")
                logging.info(str(sig_param_dict))
                # Likelihood-ratio test statistic; clamp NaN (sig worse than
                # bkg) to zero.
                TS = np.sqrt(2.0 * (bkg_nllh - sig_nllh))
                if np.isnan(TS):
                    TS = 0.0
                try:
                    write_result(conn, row, sig_param_dict, bkg_nllh, sig_nllh, TS)
                except Exception as E:
                    # Retry once on a fresh connection before giving up.
                    logging.error(E)
                    logging.error("Problem writing to DB")
                    conn.close()
                    conn = get_conn(db_fname)
                    try:
                        write_result(conn, row, sig_param_dict, bkg_nllh, sig_nllh, TS)
                    except Exception as E:
                        logging.error(E)
                        logging.error("Problem writing to DB")
                        logging.error("Couldn't write ")
                        logging.error(str(sig_param_dict))
                        logging.error("to DB")
            except Exception as E:
                # A failed seed is logged and skipped; remaining seeds proceed.
                logging.error(E)
                logging.error(traceback.format_exc())
                logging.error("Failed to minimize seed: ")
                logging.error(row)
def main(args):
    """Run the LLH seed-analysis worker loop.

    Sets up a per-job log file, connects to the results sqlite DB
    (retrying every 30 s until the info/files/rate-fit tables can be read),
    loads the event data, detector mask and background-rate fits, then loops
    forever: fetch not-yet-done seeds for this job's process group, keep only
    the lowest-priority batch, and run ``do_analysis`` on it.

    Parameters
    ----------
    args : argparse.Namespace
        Parsed CLI arguments; uses ``job_id`` (process number, also used in
        the log-file name) and ``dbfname`` (path to the sqlite DB; guessed
        via ``guess_dbfname`` when None).
    """
    # One log file per job so parallel workers don't interleave.
    fname = "llh_analysis_" + str(args.job_id)
    logging.basicConfig(
        filename=fname + ".log",
        level=logging.DEBUG,
        format="%(asctime)s-" "%(levelname)s- %(message)s",
    )
    # Startup loop: keep retrying until the DB and all input files are
    # readable (other processes may still be creating them).
    while True:
        try:
            if args.dbfname is None:
                db_fname = guess_dbfname()
                # guess_dbfname may return several candidates; take the first.
                if isinstance(db_fname, list):
                    db_fname = db_fname[0]
            else:
                db_fname = args.dbfname
            logging.info("Connecting to DB")
            conn = get_conn(db_fname)
            info_tab = get_info_tab(conn)
            logging.info("Got info table")
            files_tab = get_files_tab(conn)
            logging.info("Got files table")
            trigtime = info_tab["trigtimeMET"][0]
            evfname = files_tab["evfname"][0]
            ev_data = fits.open(evfname)[1].data
            dmask_fname = files_tab["detmask"][0]
            dmask = fits.open(dmask_fname)[0].data
            # Good detectors are flagged 0 in the detmask.
            bl_dmask = dmask == 0.0
            logging.debug("Opened up event and detmask files")
            rate_fits_df = get_rate_fits_tab(conn)
            bkg_rates_obj = rate_obj_from_sqltab(rate_fits_df, 0, 1)
            break
        except Exception as E:
            logging.error(str(E))
            logging.error(traceback.format_exc())
            time.sleep(30.0)
    time_starting = time.time()
    # job_id doubles as the seed-table process group; negative means "all".
    proc_num = args.job_id
    # init classes up here
    drm_dir = files_tab["drmDir"][0]
    rt_dir = files_tab["rtDir"][0]
    drm_obj = DRMs(drm_dir)
    rt_obj = RayTraces(rt_dir)
    pl_flux = Plaw_Flux()
    ebins0 = np.array(EBINS0)
    ebins1 = np.array(EBINS1)
    logging.debug("ebins0")
    logging.debug(ebins0)
    logging.debug("ebins1")
    logging.debug(ebins1)
    # Separate LLH objects for the background-only and signal+background fits.
    bkg_llh_obj = LLH_webins(ev_data, ebins0, ebins1, bl_dmask)
    sig_llh_obj = LLH_webins(ev_data, ebins0, ebins1, bl_dmask)
    # Main work loop: never exits; polls the DB for new seeds every 30 s.
    while True:
        conn = get_conn(db_fname)
        try:
            if proc_num >= 0:
                seeds_tab = get_seeds_tab(conn, proc_group=proc_num)
            else:
                seeds_tab = get_seeds_tab(conn)
        except Exception as E:
            logging.error(E)
            logging.error(traceback.format_exc())
            logging.warning("Failed to get seed tab, will try again")
            conn.close()
            time.sleep(30.0)
            continue
        # Only seeds not yet marked done.
        new_seeds = seeds_tab["done"] == 0
        seed_tab = seeds_tab[new_seeds]
        Nseeds_todo = np.sum(new_seeds)
        logging.info(str(Nseeds_todo) + " new seeds")
        if Nseeds_todo == 0:
            conn.close()
            time.sleep(30.0)
            continue
        # seed_prio_gs = seed_tab.groupby('priority')
        # Nprio = seed_prio_gs.ngroups
        # Process the most urgent (numerically smallest) priority first.
        min_priority = np.min(seed_tab["priority"])
        logging.info("min priority is " + str(min_priority))
        prio_bl = seed_tab["priority"] == min_priority
        Nmin_prio = np.sum(prio_bl)
        logging.info(str(Nmin_prio) + " new seeds with priority " + str(min_priority))
        seed_tab = seed_tab[prio_bl]
        do_analysis(
            seed_tab,
            pl_flux,
            drm_obj,
            rt_obj,
            bkg_llh_obj,
            sig_llh_obj,
            bkg_rates_obj,
            conn,
            db_fname,
        )
        conn.close()
# Script entry point: parse command-line arguments and start the worker loop.
if __name__ == "__main__":
    args = cli()
    main(args)
|
Swift-BATREPO_NAMENITRATESPATH_START.@NITRATES_extracted@NITRATES-main@nitrates@archive@do_signal_llhs_quick.py@.PATH_END.py
|
{
"filename": "filament.py",
"repo_name": "SimonPfeifer/cows",
"repo_path": "cows_extracted/cows-master/src/cows/filament.py",
"type": "Python"
}
|
import numpy as np
from ._filament import _label_skeleton, _find_filaments
def label_skeleton(skel, periodic=False):
    '''Label each skeleton cell with its neighbour count.

    Every skeleton cell is assigned the number of neighbouring skeleton
    cells it shares a face, edge or vertex with (N_26 connectivity).

    Parameters
    ----------
    skel : ndarray, 3D
        Binary image of the skeletonized objects; zeros are background,
        nonzero values are foreground.
    periodic : bool
        When True, periodic boundary conditions are applied to the input
        array. The input array must be 3D.

    Returns
    -------
    result : ndarray
        The labeled skeleton.
    '''
    # Only 3D volumes are supported by the backend.
    assert skel.ndim == 3
    labeled = _label_skeleton(skel, periodic)
    return labeled
def separate_skeleton(skel, periodic=False):
    '''Separate the skeleton into simple segments.

    Sets all skeleton cells with more than two neighbours (junctions) to
    the background value of zero. This splits the skeleton into a set of
    individual curve segments of arbitrary length, each with 2 endpoints,
    which are then re-labeled by neighbour count.

    Parameters
    ----------
    skel : ndarray, 3D
        A binary image containing the skeletonized objects. Zeros
        represent background, nonzero values are foreground.
    periodic : bool
        If True, periodic boundary conditions are used for the input
        array. Input array must be 3D.

    Returns
    -------
    result : ndarray
        The separated skeleton.
    '''
    assert skel.ndim == 3
    # Label every cell with its N_26 neighbour count.
    skel = _label_skeleton(skel, periodic)
    # Remove junction cells (more than two neighbours) to split the
    # skeleton into disjoint simple segments.
    # (Fix: dropped the unused local `data_shape = skel.shape`.)
    skel[skel > 2] = 0
    # Re-label so endpoints and regular cells are marked again.
    skel = _label_skeleton(skel, periodic)
    return skel
def find_filaments(skel, periodic=False):
    '''Group connected skeleton cells into filaments.

    Cells that are neighbours within a 3x3x3 neighbourhood are joined,
    and every connected set of cells is assigned a unique filament ID.

    Parameters
    ----------
    skel : ndarray, 3D
        An array containing the classified and separated skeleton. Zeros
        represent background, ones are endpoints and twos are regular
        cells.
    periodic : bool
        If True, periodic boundary conditions are used for the input
        array. Input array must be 3D.

    Returns
    -------
    result : ndarray, 3D
        Array of skel.shape holding, for every connected set of cells
        (filament), its respective ID.
    catalogue : ndarray, 2D
        One row per cell containing its ID and X-, Y- and Z-position.
    '''
    assert skel.ndim == 3
    # Only cubic volumes are accepted.
    nx, ny, nz = skel.shape
    assert nx == ny
    assert nx == nz
    return _find_filaments(skel, periodic)
|
SimonPfeiferREPO_NAMEcowsPATH_START.@cows_extracted@cows-master@src@cows@filament.py@.PATH_END.py
|
{
"filename": "_linewidth.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/carpet/aaxis/_linewidth.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class LinewidthValidator(_plotly_utils.basevalidators.NumberValidator):
    """Validator for the ``carpet.aaxis.linewidth`` number property."""

    def __init__(self, plotly_name="linewidth", parent_name="carpet.aaxis", **kwargs):
        # Defaults below; any value the caller passes via **kwargs wins.
        edit_type = kwargs.pop("edit_type", "calc")
        min_allowed = kwargs.pop("min", 0)
        role = kwargs.pop("role", "style")
        super(LinewidthValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            min=min_allowed,
            role=role,
            **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@carpet@aaxis@_linewidth.py@.PATH_END.py
|
{
"filename": "CANDIDATES-Oumuamua-Past.md",
"repo_name": "seap-udea/iWander",
"repo_path": "iWander_extracted/iWander-master/objects/CANDIDATES-Oumuamua-Past.md",
"type": "Markdown"
}
|
# Progenitor candidates of 1I/2017 U1
[](http://arxiv.org/abs/1711.09397)
_Latest update_: ``Mon Mar 19 08:45:41 2018``
|#|Name|d(pc)|q|dmin(pc)|tmin(Myr)|vrel(km/s)|Ppos|find|Pposvel|
|--|--|--|--|--|--|--|--|--|--|
| 1 | HIP 103749 ([HD 200325](http://simbad.u-strasbg.fr/simbad/sim-id?Ident=HD%20200325)) | 53.8 | 1 | 1.75 [f=0.55,0.31,0.37,4.98] | -4.22 [-4.41,-4.22,-4.05] | 12.0 [11.4,12.0,12.5] | -1.6 | -1.9 | -0.5 |
| 2 | TYC 3144-2040-1 | 4.5 | 2 | 1.00 [f=0.60,0.86,0.88,1.14] | -0.12 [-0.12,-0.11,-0.11] | 17.9 [17.3,18.0,18.5] | -1.8 | -2.9 | -1.6 |
| 3 | TYC 7069-1289-1 | 8.3 | 1 | 0.99 [f=0.30,0.53,0.61,5.25] | -0.39 [-0.41,-0.38,-0.26] | 24.6 [23.3,25.1,30.6] | -1.8 | -3.7 | -2.3 |
| 4 | HIP 3821 ([* eta Cas](http://simbad.u-strasbg.fr/simbad/sim-id?Ident=*%20eta%20Cas)) | 6.0 | 85 | 1.26 [f=0.60,1.23,1.23,1.27] | -0.17 [-0.17,-0.17,-0.17] | 23.5 [23.3,23.5,23.6] | -2.8 | -3.4 | -3.1 |
| 5 | TYC 3663-2669-1 | 6.1 | 34 | 1.34 [f=0.65,1.13,1.20,1.46] | -0.17 [-0.17,-0.17,-0.16] | 23.9 [23.1,23.8,24.3] | -3.0 | -3.5 | -3.4 |
| 6 | HIP 91768 ([HD 173739](http://simbad.u-strasbg.fr/simbad/sim-id?Ident=HD%20173739)) | 3.5 | 11 | 0.82 [f=0.50,0.81,0.81,0.82] | -0.03 [-0.03,-0.03,-0.03] | 36.8 [36.7,36.8,36.9] | -1.2 | -5.5 | -3.6 |
| 7 | HIP 91772 ([HD 173740](http://simbad.u-strasbg.fr/simbad/sim-id?Ident=HD%20173740)) | 3.5 | 10 | 0.76 [f=0.55,0.75,0.75,0.76] | -0.03 [-0.03,-0.03,-0.03] | 39.3 [39.1,39.3,39.4] | -1.1 | -5.8 | -3.8 |
| 8 | TYC 6573-3979-1 | 6.5 | 2 | 0.95 [f=0.55,0.24,0.27,1.81] | -0.18 [-0.18,-0.18,-0.17] | 44.6 [44.1,44.7,45.8] | -0.8 | -6.6 | -4.3 |
| 9 | HIP 18453 ([* 43 Per](http://simbad.u-strasbg.fr/simbad/sim-id?Ident=*%2043%20Per)) | 37.4 | 30 | 0.86 [f=0.25,0.76,0.80,1.42] | -0.86 [-0.87,-0.86,-0.85] | 41.0 [40.6,41.1,41.5] | -1.8 | -6.1 | -4.8 |
| 10 | TYC 7582-1449-1 | 192.2 | 1 | 1.26 [f=0.05,1.26,2.59,25.59] | -8.96 [-9.20,-8.83,-8.59] | 22.1 [21.8,22.4,23.3] | -4.6 | -3.6 | -5.3 |
| 11 | TYC 7142-1661-1 | 21.7 | 1 | 0.75 [f=0.06,0.75,1.43,14.50] | -0.62 [-0.62,-0.59,-0.54] | 36.9 [36.8,37.9,47.4] | -2.9 | -5.7 | -5.4 |
| 12 | HIP 63797 ([HD 113376](http://simbad.u-strasbg.fr/simbad/sim-id?Ident=HD%20113376)) | 118.1 | 11 | 1.00 [f=0.25,0.50,0.67,3.28] | -2.90 [-3.33,-2.83,-2.49] | 40.2 [35.0,41.2,46.8] | -2.5 | -5.7 | -5.8 |
| 13 | HIP 101180 ([LHS 3558](http://simbad.u-strasbg.fr/simbad/sim-id?Ident=LHS%203558)) | 8.1 | 52 | 1.58 [f=0.50,1.57,1.58,1.60] | -0.17 [-0.17,-0.17,-0.16] | 32.6 [32.4,32.6,32.7] | -4.4 | -4.9 | -6.2 |
| 14 | TYC 7093-310-1 | 6.7 | 1 | 1.96 [f=0.50,1.08,1.56,2.50] | -0.19 [-0.21,-0.20,-0.19] | 40.3 [38.6,40.2,41.7] | -3.7 | -5.9 | -6.5 |
| 15 | HIP 1475 ([V* GX And](http://simbad.u-strasbg.fr/simbad/sim-id?Ident=V*%20GX%20And)) | 3.6 | 106 | 1.47 [f=0.65,1.47,1.47,1.47] | -0.03 [-0.03,-0.03,-0.03] | 38.7 [38.6,38.7,38.8] | -3.8 | -5.8 | -6.5 |
| 16 | HIP 21553 ([HD 232979](http://simbad.u-strasbg.fr/simbad/sim-id?Ident=HD%20232979)) | 9.9 | 171 | 1.94 [f=0.45,1.90,1.91,1.96] | -0.24 [-0.25,-0.24,-0.24] | 34.8 [34.6,34.8,34.9] | -6.6 | -5.2 | -8.7 |
|
seap-udeaREPO_NAMEiWanderPATH_START.@iWander_extracted@iWander-master@objects@CANDIDATES-Oumuamua-Past.md@.PATH_END.py
|
{
"filename": "_tickvals.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/densitymapbox/colorbar/_tickvals.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class TickvalsValidator(_plotly_utils.basevalidators.DataArrayValidator):
    """Validator for the ``densitymapbox.colorbar.tickvals`` data array."""

    def __init__(
        self, plotly_name="tickvals", parent_name="densitymapbox.colorbar", **kwargs
    ):
        # Caller-supplied kwargs take precedence over this default.
        edit_type = kwargs.pop("edit_type", "colorbars")
        super(TickvalsValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@densitymapbox@colorbar@_tickvals.py@.PATH_END.py
|
{
"filename": "TTTEEE.py",
"repo_name": "SouthPoleTelescope/spt3g_y1_dist",
"repo_path": "spt3g_y1_dist_extracted/spt3g_y1_dist-main/cobaya_files/SPT3G_Y1/TTTEEE.py",
"type": "Python"
}
|
from cobaya.likelihoods.base_classes import planck_clik
from cobaya.log import LoggedError, get_logger
from cobaya.tools import are_different_params_lists
import numpy as np
from typing import Optional
import os
class TTTEEE(planck_clik.PlanckClik):
    """SPT-3G 2018 TT/TE/EE likelihood wrapped as a cobaya clik likelihood.

    Subclasses ``planck_clik.PlanckClik`` to load the SPT-3G Y1 TTTEEE
    .clik file (forwarding the non-empty ``SPT3G_2018_TTTEEE_*`` options
    below to clik) and evaluates the log-likelihood from theory C_ell's
    plus the clik nuisance parameters.
    """

    # Optional clik initialization options; attributes left as empty
    # strings are dropped when building `options` in `initialize`.
    SPT3G_2018_TTTEEE_late_crop_msk: Optional[str]
    SPT3G_2018_TTTEEE_beam_covariance_scale: Optional[str]
    SPT3G_2018_TTTEEE_galdust_T: Optional[str]
    SPT3G_2018_TTTEEE_CIB_T: Optional[str]
    SPT3G_2018_TTTEEE_tSZ_cosmology_scaling: Optional[str]
    SPT3G_2018_TTTEEE_kSZ_cosmology_scaling: Optional[str]
    SPT3G_2018_TTTEEE_spectra_to_fit_bin_min: Optional[str]
    SPT3G_2018_TTTEEE_spectra_to_fit_bin_max: Optional[str]
    SPT3G_2018_TTTEEE_spectra_to_fit: Optional[str]

    def initialize(self):
        """Load clik, open the .clik file and precompute C_ell requirements."""
        # Fix: ComponentNotInstalledError was referenced below without ever
        # being imported in this module, so those error paths raised a
        # NameError instead of the intended exception.
        # NOTE(review): import path assumes cobaya >= 3.2 -- confirm against
        # the cobaya version this repo targets.
        from cobaya.component import ComponentNotInstalledError
        try:
            install_path = (
                lambda p: self.get_code_path(p) if p else None)(self.packages_path)
            # min_version here is checked inside get_clik_import_path, since it is
            # displayed in the folder name and cannot be retrieved from the module.
            clik = planck_clik.load_clik(
                "clik", path=self.path, install_path=install_path,
                get_import_path=lambda pth: os.path.join(
                    planck_clik.get_clik_source_folder(pth),
                    'lib/python/site-packages'),
                logger=self.log,
                not_installed_level="debug")
        except planck_clik.VersionCheckError as excpt:
            raise planck_clik.VersionCheckError(
                str(excpt) + " Install new clik version following indications at https://github.com/SouthPoleTelescope/spt3g_y1_dist")
        except ComponentNotInstalledError as excpt:
            raise ComponentNotInstalledError(
                self.log, (f"Could not find clik: {excpt}. "
                           "To install follow indications at https://github.com/SouthPoleTelescope/spt3g_y1_dist"))
        # The SPT-3G likelihood needs clik v16 or newer.
        if int(clik.version().split("_")[1].split(".")[0]) < 16:
            raise planck_clik.VersionCheckError("SPT3G likelihood requires clik v16+. See information here https://github.com/SouthPoleTelescope/spt3g_y1_dist")
        # Loading the likelihood data
        data_path = planck_clik.get_data_path(self.__class__.get_qualified_class_name())
        if not os.path.isabs(self.clik_file):
            self.path_data = getattr(self, "path_data", os.path.join(
                self.path or self.packages_path, "data", data_path))
            self.clik_file = os.path.join(self.path_data, self.clik_file)
        # Collect the non-empty SPT3G_2018_TTTEEE_* options to forward to clik.
        options_list = [v for v in dir(self) if v.startswith("SPT3G_2018_TTTEEE")]
        options = {op: str(getattr(self, op)) for op in options_list
                   if str(getattr(self, op)) != ""}
        try:
            self.clik = clik.clik(self.clik_file, **options)
        except clik.lkl.CError:
            # Is it that the file was not found?
            if not os.path.exists(self.clik_file):
                raise ComponentNotInstalledError(
                    self.log, "The .clik file was not found where specified in the "
                    "'clik_file' field of the settings of this likelihood. "
                    "Maybe the 'path' given is not correct? The full path where"
                    " the .clik file was searched for is '%s'", self.clik_file)
            # Else: unknown clik error
            self.log.error("An unexpected error occurred in clik (possibly related to "
                           "multiple simultaneous initialization, or simultaneous "
                           "initialization of incompatible likelihoods; e.g. polarised "
                           "vs non-polarised 'lite' likelihoods. See error info below:")
            raise
        self.l_maxs = self.clik.get_lmax()
        # calculate requirements here so class can also be separately instantiated
        requested_cls = ["tt", "ee", "bb", "te", "tb", "eb"]
        has_cl = self.clik.get_has_cl()
        self.requested_cls = [cl for cl, i in zip(requested_cls, has_cl) if int(i)]
        self.l_maxs_cls = [lmax for lmax, i in zip(self.l_maxs, has_cl) if int(i)]
        self.expected_params = list(self.clik.extra_parameter_names)
        # Placeholder for the flat vector (C_ell's + nuisance params) passed to clik.
        length = len(self.clik.get_has_cl())
        self.vector = np.zeros(np.sum(self.l_maxs) + length + len(self.expected_params))

    def log_likelihood(self, cl, **params_values):
        """Return the log-likelihood for theory C_ell's `cl` and nuisance params.

        :param cl: dict of theory power spectra keyed by 'tt', 'ee', ...
        :param params_values: values for every name in `self.expected_params`.
        """
        # fill with Cl's; tb/eb are not modelled and are zero-filled
        self.vector[:-len(self.expected_params)] = np.concatenate(
            [(cl[spectrum][:1 + lmax] if spectrum not in ["tb", "eb"]
              else np.zeros(1 + lmax))
             for spectrum, lmax in zip(self.requested_cls, self.l_maxs_cls)])
        # check for nan's: may produce a segfault in clik
        # dot product is apparently the fastest way in threading-enabled numpy
        if np.isnan(np.dot(self.vector, self.vector)):
            return -np.inf
        # fill with likelihood parameters
        # first the nuisance
        self.vector[-len(self.expected_params):] = (
            [params_values[p] for p in self.expected_params])
        loglike = self.clik(self.vector)[0]
        # "zero" of clik, and sometimes nan's returned
        if np.allclose(loglike, -1e30) or np.isnan(loglike):
            loglike = -np.inf
        return loglike
# Monkeypatch: keep a reference to cobaya's original data-path resolver so
# SPT likelihoods can be redirected while Planck ones keep working.
_planck_get_data_path = planck_clik.get_data_path
def get_data_path(name):
    """Resolve the data folder for a likelihood `name`.

    Falls through to cobaya's original resolver for non-SPT names; for any
    name containing "spt" (case-insensitive) returns the local "spt_data"
    folder instead.
    """
    log = get_logger(name)
    if "spt" not in name.lower():
        return _planck_get_data_path(name)
    log.info("override default get_data_path from %s"%(_planck_get_data_path.__module__))
    return "spt_data"
# Install the override globally on the planck_clik module.
planck_clik.get_data_path = get_data_path
|
SouthPoleTelescopeREPO_NAMEspt3g_y1_distPATH_START.@spt3g_y1_dist_extracted@spt3g_y1_dist-main@cobaya_files@SPT3G_Y1@TTTEEE.py@.PATH_END.py
|
{
"filename": "_fgopacity.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/treemap/marker/pattern/_fgopacity.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class FgopacityValidator(_plotly_utils.basevalidators.NumberValidator):
    """Validator for the ``treemap.marker.pattern.fgopacity`` number in [0, 1]."""

    def __init__(
        self, plotly_name="fgopacity", parent_name="treemap.marker.pattern", **kwargs
    ):
        # Pull the defaults first; explicit caller kwargs take precedence.
        edit_type = kwargs.pop("edit_type", "style")
        upper = kwargs.pop("max", 1)
        lower = kwargs.pop("min", 0)
        super(FgopacityValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            max=upper,
            min=lower,
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@treemap@marker@pattern@_fgopacity.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "hmuellergoe/mrbeam",
"repo_path": "mrbeam_extracted/mrbeam-main/mr_beam/itreg/regpy/solvers/__init__.py",
"type": "Python"
}
|
"""Iterative solvers for inverse problems.
"""
from regpy.util import classlogger
from regpy.hilbert import HilbertSpace
class Solver:
    """Abstract base class for solvers.

    Solvers do not loop themselves, but are driven by repeatedly calling the
    `next` method. They expose the current iterate and value as attributes
    `x` and `y`, and can be iterated over, yielding the `(x, y)` tuple on
    every iteration (which may or may not be the same arrays as before,
    modified in-place).

    There are some convenience methods to run the solver with a
    `regpy.stoprules.StopRule`.

    Subclasses should override the method `_next(self)` to perform a single
    iteration. The main difference to `next` is that `_next` does not have a
    return value. If the solver converged, `converge` should be called,
    afterwards `_next` will never be called again. Most solvers will probably
    never converge on their own, but rely on the caller or a
    `regpy.stoprules.StopRule` for termination.

    Parameters
    ----------
    callback : callable, optional
        Invoked as ``callback(x)`` after every successful iteration.
    """

    log = classlogger

    def __init__(self, callback=None):
        # The current iterate.
        self.x = None
        # The value at the current iterate. May be needed by stopping rules,
        # but callers should handle the case when it is not available.
        self.y = None
        self.__converged = False
        self.callback = callback

    def converge(self):
        """Mark the solver as converged. This is intended to be used by child
        classes implementing the `_next` method; afterwards `next` is a no-op.
        """
        self.__converged = True

    def next(self):
        """Perform a single iteration.

        Returns
        -------
        bool
            False if the solver already converged and no step was performed.
            True otherwise.
        """
        if self.__converged:
            return False
        self._next()
        # Fix: compare against None with `is not` (PEP 8), not `!=`.
        if self.callback is not None:
            self.callback(self.x)
        return True

    def _next(self):
        """Perform a single iteration. This is an abstract method called from
        the public method `next`. Child classes should override it.

        The main difference to `next` is that `_next` does not have a return
        value. If the solver converged, `converge` should be called.
        """
        raise NotImplementedError

    def __iter__(self):
        """Return an iterator on the iterates of the solver.

        Yields
        ------
        tuple of arrays
            The (x, y) pair of the current iteration.
        """
        while self.next():
            yield self.x, self.y

    def until(self, stoprule=None):
        """Generator that runs the solver with the given stopping rule.

        Convenience method implementing a simple generator loop that runs the
        solver until it either converges or the stopping rule triggers.

        Parameters
        ----------
        stoprule : regpy.stoprules.StopRule, optional
            The stopping rule to be used. If omitted, stopping will only be
            based on the return value of `next`.

        Yields
        ------
        tuple of arrays
            The (x, y) pair of the current iteration, or the solution chosen
            by the stopping rule.
        """
        for x, y in self:
            yield x, y
            if stoprule is not None and stoprule.stop(x, y):
                self.log.info('Stopping rule triggered.')
                # TODO document this behaviour
                yield x, y
                return
        self.log.info('Solver converged.')

    def run(self, stoprule=None):
        """Run the solver with the given stopping rule.

        Simply exhausts the generator `regpy.solvers.Solver.until` and
        returns the final `(x, y)` pair.

        NOTE: assumes at least one iteration is performed; otherwise `x` and
        `y` are unbound here (pre-existing behaviour, kept as is).
        """
        for x, y in self.until(stoprule):
            pass
        return x, y
class HilbertSpaceSetting:
    """A Hilbert space *setting* for an inverse problem, used by e.g.
    Tikhonov-type solvers.

    A setting bundles

    - a forward operator,
    - a Hilbert space structure on its domain measuring the regularity of
      reconstructions, and
    - a Hilbert space structure on its codomain for the data misfit,

    and verifies that the `regpy.hilbert.HilbertSpace.discr`s match the
    operator's domain and codomain. If an `regpy.hilbert.AbstractSpace`
    (or any callable) is given instead of a concrete
    `regpy.hilbert.HilbertSpace`, it is called on the operator's domain or
    codomain to construct the concrete instance.

    Parameters
    ----------
    op : regpy.operators.Operator
        The forward operator.
    Hdomain, Hcodomain : regpy.hilbert.HilbertSpace or callable
        The Hilbert spaces or abstract spaces on the domain or codomain.
    """

    def __init__(self, op, Hdomain, Hcodomain):
        # Callables that are not already HilbertSpace instances are treated
        # as factories and invoked on the operator's discretization.
        if callable(Hdomain) and not isinstance(Hdomain, HilbertSpace):
            Hdomain = Hdomain(op.domain)
        assert isinstance(Hdomain, HilbertSpace)
        assert Hdomain.discr == op.domain
        if callable(Hcodomain) and not isinstance(Hcodomain, HilbertSpace):
            Hcodomain = Hcodomain(op.codomain)
        assert isinstance(Hcodomain, HilbertSpace)
        assert Hcodomain.discr == op.codomain
        # The operator.
        self.op = op
        # The `regpy.hilbert.HilbertSpace` on the domain.
        self.Hdomain = Hdomain
        # The `regpy.hilbert.HilbertSpace` on the codomain.
        self.Hcodomain = Hcodomain
|
hmuellergoeREPO_NAMEmrbeamPATH_START.@mrbeam_extracted@mrbeam-main@mr_beam@itreg@regpy@solvers@__init__.py@.PATH_END.py
|
{
"filename": "grid.py",
"repo_name": "Herculens/herculens",
"repo_path": "herculens_extracted/herculens-main/herculens/LensImage/Numerics/grid.py",
"type": "Python"
}
|
# Handles coordinate grid on which ray-tracing and convolution are performed
#
# Copyright (c) 2021, herculens developers and contributors
# Copyright (c) 2018, Simon Birrer & lenstronomy contributors
# based on the ImSim.Numerics module from lenstronomy (version 1.9.3)
__author__ = 'sibirrer', 'austinpeel', 'aymgal'
import jax.numpy as np
from herculens.Util import util
from herculens.Util import image_util
from herculens.Coordinates.coord_transforms import Coordinates1D
__all__ = ['RegularGrid']
class RegularGrid(Coordinates1D):
    """
    Manages a super-sampled grid on the partial image.
    """
    def __init__(self, nx, ny, transform_pix2angle, ra_at_xy_0, dec_at_xy_0, supersampling_factor=1):
        """
        :param nx: number of pixels in x-axis
        :param ny: number of pixels in y-axis
        :param transform_pix2angle: 2x2 matrix, mapping of pixel to coordinate
        :param ra_at_xy_0: ra coordinate at pixel (0,0)
        :param dec_at_xy_0: dec coordinate at pixel (0,0)
        :param supersampling_factor: int, factor (per axis) of super-sampling
        """
        super(RegularGrid, self).__init__(transform_pix2angle, ra_at_xy_0, dec_at_xy_0)
        self._supersampling_factor = supersampling_factor
        self._nx = nx
        self._ny = ny
        self._x_grid, self._y_grid = self.coordinate_grid(nx, ny)
        # Bug fix: the second argument is the number of pixels along the
        # y-axis; previously `self._nx` was passed twice, which produced a
        # wrong subgrid for non-square (nx != ny) images.
        x_grid_sub, y_grid_sub = util.subgrid_from_coordinate_transform(
            self._nx, self._ny,
            transform_pix2angle, ra_at_xy_0, dec_at_xy_0,
            subgrid_res=self._supersampling_factor)
        self._ra_subgrid = x_grid_sub
        self._dec_subgrid = y_grid_sub

    @property
    def coordinates_evaluate(self):
        """
        :return: 1d arrays of all (ra, dec) coordinates being evaluated to perform the image computation
        """
        return self._ra_subgrid, self._dec_subgrid

    @property
    def grid_points_spacing(self):
        """
        Effective spacing between coordinate points, after supersampling.

        :return: sqrt(pixel_area)/supersampling_factor
        """
        return self.pixel_width / self._supersampling_factor

    @property
    def num_grid_points_axes(self):
        """
        Effective number of points along each axis, after supersampling.

        :return: (nx*supersampling_factor, ny*supersampling_factor)
        """
        return self._nx * self._supersampling_factor, self._ny * self._supersampling_factor

    @property
    def num_grid_points(self):
        """
        Effective total number of points, after supersampling.

        :return: nx*supersampling_factor * ny*supersampling_factor
        """
        return self._nx * self._supersampling_factor * self._ny * self._supersampling_factor

    @property
    def supersampling_factor(self):
        """
        :return: factor (per axis) of super-sampling relative to a pixel
        """
        return self._supersampling_factor

    def flux_array2image_low_high(self, flux_array, **kwargs):
        """
        :param flux_array: 1d array of low and high resolution flux values corresponding to the coordinates_evaluate order
        :return: 2d array, 2d array, corresponding to (partial) images in low and high resolution (to be convolved)
        """
        image = self._array2image(flux_array)
        if self._supersampling_factor > 1:
            image_high_res = image
            # Bin the supersampled image back down to the pixel resolution.
            image_low_res = image_util.re_size(image, self._supersampling_factor)
        else:
            # No supersampling: there is no separate high-resolution image.
            image_high_res = None
            image_low_res = image
        return image_low_res, image_high_res

    def _array2image(self, array):
        """
        Maps a 1d array into a 2d grid at the supersampled resolution.

        :param array: 1d array of flux values in coordinates_evaluate order
        :return: 2d array of shape (nx*supersampling, ny*supersampling)
        """
        nx, ny = self._nx * self._supersampling_factor, self._ny * self._supersampling_factor
        return util.array2image(array, nx, ny)
|
HerculensREPO_NAMEherculensPATH_START.@herculens_extracted@herculens-main@herculens@LensImage@Numerics@grid.py@.PATH_END.py
|
{
"filename": "_bordercolor.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/heatmapgl/hoverlabel/_bordercolor.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class BordercolorValidator(_plotly_utils.basevalidators.ColorValidator):
    """Validator for the ``heatmapgl.hoverlabel.bordercolor`` color property."""

    def __init__(
        self, plotly_name="bordercolor", parent_name="heatmapgl.hoverlabel", **kwargs
    ):
        # Resolve defaults first; anything the caller passed wins.
        array_ok = kwargs.pop("array_ok", True)
        edit_type = kwargs.pop("edit_type", "none")
        role = kwargs.pop("role", "style")
        super(BordercolorValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            array_ok=array_ok,
            edit_type=edit_type,
            role=role,
            **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@heatmapgl@hoverlabel@_bordercolor.py@.PATH_END.py
|
{
"filename": "lineout.py",
"repo_name": "fmihpc/analysator",
"repo_path": "analysator_extracted/analysator-master/pyCalculations/lineout.py",
"type": "Python"
}
|
#
# This file is part of Analysator.
# Copyright 2013-2016 Finnish Meteorological Institute
# Copyright 2017-2018 University of Helsinki
#
# For details of usage, see the COPYING file and read the "Rules of the Road"
# at http://www.physics.helsinki.fi/vlasiator/
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Cut-throughs from vlsv files
import numpy as np
import sys
import logging
def lineout(vlsvReader, point1, point2, variable, operator="pass",
            interpolation_order=1, points=100):
    ''' Returns a line cut-through from a given VLSV file for distance, coordinates and variable values. The main difference between this and cut_through is that this function interpolates a given variable.

    :param vlsvReader: Some open VlsvReader
    :type vlsvReader: :class:`vlsvfile.VlsvReader`
    :param point1: The starting point of a cut-through line
    :param point2: The ending point of a cut-through line
    :param variable: Variable to return
    :param operator: The operator for the variable, for example "x" for x-component or "magnitude" for magnitude
    :param interpolation_order: Order of interpolation (0 or 1), defaults to 1
    :param points: Number of points to return
    :returns: A tuple with output: (distance, coordinates, variable_values)

    .. code-block:: python

       # Example:
       import pytools as pt # import analysator
       vlsvReader = pt.vlsvfile.VlsvReader(\"testfile.vlsv\") # Open a vlsv file
       lineout_rho = pt.calculations.lineout( vlsvReader=vlsvReader, point1=[1.0e5, 1.0e6, 0], point2=[2.0e5, 2.0e6, 0], variable="rho", interpolation_order=1, points=100 )
       distance = lineout_rho[0]
       coordinates = lineout_rho[1]
       values = lineout_rho[2]
    '''
    # Transform point1 and point2 into numpy array:
    point1 = np.array(point1)
    point2 = np.array(point2)
    # Make sure point1 and point2 are inside bounds (cell id 0 means outside).
    # Fix: these are error conditions, so log them at error level instead of
    # info; the cut-through is still computed (pre-existing best-effort
    # behaviour, kept as is).
    if vlsvReader.get_cellid(point1) == 0:
        logging.error("ERROR, POINT1 IN CUT-THROUGH OUT OF BOUNDS!")
    if vlsvReader.get_cellid(point2) == 0:
        logging.error("ERROR, POINT2 IN CUT-THROUGH OUT OF BOUNDS!")
    # Probe the variable once to see whether it is scalar or vector-valued,
    # and size the output array accordingly.
    value_len = len(np.atleast_1d(vlsvReader.read_interpolated_variable(variable, point1, operator)))
    if value_len == 1:
        values = np.zeros(points)
    else:
        values = np.zeros((points, value_len))
    distance = np.zeros(points)
    coordinates = np.zeros((points, 3))
    # Sample `points` evenly spaced positions from point1 to point2 inclusive.
    for i in range(points):
        relative_coordinate = (point2 - point1) * i / (points - 1)
        if interpolation_order == 1:
            values[i] = vlsvReader.read_interpolated_variable(variable, point1 + relative_coordinate, operator)
        elif interpolation_order == 0:
            # Nearest-cell value instead of interpolation.
            values[i] = vlsvReader.read_variable(variable, vlsvReader.get_cellid(point1 + relative_coordinate), operator)
        distance[i] = np.sqrt(np.sum(relative_coordinate**2))
        coordinates[i] = point1 + relative_coordinate
    return (distance, coordinates, values)
|
fmihpcREPO_NAMEanalysatorPATH_START.@analysator_extracted@analysator-master@pyCalculations@lineout.py@.PATH_END.py
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.