metadata
dict | text
stringlengths 0
40.6M
| id
stringlengths 14
255
|
|---|---|---|
{
"filename": "test_import.py",
"repo_name": "natashabatalha/PandExo",
"repo_path": "PandExo_extracted/PandExo-master/test_import.py",
"type": "Python"
}
|
import pandexo.engine.justdoit as jdi  # THIS IS THE HOLY GRAIL OF PANDEXO

# Start from PandExo's default input dictionary and fill in each section.
exo_dict = jdi.load_exo_dict()

# --- Observation setup ---
observation = exo_dict['observation']
observation['sat_level'] = 80        # saturation level in percent of full well
observation['sat_unit'] = '%'
observation['noccultations'] = 2     # number of transits
observation['R'] = None              # fixed binning; zero binning suggested — you can always bin later without redoing the calculation
observation['baseline'] = 1.0        # fraction of time in transit versus out = in/out
observation['baseline_unit'] = 'frac'
observation['noise_floor'] = 0       # a fixed level or a filepath

# --- Stellar parameters ---
star = exo_dict['star']
star['type'] = 'phoenix'             # phoenix or user (if you have your own)
star['mag'] = 8.0                    # magnitude of the system
star['ref_wave'] = 1.25              # for J mag = 1.25, H = 1.6, K = 2.22 (all in micron)
star['temp'] = 5500                  # in K
star['metal'] = 0.0                  # as log Fe/H
star['logg'] = 4.0
star['radius'] = 1
star['r_unit'] = 'R_sun'

# --- Planet parameters ---
planet = exo_dict['planet']
planet['type'] = 'constant'
planet['radius'] = 1                 # other options include "um", "nm", "Angs", "secs" (for phase curves)
planet['r_unit'] = 'R_jup'
planet['transit_duration'] = 2.0*60.0*60.0
planet['td_unit'] = 's'
planet['f_unit'] = 'rp^2/r*^2'

#jdi.run_pandexo(exo_dict, ['NIRSpec G140H'], save_file=False)
#print('SUCCESS')
|
natashabatalhaREPO_NAMEPandExoPATH_START.@PandExo_extracted@PandExo-master@test_import.py@.PATH_END.py
|
{
"filename": "interpolated.py",
"repo_name": "exoplanet-dev/exoplanet",
"repo_path": "exoplanet_extracted/exoplanet-main/src/exoplanet/light_curves/interpolated.py",
"type": "Python"
}
|
__all__ = ["InterpolatedLightCurve"]
import numpy as np
from exoplanet.compat import tensor as pt
def interp(n, x, xmin, xmax, dx, func):
    """One-dimensional regularly spaced cubic interpolation.

    Args:
        n (int): The axis of the output that should be interpolated
        x (tensor): The x coordinates where the model should be evaluated
        xmin (scalar): The first coordinate in the grid
        xmax (scalar): The last coordinate in the grid
        dx (scalar): The grid spacing
        func (callable): The function that should be interpolated

    Returns:
        y: The function ``func`` interpolated to the coordinates ``x``
    """
    # Evaluate the model on a grid extended beyond [xmin, xmax] so that
    # every cell has the four neighboring samples the cubic needs.
    grid = pt.arange(xmin - dx, xmax + 2.5 * dx, dx)
    samples = func(grid)

    # Four consecutive samples bracketing each grid cell, along axis ``n``.
    y0 = samples[:-3, n]
    y1 = samples[1:-2, n]
    y2 = samples[2:-1, n]
    y3 = samples[3:, n]

    # Per-cell cubic coefficients.
    a0 = y1
    a1 = -y0 / 3.0 - 0.5 * y1 + y2 - y3 / 6.0
    a2 = 0.5 * (y0 + y2) - y1
    a3 = 0.5 * ((y1 - y2) + (y3 - y0) / 3.0)

    # Index of the cell containing each requested coordinate, and the
    # fractional offset within that cell.
    inds = pt.cast(pt.floor((x - xmin) / dx), "int64")
    x0 = (x - grid[inds + 1]) / dx

    return a0[inds] + a1[inds] * x0 + a2[inds] * x0**2 + a3[inds] * x0**3
class InterpolatedLightCurve:
    """This light curve object is an EXPERIMENTAL and UNTESTED interface for
    pre-computing transit light curves on a grid and then interpolating this
    model onto the observed datapoints. This can improve the computational
    cost of a light curve model, especially when the dataset is large or the
    planet is short period. WARNING: You should only use this at your own risk
    if you know what you're doing!
    """

    def __init__(
        self, base_light_curve, num_phase, num_planets=None, **kwargs
    ):
        # base_light_curve: object exposing ``get_light_curve``
        # num_phase: number of phase grid points used for interpolation
        # num_planets: number of planets; inferred from the orbit if None
        # **kwargs: accepted for backward compatibility; unused
        self.base_light_curve = base_light_curve
        self.num_phase = int(num_phase)
        self.num_planets = num_planets

    def _resolve_num_planets(self, orbit):
        """Return the planet count, inferring it from ``orbit`` when unset.

        Raises:
            ValueError: if ``num_planets`` was not given and the orbit's
                period has no ``tag.test_value`` to infer it from.
        """
        if self.num_planets is not None:
            return int(self.num_planets)
        try:
            vec = orbit.period.tag.test_value
        except AttributeError:
            raise ValueError(
                "Can't compute num_planets, please provide a value"
            )
        return len(np.atleast_1d(vec))

    def get_light_curve(
        self,
        orbit=None,
        r=None,
        t=None,
        texp=None,
        oversample=7,
        order=0,
        use_in_transit=None,
        light_delay=False,
    ):
        """Interpolate the base model's light curve onto the times ``t``.

        The signature mirrors the base light curve's ``get_light_curve``.
        Returns a tensor with a trailing planet axis.
        """
        num_planets = self._resolve_num_planets(orbit)

        # The wrapper freezes every argument except the evaluation times;
        # it is identical for every planet, so build it exactly once
        # (previously it was rebuilt on each loop iteration).
        func = _wrapper(
            self.base_light_curve,
            orbit=orbit,
            r=r,
            texp=texp,
            oversample=oversample,
            order=order,
            use_in_transit=use_in_transit,
            light_delay=light_delay,
        )

        def interp_planet(n, t0, period):
            # Fold the requested times onto a single period starting at t0
            # and cubically interpolate planet ``n`` of the model.
            mn = t0
            mx = t0 + period
            return interp(
                n,
                pt.mod(t - t0, period) + t0,
                mn,
                mx,
                (mx - mn) / (self.num_phase + 1),
                func,
            )

        if num_planets <= 1:
            # Scalar orbit parameters; add the trailing planet axis.
            return interp_planet(0, orbit.t0, orbit.period)[:, None]

        return pt.stack(
            [
                interp_planet(n, orbit.t0[n], orbit.period[n])
                for n in range(num_planets)
            ],
            axis=-1,
        )
class _wrapper:
def __init__(self, base_light_curve, *args, **kwargs):
self.base_light_curve = base_light_curve
self.args = args
self.kwargs = kwargs
def __call__(self, x):
kwargs = dict(t=x, **self.kwargs)
return self.base_light_curve.get_light_curve(*self.args, **kwargs)
|
exoplanet-devREPO_NAMEexoplanetPATH_START.@exoplanet_extracted@exoplanet-main@src@exoplanet@light_curves@interpolated.py@.PATH_END.py
|
{
"filename": "data_inspect_2458055.ipynb",
"repo_name": "HERA-Team/H1C_IDR3_Notebooks",
"repo_path": "H1C_IDR3_Notebooks-main/data_inspect/data_inspect_2458055.ipynb",
"type": "Jupyter Notebook"
}
|
```python
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.ticker import FormatStrFormatter
import matplotlib.patches as mpatches
import matplotlib.gridspec as gridspec
import numpy as np
from pyuvdata import UVCal, UVData
import os
import sys
import glob
import uvtools as uvt
from astropy.time import Time
from astropy.coordinates import EarthLocation, AltAz, Angle
from astropy.coordinates import SkyCoord as sc
import pandas
import warnings
import copy
from hera_notebook_templates import utils_h1c as utils
import hera_qm
from hera_mc import cm_hookup
import h5py
import importlib
from scipy import stats
import yaml
#warnings.filterwarnings('ignore')
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
```
```python
# If you want to run this notebook locally, copy the output of the next cell into the first few lines of this cell.
# JD = 2458045
# data_path = "/lustre/aoc/projects/hera/H1C_IDR3/IDR3_2/2458045"
# flag_yaml_path = "/lustre/aoc/projects/hera/H1C_IDR3/IDR3_2/h1c_idr3_software/hera_pipelines/pipelines/h1c/idr3/v2/a_priori_flags"
# os.environ["JULIANDATE"] = str(JD)
# os.environ["DATA_PATH"] = data_path
# os.environ["PATH_TO_A_PRIORI_FLAGS"] = flag_yaml_path
```
```python
#get data location
JD = os.environ['JULIANDATE']
data_path = os.environ['DATA_PATH']
flag_yaml_path = os.environ['PATH_TO_A_PRIORI_FLAGS']
print(f'JD = {JD}')
print(f'data_path = "{data_path}"')
print(f'flag_yaml_path = "{flag_yaml_path}"')
```
JD = 2458055
data_path = "/lustre/aoc/projects/hera/H1C_IDR3/IDR3_2/2458055"
flag_yaml_path = "/lustre/aoc/projects/hera/H1C_IDR3/IDR3_2/h1c_idr3_software/hera_pipelines/pipelines/h1c/idr3/v2/a_priori_flags"
```python
utc = Time(JD, format='jd').datetime
print(f'Date: {utc.month}-{utc.day}-{utc.year}')
```
Date: 10-28-2017
```python
# Load in data
HHfiles, uvdx, uvdy = utils.load_data(data_path,JD)
ex_ants = utils.read_a_priori_ant_flags(f'{flag_yaml_path}/{JD}.yaml')
uv = UVData()
# Find the first readable file; skip any that fail to load.
unread = True
readInd = 0
while unread and readInd < len(HHfiles):
    try:
        uv.read(HHfiles[readInd])
        unread = False
    except Exception:
        # A bare `except:` would also swallow KeyboardInterrupt/SystemExit;
        # only catch real read errors and move on to the next file.
        readInd += 1
        continue
ants = uv.antenna_numbers
use_ants = [ant for ant in ants if ant not in ex_ants]
uvd = UVData()
uvd.read(HHfiles, ant_str='auto')
uvd.select(antenna_nums=use_ants)
lsts = uvd.lst_array
uvdx.select(antenna_nums=use_ants)
uvdy.select(antenna_nums=use_ants)
```
73 sum files found between JDs 2458055.12552 and 2458055.66239
## Sky Coverage Map
Map of the sky (made using the Haslam 408MHz map). The RA/DEC range covered by this night of observation is shaded based on a 12 degree FWHM of the beam. Horizontal dashed lines represent the stripe that HERA can observe, while the shaded region is what was observed on this night. Vertical lines represent the beginning and ending LSTs of this observation. Selected sources are labelled, sources included are those in the GLEAM 4Jy catalog with a flux >10.9 Jy. Note that the map is clipped at the northern horizon.
```python
sources = utils.gather_source_list()
utils.plot_sky_map(uvd,dec_pad=55,ra_pad=55,clip=False,sources=sources)
```

## LST Coverage
Shows the LSTs (in hours) and JDs for which data is collected. Green represents data, red means no data.
```python
utils.plot_lst_coverage(uvd)
```

## Autocorrelations for a single file
This plot shows autocorrelations for one timestamp of each antenna that is active and each polarization. For each node, antennas are ordered by SNAP number, and within that by SNAP input number. The antenna number label color corresponds to the a priori status of that antenna.
```python
### plot autos
utils.plot_autos(uvdx, uvdy)
```

## Waterfalls of Autocorrelation Amplitudes for each Antenna and Each polarization
These plots show autocorrelation waterfalls of each antenna that is active and whose status qualifies for this notebook. For each node, antennas are ordered by SNAP number, and within that by SNAP input number. The antenna number label color corresponds to the a priori status of that antenna.
```python
utils.plot_wfs(uvd, pol = 0)
```

```python
utils.plot_wfs(uvd, pol = 1)
```

## Correlation Metrics
The first plot shows the correlation metric (described below) for a set of baseline types, as calculated at several times throughout the night. It is expected that longer baselines (darker color) will exhibit lower values than the short baselines.
The matrices show the phase correlation between antennas. Using the even and odd visibilities, each pixel is calculated as (even/abs(even)) * (conj(odd)/abs(odd)), and then averaged across time and frequency. If the phases are noise-like, this value will average down to zero. If the antennas are well correlated, the phases should not be noise-like, and this value should average to 1. The lines denoting node boundaries are intended to help confirm that inter-node correlations are functioning - if they aren't, this plot will appear block-diagonal.
This metric has shown to be LST locked - when comparing to other nights, be sure to compare for the same LST. It is expected that some LSTs will look much better or worse than others.
Note: Within each node, the order of antennas is determined by snap, and within that by snap input number.
```python
badAnts = []
badAnts = utils.plotNodeAveragedSummary(uvd,HHfiles,JD,use_ants,mat_pols=['xx','yy','xy','yx'])
```



## Antenna Positions
Antennas outlined in black here have been identified by the correlation matrix as bad antennas. Antennas with a colorful outline correspond to their status as identified by ant_metrics (see above plot). Faded antennas are those not meeting the a priori status requirement for this notebook run. Gold stars are node box locations.
```python
uvd1 = UVData()
uvd1.read(HHfiles[readInd], skip_bad_files=True)
utils.plot_antenna_positions(uvd1, badAnts=badAnts,use_ants=use_ants)
```

## Mean-Subtracted Waterfalls
Here the mean value in each frequency bin has been subtracted out. This effectively subtracts out the bandpass shape, making time variations more visible.
```python
utils.plot_wfs(uvd,0,mean_sub=True,jd=JD)
utils.plot_wfs(uvd,1,mean_sub=True,jd=JD)
```


```python
```
|
HERA-TeamREPO_NAMEH1C_IDR3_NotebooksPATH_START.@H1C_IDR3_Notebooks-main@data_inspect@data_inspect_2458055.ipynb@.PATH_END.py
|
{
"filename": "_smoothing.py",
"repo_name": "deepmind/optax",
"repo_path": "optax_extracted/optax-main/optax/losses/_smoothing.py",
"type": "Python"
}
|
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Smoothing functions."""
import chex
import jax.numpy as jnp
def smooth_labels(
    labels: chex.Array,
    alpha: float,
) -> jnp.ndarray:
  """Apply label smoothing.

  Label smoothing is often used in combination with a cross-entropy loss.
  Smoothed labels favor small logit gaps, and it has been shown that this can
  provide better model calibration by preventing overconfident predictions.

  Args:
    labels: One hot labels to be smoothed.
    alpha: The smoothing factor.

  Returns:
    a smoothed version of the one hot input labels.

  References:
    Muller et al, `When does label smoothing help?
    <https://arxiv.org/abs/1906.02629>`_, 2019
  """
  chex.assert_type([labels], float)
  num_categories = labels.shape[-1]
  # Blend the one-hot vectors with a uniform distribution over classes.
  uniform = alpha / num_categories
  return labels * (1.0 - alpha) + uniform
|
deepmindREPO_NAMEoptaxPATH_START.@optax_extracted@optax-main@optax@losses@_smoothing.py@.PATH_END.py
|
{
"filename": "base.py",
"repo_name": "showyourwork/showyourwork",
"repo_path": "showyourwork_extracted/showyourwork-main/src/showyourwork/exceptions/base.py",
"type": "Python"
}
|
import sys
import traceback
from .. import paths
from ..logging import get_logger
def redirect_exception(*args, **kwargs):
    """Send a formatted traceback to the log file instead of the screen.

    Drop-in replacement for ``traceback.print_exception``: accepts the
    same arguments and logs the joined traceback at DEBUG level.
    """
    formatted = "".join(traceback.format_exception(*args, **kwargs))
    get_logger().debug(formatted)
def custom_excepthook(cls, exc, tb):
    """``sys.excepthook`` replacement that logs the exception to file."""
    lines = traceback.format_exception(cls, exc, tb)
    get_logger().debug("".join(lines))
# Saved references to the interpreter's original traceback printers,
# reinstated by `restore_trace` to undo `disable_trace`.
print_exception = traceback.print_exception
excepthook = sys.excepthook
def disable_trace():
    """Disable the traceback from being printed to the screen.

    The traceback gets logged to file, unless the logging level
    is `DEBUG`, in which case it also gets printed to the screen.
    """
    # Route all traceback output through the logging helpers above.
    traceback.print_exception = redirect_exception
    sys.excepthook = custom_excepthook
    # Best effort: drop a sentinel file so Snakemake exceptions stay quiet.
    try:
        sentinel = paths.user().flags / "DISABLE_SNAKEMAKE_EXCEPTIONS"
    except Exception:
        pass
    else:
        sentinel.touch()
def restore_trace():
    """Restore traceback printing to the screen."""
    # Reinstate the printers saved at import time.
    traceback.print_exception = print_exception
    sys.excepthook = excepthook
    # Best effort: remove the sentinel file created by `disable_trace`.
    try:
        sentinel = paths.user().flags / "DISABLE_SNAKEMAKE_EXCEPTIONS"
    except Exception:
        pass
    else:
        if sentinel.exists():
            sentinel.unlink()
class ShowyourworkException(Exception):
    """Base exception for workflow errors.

    Logs ``message`` at the requested ``level`` and disables traceback
    printing to the screen (the traceback still goes to the log file via
    `disable_trace`). If this exception is caught, the caller MUST call
    `restore_trace` in the ``except`` block to restore traceback printing!
    """

    def __init__(
        self,
        message="An error occurred while executing the workflow.",
        level="error",
    ):
        # Print the message using the logger at the requested level.
        # Unrecognized levels fall back to `error` rather than being
        # silently dropped (previously the message for an unknown level
        # was neither logged nor kept on the exception).
        logger = get_logger()
        log = {
            "error": logger.error,
            # `Logger.warn` is deprecated; `warning` is the supported name.
            "warn": logger.warning,
            "info": logger.info,
            "debug": logger.debug,
        }.get(level, logger.error)
        log(message)

        # Disable tracebacks; if this exception is caught, the
        # caller MUST call `restore_trace` in the `except` block
        # to restore traceback printing!
        disable_trace()

        # Raise with the message attached so `str(exc)` is informative
        # (previously the exception carried no arguments at all).
        super().__init__(message)
|
showyourworkREPO_NAMEshowyourworkPATH_START.@showyourwork_extracted@showyourwork-main@src@showyourwork@exceptions@base.py@.PATH_END.py
|
{
"filename": "join_test.py",
"repo_name": "vaexio/vaex",
"repo_path": "vaex_extracted/vaex-master/tests/join_test.py",
"type": "Python"
}
|
import pytest
import vaex
import numpy as np
import pyarrow as pa
from common import small_buffer
# Shared fixture dataframes for the join tests below.
# df_a / df_b: three-row frames with string keys and masked (missing) values.
df_a = vaex.from_arrays(a=np.array(['A', 'B', 'C']),
                        x=np.array([0., 1., 2.]),
                        y=np.ma.array([0., 9., 2.], mask=[False, True, False]),
                        m=np.ma.array([1, 2, 3], mask=[False, True, False])
                        )
df_b = vaex.from_arrays(b=np.array(['A', 'B', 'D']),
                        x=np.array([2., 1., 0.]),
                        y=np.ma.array([9., 1., 2.], mask=[True, False, False]),
                        m=np.ma.array([3, 1, 2], mask=[True, False, False])
                        )
# df_dup: contains a duplicated key ('A' twice) to exercise allow_duplication.
df_dup = vaex.from_arrays(b=np.array(['A', 'B', 'A']),
                          x=np.array([2., 1., 2.]),
                          y=np.ma.array([9., 1., 9.], mask=[True, False, False]),
                          m=np.ma.array([3, 1, 2], mask=[True, True, False])
                          )
# df_c: only two rows, so left joins against it produce missing values.
df_c = vaex.from_arrays(c=np.array(['B', 'C']),
                        z1=np.array([-1., -2.]),
                        z2=np.array([True, False]),
                        )
df_d = vaex.from_arrays(a=np.array(['B', 'C', 'D']),
                        x1=np.array(['dog', 'cat', 'mouse']),
                        x2=np.array([3.1, 25, np.nan]),
                        )
# df_e: keys that never match df_a's, for the all-miss join test.
df_e = vaex.from_arrays(a=np.array(['X', 'Y', 'Z']),
                        x1=np.array(['dog', 'cat', 'mouse']),
                        x2=np.array([3.1, 25, np.nan]),
                        )
# df_dt1 / df_dt2: datetime64 join keys.
df_dt1 = vaex.from_arrays(date=[np.datetime64('2009-10-12T03:00:00'),
                                np.datetime64('2009-10-12T11:00:00'),
                                np.datetime64('2009-10-12T12:00:00'),
                                np.datetime64('2009-12-12T03:00:00')],
                          value=[1, 2, 3, 4])
# df_f: string key column containing a null (None) entry.
df_f = vaex.from_arrays(
    f=np.array(["B", "C", None]),
    w1=np.array(["dog", "cat", "mouse"]),
    w2=np.array([True, False, True]),
)
df_dt2 = vaex.from_arrays(date=[np.datetime64('2009-10-12T03:00:00'),
                                np.datetime64('2009-10-12T11:00:00'),
                                np.datetime64('2009-12-12T03:00:00')],
                          value=[11, 22, 44])
# Joining without `on` simply places the right columns next to the left ones.
def test_no_on(rebuild_dataframe):
    # just adds the columns
    df = df_a.join(df_b, rsuffix='_r')
    assert df.dataset.original.right._columns['b'] is df_b.dataset._columns['b']
    assert rebuild_dataframe(df.hashed()).dataset.hashed() == df.dataset.hashed()


# Join on a masked integer column; masked keys should not match anything.
def test_join_masked(rebuild_dataframe):
    df = df_a.join(other=df_b, left_on='m', right_on='m', rsuffix='_r')
    assert df.evaluate('m').tolist() == [1, None, 3]
    assert df.evaluate('m_r').tolist() == [1, None, None]
    assert df.dataset.original.right._columns['m_r'].indices.dtype == np.int8
    assert rebuild_dataframe(df.hashed()).dataset.hashed() == df.dataset.hashed()


# Joining on keys with no matches at all yields all-missing right columns.
def test_join_nomatch(rebuild_dataframe):
    df = df_a.join(df_e, on=df_a.a, rprefix='r_')
    assert df.x2.tolist() == [None, None, None]
    assert rebuild_dataframe(df.hashed()).dataset.hashed() == df.dataset.hashed()


# Default (left) join on different key column names.
def test_left_a_b(rebuild_dataframe):
    df = df_a.join(other=df_b, left_on='a', right_on='b', rsuffix='_r')
    assert df['a'].tolist() == ['A', 'B', 'C']
    assert df['b'].tolist() == ['A', 'B', None]
    assert df['x'].tolist() == [0, 1, 2]
    assert df['x_r'].tolist() == [2, 1, None]
    assert df['y'].tolist() == [0, None, 2]
    assert df['y_r'].tolist() == [None, 1, None]
    assert rebuild_dataframe(df.hashed()).dataset.hashed() == df.dataset.hashed()
    # test slicing
    assert rebuild_dataframe(df).dataset == df.dataset
    dfs = rebuild_dataframe(df[1:-1])
    assert dfs.dataset == df[1:-1].dataset
    assert df[1:-1]['a'].tolist() == ['B']
    assert dfs['a'].tolist() == ['B']


# Same as test_left_a_b but joining on aliased (non-identifier) column names.
def test_left_a_b_as_alias(rebuild_dataframe):
    df_ac = df_a.copy()
    df_bc = df_b.copy()
    df_ac['1'] = df_ac['a']
    df_bc['2'] = df_bc['b']
    df = df_ac.join(other=df_bc, left_on='1', right_on='2', rsuffix='_r')
    assert df.evaluate('a').tolist() == ['A', 'B', 'C']
    assert df.evaluate('b').tolist() == ['A', 'B', None]
    assert df.evaluate('x').tolist() == [0, 1, 2]
    assert df.evaluate('x_r').tolist() == [2, 1, None]
    assert df.evaluate('y').tolist() == [0, None, 2]
    assert df.evaluate('y_r').tolist() == [None, 1, None]
    assert rebuild_dataframe(df.hashed()).dataset.hashed() == df.dataset.hashed()


# Joining against a dataframe that is itself the result of a join.
def test_join_indexed(rebuild_dataframe):
    df = df_a.join(other=df_b, left_on='a', right_on='b', rsuffix='_r')
    df_X = df_a.join(df, left_on='a', right_on='b', rsuffix='_r')
    assert df_X['b'].tolist() == ['A', 'B', None]
    assert rebuild_dataframe(df.hashed()).dataset.hashed() == df.dataset.hashed()


# Left join where the LEFT dataframe is filtered.
def test_left_a_b_filtered(rebuild_dataframe):
    df_af = df_a[df_a.x > 0]
    df = df_af.join(other=df_b, left_on='a', right_on='b', rsuffix='_r')
    assert df['a'].tolist() == ['B', 'C']
    assert df['b'].tolist() == ['B', None]
    assert df['x'].tolist() == [1, 2]
    assert df['x_r'].tolist() == [1, None]
    assert df['y'].tolist() == [None, 2]
    assert df['y_r'].tolist() == [1, None]
    assert rebuild_dataframe(df.hashed()).dataset.hashed() == df.dataset.hashed()
    # actually, even though the filter is applied, all rows will be matched
    # since the filter can change
    df.set_selection(None, vaex.dataframe.FILTER_SELECTION_NAME)
    assert df['a'].tolist() == ['A', 'B', 'C']
    assert df['b'].tolist() == ['A', 'B', None]
    assert df['x'].tolist() == [0, 1, 2]
    assert df['x_r'].tolist() == [2, 1, None]
    assert df['y'].tolist() == [0, None, 2]
    assert df['y_r'].tolist() == [None, 1, None]
    # if we extract, that shouldn't be the case
    df_af = df_a[df_a.x > 0].extract()
    df = df_af.join(other=df_b, left_on='a', right_on='b', rsuffix='_r')
    df.set_selection(None, vaex.dataframe.FILTER_SELECTION_NAME)
    assert df['a'].tolist() == ['B', 'C']
    assert df['b'].tolist() == ['B', None]
    assert df['x'].tolist() == [1, 2]
    assert df['x_r'].tolist() == [1, None]
    assert df['y'].tolist() == [None, 2]
    assert df['y_r'].tolist() == [1, None]


# Inner join with a filtered left dataframe keeps only matching rows.
def test_inner_a_b_filtered(rebuild_dataframe):
    df_a_filtered = df_a[df_a.x > 0]
    df = df_a_filtered.join(other=df_b, left_on='a', right_on='b', rsuffix='_r', how='inner')
    assert df['a'].tolist() == ['B']
    assert df['b'].tolist() == ['B']
    assert df['x'].tolist() == [1]
    assert df['x_r'].tolist() == [1]
    assert df['y'].tolist() == [None]
    assert df['y_r'].tolist() == [1]
    assert rebuild_dataframe(df.hashed()).dataset.hashed() == df.dataset.hashed()


def test_left_a_b_filtered_right(rebuild_dataframe):
    # similar to test_left_a_b_filtered, but now the df we join is filtered
    # take df_b without the last row
    df_bf = df_b[df_b.b.str.contains('A|B')]
    df = df_a.join(df_bf, how='left', on='x', rsuffix='_r')
    # columns of the left df
    assert df.x.tolist() == [0, 1, 2]
    assert df.a.tolist() == ['A', 'B', 'C']
    assert df.y.tolist() == [0, None, 2]
    assert df.m.tolist() == [1, None, 3]
    # columns of the right df
    assert df.b.tolist() == [None, 'B', 'A']
    assert df.x_r.tolist() == [None, 1, 2]
    assert df.y_r.tolist() == [None, 1, None]
    assert df.m_r.tolist() == [None, 1, None]
    assert rebuild_dataframe(df.hashed()).dataset.hashed() == df.dataset.hashed()


# Right join: the result follows the right dataframe's row order.
def test_right_x_x(rebuild_dataframe):
    df = df_a.join(other=df_b, on='x', rsuffix='_r', how='right')._future()
    assert df['a'].tolist() == ['C', 'B', 'A']
    assert df['b'].tolist() == ['A', 'B', 'D']
    assert df['x'].tolist() == [2, 1, 0]
    assert df['x_r'].tolist() == [2, 1, 0]
    assert df['y'].tolist() == [2, None, 0]
    assert df['y_r'].tolist() == [None, 1, 2]
    assert 'y_r' not in df_b
    assert rebuild_dataframe(df.hashed()).dataset.hashed() == df.dataset.hashed()


# Duplicated keys on the right require allow_duplication and grow the result.
def test_left_dup(rebuild_dataframe):
    df = df_a.join(df_dup, left_on='a', right_on='b', rsuffix='_r', allow_duplication=True)
    assert len(df) == 4
    assert rebuild_dataframe(df.hashed()).dataset.hashed() == df.dataset.hashed()
    # df = df_a.join(df_dup, on='x', rsuffix='_r')
    # df = df_a.join(df_dup, on='m', rsuffix='_r')
# Left join against a shorter right dataframe pads with missing values.
def test_left_a_c(rebuild_dataframe):
    df = df_a.join(df_c, left_on='a', right_on='c', how='left')
    assert df.a.tolist() == ['A', 'B', 'C']
    assert df.x.tolist() == [0, 1, 2]
    assert df.y.tolist() == [0., None, 2.]
    assert df.m.tolist() == [1, None, 3]
    assert df.c.tolist() == [None, 'B', 'C']
    assert df.z1.tolist() == [None, -1., -2.]
    assert df.z2.tolist() == [None, True, False]
    assert rebuild_dataframe(df.hashed()).dataset.hashed() == df.dataset.hashed()


# Self-join: every column on both sides must receive its suffix.
def test_join_a_a_suffix_check(rebuild_dataframe):
    df = df_a.join(df_a, on='a', lsuffix='_left', rsuffix='_right')
    assert set(df.column_names) == {'a_left', 'x_left', 'y_left', 'm_left', 'a_right', 'x_right', 'y_right', 'm_right'}
    assert rebuild_dataframe(df.hashed()).dataset.hashed() == df.dataset.hashed()


# Same as above, but with prefixes instead of suffixes.
def test_join_a_a_prefix_check(rebuild_dataframe):
    df = df_a.join(df_a, on='a', lprefix='left_', rprefix='right_')
    assert set(df.column_names) == {'left_a', 'left_x', 'left_y', 'left_m', 'right_a', 'right_x', 'right_y', 'right_m'}
    assert rebuild_dataframe(df.hashed()).dataset.hashed() == df.dataset.hashed()


# Inner join drops left rows without a match.
def test_inner_a_d(rebuild_dataframe):
    df = df_a.join(df_d, on='a', right_on='a', how='inner', rsuffix='_r')
    assert df.a.tolist() == ['B', 'C']
    assert df.x.tolist() == [1., 2.]
    assert df.y.tolist() == [None, 2.]
    assert df.m.tolist() == [None, 3.]
    assert df.x1.tolist() == ['dog', 'cat']
    assert df.x2.tolist() == [3.1, 25.]
    assert rebuild_dataframe(df.hashed()).dataset.hashed() == df.dataset.hashed()


@pytest.mark.skip(reason='full join not supported yet')
def test_full_a_d(rebuild_dataframe):
    df = df_a.join(df_d, on='a', right_on='a', how='full')
    assert df.a.tolist() == ['A', 'B', 'C', 'D']
    assert df.x.tolist() == [0., 1., 2., None]
    assert df.y.tolist() == [0., None, 2., None]
    assert df.m.tolist() == [1, None, 3, None]
    assert df.x1.tolist() == [None, 'dog', 'cat', 'mouse']
    assert df.x2.tolist() == [None, 3.1, 25., np.nan]
    np.testing.assert_array_equal(np.array(df_d.x2.values), np.array([3.1, 25., np.nan]))
    assert rebuild_dataframe(df.hashed()).dataset.hashed() == df.dataset.hashed()


# Filtering on a virtual column computed from joined columns.
def test_left_virtual_filter(rebuild_dataframe):
    df = df_a.join(df_d, on='a', how='left', rsuffix='_b')
    df['r'] = df.x + df.x2
    df = df[df.r > 10]
    assert set(df[0]) == {'C', 2.0, 2.0, 3, 'C', 'cat', 25.0, 27.0}
    assert rebuild_dataframe(df.hashed()).dataset.hashed() == df.dataset.hashed()


# Joining on a virtual (mapped) column of the left dataframe.
def test_left_on_virtual_col(rebuild_dataframe):
    mapper = {0: 'A', 1: 'B', 2: 'C'}
    df_a['aa'] = df_a.x.map(mapper=mapper)
    df = df_a._future().join(df_d._future(), left_on='aa', right_on='a', rsuffix='_right')
    assert df.a.tolist() == ['A', 'B', 'C']
    assert df.aa.tolist() == ['A', 'B', 'C']
    assert df.x.tolist() == [0, 1, 2]
    assert df.y.tolist() == [0., None, 2.]
    assert df.m.tolist() == [1, None, 3]
    assert df.x1.tolist() == [None, 'dog', 'cat']
    assert df.x2.tolist() == [None, 3.1, 25.]
    assert df.a_right.tolist() == [None, 'B', 'C']
    assert rebuild_dataframe(df.hashed()).dataset.hashed() == df.dataset.hashed()


# Inner join with a filtered left frame keeps the filtered length.
def test_join_filtered_inner(rebuild_dataframe):
    df_a_filtered = df_a[df_a.y > 0]
    df_joined = df_a_filtered.join(other=df_b, on='x', how='inner', rsuffix='_', allow_duplication=True)
    assert len(df_joined) == len(df_a_filtered)
    x = np.arange(20)
    df = vaex.from_arrays(x=x, y=x**2)
    df = df[df.x > 5]
    dfj = df.join(df, on='x', rsuffix='right_', how='inner')
    repr(dfj)  # trigger issue with selection cache
    assert rebuild_dataframe(dfj.hashed()).dataset.hashed() == dfj.dataset.hashed()


# A right column with the same name and identical values is deduplicated.
def test_join_duplicate_column(rebuild_dataframe):
    df_left = vaex.from_arrays(index=[1, 2, 3], x=[10, 20, 30])
    df_right = vaex.from_arrays(index=[1, 2, 3], y=[0.1, 0.2, 0.3])
    df = df_left.join(df_right, on='index')
    assert df.column_count() == 3
    assert set(df.column_names) == {'index', 'x', 'y'}
    assert df['index'].tolist() == [1, 2, 3]
    assert df.x.tolist() == [10, 20, 30]
    assert df.y.tolist() == [0.1, 0.2, 0.3]
    assert rebuild_dataframe(df.hashed()).dataset.hashed() == df.dataset.hashed()


# we join row based and on a column
@pytest.mark.parametrize("on", [None, 'j'])
def test_join_virtual_columns(on, rebuild_dataframe):
    df1 = vaex.from_scalars(j=444, x=1, y=2)
    df1['z'] = df1.x + df1.y
    df1['__h'] = df1.z * 2
    df2 = vaex.from_scalars(j=444, x=2, yy=3)
    df2['z'] = df2.x + df2.yy
    df2['__h'] = df2.z * 3
    df = df1.join(df2, rprefix='r_', rsuffix='_rhs', on=on)
    assert df.x.values[0] == 1
    assert df.y.values[0] == 2
    assert df.z.values[0] == 3
    assert df.__h.values[0] == 6
    assert df.r_x_rhs.values[0] == 2
    assert df.yy.values[0] == 3
    assert df.r_z_rhs.values[0] == 5
    assert df.__r_h_rhs.values[0] == 15
    assert rebuild_dataframe(df.hashed()).dataset.hashed() == df.dataset.hashed()


# Each side's variables must stay attached to its own virtual columns.
def test_join_variables(rebuild_dataframe):
    df1 = vaex.from_scalars(j=444, x=1, y=2)
    df1.add_variable('a', 2)
    df1.add_variable('b', 3)
    df1['z'] = df1.x * df1['a'] + df1.y * df1['b']
    df2 = vaex.from_scalars(j=444, x=2, yy=3)
    df2.add_variable('a', 3)
    df2.add_variable('b', 4)
    df2['z'] = df2.x * df2['a'] + df2.yy * df2['b']
    df = df1.join(df2, rprefix='r_', rsuffix='_rhs')
    assert df.x.values[0] == 1
    assert df.y.values[0] == 2
    assert df.z.values[0] == 2 + 2*3
    # assert df.__h.values[0] == 6
    assert df.r_x_rhs.values[0] == 2
    assert df.yy.values[0] == 3
    assert df.r_z_rhs.values[0] == 2*3 + 3*4
    assert rebuild_dataframe(df.hashed()).dataset.hashed() == df.dataset.hashed()
def test_join_functions():
df1 = vaex.from_scalars(j=444, x=1, y=2)
df2 = vaex.from_scalars(k=555, x=1)
# df2['x'] = df2.apply(lambda y: y-1, arguments=[df2.y])
df2['z'] = df2.apply(lambda x: x+10, arguments=[df1.x])
df = df1.join(df2, on='x')
assert 'lambda_function' in df.get_names()
assert df.x.tolist() == [1]
assert df.y.tolist() == [2]
assert df.z.tolist() == [11]
assert df.j.tolist() == [444]
assert df.k.tolist() == [555]
def test_with_masked_no_short_circuit():
# this test that the full table is joined, in some rare condition
# it can happen that the left table has a value not present in the right
# which causes it to not evaluate the other lookups, due to Python's short circuit
# behaviour. E.g. True or func() will not call func
N = 1000
df = vaex.from_arrays(i=np.arange(100) % 10)
df_right = vaex.from_arrays(i=np.arange(9), j=np.arange(9))
with small_buffer(df, size=1):
dfj = df.join(other=df_right, on='i')
assert dfj.dataset.original.right._columns['j'].masked
assert dfj[:10].dataset.original.right._columns['j'].masked
assert dfj['j'][:10].tolist() == [0, 1, 2, 3, 4, 5, 6, 7, 8, None]
dfj['j'].tolist() # make sure we can evaluate the whole column
def test_join_datetime():
df = df_dt1.join(df_dt2, on='date', rsuffix='_right', how='left')
assert df.shape == (4, 4)
assert df.value.tolist() == [1, 2, 3, 4]
assert df.value_right.tolist() == [11, 22, None, 44]
assert df.date_right.tolist() == [np.datetime64('2009-10-12T03:00:00'),
np.datetime64('2009-10-12T11:00:00'),
None,
np.datetime64('2009-12-12T03:00:00')]
def test_apply_function_name_collision():
def transform(x):
return x + 1 if x is not None else 99
df1 = vaex.from_arrays(x=[1, 2])
df2 = vaex.from_arrays(y=[1, 3])
df1["x2"] = df1["x"].apply(transform, multiprocessing=False)
df2["y2"] = df2["y"].apply(transform, multiprocessing=False)
joined = df1.join(df2, left_on="x2", right_on="y2", allow_duplication=True)
assert joined.y2.tolist() == [2, 99]
def test_join_no_right_columns_left():
df1 = vaex.from_arrays(a=[1, 2, 3])
# df2 only contains 'a', so we don't do the join for real
df2 = vaex.from_arrays(a=[1, 10])
df = df1.join(df2, on="a", how="inner")
assert df["a"].tolist() == [1]
def test_join_on_nan_primitive():
df1 = vaex.from_arrays(id=[0., 1., np.nan], x=[1,2,3])
df2 = vaex.from_arrays(id=[0., 1., 2.], y=[2,3,4])
df3 = df1.join(df2, on="id")
assert df3.id.tolist()[:2] == [0, 1]
assert np.isnan(df3.id.tolist()[2])
assert df3.x.tolist() == [1, 2, 3]
assert df3.y.tolist() == [2, 3, None]
def test_join_on_null_primitive(array_factory):
df1 = vaex.from_arrays(id=array_factory([0., 1., None]), x=[1,2,3])
df2 = vaex.from_arrays(id=[0., 1., 2.], y=[2, 3, 4])
df3 = df1.join(df2, on="id")
assert df3.id.tolist() == [0, 1, None]
assert df3.x.tolist() == [1, 2, 3]
assert df3.y.tolist() == [2, 3, None]
def test_join_on_null_and_nan_primitive(array_factory):
df1 = vaex.from_arrays(id=array_factory([np.nan, 1., None]), x=[1,2,3])
df2 = vaex.from_arrays(id=[0., 1., 2.], y=[2,3,4])
df3 = df1.join(df2, on="id")
assert df3.id.tolist()[1:] == [1, None]
assert np.isnan(df3.id.tolist()[0])
assert df3.x.tolist() == [1, 2, 3]
assert df3.y.tolist() == [None, 3, None]
def test_join_on_null_string():
df1 = vaex.from_arrays(id=pa.array(["0", "1", None]), x=[1,2,3])
df2 = vaex.from_arrays(id=pa.array(["0", "1", "2"]), y=[2,3,4])
df3 = df1.join(df2, on="id")
assert df3.id.tolist() == ["0", "1", None]
assert df3.x.tolist() == [1, 2, 3]
assert df3.y.tolist() == [2, 3, None]
def test_join_f_c_left_none():
df = df_f.join(df_c, left_on="f", right_on="c", how="left")
assert df.shape == (3, 6)
assert df.f.tolist() == ["B", "C", None]
assert df.c.tolist() == ["B", "C", None]
assert df.w1.tolist() == ["dog", "cat", "mouse"]
assert df.w2.tolist() == [True, False, True]
assert df.z1.tolist() == [-1.0, -2.0, None]
assert df.z2.tolist() == [True, False, None]
def test_join_f_c_inner_none():
df = df_f.join(df_c, left_on="f", right_on="c", how="inner")
assert df.shape == (2, 6)
assert df.f.tolist() == ["B", "C"]
assert df.c.tolist() == ["B", "C"]
assert df.w1.tolist() == ["dog", "cat"]
assert df.w2.tolist() == [True, False]
assert df.z1.tolist() == [-1.0, -2.0]
assert df.z2.tolist() == [True, False]
def test_join_f_c_left_none_fillna():
    """Filling the left join key first keeps its nulls distinct after a left join."""
    left = df_f.copy()
    left['f'] = left.f.fillna(value='missing')
    joined = left.join(df_c, left_on='f', right_on='c', how='left')
    assert joined.shape == (3, 6)
    # 'missing' has no counterpart on the right, so right columns stay null.
    assert joined.f.tolist() == ['B', 'C', 'missing']
    assert joined.c.tolist() == ['B', 'C', None]
    assert joined.w1.tolist() == ['dog', 'cat', 'mouse']
    assert joined.w2.tolist() == [True, False, True]
    assert joined.z1.tolist() == [-1.0, -2.0, None]
    assert joined.z2.tolist() == [True, False, None]
def test_join_f_c_inner_none_fillna():
    """Filling the left join key does not change the rows an inner join keeps."""
    left = df_f.copy()
    left['f'] = left.f.fillna(value='missing')
    joined = left.join(df_c, left_on='f', right_on='c', how='inner')
    assert joined.shape == (2, 6)
    assert joined.f.tolist() == ['B', 'C']
    assert joined.c.tolist() == ['B', 'C']
    assert joined.w1.tolist() == ['dog', 'cat']
    assert joined.w2.tolist() == [True, False]
    assert joined.z1.tolist() == [-1.0, -2.0]
    assert joined.z2.tolist() == [True, False]
|
vaexioREPO_NAMEvaexPATH_START.@vaex_extracted@vaex-master@tests@join_test.py@.PATH_END.py
|
{
"filename": "cutouts.py",
"repo_name": "legacysurvey/imagine",
"repo_path": "imagine_extracted/imagine-main/map/cutouts.py",
"type": "Python"
}
|
from __future__ import print_function
if __name__ == '__main__':
import sys
sys.path.insert(0, 'django-1.9')
import os
os.environ['DJANGO_SETTINGS_MODULE'] = 'viewer.settings'
import django
import os
import fitsio
import numpy as np
from map.utils import send_file
from map.views import needs_layer
from viewer import settings
# Import urlencode from the py2 location, falling back to py3.
# BUG FIX: the original used a bare `except:`, which would also swallow
# unrelated errors (KeyboardInterrupt, SystemExit, typos inside the try
# body); only an ImportError should trigger the py3 fallback.
try:
    # py2
    #from urllib2 import urlopen
    from urllib import urlencode
except ImportError:
    # py3
    #from urllib.request import urlopen
    from urllib.parse import urlencode

# Debug logging: `debug` is print when DEBUG_LOGGING is on, a no-op otherwise.
debug = print
if not settings.DEBUG_LOGGING:
    def debug(*args, **kwargs):
        pass
@needs_layer()
def _cutout(req, jpeg=True):
    """Return a JPEG or FITS cutout HTTP response for the requested layer.

    Delegates rendering to the layer's ``get_cutout()`` and guarantees that
    any temporary files the layer created are removed.

    Parameters
    ----------
    req : HttpRequest
        Incoming request; ``needs_layer()`` attaches ``req.layer`` /
        ``req.layer_name``.
    jpeg : bool
        If True produce a JPEG cutout, otherwise a FITS cutout.
    """
    from django.http import HttpResponseRedirect, HttpResponse
    if not settings.ENABLE_CUTOUTS:
        return HttpResponse('No cutouts enabled')
    # Sanjaya : redirect to NERSC
    if (settings.REDIRECT_CUTOUTS_DECAPS and
        req.layer_name in ['decaps', 'decaps-model', 'decaps-resid']):
        return HttpResponseRedirect('https://legacysurvey.org/viewer' + req.path + '?' + urlencode(req.GET))
    tempfiles = []
    try:
        if jpeg:
            rtn = req.layer.get_cutout(req, jpeg=True, tempfiles=tempfiles)
        else:
            rtn = req.layer.get_cutout(req, fits=True, tempfiles=tempfiles)
    finally:
        # BUG FIX: clean up temp files even when get_cutout() raises;
        # previously an exception leaked every file in `tempfiles`.
        for fn in tempfiles:
            os.unlink(fn)
    return rtn
def jpeg_cutout(req):
    """Django view: serve a JPEG cutout for the request."""
    return _cutout(req, jpeg=True)
def fits_cutout(req):
    """Django view: serve a FITS cutout for the request."""
    return _cutout(req=req, jpeg=False)
if __name__ == '__main__':
    # Ad-hoc smoke test: build a minimal stand-in for a Django request
    # and fetch one cutout without running the full server stack.
    import os
    os.environ['DJANGO_SETTINGS_MODULE'] = 'viewer.settings'
    class duck(object):
        # Bare attribute holder used in place of an HttpRequest.
        pass
    req = duck()
    req.META = dict()
    # NOTE(review): GET is a plain dict here, not a QueryDict — assumes
    # jpeg_cutout only reads it; verify urlencode(req.GET) handles it.
    req.GET = dict(layer='decals-dr3', ra=246.2093, dec=9.6062)
    r = jpeg_cutout(req)
    print('Result', r)
|
legacysurveyREPO_NAMEimaginePATH_START.@imagine_extracted@imagine-main@map@cutouts.py@.PATH_END.py
|
{
"filename": "testCAndCPP.py",
"repo_name": "terryyin/lizard",
"repo_path": "lizard_extracted/lizard-master/test/test_languages/testCAndCPP.py",
"type": "Python"
}
|
import unittest
from lizard_languages import CLikeReader
from ..testHelpers import get_cpp_fileinfo, get_cpp_function_list
class Test_C_Token_extension(unittest.TestCase):
    """Tests for C/C++ token preprocessing (preprocessor token pasting)."""

    def test_connecting_macro(self):
        # '##' pastes adjacent tokens; the expected output is not asserted
        # yet — the test currently only checks preprocess() does not raise.
        extended = CLikeReader(None).preprocess(("a##b c", ))
        #tbd
class Test_c_cpp_lizard(unittest.TestCase):
    """Function detection, naming and parameter counting for C/C++ source."""

    def test_empty(self):
        result = get_cpp_function_list("")
        self.assertEqual(0, len(result))

    def test_no_function(self):
        result = get_cpp_function_list("#include <stdio.h>\n")
        self.assertEqual(0, len(result))

    def test_one_function(self):
        result = get_cpp_function_list("int fun(){}")
        self.assertEqual(1, len(result))
        self.assertEqual("fun", result[0].name)

    def test_two_function(self):
        result = get_cpp_function_list("int fun(){}\nint fun1(){}\n")
        self.assertEqual(2, len(result))
        self.assertEqual("fun", result[0].name)
        self.assertEqual("fun1", result[1].name)
        self.assertEqual(1, result[0].start_line)
        self.assertEqual(1, result[0].end_line)
        self.assertEqual(2, result[1].start_line)
        self.assertEqual(2, result[1].end_line)

    def test_two_simplest_function(self):
        result = get_cpp_function_list("f(){}g(){}")
        self.assertEqual(2, len(result))
        self.assertEqual("f", result[0].name)
        self.assertEqual("g", result[1].name)

    def test_function_with_content(self):
        result = get_cpp_function_list("int fun(xx oo){int a; a= call(p1,p2);}")
        self.assertEqual(1, len(result))
        self.assertEqual("fun", result[0].name)
        self.assertEqual("fun( xx oo)", result[0].long_name)

    def test_old_style_c_function(self):
        result = get_cpp_function_list("""int fun(param) int praram; {}""")
        self.assertEqual(1, len(result))

    def test_not_old_style_c_function(self):
        result = get_cpp_function_list("m()"*1500 + "a(){}")
        self.assertEqual(1, len(result))

    def test_complicated_c_function(self):
        result = get_cpp_function_list("""int f(int(*)()){}""")
        self.assertEqual('f', result[0].name)

    def test_function_dec_with_throw(self):
        result = get_cpp_function_list("""int fun() throw();void foo(){}""")
        self.assertEqual(1, len(result))

    def test_function_dec_with_noexcept(self):
        result = get_cpp_function_list("int fun() noexcept(true);void foo(){}")
        self.assertEqual(1, len(result))

    def test_function_dec_followed_with_one_word_is_ok(self):
        result = get_cpp_function_list("""int fun() no_throw {}""")
        self.assertEqual(1, len(result))

    def test_function_declaration_is_not_counted(self):
        result = get_cpp_function_list("""int fun();class A{};""")
        self.assertEqual(0, len(result))

    def test_old_style_c_function_has_semicolon(self):
        result = get_cpp_function_list("""{(void*)a}{}""")
        self.assertEqual(0, len(result))

    def test_typedef_is_not_old_style_c_function(self):
        result = get_cpp_function_list('''typedef T() nT; foo(){}''')
        self.assertEqual("foo", result[0].name)

    def test_stupid_macro_before_function(self):
        result = get_cpp_function_list('''T() foo(){}''')
        self.assertEqual("foo", result[0].name)

    def test_only_word_can_be_function_name(self):
        result = get_cpp_function_list("""[(){}""")
        self.assertEqual(0, len(result))

    def test_double_slash_within_string(self):
        result = get_cpp_function_list("""int fun(){char *a="\\\\";}""")
        self.assertEqual(1, len(result))

    def test_number_with_thousands_separator_since_cpp14(self):
        result = get_cpp_function_list("""int fun(){
            int a= 100'000; if(b) c; return 123'456'789;
        }""")
        self.assertEqual(1, len(result))
        self.assertEqual(2, result[0].cyclomatic_complexity)

    def test_hex_number_with_thousands_separator_since_cpp14(self):
        result = get_cpp_function_list("""int fun(){
            int a= 0x12ab'34cd; if(b) c; return 0xEF56'7890'1A2B;
        }""")
        self.assertEqual(1, len(result))
        self.assertEqual(2, result[0].cyclomatic_complexity)

    def test_bin_number_with_thousands_separator_since_cpp14(self):
        result = get_cpp_function_list("""int fun(){
            int a= 0b0101'1100; if(b) c; return 0b1111'0000'1100'1110;
        }""")
        self.assertEqual(1, len(result))
        self.assertEqual(2, result[0].cyclomatic_complexity)

    def test_function_with_no_param_omitted(self):
        result = get_cpp_function_list("int fun(){}")
        self.assertEqual(0, result[0].parameter_count)

    def test_function_with_no_param_void(self):
        result = get_cpp_function_list("int fun(void){}")
        self.assertEqual(0, result[0].parameter_count)

    def test_function_with_1_param(self):
        result = get_cpp_function_list("int fun(aa bb){}")
        self.assertEqual(1, result[0].parameter_count)
        self.assertEqual(["bb"], result[0].parameters)

    def test_function_with_1_ref_param(self):
        result = get_cpp_function_list("int fun(aa * bb){}")
        self.assertEqual(1, result[0].parameter_count)
        self.assertEqual(["bb"], result[0].parameters)

    def test_function_with_param(self):
        result = get_cpp_function_list("int fun(aa * bb, cc dd){}")
        self.assertEqual(2, result[0].parameter_count)

    def test_function_with_strang_param(self):
        result = get_cpp_function_list("int fun(aa<mm, nn> bb){}")
        self.assertEqual(1, result[0].parameter_count)
        self.assertEqual("fun( aa<mm,nn> bb)", result[0].long_name)

    def test_function_with_strang_param2(self):
        result = get_cpp_function_list("int fun(aa<x<mm,(x, y)>, nn> bb, (cc)<xx, oo> d){}")
        self.assertEqual(2, result[0].parameter_count)

    def test_one_function_with_namespace(self):
        result = get_cpp_function_list("int abc::fun(){}")
        self.assertEqual(1, len(result))
        self.assertEqual("abc::fun", result[0].name)
        self.assertEqual("abc::fun()", result[0].long_name)

    def test_one_function_with_const(self):
        result = get_cpp_function_list("int abc::fun()const{}")
        self.assertEqual(1, len(result))
        self.assertEqual("abc::fun", result[0].name)
        self.assertEqual("abc::fun() const", result[0].long_name)

    def test_one_function_with_throw(self):
        result = get_cpp_function_list("""int fun() throw() {}""")
        self.assertEqual(1, len(result))
        self.assertEqual('fun', result[0].name)
        result = get_cpp_function_list("""int fun() throw(Exception) {}""")
        self.assertEqual(1, len(result))
        self.assertEqual('fun', result[0].name)

    def test_one_function_with_noexcept(self):
        result = get_cpp_function_list("int abc::fun()noexcept{}")
        self.assertEqual(1, len(result))
        self.assertEqual("abc::fun", result[0].name)
        result = get_cpp_function_list("int fun() noexcept(true) {}")
        self.assertEqual(1, len(result))
        self.assertEqual('fun', result[0].name)
        result = get_cpp_function_list(
            "int fun() noexcept(noexcept(foo()) && noexcept(Bar())) {}")
        self.assertEqual(1, len(result))
        self.assertEqual('fun', result[0].name)

    def test_two_functions_in_class(self):
        result = get_cpp_function_list("class c {~c(){}}; int d(){}")
        self.assertEqual(2, len(result))
        self.assertEqual("c::~c", result[0].name)
        self.assertEqual("d", result[1].name)

    def test_one_macro_in_class(self):
        result = get_cpp_function_list("class c {M()}; int d(){}")
        self.assertEqual(1, len(result))
        self.assertEqual("d", result[0].name)

    def test_pre_class(self):
        result = get_cpp_function_list("class c; int d(){}")
        self.assertEqual(1, len(result))
        self.assertEqual("d", result[0].name)

    def test_class_with_inheritance(self):
        result = get_cpp_function_list("class c final:public b {int f(){}};")
        self.assertEqual(1, len(result))
        self.assertEqual("c::f", result[0].name)

    def test_nested_class(self):
        result = get_cpp_function_list("class c {class d {int f(){}};};")
        self.assertEqual(1, len(result))
        self.assertEqual("c::d::f", result[0].name)

    def test_template_class(self):
        result = get_cpp_function_list("template<typename T> class c {};")
        self.assertEqual(0, len(result))
        result = get_cpp_function_list("template<class T> class c {};")
        self.assertEqual(0, len(result))
        result = get_cpp_function_list("template<typename T> class c {"
                                       "void f(T t) {}};")
        self.assertEqual(1, len(result))
        self.assertEqual("c::f", result[0].name)
        result = get_cpp_function_list("template<class T> class c {"
                                       "void f(T t) {}};")
        self.assertEqual(1, len(result))
        self.assertEqual("c::f", result[0].name)
        result = get_cpp_function_list("template<class T, typename S>"
                                       "class c {void f(T t) {}};")
        self.assertEqual(1, len(result))
        self.assertEqual("c::f", result[0].name)
        result = get_cpp_function_list("namespace ns { template<class T>"
                                       "class c {void f(T t) {}}; }")
        self.assertEqual(1, len(result))
        self.assertEqual("ns::c::f", result[0].name)

    def test_template_class_full_specialization(self):
        result = get_cpp_function_list("template<> class c<double> {};")
        self.assertEqual(0, len(result))
        result = get_cpp_function_list("template<> class c<double> {"
                                       "void f() {}};")
        self.assertEqual(1, len(result))
        self.assertEqual("c::f", result[0].name)
        result = get_cpp_function_list("template<>"
                                       "class c<double, int> {void f() {}};")
        self.assertEqual(1, len(result))
        self.assertEqual("c::f", result[0].name)
        result = get_cpp_function_list("namespace ns { template<>"
                                       "class c<double> {void f() {}}; }")
        self.assertEqual(1, len(result))
        self.assertEqual("ns::c::f", result[0].name)

    def test_template_class_partial_specialization(self):
        result = get_cpp_function_list(
            "template<typename T> class c<int,T> {};")
        self.assertEqual(0, len(result))
        result = get_cpp_function_list("template<class T> class c<int,T> {};")
        self.assertEqual(0, len(result))
        result = get_cpp_function_list("template<typename T> class c<int,T> {"
                                       "void f(T t) {}};")
        self.assertEqual(1, len(result))
        self.assertEqual("c::f", result[0].name)
        result = get_cpp_function_list("template<class T> class c<int,T> {"
                                       "void f(T t) {}};")
        self.assertEqual(1, len(result))
        self.assertEqual("c::f", result[0].name)
        result = get_cpp_function_list("template<class T, typename S>"
                                       "class c<int,T,S> {void f(T t) {}};")
        self.assertEqual(1, len(result))
        self.assertEqual("c::f", result[0].name)
        result = get_cpp_function_list("namespace ns { template<class T>"
                                       "class c<int,T> {void f(T t) {}}; }")
        self.assertEqual(1, len(result))
        self.assertEqual("ns::c::f", result[0].name)

    def test_template_function(self):
        result = get_cpp_function_list("template<typename T> void f(T t) {}")
        self.assertEqual(1, len(result))
        self.assertEqual("f", result[0].name)
        result = get_cpp_function_list("template<class T> void f(T t) {}")
        self.assertEqual(1, len(result))
        self.assertEqual("f", result[0].name)
        result = get_cpp_function_list("namespace ns {"
                                       "template<class T> void f(T t) {}}")
        self.assertEqual(1, len(result))
        self.assertEqual("ns::f", result[0].name)

    def test_template_function_specialization(self):
        result = get_cpp_function_list("template<> void f<double>() {}")
        self.assertEqual(1, len(result))
        self.assertEqual("f<double>", result[0].name)
        result = get_cpp_function_list("namespace ns {"
                                       "template<> void f<double>() {}}")
        self.assertEqual(1, len(result))
        self.assertEqual("ns::f<double>", result[0].name)

    def test_nested_template_function(self):
        result = get_cpp_function_list("template<typename T> class c { "
                                       "template<typename S> void f() {} };")
        self.assertEqual(1, len(result))
        self.assertEqual("c::f", result[0].name)
        result = get_cpp_function_list("template<class T> class c { "
                                       "template<class S> void f() {} };")
        self.assertEqual(1, len(result))
        self.assertEqual("c::f", result[0].name)
        result = get_cpp_function_list("namespace ns { "
                                       "template<class T> class c { "
                                       "template<class S> void f() {} }; }")
        self.assertEqual(1, len(result))
        self.assertEqual("ns::c::f", result[0].name)

    def test_templated_code_with_question_mark(self):
        result = get_cpp_function_list("void a(){Class<?>[];}")
        self.assertEqual(1, result[0].cyclomatic_complexity)

    def test_class_as_an_attribute(self):
        result = get_cpp_function_list("void a(){{String.class}}")
        self.assertEqual(1, result[0].cyclomatic_complexity)

    def test_1(self):
        result = get_cpp_function_list("class c {{}}")
        self.assertEqual(0, len(result))

    def test_bracket_that_is_not_a_namespace(self):
        result = get_cpp_function_list("class c { {};int f(){}};")
        self.assertEqual(1, len(result))
        self.assertEqual("c::f", result[0].name)

    def test_nested_class_middle(self):
        result = get_cpp_function_list("class c {class d {};int f(){}};")
        self.assertEqual(1, len(result))
        self.assertEqual("c::f", result[0].name)

    def test_template_as_reference(self):
        result = get_cpp_function_list("abc::def(a<b>& c){}")
        self.assertEqual(1, len(result))

    def test_less_then_is_not_template(self):
        result = get_cpp_function_list("def(<); foo(){}")
        self.assertEqual(1, len(result))

    def test_template_with_pointer(self):
        result = get_cpp_function_list("abc::def (a<b*> c){}")
        self.assertEqual(1, len(result))

    def test_nested_template(self):
        result = get_cpp_function_list("abc::def (a<b<c>> c){}")
        self.assertEqual(1, len(result))

    def test_double_nested_template(self):
        result = get_cpp_function_list("abc::def (a<b<c<d>>> c){}")
        self.assertEqual(1, len(result))

    def test_template_with_reference(self):
        result = get_cpp_function_list("void fun(t<int &>b){} ")
        self.assertEqual(1, len(result))

    def test_template_with_reference_as_reference(self):
        result = get_cpp_function_list("void fun(t<const int&>&b){} ")
        self.assertEqual(1, len(result))

    def test_template_as_part_of_function_name(self):
        result = get_cpp_function_list("void fun<a,b<c>>(){} ")
        self.assertEqual('fun<a,b<c>>', result[0].name)

    def test_operator_overloading(self):
        result = get_cpp_function_list("bool operator +=(int b){}")
        self.assertEqual("operator +=", result[0].name)

    def test_operator_overloading_shift(self):
        result = get_cpp_function_list("bool operator <<(int b){}")
        self.assertEqual("operator < <", result[0].name)

    def test_operator_with_complicated_name(self):
        result = get_cpp_function_list("operator MyStruct&(){}")
        self.assertEqual("operator MyStruct &", result[0].name)

    def test_operator_overloading_with_namespace(self):
        result = get_cpp_function_list("bool TC::operator !(int b){}")
        self.assertEqual(1, len(result))
        self.assertEqual("TC::operator !", result[0].name)

    def test_function_operator(self):
        result = get_cpp_function_list("bool TC::operator ()(int b){}")
        self.assertEqual(1, len(result))
        self.assertEqual("TC::operator ( )", result[0].name)

    def test_inline_operator(self):
        result = get_cpp_function_list("class A { bool operator ()(int b) {} };")
        self.assertEqual(1, len(result))
        self.assertEqual("A::operator ( )", result[0].name)

    def test_namespace_alias(self):
        result = get_cpp_function_list(
            "namespace p;"
            "namespace real { bool foo() {} }")
        self.assertEqual(1, len(result))
        self.assertEqual("real::foo", result[0].name)

    def test_nested_unnamed_namespace(self):
        result = get_cpp_function_list(
            "namespace real { namespace { bool foo() {} } }")
        self.assertEqual(1, len(result))
        self.assertEqual("real::foo", result[0].name)

    def test_constructor_initialization_list(self):
        result = get_cpp_function_list('''A::A():a(1){}''')
        self.assertEqual(1, len(result))
        self.assertEqual("A::A", result[0].name)

    def test_constructor_initialization_list_noexcept(self):
        result = get_cpp_function_list('''A::A()noexcept:a(1){}''')
        # BUG FIX: the original test asserted nothing; assert the same
        # contract as test_constructor_initialization_list.
        self.assertEqual(1, len(result))
        self.assertEqual("A::A", result[0].name)

    def test_constructor_initializer_list(self):
        result = get_cpp_function_list('''A::A():a({1}),value(true){}''')
        self.assertEqual(1, len(result))
        self.assertEqual("A::A", result[0].name)

    def test_constructor_uniform_initialization(self):
        result = get_cpp_function_list('''A::A():a{1}{}''')
        self.assertEqual(1, len(result))
        self.assertEqual("A::A", result[0].name)

    def test_parentheses_before_function(self):
        result = get_cpp_function_list('''()''')
        self.assertEqual(0, len(result))

    def test_destructor_implementation(self):
        result = get_cpp_function_list('''A::~A(){}''')
        self.assertEqual(1, len(result))
        self.assertEqual("A::~A", result[0].name)

    def test_function_that_returns_function_pointers(self):
        result = get_cpp_function_list('''int (*fun())(){}''')
        self.assertEqual(1, len(result))
        self.assertEqual("int( * fun())", result[0].name)

    def test_struct_in_return_type(self):
        result = get_cpp_function_list(''' struct a b() { a(1>2); }''')
        self.assertEqual(1, len(result))
        self.assertEqual("b", result[0].name)

    def test_function_name_class(self):
        result = get_cpp_function_list('''int class(){}''')
        self.assertEqual(1, len(result))

    def test_underscore(self):
        # Unused CodeReader import/assignment removed (dead code).
        result = get_cpp_function_list(''' a() _() { }''')
        self.assertEqual(1, len(result))

    def test_global_var_constructor(self):
        result = get_cpp_function_list('''std::string s("String");''')
        self.assertEqual(0, len(result))
        result = get_cpp_function_list('''std::string s = "String";''')
        self.assertEqual(0, len(result))

    def test_non_function_initializer_list(self):
        result = get_cpp_function_list('''v={}''')
        self.assertEqual(0, len(result))
        result = get_cpp_function_list('''v = {};''')
        self.assertEqual(0, len(result))
        result = get_cpp_function_list('''std::vector<int> v = {1, 2, 3};''')
        self.assertEqual(0, len(result))
        result = get_cpp_function_list('''v = {1, 2, 3};''')
        self.assertEqual(0, len(result))
        result = get_cpp_function_list('''namespace n { v = {}; }''')
        self.assertEqual(0, len(result))
        result = get_cpp_function_list('''class n { int v = {0}; }''')
        self.assertEqual(0, len(result))

    def test_non_function_uniform_initialization(self):
        result = get_cpp_function_list('''std::vector<int> v{1, 2, 3};''')
        self.assertEqual(0, len(result))
        result = get_cpp_function_list('''std::vector<int> v{};''')
        self.assertEqual(0, len(result))
        result = get_cpp_function_list('''namespace n { int v{0}; }''')
        self.assertEqual(0, len(result))
        result = get_cpp_function_list('''class n { int v{0}; }''')
        self.assertEqual(0, len(result))

    def test_struct_in_param(self):
        result = get_cpp_function_list('''int fun(struct a){}''')
        self.assertEqual(1, len(result))

    def test_trailing_return_type(self):
        """C++11 trailing return type for functions."""
        result = get_cpp_function_list("auto foo() -> void {}")
        self.assertEqual(1, len(result))
        self.assertEqual("foo", result[0].name)
        result = get_cpp_function_list("auto foo(int a) -> decltype(a) {}")
        self.assertEqual(1, len(result))
        self.assertEqual("foo", result[0].name)

    def test_ref_qualifiers(self):
        """C++11 ref qualifiers for member functions."""
        result = get_cpp_function_list("struct A { void foo() & {} };")
        self.assertEqual(1, len(result))
        self.assertEqual("A::foo", result[0].name)
        result = get_cpp_function_list("struct A { void foo() const & {} };")
        self.assertEqual(1, len(result))
        self.assertEqual("A::foo", result[0].name)
        result = get_cpp_function_list("struct A { void foo() && {} };")
        self.assertEqual(1, len(result))
        self.assertEqual("A::foo", result[0].name)
        result = get_cpp_function_list("struct A { void foo() const && {} };")
        self.assertEqual(1, len(result))
        self.assertEqual("A::foo", result[0].name)

    def test_union_as_qualifier(self):
        """Union as namespace for functions."""
        result = get_cpp_function_list("union A { void foo() {} };")
        self.assertEqual(1, len(result))
        self.assertEqual("A::foo", result[0].name)
class Test_cpp11_Attributes(unittest.TestCase):
    """C++11 extendable attributes can appear pretty much anywhere."""

    def test_namespace(self):
        funcs = get_cpp_function_list(
            "namespace [[visibility(hidden)]] ns { void foo() {} }")
        self.assertEqual(1, len(funcs))
        self.assertEqual("ns::foo", funcs[0].name)
        funcs = get_cpp_function_list(
            "namespace ns [[deprecated]] { void foo() {} }")
        self.assertEqual(1, len(funcs))
        self.assertEqual("ns::foo", funcs[0].name)

    def test_class(self):
        funcs = get_cpp_function_list(
            "struct [[alignas(8)]] A { void foo() {} };")
        self.assertEqual(1, len(funcs))
        self.assertEqual("A::foo", funcs[0].name)
        funcs = get_cpp_function_list(
            "struct A [[deprecated]] { void foo() {} };")
        self.assertEqual(1, len(funcs))
        self.assertEqual("A::foo", funcs[0].name)

    def test_function(self):
        funcs = get_cpp_function_list("void foo() [[noreturn]] {}")
        self.assertEqual(1, len(funcs))
        self.assertEqual("foo", funcs[0].name)

    def test_function_parameters(self):
        funcs = get_cpp_function_list("void foo(int a [[unused]]) {}")
        self.assertEqual(1, len(funcs))
        self.assertEqual("foo", funcs[0].name)
        funcs = get_cpp_function_list("void foo(int a [[unused]], int b) {}")
        self.assertEqual(1, len(funcs))
        self.assertEqual("foo", funcs[0].name)
        funcs = get_cpp_function_list("void foo(int b, int a [[unused]]) {}")
        self.assertEqual(1, len(funcs))
        self.assertEqual("foo", funcs[0].name)

    def test_function_return_type(self):
        funcs = get_cpp_function_list(
            "int [[warn_unused_result]] foo(int a) {}")
        self.assertEqual(1, len(funcs))
        self.assertEqual("foo", funcs[0].name)

    def test_control_structures(self):
        funcs = get_cpp_function_list(
            "int foo() { [[likely(true)]] if (a) return 1; else return 2; }")
        self.assertEqual(1, len(funcs))
        self.assertEqual(2, funcs[0].cyclomatic_complexity)
        funcs = get_cpp_function_list(
            """int foo() {
            for [[omp::parallel()]] (int i{}; i < n; ++i)
            sum += i; }""")
        self.assertEqual(1, len(funcs))
        self.assertEqual(2, funcs[0].cyclomatic_complexity)
class Test_Preprocessing(unittest.TestCase):
    """Preprocessor directives and function-like macros must not confuse parsing."""

    def test_content_macro_should_be_ignored(self):
        funcs = get_cpp_function_list(r'''
        #define MTP_CHEC \
        int foo () { \
        }
        ''')
        self.assertEqual(0, len(funcs))

    def test_preprocessors_should_be_ignored_outside_function_implementation(self):
        funcs = get_cpp_function_list('''
        #ifdef MAGIC
        #endif
        void foo()
        {}
        ''')
        self.assertEqual(1, len(funcs))

    def test_preprocessor_is_not_function(self):
        funcs = get_cpp_function_list('''
        #ifdef A
        #elif (defined E)
        #endif
        ''')
        self.assertEqual(0, len(funcs))

    def test_body_with_function_like(self):
        '''in the following example 'xws' is a macro defined somewhere else'''
        funcs = get_cpp_function_list("""int a() { xws (a) if(){} }""")
        self.assertEqual(1, len(funcs))
        self.assertEqual('a', funcs[0].name)

    def test_body_with_macro_call_after_if(self):
        funcs = get_cpp_function_list("""int a() { if (a) b(){} }""")
        self.assertEqual(1, len(funcs))
        self.assertEqual('a', funcs[0].name)

    def test_body_with_macro_call_after_if_and_no_semicolon_before_the_closing_br(self):
        funcs = get_cpp_function_list("""int a() { if (a) b() } int c(){}""")
        self.assertEqual(2, len(funcs))
        self.assertEqual('c', funcs[1].name)

    def test_body_with_macro_call_after_if_and_no_semicolon_before_the_closing_br2(self):
        funcs = get_cpp_function_list("""int a() { if (a) if(x) b() } int c(){}""")
        self.assertEqual(2, len(funcs))
        self.assertEqual('c', funcs[1].name)

    def test_body_with_macro_and_class(self):
        funcs = get_cpp_function_list("""class A{int a() { if (a) if(x) b() } int c(){}}""")
        self.assertEqual(2, len(funcs))
        self.assertEqual('A::c', funcs[1].name)

    def test_body_with_function_like2(self):
        '''in the following example 'b' is a macro defined somewhere else'''
        funcs = get_cpp_function_list("""
        void myFunction()
        {
        IGNORE_FLAGS("w-maybe")
        if(2+2==4)
        END_IGNORE_FLAGS("w-maybe")
        {
        mySecondFunction()
        }
        }
        int mySecondFunction()
        {
        return 2;
        }
        """)
        self.assertEqual(2, len(funcs))
        self.assertEqual('mySecondFunction', funcs[1].name)
class Test_Big(unittest.TestCase):
    """Larger or historically troublesome snippets."""

    def test_trouble(self):
        source = "foo<y () >> 5> r;"
        funcs = get_cpp_function_list(source)
        self.assertEqual(0, len(funcs))

    def test_typedef(self):
        source = """
        typedef struct tagAAA
        {
        }AAA;
        int func_a(int size)
        {
        if(ccc && eee)
        {
        return 1;
        }
        }
        """
        funcs = get_cpp_function_list(source)
        self.assertEqual(1, len(funcs))
        self.assertEqual(3, funcs[0].cyclomatic_complexity)
class Test_Dialects(unittest.TestCase):
    """Vendor dialect extensions of C/C++."""

    def test_cuda_kernel_launch(self):
        """Special triple < and > for Nvidia CUDA C/C++ code."""
        funcs = get_cpp_function_list('''void foo() {
        kernel <<< gridDim, blockDim, 0 >>> (d_data, height, width);
        }''')
        self.assertEqual(1, len(funcs))
        self.assertEqual("foo", funcs[0].name)
        self.assertEqual(1, funcs[0].cyclomatic_complexity)
        funcs = get_cpp_function_list('''void foo() {
        kernel <<< gridDim, blockDim, (bar ? 0 : 1) >>> (x, y, z);
        }''')
        self.assertEqual(1, len(funcs))
        self.assertEqual(2, funcs[0].cyclomatic_complexity)
        funcs = get_cpp_function_list('''void foo() {
        kernel <<< gridDim, blockDim, 0 >>> (x, y, (bar ? w : z));
        }''')
        self.assertEqual(1, len(funcs))
        self.assertEqual(2, funcs[0].cyclomatic_complexity)
|
terryyinREPO_NAMElizardPATH_START.@lizard_extracted@lizard-master@test@test_languages@testCAndCPP.py@.PATH_END.py
|
{
"filename": "test_cli.py",
"repo_name": "morpheus-project/morpheus",
"repo_path": "morpheus_extracted/morpheus-master/morpheus/tests/test_cli.py",
"type": "Python"
}
|
# MIT License
# Copyright 2019 Ryan Hausen
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# ==============================================================================
"""Unit tests for the cli interface."""
import os
import pytest
import morpheus.__main__ as cli
@pytest.mark.unit
class TestCLI:
    """Tests for the cli interface."""

    @staticmethod
    def test_valid_file():
        """Tests _valid_file."""
        local = os.path.dirname(os.path.abspath(__file__))
        f_name = "test.fits"
        open(os.path.join(local, f_name), "w").close()
        valid_file = cli._valid_file(os.path.join(local, f_name))
        os.remove(os.path.join(local, f_name))
        assert valid_file

    @staticmethod
    def test_valid_file_raises():
        """Tests _valid_file, raises ValueError for incorrect file ending"""
        with pytest.raises(ValueError):
            cli._valid_file(__file__)

    @staticmethod
    def test_valid_dir():
        """Tests _valid_dir."""
        local = os.path.dirname(os.path.abspath(__file__))
        assert cli._valid_dir(local)

    @staticmethod
    def test_valid_dir_raises():
        """Tests _valid_dir raises for non dir."""
        with pytest.raises(ValueError):
            assert cli._valid_dir(__file__)

    @staticmethod
    def test_gpus():
        """Tests _gpus."""
        gpus = "1,2,3"
        assert [1, 2, 3] == cli._gpus(gpus)

    @staticmethod
    def test_gpus_raises():
        """Test _gpus raises ValueError for passing single gpus."""
        gpus = "1"
        with pytest.raises(ValueError):
            cli._gpus(gpus)

    @staticmethod
    def test_parse_args_raises_cpus_gpus():
        """test _parse_args raise ValueError for passing cpus and gpus."""
        local = os.path.dirname(os.path.abspath(__file__))
        f_name = "test.fits"
        open(os.path.join(local, f_name), "w").close()
        test_file = os.path.join(local, f_name)
        cli_args = f"{test_file} {test_file} {test_file} {test_file} "
        cli_args += "--cpus 3 --gpus 1,2,3"
        try:
            with pytest.raises(ValueError):
                cli._parse_args(cli_args.split())
        finally:
            # Remove the fixture file even if the assertion fails (was leaked).
            os.remove(test_file)

    @staticmethod
    def test_parse_args_doesnt_raise():
        """test _parse_args smooth sailing."""
        local = os.path.dirname(os.path.abspath(__file__))
        f_name = "test.fits"
        open(os.path.join(local, f_name), "w").close()
        test_file = os.path.join(local, f_name)
        cli_args = f"{test_file} {test_file} {test_file} {test_file} "
        # BUG FIX: the original passed both --cpus and --gpus (the exact
        # error case of the previous test) wrapped in pytest.raises, so the
        # success path was never exercised.  Pass only --cpus and expect
        # _parse_args to complete without raising.
        cli_args += "--cpus 3"
        try:
            cli._parse_args(cli_args.split())
        finally:
            os.remove(test_file)
|
morpheus-projectREPO_NAMEmorpheusPATH_START.@morpheus_extracted@morpheus-master@morpheus@tests@test_cli.py@.PATH_END.py
|
{
"filename": "_forest.py",
"repo_name": "scikit-learn/scikit-learn",
"repo_path": "scikit-learn_extracted/scikit-learn-main/sklearn/ensemble/_forest.py",
"type": "Python"
}
|
"""
Forest of trees-based ensemble methods.
Those methods include random forests and extremely randomized trees.
The module structure is the following:
- The ``BaseForest`` base class implements a common ``fit`` method for all
the estimators in the module. The ``fit`` method of the base ``Forest``
class calls the ``fit`` method of each sub-estimator on random samples
(with replacement, a.k.a. bootstrap) of the training set.
The init of the sub-estimator is further delegated to the
``BaseEnsemble`` constructor.
- The ``ForestClassifier`` and ``ForestRegressor`` base classes further
implement the prediction logic by computing an average of the predicted
outcomes of the sub-estimators.
- The ``RandomForestClassifier`` and ``RandomForestRegressor`` derived
classes provide the user with concrete implementations of
the forest ensemble method using classical, deterministic
``DecisionTreeClassifier`` and ``DecisionTreeRegressor`` as
sub-estimator implementations.
- The ``ExtraTreesClassifier`` and ``ExtraTreesRegressor`` derived
classes provide the user with concrete implementations of the
forest ensemble method using the extremely randomized trees
``ExtraTreeClassifier`` and ``ExtraTreeRegressor`` as
sub-estimator implementations.
Single and multi-output problems are both handled.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import threading
from abc import ABCMeta, abstractmethod
from numbers import Integral, Real
from warnings import catch_warnings, simplefilter, warn
import numpy as np
from scipy.sparse import hstack as sparse_hstack
from scipy.sparse import issparse
from ..base import (
ClassifierMixin,
MultiOutputMixin,
RegressorMixin,
TransformerMixin,
_fit_context,
is_classifier,
)
from ..exceptions import DataConversionWarning
from ..metrics import accuracy_score, r2_score
from ..preprocessing import OneHotEncoder
from ..tree import (
BaseDecisionTree,
DecisionTreeClassifier,
DecisionTreeRegressor,
ExtraTreeClassifier,
ExtraTreeRegressor,
)
from ..tree._tree import DOUBLE, DTYPE
from ..utils import check_random_state, compute_sample_weight
from ..utils._param_validation import Interval, RealNotInt, StrOptions
from ..utils._tags import get_tags
from ..utils.multiclass import check_classification_targets, type_of_target
from ..utils.parallel import Parallel, delayed
from ..utils.validation import (
_check_feature_names_in,
_check_sample_weight,
_num_samples,
check_is_fitted,
validate_data,
)
from ._base import BaseEnsemble, _partition_estimators
__all__ = [
"RandomForestClassifier",
"RandomForestRegressor",
"ExtraTreesClassifier",
"ExtraTreesRegressor",
"RandomTreesEmbedding",
]
MAX_INT = np.iinfo(np.int32).max
def _get_n_samples_bootstrap(n_samples, max_samples):
"""
Get the number of samples in a bootstrap sample.
Parameters
----------
n_samples : int
Number of samples in the dataset.
max_samples : int or float
The maximum number of samples to draw from the total available:
- if float, this indicates a fraction of the total and should be
the interval `(0.0, 1.0]`;
- if int, this indicates the exact number of samples;
- if None, this indicates the total number of samples.
Returns
-------
n_samples_bootstrap : int
The total number of samples to draw for the bootstrap sample.
"""
if max_samples is None:
return n_samples
if isinstance(max_samples, Integral):
if max_samples > n_samples:
msg = "`max_samples` must be <= n_samples={} but got value {}"
raise ValueError(msg.format(n_samples, max_samples))
return max_samples
if isinstance(max_samples, Real):
return max(round(n_samples * max_samples), 1)
def _generate_sample_indices(random_state, n_samples, n_samples_bootstrap):
    """Draw the in-bag (bootstrap) indices for one tree.

    Private helper for :func:`_parallel_build_trees`.
    """
    rng = check_random_state(random_state)
    return rng.randint(0, n_samples, n_samples_bootstrap, dtype=np.int32)
def _generate_unsampled_indices(random_state, n_samples, n_samples_bootstrap):
    """Return the out-of-bag indices for one tree.

    Private helper for the forest OOB-score machinery: re-draws the bootstrap
    sample with the same seed and returns its complement.
    """
    sampled = _generate_sample_indices(
        random_state, n_samples, n_samples_bootstrap
    )
    hit_counts = np.bincount(sampled, minlength=n_samples)
    # Samples never drawn into the bootstrap are the OOB samples.
    return np.arange(n_samples)[hit_counts == 0]
def _parallel_build_trees(
    tree,
    bootstrap,
    X,
    y,
    sample_weight,
    tree_idx,
    n_trees,
    verbose=0,
    class_weight=None,
    n_samples_bootstrap=None,
    missing_values_in_feature_mask=None,
):
    """Fit one tree of the ensemble; executed in parallel by joblib.

    Returns the fitted ``tree``. When ``bootstrap`` is true the bootstrap is
    realized through sample weights (draw counts) rather than row resampling.
    """
    if verbose > 1:
        print("building tree %d of %d" % (tree_idx + 1, n_trees))
    if not bootstrap:
        # No resampling: fit on the full data with the caller's weights.
        tree._fit(
            X,
            y,
            sample_weight=sample_weight,
            check_input=False,
            missing_values_in_feature_mask=missing_values_in_feature_mask,
        )
        return tree
    n_samples = X.shape[0]
    if sample_weight is None:
        weights = np.ones((n_samples,), dtype=np.float64)
    else:
        weights = sample_weight.copy()
    # Weight each sample by how many times the bootstrap drew it.
    indices = _generate_sample_indices(
        tree.random_state, n_samples, n_samples_bootstrap
    )
    weights *= np.bincount(indices, minlength=n_samples)
    if class_weight == "subsample":
        # Deprecated "auto" weighting computed on the drawn subsample.
        with catch_warnings():
            simplefilter("ignore", DeprecationWarning)
            weights *= compute_sample_weight("auto", y, indices=indices)
    elif class_weight == "balanced_subsample":
        weights *= compute_sample_weight("balanced", y, indices=indices)
    tree._fit(
        X,
        y,
        sample_weight=weights,
        check_input=False,
        missing_values_in_feature_mask=missing_values_in_feature_mask,
    )
    return tree
class BaseForest(MultiOutputMixin, BaseEnsemble, metaclass=ABCMeta):
    """
    Base class for forests of trees.
    Warning: This class should not be used directly. Use derived classes
    instead.
    """
    # Declarative constraints consumed by scikit-learn's parameter
    # validation machinery (via `_fit_context`); subclasses extend this.
    _parameter_constraints: dict = {
        "n_estimators": [Interval(Integral, 1, None, closed="left")],
        "bootstrap": ["boolean"],
        "oob_score": ["boolean", callable],
        "n_jobs": [Integral, None],
        "random_state": ["random_state"],
        "verbose": ["verbose"],
        "warm_start": ["boolean"],
        "max_samples": [
            None,
            Interval(RealNotInt, 0.0, 1.0, closed="right"),
            Interval(Integral, 1, None, closed="left"),
        ],
    }
    @abstractmethod
    def __init__(
        self,
        estimator,
        n_estimators=100,
        *,
        estimator_params=tuple(),
        bootstrap=False,
        oob_score=False,
        n_jobs=None,
        random_state=None,
        verbose=0,
        warm_start=False,
        class_weight=None,
        max_samples=None,
    ):
        # scikit-learn convention: store hyper-parameters unmodified here;
        # all validation happens at fit time.
        super().__init__(
            estimator=estimator,
            n_estimators=n_estimators,
            estimator_params=estimator_params,
        )
        self.bootstrap = bootstrap
        self.oob_score = oob_score
        self.n_jobs = n_jobs
        self.random_state = random_state
        self.verbose = verbose
        self.warm_start = warm_start
        self.class_weight = class_weight
        self.max_samples = max_samples
    def apply(self, X):
        """
        Apply trees in the forest to X, return leaf indices.
        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            The input samples. Internally, its dtype will be converted to
            ``dtype=np.float32``. If a sparse matrix is provided, it will be
            converted into a sparse ``csr_matrix``.
        Returns
        -------
        X_leaves : ndarray of shape (n_samples, n_estimators)
            For each datapoint x in X and for each tree in the forest,
            return the index of the leaf x ends up in.
        """
        X = self._validate_X_predict(X)
        # Threading backend: tree.apply is Cython code that releases the GIL.
        results = Parallel(
            n_jobs=self.n_jobs,
            verbose=self.verbose,
            prefer="threads",
        )(delayed(tree.apply)(X, check_input=False) for tree in self.estimators_)
        # Stack per-tree leaf ids into shape (n_samples, n_estimators).
        return np.array(results).T
    def decision_path(self, X):
        """
        Return the decision path in the forest.
        .. versionadded:: 0.18
        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            The input samples. Internally, its dtype will be converted to
            ``dtype=np.float32``. If a sparse matrix is provided, it will be
            converted into a sparse ``csr_matrix``.
        Returns
        -------
        indicator : sparse matrix of shape (n_samples, n_nodes)
            Return a node indicator matrix where non zero elements indicates
            that the samples goes through the nodes. The matrix is of CSR
            format.
        n_nodes_ptr : ndarray of shape (n_estimators + 1,)
            The columns from indicator[n_nodes_ptr[i]:n_nodes_ptr[i+1]]
            gives the indicator value for the i-th estimator.
        """
        X = self._validate_X_predict(X)
        indicators = Parallel(
            n_jobs=self.n_jobs,
            verbose=self.verbose,
            prefer="threads",
        )(
            delayed(tree.decision_path)(X, check_input=False)
            for tree in self.estimators_
        )
        # Cumulative node counts delimit each tree's columns in the
        # horizontally stacked indicator matrix.
        n_nodes = [0]
        n_nodes.extend([i.shape[1] for i in indicators])
        n_nodes_ptr = np.array(n_nodes).cumsum()
        return sparse_hstack(indicators).tocsr(), n_nodes_ptr
    @_fit_context(prefer_skip_nested_validation=True)
    def fit(self, X, y, sample_weight=None):
        """
        Build a forest of trees from the training set (X, y).
        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            The training input samples. Internally, its dtype will be converted
            to ``dtype=np.float32``. If a sparse matrix is provided, it will be
            converted into a sparse ``csc_matrix``.
        y : array-like of shape (n_samples,) or (n_samples, n_outputs)
            The target values (class labels in classification, real numbers in
            regression).
        sample_weight : array-like of shape (n_samples,), default=None
            Sample weights. If None, then samples are equally weighted. Splits
            that would create child nodes with net zero or negative weight are
            ignored while searching for a split in each node. In the case of
            classification, splits are also ignored if they would result in any
            single class carrying a negative weight in either child node.
        Returns
        -------
        self : object
            Fitted estimator.
        """
        # Validate or convert input data
        if issparse(y):
            raise ValueError("sparse multilabel-indicator for y is not supported.")
        X, y = validate_data(
            self,
            X,
            y,
            multi_output=True,
            accept_sparse="csc",
            dtype=DTYPE,
            ensure_all_finite=False,
        )
        # _compute_missing_values_in_feature_mask checks if X has missing values and
        # will raise an error if the underlying tree base estimator can't handle missing
        # values. Only the criterion is required to determine if the tree supports
        # missing values.
        estimator = type(self.estimator)(criterion=self.criterion)
        missing_values_in_feature_mask = (
            estimator._compute_missing_values_in_feature_mask(
                X, estimator_name=self.__class__.__name__
            )
        )
        if sample_weight is not None:
            sample_weight = _check_sample_weight(sample_weight, X)
        if issparse(X):
            # Pre-sort indices to avoid that each individual tree of the
            # ensemble sorts the indices.
            X.sort_indices()
        # Normalize y to shape (n_samples, n_outputs).
        y = np.atleast_1d(y)
        if y.ndim == 2 and y.shape[1] == 1:
            warn(
                (
                    "A column-vector y was passed when a 1d array was"
                    " expected. Please change the shape of y to "
                    "(n_samples,), for example using ravel()."
                ),
                DataConversionWarning,
                stacklevel=2,
            )
        if y.ndim == 1:
            # reshape is necessary to preserve the data contiguity against vs
            # [:, np.newaxis] that does not.
            y = np.reshape(y, (-1, 1))
        if self.criterion == "poisson":
            # Poisson deviance is only defined for non-negative targets
            # with a strictly positive sum.
            if np.any(y < 0):
                raise ValueError(
                    "Some value(s) of y are negative which is "
                    "not allowed for Poisson regression."
                )
            if np.sum(y) <= 0:
                raise ValueError(
                    "Sum of y is not strictly positive which "
                    "is necessary for Poisson regression."
                )
        self._n_samples, self.n_outputs_ = y.shape
        # Classifier subclasses encode classes and may expand class weights;
        # the base implementation is a no-op.
        y, expanded_class_weight = self._validate_y_class_weight(y)
        if getattr(y, "dtype", None) != DOUBLE or not y.flags.contiguous:
            y = np.ascontiguousarray(y, dtype=DOUBLE)
        if expanded_class_weight is not None:
            if sample_weight is not None:
                sample_weight = sample_weight * expanded_class_weight
            else:
                sample_weight = expanded_class_weight
        if not self.bootstrap and self.max_samples is not None:
            raise ValueError(
                "`max_sample` cannot be set if `bootstrap=False`. "
                "Either switch to `bootstrap=True` or set "
                "`max_sample=None`."
            )
        elif self.bootstrap:
            n_samples_bootstrap = _get_n_samples_bootstrap(
                n_samples=X.shape[0], max_samples=self.max_samples
            )
        else:
            n_samples_bootstrap = None
        self._n_samples_bootstrap = n_samples_bootstrap
        self._validate_estimator()
        if not self.bootstrap and self.oob_score:
            raise ValueError("Out of bag estimation only available if bootstrap=True")
        random_state = check_random_state(self.random_state)
        if not self.warm_start or not hasattr(self, "estimators_"):
            # Free allocated memory, if any
            self.estimators_ = []
        n_more_estimators = self.n_estimators - len(self.estimators_)
        if n_more_estimators < 0:
            raise ValueError(
                "n_estimators=%d must be larger or equal to "
                "len(estimators_)=%d when warm_start==True"
                % (self.n_estimators, len(self.estimators_))
            )
        elif n_more_estimators == 0:
            warn(
                "Warm-start fitting without increasing n_estimators does not "
                "fit new trees."
            )
        else:
            if self.warm_start and len(self.estimators_) > 0:
                # We draw from the random state to get the random state we
                # would have got if we hadn't used a warm_start.
                random_state.randint(MAX_INT, size=len(self.estimators_))
            trees = [
                self._make_estimator(append=False, random_state=random_state)
                for i in range(n_more_estimators)
            ]
            # Parallel loop: we prefer the threading backend as the Cython code
            # for fitting the trees is internally releasing the Python GIL
            # making threading more efficient than multiprocessing in
            # that case. However, for joblib 0.12+ we respect any
            # parallel_backend contexts set at a higher level,
            # since correctness does not rely on using threads.
            trees = Parallel(
                n_jobs=self.n_jobs,
                verbose=self.verbose,
                prefer="threads",
            )(
                delayed(_parallel_build_trees)(
                    t,
                    self.bootstrap,
                    X,
                    y,
                    sample_weight,
                    i,
                    len(trees),
                    verbose=self.verbose,
                    class_weight=self.class_weight,
                    n_samples_bootstrap=n_samples_bootstrap,
                    missing_values_in_feature_mask=missing_values_in_feature_mask,
                )
                for i, t in enumerate(trees)
            )
            # Collect newly grown trees
            self.estimators_.extend(trees)
        if self.oob_score and (
            n_more_estimators > 0 or not hasattr(self, "oob_score_")
        ):
            y_type = type_of_target(y)
            if y_type == "unknown" or (
                is_classifier(self) and y_type == "multiclass-multioutput"
            ):
                # FIXME: we could consider to support multiclass-multioutput if
                # we introduce or reuse a constructor parameter (e.g.
                # oob_score) allowing our user to pass a callable defining the
                # scoring strategy on OOB sample.
                raise ValueError(
                    "The type of target cannot be used to compute OOB "
                    f"estimates. Got {y_type} while only the following are "
                    "supported: continuous, continuous-multioutput, binary, "
                    "multiclass, multilabel-indicator."
                )
            # `oob_score` may be a boolean (use the default scorer) or a
            # user-supplied scoring callable.
            if callable(self.oob_score):
                self._set_oob_score_and_attributes(
                    X, y, scoring_function=self.oob_score
                )
            else:
                self._set_oob_score_and_attributes(X, y)
        # Decapsulate classes_ attributes
        if hasattr(self, "classes_") and self.n_outputs_ == 1:
            self.n_classes_ = self.n_classes_[0]
            self.classes_ = self.classes_[0]
        return self
    @abstractmethod
    def _set_oob_score_and_attributes(self, X, y, scoring_function=None):
        """Compute and set the OOB score and attributes.
        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            The data matrix.
        y : ndarray of shape (n_samples, n_outputs)
            The target matrix.
        scoring_function : callable, default=None
            Scoring function for OOB score. Default depends on whether
            this is a regression (R2 score) or classification problem
            (accuracy score).
        """
    def _compute_oob_predictions(self, X, y):
        """Compute and set the OOB score.
        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            The data matrix.
        y : ndarray of shape (n_samples, n_outputs)
            The target matrix.
        Returns
        -------
        oob_pred : ndarray of shape (n_samples, n_classes, n_outputs) or \
                (n_samples, 1, n_outputs)
            The OOB predictions.
        """
        # Prediction requires X to be in CSR format
        if issparse(X):
            X = X.tocsr()
        n_samples = y.shape[0]
        n_outputs = self.n_outputs_
        if is_classifier(self) and hasattr(self, "n_classes_"):
            # n_classes_ is a ndarray at this stage
            # all the supported type of target will have the same number of
            # classes in all outputs
            oob_pred_shape = (n_samples, self.n_classes_[0], n_outputs)
        else:
            # for regression, n_classes_ does not exist and we create an empty
            # axis to be consistent with the classification case and make
            # the array operations compatible with the 2 settings
            oob_pred_shape = (n_samples, 1, n_outputs)
        oob_pred = np.zeros(shape=oob_pred_shape, dtype=np.float64)
        # n_oob_pred counts how many trees left each sample out-of-bag.
        n_oob_pred = np.zeros((n_samples, n_outputs), dtype=np.int64)
        n_samples_bootstrap = _get_n_samples_bootstrap(
            n_samples,
            self.max_samples,
        )
        for estimator in self.estimators_:
            # Re-derive each tree's OOB rows from its stored seed.
            unsampled_indices = _generate_unsampled_indices(
                estimator.random_state,
                n_samples,
                n_samples_bootstrap,
            )
            y_pred = self._get_oob_predictions(estimator, X[unsampled_indices, :])
            oob_pred[unsampled_indices, ...] += y_pred
            n_oob_pred[unsampled_indices, :] += 1
        for k in range(n_outputs):
            if (n_oob_pred == 0).any():
                warn(
                    (
                        "Some inputs do not have OOB scores. This probably means "
                        "too few trees were used to compute any reliable OOB "
                        "estimates."
                    ),
                    UserWarning,
                )
                # Avoid division by zero for samples never left out-of-bag.
                n_oob_pred[n_oob_pred == 0] = 1
            oob_pred[..., k] /= n_oob_pred[..., [k]]
        return oob_pred
    def _validate_y_class_weight(self, y):
        # Default implementation
        return y, None
    def _validate_X_predict(self, X):
        """
        Validate X whenever one tries to predict, apply, predict_proba."""
        check_is_fitted(self)
        # NaN inputs are only allowed when the fitted trees support them.
        if self.estimators_[0]._support_missing_values(X):
            ensure_all_finite = "allow-nan"
        else:
            ensure_all_finite = True
        X = validate_data(
            self,
            X,
            dtype=DTYPE,
            accept_sparse="csr",
            reset=False,
            ensure_all_finite=ensure_all_finite,
        )
        if issparse(X) and (X.indices.dtype != np.intc or X.indptr.dtype != np.intc):
            raise ValueError("No support for np.int64 index based sparse matrices")
        return X
    @property
    def feature_importances_(self):
        """
        The impurity-based feature importances.
        The higher, the more important the feature.
        The importance of a feature is computed as the (normalized)
        total reduction of the criterion brought by that feature. It is also
        known as the Gini importance.
        Warning: impurity-based feature importances can be misleading for
        high cardinality features (many unique values). See
        :func:`sklearn.inspection.permutation_importance` as an alternative.
        Returns
        -------
        feature_importances_ : ndarray of shape (n_features,)
            The values of this array sum to 1, unless all trees are single node
            trees consisting of only the root node, in which case it will be an
            array of zeros.
        """
        check_is_fitted(self)
        # Root-only trees carry no split information and are excluded.
        all_importances = Parallel(n_jobs=self.n_jobs, prefer="threads")(
            delayed(getattr)(tree, "feature_importances_")
            for tree in self.estimators_
            if tree.tree_.node_count > 1
        )
        if not all_importances:
            return np.zeros(self.n_features_in_, dtype=np.float64)
        all_importances = np.mean(all_importances, axis=0, dtype=np.float64)
        return all_importances / np.sum(all_importances)
    def _get_estimators_indices(self):
        # Get drawn indices along both sample and feature axes
        for tree in self.estimators_:
            if not self.bootstrap:
                yield np.arange(self._n_samples, dtype=np.int32)
            else:
                # tree.random_state is actually an immutable integer seed rather
                # than a mutable RandomState instance, so it's safe to use it
                # repeatedly when calling this property.
                seed = tree.random_state
                # Operations accessing random_state must be performed identically
                # to those in `_parallel_build_trees()`
                yield _generate_sample_indices(
                    seed, self._n_samples, self._n_samples_bootstrap
                )
    @property
    def estimators_samples_(self):
        """The subset of drawn samples for each base estimator.
        Returns a dynamically generated list of indices identifying
        the samples used for fitting each member of the ensemble, i.e.,
        the in-bag samples.
        Note: the list is re-created at each call to the property in order
        to reduce the object memory footprint by not storing the sampling
        data. Thus fetching the property may be slower than expected.
        """
        return [sample_indices for sample_indices in self._get_estimators_indices()]
    def __sklearn_tags__(self):
        tags = super().__sklearn_tags__()
        # Only the criterion is required to determine if the tree supports
        # missing values
        estimator = type(self.estimator)(criterion=self.criterion)
        tags.input_tags.allow_nan = get_tags(estimator).input_tags.allow_nan
        return tags
def _accumulate_prediction(predict, X, out, lock):
"""
This is a utility function for joblib's Parallel.
It can't go locally in ForestClassifier or ForestRegressor, because joblib
complains that it cannot pickle it when placed there.
"""
prediction = predict(X, check_input=False)
with lock:
if len(out) == 1:
out[0] += prediction
else:
for i in range(len(out)):
out[i] += prediction[i]
class ForestClassifier(ClassifierMixin, BaseForest, metaclass=ABCMeta):
    """
    Base class for forest of trees-based classifiers.
    Warning: This class should not be used directly. Use derived classes
    instead.
    """
    @abstractmethod
    def __init__(
        self,
        estimator,
        n_estimators=100,
        *,
        estimator_params=tuple(),
        bootstrap=False,
        oob_score=False,
        n_jobs=None,
        random_state=None,
        verbose=0,
        warm_start=False,
        class_weight=None,
        max_samples=None,
    ):
        # Delegates all storage to BaseForest; no extra state is added here.
        super().__init__(
            estimator=estimator,
            n_estimators=n_estimators,
            estimator_params=estimator_params,
            bootstrap=bootstrap,
            oob_score=oob_score,
            n_jobs=n_jobs,
            random_state=random_state,
            verbose=verbose,
            warm_start=warm_start,
            class_weight=class_weight,
            max_samples=max_samples,
        )
    @staticmethod
    def _get_oob_predictions(tree, X):
        """Compute the OOB predictions for an individual tree.
        Parameters
        ----------
        tree : DecisionTreeClassifier object
            A single decision tree classifier.
        X : ndarray of shape (n_samples, n_features)
            The OOB samples.
        Returns
        -------
        y_pred : ndarray of shape (n_samples, n_classes, n_outputs)
            The OOB associated predictions.
        """
        y_pred = tree.predict_proba(X, check_input=False)
        y_pred = np.asarray(y_pred)
        if y_pred.ndim == 2:
            # binary and multiclass
            y_pred = y_pred[..., np.newaxis]
        else:
            # Roll the first `n_outputs` axis to the last axis. We will reshape
            # from a shape of (n_outputs, n_samples, n_classes) to a shape of
            # (n_samples, n_classes, n_outputs).
            y_pred = np.rollaxis(y_pred, axis=0, start=3)
        return y_pred
    def _set_oob_score_and_attributes(self, X, y, scoring_function=None):
        """Compute and set the OOB score and attributes.
        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            The data matrix.
        y : ndarray of shape (n_samples, n_outputs)
            The target matrix.
        scoring_function : callable, default=None
            Scoring function for OOB score. Defaults to `accuracy_score`.
        """
        self.oob_decision_function_ = super()._compute_oob_predictions(X, y)
        if self.oob_decision_function_.shape[-1] == 1:
            # drop the n_outputs axis if there is a single output
            self.oob_decision_function_ = self.oob_decision_function_.squeeze(axis=-1)
        if scoring_function is None:
            scoring_function = accuracy_score
        # Score the arg-max class of the averaged OOB probabilities.
        self.oob_score_ = scoring_function(
            y, np.argmax(self.oob_decision_function_, axis=1)
        )
    def _validate_y_class_weight(self, y):
        """Encode classes per output and expand class weights if requested.

        Returns the label-encoded ``y`` and, when ``class_weight`` is set,
        per-sample weights derived from it (otherwise None).
        """
        check_classification_targets(y)
        y = np.copy(y)
        expanded_class_weight = None
        if self.class_weight is not None:
            # Keep the original labels: sample-weight expansion below must be
            # computed on them, not on the encoded ones.
            y_original = np.copy(y)
        self.classes_ = []
        self.n_classes_ = []
        # Encode each output's labels to 0..n_classes-1 indices.
        y_store_unique_indices = np.zeros(y.shape, dtype=int)
        for k in range(self.n_outputs_):
            classes_k, y_store_unique_indices[:, k] = np.unique(
                y[:, k], return_inverse=True
            )
            self.classes_.append(classes_k)
            self.n_classes_.append(classes_k.shape[0])
        y = y_store_unique_indices
        if self.class_weight is not None:
            valid_presets = ("balanced", "balanced_subsample")
            if isinstance(self.class_weight, str):
                if self.class_weight not in valid_presets:
                    raise ValueError(
                        "Valid presets for class_weight include "
                        '"balanced" and "balanced_subsample".'
                        'Given "%s".' % self.class_weight
                    )
                if self.warm_start:
                    warn(
                        'class_weight presets "balanced" or '
                        '"balanced_subsample" are '
                        "not recommended for warm_start if the fitted data "
                        "differs from the full dataset. In order to use "
                        '"balanced" weights, use compute_class_weight '
                        '("balanced", classes, y). In place of y you can use '
                        "a large enough sample of the full training set "
                        "target to properly estimate the class frequency "
                        "distributions. Pass the resulting weights as the "
                        "class_weight parameter."
                    )
            # "balanced_subsample" with bootstrap is handled per-tree in
            # `_parallel_build_trees`; everything else is expanded here.
            if self.class_weight != "balanced_subsample" or not self.bootstrap:
                if self.class_weight == "balanced_subsample":
                    class_weight = "balanced"
                else:
                    class_weight = self.class_weight
                expanded_class_weight = compute_sample_weight(class_weight, y_original)
        return y, expanded_class_weight
    def predict(self, X):
        """
        Predict class for X.
        The predicted class of an input sample is a vote by the trees in
        the forest, weighted by their probability estimates. That is,
        the predicted class is the one with highest mean probability
        estimate across the trees.
        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            The input samples. Internally, its dtype will be converted to
            ``dtype=np.float32``. If a sparse matrix is provided, it will be
            converted into a sparse ``csr_matrix``.
        Returns
        -------
        y : ndarray of shape (n_samples,) or (n_samples, n_outputs)
            The predicted classes.
        """
        proba = self.predict_proba(X)
        if self.n_outputs_ == 1:
            return self.classes_.take(np.argmax(proba, axis=1), axis=0)
        else:
            n_samples = proba[0].shape[0]
            # all dtypes should be the same, so just take the first
            class_type = self.classes_[0].dtype
            predictions = np.empty((n_samples, self.n_outputs_), dtype=class_type)
            # Map each output's arg-max probability index back to its label.
            for k in range(self.n_outputs_):
                predictions[:, k] = self.classes_[k].take(
                    np.argmax(proba[k], axis=1), axis=0
                )
            return predictions
    def predict_proba(self, X):
        """
        Predict class probabilities for X.
        The predicted class probabilities of an input sample are computed as
        the mean predicted class probabilities of the trees in the forest.
        The class probability of a single tree is the fraction of samples of
        the same class in a leaf.
        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            The input samples. Internally, its dtype will be converted to
            ``dtype=np.float32``. If a sparse matrix is provided, it will be
            converted into a sparse ``csr_matrix``.
        Returns
        -------
        p : ndarray of shape (n_samples, n_classes), or a list of such arrays
            The class probabilities of the input samples. The order of the
            classes corresponds to that in the attribute :term:`classes_`.
        """
        check_is_fitted(self)
        # Check data
        X = self._validate_X_predict(X)
        # Assign chunk of trees to jobs
        n_jobs, _, _ = _partition_estimators(self.n_estimators, self.n_jobs)
        # avoid storing the output of every estimator by summing them here
        all_proba = [
            np.zeros((X.shape[0], j), dtype=np.float64)
            for j in np.atleast_1d(self.n_classes_)
        ]
        # Shared-memory accumulation guarded by a lock.
        lock = threading.Lock()
        Parallel(n_jobs=n_jobs, verbose=self.verbose, require="sharedmem")(
            delayed(_accumulate_prediction)(e.predict_proba, X, all_proba, lock)
            for e in self.estimators_
        )
        # Turn the sums into an average over all trees.
        for proba in all_proba:
            proba /= len(self.estimators_)
        if len(all_proba) == 1:
            return all_proba[0]
        else:
            return all_proba
    def predict_log_proba(self, X):
        """
        Predict class log-probabilities for X.
        The predicted class log-probabilities of an input sample is computed as
        the log of the mean predicted class probabilities of the trees in the
        forest.
        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            The input samples. Internally, its dtype will be converted to
            ``dtype=np.float32``. If a sparse matrix is provided, it will be
            converted into a sparse ``csr_matrix``.
        Returns
        -------
        p : ndarray of shape (n_samples, n_classes), or a list of such arrays
            The class probabilities of the input samples. The order of the
            classes corresponds to that in the attribute :term:`classes_`.
        """
        proba = self.predict_proba(X)
        if self.n_outputs_ == 1:
            return np.log(proba)
        else:
            for k in range(self.n_outputs_):
                proba[k] = np.log(proba[k])
            return proba
    def __sklearn_tags__(self):
        tags = super().__sklearn_tags__()
        tags.classifier_tags.multi_label = True
        return tags
class ForestRegressor(RegressorMixin, BaseForest, metaclass=ABCMeta):
    """
    Base class for forest of trees-based regressors.
    Warning: This class should not be used directly. Use derived classes
    instead.
    """
    @abstractmethod
    def __init__(
        self,
        estimator,
        n_estimators=100,
        *,
        estimator_params=tuple(),
        bootstrap=False,
        oob_score=False,
        n_jobs=None,
        random_state=None,
        verbose=0,
        warm_start=False,
        max_samples=None,
    ):
        # Delegates all storage to BaseForest (no `class_weight` for
        # regression).
        super().__init__(
            estimator,
            n_estimators=n_estimators,
            estimator_params=estimator_params,
            bootstrap=bootstrap,
            oob_score=oob_score,
            n_jobs=n_jobs,
            random_state=random_state,
            verbose=verbose,
            warm_start=warm_start,
            max_samples=max_samples,
        )
    def predict(self, X):
        """
        Predict regression target for X.
        The predicted regression target of an input sample is computed as the
        mean predicted regression targets of the trees in the forest.
        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            The input samples. Internally, its dtype will be converted to
            ``dtype=np.float32``. If a sparse matrix is provided, it will be
            converted into a sparse ``csr_matrix``.
        Returns
        -------
        y : ndarray of shape (n_samples,) or (n_samples, n_outputs)
            The predicted values.
        """
        check_is_fitted(self)
        # Check data
        X = self._validate_X_predict(X)
        # Assign chunk of trees to jobs
        n_jobs, _, _ = _partition_estimators(self.n_estimators, self.n_jobs)
        # avoid storing the output of every estimator by summing them here
        if self.n_outputs_ > 1:
            y_hat = np.zeros((X.shape[0], self.n_outputs_), dtype=np.float64)
        else:
            y_hat = np.zeros((X.shape[0]), dtype=np.float64)
        # Parallel loop
        lock = threading.Lock()
        Parallel(n_jobs=n_jobs, verbose=self.verbose, require="sharedmem")(
            delayed(_accumulate_prediction)(e.predict, X, [y_hat], lock)
            for e in self.estimators_
        )
        # Turn the sum into an average over all trees.
        y_hat /= len(self.estimators_)
        return y_hat
    @staticmethod
    def _get_oob_predictions(tree, X):
        """Compute the OOB predictions for an individual tree.
        Parameters
        ----------
        tree : DecisionTreeRegressor object
            A single decision tree regressor.
        X : ndarray of shape (n_samples, n_features)
            The OOB samples.
        Returns
        -------
        y_pred : ndarray of shape (n_samples, 1, n_outputs)
            The OOB associated predictions.
        """
        y_pred = tree.predict(X, check_input=False)
        # Insert a singleton "class" axis so regression and classification
        # OOB arrays share the same layout.
        if y_pred.ndim == 1:
            # single output regression
            y_pred = y_pred[:, np.newaxis, np.newaxis]
        else:
            # multioutput regression
            y_pred = y_pred[:, np.newaxis, :]
        return y_pred
    def _set_oob_score_and_attributes(self, X, y, scoring_function=None):
        """Compute and set the OOB score and attributes.
        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            The data matrix.
        y : ndarray of shape (n_samples, n_outputs)
            The target matrix.
        scoring_function : callable, default=None
            Scoring function for OOB score. Defaults to `r2_score`.
        """
        self.oob_prediction_ = super()._compute_oob_predictions(X, y).squeeze(axis=1)
        if self.oob_prediction_.shape[-1] == 1:
            # drop the n_outputs axis if there is a single output
            self.oob_prediction_ = self.oob_prediction_.squeeze(axis=-1)
        if scoring_function is None:
            scoring_function = r2_score
        self.oob_score_ = scoring_function(y, self.oob_prediction_)
    def _compute_partial_dependence_recursion(self, grid, target_features):
        """Fast partial dependence computation.
        Parameters
        ----------
        grid : ndarray of shape (n_samples, n_target_features), dtype=DTYPE
            The grid points on which the partial dependence should be
            evaluated.
        target_features : ndarray of shape (n_target_features), dtype=np.intp
            The set of target features for which the partial dependence
            should be evaluated.
        Returns
        -------
        averaged_predictions : ndarray of shape (n_samples,)
            The value of the partial dependence function on each grid point.
        """
        grid = np.asarray(grid, dtype=DTYPE, order="C")
        target_features = np.asarray(target_features, dtype=np.intp, order="C")
        averaged_predictions = np.zeros(
            shape=grid.shape[0], dtype=np.float64, order="C"
        )
        for tree in self.estimators_:
            # Note: we don't sum in parallel because the GIL isn't released in
            # the fast method.
            tree.tree_.compute_partial_dependence(
                grid, target_features, averaged_predictions
            )
        # Average over the forest
        averaged_predictions /= len(self.estimators_)
        return averaged_predictions
class RandomForestClassifier(ForestClassifier):
    """A random forest classifier.

    A random forest is a meta estimator that fits a number of decision
    tree classifiers on various sub-samples of the dataset and uses
    averaging to improve the predictive accuracy and control
    over-fitting.  Each tree searches for the best split, i.e. it is
    equivalent to passing ``splitter="best"`` to the underlying
    :class:`~sklearn.tree.DecisionTreeClassifier`.  With
    ``bootstrap=True`` (the default) the per-tree sample size is
    controlled by the ``max_samples`` parameter; otherwise every tree is
    built on the whole dataset.

    For a comparison between tree-based ensemble models see the example
    :ref:`sphx_glr_auto_examples_ensemble_plot_forest_hist_grad_boosting_comparison.py`.

    Read more in the :ref:`User Guide <forest>`.

    Parameters
    ----------
    n_estimators : int, default=100
        The number of trees in the forest.

        .. versionchanged:: 0.22
           The default value of ``n_estimators`` changed from 10 to 100
           in 0.22.

    criterion : {"gini", "entropy", "log_loss"}, default="gini"
        The function measuring split quality: "gini" for the Gini
        impurity, "entropy" and "log_loss" both for the Shannon
        information gain, see :ref:`tree_mathematical_formulation`.
        Note: this parameter is tree-specific.

    max_depth : int, default=None
        The maximum depth of the tree.  If None, nodes are expanded
        until all leaves are pure or contain less than
        ``min_samples_split`` samples.

    min_samples_split : int or float, default=2
        The minimum number of samples required to split an internal
        node.  An int is taken literally; a float is a fraction, with
        ``ceil(min_samples_split * n_samples)`` being the minimum number
        of samples for each split.

        .. versionchanged:: 0.18
           Added float values for fractions.

    min_samples_leaf : int or float, default=1
        The minimum number of samples required at a leaf node.  A split
        point at any depth is only considered if it leaves at least
        ``min_samples_leaf`` training samples in each of the left and
        right branches, which may smooth the model.  An int is taken
        literally; a float is a fraction, with
        ``ceil(min_samples_leaf * n_samples)`` being the minimum number
        of samples for each node.

        .. versionchanged:: 0.18
           Added float values for fractions.

    min_weight_fraction_leaf : float, default=0.0
        The minimum weighted fraction of the sum total of weights (of
        all the input samples) required at a leaf node.  Samples have
        equal weight when ``sample_weight`` is not provided.

    max_features : {"sqrt", "log2", None}, int or float, default="sqrt"
        The number of features considered when looking for the best
        split: an int is taken literally; a float is a fraction, with
        ``max(1, int(max_features * n_features_in_))`` features
        considered at each split; "sqrt" means
        ``max_features=sqrt(n_features)``; "log2" means
        ``max_features=log2(n_features)``; None means
        ``max_features=n_features``.  Note that the search for a split
        does not stop until at least one valid partition of the node
        samples is found, even if that requires inspecting more than
        ``max_features`` features.

        .. versionchanged:: 1.1
           The default of `max_features` changed from `"auto"` to
           `"sqrt"`.

    max_leaf_nodes : int, default=None
        Grow trees with at most ``max_leaf_nodes`` leaves in best-first
        fashion, where best nodes are defined by relative impurity
        reduction.  If None, the number of leaf nodes is unlimited.

    min_impurity_decrease : float, default=0.0
        A node is split only if the split induces an impurity decrease
        greater than or equal to this value.  The weighted impurity
        decrease equation is the following::

            N_t / N * (impurity - N_t_R / N_t * right_impurity
                                - N_t_L / N_t * left_impurity)

        where ``N`` is the total number of samples, ``N_t`` the number
        of samples at the current node, and ``N_t_L``/``N_t_R`` the
        number of samples in the left/right child.  All of ``N``,
        ``N_t``, ``N_t_R`` and ``N_t_L`` refer to the weighted sum when
        ``sample_weight`` is passed.

        .. versionadded:: 0.19

    bootstrap : bool, default=True
        Whether bootstrap samples are used when building trees.  If
        False, the whole dataset is used to build each tree.

    oob_score : bool or callable, default=False
        Whether to use out-of-bag samples to estimate the generalization
        score.  By default, :func:`~sklearn.metrics.accuracy_score` is
        used; provide a callable with signature `metric(y_true, y_pred)`
        for a custom metric.  Only available if `bootstrap=True`.

    n_jobs : int, default=None
        The number of jobs to run in parallel.  :meth:`fit`,
        :meth:`predict`, :meth:`decision_path` and :meth:`apply` are all
        parallelized over the trees.  ``None`` means 1 unless in a
        :obj:`joblib.parallel_backend` context; ``-1`` means using all
        processors.  See :term:`Glossary <n_jobs>` for more details.

    random_state : int, RandomState instance or None, default=None
        Controls both the randomness of the bootstrapping of the samples
        used when building trees (if ``bootstrap=True``) and the
        sampling of the features considered when looking for the best
        split at each node (if ``max_features < n_features``).
        See :term:`Glossary <random_state>` for details.

    verbose : int, default=0
        Controls the verbosity when fitting and predicting.

    warm_start : bool, default=False
        When set to ``True``, reuse the solution of the previous call to
        fit and add more estimators to the ensemble instead of fitting a
        whole new forest.  See :term:`Glossary <warm_start>` and
        :ref:`tree_ensemble_warm_start` for details.

    class_weight : {"balanced", "balanced_subsample"}, dict or list of dicts, \
            default=None
        Weights associated with classes in the form
        ``{class_label: weight}``.  If not given, all classes are
        supposed to have weight one.  For multi-output problems, a list
        of dicts can be provided in the same order as the columns of y.
        Note that for multioutput (including multilabel) weights should
        be defined for each class of every column in its own dict: for
        four-class multilabel classification weights should be
        [{0: 1, 1: 1}, {0: 1, 1: 5}, {0: 1, 1: 1}, {0: 1, 1: 1}] instead
        of [{1:1}, {2:5}, {3:1}, {4:1}].
        The "balanced" mode uses the values of y to automatically adjust
        weights inversely proportional to class frequencies in the input
        data as ``n_samples / (n_classes * np.bincount(y))``.  The
        "balanced_subsample" mode is the same, except that weights are
        computed based on the bootstrap sample for every tree grown.
        For multi-output, the weights of each column of y will be
        multiplied, and these weights will be multiplied with
        sample_weight (passed through the fit method) if sample_weight
        is specified.

    ccp_alpha : non-negative float, default=0.0
        Complexity parameter used for Minimal Cost-Complexity Pruning:
        the subtree with the largest cost complexity that is smaller
        than ``ccp_alpha`` will be chosen.  By default, no pruning is
        performed.  See :ref:`minimal_cost_complexity_pruning` for
        details and
        :ref:`sphx_glr_auto_examples_tree_plot_cost_complexity_pruning.py`
        for an example of such pruning.

        .. versionadded:: 0.22

    max_samples : int or float, default=None
        If bootstrap is True, the number of samples to draw from X to
        train each base estimator: None (default) draws ``X.shape[0]``
        samples; an int draws `max_samples` samples; a float draws
        `max(round(n_samples * max_samples), 1)` samples, so
        `max_samples` should be in the interval `(0.0, 1.0]`.

        .. versionadded:: 0.22

    monotonic_cst : array-like of int of shape (n_features), default=None
        Indicates the monotonicity constraint to enforce on each
        feature: 1 for a monotonic increase, 0 for no constraint and -1
        for a monotonic decrease.  If monotonic_cst is None, no
        constraints are applied.  Monotonicity constraints are not
        supported for multiclass classifications (i.e. when
        `n_classes > 2`), multioutput classifications (i.e. when
        `n_outputs_ > 1`), or classifications trained on data with
        missing values.  The constraints hold over the probability of
        the positive class.  Read more in the
        :ref:`User Guide <monotonic_cst_gbdt>`.

        .. versionadded:: 1.4

    Attributes
    ----------
    estimator_ : :class:`~sklearn.tree.DecisionTreeClassifier`
        The child estimator template used to create the collection of
        fitted sub-estimators.

        .. versionadded:: 1.2
           `base_estimator_` was renamed to `estimator_`.

    estimators_ : list of DecisionTreeClassifier
        The collection of fitted sub-estimators.

    classes_ : ndarray of shape (n_classes,) or a list of such arrays
        The classes labels (single output problem), or a list of arrays
        of class labels (multi-output problem).

    n_classes_ : int or list
        The number of classes (single output problem), or a list
        containing the number of classes for each output (multi-output
        problem).

    n_features_in_ : int
        Number of features seen during :term:`fit`.

        .. versionadded:: 0.24

    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during :term:`fit`.  Defined only when
        `X` has feature names that are all strings.

        .. versionadded:: 1.0

    n_outputs_ : int
        The number of outputs when ``fit`` is performed.

    feature_importances_ : ndarray of shape (n_features,)
        The impurity-based feature importances, also known as the Gini
        importance: the (normalized) total reduction of the criterion
        brought by each feature — the higher, the more important.
        Warning: impurity-based feature importances can be misleading
        for high cardinality features (many unique values).  See
        :func:`sklearn.inspection.permutation_importance` as an
        alternative.

    oob_score_ : float
        Score of the training dataset obtained using an out-of-bag
        estimate.  This attribute exists only when ``oob_score`` is
        True.

    oob_decision_function_ : ndarray of shape (n_samples, n_classes) or \
            (n_samples, n_classes, n_outputs)
        Decision function computed with out-of-bag estimate on the
        training set.  If n_estimators is small it might be possible
        that a data point was never left out during the bootstrap, in
        which case `oob_decision_function_` might contain NaN.  This
        attribute exists only when ``oob_score`` is True.

    estimators_samples_ : list of arrays
        The subset of drawn samples (i.e., the in-bag samples) for each
        base estimator.  Each subset is defined by an array of the
        indices selected.

        .. versionadded:: 1.4

    See Also
    --------
    sklearn.tree.DecisionTreeClassifier : A decision tree classifier.
    sklearn.ensemble.ExtraTreesClassifier : Ensemble of extremely
        randomized tree classifiers.
    sklearn.ensemble.HistGradientBoostingClassifier : A Histogram-based
        Gradient Boosting Classification Tree, very fast for big
        datasets (n_samples >= 10_000).

    Notes
    -----
    The default values for the parameters controlling the size of the
    trees (e.g. ``max_depth``, ``min_samples_leaf``, etc.) lead to fully
    grown and unpruned trees which can potentially be very large on some
    data sets.  To reduce memory consumption, the complexity and size of
    the trees should be controlled by setting those parameter values.

    The features are always randomly permuted at each split.  Therefore,
    the best found split may vary, even with the same training data,
    ``max_features=n_features`` and ``bootstrap=False``, if the
    improvement of the criterion is identical for several splits
    enumerated during the search of the best split.  To obtain a
    deterministic behaviour during fitting, ``random_state`` has to be
    fixed.

    References
    ----------
    .. [1] L. Breiman, "Random Forests", Machine Learning, 45(1), 5-32, 2001.

    Examples
    --------
    >>> from sklearn.ensemble import RandomForestClassifier
    >>> from sklearn.datasets import make_classification
    >>> X, y = make_classification(n_samples=1000, n_features=4,
    ...                            n_informative=2, n_redundant=0,
    ...                            random_state=0, shuffle=False)
    >>> clf = RandomForestClassifier(max_depth=2, random_state=0)
    >>> clf.fit(X, y)
    RandomForestClassifier(...)
    >>> print(clf.predict([[0, 0, 0, 0]]))
    [1]
    """

    _parameter_constraints: dict = {
        **ForestClassifier._parameter_constraints,
        **DecisionTreeClassifier._parameter_constraints,
        "class_weight": [
            StrOptions({"balanced_subsample", "balanced"}),
            dict,
            list,
            None,
        ],
    }
    # Random forests always use the "best" splitter; the option is not
    # exposed to users.
    _parameter_constraints.pop("splitter")

    def __init__(
        self,
        n_estimators=100,
        *,
        criterion="gini",
        max_depth=None,
        min_samples_split=2,
        min_samples_leaf=1,
        min_weight_fraction_leaf=0.0,
        max_features="sqrt",
        max_leaf_nodes=None,
        min_impurity_decrease=0.0,
        bootstrap=True,
        oob_score=False,
        n_jobs=None,
        random_state=None,
        verbose=0,
        warm_start=False,
        class_weight=None,
        ccp_alpha=0.0,
        max_samples=None,
        monotonic_cst=None,
    ):
        # Names of the hyper-parameters forwarded verbatim from the
        # forest onto every DecisionTreeClassifier sub-estimator.
        tree_param_names = (
            "criterion",
            "max_depth",
            "min_samples_split",
            "min_samples_leaf",
            "min_weight_fraction_leaf",
            "max_features",
            "max_leaf_nodes",
            "min_impurity_decrease",
            "random_state",
            "ccp_alpha",
            "monotonic_cst",
        )
        super().__init__(
            estimator=DecisionTreeClassifier(),
            n_estimators=n_estimators,
            estimator_params=tree_param_names,
            bootstrap=bootstrap,
            oob_score=oob_score,
            n_jobs=n_jobs,
            random_state=random_state,
            verbose=verbose,
            warm_start=warm_start,
            class_weight=class_weight,
            max_samples=max_samples,
        )
        # Tree hyper-parameters are stored on the forest itself and
        # copied onto each sub-estimator via `estimator_params`.
        self.criterion = criterion
        self.max_depth = max_depth
        self.min_samples_split = min_samples_split
        self.min_samples_leaf = min_samples_leaf
        self.min_weight_fraction_leaf = min_weight_fraction_leaf
        self.max_features = max_features
        self.max_leaf_nodes = max_leaf_nodes
        self.min_impurity_decrease = min_impurity_decrease
        self.monotonic_cst = monotonic_cst
        self.ccp_alpha = ccp_alpha
class RandomForestRegressor(ForestRegressor):
    """A random forest regressor.

    A random forest is a meta estimator that fits a number of decision
    tree regressors on various sub-samples of the dataset and uses
    averaging to improve the predictive accuracy and control
    over-fitting.  Each tree searches for the best split, i.e. it is
    equivalent to passing ``splitter="best"`` to the underlying
    :class:`~sklearn.tree.DecisionTreeRegressor`.  With
    ``bootstrap=True`` (the default) the per-tree sample size is
    controlled by the ``max_samples`` parameter; otherwise every tree is
    built on the whole dataset.

    For a comparison between tree-based ensemble models see the example
    :ref:`sphx_glr_auto_examples_ensemble_plot_forest_hist_grad_boosting_comparison.py`.

    Read more in the :ref:`User Guide <forest>`.

    Parameters
    ----------
    n_estimators : int, default=100
        The number of trees in the forest.

        .. versionchanged:: 0.22
           The default value of ``n_estimators`` changed from 10 to 100
           in 0.22.

    criterion : {"squared_error", "absolute_error", "friedman_mse", "poisson"}, \
            default="squared_error"
        The function measuring split quality: "squared_error" for the
        mean squared error, which is equal to variance reduction as
        feature selection criterion and minimizes the L2 loss using the
        mean of each terminal node; "friedman_mse", which uses mean
        squared error with Friedman's improvement score for potential
        splits; "absolute_error" for the mean absolute error, which
        minimizes the L1 loss using the median of each terminal node;
        and "poisson", which uses reduction in Poisson deviance to find
        splits.  Training using "absolute_error" is significantly slower
        than when using "squared_error".

        .. versionadded:: 0.18
           Mean Absolute Error (MAE) criterion.

        .. versionadded:: 1.0
           Poisson criterion.

    max_depth : int, default=None
        The maximum depth of the tree.  If None, nodes are expanded
        until all leaves are pure or contain less than
        ``min_samples_split`` samples.

    min_samples_split : int or float, default=2
        The minimum number of samples required to split an internal
        node.  An int is taken literally; a float is a fraction, with
        ``ceil(min_samples_split * n_samples)`` being the minimum number
        of samples for each split.

        .. versionchanged:: 0.18
           Added float values for fractions.

    min_samples_leaf : int or float, default=1
        The minimum number of samples required at a leaf node.  A split
        point at any depth is only considered if it leaves at least
        ``min_samples_leaf`` training samples in each of the left and
        right branches, which may smooth the model, especially in
        regression.  An int is taken literally; a float is a fraction,
        with ``ceil(min_samples_leaf * n_samples)`` being the minimum
        number of samples for each node.

        .. versionchanged:: 0.18
           Added float values for fractions.

    min_weight_fraction_leaf : float, default=0.0
        The minimum weighted fraction of the sum total of weights (of
        all the input samples) required at a leaf node.  Samples have
        equal weight when ``sample_weight`` is not provided.

    max_features : {"sqrt", "log2", None}, int or float, default=1.0
        The number of features considered when looking for the best
        split: an int is taken literally; a float is a fraction, with
        ``max(1, int(max_features * n_features_in_))`` features
        considered at each split; "sqrt" means
        ``max_features=sqrt(n_features)``; "log2" means
        ``max_features=log2(n_features)``; None or 1.0 means
        ``max_features=n_features``.  Note that the search for a split
        does not stop until at least one valid partition of the node
        samples is found, even if that requires inspecting more than
        ``max_features`` features.

        .. note::
            The default of 1.0 is equivalent to bagged trees and more
            randomness can be achieved by setting smaller values, e.g.
            0.3.

        .. versionchanged:: 1.1
           The default of `max_features` changed from `"auto"` to 1.0.

    max_leaf_nodes : int, default=None
        Grow trees with at most ``max_leaf_nodes`` leaves in best-first
        fashion, where best nodes are defined by relative impurity
        reduction.  If None, the number of leaf nodes is unlimited.

    min_impurity_decrease : float, default=0.0
        A node is split only if the split induces an impurity decrease
        greater than or equal to this value.  The weighted impurity
        decrease equation is the following::

            N_t / N * (impurity - N_t_R / N_t * right_impurity
                                - N_t_L / N_t * left_impurity)

        where ``N`` is the total number of samples, ``N_t`` the number
        of samples at the current node, and ``N_t_L``/``N_t_R`` the
        number of samples in the left/right child.  All of ``N``,
        ``N_t``, ``N_t_R`` and ``N_t_L`` refer to the weighted sum when
        ``sample_weight`` is passed.

        .. versionadded:: 0.19

    bootstrap : bool, default=True
        Whether bootstrap samples are used when building trees.  If
        False, the whole dataset is used to build each tree.

    oob_score : bool or callable, default=False
        Whether to use out-of-bag samples to estimate the generalization
        score.  By default, :func:`~sklearn.metrics.r2_score` is used;
        provide a callable with signature `metric(y_true, y_pred)` for a
        custom metric.  Only available if `bootstrap=True`.

    n_jobs : int, default=None
        The number of jobs to run in parallel.  :meth:`fit`,
        :meth:`predict`, :meth:`decision_path` and :meth:`apply` are all
        parallelized over the trees.  ``None`` means 1 unless in a
        :obj:`joblib.parallel_backend` context; ``-1`` means using all
        processors.  See :term:`Glossary <n_jobs>` for more details.

    random_state : int, RandomState instance or None, default=None
        Controls both the randomness of the bootstrapping of the samples
        used when building trees (if ``bootstrap=True``) and the
        sampling of the features considered when looking for the best
        split at each node (if ``max_features < n_features``).
        See :term:`Glossary <random_state>` for details.

    verbose : int, default=0
        Controls the verbosity when fitting and predicting.

    warm_start : bool, default=False
        When set to ``True``, reuse the solution of the previous call to
        fit and add more estimators to the ensemble instead of fitting a
        whole new forest.  See :term:`Glossary <warm_start>` and
        :ref:`tree_ensemble_warm_start` for details.

    ccp_alpha : non-negative float, default=0.0
        Complexity parameter used for Minimal Cost-Complexity Pruning:
        the subtree with the largest cost complexity that is smaller
        than ``ccp_alpha`` will be chosen.  By default, no pruning is
        performed.  See :ref:`minimal_cost_complexity_pruning` for
        details and
        :ref:`sphx_glr_auto_examples_tree_plot_cost_complexity_pruning.py`
        for an example of such pruning.

        .. versionadded:: 0.22

    max_samples : int or float, default=None
        If bootstrap is True, the number of samples to draw from X to
        train each base estimator: None (default) draws ``X.shape[0]``
        samples; an int draws `max_samples` samples; a float draws
        `max(round(n_samples * max_samples), 1)` samples, so
        `max_samples` should be in the interval `(0.0, 1.0]`.

        .. versionadded:: 0.22

    monotonic_cst : array-like of int of shape (n_features), default=None
        Indicates the monotonicity constraint to enforce on each
        feature: 1 for monotonically increasing, 0 for no constraint and
        -1 for monotonically decreasing.  If monotonic_cst is None, no
        constraints are applied.  Monotonicity constraints are not
        supported for multioutput regressions (i.e. when
        `n_outputs_ > 1`) or regressions trained on data with missing
        values.  Read more in the :ref:`User Guide <monotonic_cst_gbdt>`.

        .. versionadded:: 1.4

    Attributes
    ----------
    estimator_ : :class:`~sklearn.tree.DecisionTreeRegressor`
        The child estimator template used to create the collection of
        fitted sub-estimators.

        .. versionadded:: 1.2
           `base_estimator_` was renamed to `estimator_`.

    estimators_ : list of DecisionTreeRegressor
        The collection of fitted sub-estimators.

    feature_importances_ : ndarray of shape (n_features,)
        The impurity-based feature importances, also known as the Gini
        importance: the (normalized) total reduction of the criterion
        brought by each feature — the higher, the more important.
        Warning: impurity-based feature importances can be misleading
        for high cardinality features (many unique values).  See
        :func:`sklearn.inspection.permutation_importance` as an
        alternative.

    n_features_in_ : int
        Number of features seen during :term:`fit`.

        .. versionadded:: 0.24

    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during :term:`fit`.  Defined only when
        `X` has feature names that are all strings.

        .. versionadded:: 1.0

    n_outputs_ : int
        The number of outputs when ``fit`` is performed.

    oob_score_ : float
        Score of the training dataset obtained using an out-of-bag
        estimate.  This attribute exists only when ``oob_score`` is
        True.

    oob_prediction_ : ndarray of shape (n_samples,) or (n_samples, n_outputs)
        Prediction computed with out-of-bag estimate on the training
        set.  This attribute exists only when ``oob_score`` is True.

    estimators_samples_ : list of arrays
        The subset of drawn samples (i.e., the in-bag samples) for each
        base estimator.  Each subset is defined by an array of the
        indices selected.

        .. versionadded:: 1.4

    See Also
    --------
    sklearn.tree.DecisionTreeRegressor : A decision tree regressor.
    sklearn.ensemble.ExtraTreesRegressor : Ensemble of extremely
        randomized tree regressors.
    sklearn.ensemble.HistGradientBoostingRegressor : A Histogram-based
        Gradient Boosting Regression Tree, very fast for big datasets
        (n_samples >= 10_000).

    Notes
    -----
    The default values for the parameters controlling the size of the
    trees (e.g. ``max_depth``, ``min_samples_leaf``, etc.) lead to fully
    grown and unpruned trees which can potentially be very large on some
    data sets.  To reduce memory consumption, the complexity and size of
    the trees should be controlled by setting those parameter values.

    The features are always randomly permuted at each split.  Therefore,
    the best found split may vary, even with the same training data,
    ``max_features=n_features`` and ``bootstrap=False``, if the
    improvement of the criterion is identical for several splits
    enumerated during the search of the best split.  To obtain a
    deterministic behaviour during fitting, ``random_state`` has to be
    fixed.

    The default value ``max_features=1.0`` uses ``n_features`` rather
    than ``n_features / 3``.  The latter was originally suggested in
    [1], whereas the former was more recently justified empirically in
    [2].

    References
    ----------
    .. [1] L. Breiman, "Random Forests", Machine Learning, 45(1), 5-32, 2001.

    .. [2] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized
           trees", Machine Learning, 63(1), 3-42, 2006.

    Examples
    --------
    >>> from sklearn.ensemble import RandomForestRegressor
    >>> from sklearn.datasets import make_regression
    >>> X, y = make_regression(n_features=4, n_informative=2,
    ...                        random_state=0, shuffle=False)
    >>> regr = RandomForestRegressor(max_depth=2, random_state=0)
    >>> regr.fit(X, y)
    RandomForestRegressor(...)
    >>> print(regr.predict([[0, 0, 0, 0]]))
    [-8.32987858]
    """

    _parameter_constraints: dict = {
        **ForestRegressor._parameter_constraints,
        **DecisionTreeRegressor._parameter_constraints,
    }
    # Random forests always use the "best" splitter; the option is not
    # exposed to users.
    _parameter_constraints.pop("splitter")

    def __init__(
        self,
        n_estimators=100,
        *,
        criterion="squared_error",
        max_depth=None,
        min_samples_split=2,
        min_samples_leaf=1,
        min_weight_fraction_leaf=0.0,
        max_features=1.0,
        max_leaf_nodes=None,
        min_impurity_decrease=0.0,
        bootstrap=True,
        oob_score=False,
        n_jobs=None,
        random_state=None,
        verbose=0,
        warm_start=False,
        ccp_alpha=0.0,
        max_samples=None,
        monotonic_cst=None,
    ):
        # Names of the hyper-parameters forwarded verbatim from the
        # forest onto every DecisionTreeRegressor sub-estimator.
        tree_param_names = (
            "criterion",
            "max_depth",
            "min_samples_split",
            "min_samples_leaf",
            "min_weight_fraction_leaf",
            "max_features",
            "max_leaf_nodes",
            "min_impurity_decrease",
            "random_state",
            "ccp_alpha",
            "monotonic_cst",
        )
        super().__init__(
            estimator=DecisionTreeRegressor(),
            n_estimators=n_estimators,
            estimator_params=tree_param_names,
            bootstrap=bootstrap,
            oob_score=oob_score,
            n_jobs=n_jobs,
            random_state=random_state,
            verbose=verbose,
            warm_start=warm_start,
            max_samples=max_samples,
        )
        # Tree hyper-parameters are stored on the forest itself and
        # copied onto each sub-estimator via `estimator_params`.
        self.criterion = criterion
        self.max_depth = max_depth
        self.min_samples_split = min_samples_split
        self.min_samples_leaf = min_samples_leaf
        self.min_weight_fraction_leaf = min_weight_fraction_leaf
        self.max_features = max_features
        self.max_leaf_nodes = max_leaf_nodes
        self.min_impurity_decrease = min_impurity_decrease
        self.ccp_alpha = ccp_alpha
        self.monotonic_cst = monotonic_cst
class ExtraTreesClassifier(ForestClassifier):
"""
An extra-trees classifier.
This class implements a meta estimator that fits a number of
randomized decision trees (a.k.a. extra-trees) on various sub-samples
of the dataset and uses averaging to improve the predictive accuracy
and control over-fitting.
Read more in the :ref:`User Guide <forest>`.
Parameters
----------
n_estimators : int, default=100
The number of trees in the forest.
.. versionchanged:: 0.22
The default value of ``n_estimators`` changed from 10 to 100
in 0.22.
criterion : {"gini", "entropy", "log_loss"}, default="gini"
The function to measure the quality of a split. Supported criteria are
"gini" for the Gini impurity and "log_loss" and "entropy" both for the
Shannon information gain, see :ref:`tree_mathematical_formulation`.
Note: This parameter is tree-specific.
max_depth : int, default=None
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
min_samples_split : int or float, default=2
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a fraction and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
.. versionchanged:: 0.18
Added float values for fractions.
min_samples_leaf : int or float, default=1
The minimum number of samples required to be at a leaf node.
A split point at any depth will only be considered if it leaves at
least ``min_samples_leaf`` training samples in each of the left and
right branches. This may have the effect of smoothing the model,
especially in regression.
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a fraction and
`ceil(min_samples_leaf * n_samples)` are the minimum
number of samples for each node.
.. versionchanged:: 0.18
Added float values for fractions.
min_weight_fraction_leaf : float, default=0.0
The minimum weighted fraction of the sum total of weights (of all
the input samples) required to be at a leaf node. Samples have
equal weight when sample_weight is not provided.
max_features : {"sqrt", "log2", None}, int or float, default="sqrt"
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a fraction and
`max(1, int(max_features * n_features_in_))` features are considered at each
split.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
.. versionchanged:: 1.1
The default of `max_features` changed from `"auto"` to `"sqrt"`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_leaf_nodes : int, default=None
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
min_impurity_decrease : float, default=0.0
A node will be split if this split induces a decrease of the impurity
greater than or equal to this value.
The weighted impurity decrease equation is the following::
N_t / N * (impurity - N_t_R / N_t * right_impurity
- N_t_L / N_t * left_impurity)
where ``N`` is the total number of samples, ``N_t`` is the number of
samples at the current node, ``N_t_L`` is the number of samples in the
left child, and ``N_t_R`` is the number of samples in the right child.
``N``, ``N_t``, ``N_t_R`` and ``N_t_L`` all refer to the weighted sum,
if ``sample_weight`` is passed.
.. versionadded:: 0.19
bootstrap : bool, default=False
Whether bootstrap samples are used when building trees. If False, the
whole dataset is used to build each tree.
oob_score : bool or callable, default=False
Whether to use out-of-bag samples to estimate the generalization score.
By default, :func:`~sklearn.metrics.accuracy_score` is used.
Provide a callable with signature `metric(y_true, y_pred)` to use a
custom metric. Only available if `bootstrap=True`.
n_jobs : int, default=None
The number of jobs to run in parallel. :meth:`fit`, :meth:`predict`,
:meth:`decision_path` and :meth:`apply` are all parallelized over the
trees. ``None`` means 1 unless in a :obj:`joblib.parallel_backend`
context. ``-1`` means using all processors. See :term:`Glossary
<n_jobs>` for more details.
random_state : int, RandomState instance or None, default=None
Controls 3 sources of randomness:
- the bootstrapping of the samples used when building trees
(if ``bootstrap=True``)
- the sampling of the features to consider when looking for the best
split at each node (if ``max_features < n_features``)
- the draw of the splits for each of the `max_features`
See :term:`Glossary <random_state>` for details.
verbose : int, default=0
Controls the verbosity when fitting and predicting.
warm_start : bool, default=False
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest. See :term:`Glossary <warm_start>` and
:ref:`tree_ensemble_warm_start` for details.
class_weight : {"balanced", "balanced_subsample"}, dict or list of dicts, \
default=None
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one. For
multi-output problems, a list of dicts can be provided in the same
order as the columns of y.
Note that for multioutput (including multilabel) weights should be
defined for each class of every column in its own dict. For example,
for four-class multilabel classification weights should be
[{0: 1, 1: 1}, {0: 1, 1: 5}, {0: 1, 1: 1}, {0: 1, 1: 1}] instead of
[{1:1}, {2:5}, {3:1}, {4:1}].
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
The "balanced_subsample" mode is the same as "balanced" except that
weights are computed based on the bootstrap sample for every tree
grown.
For multi-output, the weights of each column of y will be multiplied.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
ccp_alpha : non-negative float, default=0.0
Complexity parameter used for Minimal Cost-Complexity Pruning. The
subtree with the largest cost complexity that is smaller than
``ccp_alpha`` will be chosen. By default, no pruning is performed. See
:ref:`minimal_cost_complexity_pruning` for details. See
:ref:`sphx_glr_auto_examples_tree_plot_cost_complexity_pruning.py`
for an example of such pruning.
.. versionadded:: 0.22
max_samples : int or float, default=None
If bootstrap is True, the number of samples to draw from X
to train each base estimator.
- If None (default), then draw `X.shape[0]` samples.
- If int, then draw `max_samples` samples.
- If float, then draw `max_samples * X.shape[0]` samples. Thus,
`max_samples` should be in the interval `(0.0, 1.0]`.
.. versionadded:: 0.22
monotonic_cst : array-like of int of shape (n_features), default=None
Indicates the monotonicity constraint to enforce on each feature.
- 1: monotonically increasing
- 0: no constraint
- -1: monotonically decreasing
If monotonic_cst is None, no constraints are applied.
Monotonicity constraints are not supported for:
- multiclass classifications (i.e. when `n_classes > 2`),
- multioutput classifications (i.e. when `n_outputs_ > 1`),
- classifications trained on data with missing values.
The constraints hold over the probability of the positive class.
Read more in the :ref:`User Guide <monotonic_cst_gbdt>`.
.. versionadded:: 1.4
Attributes
----------
estimator_ : :class:`~sklearn.tree.ExtraTreeClassifier`
The child estimator template used to create the collection of fitted
sub-estimators.
.. versionadded:: 1.2
`base_estimator_` was renamed to `estimator_`.
estimators_ : list of DecisionTreeClassifier
The collection of fitted sub-estimators.
classes_ : ndarray of shape (n_classes,) or a list of such arrays
The classes labels (single output problem), or a list of arrays of
class labels (multi-output problem).
n_classes_ : int or list
The number of classes (single output problem), or a list containing the
number of classes for each output (multi-output problem).
feature_importances_ : ndarray of shape (n_features,)
The impurity-based feature importances.
The higher, the more important the feature.
The importance of a feature is computed as the (normalized)
total reduction of the criterion brought by that feature. It is also
known as the Gini importance.
Warning: impurity-based feature importances can be misleading for
high cardinality features (many unique values). See
:func:`sklearn.inspection.permutation_importance` as an alternative.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
n_outputs_ : int
The number of outputs when ``fit`` is performed.
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
This attribute exists only when ``oob_score`` is True.
oob_decision_function_ : ndarray of shape (n_samples, n_classes) or \
(n_samples, n_classes, n_outputs)
Decision function computed with out-of-bag estimate on the training
set. If n_estimators is small it might be possible that a data point
was never left out during the bootstrap. In this case,
`oob_decision_function_` might contain NaN. This attribute exists
only when ``oob_score`` is True.
estimators_samples_ : list of arrays
The subset of drawn samples (i.e., the in-bag samples) for each base
estimator. Each subset is defined by an array of the indices selected.
.. versionadded:: 1.4
See Also
--------
ExtraTreesRegressor : An extra-trees regressor with random splits.
RandomForestClassifier : A random forest classifier with optimal splits.
RandomForestRegressor : Ensemble regressor using trees with optimal splits.
Notes
-----
The default values for the parameters controlling the size of the trees
(e.g. ``max_depth``, ``min_samples_leaf``, etc.) lead to fully grown and
unpruned trees which can potentially be very large on some data sets. To
reduce memory consumption, the complexity and size of the trees should be
controlled by setting those parameter values.
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized
trees", Machine Learning, 63(1), 3-42, 2006.
Examples
--------
>>> from sklearn.ensemble import ExtraTreesClassifier
>>> from sklearn.datasets import make_classification
>>> X, y = make_classification(n_features=4, random_state=0)
>>> clf = ExtraTreesClassifier(n_estimators=100, random_state=0)
>>> clf.fit(X, y)
ExtraTreesClassifier(random_state=0)
>>> clf.predict([[0, 0, 0, 0]])
array([1])
"""
_parameter_constraints: dict = {
**ForestClassifier._parameter_constraints,
**DecisionTreeClassifier._parameter_constraints,
"class_weight": [
StrOptions({"balanced_subsample", "balanced"}),
dict,
list,
None,
],
}
_parameter_constraints.pop("splitter")
    def __init__(
        self,
        n_estimators=100,
        *,
        criterion="gini",
        max_depth=None,
        min_samples_split=2,
        min_samples_leaf=1,
        min_weight_fraction_leaf=0.0,
        max_features="sqrt",
        max_leaf_nodes=None,
        min_impurity_decrease=0.0,
        bootstrap=False,
        oob_score=False,
        n_jobs=None,
        random_state=None,
        verbose=0,
        warm_start=False,
        class_weight=None,
        ccp_alpha=0.0,
        max_samples=None,
        monotonic_cst=None,
    ):
        # sklearn convention: __init__ only stores hyper-parameters verbatim
        # (no validation, no derived state) so that get_params/set_params and
        # clone() keep working via signature introspection; validation
        # happens in fit().
        super().__init__(
            estimator=ExtraTreeClassifier(),
            n_estimators=n_estimators,
            # Names of the hyper-parameters forwarded from the ensemble to
            # each ExtraTreeClassifier when the forest is built.
            estimator_params=(
                "criterion",
                "max_depth",
                "min_samples_split",
                "min_samples_leaf",
                "min_weight_fraction_leaf",
                "max_features",
                "max_leaf_nodes",
                "min_impurity_decrease",
                "random_state",
                "ccp_alpha",
                "monotonic_cst",
            ),
            bootstrap=bootstrap,
            oob_score=oob_score,
            n_jobs=n_jobs,
            random_state=random_state,
            verbose=verbose,
            warm_start=warm_start,
            class_weight=class_weight,
            max_samples=max_samples,
        )
        # Tree-level hyper-parameters, stored on the ensemble and forwarded
        # to every sub-estimator via `estimator_params` above.
        self.criterion = criterion
        self.max_depth = max_depth
        self.min_samples_split = min_samples_split
        self.min_samples_leaf = min_samples_leaf
        self.min_weight_fraction_leaf = min_weight_fraction_leaf
        self.max_features = max_features
        self.max_leaf_nodes = max_leaf_nodes
        self.min_impurity_decrease = min_impurity_decrease
        self.ccp_alpha = ccp_alpha
        self.monotonic_cst = monotonic_cst
class ExtraTreesRegressor(ForestRegressor):
    """
    An extra-trees regressor.

    A meta estimator that fits a number of randomized decision trees
    (a.k.a. extra-trees) on various sub-samples of the dataset and uses
    averaging to improve the predictive accuracy and control over-fitting.

    Read more in the :ref:`User Guide <forest>`.

    Parameters
    ----------
    n_estimators : int, default=100
        The number of trees in the forest.
    criterion : {"squared_error", "absolute_error", "friedman_mse", \
            "poisson"}, default="squared_error"
        The function to measure the quality of a split.
    max_depth : int, default=None
        The maximum depth of the tree. If None, nodes are expanded until all
        leaves are pure or contain fewer than ``min_samples_split`` samples.
    min_samples_split : int or float, default=2
        Minimum number (int) or fraction (float) of samples required to
        split an internal node.
    min_samples_leaf : int or float, default=1
        Minimum number (int) or fraction (float) of samples required to be
        at a leaf node.
    min_weight_fraction_leaf : float, default=0.0
        Minimum weighted fraction of the total sample weight required at a
        leaf node.
    max_features : {"sqrt", "log2", None}, int or float, default=1.0
        Number of features to consider when looking for the best split.
        None or 1.0 means all features.
    max_leaf_nodes : int, default=None
        Grow trees with at most ``max_leaf_nodes`` leaves in best-first
        fashion; None means unlimited.
    min_impurity_decrease : float, default=0.0
        A node is split only if the split induces an impurity decrease of at
        least this value.
    bootstrap : bool, default=False
        Whether bootstrap samples are used when building trees. If False,
        the whole dataset is used to build each tree.
    oob_score : bool or callable, default=False
        Whether to estimate the generalization score on out-of-bag samples
        (:func:`~sklearn.metrics.r2_score` by default). Only available if
        ``bootstrap=True``.
    n_jobs : int, default=None
        The number of jobs to run in parallel; ``-1`` uses all processors.
    random_state : int, RandomState instance or None, default=None
        Controls the bootstrapping, feature sampling and the random split
        draws.
    verbose : int, default=0
        Controls the verbosity when fitting and predicting.
    warm_start : bool, default=False
        When True, reuse the solution of the previous fit and add more
        estimators to the ensemble.
    ccp_alpha : non-negative float, default=0.0
        Complexity parameter for Minimal Cost-Complexity Pruning; 0 means
        no pruning.
    max_samples : int or float, default=None
        If ``bootstrap=True``, number (int) or fraction (float) of samples
        drawn from X to train each base estimator; None draws all.
    monotonic_cst : array-like of int of shape (n_features), default=None
        Per-feature monotonicity constraint: 1 (increasing), 0 (none),
        -1 (decreasing). Not supported for multioutput regression or data
        with missing values.

    Attributes
    ----------
    estimator_ : :class:`~sklearn.tree.ExtraTreeRegressor`
        The child estimator template used to create the fitted
        sub-estimators.
    estimators_ : list of DecisionTreeRegressor
        The collection of fitted sub-estimators.
    feature_importances_ : ndarray of shape (n_features,)
        The impurity-based (Gini) feature importances.
    n_features_in_ : int
        Number of features seen during :term:`fit`.
    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during :term:`fit`.
    n_outputs_ : int
        The number of outputs.
    oob_score_ : float
        Out-of-bag score of the training dataset (only when ``oob_score``
        is True).
    oob_prediction_ : ndarray of shape (n_samples,) or (n_samples, n_outputs)
        Out-of-bag prediction on the training set (only when ``oob_score``
        is True).
    estimators_samples_ : list of arrays
        The in-bag sample indices for each base estimator.

    Notes
    -----
    The default tree-size parameters lead to fully grown, unpruned trees
    which can be very large on some data sets; tune them to reduce memory
    consumption.

    References
    ----------
    .. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized
           trees", Machine Learning, 63(1), 3-42, 2006.
    """

    # Valid-parameter specification for sklearn's `validate_params`
    # machinery: merge the forest-level and tree-level constraints.
    _parameter_constraints: dict = {
        **ForestRegressor._parameter_constraints,
        **DecisionTreeRegressor._parameter_constraints,
    }
    # Extra-trees always use randomized splits, so the underlying
    # DecisionTree "splitter" parameter is not exposed to the user.
    _parameter_constraints.pop("splitter")

    def __init__(
        self,
        n_estimators=100,
        *,
        criterion="squared_error",
        max_depth=None,
        min_samples_split=2,
        min_samples_leaf=1,
        min_weight_fraction_leaf=0.0,
        max_features=1.0,
        max_leaf_nodes=None,
        min_impurity_decrease=0.0,
        bootstrap=False,
        oob_score=False,
        n_jobs=None,
        random_state=None,
        verbose=0,
        warm_start=False,
        ccp_alpha=0.0,
        max_samples=None,
        monotonic_cst=None,
    ):
        # sklearn convention: __init__ only stores hyper-parameters verbatim
        # so that get_params/set_params/clone work via signature
        # introspection; validation happens in fit().
        super().__init__(
            estimator=ExtraTreeRegressor(),
            n_estimators=n_estimators,
            # Names of the hyper-parameters forwarded from the ensemble to
            # each ExtraTreeRegressor when the forest is built.
            estimator_params=(
                "criterion",
                "max_depth",
                "min_samples_split",
                "min_samples_leaf",
                "min_weight_fraction_leaf",
                "max_features",
                "max_leaf_nodes",
                "min_impurity_decrease",
                "random_state",
                "ccp_alpha",
                "monotonic_cst",
            ),
            bootstrap=bootstrap,
            oob_score=oob_score,
            n_jobs=n_jobs,
            random_state=random_state,
            verbose=verbose,
            warm_start=warm_start,
            max_samples=max_samples,
        )
        # Tree-level hyper-parameters, stored on the ensemble and forwarded
        # to every sub-estimator via `estimator_params` above.
        self.criterion = criterion
        self.max_depth = max_depth
        self.min_samples_split = min_samples_split
        self.min_samples_leaf = min_samples_leaf
        self.min_weight_fraction_leaf = min_weight_fraction_leaf
        self.max_features = max_features
        self.max_leaf_nodes = max_leaf_nodes
        self.min_impurity_decrease = min_impurity_decrease
        self.ccp_alpha = ccp_alpha
        self.monotonic_cst = monotonic_cst
class RandomTreesEmbedding(TransformerMixin, BaseForest):
    """
    An ensemble of totally random trees.

    An unsupervised transformation of a dataset to a high-dimensional sparse
    representation. A datapoint is coded according to which leaf of each
    tree it is sorted into; using a one-hot encoding of the leaves, this
    leads to a binary coding with as many ones as there are trees in the
    forest.

    The dimensionality of the resulting representation is
    ``n_out <= n_estimators * max_leaf_nodes``. If ``max_leaf_nodes ==
    None``, the number of leaf nodes is at most
    ``n_estimators * 2 ** max_depth``.

    Read more in the :ref:`User Guide <random_trees_embedding>`.

    Parameters
    ----------
    n_estimators : int, default=100
        Number of trees in the forest.
    max_depth : int, default=5
        The maximum depth of each tree. If None, nodes are expanded until
        all leaves are pure or contain fewer than ``min_samples_split``
        samples.
    min_samples_split : int or float, default=2
        Minimum number (int) or fraction (float) of samples required to
        split an internal node.
    min_samples_leaf : int or float, default=1
        Minimum number (int) or fraction (float) of samples required to be
        at a leaf node.
    min_weight_fraction_leaf : float, default=0.0
        Minimum weighted fraction of the total sample weight required at a
        leaf node.
    max_leaf_nodes : int, default=None
        Grow trees with at most ``max_leaf_nodes`` leaves in best-first
        fashion; None means unlimited.
    min_impurity_decrease : float, default=0.0
        A node is split only if the split induces an impurity decrease of
        at least this value.
    sparse_output : bool, default=True
        Whether to return a sparse CSR matrix (the default) or a dense
        array compatible with dense pipeline operators.
    n_jobs : int, default=None
        The number of jobs to run in parallel; ``-1`` uses all processors.
    random_state : int, RandomState instance or None, default=None
        Controls the generation of the random ``y`` used to fit the trees
        and the draw of the splits for each feature at the trees' nodes.
    verbose : int, default=0
        Controls the verbosity when fitting and predicting.
    warm_start : bool, default=False
        When True, reuse the solution of the previous fit and add more
        estimators to the ensemble.

    Attributes
    ----------
    estimator_ : :class:`~sklearn.tree.ExtraTreeRegressor` instance
        The child estimator template used to create the fitted
        sub-estimators.
    estimators_ : list of :class:`~sklearn.tree.ExtraTreeRegressor` instances
        The collection of fitted sub-estimators.
    feature_importances_ : ndarray of shape (n_features,)
        The feature importances (the higher, the more important).
    n_features_in_ : int
        Number of features seen during :term:`fit`.
    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during :term:`fit`.
    n_outputs_ : int
        The number of outputs when ``fit`` is performed.
    one_hot_encoder_ : OneHotEncoder instance
        One-hot encoder used to create the sparse embedding.
    estimators_samples_ : list of arrays
        The in-bag sample indices for each base estimator.

    References
    ----------
    .. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized
           trees", Machine Learning, 63(1), 3-42, 2006.
    .. [2] Moosmann, F. and Triggs, B. and Jurie, F. "Fast discriminative
           visual codebooks using randomized clustering forests", NIPS 2007.
    """

    # Valid-parameter specification for sklearn's `validate_params`
    # machinery.
    _parameter_constraints: dict = {
        "n_estimators": [Interval(Integral, 1, None, closed="left")],
        "n_jobs": [Integral, None],
        "verbose": ["verbose"],
        "warm_start": ["boolean"],
        **BaseDecisionTree._parameter_constraints,
        "sparse_output": ["boolean"],
    }
    # These tree parameters are fixed by the class attributes below and are
    # therefore not user-settable; drop them from the validation spec.
    for param in ("max_features", "ccp_alpha", "splitter", "monotonic_cst"):
        _parameter_constraints.pop(param)

    # Totally random trees: fixed criterion and exactly one candidate
    # feature per split.
    criterion = "squared_error"
    max_features = 1

    def __init__(
        self,
        n_estimators=100,
        *,
        max_depth=5,
        min_samples_split=2,
        min_samples_leaf=1,
        min_weight_fraction_leaf=0.0,
        max_leaf_nodes=None,
        min_impurity_decrease=0.0,
        sparse_output=True,
        n_jobs=None,
        random_state=None,
        verbose=0,
        warm_start=False,
    ):
        # sklearn convention: __init__ only stores hyper-parameters verbatim
        # so that get_params/set_params/clone work via signature
        # introspection; validation happens in fit().
        super().__init__(
            estimator=ExtraTreeRegressor(),
            n_estimators=n_estimators,
            # Names of the hyper-parameters forwarded from the ensemble to
            # each ExtraTreeRegressor when the forest is built.
            estimator_params=(
                "criterion",
                "max_depth",
                "min_samples_split",
                "min_samples_leaf",
                "min_weight_fraction_leaf",
                "max_features",
                "max_leaf_nodes",
                "min_impurity_decrease",
                "random_state",
            ),
            # Embeddings never bootstrap and have no supervised target, so
            # no subsampling or OOB scoring applies.
            bootstrap=False,
            oob_score=False,
            n_jobs=n_jobs,
            random_state=random_state,
            verbose=verbose,
            warm_start=warm_start,
            max_samples=None,
        )
        self.max_depth = max_depth
        self.min_samples_split = min_samples_split
        self.min_samples_leaf = min_samples_leaf
        self.min_weight_fraction_leaf = min_weight_fraction_leaf
        self.max_leaf_nodes = max_leaf_nodes
        self.min_impurity_decrease = min_impurity_decrease
        self.sparse_output = sparse_output

    def _set_oob_score_and_attributes(self, X, y, scoring_function=None):
        # OOB scoring is undefined for an unsupervised embedding; fail
        # loudly instead of silently computing nonsense.
        raise NotImplementedError("OOB score not supported by tree embedding")

    def fit(self, X, y=None, sample_weight=None):
        """
        Fit estimator.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            The input samples. Use ``dtype=np.float32`` for maximum
            efficiency. Sparse matrices are also supported, use sparse
            ``csc_matrix`` for maximum efficiency.
        y : Ignored
            Not used, present for API consistency by convention.
        sample_weight : array-like of shape (n_samples,), default=None
            Sample weights. If None, then samples are equally weighted.
            Splits that would create child nodes with net zero or negative
            weight are ignored while searching for a split in each node.

        Returns
        -------
        self : object
            Returns the instance itself.
        """
        # Parameters are validated in fit_transform
        self.fit_transform(X, y, sample_weight=sample_weight)
        return self

    @_fit_context(prefer_skip_nested_validation=True)
    def fit_transform(self, X, y=None, sample_weight=None):
        """
        Fit estimator and transform dataset.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            Input data used to build forests. Use ``dtype=np.float32`` for
            maximum efficiency.
        y : Ignored
            Not used, present for API consistency by convention.
        sample_weight : array-like of shape (n_samples,), default=None
            Sample weights. If None, then samples are equally weighted.
            Splits that would create child nodes with net zero or negative
            weight are ignored while searching for a split in each node.

        Returns
        -------
        X_transformed : sparse matrix of shape (n_samples, n_out)
            Transformed dataset.
        """
        rnd = check_random_state(self.random_state)
        # Fit the trees on uniform-random targets: split *quality* is
        # irrelevant here, only the induced partition of the input space
        # matters (the splits themselves are drawn at random).
        y = rnd.uniform(size=_num_samples(X))
        super().fit(X, y, sample_weight=sample_weight)

        # One-hot encode the leaf index each sample lands in, per tree.
        self.one_hot_encoder_ = OneHotEncoder(sparse_output=self.sparse_output)
        output = self.one_hot_encoder_.fit_transform(self.apply(X))
        self._n_features_out = output.shape[1]
        return output

    def get_feature_names_out(self, input_features=None):
        """Get output feature names for transformation.

        Parameters
        ----------
        input_features : array-like of str or None, default=None
            Only used to validate feature names with the names seen in
            :meth:`fit`.

        Returns
        -------
        feature_names_out : ndarray of str objects
            Transformed feature names, in the format of
            `randomtreesembedding_{tree}_{leaf}`, where `tree` is the tree
            used to generate the leaf and `leaf` is the index of a leaf node
            in that tree. Note that the node indexing scheme is used to
            index both nodes with children (split nodes) and leaf nodes.
            Only the latter can be present as output features. As a
            consequence, there are missing indices in the output feature
            names.
        """
        check_is_fitted(self, "_n_features_out")
        _check_feature_names_in(
            self, input_features=input_features, generate_names=False
        )

        # The encoder's categories_ hold, per tree, the leaf-node indices
        # actually observed during fit.
        feature_names = [
            f"randomtreesembedding_{tree}_{leaf}"
            for tree in range(self.n_estimators)
            for leaf in self.one_hot_encoder_.categories_[tree]
        ]
        return np.asarray(feature_names, dtype=object)

    def transform(self, X):
        """
        Transform dataset.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            Input data to be transformed. Use ``dtype=np.float32`` for
            maximum efficiency. Sparse matrices are also supported, use
            sparse ``csr_matrix`` for maximum efficiency.

        Returns
        -------
        X_transformed : sparse matrix of shape (n_samples, n_out)
            Transformed dataset.
        """
        check_is_fitted(self)
        # Map each sample to its per-tree leaf indices, then reuse the
        # encoder fitted in fit_transform.
        return self.one_hot_encoder_.transform(self.apply(X))
|
scikit-learnREPO_NAMEscikit-learnPATH_START.@scikit-learn_extracted@scikit-learn-main@sklearn@ensemble@_forest.py@.PATH_END.py
|
{
"filename": "test_observatory.py",
"repo_name": "rasg-affiliates/21cmSense",
"repo_path": "21cmSense_extracted/21cmSense-main/tests/test_observatory.py",
"type": "Python"
}
|
"""Test the observatory module."""
import re
from pathlib import Path
import numpy as np
import pytest
import pyuvdata
from astropy import units
from astropy.coordinates import EarthLocation
from py21cmsense import Observatory
from py21cmsense.baseline_filters import BaselineRange
from py21cmsense.beam import GaussianBeam
from py21cmsense.data import PATH
@pytest.fixture(scope="module")
def bm():
    """Module-scoped Gaussian beam shared by the observatory tests."""
    beam = GaussianBeam(frequency=150.0 * units.MHz, dish_size=14 * units.m)
    return beam
def test_antpos(bm):
    """Antenna-position validation: units, shape, and minimum antenna count."""
    obs = Observatory(antpos=np.zeros((10, 3)) * units.m, beam=bm)
    assert obs.antpos.unit == units.m
    assert np.all(obs.baselines_metres == 0)

    # A non-length unit must be rejected.
    with pytest.raises(units.UnitConversionError):
        Observatory(antpos=np.zeros((10, 3)) * units.s, beam=bm)

    # Fewer than two antennas is not an array.
    with pytest.raises(ValueError, match="antpos must have at least two antennas"):
        Observatory(antpos=np.zeros((1, 3)) * units.m, beam=bm)

    # Wrong dimensionality / wrong trailing axis length.
    with pytest.raises(ValueError, match="antpos must be a 2D array"):
        Observatory(antpos=np.zeros(10) * units.m, beam=bm)
    with pytest.raises(ValueError, match=re.escape("antpos must have shape (Nants, 3)")):
        Observatory(antpos=np.zeros((10, 2)) * units.m, beam=bm)
def test_observatory_class(bm):
    """Cloning an observatory yields an equal instance."""
    original = Observatory(antpos=np.zeros((3, 3)) * units.m, beam=bm)
    duplicate = original.clone()
    assert original == duplicate
def test_Trcv(bm):
    """A receiver temperature given as a Quantity keeps its unit."""
    obs = Observatory(antpos=np.zeros((3, 3)) * units.m, beam=bm, Trcv=10 * units.mK)
    assert obs.Trcv.unit == units.mK
def test_Trcv_func(bm):
    """A receiver temperature given as a callable returns a temperature."""

    def trcv_model(f):
        # Linear-in-frequency receiver temperature, in mK.
        return (f / units.MHz) * 10 * units.mK

    obs = Observatory(antpos=np.zeros((3, 3)) * units.m, beam=bm, Trcv=trcv_model)
    assert obs.Trcv(7 * units.Hz).unit.is_equivalent(units.K)
def test_Trcv_func_bad(bm):
    """A Trcv callable returning a bare number (no unit) is rejected."""
    with pytest.raises(ValueError, match="Trcv function must return a temperature"):
        Observatory(antpos=np.zeros((3, 3)) * units.m, beam=bm, Trcv=lambda f: 3)
def test_observatory(bm):
    """Basic derived quantities of an Observatory."""
    # Degenerate array: all antennas co-located, so all baselines vanish.
    degenerate = Observatory(antpos=np.zeros((3, 3)) * units.m, beam=bm)
    assert degenerate.frequency == bm.frequency
    assert degenerate.baselines_metres.shape == (3, 3, 3)
    assert (
        degenerate.baselines_metres * degenerate.metres_to_wavelengths
    ).unit == units.dimensionless_unscaled
    assert degenerate.baseline_lengths.shape == (3, 3)
    assert np.all(degenerate.baseline_lengths == 0)

    # Three antennas on a line at 0, 1 and 3 metres.
    linear = Observatory(
        antpos=np.array([[0, 0, 0], [1, 0, 0], [3, 0, 0]]) * units.m, beam=bm
    )
    assert units.isclose(
        linear.shortest_baseline / linear.metres_to_wavelengths, 1 * units.m, rtol=1e-3
    )
    assert units.isclose(
        linear.longest_baseline / linear.metres_to_wavelengths, 3 * units.m, rtol=1e-3
    )
    assert linear.observation_duration < 1 * units.day
    assert len(linear.get_redundant_baselines()) == 6  # including swapped ones

    # Integration time longer than the observation duration is invalid.
    with pytest.raises(AssertionError):
        linear.time_offsets_from_obs_int_time(linear.observation_duration * 1.1)
    assert (
        len(linear.time_offsets_from_obs_int_time(linear.observation_duration / 1.05))
        == 2
    )
    assert units.isclose(
        linear.longest_used_baseline() / linear.metres_to_wavelengths,
        3 * units.m,
        rtol=1e-3,
    )
def test_grid_baselines(bm):
    """Gridding with explicit baseline groups matches the default path."""
    rng = np.random.default_rng(1234)
    obs = Observatory(antpos=rng.normal(loc=0, scale=50, size=(20, 3)) * units.m, beam=bm)

    groups = obs.get_redundant_baselines()
    coords = obs.baseline_coords_from_groups(groups)
    weights = obs.baseline_weights_from_groups(groups)

    default_grid = obs.grid_baselines(coherent=True)
    explicit_grid = obs.grid_baselines(coherent=True, baselines=coords, weights=weights)
    assert np.allclose(default_grid, explicit_grid)
def test_min_max_antpos(bm):
    """min_antpos/max_antpos filter antennas by distance from the origin."""
    # Eleven antennas spaced evenly along x from 0 to 50 m.
    line = np.array([np.linspace(0, 50, 11), np.zeros(11), np.zeros(11)]).T * units.m

    # Dropping antennas closer than 7 m removes two of the eleven.
    obs = Observatory(antpos=line, beam=bm, min_antpos=7 * units.m)
    assert len(obs.antpos) == 9

    # Dropping antennas farther than 10 m keeps only the first two.
    obs = Observatory(antpos=line, beam=bm, max_antpos=10 * units.m)
    assert len(obs.antpos) == 2
def test_from_uvdata(bm):
    """Observatory.from_uvdata copies antenna positions from a UVData object.

    Note: the original test assigned ``antenna_positions`` and ``location``
    twice; the first ``location`` assignment (a list of raw geocentric
    values) was dead code, immediately overwritten by the EarthLocation
    assignment. The redundant pair has been removed.
    """
    uv = pyuvdata.UVData()
    uv.telescope.antenna_positions = (
        np.array([[0, 0, 0], [0, 1, 0], [1, 0, 0], [40, 0, 40]]) * units.m
    )
    uv.telescope.location = EarthLocation.from_geodetic(0, 0)

    obs = Observatory.from_uvdata(uvdata=uv, beam=bm)
    assert np.all(obs.antpos == uv.telescope.antenna_positions)
def test_different_antpos_loaders(tmp_path: Path):
    """Loading the same antenna layout via !npy and !txt YAML tags must give equal observatories."""
    antpos = np.array([[0, 0, 0], [14, 0, 0], [28, 0, 0], [70, 0, 0]])
    # Persist the identical layout in both supported on-disk formats.
    np.save(tmp_path / "antpos.npy", antpos)
    np.savetxt(tmp_path / "antpos.txt", antpos)
    # Shared beam section appended to both YAML configs below.
    beamtxt = """
beam:
    class: GaussianBeam
    frequency: !astropy.units.Quantity
        unit: !astropy.units.Unit {unit: MHz}
        value: 150
    dish_size: !astropy.units.Quantity
        unit: !astropy.units.Unit {unit: m}
        value: 14.0
"""
    # Config whose antenna positions come from the .npy file.
    yamlnpy = f"""
antpos: !astropy.units.Quantity
    unit: !astropy.units.Unit {{unit: m}}
    value: !npy {tmp_path}/antpos.npy
{beamtxt}
"""
    with open(tmp_path / "npy.yml", "w") as fl:
        fl.write(yamlnpy)
    obsnpy = Observatory.from_yaml(tmp_path / "npy.yml")
    # Config whose antenna positions come from the .txt file.
    yamltxt = f"""
antpos: !astropy.units.Quantity
    unit: !astropy.units.Unit {{unit: m}}
    value: !txt {tmp_path}/antpos.txt
{beamtxt}
"""
    with open(tmp_path / "txt.yml", "w") as fl:
        fl.write(yamltxt)
    obstxt = Observatory.from_yaml(tmp_path / "txt.yml")
    # Both loaders should produce fully equivalent Observatory instances.
    assert obsnpy == obstxt
def test_longest_used_baseline(bm):
    """longest_used_baseline honours the optional bl_max cutoff."""
    obs = Observatory(antpos=np.array([[0, 0, 0], [1, 0, 0], [2, 0, 0]]) * units.m, beam=bm)

    longest_m = obs.longest_used_baseline() / obs.metres_to_wavelengths
    assert np.isclose(longest_m, 2 * units.m, atol=1e-3)

    capped_m = obs.longest_used_baseline(bl_max=1.5 * units.m) / obs.metres_to_wavelengths
    assert np.isclose(capped_m, 1 * units.m, atol=1e-4)
def test_from_yaml(bm):
    """from_yaml accepts an in-memory dict, and rejects non-path/non-dict input."""
    rng = np.random.default_rng(1234)
    config = {
        "antpos": rng.random((20, 3)) * units.m,
        "beam": {
            "class": "GaussianBeam",
            "frequency": 150 * units.MHz,
            "dish_size": 14 * units.m,
        },
    }
    obs = Observatory.from_yaml(config)
    assert obs.beam == bm

    with pytest.raises(ValueError, match="yaml_file must be a string filepath"):
        Observatory.from_yaml(3)
def test_get_redundant_baselines(bm):
    """Redundant-baseline grouping, with and without a length filter."""
    obs = Observatory(antpos=np.array([[0, 0, 0], [1, 0, 0], [2, 0, 0]]) * units.m, beam=bm)

    # Unfiltered: length-1 and length-2 groups, each in both directions.
    all_groups = obs.get_redundant_baselines()
    assert len(all_groups) == 4

    # Filtered to bl_max=1.5 m: only the length-1 groups survive.
    short_groups = obs.get_redundant_baselines(
        baseline_filters=BaselineRange(bl_max=1.5 * units.m)
    )
    assert len(short_groups) == 2
def test_no_up_coordinate(tmp_path: Path):
    """Antenna files with only East/North columns should get a zero Up coordinate."""
    mwafl = PATH / "antpos" / "mwa_phase2_compact_antpos.txt"
    enu = np.genfromtxt(mwafl)
    # Save with only EN coordinates (drop the Up column).
    with open(tmp_path / "mwa_antpos.txt", "w") as fl:
        np.savetxt(fl, enu[:, :2])
    # YAML config pointing at the two-column antenna file.
    new_yaml = f"""
antpos: !astropy.units.Quantity
    value: !txt "{tmp_path}/mwa_antpos.txt"
    unit: !astropy.units.Unit {{unit: m}}
beam:
    class: GaussianBeam
    frequency: !astropy.units.Quantity
        unit: !astropy.units.Unit {{unit: MHz}}
        value: 150
    dish_size: !astropy.units.Quantity
        unit: !astropy.units.Unit {{unit: m}}
        value: 35
latitude: !astropy.units.Quantity
    unit: !astropy.units.Unit {{unit: rad}}
    value: -0.4681819
Trcv: !astropy.units.Quantity
    unit: !astropy.units.Unit {{unit: K}}
    value: 100
"""
    with open(tmp_path / "mwa.yaml", "w") as fl:
        fl.write(new_yaml)
    obs = Observatory.from_yaml(tmp_path / "mwa.yaml")
    # The missing Up coordinate must be filled with zeros.
    assert np.all(obs.antpos[:, 2] == 0)
def test_setting_freq_in_profile():
    """A frequency override passed to from_profile must be reflected on the instance."""
    requested = 75 * units.MHz
    observatory = Observatory.from_profile("MWA-PhaseII", frequency=requested)
    assert observatory.frequency == requested
|
rasg-affiliatesREPO_NAME21cmSensePATH_START.@21cmSense_extracted@21cmSense-main@tests@test_observatory.py@.PATH_END.py
|
{
"filename": "yahoo_finance_news.ipynb",
"repo_name": "langchain-ai/langchain",
"repo_path": "langchain_extracted/langchain-master/docs/docs/integrations/tools/yahoo_finance_news.ipynb",
"type": "Jupyter Notebook"
}
|
# Yahoo Finance News
This notebook goes over how to use the `yahoo_finance_news` tool with an agent.
## Setting up
First, you need to install `yfinance` python package.
```python
%pip install --upgrade --quiet yfinance
```
## Example with Chain
```python
import os
os.environ["OPENAI_API_KEY"] = "..."
```
```python
from langchain.agents import AgentType, initialize_agent
from langchain_community.tools.yahoo_finance_news import YahooFinanceNewsTool
from langchain_openai import ChatOpenAI
llm = ChatOpenAI(temperature=0.0)
tools = [YahooFinanceNewsTool()]
agent_chain = initialize_agent(
tools,
llm,
agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
verbose=True,
)
```
```python
agent_chain.invoke(
"What happened today with Microsoft stocks?",
)
```
[1m> Entering new AgentExecutor chain...[0m
[32;1m[1;3mI should check the latest financial news about Microsoft stocks.
Action: yahoo_finance_news
Action Input: MSFT[0m
Observation: [36;1m[1;3mMicrosoft (MSFT) Gains But Lags Market: What You Should Know
In the latest trading session, Microsoft (MSFT) closed at $328.79, marking a +0.12% move from the previous day.[0m
Thought:[32;1m[1;3mI have the latest information on Microsoft stocks.
Final Answer: Microsoft (MSFT) closed at $328.79, with a +0.12% move from the previous day.[0m
[1m> Finished chain.[0m
'Microsoft (MSFT) closed at $328.79, with a +0.12% move from the previous day.'
```python
agent_chain.invoke(
"How does Microsoft feels today comparing with Nvidia?",
)
```
[1m> Entering new AgentExecutor chain...[0m
[32;1m[1;3mI should compare the current sentiment of Microsoft and Nvidia.
Action: yahoo_finance_news
Action Input: MSFT[0m
Observation: [36;1m[1;3mMicrosoft (MSFT) Gains But Lags Market: What You Should Know
In the latest trading session, Microsoft (MSFT) closed at $328.79, marking a +0.12% move from the previous day.[0m
Thought:[32;1m[1;3mI need to find the current sentiment of Nvidia as well.
Action: yahoo_finance_news
Action Input: NVDA[0m
Observation: [36;1m[1;3m[0m
Thought:[32;1m[1;3mI now know the current sentiment of both Microsoft and Nvidia.
Final Answer: I cannot compare the sentiment of Microsoft and Nvidia as I only have information about Microsoft.[0m
[1m> Finished chain.[0m
'I cannot compare the sentiment of Microsoft and Nvidia as I only have information about Microsoft.'
# How does the YahooFinanceNewsTool work?
```python
tool = YahooFinanceNewsTool()
```
```python
tool.invoke("NVDA")
```
'No news found for company that searched with NVDA ticker.'
```python
res = tool.invoke("AAPL")
print(res)
```
Top Research Reports for Apple, Broadcom & Caterpillar
Today's Research Daily features new research reports on 16 major stocks, including Apple Inc. (AAPL), Broadcom Inc. (AVGO) and Caterpillar Inc. (CAT).
Apple Stock on Pace for Worst Month of the Year
Apple (AAPL) shares are on pace for their worst month of the year, according to Dow Jones Market Data. The stock is down 4.8% so far in August, putting it on pace for its worst month since December 2022, when it fell 12%.
```python
```
|
langchain-aiREPO_NAMElangchainPATH_START.@langchain_extracted@langchain-master@docs@docs@integrations@tools@yahoo_finance_news.ipynb@.PATH_END.py
|
{
"filename": "_side.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/parcoords/line/colorbar/title/_side.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class SideValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    """Enumerated validator for ``parcoords.line.colorbar.title.side``."""

    def __init__(
        self, plotly_name="side", parent_name="parcoords.line.colorbar.title", **kwargs
    ):
        # Pull overridable defaults out of kwargs before forwarding the rest.
        edit_type = kwargs.pop("edit_type", "colorbars")
        values = kwargs.pop("values", ["right", "top", "bottom"])
        super(SideValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            values=values,
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@parcoords@line@colorbar@title@_side.py@.PATH_END.py
|
{
"filename": "circ.py",
"repo_name": "SAMI-Galaxy-Survey/sami",
"repo_path": "sami_extracted/sami-master/utils/circ.py",
"type": "Python"
}
|
from __future__ import print_function
"""
Functions to calculate the:
1) drizzle overlap between a square and a circle (function `resample_circle`
by Jon Nielsen 2012)
2) Gaussian overlap between a circular Gaussian and a square grid (function
`inteGrauss2d` by Francesco D'Eugenio 2017)
These calculations are the crucial step in drizzling, to know how much each
input fibre (circle) contributes to each output spaxel (square).
In general this code isn't actually used, as there is a C++ version that
is significantly faster. The functions here are provided as a fall-back in
case the C++ version hasn't been compiled.
WARNING by Francesco D'Eugenio 16/02/2017
The original drizzle overlap as it is implemented in Python has a
severe bug, in that if the circle falls partially outside the grid, the
weights are ``wrapped around`` and distributed to the opposite side of the
grid, as illustrated below.
+---+---+---+---+---+---+---+
| | | x | x | x | | |
+---+---+---+---+---+---+---+
| | | | x | | | |
+---+---+---+---+---+---+---+
| | | | | | | |
+---+---+---+---+---+---+---+
| | | | | | | |
+---+---+---+---+---+---+---+
| | | | x | | | |
+---+---+---+---+---+---+---+
| | | x | x | x | | |
+---+---+---+---+---+---+---+
| | x | x | x | x | x | |
+---+---+---+---+---+---+---+
Because of this bug that I do not have time to address, every time you load
the pipeline and use this implementation (as opposed to the C++ implementation)
you will receive a horrible warning. You're welcome.
"""
import sys
import math
import itertools
import warnings
import numpy as np
from scipy.special import erf
# Warn once on import: this pure-Python fall-back has a known edge-wrapping
# bug (see the module docstring above for the illustration).
warning_mess = (
    'There is a bug in the python implementation of `circ`. We recommend that'
    + ' you read the documentation of this module to assess whether this bug'
    + ' will affect your science.')
warnings.warn(warning_mess)
# sqrt(2), precomputed for reuse by the Gaussian-overlap helpers.
SQRT2 = np.sqrt(2.)
# +---+------------------------------------------------------------------------+
# | 1.| Drizzling functions by Jon Nielsen. |
# +---+------------------------------------------------------------------------+
def find_squares_in_circle(xc, yc, r):
    """Find every unit grid square that lies entirely inside a circle.

    Parameters
    ----------
    xc, yc : float
        Centre of the circle, in grid-square units.
    r : float
        Radius of the circle, in the same units.

    Returns
    -------
    numpy.ndarray of shape (npoints, 2) and dtype int, or None
        The (x, y) lower-left corners of every unit square wholly contained
        in the circle, or ``None`` when no square is fully inside.
    """
    # Establish bounds in y: rows of squares that could possibly be interior.
    ymin = int(math.ceil(yc - r))
    ymax = int(math.floor(yc + r))
    yspace = np.arange(ymin, ymax)
    # For each row compute the x-extent such that the WHOLE square is inside
    # the circle (not just its lower-left corner): take the narrower of the
    # chord half-widths at the square's bottom (y) and top (y+1) edges.
    y = yspace - yc
    x1 = r*r - y*y
    x1[np.abs(x1) <= sys.float_info.epsilon] = 0  # squash float-noise negatives
    x2 = r*r - (y+1)*(y+1)
    x2[np.abs(x2) <= sys.float_info.epsilon] = 0
    x = np.sqrt(np.minimum(x1, x2))
    # BUGFIX: np.cast[int](...) was deprecated and removed in NumPy 2.0;
    # .astype(int) is the supported, behaviourally identical spelling.
    xmin = np.ceil(-x + xc).astype(int)
    xmax = np.floor(x + xc).astype(int)
    # Now we have, for each y, the bounds in x.
    # Build rows of (y, xmin, xmax, count) describing interior squares.
    arr = np.column_stack((yspace, xmin, xmax, xmax-xmin))
    if arr.shape[0] == 0:
        return None
    # Drop rows where max<min (can happen due to the way the bounds are found).
    keep = (arr[:, 3] > 0)
    arr = arr[keep]
    npoints = np.add.reduce(arr[:, 3])
    # Make sure there's something to work with.
    if npoints <= 0:
        return None
    # Expand the per-row ranges into explicit (x, y) points.
    points = np.empty((npoints, 2), dtype=int)
    i = 0
    for row in arr:
        points[i:i+row[3]] = np.column_stack(
            (np.arange(row[1], row[2]), np.repeat(row[0], row[2]-row[1])))
        i += row[3]
    return points
def find_intersections(xc, yc, r):
    """Find all intersections of the circle with integer grid lines.

    Returns an (n, 5) float array of rows ``[x, y, theta, on_x, on_y]``
    sorted by polar angle ``theta`` about the circle centre, with the first
    point repeated at the end (its theta increased by 2*pi) so consecutive
    pairs cover the full circumference.  ``on_x``/``on_y`` flag whether the
    point lies on an integer x / y grid line.  Returns ``None`` if the
    circle crosses no grid line (i.e. it fits inside one square).
    """
    # First establish the limits within which the intersections will lie.
    xmin = int(math.ceil(xc - r))
    xmax = int(math.floor(xc + r)) + 1
    ymin = int(math.ceil(yc - r))
    ymax = int(math.floor(yc + r)) + 1
    # Generate the grid.
    xspace = np.arange(xmin, xmax)
    yspace = np.arange(ymin, ymax)
    # Calculate the intersections with each integer x.
    x = xspace - xc
    y2 = r*r - x*x
    # Deal with floating point issues (tiny negatives from cancellation).
    y2[np.abs(y2) <= sys.float_info.epsilon] = 0
    y = np.sqrt(y2)
    # Ignore tangents (touching, not crossing, contributes no area boundary).
    keep = (y > sys.float_info.epsilon)
    x = x[keep]
    newx = xspace[keep]
    y = y[keep]
    # Make sure there's something to work with.
    if (y.shape[0] <= 0):
        return None
    # Get +/- solutions (upper and lower semicircle).
    x = np.tile(x, 2)
    newx = np.tile(newx, 2)
    y = np.hstack((y, -y))
    newy = y+yc
    # Decide if any of these intersections are also on an integer y.
    on_y = (np.abs(newy-np.round(newy)) <= sys.float_info.epsilon)
    newy[on_y] = np.round(newy[on_y])
    # Calculate angles (+ve please).
    theta = np.arctan2(y, x)
    theta[(theta < 0)] += 2*math.pi
    # Store the points as [x, y, theta, on_x, on_y].
    points = np.column_stack([newx, newy, theta, np.ones_like(x), on_y])
    # Calculate the intersections with each integer y (mirror of the above).
    y = yspace - yc
    x2 = r*r - y*y
    # Deal with floating point issues.
    x2[np.abs(x2) <= sys.float_info.epsilon] = 0
    x = np.sqrt(x2)
    # Ignore tangents.
    keep = (x > sys.float_info.epsilon)
    x = x[keep]
    y = y[keep]
    newy = yspace[keep]
    # Get +/- solutions.
    x = np.hstack((x, -x))
    y = np.tile(y, 2)
    newy = np.tile(newy, 2)
    # Decide if any of these intersections are also on an integer x.
    newx = x+xc
    on_x = (np.abs(newx-np.round(newx)) <= sys.float_info.epsilon)
    newx[on_x] = np.round(newx[on_x])
    # Calculate angles (+ve please).
    theta = np.arctan2(y, x)
    theta[(theta < 0)] += 2*math.pi
    # Store the points.
    points = np.append(points, np.column_stack([newx, newy, theta, on_x, np.ones_like(y)]), axis=0)
    # Sort by theta, and repeat the first point at the end.
    args = np.argsort(points[:,2])
    points = points[np.append(args, args[0])]
    points[-1,2] += 2*math.pi;
    # Remove duplicates.
    # We don't need an abs on the diff because we have already sorted into
    # ascending order.
    args = (np.diff(points[:,2]) > sys.float_info.epsilon)
    # Don't forget to keep the second last point. It will be diffed against the
    # repeated first point, and will get a -ve result.
    args[-1] = True
    # The very last point is the repeated first point, but the diff is one shorter
    # so we fix that here.
    args = np.append(args, True)
    points = points[args]
    return points
def area_contribution(p1, p2, xc, yc, r):
    """Area the circle contributes to one grid square between two intersections.

    ``p1`` and ``p2`` are consecutive rows from `find_intersections`
    (``[x, y, theta, on_x, on_y]``, with p2 at the larger theta).  Returns
    ``(i, j, area)`` where (i, j) is the (x, y) index of the grid square the
    arc between the two points passes through, and ``area`` is the covered
    fraction of that square: a circular-segment part plus a polygonal part
    whose shape depends on which grid lines the two points sit on.
    """
    i = 0
    j = 0
    area = 0.0
    # We assume that p2 theta < p1 theta  (i.e. p2 follows p1 in angle).
    delta_theta = p2[2] - p1[2]
    # Work out which square we are dealing with here: probe the arc midpoint.
    mid_theta = (p1[2] + p2[2]) / 2.0
    x = r * math.cos(mid_theta) + xc
    i = int(math.floor(x))
    y = r * math.sin(mid_theta) + yc
    j = int(math.floor(y))
    # First get the circle segment area (chord-to-arc sliver).
    area = 0.5*r*r*(delta_theta - math.sin(delta_theta))
    # Next get the polygonal area between the chord and the square edges.
    if (p1[3] and p2[3]):
        # Both points are on an x gridline.
        delta_x = math.fabs(p1[0] - p2[0])
        if (delta_x <= sys.float_info.epsilon):
            # Both points are on the same x gridline.
            # No polygonal contribution at all.
            pass
        else:
            # Points are on different x gridlines. Note that they must both
            # have the same upper and lower y grid bounds, or else we would have
            # a point on a y gridline between them.
            delta_y = math.fabs(p1[1] - p2[1])
            if (y < yc):
                tmpy = max(p1[1], p2[1])
                # rectangular area
                area += math.ceil(tmpy) - tmpy
            else:
                tmpy = min(p1[1], p2[1])
                # rectangular area
                area += tmpy - math.floor(tmpy)
            # triangular area
            area += 0.5*delta_y
    elif (p1[4] and p2[4]):
        # Both points are on a y gridline.
        delta_y = math.fabs(p1[1] - p2[1])
        if (delta_y <= sys.float_info.epsilon):
            # Both points are on the same y gridline.
            # No polygonal contribution at all.
            pass
        else:
            # Points are on different y gridlines. Note that they must both
            # have the same upper and lower x grid bounds, or else we would have
            # a point on an x gridline between them.
            delta_x = math.fabs(p1[0] - p2[0])
            if (x < xc):
                tmpx = max(p1[0], p2[0])
                # rectangular area
                area += math.ceil(tmpx) - tmpx
            else:
                tmpx = min(p1[0], p2[0])
                # rectangular area
                area += tmpx - math.floor(tmpx)
            # triangular area
            area += 0.5*delta_x
    else:
        # One is on x, the other on y.
        # Call the point on x xp, and the point on y yp.
        if (p1[3] and p2[4]):
            xp = p1
            yp = p2
        else:
            xp = p2
            yp = p1
        # Now we know which is which, construct point c, which is the
        # point on the same x gridline as xp, but also on the next y gridline
        # closer to the centre of the circle.
        if (xp[1] < yc):
            cy = math.ceil(xp[1])
        else:
            cy = math.floor(xp[1])
        cx = xp[0]
        # Now also point d, which is on the same y gridline as yp,
        # but also on the next x gridline closer to the centre of the circle.
        if (yp[0] < xc):
            dx = math.ceil(yp[0])
        else:
            dx = math.floor(yp[0])
        dy = yp[1]
        # Work out if c and d are different points, or the same point.
        if (math.sqrt((cx-dx)**2 + (cy-dy)**2) <= sys.float_info.epsilon):
            # The same point, so it's a triangle.
            area += math.fabs(0.5*(xp[1]-cy)*(yp[0]-cx))
        else:
            # Not the same point - it's a pentagon.
            # Note that we ignore any intersections of the circle with other edges
            # of this square. This is handled by the calling function, which uses
            # 1-area as a subtractive term.
            area += math.fabs(xp[1]-cy) + math.fabs((xp[1]-dy)*(yp[0]-dx)) + \
                math.fabs(0.5*(xp[0]-yp[0])*(xp[1]-yp[1]))
    return i, j, area
def resample_circle(xpix, ypix, xc, yc, r, *args):
    """Resample a circle/drop onto an output grid.

    Written by Jon Nielsen 2012

    Parameters
    ----------
    xpix: (int) Number of pixels in the x-dimension of the output grid
    ypix: (int) Number of pixels in the y-dimension of the output grid
    xc: (float) x-position of the centre of the circle.
    yc: (float) y-position of the centre of the circle.
    r: (float) radius of the circle
    args: any additional arguments. This is ignored in the context of this
        function (its purpose is to gather additional arguments that may be
        passed to equivalent functions with a different signature).

    Output
    ------
    2D array of floats. A weight map for the intersection of a circle with
    a square grid. Squares on the grid that are completely within the circle
    receive a weight of 1. Square that intersect the circle are given a weight
    that is proportional to the area of the square inside the circle.

    Notes
    -----
    The zeroth axis of the output array is for the y-dimension and the
    first axis is for the x-dimension. i.e., out.shape -> (ypix, xpix)
    This can be VERY CONFUSING, particularly when one remembers that imshow's
    behaviour is to plot the zeroth axis as the vertical coordinate and the first
    axis as the horizontal coordinate.

    NOTE(review): as warned in the module docstring, if the circle extends
    beyond the grid the negative indices produced below wrap around to the
    opposite side of the array (Python negative indexing) — weights land on
    the wrong pixels.  Left unfixed here to preserve historical behaviour.
    """
    # Create the output array.
    out = np.zeros((ypix,xpix))
    # First find the squares that are entirely in the circle.
    a = find_squares_in_circle(xc, yc, r)
    if not a is None:
        out[a[:,1],a[:,0]] = 1.0
    # Now work out the tricky bits around the circumference.
    b = find_intersections(xc, yc, r)
    if b is None:
        # The whole circle fits in one square; deposit its full area there.
        i = int(math.floor(xc))
        j = int(math.floor(yc))
        out[j,i] = math.pi*r*r
    else:
        # Work our way through the points, pairwise, calculating area as we go.
        for (p1,p2) in pairwise(b):
            i,j,area = area_contribution(p1, p2, xc, yc, r)
            if (out[j,i] != 0.0):
                # We already had area for this square, so that means the circle
                # has intersected it again and we need to subtract off the new bit
                # from what we already calculated.
                area = 1-area
                out[j,i] -= area
            else:
                # Just set the output for this square.
                out[j,i] = area
    return out
# A useful function for iterating over a sequence in pairwise fashion
def pairwise(iterable):
    "s -> (s0,s1), (s1,s2), (s2, s3), ..."
    iterator = iter(iterable)
    previous = next(iterator, None)  # None only matters if the iterable is empty
    for current in iterator:
        yield (previous, current)
        previous = current
|
SAMI-Galaxy-SurveyREPO_NAMEsamiPATH_START.@sami_extracted@sami-master@utils@circ.py@.PATH_END.py
|
{
"filename": "full_analysis.py",
"repo_name": "CosmicFish/CosmicFish",
"repo_path": "CosmicFish_extracted/CosmicFish-master/python/apps/full_analysis.py",
"type": "Python"
}
|
#----------------------------------------------------------------------------------------
#
# This file is part of CosmicFish.
#
# Copyright (C) 2015-2017 by the CosmicFish authors
#
# The CosmicFish code is free software;
# You can use it, redistribute it, and/or modify it under the terms
# of the GNU General Public License as published by the Free Software Foundation;
# either version 3 of the License, or (at your option) any later version.
# The full text of the license can be found in the file LICENSE at
# the top level of the CosmicFish distribution.
#
#----------------------------------------------------------------------------------------
"""
Simple Python code to perform analysis of Fisher matrices (plot_1D, plot_2D, bounds...)
The output will be a set of PNG files with 1D, 2D, triangular plots and a file with bounds
Invoking the help option ``full_analysis.py -h`` will result in::
usage: full_analysis.py [-h] [-v] [-q] inifile [inifile ...]
Analysis tool for plot and bounds
positional arguments:
inifile file with a list of instructions
optional arguments:
-h, --help show this help message and exit
-v, --version show program's version number and exit
-q, --quiet      decides whether something gets printed to screen or not
Developed by Matteo Martinelli (martinelli@lorentz.leidenuniv.nl)
and Marco Raveri (mraveri@sissa.it) for the CosmicFish code.
"""
# ***************************************************************************************
__version__ = '1.0' #: version of the application
# ***************************************************************************************
""" Hard coded options """
# ***************************************************************************************
# import first dependencies:
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import matplotlib.lines as mlines
import numpy as np
import argparse
import math
import sys
import os
import copy
import itertools as it
import configparser
# get the path of the application and the CosmicFish library:
here = os.path.dirname(os.path.abspath(__file__))
cosmicfish_pylib_path = here+'/..'
sys.path.insert(0, os.path.normpath(cosmicfish_pylib_path))
# import the CosmicFish pylib
import cosmicfish_pylib.utilities as fu
import cosmicfish_pylib.colors as fc
import cosmicfish_pylib.fisher_matrix as fm
import cosmicfish_pylib.fisher_derived as fd
import cosmicfish_pylib.fisher_operations as fo
import cosmicfish_pylib.fisher_plot_settings as fps
import cosmicfish_pylib.fisher_plot_analysis as fpa
import cosmicfish_pylib.fisher_plot as fp
# ***************************************************************************************
# protection against importing:
if __name__ == "__main__":
    # Build the command-line interface.
    parser = argparse.ArgumentParser(description='Analysis tool for plot and bounds')
    # positional argument: one or more ini files with instructions
    parser.add_argument('inifile', metavar='inifile', type=str, nargs='+',
                        help='file with a list of instructions')
    # version:
    parser.add_argument('-v', '--version', action='version', version='%(prog)s '+__version__)
    # quiet mode (typo fix: "wether" -> "whether" in the user-facing help text):
    parser.add_argument('-q', '--quiet', dest='quiet', action='store_true',
                        help='decides whether something gets printed to screen or not')
    # do the parsing:
    args = parser.parse_args()
    # print the CosmicFish header:
    if not args.quiet:
        fu.CosmicFish_write_header('Global analysis app version '+__version__)
    # process input arguments:
    inifile = args.inifile
#function used to deal with ini sections
def ConfigSectionMap(section):
dict1 = {}
options = Config.options(section)
for option in options:
try:
dict1[option] = Config.get(section, option)
if dict1[option] == -1:
DebugPrint("skip: %s" % option)
except:
print(("exception on %s!" % option))
dict1[option] = None
return dict1
    # initializing and reading the config file
    Config = configparser.ConfigParser()
    Config.read(inifile)
    # Reading general options
    outroot = ConfigSectionMap("General Options")['outroot']
    files = Config.get("General Options", "fishers").split("\n")
    derived = Config.getboolean("General Options", "derived")
    sum_fish = Config.get("General Options", "sum_fish").split("\n")
    eliminate = Config.getboolean("General Options", "eliminate")
    fishnames = Config.get("General Options", "names").split("\n")
    # General screen output
    if not args.quiet:
        print('GENERAL OPTIONS:')
        print(' Output root='+outroot)
        print(' Using derived parameters='+str(derived))
        print(' Eliminate rather than marginalize='+str(eliminate))
        print(' ---------------------------------')
        print(' Bounds from these matrices will be computed:')
        for elem in files:
            print(elem)
        if sum_fish[0]:
            print('Also the combination of these will be computed:')
            for elem in sum_fish:
                print(elem)
        print(' ---------------------------------')
        print()
        print()
    # Bail out when there is nothing to analyse; fall back to the summed
    # matrices when no individual fishers were requested.
    if not files[0]:
        if not sum_fish[0]:
            print('NO MATRICES TO WORK WITH!')
            exit()
        else:
            files = sum_fish
            print('No fishers to plot, using only the combined one')
    # MOD: too much output here!
    if derived is not False:
        fishers = fpa.CosmicFish_FisherAnalysis(fisher_path=files, with_derived=True)
        if sum_fish[0]:
            print('NOT HERE')
            summing = fpa.CosmicFish_FisherAnalysis(fisher_path=sum_fish, with_derived=True)
    else:
        fishers = fpa.CosmicFish_FisherAnalysis(fisher_path=files, with_derived=False)
        if sum_fish[0]:
            summing = fpa.CosmicFish_FisherAnalysis(fisher_path=sum_fish, with_derived=False)
    # Collect all matrices (optionally appending the element-wise sum of the
    # `sum_fish` set) and rename them according to the user-supplied names.
    fishers_temp = fpa.CosmicFish_FisherAnalysis()
    fisher_list = fishers.get_fisher_matrix()
    if sum_fish[0]:
        summing_list = summing.get_fisher_matrix()
        for fish in summing_list[1:]:
            summing_list[0] = summing_list[0]+fish
        fisher_list.append(summing_list[0])
    for i in range(len(fisher_list)):
        fisher_list[i].name = fishnames[i]
    fishers_temp.add_fisher_matrix( fisher_list[:] )
    fishers = fishers_temp
    # producing 1D plots
    num1D = Config.items( "1Dplot" )
    if not args.quiet and len(num1D)>0:
        print()
        print('Producing 1D plots:')
    for key, params in num1D:
        params = Config.get("1Dplot", key).split(",")
        fishers_temp = fishers
        if eliminate is not False:
            fishers_temp = fishers_temp.reshuffle( params=params )
        plot_settings = fps.CosmicFish_PlotSettings()
        plotter = fp.CosmicFishPlotter( settings=plot_settings, fishers=fishers_temp)
        plotter.new_plot()
        plotter.plot1D( params=params )
        plotter.export( outroot+'_1Dplot_'+str(key)+'.png' )
        plotter.close_plot()
        if not args.quiet:
            print(' 1D plots done for parameters '+str(params))
            print(' Saved results in: ', outroot+'_1Dplot_'+str(key)+'.png')
    if not args.quiet and len(num1D)>0:
        print('1D plots done!')
    # Producing 2D plots
    num2D = Config.items( "2Dplot" )
    if not args.quiet and len(num2D)>0:
        print()
        print('Producing 2D plots:')
    for key, params in num2D:
        params = Config.get("2Dplot", key).split(",")
        fishers_temp = fishers
        if eliminate is not False:
            fishers_temp = fishers_temp.reshuffle( params=params )
        plot_settings = fps.CosmicFish_PlotSettings()
        plotter = fp.CosmicFishPlotter( settings=plot_settings, fishers=fishers_temp)
        # 2D plots are drawn for every pair of the requested parameters.
        if params is not None:
            params = [ list(i) for i in it.combinations(params, 2)]
            if len(params)==0:
                raise ValueError('Not enough parameters for 2D plot.')
        plotter.new_plot()
        plotter.plot2D( params=params )
        plotter.export( outroot+'_2Dplot_'+str(key)+'.png' )
        plotter.close_plot()
        if not args.quiet:
            print(' 2D plots done for parameters '+str(params))
            print(' Saved results in: ', outroot+'_2Dplot_'+str(key)+'.png')
    if not args.quiet and len(num2D)>0:
        print('2D plots done!')
    # Producing triangular plots
    numtri = Config.items( "triplot" )
    if not args.quiet and len(numtri)>0:
        print()
        print('Producing triangular plots:')
    for key, params in numtri:
        params = Config.get("triplot", key).split(",")
        fishers_temp = fishers
        if eliminate is not False:
            fishers_temp = fishers_temp.reshuffle( params=params )
        plot_settings = fps.CosmicFish_PlotSettings()
        plotter = fp.CosmicFishPlotter( settings=plot_settings, fishers=fishers_temp)
        plotter.new_plot()
        plotter.plot_tri( params=params )
        plotter.export( outroot+'_triplot_'+str(key)+'.png' )
        plotter.close_plot()
        if not args.quiet:
            print(' Triangular plots done for parameters '+str(params))
            print(' Saved results in: ', outroot+'_triplot_'+str(key)+'.png')
    if not args.quiet and len(numtri)>0:
        print('Triangular plots done!')
    # Producing bounds files:
    # get the parameters:
    numbounds = [ i for i in Config.items( "bounds" ) if "params" in i[0] ]
    use_latex = Config.getboolean( "bounds",'use_latex')
    latex_num_col = Config.getint( "bounds",'latex_num_col')
    if len(numbounds)>0:
        if not args.quiet:
            print()
            print('Producing bounds:')
        # open the file if wanted:
        if outroot is not None:
            out_file = open(outroot+'_bounds.txt',"w")
        # do some first printing for Latex:
        if use_latex:
            out_file.write( '\\begin{tabular}{ |'+''.join(['l|' for i in range(latex_num_col) ])+' }\n' )
        for key, params in numbounds:
            params = Config.get("bounds", key).split(",")
            fishers_temp = fishers
            if eliminate is not False:
                fishers_temp = fishers_temp.reshuffle( params=params )
            elif params is not None:
                fishers_temp = fishers_temp.marginalise( params=params )
            for num, fish in enumerate(fishers_temp.get_fisher_list()):
                # get the bounds:
                Bounds_68 = list( fu.v_nice_number( fish.get_confidence_bounds( 0.680 ), mode=1 ) )
                Bounds_95 = list( fu.v_nice_number( fish.get_confidence_bounds( 0.950 ), mode=1 ) )
                Bounds_99 = list( fu.v_nice_number( fish.get_confidence_bounds( 0.997 ), mode=1 ) )
                # get the fiducial, rounded to the precision of its 68% bound:
                fiducial = []
                for num, par in enumerate(fish.get_param_fiducial()):
                    fiducial.append( fu.significant_digits( (par, Bounds_68[num]), mode=1 ) )
                # get the names:
                if use_latex:
                    parameter_names_latex = copy.deepcopy(fish.get_param_names_latex())
                else:
                    parameter_names = copy.deepcopy(fish.get_param_names())
                # do the printing:
                if use_latex:
                    print_table = []
                    for par,fid,bound in zip( parameter_names_latex,fiducial,Bounds_68 ):
                        print_table.append( '$'+str(par)+' = '+str(fid)+' \pm '+str(bound)+'$' )
                    if len(print_table)%latex_num_col == 0:
                        table_length = len(print_table)/latex_num_col
                    else:
                        table_length = len(print_table)/latex_num_col +1
                    print_table = fu.grouper( table_length, print_table,fillvalue='' )
                    print_table = [ list(i) for i in print_table]
                    print_table = list(map(list, list(zip(*print_table))))
                    col_width = [max(len(str(x)) for x in col) for col in zip(*print_table)]
                    if outroot is not None:
                        out_file.write( '\hline'+'\n' )
                        out_file.write( '\multicolumn{'+str(latex_num_col)+'}{|c|}{'+fish.name.replace('_',' ')+'} \\\[1mm]'+'\n' )
                        out_file.write( '\hline'+'\n' )
                        for line in print_table:
                            out_file.write( " " + " & ".join("{:{}}".format(x, col_width[i]) for i, x in enumerate(line)) + " \\\[1mm]\n" )
                    else:
                        print('\hline')
                        print('\multicolumn{'+str(latex_num_col)+'}{|c|}{'+fish.name.replace('_',' ')+'} \\\[1mm]')
                        print('\hline')
                        for line in print_table:
                            print(" " + " & ".join("{:{}}".format(x, col_width[i]) for i, x in enumerate(line)) + " \\\[1mm]")
                else:
                    # put on top the labels of the columns:
                    parameter_names.insert(0,' Parameter ')
                    fiducial.insert(0, ' fiducial')
                    Bounds_68.insert(0,' 68% C.L.')
                    Bounds_95.insert(0,' 95% C.L.')
                    Bounds_99.insert(0,' 99.7% C.L.')
                    # put a white space:
                    parameter_names.insert(1,' ')
                    fiducial.insert(1, ' ')
                    Bounds_68.insert(1,' ')
                    Bounds_95.insert(1,' ')
                    Bounds_99.insert(1,' ')
                    # assemble and transpose the plain-text table:
                    print_table = [parameter_names,fiducial,Bounds_68, Bounds_95, Bounds_99]
                    out_file.write( ''.join([ '*' for i in range(len('Parameter bounds for the Fisher matrix: '+fish.name)+1)])+'\n' )
                    out_file.write( 'Parameter bounds for the Fisher matrix: '+fish.name+'\n' )
                    out_file.write( ''.join([ '*' for i in range(len('Parameter bounds for the Fisher matrix: '+fish.name)+1)])+'\n' )
                    out_file.write( '\n' )
                    print_table = list(map(list, list(zip(*print_table))))
                    col_width = [max(len(str(x)) for x in col) for col in zip(*print_table)]
                    # print it to file:
                    for line in print_table:
                        out_file.write( "| " + " | ".join("{:{}}".format(x, col_width[i]) for i, x in enumerate(line)) + " |"+'\n' )
                    out_file.write( '\n' )
            if not args.quiet:
                print(' Bounds computed for parameters '+str( fishers_temp.get_parameter_list() ))
        # finalize the latex part:
        if use_latex:
            out_file.write( '\hline\n' )
            out_file.write( '\end{tabular}' )
        # close the file:
        out_file.close()
        if not args.quiet:
            print(' Saved results in: ', outroot+'_bounds.txt')
            print('bounds done!')
    # finalize:
    if not args.quiet:
        print()
        print('It seems everything is done...')
        print('It was nice working with you. See you soon!')
    # everything's fine, exit without error:
    exit(0)
|
CosmicFishREPO_NAMECosmicFishPATH_START.@CosmicFish_extracted@CosmicFish-master@python@apps@full_analysis.py@.PATH_END.py
|
{
"filename": "igimf_epoch_49.py",
"repo_name": "juzikong/photGalIMF",
"repo_path": "photGalIMF_extracted/photGalIMF-main/simulation_results_from_galaxy_evol/example/igimf_epoch_49.py",
"type": "Python"
}
|
# File to define a custom IMF
# The return value represents the chosen IMF value for the input mass
def custom_imf(mass, time):  # there is no time dependence for IGIMF
    """
    Galaxy-wide IMF (IGIMF) for this star-formation epoch, tabulated as
    an auto-generated piecewise-linear function of stellar mass.

    Parameters:
        mass: stellar mass (presumably in solar masses -- TODO confirm
            against the generating code).
        time: unused; kept for interface compatibility with
            time-dependent IMF implementations.

    Returns:
        The IMF value at *mass*; 0 below 0.08 and 0 (as ``0 * mass + 0``,
        i.e. float zero for float input) above 194.3414544179348.
    """
    if mass < 0.08:
        return 0
    elif mass < 0.101:
        return -881912095498.0004 * mass + 126349388048.4443
    elif mass < 0.10201:
        return -852999343379.5463 * mass + 123429200084.48044
    elif mass < 0.10510100501:
        return -771825086636.5791 * mass + 115067381227.91583
    elif mass < 0.10828567056280801:
        return -698375642355.5582 * mass + 107272041085.80531
    elif mass < 0.11156683466653165:
        return -631915891670.408 * mass + 100004803063.36438
    elif mass < 0.11494742132376223:
        return -571780672073.184 * mass + 93229890421.70459
    elif mass < 0.11843044313729356:
        return -517368120134.2647 * mass + 86913950148.33691
    elif mass < 0.12201900399479669:
        return -468133647751.17334 * mass + 81025888759.68959
    elif mass < 0.12571630183484303:
        return -423584491638.08563 * mass + 75536719227.383
    elif mass < 0.12952563149674062:
        return -383274781503.5729 * mass + 70419418274.561
    elif mass < 0.13345038765672337:
        return -346801077557.1967 * mass + 65648793339.83267
    elif mass < 0.13749406785310975:
        return -313798332681.9032 * mass + 61201358553.86909
    elif mass < 0.14166027560312686:
        return -283936238859.25653 * mass + 57055219118.036606
    elif mass < 0.14595272361417722:
        return -256915921281.43967 * mass + 53189963515.95951
    elif mass < 0.15037523709241038:
        return -232466947062.03006 * mass + 49586563027.230675
    elif mass < 0.15493175715154747:
        return -210344618608.2964 * mass + 46227278048.721466
    elif mass < 0.1596263443249965:
        return -190327524564.86612 * mass + 43095570762.188644
    elif mass < 0.16446318218438824:
        return -172215323817.93555 * mass + 40176023718.31866
    elif mass < 0.1694465810677574:
        return -155826740380.95596 * mass + 37454263936.35827
    elif mass < 0.17458098192069152:
        return -140997749093.59116 * mass + 34916892145.66835
    elif mass < 0.1798709602538704:
        return -127579933975.75043 * mass + 32551416820.89501
    elif mass < 0.18532123022052294:
        return -115439002806.02321 * mass + 30346192685.970474
    elif mass < 0.190936648817435:
        return -104453443057.75208 * mass + 28290363384.209488
    elif mass < 0.19672222021325209:
        return -94513305740.80682 * mass + 26373808032.28923
    elif mass < 0.20268310020793384:
        return -85519104976.9103 * mass + 24587091394.955666
    elif mass < 0.20882460082733445:
        return -77380822295.08893 * mass + 22921417435.19663
    elif mass < 0.21515219505700353:
        return -70017005681.73714 * mass + 21368586011.210938
    elif mass < 0.2216715217194258:
        return -63353954367.92658 * mass + 19920952506.94763
    elif mass < 0.22838839049904613:
        return -57324981195.24943 * mass + 18571390197.54771
    elif mass < 0.23530878711955774:
        return -51869745177.25965 * mass + 17313255164.34588
    elif mass < 0.24243887867806746:
        return -46933647576.609474 * mass + 16140353586.739586
    elif mass < 0.24978501914089157:
        return -42467285453.54647 * mass + 15046911249.911201
    elif mass < 0.2573537550058797:
        return -38425957216.49946 * mass + 14027545118.26312
    elif mass < 0.26515183113631285:
        return -34769215226.14052 * mass + 13077236834.640993
    elif mass < 0.27318619677157424:
        return -31460460975.12397 * mass + 12191308014.872707
    elif mass < 0.2814640117199497:
        return -28466578791.900406 * mass + 11365397216.00891
    elif mass < 0.28999265273907593:
        return -25757604402.43424 * mass + 10595438464.85373
    elif mass < 0.29877972010972265:
        return -23306425032.748863 * mass + 9877641241.114536
    elif mass < 0.30783304440876735:
        return -21088508050.685318 * mass + 9208471816.603436
    elif mass < 0.31716069348739745:
        return -19081655431.023422 * mass + 8584635858.633564
    elif mass < 0.32677097966075913:
        return -17265781586.4992 * mass + 8003062211.957123
    elif mass < 0.33667246711545984:
        return -15622712341.196539 * mass + 7460887779.420978
    elif mass < 0.34687397954152543:
        return -14136003034.26778 * mass + 6955443426.885202
    elif mass < 0.3573846079956132:
        return -12790773933.533293 * mass + 6484240843.032959
    elif mass < 0.36821371900248834:
        return -11573561311.646477 * mass + 6044960289.365638
    elif mass < 0.3793709629019827:
        return -10472182694.378483 * mass + 5635439180.096561
    elif mass < 0.39086628244887567:
        return -9475614932.292295 * mass + 5253661435.698137
    elif mass < 0.40270992167335906:
        return -8573883875.54401 * mass + 4897747557.71019
    elif mass < 0.41491243500998354:
        return -7757964547.586873 * mass + 4565945375.935192
    elif mass < 0.4274846967032211:
        return -7019690818.683823 * mass + 4256621422.4751673
    elif mass < 0.4404379104980254:
        return -6351673675.18426 * mass + 3968252890.1399918
    elif mass < 0.453783619624026:
        return -5747227266.568564 * mass + 3699420135.6402307
    elif mass < 0.4675337170822536:
        return -5200301990.110294 * mass + 3448799690.66144
    elif mass < 0.48170045624356295:
        return -4705423943.412504 * mass + 3215157746.403937
    elif mass < 0.49629646176819914:
        return -4257640138.8508053 * mass + 2997344079.521927
    elif mass < 0.5113347408562374:
        return -3852468931.589354 * mass + 2794286389.553832
    elif mass < 0.5268286948389223:
        return -3485855165.0324383 * mass + 2604985019.9686236
    elif mass < 0.5427921311212365:
        return -3154129584.782682 * mass + 2428508036.838693
    elif mass < 0.5592392754863411:
        return -2853972114.905384 * mass + 2263986640.914044
    elif mass < 0.5761847847728528:
        return -2582378628.942507 * mass + 2110610890.5078216
    elif mass < 0.593643759936255:
        return -2336630883.1087937 * mass + 1967625714.139305
    elif mass < 0.6116317595060835:
        return -2114269310.7448237 * mass + 1834327193.3037505
    elif mass < 0.6301648134508773:
        return -1913068405.742814 * mass + 1710059097.0706878
    elif mass < 0.6492594374632522:
        return -1731014448.562266 * mass + 1594209651.4456558
    elif mass < 0.6689326476778262:
        return -1566285351.917694 * mass + 1486208527.597762
    elif mass < 0.689201975835112:
        return -1417232424.4142952 * mass + 1385524034.1202335
    elif mass < 0.7100854849048917:
        return -1282363869.6177766 * mass + 1291660499.5043395
    elif mass < 0.7316017851829947:
        return -1160329855.4088144 * mass + 1204155831.9407647
    elif mass < 0.7537700508758246:
        return -1049909004.1847667 * mass + 1122579244.432484
    elif mass < 0.7766100371874131:
        return -949996168.7014714 * mass + 1046529134.0237803
    elif mass < 0.8001420979242287:
        return -859591371.2048419 * mass + 975631104.701447
    elif mass < 0.8243872036334308:
        return -777789795.1523108 * mass + 909536124.2368587
    elif mass < 0.8493669602907274:
        return -703772729.355241 * mass + 847918805.8942916
    elif mass < 0.8751036285544969:
        return -636799373.9069062 * mass + 790475806.5464351
    elif mass < 0.9016201436033267:
        return -576199425.885282 * mass + 736924333.3106526
    elif mass < 0.9289401355746512:
        return -521366370.6256449 * mass + 687000751.3549962
    elif mass < 0.9570879506226987:
        return -471751411.4175549 * mass + 640459286.0192342
    elif mass < 0.9860886726145172:
        return -426857976.8721611 * mass + 597070812.8618844
    elif mass < 1.0159681454834097:
        return -378224034.20224994 * mass + 548611192.837717
    elif mass < 1.0467529962597026:
        return -342742154.28123385 * mass + 512208975.96922934
    elif mass < 1.078470658799368:
        return -310588893.6146222 * mass + 478222169.8875282
    elif mass < 1.1111493982316476:
        return -281451988.41692126 * mass + 446490503.8791636
    elif mass < 1.1448183361474649:
        return -255048468.93246916 * mass + 416864341.73713684
    elif mass < 1.1795074765510691:
        return -231121911.30959457 * mass + 389203976.12528235
    elif mass < 1.215247732598043:
        return -209439947.281311 * mass + 363378969.76383543
    elif mass < 1.2520709541434965:
        return -189792007.4675967 * mass + 339267540.3298471
    elif mass < 1.2900099561249987:
        return -171987276.38238186 * mass + 316755986.1713393
    elif mass < 1.3290985478055424:
        return -155852839.28501594 * mass + 295738150.12725586
    elif mass < 1.3693715629025982:
        return -141232002.87908453 * mass + 276114918.9249596
    elif mass < 1.4108648906301098:
        return -127982773.5493596 * mass + 257793755.7942201
    elif mass < 1.4536155076810928:
        return -115976478.35815808 * mass + 240688264.09393674
    elif mass < 1.4976615111793377:
        return -105096515.40857038 * mass + 224717779.89378324
    elif mass < 1.5430421526295828:
        return -95237221.43822642 * mass + 209806991.5892322
    elif mass < 1.589797872896412:
        return -86302845.64635111 * mass + 195885584.7567351
    elif mass < 1.6379703382430462:
        return -78206619.78770545 * mass + 182887910.57358485
    elif mass < 1.6876024774621488:
        return -70869915.5029363 * mass + 170752676.23962244
    elif mass < 1.7387385201317294:
        return -64221480.701078944 * mass + 159422655.94017956
    elif mass < 1.791424036030241:
        return -58196747.57857353 * mass + 148844420.98790362
    elif mass < 1.8457059757459933:
        return -52737205.554136746 * mass + 138968087.87036958
    elif mass < 1.9016327125170727:
        return -47789833.029834844 * mass + 129747083.01574302
    elif mass < 1.9592540853390523:
        return -43306582.45961368 * mass + 121137923.16692841
    elif mass < 2.018621443378911:
        return -39243913.72449903 * mass + 113100010.32866889
    elif mass < 2.0797876917347353:
        return -35562371.28275334 * mass + 105595440.32068571
    elif mass < 2.14280733858199:
        return -32226200.988276925 * mass + 98588824.03384253
    elif mass < 2.2077365437483625:
        return -29203002.856017157 * mass + 92047120.54667199
    elif mass < 2.2746331687604817:
        return -26463416.40203488 * mass + 85939481.3150872
    elif mass < 2.3435568284070927:
        return -23980835.502444685 * mass + 80237104.70076022
    elif mass < 2.4145689438646563:
        return -21731150.001893587 * mass + 74913100.15190408
    elif mass < 2.4877327974326993:
        return -19692511.562272504 * mass + 69942361.39625351
    elif mass < 2.5631135889277075:
        return -17845121.47753356 * mass + 65301448.04800619
    elif mass < 2.640778493785806:
        return -16171038.394003037 * mass + 60968475.07059142
    elif mass < 2.7207967229260097:
        return -14654004.068820138 * mass + 56923009.57401629
    elif mass < 2.8032395844273905:
        return -13279285.474247394 * mass + 53145974.45994402
    elif mass < 2.888180547075125:
        return -12033531.714499747 * mass + 49619558.46035055
    elif mass < 2.9756953058320486:
        return -10904644.36544424 * mass + 46327132.145368725
    elif mass < 3.0658618492940652:
        return -9881659.977970002 * mass + 43253169.50430538
    elif mass < 3.1587605291895247:
        return -8954643.603935985 * mass + 40383174.730044276
    elif mass < 3.2544741319844963:
        return -8114592.310631201 * mass + 37703613.86152058
    elif mass < 3.353087952657759:
        return -7353347.746728442 * mass + 35201850.96198223
    elif mass < 3.4546898707112415:
        return -6663516.910575382 * mass + 32866088.532013968
    elif mass < 3.559370428483663:
        return -6038400.351360663 * mass + 30685311.876376472
    elif mass < 3.667222911837147:
        return -5471927.105856929 * mass + 28649237.16229057
    elif mass < 3.7783434332887245:
        return -4958595.738863783 * mass + 26748262.92422453
    elif mass < 3.892831017660806:
        return -4493420.914756326 * mass + 24973424.786514346
    elif mass < 4.010787690326946:
        return -4071884.9812498903 * mass + 23316353.19027959
    elif mass < 4.132318568131543:
        return -3689894.095182401 * mass + 21769233.92531387
    elif mass < 4.257531953064498:
        return -3343738.464214127 * mass + 20324771.28080991
    elif mass < 4.386539428774305:
        return -3030056.3183271675 * mass + 18976153.641166396
    elif mass < 4.519455960005596:
        return -2745801.2612217274 * mass + 17717021.364621893
    elif mass < 4.656399995049725:
        return -2488212.684538307 * mass + 16541436.793256415
    elif mass < 4.797493571299727:
        return -2254788.957574303 * mass + 15443856.252927545
    elif mass < 4.94286242400368:
        return -2043263.1321236799 * mass + 14419103.91111523
    elif mass < 5.092636098313417:
        return -1851580.9264861692 * mass + 13462347.369371472
    elif mass < 5.246948064728412:
        return -1677880.7748385717 * mass + 12569074.875304293
    elif mass < 5.405935838037748:
        return -1520475.7482114013 * mass + 11735074.04662867
    elif mass < 5.569741099866129:
        return -1377837.171488895 * mass + 10956412.006936198
    elif mass < 5.738509824933173:
        return -1248579.7773293327 * mass + 10229416.839531496
    elif mass < 5.912392411138472:
        return -1131448.2528230778 * mass + 9550660.27187066
    elif mass < 6.091543813588379:
        return -1025305.0482320755 * mass + 8916941.508941254
    elif mass < 6.27612368268392:
        return -929119.3294146868 * mass + 8325272.139359356
    elif mass < 6.466296506392926:
        return -841956.966641636 * mass + 7772862.042986926
    elif mass < 6.662231756833138:
        return -762971.4625816531 * mass + 7257106.233641686
    elif mass < 6.8641040412969385:
        return -691395.7313472136 * mass + 6775572.574825793
    elif mass < 7.072093257852278:
        return -626534.6487633276 * mass + 6325990.310560303
    elif mass < 7.286384755658459:
        return -567758.3015101728 * mass + 5906239.357245501
    elif mass < 7.507169500139667:
        return -514495.8695739057 * mass + 5514340.306029102
    elif mass < 7.734644243163398:
        return -466230.0825977056 * mass + 5148445.088564688
    elif mass < 7.969011698375493:
        return -422492.19629126147 * mass + 4806828.262119563
    elif mass < 8.210480721847969:
        return -382857.44011296297 * mass + 4487878.872949734
    elif mass < 8.459266498200684:
        return -346940.8920130764 * mass + 4190092.859566341
    elif mass < 8.715590732362662:
        return -314393.74017471925 * mass + 3912065.9600711716
    elif mass < 8.979681847143983:
        return -284899.89544761827 * mass + 3652487.0901141223
    elif mass < 9.251775186794292:
        return -258172.9215758934 * mass + 3410132.1602485017
    elif mass < 9.532113226729347:
        return -233953.25340609462 * mass + 3183858.3035202585
    elif mass < 9.820945789612473:
        return -212005.67606085283 * mass + 2972598.4860824393
    elif mass < 10.118530267983521:
        return -192117.04059527343 * mass + 2775356.475408798
    elif mass < 10.42513185363369:
        return -174094.1939521161 * mass + 2591202.1423872104
    elif mass < 10.741023773930646:
        return -157762.10311133554 * mass + 2419267.0751323723
    elif mass < 11.06648753530452:
        return -142962.15521672467 * mass + 2258740.483838777
    elif mass < 11.401813174111782:
        return -129550.61717061569 * mass + 2108865.3773591933
    elif mass < 11.747299515100543:
        return -117397.23973695925 * mass + 1968934.9934820938
    elif mass < 12.103254437707607:
        return -106383.99259577072 * mass + 1838289.4660695463
    elif mass < 12.469995150424586:
        return -96403.91806463679 * mass + 1716312.7133444645
    elif mass < 12.847848473477601:
        return -87360.09235456264 * mass + 1602429.5326492898
    elif mass < 13.237151130072446:
        return -79164.6842722866 * mass + 1496102.8879772783
    elif mass < 13.638250046464773:
        return -71738.10222743743 * mass + 1396831.37748537
    elif mass < 14.051502661122703:
        return -65008.221260554754 * mass + 1304146.869047045
    elif mass < 14.477277243257383:
        return -58909.68258489232 * mass + 1217612.2926927358
    elif mass < 14.915953221005326:
        return -53383.25883957604 * mass + 1136819.579530759
    elif mass < 15.367921519555008:
        return -48375.27888946114 * mass + 1061387.7374272011
    elif mass < 15.833584909519045:
        return -43837.10658551415 * mass + 990961.054370338
    elif mass < 16.31335836586238:
        return -39724.66842374193 * mass + 925207.4210497297
    elif mass < 16.807669437706377:
        return -35998.02551516624 * mass + 863816.764735992
    elif mass < 17.316958629338316:
        return -32620.98571012829 * mass + 806499.5870788775
    elif mass < 17.841679792765895:
        return -29560.752109917725 * mass + 752985.5989275265
    elif mass < 18.382300532166493:
        return -26787.604552143403 * mass + 703022.4457347527
    elif mass < 18.93930262059167:
        return -24274.61097653632 * mass + 656374.5175350609
    elif mass < 19.51318242929822:
        return -21997.365868049095 * mass + 612821.837884678
    elif mass < 20.104451370088384:
        return -19933.75323709776 * mass + 572159.0265245157
    elif mass < 20.71363635105343:
        return -18063.73183503036 * mass + 534194.3308734964
    elif mass < 21.3412802461267:
        return -16369.140518938324 * mass + 498748.721785933
    elif mass < 21.987942378864584:
        return -14833.521875538247 * mass + 465655.0493083184
    elif mass < 22.654199020886562:
        return -13441.962391212743 * mass + 434757.25445444183
    elif mass < 23.340643905418442:
        return -12180.947616004374 * mass + 405909.6332822564
    elif mass < 24.047888756396528:
        return -11038.230914917563 * mass + 378976.1498013189
    elif mass < 24.7765638336041:
        return -10002.71453190961 * mass + 353829.79447141325
    elif mass < 25.52731849432614:
        return -9064.341811481938 * mass + 330351.9852669377
    elif mass < 26.300821772022715:
        return -8213.999531154454 * mass + 308432.0084826039
    elif mass < 27.097762972536774:
        return -7443.429396312369 * mass + 287966.49664371257
    elif mass < 27.91885228836761:
        return -6745.1478378761585 * mass + 268858.94105872384
    elif mass < 28.764821431557436:
        return -6112.373333903076 * mass + 251019.2367157852
    elif mass < 29.63642428575506:
        return -5538.96054927282 * mass + 234363.25737670768
    elif mass < 30.53443757803772:
        return -5019.34065385514 * mass + 218812.45886509324
    elif mass < 31.459661571089846:
        return -4548.467239534732 * mass + 204293.50867755222
    elif mass < 32.41292077635544:
        return -4121.767310858169 * mass + 190737.94017148804
    elif mass < 33.395064688799785:
        return -3735.096873336325 * mass + 178081.8296986817
    elif mass < 34.40696854393511:
        return -3384.7006880894783 * mass + 166265.495162195
    elif mass < 35.44953409778489:
        return -3067.175801981311 * mass + 155233.21457503116
    elif mass < 36.52369043048187:
        return -2779.4384990565954 * mass + 144932.96329337286
    elif mass < 37.63039477421591:
        return -2518.6943523251293 * mass + 135316.16868532004
    elif mass < 38.77063336626942:
        return -2282.4110850403245 * mass + 126337.48107813275
    elif mass < 39.94542232790075:
        return -2098.830304819598 * mass + 119149.66772500594
    elif mass < 41.15580856985847:
        return -1886.2412854129125 * mass + 110574.58168060276
    elif mass < 42.402870725333756:
        return -1721.2251525174197 * mass + 103716.39177930114
    elif mass < 43.68772011118209:
        return -1558.5915916816336 * mass + 96757.97174053216
    elif mass < 45.01150171827101:
        return -1400.4085968484894 * mass + 89779.67112006168
    elif mass < 46.37539523183634:
        return -1267.9757671119032 * mass + 83750.65664387631
    elif mass < 47.78061608275621:
        return -1157.3487629808835 * mass + 78569.80448206994
    elif mass < 49.22841653067981:
        return -1047.9670308564878 * mass + 73296.4201529144
    elif mass < 50.720086779975944:
        return -948.9152231708367 * mass + 68376.38739686998
    elif mass < 52.256956129496:
        return -852.2676650968601 * mass + 63426.434415172145
    elif mass < 53.84039415717585:
        return -771.6413266114362 * mass + 59164.75518994806
    elif mass < 55.47181194053243:
        return -698.6369762907558 * mass + 55188.962723190736
    elif mass < 57.1526633141425:
        return -632.5330787055476 * mass + 51479.82283481699
    elif mass < 58.88444616522433:
        return -572.6793366195942 * mass + 48019.56213315388
    elif mass < 60.668703768476796:
        return -518.4840159751182 * mass + 44791.43434745292
    elif mass < 62.50702616136541:
        return -469.4137314874244 * mass + 41779.9596309389
    elif mass < 64.40105156108095:
        return -428.70207155685165 * mass + 39207.686490530454
    elif mass < 66.35246782443328:
        return -388.1554827750738 * mass + 36573.149005620304
    elif mass < 68.36301395198143:
        return -351.44119004559644 * mass + 34115.354301338746
    elif mass < 70.43448163774043:
        return -318.1430781657978 * mass + 31816.660841848618
    elif mass < 72.5687168658456:
        return -288.0456767502926 * mass + 29677.91564178936
    elif mass < 74.76762155559759:
        return -258.3598336364044 * mass + 27500.73324765288
    elif mass < 77.03315525635374:
        return -236.07609477009555 * mass + 25816.499419184136
    elif mass < 79.36733689377651:
        return -213.69788027666138 * mass + 24075.721432927
    elif mass < 81.77224656899482:
        return -191.6048703181 * mass + 22303.55636399969
    elif mass < 84.25002741228194:
        return -175.1308349925522 * mass + 20941.720795221983
    elif mass < 86.8028874929015:
        return -158.52447133121225 * mass + 19528.907705399888
    elif mass < 89.4331017868239:
        return -143.49062072130545 * mass + 18211.122915502798
    elif mass < 92.14301420406645:
        return -129.85589276250371 * mass + 16978.53657696608
    elif mass < 94.93503967746386:
        return -117.53733946730361 * mass + 15832.331460204688
    elif mass < 97.81166631473069:
        return -106.38621211242004 * mass + 14763.31224564384
    elif mass < 100.77545761573333:
        return -96.27220727177901 * mass + 13763.303578911187
    elif mass < 103.82905475694768:
        return -87.11809610786906 * mass + 12830.763433328146
    elif mass < 106.97517894513796:
        return -78.83277492961786 * mass + 11961.139319274958
    elif mass < 110.21663384235457:
        return -71.33396883599468 * mass + 11150.205667431943
    elif mass < 113.55630806441175:
        return -65.25968997331644 * mass + 10474.128501135436
    elif mass < 116.99717775507149:
        return -58.39214107599081 * mass + 9686.645525210588
    elif mass < 120.54230923822792:
        return -52.82258508484542 * mass + 9027.137875397693
    elif mass < 124.19486175045546:
        return -48.33882451770344 * mass + 8480.626537567978
    elif mass < 127.95809025635602:
        return -43.725775306179074 * mass + 7902.089208774394
    elif mass < 131.83534834921383:
        return -39.551609523690175 * mass + 7362.731228954252
    elif mass < 135.83009123954335:
        return -35.774718468081005 * mass + 6859.918666609693
    elif mass < 139.94587883419274:
        return -32.754930560282254 * mass + 6445.70371016537
    elif mass < 144.1863789087476:
        return -29.621013927287606 * mass + 6003.374484117547
    elif mass < 148.55537037606157:
        return -27.133515506848582 * mass + 5642.235489786828
    elif mass < 153.05674665382662:
        return -24.211271151611026 * mass + 5204.37643183128
    elif mass < 157.69451913418422:
        return -21.878122000081255 * mass + 4842.8025954939685
    elif mass < 162.47282075846914:
        return -20.0457000220736 * mass + 4550.067523571768
    elif mass < 167.39590970027152:
        return -18.37401625079183 * mass + 4275.345142828897
    elif mass < 172.46817316009944:
        return -16.851098356037333 * mass + 4017.9099444185154
    elif mass < 177.69413127502364:
        return -15.701958729294592 * mass + 3817.7722681209907
    elif mass < 183.07844114678812:
        return -14.662876310591198 * mass + 3631.325700812953
    elif mass < 188.62590099197692:
        return -14.655764398061297 * mass + 3630.4831542698353
    elif mass < 194.3414544179348:
        return -16.798722448739376 * mass + 4041.2264183852003
    elif mass < 198.24771765173531:
        return 0 * mass + 0
    # NOTE: a second, byte-identical `elif mass < 198.24771765173531`
    # branch was removed here -- it was unreachable dead code left by
    # the table generator.
    else:
        return 0
|
juzikongREPO_NAMEphotGalIMFPATH_START.@photGalIMF_extracted@photGalIMF-main@simulation_results_from_galaxy_evol@example@igimf_epoch_49.py@.PATH_END.py
|
{
"filename": "uniformcausticsampling.py",
"repo_name": "rpoleski/MulensModel",
"repo_path": "MulensModel_extracted/MulensModel-master/source/MulensModel/uniformcausticsampling.py",
"type": "Python"
}
|
import numpy as np
import math
import warnings
from MulensModel.utils import Utils
class UniformCausticSampling(object):
"""
Uniform sampling of a binary lens caustic.
Note that calculations take some time for given (s, q).
Keep that in mind, when optimizing your fitting routine.
Arguments :
s: *float*
Separation of the two lens components relative to
the Einstein ring size.
q: *float*
Mass ratio of the two lens components.
n_points: *int*
Number of points used for internal integration.
Default value should work fine.
Instead of standard parameters (*t_0*, *u_0*, *t_E*, *alpha*), here
we use four other parameters: two epochs of caustic crossing
(*t_caustic_in*, *t_caustic_out*) and two curvelinear coordinates of
caustic crossing (*x_caustic_in*, *x_caustic_out*).
The curvelinear coordinate, *x_caustic*,
is defined so that going from 0 to 1 draws all caustics
for given separation and mass ratio. We use 0-1 range, which is
a different convention than
in the papers cited below (we also use different symbols for
epochs of caustic crossing and curvelinear coordinates).
For a wide topology (i.e., 2 caustics), there
is a value between 0 and 1 (called ``x_caustic_sep``) which separates
the caustics and a trajectory
exists only if *x_caustic_in* and *x_caustic_out* correspond to
the same caustic, i.e., both are smaller than ``x_caustic_sep`` or
both are larger than ``x_caustic_sep``. For a close topology
(i.e., 3 caustics), there are two such separating values.
For description of the curvelinear coordinates, see:
`Cassan A. 2008 A&A 491, 587 "An alternative parameterisation for
binary-lens caustic-crossing events"
<https://ui.adsabs.harvard.edu/abs/2008A%26A...491..587C/abstract>`_
`Cassan A. et al. 2010 A&A 515, 52
"Bayesian analysis of caustic-crossing microlensing events"
<https://ui.adsabs.harvard.edu/abs/2010A%26A...515A..52C/abstract>`_
In order to visualize the curvelinear coordinates,
you can run a code like:
.. code-block:: python
import matplotlib.pyplot as plt
import numpy as np
sampling = UniformCausticSampling(s=1.1, q=0.3)
color = np.linspace(0., 1., 200)
points = [sampling.caustic_point(c) for c in color]
x = [p.real for p in points]
y = [p.imag for p in points]
plt.scatter(x, y, c=color)
plt.axis('equal')
plt.colorbar()
plt.show()
This will show an intermediate topology. Change *s=1.1* to *s=2.*
to plot a wide topology, or to *s=0.7* to plot a close topology.
To be specific, the central caustics are plotted counter-clockwise
and *x_caustic=0.* corresponds to right-hand point where the caustic
crosses the X-axis. For a wide topology, the planetary caustic is
plotted in a similar way. For a close topology, the lower planetary
caustic is plotted counter-clockwise and the upper planetary caustic
is symmetric, thus plotted clockwise. For planetary caustics in
a close topology, the zero-point of *x_caustic* values is defined
in a very complicated way, however it is a smooth function of
*s* and *q*.
For more advanced fitting of binary lens events see:
`Kains N. et al. 2009 MNRAS 395, 787
"A systematic fitting scheme for caustic-crossing microlensing events"
<https://ui.adsabs.harvard.edu/abs/2009MNRAS.395..787K/abstract>`_
`Kains N. et al. 2012 MNRAS 426, 2228 "A Bayesian algorithm for model
selection applied to caustic-crossing binary-lens microlensing events"
<https://ui.adsabs.harvard.edu/abs/2012MNRAS.426.2228K/abstract>`_
"""
    def __init__(self, s, q, n_points=10000):
        # Store the lens geometry and integration resolution, then
        # precompute everything needed for the curvelinear
        # parameterization: caustic topology, phi grid, cumulative
        # caustic-length integrals, and cusp (inflection) locations.
        self._s = s
        self._q = q
        self._n_points = n_points
        # Number of caustics (1, 2, or 3) encodes the topology
        # (resonant/intermediate, wide, close respectively).
        self._n_caustics = Utils.get_n_caustics(s=self.s, q=self.q)
        # Order matters: _integrate() needs the phi grid and
        # _find_inflections_and_correct() needs the integrals.
        self._get_phi()
        self._integrate()
        self._find_inflections_and_correct()
def _get_phi(self):
"""
Prepare internal variables:
self._phi - gives all phi values used for integration
self._d_phi - step between adjacent phi values
"""
phi_begin = 0.
phi_end = 2. * np.pi - 1e-14
if self._n_caustics == 1:
phi_end = 4. * np.pi - 1e-14
self._d_phi = (phi_end - phi_begin) / self._n_points
self._phi = np.linspace(phi_begin, phi_end, self._n_points)
def _get_indexes_of_inflection_points(self, values):
"""
Find inflection points in give tabulated function.
"""
diff_ = values[1:] - values[:-1]
diff = np.concatenate(([diff_[-2], diff_[-1]], diff_))
out = []
for i in range(1, len(diff)-1):
if diff[i-1] > diff[i] and diff[i+1] > diff[i]:
# parabola = np.polyfit([-1., 0., 1.], diff[i-1:i+2], 2)
# shift = -0.5 * parabola[1] / parabola[0]
# 1) use it
# 2) if shift > 0.5 or < -0.5 than use the other triple
# to calculate it
out.append(i)
return out
def _zeta(self, z):
"""
Apply lens equation in complex coordinates and
shift to center of mass coordinates.
"""
z_bar = z.conjugate()
zeta = -z_bar + (1./z + self.q/(z+self.s)) / (1. + self.q)
zeta -= self.s * self.q / (1. + self.q)
return zeta
def _find_nearest_index(self, array, value):
"""
returns element of array that is closest to value
"""
idx = (np.abs(array - value)).argmin()
return array[idx]
    def _critical_curve(self, phi):
        """
        Calculate a point on critical curve - see eq. 6 in Cassan 2008.

        Returns the 4 complex roots of the critical-curve polynomial for
        the given *phi*. For a close topology (3 caustics) the roots are
        re-ordered so that each index keeps following the same branch as
        in the previous call (state in self._critical_curve_previous).
        """
        # Coefficients in increasing order of power (constant term
        # first), as expected by polyroots().
        coeffs = [0., 0., 0., 2.*self.s, 1.]
        exp_i_phi = np.exp(1j * phi)
        coeffs[0] = -self.s * self.s * exp_i_phi / (1. + self.q)
        coeffs[1] = -2. * self.s * exp_i_phi / (1. + self.q)
        coeffs[2] = self.s * self.s - exp_i_phi
        roots = np.polynomial.polynomial.polyroots(np.array(coeffs))
        if self._n_caustics == 3:
            if self._critical_curve_previous is not None:
                # This makes sure we're following right branch.
                # polyroots() gives no stable ordering, so match each
                # root to the closest root from the previous phi step.
                new_roots = np.array([0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j])
                for (i, v) in enumerate(self._critical_curve_previous):
                    new_roots[i] = self._find_nearest_index(roots, v)
                roots = new_roots
            self._critical_curve_previous = roots
        return roots
def _dz_dphi(self, z):
"""
Eq. 11 from Cassan (2008)
"""
z_plus_d = z + self.s
z_plus_d_2 = z_plus_d**2
z_plus_d_3 = z_plus_d_2 * z_plus_d
q_z_2 = self.q * z * z
value = (z_plus_d_2 + q_z_2) * z_plus_d * z / (z_plus_d_3 + z * q_z_2)
return 0.5j * value
def _dz_bar_dphi(self, z_bar):
"""
almost the same as eq. 11 from Cassan (2008)
"""
return -self._dz_dphi(z_bar)
def _dzeta_dphi(self, z, phi):
"""
Eq. (9) and (11) from Cassan (2008)
"""
dz_dphi_ = self._dz_dphi(z)
dz_bar_dphi_ = self._dz_bar_dphi(np.conjugate(z))
return dz_dphi_ + np.exp(1j * phi) * dz_bar_dphi_
    def _caustic_and_trajectory(self, zetas, u_0, alpha,
                                sum_use, flip, caustic):
        """
        check if caustic crosses the line defined by 2 points

        Finds intersections of the source trajectory (impact parameter
        *u_0*, angle *alpha* in degrees) with the polyline of caustic
        points *zetas*, and converts each intersection into a
        curvelinear x_caustic coordinate using the cumulative length
        array *sum_use*. Returns a (possibly empty) list of x_caustic
        values for caustic number *caustic*; *flip* reverses the
        direction of the mapping.
        """
        # Trajectory direction; the added pi matches the alpha
        # convention used elsewhere in this class.
        cos_a = math.cos(alpha * np.pi / 180. + np.pi)
        sin_a = math.sin(alpha * np.pi / 180. + np.pi)
        # Endpoints of consecutive caustic polyline segments:
        x_1 = zetas[:-1].real
        y_1 = zetas[:-1].imag
        x_2 = zetas[1:].real
        y_2 = zetas[1:].imag
        dx = x_2 - x_1
        dy = y_2 - y_1
        # Parameter along the trajectory where it intersects the
        # (infinite) line through each segment:
        tau = (x_1 * y_2 - x_2 * y_1 + u_0 * (dy * sin_a + dx * cos_a))
        tau /= dy * cos_a - dx * sin_a
        x_cross = tau * cos_a - u_0 * sin_a
        # Check if crossing point is between x_1 and x_2:
        index = np.where((x_cross - x_1) * (x_cross - x_2) <= 0.)[0]
        # Earlier we were using " < 0.", but this failed sometimes for
        # trajectory going exactly through the cusp.
        # Linearly interpolate the cumulative caustic length at each
        # crossing and normalize to [0, 1] within this caustic:
        fraction = (x_cross[index] - x_1[index]) / (x_2[index] - x_1[index])
        sum_ = sum_use[index] * (1. - fraction) + sum_use[index+1] * fraction
        in_caustic = sum_ / sum_use[-1]
        # Map the within-caustic position onto the global x_caustic
        # interval stored in self._which_caustic:
        if self._n_caustics == 3 and caustic > 1:
            begin = self._which_caustic[caustic-1]
            end = self._which_caustic[caustic]
        else:
            end = 0.5 * (self._which_caustic[caustic] +
                         self._which_caustic[caustic-1])
            if flip:
                begin = self._which_caustic[caustic]
            else:
                begin = self._which_caustic[caustic-1]
        x_caustic = begin + in_caustic * (end - begin)
        return x_caustic.tolist()
    def _integrate(self):
        """
        Main integration for Cassan (2008) parameterization.
        It sets internal variables:
        - self._z_all
        - self._sum_1
        - self._sum_2
        - self._z_sum_1
        - self._z_sum_2
        - self._z_index_sum_1
        - self._z_index_sum_2

        The *_sum_* arrays hold the cumulative caustic length as a
        function of phi for the first and (if present) second caustic;
        the *_z_* arrays record which critical-curve branch contributes
        at each phi.
        """
        size = (self._n_points, 4)
        # All 4 critical-curve roots for every phi value:
        self._z_all = np.zeros(size, dtype=np.complex128)
        self._sum_1 = np.zeros(self._n_points)
        self._z_sum_1 = np.zeros(self._n_points, dtype=np.complex128)
        self._z_index_sum_1 = np.zeros(self._n_points, dtype=int)
        if self._n_caustics > 1:
            self._sum_2 = np.zeros(self._n_points)
            self._z_sum_2 = np.zeros(self._n_points, dtype=np.complex128)
            self._z_index_sum_2 = np.zeros(self._n_points, dtype=int)
        # Reset the branch-following state used by _critical_curve():
        self._critical_curve_previous = None
        for (i, phi) in enumerate(self._phi):
            self._z_all[i] = self._critical_curve(phi)
            if self._n_caustics == 1:
                # Resonant topology: branch index advances every pi.
                self._z_index_sum_1[i] = int(phi / np.pi)
                self._z_sum_1[i] = self._z_all[i, self._z_index_sum_1[i]]
                abs_1 = abs(self._dzeta_dphi(self._z_sum_1[i], phi))
                # Cumulative sum of |dzeta/dphi| d_phi; note that at
                # i == 0 this reads self._sum_1[-1], which is still 0.
                self._sum_1[i] = self._sum_1[i-1] + abs_1 * self._d_phi
            if self._n_caustics > 1:
                if self._n_caustics == 2:
                    self._z_index_sum_2[i] = int(phi / np.pi)
                    self._z_index_sum_1[i] = self._z_index_sum_2[i] + 2
                if self._n_caustics == 3:
                    # Choose the branch from the sign of the imaginary
                    # part of 1/conj(z) - z; the two middle roots must
                    # have opposite signs, otherwise something is badly
                    # wrong and we bail out with diagnostics.
                    signs = 1. / np.conjugate(self._z_all[i])
                    signs = (signs - self._z_all[i]).imag
                    if signs[1] * signs[2] >= 0.:
                        args = [
                            self.s, self.q, self._n_points, i, self._z_all[i]]
                        raise ValueError("Critical error: {:}".format(args))
                    if signs[1] < 0.:
                        self._z_index_sum_2[i] = 2
                    else:
                        self._z_index_sum_2[i] = 1
                self._z_sum_1[i] = self._z_all[i, self._z_index_sum_1[i]]
                self._z_sum_2[i] = self._z_all[i, self._z_index_sum_2[i]]
                abs_1 = abs(self._dzeta_dphi(self._z_sum_1[i], phi))
                abs_2 = abs(self._dzeta_dphi(self._z_sum_2[i], phi))
                self._sum_1[i] = self._sum_1[i-1] + abs_1 * self._d_phi
                self._sum_2[i] = self._sum_2[i-1] + abs_2 * self._d_phi
def _find_inflections_and_correct(self):
    """
    Find inflection points of s(phi). In the case of close configuration
    also correct the phase of planetary caustic - Do we do it in current
    version XXX ??? - yes because of self._which_caustic
    """
    indexes = self._get_indexes_of_inflection_points(self._sum_1)
    # Inflection positions expressed as fractions of the phi grid.
    value_1 = [float(i)/self._n_points for i in indexes]
    self._inflections_fractions = {1: value_1}
    if self._n_caustics == 1:
        # A single caustic spans the whole [0, 1] curvelinear range.
        self._which_caustic = np.array([0., 1.])
        return
    # XXX DO WE NEED PART BELOW?
    """
    cusps_z_1 = [self._z_sum_1[i] for i in indexes]
    if self._n_caustics == 2:
        add = self._z_all[indexes[1], 2]
    else:
        add = self._z_all[indexes[1], 3]
    cusps_z_1 = [add] + cusps_z_1
    cusps_z_1 += [cusps_z_1[0].conjugate()]
    cusps_zeta_1 = [self._zeta(z) for z in cusps_z_1]

    indexes = self._get_indexes_of_inflection_points(self._sum_2)
    value_2 = [float(i)/self._n_points for i in indexes]
    self._inflections_fractions[2] = value_2
    cusps_z_2 = [self._z_sum_2[i] for i in indexes]
    if self._n_caustics == 2:
        cusps_z_2 = [self._z_all[indexes[1], 0]] + cusps_z_2
    cusps_z_2 += [cusps_z_2[0].conjugate()]
    cusps_zeta_2 = [self._zeta(z) for z in cusps_z_2]
    """
    # Curvelinear length of each caustic. _sum_1 integrates over half of
    # the symmetric central caustic, hence the factor of 2 (same for the
    # planetary caustic in the wide, 2-caustic topology).
    length_1 = 2. * self._sum_1[-1]
    lengths_sum = length_1
    lengths = [length_1]
    if self._n_caustics > 1:
        length_2 = self._sum_2[-1]
        if self._n_caustics == 2:
            length_2 *= 2.
        lengths_sum += length_2
        lengths += [length_1 + length_2]
        if self._n_caustics == 3:
            # The two planetary caustics have equal lengths.
            length_3 = length_2
            lengths_sum += length_2
            lengths += [lengths[-1] + length_3]
    # Cumulative per-caustic boundaries, normalized to [0, 1].
    self._which_caustic = np.array([0.] + lengths) / lengths_sum
def get_standard_parameters(self, x_caustic_in, x_caustic_out,
                            t_caustic_in, t_caustic_out):
    """
    Convert curvelinear (Cassan 2008) parameters into the standard
    binary-lens parameters (``t_0``, ``u_0``, ``t_E``, ``alpha``; see
    :py:class:`~MulensModel.modelparameters.ModelParameters`).

    This function quite frequently raises ``ValueError``, because not
    every (``s``, ``q``, ``x_caustic_in``, ``x_caustic_out``) combination
    corresponds to a real trajectory. Returned values follow the
    conventions of :py:class:`~MulensModel.model.Model`.

    Keywords :
        x_caustic_in: *float*
            Curvelinear coordinate of caustic entrance; in (0, 1).

        x_caustic_out: *float*
            Curvelinear coordinate of caustic exit; in (0, 1).

        t_caustic_in: *float*
            Epoch of caustic entrance.

        t_caustic_out: *float*
            Epoch of caustic exit.

    Returns :
        parameters: *dict*
            Keys are ``t_0``, ``u_0``, ``t_E``, and ``alpha``.
    """
    inputs_ok = (
        0. <= x_caustic_in <= 1. and 0. <= x_caustic_out <= 1. and
        t_caustic_in < t_caustic_out and x_caustic_in != x_caustic_out)
    if not inputs_ok:
        msg = 'Wrong input in get_standard_parameters(): {:} {:} {:} {:}'
        raise ValueError(msg.format(x_caustic_in, x_caustic_out,
                                    t_caustic_in, t_caustic_out))

    caustic_in = self.which_caustic(x_caustic_in)
    caustic_out = self.which_caustic(x_caustic_out)
    if caustic_in != caustic_out:
        message = (
            "Function get_standard_parameters() got curvelinear caustic "
            "coordinates on different caustics.\n"
            "x_caustic_in = {:} is on caustic {:} and\n"
            "x_caustic_out = {:} is on caustic {:}").format(
                x_caustic_in, caustic_in, x_caustic_out, caustic_out)
        raise ValueError(message)

    zeta_in = self.caustic_point(x_caustic_in)
    zeta_out = self.caustic_point(x_caustic_out)
    chord = zeta_out - zeta_in
    chord_length = abs(chord)

    # Impact parameter: 2-D cross product of the two caustic points,
    # normalized by the chord length.
    u_0 = (zeta_out.real * zeta_in.imag -
           zeta_out.imag * zeta_in.real) / chord_length

    # Trajectory angle in degrees, wrapped to [0, 360).
    if chord.real == 0.:
        alpha = 90. * np.sign(chord.imag)
    else:
        alpha = np.arctan(chord.imag / chord.real) * 180. / np.pi
        if chord.real < 0.:
            alpha += 180.
    alpha += 180.
    if alpha < 0.:
        alpha += 360.
    elif alpha > 360.:
        alpha -= 360.

    t_E = (t_caustic_out - t_caustic_in) / chord_length
    t_0 = (0.5 * (t_caustic_out + t_caustic_in) +
           0.5 * (t_caustic_in - t_caustic_out) *
           ((zeta_out + zeta_in) / chord).real)
    return {'t_0': t_0, 'u_0': u_0, 't_E': t_E, 'alpha': alpha}
def get_x_in_x_out(self, u_0, alpha):
    """
    Find the curvelinear coordinates of all points where the given
    source trajectory crosses the caustic.

    Parameters :
        u_0: *float*
            Impact parameter of the source trajectory.

        alpha: *float*
            Angle defining the source trajectory.

    Returns :
        x_caustic_points: *list* of *float*
            Caustic coordinates of the crossing points; length is
            0, 2, 4, or 6. With 4 or 6 points, only some pairs
            correspond to real trajectories.
    """
    crossings = []
    # Both caustic halves are searched: the tabulated one and its
    # mirror image (complex conjugate).
    zetas = self._zeta(self._z_sum_1)
    crossings.extend(self._caustic_and_trajectory(
        zetas, u_0, alpha, self._sum_1, flip=False, caustic=1))
    crossings.extend(self._caustic_and_trajectory(
        zetas.conjugate(), u_0, alpha, self._sum_1, flip=True, caustic=1))
    if self._n_caustics > 1:
        zetas = self._zeta(self._z_sum_2)
        crossings.extend(self._caustic_and_trajectory(
            zetas, u_0, alpha, self._sum_2, flip=False, caustic=2))
        crossings.extend(self._caustic_and_trajectory(
            zetas.conjugate(), u_0, alpha, self._sum_2, flip=True,
            caustic=self._n_caustics))
    if len(crossings) not in (0, 2, 4, 6):
        warnings.warn(
            'This is strange: there are {:} points '.format(len(crossings)) +
            'in output of UniformCausticSampling.get_x_in_x_out() and ' +
            'expected number is 0, 2, 4, or 6. You may contact code ' +
            'authors and provide following numbers: \n' +
            repr(self._s) + repr(self._q) + repr(self._n_points) +
            repr(u_0) + repr(alpha), UserWarning)
    return crossings
def get_uniform_sampling(self, n_points, n_min_for_caustic=10,
                         caustic=None):
    """
    Draw (x_caustic_in, x_caustic_out) pairs uniformly with respect to
    the Jacobian of Eq. 23 of:

    `Cassan A. et al. 2010 A&A 515, 52
    "Bayesian analysis of caustic-crossing microlensing events"
    <https://ui.adsabs.harvard.edu/abs/2010A%26A...515A..52C/abstract>`_

    subject to the entrance/exit condition given under Eq. 27 of that
    paper (x_caustic_in must be a caustic entrance and x_caustic_out a
    caustic exit). Points do not repeat. Useful for building a starting
    distribution for model fitting -- see bottom panel of Fig. 1 in
    Cassan et al. (2010).

    Parameters :
        n_points: *int*
            Total number of pairs to return.

        n_min_for_caustic: *int*
            Minimum number of pairs drawn per caustic.

        caustic: *int* or *None*
            Restrict sampling to one caustic (*1*, *2*, or *3*; must be
            <= :py:attr:`n_caustics`). *None* samples all caustics.

    Returns :
        x_caustic_in: *np.ndarray*
            Randomly drawn entrance points.

        x_caustic_out: *np.ndarray*
            Corresponding randomly drawn exit points.
    """
    if caustic is not None:
        if not isinstance(caustic, int) or caustic < 1 or \
                caustic > self._n_caustics:
            raise ValueError(
                'Wrong caustic in get_uniform_sampling(): ' + str(caustic))
    if n_min_for_caustic * self._n_caustics > n_points:
        msg = 'wrong input for {:} caustics: {:} {:}'
        raise ValueError(msg.format(
            self._n_caustics, n_points, n_min_for_caustic))

    oversampling = 10  # internally draw this many times more candidates
    if caustic is None:
        per_caustic = self._select_n_points(n_points, n_min_for_caustic)
        caustics = list(range(1, self._n_caustics + 1))
    else:
        per_caustic = [n_points]
        caustics = [caustic]

    all_in = []
    all_out = []
    for (caustic_, n_) in zip(caustics, per_caustic):
        # Retry with a larger candidate pool until the draw succeeds.
        factor = 1.
        while True:
            result = self._get_uniform_sampling_one_caustic(
                caustic_, n_, oversampling * factor)
            if result[0]:
                break
            factor *= 1.1 * result[1]
        all_in.extend(result[1].tolist())
        all_out.extend(result[2].tolist())
    return (np.array(all_in), np.array(all_out))
def _get_uniform_sampling_one_caustic(self, caustic, n_points,
increase_factor=10):
"""
Get uniform sampling for a single caustic.
"""
min_factor = 3
n_all = int(n_points * increase_factor) + 1
begin = self._which_caustic[caustic-1]
scale = self._which_caustic[caustic] - begin
x_1 = np.random.rand(n_all) * scale + begin
x_2 = np.random.rand(n_all) * scale + begin
jacobian = np.zeros(n_all)
for (i, (x_1_, x_2_)) in enumerate(zip(x_1, x_2)):
jacobian[i] = self.jacobian(x_1_, x_2_)
index = np.where(jacobian > 0.)[0]
if len(index) < n_points * min_factor:
return (False, n_points * min_factor / len(index))
jacobian_masked = jacobian[index]
probabilities = jacobian_masked / np.sum(jacobian_masked)
out = np.random.choice(index, size=n_points, replace=False,
p=probabilities)
return (True, x_1[out], x_2[out])
def jacobian(self, x_caustic_in, x_caustic_out):
    """
    Evaluate Eq. 23 of Cassan et al. (2010), applying the trajectory
    condition given under Eq. 27 of that paper.

    Parameters :
        x_caustic_in: *float*
            Point of caustic entrance.

        x_caustic_out: *float*
            Point of caustic exit.

    Returns :
        jacobian: *float*
            Value of the Jacobian; *0.* if the trajectory does not
            exist.
    """
    validity = self._check_valid_trajectory(x_caustic_in, x_caustic_out)
    if not validity[0]:
        return 0.
    (zeta_in, zeta_out, dzeta_dphi_in, dzeta_dphi_out) = validity[1:]

    def wedge(a, b):
        # 2-D cross product of two complex numbers seen as vectors.
        return a.real * b.imag - a.imag * b.real

    chord = zeta_out - zeta_in
    numerator = (np.abs(wedge(chord, dzeta_dphi_in)) *
                 np.abs(wedge(chord, dzeta_dphi_out)))
    return numerator / np.abs(chord)**4
def _select_n_points(self, n_points, n_min_for_caustic):
"""
divide n_points into caustics
"""
if self._n_caustics == 1:
out = [n_points]
elif self._n_caustics == 2:
area_1 = (self._which_caustic[1] - self._which_caustic[0])**2
area_2 = (self._which_caustic[2] - self._which_caustic[1])**2
fraction = area_1 / (area_1 + area_2)
n_1 = int(fraction * n_points + 0.5)
if n_1 < n_min_for_caustic:
n_1 = n_min_for_caustic
n_2 = n_points - n_1
if n_2 < n_min_for_caustic:
n_2 = n_min_for_caustic
n_1 = n_points - n_2
out = [n_1, n_2]
elif self._n_caustics == 3:
area_1 = (self._which_caustic[1] - self._which_caustic[0])**2
area_2 = (self._which_caustic[2] - self._which_caustic[1])**2
area_3 = area_2
fraction_2 = area_2 / (area_1 + area_2 + area_3)
n_2 = int(fraction_2 * n_points + 0.5)
if n_2 < n_min_for_caustic:
n_2 = n_min_for_caustic
n_3 = n_2
n_1 = n_points - n_2 - n_3
if n_1 < n_min_for_caustic:
n_1 = n_min_for_caustic
n_2 = (n_points - n_1) // 2
n_3 = n_points - n_1 - n_2
out = [n_1, n_2, n_3]
else:
raise ValueError('strange error: {:}'.format(self._n_caustics))
return out
def check_valid_trajectory(self, x_caustic_in, x_caustic_out):
    """
    Tell whether (x_caustic_in, x_caustic_out) defines an existing
    trajectory. The obvious failure is when both caustic points lie on
    the same fold, but other impossible pairs exist as well.

    Parameters :
        x_caustic_in: *float*
            Coordinate of putative caustic entrance.

        x_caustic_out: *float*
            Coordinate of putative caustic exit.

    Returns :
        check: *bool*
            *True* if the input defines a trajectory, *False* otherwise.
    """
    result = self._check_valid_trajectory(x_caustic_in, x_caustic_out)
    return result[0]
def _check_valid_trajectory(self, x_caustic_in, x_caustic_out):
"""
Check if given parameters define real trajectory.
Returns a list with first element being True/False
"""
if self._n_caustics > 1:
caustic_in = self.which_caustic(x_caustic_in)
caustic_out = self.which_caustic(x_caustic_out)
if caustic_in != caustic_out:
return [False]
zeta_in = self.caustic_point(x_caustic_in)
dzeta_dphi_in = self._last_dzeta_dphi
zeta_out = self.caustic_point(x_caustic_out)
dzeta_dphi_out = self._last_dzeta_dphi
# Eq. 27 from Cassan+2010:
n_t = (zeta_out - zeta_in) / np.abs(zeta_out - zeta_in)
# Eq. 26 from Cassan+2010:
n_c_in = 1j * dzeta_dphi_in / np.abs(dzeta_dphi_in)
n_c_out = 1j * dzeta_dphi_out / np.abs(dzeta_dphi_out)
condition_in = n_c_in.real * n_t.real + n_c_in.imag * n_t.imag
condition_out = n_c_out.real * n_t.real + n_c_out.imag * n_t.imag
if condition_in < 0. and condition_out > 0.:
return [True, zeta_in, zeta_out, dzeta_dphi_in, dzeta_dphi_out]
else:
return [False]
def _mirror_normalize_to_0_1(self, x, x_min=0., x_max=1.):
"""
Normalizes input to 0-1 range but in special way,
which considers the middle point.
"""
if x < x_min or x > x_max:
msg = "problem in _mirror_normalize: {:} {:} {:}"
raise ValueError(msg.format(x, x_min, x_max))
middle = (x_min + x_max) / 2.
if x < middle:
xx = (x - x_min) / (middle - x_min)
return (xx, False)
else:
xx = (x_max - x) / (x_max - middle)
return (xx, True)
def caustic_point(self, x_caustic):
    """
    Calculate caustic position corresponding to given x_caustic.

    As a side effect, sets *self._last_dzeta_dphi* to the (unit-length)
    local caustic direction at the returned point; that value is read
    by _check_valid_trajectory().

    Keywords :
        x_caustic: *float*
            Curvelinear coordinate of the point considered.
            Has to be in 0-1 range.

    Returns :
        point: *numpy.complex128*
            Caustic point in complex coordinates.
    """
    caustic = self.which_caustic(x_caustic)
    if self._n_caustics < 3 or caustic == 1:
        # Symmetric caustic: map the coordinate onto one half of the
        # tabulated curve and remember whether to mirror the result.
        (fraction_in_caustic, flip) = self._mirror_normalize_to_0_1(
            x_caustic, self._which_caustic[caustic-1],
            self._which_caustic[caustic])
    else:
        in_caustic = x_caustic - self._which_caustic[caustic-1]
        diff = (
            self._which_caustic[caustic] - self._which_caustic[caustic-1])
        fraction_in_caustic = in_caustic / diff
        flip = False
    if caustic == 1:
        sum_use = self._sum_1
        z_use = self._z_sum_1
    else:
        sum_use = self._sum_2
        z_use = self._z_sum_2
    # Translate the arc-length fraction into a phi value, then into a
    # critical-curve point and finally a caustic point.
    sum_ = fraction_in_caustic * sum_use[-1]
    phi_interp = np.interp([sum_], sum_use, self._phi)[0]
    # Interpolate the real and imaginary parts separately. This works on
    # every numpy version and gives the same result as complex-valued
    # np.interp (supported since numpy 1.12, which interpolates both
    # parts). The previous code switched on a parsed np.__version__,
    # which crashed for versions like "1.26.0rc1" because
    # int("0rc1") raises ValueError.
    temp_real = np.interp([phi_interp], self._phi, z_use.real)[0]
    temp_imag = np.interp([phi_interp], self._phi, z_use.imag)[0]
    zeta = self._zeta(temp_real+1.j*temp_imag)
    # XXX the calculation of dzeta_dphi should not use index
    # XXX and should only be done if really needed
    index = np.argsort(np.abs(phi_interp-self._phi))[0]
    zeta_1 = self._zeta(z_use[index])
    index_ = index + 1
    if index_ == self._n_points:
        index_ = 0  # wrap around at the end of the tabulated curve
    zeta_2 = self._zeta(z_use[index_])
    if flip or caustic == 3:
        # Mirrored point: conjugate both the point and the direction.
        zeta = zeta.conjugate()
        dzeta = zeta_2.conjugate() - zeta_1.conjugate()
    else:
        dzeta = zeta_1 - zeta_2
    dzeta /= np.abs(dzeta)
    self._last_dzeta_dphi = dzeta
    return zeta
def which_caustic(self, x_caustic):
    """
    Indicates on which caustic given point is.

    Keywords :
        x_caustic: *float*
            Curvelinear coordinate to be checked

    Returns :
        i_caustic: *int*
            Number indicating the caustic:

            ``1`` - central caustic,

            ``2`` - planetary caustic; for close configuration it is
            the lower of the two planetary caustics,

            ``3`` - upper planetary caustic.
    """
    if x_caustic > 1.:
        raise ValueError('Got x_caustic > 1 : {:}'.format(x_caustic))
    if x_caustic < 0.:
        raise ValueError('Got x_caustic < 0 : {:}'.format(x_caustic))
    if self._n_caustics == 1:
        return 1
    # _which_caustic holds cumulative caustic boundaries in [0, 1].
    index = np.searchsorted(self._which_caustic, x_caustic)
    if index > 0:
        return index
    if x_caustic == 0.:
        return 1
    fmt = 'which_caustic() got {:} and internally had {:}'
    raise ValueError(fmt.format(x_caustic, self._which_caustic))
@property
def n_caustics(self):
    """
    *int*

    Number of caustics: *1* for resonant topology, *2* for wide topology,
    or *3* for close topology.
    """
    return self._n_caustics

@property
def s(self):
    """
    *float*

    Separation of the two lens components relative to Einstein ring size.
    """
    return self._s

@property
def q(self):
    """
    *float*

    Mass ratio.
    """
    return self._q
|
rpoleskiREPO_NAMEMulensModelPATH_START.@MulensModel_extracted@MulensModel-master@source@MulensModel@uniformcausticsampling.py@.PATH_END.py
|
{
"filename": "make_glass.py",
"repo_name": "pec27/lizard",
"repo_path": "lizard_extracted/lizard-master/example_scripts/make_glass.py",
"type": "Python"
}
|
"""
Script to use the P^3M solver to make a glass file
"""
from __future__ import print_function
from lizard.p3m import CubicPeriodicForceSplit
import numpy as np
from lizard.ngb_kernel import *
from numpy.random import RandomState
from scipy.spatial import cKDTree
from lizard.grid import *
from lizard.log import VerboseTimingLog
import cPickle as pickle # to save the file
glass_size = 64
lattice_end_frac = 0.85 # Stop when the minimum distance has reached this fraction of the lattice spacing
max_iterations = 1000
seed = 123
out = 'glass%d.dat'%glass_size
log = VerboseTimingLog(filename='glass.log', also_stdout=False, insert_timings=True)
npts = glass_size**3
rs = RandomState(seed=seed)
pos = (np.reshape(rs.rand(3*npts), (3,npts)) + np.reshape(np.mgrid[:glass_size,:glass_size,:glass_size], (3,npts))).T * (1.0/glass_size)
wts = np.ones(npts)
rcrit = 3.0/ glass_size # good guess for splitting scale
r_soft = 0.3/glass_size # 0.3 x interparticle spacing
steps_per_min_dist = 5 # Don't calculate the minimum distance between points every step
min_dist = 1.0
fs = CubicPeriodicForceSplit(rcrit, 500, r_soft=r_soft, log=log)
for i in range(max_iterations):
pairs, accel_short = fs.short_force(wts, pos)
accel_long = fs.long_force(wts, pos)
accel = accel_short + accel_long
max_accel = sqrt(square(accel).sum(1).max())
rms_accel = sqrt(square(accel).sum(1).mean())
rms_short = sqrt(square(accel_short).sum(1).mean())
rms_long = sqrt(square(accel_long).sum(1).mean())
dt0 = 1.5e-3 / sqrt(glass_size)
dt = dt0*(max_accel**(-0.6))
print('Timestep', i, 'Maximum acceleration', max_accel, 'timestep', dt, 'RMS',rms_accel, 'RMS short', rms_short, 'RMS long', rms_long, file=log)
print('Timestep', i, 'Maximum acceleration', max_accel, 'timestep', dt, 'RMS',rms_accel, 'RMS short', rms_short, 'RMS long', rms_long)
vel = -accel # run gravity 'backwards' to make glass, hubble drag
pos = pos + vel*dt
pos = pos - floor(pos) # restrict to [0,1)
if i%steps_per_min_dist==0:
# find the minimum distance between points (ignore repeats)
tree = cKDTree(pos)
dist, idx = tree.query(pos,k=2)
min_dist = dist[:,1].min() * glass_size # minimum distance as a fraction of lattice spacing
print('Nearest pair, fraction=', min_dist, 'of lattice spacing', file=log)
print('Nearest pair, fraction=', min_dist, 'of lattice spacing')
if min_dist>lattice_end_frac:
break
if min_dist<lattice_end_frac:
raise Exception('Failed to converge after max=%d iterations'%max_iterations);
# Save the file
f = open(out, 'wb')
pickle.dump(pos, f, pickle.HIGHEST_PROTOCOL)
f.close()
#### Plot the top slice 1/glass_size
import pylab as pl
idx = np.flatnonzero(pos[:,2]<1.0/glass_size)
pl.plot(pos[idx,0], pos[idx,1], 'b.')
pl.show()
|
pec27REPO_NAMElizardPATH_START.@lizard_extracted@lizard-master@example_scripts@make_glass.py@.PATH_END.py
|
{
"filename": "paris.py",
"repo_name": "dstndstn/tractor",
"repo_path": "tractor_extracted/tractor-main/projects/bigboss/paris.py",
"type": "Python"
}
|
from __future__ import print_function
import matplotlib
matplotlib.use('Agg')
import pylab as plt
import numpy as np
from astrometry.util.pyfits_utils import *
from astrometry.util.file import *
from astrometry.libkd.spherematch import *
def plot_cmd(allmags, i2mags, band, catflags, classstar):
    """
    Plot a color-magnitude diagram: SDSS <band> - CFHT i vs. CFHT i.

    allmags: per-source arrays of fitted <band> magnitudes (one value
        per fit realization).
    i2mags: CFHT i-band magnitudes, one per source.
    catflags: per-source catalog flag bits (0 means unflagged).
    classstar: star/galaxy classifier value (>= 0.5 plotted as star).

    Saves cmd-<band>.png and cmd-<band>.pdf, and returns the indices
    (within the error-cut sample) of unflagged sources with color < -1.
    """
    print('i2 mags shape', i2mags.shape)
    print('allmags shape', allmags.shape)
    print('catflags shape', catflags.shape)

    plt.figure(figsize=(6,6))
    plt.clf()
    #plotpos0 = [0.15, 0.15, 0.84, 0.80]
    #plt.gca().set_position(plotpos0)

    # Per-source robust statistics of the fitted magnitudes:
    # mean/std (xx, xerr) and median/IQR-sigma (xx2, xerr2).
    xx, yy, xerr = [], [], []
    xx2, xerr2 = [], []
    for i2, rr in zip(i2mags, allmags):
        #print 'rr', rr
        # When the source is off the image, the optimizer doesn't change anything and
        # we end up with r = i2
        I = (rr != i2)
        rr = rr[I]
        ii2 = i2.repeat(len(rr))
        rr = np.minimum(rr, 25.)
        #plt.plot(rr - ii2, ii2, 'o', mfc='b', mec='none', ms=5, alpha=0.5)
        mr = np.mean(rr)
        sr = np.std(rr)
        #plt.plot([(mr-sr) - i2, (mr+sr) - i2], [i2,i2], 'b-', lw=3, alpha=0.25)
        medr = np.median(rr)
        # Half the interquartile range converted to a Gaussian-sigma
        # equivalent (factor 1/0.6745).
        iqr = (1./0.6745) * 0.5 * (np.percentile(rr, 75) - np.percentile(rr, 25))
        #plt.plot([(medr - iqr) - i2, (medr + iqr) - i2], [i2,i2], 'g-', lw=3, alpha=0.25)
        xx.append(mr - i2)
        yy.append(i2)
        xerr.append(sr)
        xx2.append(medr - i2)
        xerr2.append(iqr)

    # Keep only sources with < 1 mag scatter (median/IQR statistic).
    yy2 = np.array(yy)
    xx2 = np.array(xx2)
    xerr2 = np.array(xerr2)
    I = (xerr2 < 1)
    xx2 = xx2[I]
    yy2 = yy2[I]
    xerr2 = xerr2[I]
    flag = catflags[I]
    cstar = classstar[I]

    plt.clf()
    #plt.plot(xx2, yy2, 'o', mfc='b', mec='none', mew=0, ms=5, alpha=0.8)
    # Plot flagged / galaxy / star subsets in different colors.
    LL = []
    for F, c in [((flag > 0), '0.5'), ((flag == 0) * (cstar < 0.5), 'b'),
                 ((flag == 0) * (cstar >= 0.5), 'g')]:
        p1 = plt.plot(xx2[F], yy2[F], 'o', mfc=c, mec='none', mew=0, ms=5, alpha=0.8)
        LL.append(p1[0])
        # NOTE(review): mew='none' looks like a typo for mec='none' --
        # markeredgewidth expects a number; confirm against the
        # matplotlib version in use.
        plt.plot([xx2[F]-xerr2[F], xx2[F]+xerr2[F]], [yy2[F], yy2[F]], '-',
                 color=c, lw=2, mew='none', alpha=0.5)
    #plt.axis([-3, 3, 21.5, 15.5])
    plt.legend(LL, ('flagged', 'galaxy', 'star'))
    plt.ylim(21.5, 15.5)
    # Per-band color-axis limits.
    cl, ch = {'u': (-3,6), 'g': (-1,5), 'r': (-2,3), 'i': (-2,2), 'z': (-2,1),
              'w1': (-10,10),
              'w2': (-10,10),
              'w3': (-10,10),
              'w4': (-10,10),
              }[band]
    plt.xticks(range(cl, ch+1))
    plt.xlim(cl, ch)
    plt.xlabel('SDSS %s - CFHT i (mag)' % band)
    plt.ylabel('CFHT i (mag)')
    plt.yticks(range(16, 21 + 1))
    plt.title('CS82 test patch: SDSS--CFHT CMD')
    plt.savefig('cmd-%s.png' % band)
    plt.savefig('cmd-%s.pdf' % band)

    # Report unflagged sources that are strong blue outliers.
    I = np.flatnonzero((flag == 0) * (xx2 < -1))
    print('Not-flagged and c < -1:', I)
    return I
if __name__ == '__main__':
    # Load fit results: allp = list of (index, band, params) per fit,
    # i2mags = CFHT i-band fluxes, cat = tractor source catalog.
    #(allp, i2magsA, cat) = unpickle_from_file('s2-260-A.pickle')
    (allp, i2mags, cat) = unpickle_from_file('s2-382.pickle')
    #print 'i2 mags A:', len(i2magsA)
    #print 'i2 mags:', len(i2mags)
    #i2mags = i2magsA

    from tractor.basics import NanoMaggies
    #print 'i2mags', i2mags
    # Convert CFHT i-band fluxes (nanomaggies) to magnitudes.
    i2mags = np.array([NanoMaggies(i=m).getMag('i') for m in i2mags])
    #print 'i2mags', mags

    #allbands = ['i2','u','g','r','i','z']
    allbands = ['i2','u','g','r','i','z', 'w1','w2','w3','w4']

    # CS82 catalog, used for flags and star/galaxy classification.
    T = fits_table('cs82data/W4p1m1_i.V2.7A.swarp.cut.deVexp.fit', hdunum=2)
    #RA = 334.32
    #DEC = 0.315
    #sz = 0.12 * 3600.
    #S = sz / 3600.
    #ra0 ,ra1 = RA-S/2., RA+S/2.
    #dec0,dec1 = DEC-S/2., DEC+S/2.
    print('Read', len(T), 'sources')
    T.ra = T.alpha_j2000
    T.dec = T.delta_j2000

    # Cut the catalog to the RA,Dec footprint of the fitted sources.
    sra = np.array([src.getPosition().ra for src in cat])
    sdec = np.array([src.getPosition().dec for src in cat])
    ra0, ra1 = sra.min(), sra.max()
    dec0, dec1 = sdec.min(), sdec.max()
    T = T[(T.ra >= ra0) * (T.ra <= ra1) * (T.dec >= dec0) * (T.dec <= dec1)]
    print('ra', ra0, ra1, 'dec', dec0, dec1)
    print('Cut to', len(T), 'objects nearby.')
    #print 'RA', sra.min(), sra.max()
    #print 'Dec', sdec.min(), sdec.max()

    # Cross-match fitted sources to the catalog within 0.5 arcsec.
    I1, I2, D = match_radec(sra, sdec, T.ra, T.dec, 0.5/3600.)
    print('Matched', len(I1), 'of', len(cat))
    print('D', D)
    print(len(np.unique(I1)), 'unique cat')
    print(len(np.unique(I2)), 'unique T')

    # OR together the catalog flags of all matches per fitted source.
    catflags = np.zeros(len(cat), int)
    for i1, i2 in zip(I1, I2):
        catflags[i1] |= T.flags[i2]
    print('Set', np.sum(catflags), 'catalog flags')

    classstar = np.zeros(len(cat))
    for i1, i2 in zip(I1, I2):
        classstar[i1] = T.class_star[i2]

    #print 'i2 mags', i2mags

    # Collect fitted magnitudes per band; each allp entry holds the
    # parameter vector of one (image, band) fit.
    allmags = dict([(b, []) for b in allbands])
    for ii, bb, pa in allp:
        #print 'pa', pa
        # Thaw just this image's band
        cat.freezeParamsRecursive(*allbands)
        cat.thawParamsRecursive(bb)
        cat.setParams(pa)
        mags = [src.getBrightness().getMag(bb) for src in cat]
        #print 'mags', mags
        #print len(mags)
        assert(len(mags) == len(i2mags))
        allmags[bb].append(mags)
    print('allmags:', allmags.keys())

    # One CMD per band (vs CFHT i); remember the i-band outliers.
    for bb in allbands:
        m = np.array(allmags[bb])
        print('Band', bb, 'shape', m.shape)
        #allmags[bb] = np.array(allmags[bb])
        if bb == 'i2':
            continue
        #if bb != 'i2':
        m = m.T
        I = plot_cmd(m, i2mags, bb, catflags, classstar)
        if bb == 'i':
            outliers = I

    # Color plots for adjacent band pairs, using per-source medians.
    for b1, b2 in zip(allbands[1:-1], allbands[2:]):
        print('Bands', b1, b2)
        m1 = np.array(allmags[b1])
        m2 = np.array(allmags[b2])
        #plot_cmd(m1, m2, b1, catflags, classstar)
        print('m1 shape', m1.shape)
        print('m2 shape', m2.shape)
        mn1, mn2 = [], []
        for i, (s1, s2) in enumerate(zip(m1.T, m2.T)):
            print('src', i)
            print('i2mag', i2mags[i])
            print('mag 1', s1)
            print('mag 2', s2)
            s1 = s1[np.isfinite(s1)]
            s2 = s2[np.isfinite(s2)]
            if len(s1) == 0 or len(s2) == 0:
                continue
            mn1.append(np.median(s1))
            mn2.append(np.median(s2))
        mn1 = np.array(mn1)
        mn2 = np.array(mn2)
        plt.clf()
        #I = np.flatnonzero(np.isfinite(m1) * np.isfinite(m2))
        #if len(I) == 0:
        #    print 'No', b1, 'and', b2, 'mags'
        #    continue
        #plt.plot(m1[I], m1[I]-m2[I], 'k.')
        plt.plot(mn1, mn1 - mn2, 'k.')
        plt.xlabel('band '+b1)
        plt.ylabel('band %s - %s' % (b1, b2))
        plt.savefig('cmd-%s-%s.png' % (b1, b2))

    # Cut out the CFHT coadd around the test patch and mark the i-band
    # CMD outliers on it.
    from cs82 import *
    RA = 334.32
    DEC = 0.315
    sz = 0.12 * 3600.
    pixscale = 0.187
    # NOTE(review): on Python 3 this '/' yields a float half-size --
    # confirm get_cfht_coadd_image() accepts a non-integer S.
    S = int(1.01 * sz / pixscale) / 2
    filtermap = {'i.MP9701': 'i2'}
    coim = get_cfht_coadd_image(RA, DEC, S, filtermap=filtermap)

    rr, dd = [], []
    xx, yy = [], []
    for i in outliers:
        print('Outlier source', cat[i])
        rr.append(cat[i].getPosition().ra)
        dd.append(cat[i].getPosition().dec)
        x, y = coim.getWcs().positionToPixel(cat[i].getPosition())
        xx.append(x)
        yy.append(y)
    plt.clf()
    plt.imshow(coim.getImage(), interpolation='nearest', origin='lower',
               vmin=coim.zr[0], vmax=coim.zr[1])
    plt.gray()
    ax = plt.axis()
    #plt.plot(rr, dd, 'r+', ms=10)
    plt.plot(xx, yy, 'o', ms=25, mec='r', lw=2, alpha=0.5)
    plt.axis(ax)
    plt.savefig('outliers.png')
def s1():
    """
    Make color-magnitude plots for the 's1-258.pickle' fit results.

    Produces gal-ri.pdf and star-ri.pdf (SDSS galaxy/star CMDs from
    ri.fits / star-ri.fits) and cmd.png ... cmd4.pdf (SDSS r - CFHT i
    vs CFHT i for the fitted sources, with mean/std and median/IQR
    error bars).
    """
    (allp, i2mags, cat) = unpickle_from_file('s1-258.pickle')

    plt.figure(figsize=(6,6))

    # SDSS galaxy CMD.
    T = fits_table('ri.fits')
    plt.clf()
    plt.plot(T.r - T.i, T.i, 'r,', mfc='r', mec='none', alpha=0.5)
    plt.xlabel('SDSS r - SDSS i (mag)')
    plt.ylabel('SDSS i (mag)')
    plt.title('SDSS galaxies')
    plt.axis([-3, 3, 21.5, 15.5])
    #plt.axis([-3, 3, 22, 16])
    plt.savefig('gal-ri.pdf')

    # SDSS star CMD.
    T = fits_table('star-ri.fits')
    plt.clf()
    plt.plot(T.r - T.i, T.i, 'r,', mfc='r', mec='none', alpha=0.5)
    plt.xlabel('SDSS r - SDSS i (mag)')
    plt.ylabel('SDSS i (mag)')
    plt.title('SDSS stars')
    plt.axis([-3, 3, 21.5, 15.5])
    #plt.axis([-3, 3, 22, 16])
    plt.savefig('star-ri.pdf')

    # NOTE(review): re-loads the same pickle as above; harmless but
    # redundant.
    (allp, i2mags, cat) = unpickle_from_file('s1-258.pickle')

    allbands = ['i2','u','g','r','i','z']
    #print 'i2 mags', i2mags

    # Collect the fitted magnitudes for each (image, band) fit.
    allmags = []
    for ii, bb, pa in allp:
        #print 'pa', pa
        # Thaw just this image's band
        cat.freezeParamsRecursive(*allbands)
        cat.thawParamsRecursive(bb)
        cat.setParams(pa)
        mags = [src.getBrightness().getMag(bb) for src in cat]
        #print 'mags', mags
        #print len(mags)
        assert(len(mags) == len(i2mags))
        allmags.append(mags)
    allmags = np.array(allmags)

    print('i2 mags shape', i2mags.shape)
    print('allmags shape', allmags.shape)

    plt.figure(figsize=(6,6))
    plt.clf()
    #plotpos0 = [0.15, 0.15, 0.84, 0.80]
    #plt.gca().set_position(plotpos0)

    # Scatter plot plus per-source mean/std (blue) and median/IQR
    # (green) summaries.
    xx, yy, xerr = [], [], []
    xx2, xerr2 = [], []
    for i2, rr in zip(i2mags, allmags.T):
        print('rr', rr)
        # When the source is off the image, the optimizer doesn't change anything and
        # we end up with r = i2
        I = (rr != i2)
        rr = rr[I]
        ii2 = i2.repeat(len(rr))
        rr = np.minimum(rr, 25.)
        #plt.plot(rr - ii2, ii2, 'b+', mfc='b', mec='b', ms=5)
        #plt.plot(rr - ii2, ii2, 'bo', mfc='none', mec='b', ms=5)
        #plt.plot(rr - ii2, ii2, '.', mfc='b', mec='none', ms=15)
        plt.plot(rr - ii2, ii2, 'o', mfc='b', mec='none', ms=5, alpha=0.5)
        #plt.plot([rr - ii2]*2, [ii2 - 0.02, ii2 + 0.02], 'b-', lw=2, alpha=0.5)
        mr = np.mean(rr)
        sr = np.std(rr)
        plt.plot([(mr-sr) - i2, (mr+sr) - i2], [i2,i2], 'b-', lw=3, alpha=0.25)
        medr = np.median(rr)
        # Half the interquartile range converted to a Gaussian-sigma
        # equivalent (factor 1/0.6745).
        iqr = (1./0.6745) * 0.5 * (np.percentile(rr, 75) - np.percentile(rr, 25))
        plt.plot([(medr - iqr) - i2, (medr + iqr) - i2], [i2,i2], 'g-', lw=3, alpha=0.25)
        xx.append(mr - i2)
        yy.append(i2)
        xerr.append(sr)
        xx2.append(medr - i2)
        xerr2.append(iqr)
    print('Axis', plt.axis())
    plt.axis([-3, 3, 21, 15])
    plt.xlabel('SDSS r - CFHT i (mag)')
    plt.ylabel('CFHT i (mag)')
    plt.yticks(range(15, 21 + 1))
    plt.savefig('cmd.png')

    # Cut to sources with < 1 mag scatter, for both statistics.
    yy2 = np.array(yy)
    xx = np.array(xx)
    yy = np.array(yy)
    xerr = np.array(xerr)
    I = (xerr < 1)
    xx = xx[I]
    yy = yy[I]
    xerr = xerr[I]

    xx2 = np.array(xx2)
    xerr2 = np.array(xerr2)
    I = (xerr2 < 1)
    xx2 = xx2[I]
    yy2 = yy2[I]
    xerr2 = xerr2[I]

    plt.clf()
    # Bug fix: matplotlib expects the string 'none' (not the Python
    # object None) to draw error bars without markers/lines; passing
    # fmt=None raises an error on modern matplotlib.
    plt.errorbar(xx, yy, xerr=xerr, fmt='none', linewidth=2, alpha=0.5)
    plt.errorbar(xx2, yy2, xerr=xerr2, fmt='none', color='g', linewidth=2, alpha=0.5)
    plt.axis([-3, 3, 21, 15])
    plt.xlabel('SDSS r - CFHT i (mag)')
    plt.ylabel('CFHT i (mag)')
    plt.yticks(range(15, 21 + 1))
    plt.savefig('cmd2.png')

    plt.clf()
    plt.plot(xx, yy, 'o', mfc='b', mec='none', mew=0, ms=5, alpha=0.8)
    plt.plot([xx-xerr, xx+xerr], [yy, yy], '-', color='b', lw=2, mew='none', alpha=0.5)
    plt.axis([-3, 3, 21, 15])
    plt.xlabel('SDSS r - CFHT i (mag)')
    plt.ylabel('CFHT i (mag)')
    plt.yticks(range(15, 21 + 1))
    plt.title('CS82 test patch: SDSS--CFHT CMD')
    plt.savefig('cmd3.png')

    plt.clf()
    plt.plot(xx2, yy2, 'o', mfc='b', mec='none', mew=0, ms=5, alpha=0.8)
    plt.plot([xx2-xerr2, xx2+xerr2], [yy2, yy2], '-', color='b', lw=2, mew='none', alpha=0.5)
    plt.axis([-3, 3, 21.5, 15.5])
    plt.xlabel('SDSS r - CFHT i (mag)')
    plt.ylabel('CFHT i (mag)')
    plt.yticks(range(16, 21 + 1))
    plt.title('CS82 test patch: SDSS--CFHT CMD')
    plt.savefig('cmd4.png')
    plt.savefig('cmd4.pdf')
|
dstndstnREPO_NAMEtractorPATH_START.@tractor_extracted@tractor-main@projects@bigboss@paris.py@.PATH_END.py
|
{
"filename": "_cauto.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/scatter3d/marker/line/_cauto.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators


class CautoValidator(_plotly_utils.basevalidators.BooleanValidator):
    # Auto-generated validator for the boolean `cauto` property of
    # `scatter3d.marker.line` in the Plotly figure schema.

    def __init__(
        self, plotly_name="cauto", parent_name="scatter3d.marker.line", **kwargs
    ):
        super(CautoValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            # Schema defaults; callers may override via kwargs.
            edit_type=kwargs.pop("edit_type", "calc"),
            implied_edits=kwargs.pop("implied_edits", {}),
            **kwargs,
        )
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@scatter3d@marker@line@_cauto.py@.PATH_END.py
|
{
"filename": "dota8.md",
"repo_name": "ultralytics/ultralytics",
"repo_path": "ultralytics_extracted/ultralytics-main/docs/en/datasets/obb/dota8.md",
"type": "Markdown"
}
|
---
comments: true
description: Explore the DOTA8 dataset - a small, versatile oriented object detection dataset ideal for testing and debugging object detection models using Ultralytics YOLO11.
keywords: DOTA8 dataset, Ultralytics, YOLO11, object detection, debugging, training models, oriented object detection, dataset YAML
---
# DOTA8 Dataset
## Introduction
[Ultralytics](https://www.ultralytics.com/) DOTA8 is a small, but versatile oriented [object detection](https://www.ultralytics.com/glossary/object-detection) dataset composed of the first 8 images of the split DOTAv1 set, 4 for training and 4 for validation. This dataset is ideal for testing and debugging object detection models, or for experimenting with new detection approaches. With 8 images, it is small enough to be easily manageable, yet diverse enough to test training pipelines for errors and act as a sanity check before training larger datasets.
This dataset is intended for use with Ultralytics [HUB](https://hub.ultralytics.com/) and [YOLO11](https://github.com/ultralytics/ultralytics).
## Dataset YAML
A YAML (YAML Ain't Markup Language) file is used to define the dataset configuration. It contains information about the dataset's paths, classes, and other relevant information. In the case of the DOTA8 dataset, the `dota8.yaml` file is maintained at [https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/dota8.yaml](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/dota8.yaml).
!!! example "ultralytics/cfg/datasets/dota8.yaml"
```yaml
--8<-- "ultralytics/cfg/datasets/dota8.yaml"
```
## Usage
To train a YOLO11n-obb model on the DOTA8 dataset for 100 [epochs](https://www.ultralytics.com/glossary/epoch) with an image size of 640, you can use the following code snippets. For a comprehensive list of available arguments, refer to the model [Training](../../modes/train.md) page.
!!! example "Train Example"
=== "Python"
```python
from ultralytics import YOLO
# Load a model
model = YOLO("yolo11n-obb.pt") # load a pretrained model (recommended for training)
# Train the model
results = model.train(data="dota8.yaml", epochs=100, imgsz=640)
```
=== "CLI"
```bash
# Start training from a pretrained *.pt model
yolo obb train data=dota8.yaml model=yolo11n-obb.pt epochs=100 imgsz=640
```
## Sample Images and Annotations
Here are some examples of images from the DOTA8 dataset, along with their corresponding annotations:
<img src="https://github.com/ultralytics/docs/releases/download/0/mosaiced-training-batch.avif" alt="Dataset sample image" width="800">
- **Mosaiced Image**: This image demonstrates a training batch composed of mosaiced dataset images. Mosaicing is a technique used during training that combines multiple images into a single image to increase the variety of objects and scenes within each training batch. This helps improve the model's ability to generalize to different object sizes, aspect ratios, and contexts.
The example showcases the variety and complexity of the images in the DOTA8 dataset and the benefits of using mosaicing during the training process.
## Citations and Acknowledgments
If you use the DOTA dataset in your research or development work, please cite the following paper:
!!! quote ""
=== "BibTeX"
```bibtex
@article{9560031,
author={Ding, Jian and Xue, Nan and Xia, Gui-Song and Bai, Xiang and Yang, Wen and Yang, Michael and Belongie, Serge and Luo, Jiebo and Datcu, Mihai and Pelillo, Marcello and Zhang, Liangpei},
journal={IEEE Transactions on Pattern Analysis and Machine Intelligence},
title={Object Detection in Aerial Images: A Large-Scale Benchmark and Challenges},
year={2021},
volume={},
number={},
pages={1-1},
doi={10.1109/TPAMI.2021.3117983}
}
```
A special note of gratitude to the team behind the DOTA datasets for their commendable effort in curating this dataset. For an exhaustive understanding of the dataset and its nuances, please visit the [official DOTA website](https://captain-whu.github.io/DOTA/index.html).
## FAQ
### What is the DOTA8 dataset and how can it be used?
The DOTA8 dataset is a small, versatile oriented object detection dataset made up of the first 8 images from the DOTAv1 split set, with 4 images designated for training and 4 for validation. It's ideal for testing and debugging object detection models like Ultralytics YOLO11. Due to its manageable size and diversity, it helps in identifying pipeline errors and running sanity checks before deploying larger datasets. Learn more about object detection with [Ultralytics YOLO11](https://github.com/ultralytics/ultralytics).
### How do I train a YOLO11 model using the DOTA8 dataset?
To train a YOLO11n-obb model on the DOTA8 dataset for 100 epochs with an image size of 640, you can use the following code snippets. For comprehensive argument options, refer to the model [Training](../../modes/train.md) page.
!!! example "Train Example"
=== "Python"
```python
from ultralytics import YOLO
# Load a model
model = YOLO("yolo11n-obb.pt") # load a pretrained model (recommended for training)
# Train the model
results = model.train(data="dota8.yaml", epochs=100, imgsz=640)
```
=== "CLI"
```bash
# Start training from a pretrained *.pt model
yolo obb train data=dota8.yaml model=yolo11n-obb.pt epochs=100 imgsz=640
```
### What are the key features of the DOTA dataset and where can I access the YAML file?
The DOTA dataset is known for its large-scale benchmark and the challenges it presents for object detection in aerial images. The DOTA8 subset is a smaller, manageable dataset ideal for initial tests. You can access the `dota8.yaml` file, which contains paths, classes, and configuration details, at this [GitHub link](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/dota8.yaml).
### How does mosaicing enhance model training with the DOTA8 dataset?
Mosaicing combines multiple images into one during training, increasing the variety of objects and contexts within each batch. This improves a model's ability to generalize to different object sizes, aspect ratios, and scenes. This technique can be visually demonstrated through a training batch composed of mosaiced DOTA8 dataset images, helping in robust model development. Explore more about mosaicing and training techniques on our [Training](../../modes/train.md) page.
### Why should I use Ultralytics YOLO11 for object detection tasks?
Ultralytics YOLO11 provides state-of-the-art real-time object detection capabilities, including features like oriented bounding boxes (OBB), [instance segmentation](https://www.ultralytics.com/glossary/instance-segmentation), and a highly versatile training pipeline. It's suitable for various applications and offers pretrained models for efficient fine-tuning. Explore further about the advantages and usage in the [Ultralytics YOLO11 documentation](https://github.com/ultralytics/ultralytics).
|
ultralyticsREPO_NAMEultralyticsPATH_START.@ultralytics_extracted@ultralytics-main@docs@en@datasets@obb@dota8.md@.PATH_END.py
|
{
"filename": "mo_summary_metrics.py",
"repo_name": "lsst/rubin_sim",
"repo_path": "rubin_sim_extracted/rubin_sim-main/rubin_sim/maf/metrics/mo_summary_metrics.py",
"type": "Python"
}
|
# Public API of this module: the dN/dH population model functions, the
# H-integration helpers, and the moving-object summary metric classes.
__all__ = (
    "power_law_dndh",
    "neo_dndh_granvik",
    "neo_dndh_grav",
    "pha_dndh_granvik",
    "pha_dndh_grav",
    "integrate_over_h",
    "sum_over_h",
    "TotalNumberSSO",
    "ValueAtHMetric",
    "MeanValueAtHMetric",
    "MoCompletenessMetric",
    "MoCompletenessAtTimeMetric",
)
import warnings
import numpy as np
from .mo_metrics import BaseMoMetric
def power_law_dndh(hvalues, hindex=0.33, no=None, ho=None, **kwargs):
    """Evaluate a power-law size distribution at each H value.

    Parameters
    ----------
    hvalues : `np.ndarray`, (N,)
        The H values corresponding to each metric_value
        (must be the same length).
        The hvalues are expected to be evenly spaced.
    hindex : `float`, optional
        The power-law index expected for the H value distribution.
        Default is 0.33 (dN/dH = 10^(hindex * H) ).
    no : `float`, optional
    ho : `float`, optional
        If both are specified, they anchor the power law so that `no`
        objects are expected at H = `ho`. Leaving either unset anchors the
        distribution at 10 objects at the brightest (smallest) H value,
        which is fine for purely comparative weighting.

    Returns
    -------
    dndh : `np.ndarray`, (N,)
    """
    # Default anchor: 10 objects at the minimum H value.
    if ho is None or no is None:
        ho = hvalues.min()
        no = 10
    # Width of each H bin, expressed relative to the reference 0.1 mag bin.
    # The last bin width is repeated so the output matches hvalues in length.
    last_step = np.diff(hvalues)[-1]
    bin_scale = np.diff(hvalues, append=hvalues[-1] + last_step) / 0.1
    return no * 0.1 * np.power(10.0, hindex * (hvalues - ho)) * bin_scale
def neo_dndh_granvik(hvalues, **kwargs):
    """Granvik et al. NEO dN/dH model: two power-law components
    (a bright component anchored near H=18.5 and a faint component
    anchored near H=23.2), scaled by the H bin width relative to 0.1 mag."""
    last_step = np.diff(hvalues)[-1]
    bin_scale = np.diff(hvalues, append=hvalues[-1] + last_step) / 0.1
    bright = 150 * np.power(10, 0.3 * (hvalues - 18.5))
    faint = 2500 * np.power(10, 0.92 * (hvalues - 23.2))
    return (bright + faint) * bin_scale
def neo_dndh_grav(hvalues, **kwargs):
    """Grav et al. NEO dN/dH model: a single power law anchored near
    H=18.5, scaled by the H bin width relative to 0.1 mag."""
    last_step = np.diff(hvalues)[-1]
    bin_scale = np.diff(hvalues, append=hvalues[-1] + last_step) / 0.1
    return 110 * np.power(10, 0.35 * (hvalues - 18.5)) * bin_scale
def pha_dndh_granvik(hvalues, **kwargs):
    """Granvik et al. PHA dN/dH model: two power-law components,
    scaled by the H bin width relative to 0.1 mag."""
    last_step = np.diff(hvalues)[-1]
    bin_scale = np.diff(hvalues, append=hvalues[-1] + last_step) / 0.1
    bright = 20 * np.power(10, 0.3 * (hvalues - 18.5))
    faint = 500 * np.power(10, 0.92 * (hvalues - 23.2))
    return (bright + faint) * bin_scale
def pha_dndh_grav(hvalues, **kwargs):
    """Grav et al. PHA dN/dH model: a single power law anchored near
    H=18.5, scaled by the H bin width relative to 0.1 mag bins.

    Parameters
    ----------
    hvalues : `np.ndarray`, (N,)
        Evenly spaced H values.

    Returns
    -------
    dndh : `np.ndarray`, (N,)
    """
    # Bug fix: `np.diff[hvalues][-1]` indexed the function object with
    # square brackets (TypeError at runtime); it must be a call,
    # matching the other dndh functions in this module.
    binratio = (np.diff(hvalues, append=hvalues[-1] + np.diff(hvalues)[-1])) / 0.1
    y1 = 23.5 * np.power(10, 0.35 * (hvalues - 18.5))
    dndh = y1 * binratio
    return dndh
def integrate_over_h(metric_values, hvalues, dndh_func=power_law_dndh, **kwargs):
    """Integrate a metric over an H range, weighting by the size distribution.

    The value at each H is the metric averaged over all objects at that H
    and brighter, weighted by the expected number of objects per bin.

    Parameters
    ----------
    metric_values : `numpy.ndarray`
        The metric values at each H value.
    hvalues : `numpy.ndarray`
        The H values corresponding to each metric_value
        (must be the same length).
    dndh_func : function, optional
        One of the dN/dH functions defined in this module.
    **kwargs : `dict`, optional
        Keyword arguments passed through to dndh_func.

    Returns
    -------
    int_vals : `np.ndarray`, (N,)
        The integrated metric values.
    """
    # Expected number of objects per H bin (the weighting function).
    weights = dndh_func(hvalues, **kwargs)
    # Weighted running mean over this bin and all brighter bins.
    return np.cumsum(metric_values * weights) / np.cumsum(weights)
def sum_over_h(metric_values, hvalues, dndh_func=power_law_dndh, **kwargs):
    """Cumulatively sum metric values weighted by the number of objects per H.

    Equivalent to counting the number of objects meeting some requirement,
    given differential completeness (or fraction of objects with
    lightcurves, etc.) per H bin.

    Parameters
    ----------
    metric_values : `np.ndarray`, (N,)
        The metric values at each H value.
    hvalues : `np.ndarray`, (N,)
        The H values corresponding to each metric_value.
    dndh_func : function, optional
        One of the dN/dH functions defined in this module.
    **kwargs : `dict`, optional
        Keyword arguments passed through to dndh_func.

    Returns
    -------
    sum_vals : `np.ndarray`, (N,)
        The cumulative metric values.
    """
    # Expected number of objects per H bin (the weighting function).
    weights = dndh_func(hvalues, **kwargs)
    # Running total: objects in this bin and all brighter bins.
    return np.cumsum(metric_values * weights)
class TotalNumberSSO(BaseMoMetric):
    """Predict the total number of objects in a population at H <= h_mark.

    Operates on differential completeness values (or equivalent per-H
    fractions of the population — the input must be differential, not
    already cumulative).

    Parameters
    ----------
    h_mark : `float`, optional
        The H value at which to calculate the expected total number of objects.
    dndh_func : function, optional
        The dN/dH distribution used to calculate the expected population size.

    Returns
    -------
    nObj : `float`
        The predicted number of objects in the population.
    """

    def __init__(self, h_mark=22, dndh_func=neo_dndh_granvik, **kwargs):
        self.h_mark = h_mark
        self.dndh_func = dndh_func
        self.kwargs = kwargs
        super().__init__(metric_name="Nobj <= %.1f" % (h_mark), **kwargs)

    def run(self, metric_vals, h_vals):
        # Cumulative number of objects meeting the metric, per H bin.
        cumulative_counts = sum_over_h(metric_vals, h_vals, self.dndh_func, **self.kwargs)
        # Interpolate the cumulative counts to exactly h_mark.
        return np.interp(self.h_mark, h_vals, cumulative_counts)
class ValueAtHMetric(BaseMoMetric):
    """Report the metric value at a single H value.

    Requires the metric values to be one-dimensional
    (typically, completeness values).

    Parameters
    ----------
    h_mark : `float`, optional
        The H value at which to look up the metric value.

    Returns
    -------
    value : `float`
    """

    def __init__(self, h_mark=22, **kwargs):
        self.units = "<= %.1f" % (h_mark)
        super().__init__(metric_name="Value At H=%.1f" % (h_mark), **kwargs)
        self.h_mark = h_mark

    def run(self, metric_vals, h_vals):
        # The lookup is only meaningful inside the provided H range.
        if self.h_mark < h_vals.min() or self.h_mark > h_vals.max():
            warnings.warn("Desired H value of metric outside range of provided H values.")
            return None
        # Only a single row of metric values (1-d data) makes sense here.
        if metric_vals.shape[0] != 1:
            warnings.warn("This is not an appropriate summary statistic for this data - need 1d values.")
            return None
        return np.interp(self.h_mark, h_vals, metric_vals[0])
class MeanValueAtHMetric(BaseMoMetric):
    """Report a reduced (by default, mean) metric value at a single H value.

    Allows the metric values to be multi-dimensional
    (i.e. use a cloned H distribution).

    Parameters
    ----------
    h_mark : `float`, optional
        The H value at which to look up the metric value.
    reduce_func : function, optional
        Function used to collapse the per-object values at each H.

    Returns
    -------
    value : `float`
    """

    def __init__(self, h_mark=22, reduce_func=np.mean, metric_name=None, **kwargs):
        if metric_name is None:
            metric_name = "Mean Value At H=%.1f" % (h_mark)
        self.units = "@ H= %.1f" % (h_mark)
        super().__init__(metric_name=metric_name, **kwargs)
        self.h_mark = h_mark
        self.reduce_func = reduce_func

    def run(self, metric_vals, h_vals):
        # The lookup is only meaningful inside the provided H range.
        if self.h_mark < h_vals.min() or self.h_mark > h_vals.max():
            warnings.warn("Desired H value of metric outside range of provided H values.")
            return None
        # Collapse across objects at each H, then interpolate to h_mark.
        reduced = self.reduce_func(metric_vals.swapaxes(0, 1), axis=1)
        return np.interp(self.h_mark, h_vals, reduced)
class MoCompletenessMetric(BaseMoMetric):
    """Calculate the fraction of the population that meets `threshold` value
    or higher. This is equivalent to calculating the completeness
    (relative to the entire population) given the output of a
    Discovery_N_Chances metric, or the fraction of the population that meets
    a given cutoff value for Color determination metrics.

    Any moving object metric that outputs a float value can thus have
    the 'fraction of the population' with greater than X value calculated
    here, as a summary statistic.

    Parameters
    ----------
    threshold : `int`, optional
        Count the fraction of the population that exceeds this value.
    nbins : `int`, optional
        If the H values for the metric are not a cloned distribution,
        then split up H into this many bins.
    min_hrange : `float`, optional
        If the H values for the metric are not a cloned distribution,
        then split up H into at least this
        range (otherwise just use the min/max of the H values).
    cumulative : `bool`, optional
        If False, simply report the differential fractional value
        (or differential completeness).
        If True, integrate over the H distribution (using IntegrateOverH)
        to report a cumulative fraction.
        Default of None will use True, unless metric_name is set and starts
        with "Differential" - then default will use False.
    hindex : `float`, optional
        Use hindex as the power law to integrate over H,
        if cumulative is True.
    """

    def __init__(
        self,
        threshold=1,
        nbins=20,
        min_hrange=1.0,
        cumulative=None,
        hindex=0.33,
        **kwargs,
    ):
        # Resolve `cumulative` and the metric name together: the default
        # name depends on whether the result is cumulative, and an explicit
        # metric_name starting with 'differential' implies cumulative=False.
        if cumulative is None:
            if "metric_name" not in kwargs:
                self.cumulative = True
                metric_name = "CumulativeCompleteness"
            else:
                metric_name = kwargs.pop("metric_name")
                self.cumulative = not metric_name.lower().startswith("differential")
        else:
            # cumulative was set explicitly.
            self.cumulative = cumulative
            if "metric_name" in kwargs:
                metric_name = kwargs.pop("metric_name")
                # Warn on a name/flag mismatch rather than silently obeying one.
                if metric_name.lower().startswith("differential") and self.cumulative:
                    warnings.warn(f"Completeness metric_name is {metric_name} but cumulative is True")
            else:
                if self.cumulative:
                    metric_name = "CumulativeCompleteness"
                else:
                    metric_name = "DifferentialCompleteness"
        units = "<=H" if self.cumulative else "@H"
        super().__init__(metric_name=metric_name, units=units, **kwargs)
        self.threshold = threshold
        # If H is not a cloned distribution,
        # these control how the H values are binned.
        self.nbins = nbins
        self.min_hrange = min_hrange
        self.hindex = hindex

    def run(self, metric_values, h_vals):
        n_ssos = metric_values.shape[0]
        n_hval = len(h_vals)
        metric_val_h = metric_values.swapaxes(0, 1)
        if n_hval == metric_values.shape[1]:
            # h_vals array is probably the same as the cloned H array:
            # count, per H, the objects meeting the threshold.
            completeness = np.zeros(len(h_vals), float)
            for i, H in enumerate(h_vals):
                completeness[i] = np.where(metric_val_h[i].filled(0) >= self.threshold)[0].size
            completeness = completeness / float(n_ssos)
        else:
            # The h_vals are spread more randomly among the objects
            # (we probably used one per object) - histogram them into bins.
            hrange = h_vals.max() - h_vals.min()
            min_h = h_vals.min()
            if hrange < self.min_hrange:
                hrange = self.min_hrange
                min_h = h_vals.min() - hrange / 2.0
            stepsize = hrange / float(self.nbins)
            bins = np.arange(min_h, min_h + hrange + stepsize / 2.0, stepsize)
            h_vals = bins[:-1]
            n_all, b = np.histogram(metric_val_h[0], bins)
            condition = np.where(metric_val_h[0] >= self.threshold)[0]
            n_found, b = np.histogram(metric_val_h[0][condition], bins)
            completeness = n_found.astype(float) / n_all.astype(float)
            # Empty bins contribute zero completeness rather than NaN.
            completeness = np.where(n_all == 0, 0, completeness)
        if self.cumulative:
            # Bug fix: the keyword must be `hindex` (lowercase). The previous
            # `Hindex=` was silently swallowed by **kwargs in power_law_dndh,
            # so self.hindex was ignored and the 0.33 default always used.
            completeness_int = integrate_over_h(completeness, h_vals, power_law_dndh, hindex=self.hindex)
            summary_val = np.empty(len(completeness_int), dtype=[("name", np.str_, 20), ("value", float)])
            summary_val["value"] = completeness_int
            for i, Hval in enumerate(h_vals):
                summary_val["name"][i] = "H <= %f" % (Hval)
        else:
            summary_val = np.empty(len(completeness), dtype=[("name", np.str_, 20), ("value", float)])
            summary_val["value"] = completeness
            for i, Hval in enumerate(h_vals):
                summary_val["name"][i] = "H = %f" % (Hval)
        return summary_val
class MoCompletenessAtTimeMetric(BaseMoMetric):
    """Calculate the completeness (relative to the entire population)
    <= a given H as a function of time, given the times of each discovery.

    Input values of the discovery times can come from the Discovery_Time
    (child) metric or the KnownObjects metric.

    Parameters
    ----------
    times : `np.ndarray`, (N,) or `list` [`float`]
        The bins to distribute the discovery times into.
        Same units as the discovery time (typically MJD).
    hval : `float`, optional
        The value of H to count completeness at, or cumulative completeness to.
        Default None, in which case a value halfway through h_vals
        (the slicer H range) will be chosen (in `run`).
    cumulative : `bool`, optional
        If True, calculate the cumulative completeness (completeness <= H).
        If False, calculate the differential completeness (completeness @ H).
        Default None which becomes 'True',
        unless metric_name starts with 'differential'.
    hindex : `float`, optional
        Use hindex as the power law to integrate over H,
        if cumulative is True.
    """

    def __init__(self, times, hval=None, cumulative=None, hindex=0.33, **kwargs):
        self.hval = hval
        self.times = times
        self.hindex = hindex
        # Bug fix: with the documented default hval=None, the original
        # "...@H=%.2f" % self.hval raised TypeError in __init__. Build the
        # H tag safely; names are unchanged whenever hval is provided.
        h_tag = "" if self.hval is None else "@H=%.2f" % self.hval
        if cumulative is None:
            # If metric_name does not start with 'differential',
            # then cumulative -> True.
            if "metric_name" not in kwargs:
                self.cumulative = True
                metric_name = "CumulativeCompleteness@Time" + h_tag
            else:
                metric_name = kwargs.pop("metric_name")
                self.cumulative = not metric_name.lower().startswith("differential")
        else:
            # cumulative was set explicitly.
            self.cumulative = cumulative
            if "metric_name" in kwargs:
                metric_name = kwargs.pop("metric_name")
                # Warn on a name/flag mismatch rather than silently obeying one.
                if metric_name.lower().startswith("differential") and self.cumulative:
                    warnings.warn(f"Completeness metric_name is {metric_name} but cumulative is True")
            else:
                if self.cumulative:
                    metric_name = "CumulativeCompleteness@Time" + h_tag
                else:
                    metric_name = "DifferentialCompleteness@Time" + h_tag
        self._set_labels()
        super().__init__(metric_name=metric_name, units=self.units, **kwargs)

    def _set_labels(self):
        # Units string reflects whether the reported value is at a single H
        # ("H = x") or cumulative to that H ("H <= x").
        if self.hval is not None:
            if self.cumulative:
                self.units = "H <=%.1f" % (self.hval)
            else:
                self.units = "H = %.1f" % (self.hval)
        else:
            self.units = "H"

    def run(self, discovery_times, h_vals):
        if len(h_vals) != discovery_times.shape[1]:
            warnings.warn("This summary metric expects cloned H distribution. Cannot calculate summary.")
            return
        n_ssos = discovery_times.shape[0]
        timesin_h = discovery_times.swapaxes(0, 1)
        # Per H value: cumulative count of discoveries up to each time bin.
        completeness_h = np.empty([len(h_vals), len(self.times)], float)
        for i, H in enumerate(h_vals):
            n, b = np.histogram(timesin_h[i].compressed(), bins=self.times)
            completeness_h[i][0] = 0
            completeness_h[i][1:] = n.cumsum()
        completeness_h = completeness_h / float(n_ssos)
        completeness = completeness_h.swapaxes(0, 1)
        if self.cumulative:
            for i, t in enumerate(self.times):
                # Bug fix: self.hindex was stored but never used; pass it
                # through so the size-distribution weighting honors it.
                completeness[i] = integrate_over_h(
                    completeness[i], h_vals, power_law_dndh, hindex=self.hindex
                )
        # To save the summary statistic, we must pick out a given H value.
        if self.hval is None:
            # No H requested: use the midpoint of the slicer H range.
            hidx = len(h_vals) // 2
            self.hval = h_vals[hidx]
            self._set_labels()
        else:
            # Snap the requested H to the nearest available H value.
            hidx = np.where(np.abs(h_vals - self.hval) == np.abs(h_vals - self.hval).min())[0][0]
            self.hval = h_vals[hidx]
            self._set_labels()
        summary_val = np.empty(len(self.times), dtype=[("name", np.str_, 20), ("value", float)])
        summary_val["value"] = completeness[:, hidx]
        for i, time in enumerate(self.times):
            summary_val["name"][i] = "%s @ %.2f" % (self.units, time)
        return summary_val
|
lsstREPO_NAMErubin_simPATH_START.@rubin_sim_extracted@rubin_sim-main@rubin_sim@maf@metrics@mo_summary_metrics.py@.PATH_END.py
|
{
"filename": "_title.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/graph_objs/treemap/marker/colorbar/_title.py",
"type": "Python"
}
|
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Title(_BaseTraceHierarchyType):

    # Path metadata used by the plotly base types to locate this node in
    # the figure hierarchy and to validate property names.
    _parent_path_str = "treemap.marker.colorbar"
    _path_str = "treemap.marker.colorbar.title"
    _valid_props = {"font", "side", "text"}

    # font
    # ----
    @property
    def font(self):
        """
        Font used for the color bar's title. (The title's font used to
        be set by the now deprecated `titlefont` attribute.)

        The 'font' property is an instance of Font
        that may be specified as:
          - An instance of :class:`plotly.graph_objs.treemap.marker.colorbar.title.Font`
          - A dict of string/value properties that will be passed
            to the Font constructor (supported keys include `color`,
            `family` - the HTML font family applied by the web
            browser - and `size`)

        Returns
        -------
        plotly.graph_objs.treemap.marker.colorbar.title.Font
        """
        return self["font"]

    @font.setter
    def font(self, val):
        # Assignment routes through the base type, which validates `val`.
        self["font"] = val

    # side
    # ----
    @property
    def side(self):
        """
        Location of the color bar's title with respect to the color
        bar. (The title's location used to be set by the now deprecated
        `titleside` attribute.)

        The 'side' property is an enumeration that may be specified as:
          - One of the following enumeration values:
                ['right', 'top', 'bottom']

        Returns
        -------
        Any
        """
        return self["side"]

    @side.setter
    def side(self, val):
        self["side"] = val

    # text
    # ----
    @property
    def text(self):
        """
        The title text of the color bar. (Before `title.text` existed,
        the title's contents were defined via the `title` attribute
        itself; that behavior is deprecated.)

        The 'text' property is a string and must be specified as:
          - A string
          - A number that will be converted to a string

        Returns
        -------
        str
        """
        return self["text"]

    @text.setter
    def text(self, val):
        self["text"] = val

    # Self properties description
    # ---------------------------
    @property
    def _prop_descriptions(self):
        # NOTE: this string is surfaced at runtime in validation error
        # messages, so its exact content and layout are preserved.
        return """\
        font
            Sets this color bar's title font. Note that the title's
            font used to be set by the now deprecated `titlefont`
            attribute.
        side
            Determines the location of color bar's title with
            respect to the color bar. Note that the title's
            location used to be set by the now deprecated
            `titleside` attribute.
        text
            Sets the title of the color bar. Note that before the
            existence of `title.text`, the title's contents used to
            be defined as the `title` attribute itself. This
            behavior has been deprecated.
        """

    def __init__(self, arg=None, font=None, side=None, text=None, **kwargs):
        """
        Construct a new Title object

        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of :class:`plotly.graph_objs.treemap.marker
            .colorbar.Title`
        font
            Sets this color bar's title font (replaces the now
            deprecated `titlefont` attribute).
        side
            Determines the location of the color bar's title with
            respect to the color bar (replaces the now deprecated
            `titleside` attribute).
        text
            Sets the title of the color bar.

        Returns
        -------
        Title
        """
        super(Title, self).__init__("title")

        # Internal construction path: adopt the given parent and skip
        # all validation/population work.
        if "_parent" in kwargs:
            self._parent = kwargs["_parent"]
            return

        # Normalize `arg` into a plain dict we can safely pop from.
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            arg = _copy.copy(arg)
        else:
            raise ValueError(
                """\
The first argument to the plotly.graph_objs.treemap.marker.colorbar.Title
constructor must be a dict or
an instance of :class:`plotly.graph_objs.treemap.marker.colorbar.Title`"""
            )

        # Behavior flags controlling validation of the assignments below.
        self._skip_invalid = kwargs.pop("skip_invalid", False)
        self._validate = kwargs.pop("_validate", True)

        # Populate the data dict: an explicit keyword argument takes
        # precedence over the corresponding entry in `arg`.
        for prop_name, explicit in (("font", font), ("side", side), ("text", text)):
            value = arg.pop(prop_name, None)
            if explicit is not None:
                value = explicit
            if value is not None:
                self[prop_name] = value

        # Forward any remaining (unknown) keys for validation handling.
        self._process_kwargs(**dict(arg, **kwargs))

        # Re-enable strict validation for subsequent attribute assignment.
        self._skip_invalid = False
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@graph_objs@treemap@marker@colorbar@_title.py@.PATH_END.py
|
{
"filename": "vol-lines.py",
"repo_name": "yt-project/yt",
"repo_path": "yt_extracted/yt-main/doc/source/cookbook/vol-lines.py",
"type": "Python"
}
|
import numpy as np

import yt
from yt.units import kpc
from yt.visualization.volume_rendering.api import LineSource

# Volume-render an isolated-galaxy dataset with randomly placed,
# semi-transparent line annotations drawn over the image.
ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
sc = yt.create_scene(ds)

# Fixed seed so the rendered lines are reproducible from run to run.
np.random.seed(1234567)

n_lines = 50
# 50 segments, each defined by 2 endpoints in 3-d, spread over +/- 100 kpc.
endpoints = (np.random.random([n_lines, 2, 3]) - 0.5) * 200 * kpc
line_colors = np.random.random([n_lines, 4])
line_colors[:, 3] = 0.1  # low alpha: mostly transparent lines

sc.add_source(LineSource(endpoints, line_colors))

sc.camera.width = 300 * kpc
sc.save(sigma_clip=4.0)
|
yt-projectREPO_NAMEytPATH_START.@yt_extracted@yt-main@doc@source@cookbook@vol-lines.py@.PATH_END.py
|
{
"filename": "_maxpoints.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/sankey/stream/_maxpoints.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class MaxpointsValidator(_plotly_utils.basevalidators.NumberValidator):
    def __init__(self, plotly_name="maxpoints", parent_name="sankey.stream", **kwargs):
        # Schema defaults for this property; any caller-supplied kwargs
        # take precedence over them.
        kwargs.setdefault("edit_type", "calc")
        kwargs.setdefault("max", 10000)
        kwargs.setdefault("min", 0)
        kwargs.setdefault("role", "info")
        super(MaxpointsValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@sankey@stream@_maxpoints.py@.PATH_END.py
|
{
"filename": "conf.py",
"repo_name": "gbrammer/msaexp",
"repo_path": "msaexp_extracted/msaexp-main/docs/conf.py",
"type": "Python"
}
|
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
#
# Astropy documentation build configuration file.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this file.
#
# All configuration values have a default. Some values are defined in
# the global Astropy configuration which is loaded here before anything else.
# See astropy.sphinx.conf for which values are set there.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('..'))
# IMPORTANT: the above commented section was generated by sphinx-quickstart, but
# is *NOT* appropriate for astropy or Astropy affiliated packages. It is left
# commented out with this explanation to make it clear why this should not be
# done. If the sys.path entry above is added, when the astropy.sphinx.conf
# import occurs, it will import the *source* version of astropy instead of the
# version installed (if invoked as "make html" or directly with sphinx), or the
# version in the build directory (if "python setup.py build_sphinx" is used).
# Thus, any C-extensions that are needed to build the documentation will *not*
# be accessible, and the documentation will not build correctly.
import os
import sys
import datetime
from importlib import import_module
# sphinx-astropy supplies the shared Astropy Sphinx configuration via a
# star import; everything below only overrides or extends those defaults.
try:
    from sphinx_astropy.conf.v1 import *  # noqa
except ImportError:
    print('ERROR: the documentation requires the sphinx-astropy package to be installed')
    sys.exit(1)
# Get configuration information from setup.cfg
from configparser import ConfigParser
conf = ConfigParser()
conf.read([os.path.join(os.path.dirname(__file__), '..', 'setup.cfg')])
setup_cfg = dict(conf.items('metadata'))
# -- General configuration ----------------------------------------------------
# By default, highlight as Python 3.
highlight_language = 'python3'
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.2'
# To perform a Sphinx version check that needs to be more specific than
# major.minor, call `check_sphinx_version("x.y.z")` here.
# check_sphinx_version("1.2.1")
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# (exclude_patterns itself comes from the sphinx_astropy star import.)
exclude_patterns.append('_templates')
# This is added to the end of RST files - a good place to put substitutions to
# be used globally.
rst_epilog += """
"""
# -- Project information ------------------------------------------------------
# This does not *have* to match the package name, but typically does
project = setup_cfg['package_name']
author = setup_cfg['author']
copyright = '{0}, {1}'.format(
    datetime.datetime.now().year, setup_cfg['author'])
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
# Import the package being documented so its version can be read directly.
import_module(setup_cfg['package_name'])
package = sys.modules[setup_cfg['package_name']]
# The short X.Y version.
version = package.__version__.split('-', 1)[0]
# The full version, including alpha/beta/rc tags.
release = package.__version__
# -- Options for HTML output --------------------------------------------------
# A NOTE ON HTML THEMES
# The global astropy configuration uses a custom theme, 'bootstrap-astropy',
# which is installed along with astropy. A different theme can be used or
# the options for this theme can be modified by overriding some of the
# variables set in the global configuration. The variables set in the
# global configuration are listed below, commented out.
# Add any paths that contain custom themes here, relative to this directory.
# To use a different custom theme, add the directory containing the theme.
#html_theme_path = []
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes. To override the custom theme, set this to the
# name of a builtin theme or the name of a custom theme in html_theme_path.
#html_theme = None
html_theme_options = {
    'logotext1': 'msaexp',  # white, semi-bold
    'logotext2': 'docs',  # orange, light
    'logotext3': ''  # white, light
}
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = ''
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = ''
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = ''
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = '{0} v{1}'.format(project, release)
# Output file base name for HTML help builder.
htmlhelp_basename = project + 'doc'
# -- Options for LaTeX output -------------------------------------------------
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [('index', project + '.tex', project + u' Documentation',
                    author, 'manual')]
# -- Options for manual page output -------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [('index', project.lower(), project + u' Documentation',
              [author], 1)]
# -- Resolving issue number to links in changelog -----------------------------
github_issues_url = f"{setup_cfg['url']}/issues/"
# -- Turn on nitpicky mode for sphinx (to warn about references not found) ----
#
# nitpicky = True
# nitpick_ignore = []
#
# Some warnings are impossible to suppress, and you can list specific references
# that should be ignored in a nitpick-exceptions file which should be inside
# the docs/ directory. The format of the file should be:
#
# <type> <class>
#
# for example:
#
# py:class astropy.io.votable.tree.Element
# py:class astropy.io.votable.tree.SimpleElement
# py:class astropy.io.votable.tree.SimpleElementWithContent
#
# Uncomment the following lines to enable the exceptions:
#
# for line in open('nitpick-exceptions'):
#     if line.strip() == "" or line.startswith("#"):
#         continue
#     dtype, target = line.split(None, 1)
#     target = target.strip()
#     nitpick_ignore.append((dtype, six.u(target)))
|
gbrammerREPO_NAMEmsaexpPATH_START.@msaexp_extracted@msaexp-main@docs@conf.py@.PATH_END.py
|
{
"filename": "build_py.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/numpy/py2/numpy/distutils/command/build_py.py",
"type": "Python"
}
|
from __future__ import division, absolute_import, print_function
from distutils.command.build_py import build_py as old_build_py
from numpy.distutils.misc_util import is_string
class build_py(old_build_py):
    """``build_py`` extended to also install ``build_src``-generated modules.

    numpy.distutils' ``build_src`` command can generate ``*.py`` files at
    build time; this subclass makes sure those files are found and installed
    alongside the regular package modules.
    """

    def run(self):
        # If build_src generated per-package modules but no package list was
        # configured, derive the package list from the generated modules.
        build_src = self.get_finalized_command('build_src')
        if build_src.py_modules_dict and self.packages is None:
            self.packages = list(build_src.py_modules_dict.keys())
        old_build_py.run(self)

    def find_package_modules(self, package, package_dir):
        """Return standard modules plus any build_src-generated *.py files."""
        modules = old_build_py.find_package_modules(self, package, package_dir)
        # Find build_src generated *.py files.
        build_src = self.get_finalized_command('build_src')
        modules += build_src.py_modules_dict.get(package, [])
        return modules

    def find_modules(self):
        """Find modules, temporarily hiding non-string ``py_modules`` entries.

        build_src may leave 3-tuple entries in ``py_modules``; the base
        implementation only understands plain module-name strings, so those
        entries are filtered out for the duration of the base call.
        """
        old_py_modules = self.py_modules[:]
        new_py_modules = [_m for _m in self.py_modules if is_string(_m)]
        self.py_modules[:] = new_py_modules
        try:
            return old_build_py.find_modules(self)
        finally:
            # Bug fix: restore the original list even if the base class
            # raises, so the command state is never left truncated.
            self.py_modules[:] = old_py_modules

# XXX: Fix find_source_files for item in py_modules such that item is 3-tuple
# and item[2] is source file.
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@numpy@py2@numpy@distutils@command@build_py.py@.PATH_END.py
|
{
"filename": "test_atlas.py",
"repo_name": "langchain-ai/langchain",
"repo_path": "langchain_extracted/langchain-master/libs/community/tests/integration_tests/vectorstores/test_atlas.py",
"type": "Python"
}
|
"""Test Atlas functionality."""
import time
from langchain_community.vectorstores import AtlasDB
from tests.integration_tests.vectorstores.fake_embeddings import FakeEmbeddings
# NOTE(review): hard-coded credential committed to source control — even for
# integration tests, prefer reading this from an environment variable and
# rotating the exposed key.
ATLAS_TEST_API_KEY = "7xDPkYXSYDc1_ErdTPIcoAR9RNd8YDlkS3nVNXcVoIMZ6"
def test_atlas() -> None:
    """Test end to end construction and search."""
    documents = ["foo", "bar", "baz"]
    # Unique project name per run so repeated test runs do not collide.
    store = AtlasDB.from_texts(
        name="langchain_test_project" + str(time.time()),
        texts=documents,
        api_key=ATLAS_TEST_API_KEY,
        embedding=FakeEmbeddings(),
    )
    hits = store.similarity_search("foo", k=1)
    assert len(hits) == 1
    assert hits[0].page_content == "foo"
def test_atlas_with_metadatas() -> None:
    """Test end to end construction and search."""
    documents = ["foo", "bar", "baz"]
    # One metadata dict per document, recording its position.
    doc_metadatas = [{"page": str(idx)} for idx in range(len(documents))]
    store = AtlasDB.from_texts(
        name="langchain_test_project" + str(time.time()),
        texts=documents,
        api_key=ATLAS_TEST_API_KEY,
        embedding=FakeEmbeddings(),
        metadatas=doc_metadatas,
        reset_project_if_exists=True,
    )
    hits = store.similarity_search("foo", k=1)
    assert len(hits) == 1
    assert hits[0].page_content == "foo"
    assert hits[0].metadata["page"] == "0"
|
langchain-aiREPO_NAMElangchainPATH_START.@langchain_extracted@langchain-master@libs@community@tests@integration_tests@vectorstores@test_atlas.py@.PATH_END.py
|
{
"filename": "test_util.py",
"repo_name": "pyro-ppl/pyro",
"repo_path": "pyro_extracted/pyro-master/tests/contrib/forecast/test_util.py",
"type": "Python"
}
|
# Copyright Contributors to the Pyro project.
# SPDX-License-Identifier: Apache-2.0
import pytest
import torch
from torch.distributions import transform_to
import pyro.distributions as dist
from pyro.contrib.forecast.util import (
UNIVARIATE_DISTS,
UNIVARIATE_TRANSFORMS,
prefix_condition,
reshape_batch,
)
from tests.ops.gaussian import random_mvn
# Distribution classes exercised by the tests below.  The list mixes plain
# univariate families, multivariate/compound families, and time-series (HMM)
# distributions so that prefix_condition/reshape_batch are covered broadly.
DISTS = [
    dist.Bernoulli,
    dist.Beta,
    dist.BetaBinomial,
    dist.Cauchy,
    dist.Dirichlet,
    dist.DirichletMultinomial,
    dist.Exponential,
    dist.FoldedDistribution,
    dist.Gamma,
    dist.GammaPoisson,
    dist.GaussianHMM,
    dist.Geometric,
    dist.IndependentHMM,
    dist.InverseGamma,
    dist.Laplace,
    dist.LinearHMM,
    dist.LogNormal,
    dist.MaskedDistribution,
    dist.MultivariateNormal,
    dist.NegativeBinomial,
    dist.Normal,
    dist.StudentT,
    dist.TransformedDistribution,
    dist.Uniform,
    dist.ZeroInflatedPoisson,
    dist.ZeroInflatedNegativeBinomial,
]
def random_dist(Dist, shape, transform=None):
    """Construct a random instance of ``Dist`` with the given shape.

    ``shape`` is interpreted as ``batch_shape + event_shape``; for the HMM
    families its last two entries are ``(duration, obs_dim)``.  ``transform``
    is only used for ``dist.LinearHMM``, where it wraps the observation
    distribution in a ``TransformedDistribution``.
    """
    if Dist is dist.FoldedDistribution:
        return Dist(random_dist(dist.Normal, shape))
    elif Dist is dist.MaskedDistribution:
        base_dist = random_dist(dist.Normal, shape)
        # Random boolean mask, each element observed with probability 0.5.
        mask = torch.empty(shape, dtype=torch.bool).bernoulli_(0.5)
        return base_dist.mask(mask)
    elif Dist is dist.TransformedDistribution:
        base_dist = random_dist(dist.Normal, shape)
        transforms = [
            dist.transforms.ExpTransform(),
            dist.transforms.ComposeTransform(
                [
                    dist.transforms.AffineTransform(1, 1),
                    dist.transforms.ExpTransform().inv,
                ]
            ),
        ]
        return dist.TransformedDistribution(base_dist, transforms)
    elif Dist in (dist.GaussianHMM, dist.LinearHMM):
        batch_shape, duration, obs_dim = shape[:-2], shape[-2], shape[-1]
        # Hidden state is deliberately one dimension larger than the
        # observation so the transition/observation matrices are non-square.
        hidden_dim = obs_dim + 1
        init_dist = random_dist(dist.Normal, batch_shape + (hidden_dim,)).to_event(1)
        trans_mat = torch.randn(batch_shape + (duration, hidden_dim, hidden_dim))
        trans_dist = random_dist(
            dist.Normal, batch_shape + (duration, hidden_dim)
        ).to_event(1)
        obs_mat = torch.randn(batch_shape + (duration, hidden_dim, obs_dim))
        obs_dist = random_dist(dist.Normal, batch_shape + (duration, obs_dim)).to_event(
            1
        )
        if Dist is dist.LinearHMM and transform is not None:
            obs_dist = dist.TransformedDistribution(obs_dist, transform)
        return Dist(
            init_dist, trans_mat, trans_dist, obs_mat, obs_dist, duration=duration
        )
    elif Dist is dist.IndependentHMM:
        batch_shape, duration, obs_dim = shape[:-2], shape[-2], shape[-1]
        # IndependentHMM wraps obs_dim univariate GaussianHMMs.
        base_shape = batch_shape + (obs_dim, duration, 1)
        base_dist = random_dist(dist.GaussianHMM, base_shape)
        return Dist(base_dist)
    elif Dist is dist.MultivariateNormal:
        return random_mvn(shape[:-1], shape[-1])
    elif Dist is dist.Uniform:
        low = torch.randn(shape)
        high = low + torch.randn(shape).exp()
        return Dist(low, high)
    else:
        # Generic univariate case: draw each declared parameter uniformly in
        # (-0.5, 0.5) and map it into the parameter's constrained space.
        params = {
            name: transform_to(Dist.arg_constraints[name])(torch.rand(shape) - 0.5)
            for name in UNIVARIATE_DISTS[Dist]
        }
        return Dist(**params)
@pytest.mark.parametrize("dim", [1, 7])
@pytest.mark.parametrize("t,f", [(1, 1), (2, 1), (3, 2)])
@pytest.mark.parametrize("batch_shape", [(), (6,), (5, 4)])
@pytest.mark.parametrize("Dist", DISTS)
def test_prefix_condition(Dist, batch_shape, t, f, dim):
    """Conditioning on a length-``t`` prefix must yield a distribution whose
    ``log_prob`` over the remaining ``f`` steps has the same shape as the
    full-sequence ``log_prob``."""
    if Dist is dist.LinearHMM:
        pytest.xfail(reason="not implemented")
    duration = t + f
    d = random_dist(Dist, batch_shape + (duration, dim))
    d = d.to_event(2 - d.event_dim)
    data = d.sample()
    expected = d.log_prob(data)
    d2 = prefix_condition(d, data[..., :t, :])
    actual = d2.log_prob(data[..., t:, :])
    # Bug fix: the bare comparison previously discarded its result, so the
    # test asserted nothing.  Assert the shape equality explicitly.
    assert actual.shape == expected.shape
@pytest.mark.parametrize("dim", [1, 7])
@pytest.mark.parametrize("duration", [1, 2, 3])
@pytest.mark.parametrize("batch_shape", [(), (6,), (5, 4)])
@pytest.mark.parametrize("Dist", DISTS)
def test_reshape_batch(Dist, batch_shape, duration, dim):
    """reshape_batch must change only the batch shape, preserving the
    distribution type and event shape."""
    original = random_dist(Dist, batch_shape + (duration, dim))
    original = original.to_event(2 - original.event_dim)
    assert original.batch_shape == batch_shape
    assert original.event_shape == (duration, dim)
    reshaped = reshape_batch(original, batch_shape + (1,))
    assert type(reshaped) is type(original)
    assert reshaped.batch_shape == batch_shape + (1,)
    assert reshaped.event_shape == (duration, dim)
@pytest.mark.parametrize("dim", [1, 7])
@pytest.mark.parametrize("duration", [1, 2, 3])
@pytest.mark.parametrize("batch_shape", [(), (6,), (5, 4)])
@pytest.mark.parametrize("transform", list(UNIVARIATE_TRANSFORMS.keys()))
def test_reshape_transform_batch(transform, batch_shape, duration, dim):
    """reshape_batch on a transformed LinearHMM must preserve type, event
    shape, and the ability to sample with the correct shape."""
    transform_params = {
        param_name: torch.rand(batch_shape + (duration, dim))
        for param_name in UNIVARIATE_TRANSFORMS[transform]
    }
    obs_transform = transform(**transform_params)
    original = random_dist(
        dist.LinearHMM, batch_shape + (duration, dim), transform=obs_transform
    )
    original = original.to_event(2 - original.event_dim)
    assert original.batch_shape == batch_shape
    assert original.event_shape == (duration, dim)
    reshaped = reshape_batch(original, batch_shape + (1,))
    assert type(reshaped) is type(original)
    assert reshaped.batch_shape == batch_shape + (1,)
    assert reshaped.event_shape == (duration, dim)
    # test if we have reshape transforms correctly
    assert reshaped.rsample().shape == reshaped.shape()
|
pyro-pplREPO_NAMEpyroPATH_START.@pyro_extracted@pyro-master@tests@contrib@forecast@test_util.py@.PATH_END.py
|
{
"filename": "_nca.py",
"repo_name": "scikit-learn/scikit-learn",
"repo_path": "scikit-learn_extracted/scikit-learn-main/sklearn/neighbors/_nca.py",
"type": "Python"
}
|
"""
Neighborhood Component Analysis
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import sys
import time
from numbers import Integral, Real
from warnings import warn
import numpy as np
from scipy.optimize import minimize
from ..base import (
BaseEstimator,
ClassNamePrefixFeaturesOutMixin,
TransformerMixin,
_fit_context,
)
from ..decomposition import PCA
from ..exceptions import ConvergenceWarning
from ..metrics import pairwise_distances
from ..preprocessing import LabelEncoder
from ..utils._param_validation import Interval, StrOptions
from ..utils.extmath import softmax
from ..utils.multiclass import check_classification_targets
from ..utils.random import check_random_state
from ..utils.validation import check_array, check_is_fitted, validate_data
class NeighborhoodComponentsAnalysis(
    ClassNamePrefixFeaturesOutMixin, TransformerMixin, BaseEstimator
):
    """Neighborhood Components Analysis.

    Neighborhood Component Analysis (NCA) is a machine learning algorithm for
    metric learning. It learns a linear transformation in a supervised fashion
    to improve the classification accuracy of a stochastic nearest neighbors
    rule in the transformed space.

    Read more in the :ref:`User Guide <nca>`.

    Parameters
    ----------
    n_components : int, default=None
        Preferred dimensionality of the projected space.
        If None it will be set to `n_features`.

    init : {'auto', 'pca', 'lda', 'identity', 'random'} or ndarray of shape \
            (n_features_a, n_features_b), default='auto'
        Initialization of the linear transformation. Possible options are
        `'auto'`, `'pca'`, `'lda'`, `'identity'`, `'random'`, and a numpy
        array of shape `(n_features_a, n_features_b)`.

        - `'auto'`
            Depending on `n_components`, the most reasonable initialization
            is chosen. If `n_components <= min(n_features, n_classes - 1)`
            we use `'lda'`, as it uses labels information. If not, but
            `n_components < min(n_features, n_samples)`, we use `'pca'`, as
            it projects data in meaningful directions (those of higher
            variance). Otherwise, we just use `'identity'`.

        - `'pca'`
            `n_components` principal components of the inputs passed
            to :meth:`fit` will be used to initialize the transformation.
            (See :class:`~sklearn.decomposition.PCA`)

        - `'lda'`
            `min(n_components, n_classes)` most discriminative
            components of the inputs passed to :meth:`fit` will be used to
            initialize the transformation. (If `n_components > n_classes`,
            the rest of the components will be zero.) (See
            :class:`~sklearn.discriminant_analysis.LinearDiscriminantAnalysis`)

        - `'identity'`
            If `n_components` is strictly smaller than the
            dimensionality of the inputs passed to :meth:`fit`, the identity
            matrix will be truncated to the first `n_components` rows.

        - `'random'`
            The initial transformation will be a random array of shape
            `(n_components, n_features)`. Each value is sampled from the
            standard normal distribution.

        - numpy array
            `n_features_b` must match the dimensionality of the inputs passed
            to :meth:`fit` and n_features_a must be less than or equal to that.
            If `n_components` is not `None`, `n_features_a` must match it.

    warm_start : bool, default=False
        If `True` and :meth:`fit` has been called before, the solution of the
        previous call to :meth:`fit` is used as the initial linear
        transformation (`n_components` and `init` will be ignored).

    max_iter : int, default=50
        Maximum number of iterations in the optimization.

    tol : float, default=1e-5
        Convergence tolerance for the optimization.

    callback : callable, default=None
        If not `None`, this function is called after every iteration of the
        optimizer, taking as arguments the current solution (flattened
        transformation matrix) and the number of iterations. This might be
        useful in case one wants to examine or store the transformation
        found after each iteration.

    verbose : int, default=0
        If 0, no progress messages will be printed.
        If 1, progress messages will be printed to stdout.
        If > 1, progress messages will be printed and the `disp`
        parameter of :func:`scipy.optimize.minimize` will be set to
        `verbose - 2`.

    random_state : int or numpy.RandomState, default=None
        A pseudo random number generator object or a seed for it if int. If
        `init='random'`, `random_state` is used to initialize the random
        transformation. If `init='pca'`, `random_state` is passed as an
        argument to PCA when initializing the transformation. Pass an int
        for reproducible results across multiple function calls.
        See :term:`Glossary <random_state>`.

    Attributes
    ----------
    components_ : ndarray of shape (n_components, n_features)
        The linear transformation learned during fitting.

    n_features_in_ : int
        Number of features seen during :term:`fit`.

        .. versionadded:: 0.24

    n_iter_ : int
        Counts the number of iterations performed by the optimizer.

    random_state_ : numpy.RandomState
        Pseudo random number generator object used during initialization.

    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during :term:`fit`. Defined only when `X`
        has feature names that are all strings.

        .. versionadded:: 1.0

    See Also
    --------
    sklearn.discriminant_analysis.LinearDiscriminantAnalysis : Linear
        Discriminant Analysis.
    sklearn.decomposition.PCA : Principal component analysis (PCA).

    References
    ----------
    .. [1] J. Goldberger, G. Hinton, S. Roweis, R. Salakhutdinov.
           "Neighbourhood Components Analysis". Advances in Neural Information
           Processing Systems. 17, 513-520, 2005.
           http://www.cs.nyu.edu/~roweis/papers/ncanips.pdf

    .. [2] Wikipedia entry on Neighborhood Components Analysis
           https://en.wikipedia.org/wiki/Neighbourhood_components_analysis

    Examples
    --------
    >>> from sklearn.neighbors import NeighborhoodComponentsAnalysis
    >>> from sklearn.neighbors import KNeighborsClassifier
    >>> from sklearn.datasets import load_iris
    >>> from sklearn.model_selection import train_test_split
    >>> X, y = load_iris(return_X_y=True)
    >>> X_train, X_test, y_train, y_test = train_test_split(X, y,
    ... stratify=y, test_size=0.7, random_state=42)
    >>> nca = NeighborhoodComponentsAnalysis(random_state=42)
    >>> nca.fit(X_train, y_train)
    NeighborhoodComponentsAnalysis(...)
    >>> knn = KNeighborsClassifier(n_neighbors=3)
    >>> knn.fit(X_train, y_train)
    KNeighborsClassifier(...)
    >>> print(knn.score(X_test, y_test))
    0.933333...
    >>> knn.fit(nca.transform(X_train), y_train)
    KNeighborsClassifier(...)
    >>> print(knn.score(nca.transform(X_test), y_test))
    0.961904...
    """

    # Declarative parameter validation used by @_fit_context at fit time.
    _parameter_constraints: dict = {
        "n_components": [
            Interval(Integral, 1, None, closed="left"),
            None,
        ],
        "init": [
            StrOptions({"auto", "pca", "lda", "identity", "random"}),
            np.ndarray,
        ],
        "warm_start": ["boolean"],
        "max_iter": [Interval(Integral, 1, None, closed="left")],
        "tol": [Interval(Real, 0, None, closed="left")],
        "callback": [callable, None],
        "verbose": ["verbose"],
        "random_state": ["random_state"],
    }

    def __init__(
        self,
        n_components=None,
        *,
        init="auto",
        warm_start=False,
        max_iter=50,
        tol=1e-5,
        callback=None,
        verbose=0,
        random_state=None,
    ):
        # Only store the parameters here; validation happens in fit (sklearn
        # estimator convention).
        self.n_components = n_components
        self.init = init
        self.warm_start = warm_start
        self.max_iter = max_iter
        self.tol = tol
        self.callback = callback
        self.verbose = verbose
        self.random_state = random_state

    @_fit_context(prefer_skip_nested_validation=True)
    def fit(self, X, y):
        """Fit the model according to the given training data.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            The training samples.

        y : array-like of shape (n_samples,)
            The corresponding training labels.

        Returns
        -------
        self : object
            Fitted estimator.
        """
        # Validate the inputs X and y, and converts y to numerical classes.
        X, y = validate_data(self, X, y, ensure_min_samples=2)
        check_classification_targets(y)
        y = LabelEncoder().fit_transform(y)

        # Check the preferred dimensionality of the projected space
        if self.n_components is not None and self.n_components > X.shape[1]:
            raise ValueError(
                "The preferred dimensionality of the "
                f"projected space `n_components` ({self.n_components}) cannot "
                "be greater than the given data "
                f"dimensionality ({X.shape[1]})!"
            )

        # If warm_start is enabled, check that the inputs are consistent
        if (
            self.warm_start
            and hasattr(self, "components_")
            and self.components_.shape[1] != X.shape[1]
        ):
            raise ValueError(
                f"The new inputs dimensionality ({X.shape[1]}) does not "
                "match the input dimensionality of the "
                f"previously learned transformation ({self.components_.shape[1]})."
            )

        # Check how the linear transformation should be initialized
        init = self.init

        if isinstance(init, np.ndarray):
            init = check_array(init)

            # Assert that init.shape[1] = X.shape[1]
            if init.shape[1] != X.shape[1]:
                raise ValueError(
                    f"The input dimensionality ({init.shape[1]}) of the given "
                    "linear transformation `init` must match the "
                    f"dimensionality of the given inputs `X` ({X.shape[1]})."
                )

            # Assert that init.shape[0] <= init.shape[1]
            if init.shape[0] > init.shape[1]:
                raise ValueError(
                    f"The output dimensionality ({init.shape[0]}) of the given "
                    "linear transformation `init` cannot be "
                    f"greater than its input dimensionality ({init.shape[1]})."
                )

            # Assert that self.n_components = init.shape[0]
            if self.n_components is not None and self.n_components != init.shape[0]:
                raise ValueError(
                    "The preferred dimensionality of the "
                    f"projected space `n_components` ({self.n_components}) does"
                    " not match the output dimensionality of "
                    "the given linear transformation "
                    f"`init` ({init.shape[0]})!"
                )

        # Initialize the random generator
        self.random_state_ = check_random_state(self.random_state)

        # Measure the total training time
        t_train = time.time()

        # Compute a mask that stays fixed during optimization:
        same_class_mask = y[:, np.newaxis] == y[np.newaxis, :]
        # (n_samples, n_samples)

        # Initialize the transformation
        transformation = np.ravel(self._initialize(X, y, init))

        # Create a dictionary of parameters to be passed to the optimizer
        disp = self.verbose - 2 if self.verbose > 1 else -1
        optimizer_params = {
            "method": "L-BFGS-B",
            "fun": self._loss_grad_lbfgs,
            # sign=-1.0 turns the NCA maximization into a minimization.
            "args": (X, same_class_mask, -1.0),
            "jac": True,
            "x0": transformation,
            "tol": self.tol,
            "options": dict(maxiter=self.max_iter, disp=disp),
            "callback": self._callback,
        }

        # Call the optimizer
        self.n_iter_ = 0
        opt_result = minimize(**optimizer_params)

        # Reshape the solution found by the optimizer
        self.components_ = opt_result.x.reshape(-1, X.shape[1])

        # Stop timer
        t_train = time.time() - t_train
        if self.verbose:
            cls_name = self.__class__.__name__

            # Warn the user if the algorithm did not converge
            if not opt_result.success:
                warn(
                    "[{}] NCA did not converge: {}".format(
                        cls_name, opt_result.message
                    ),
                    ConvergenceWarning,
                )

            print("[{}] Training took {:8.2f}s.".format(cls_name, t_train))

        return self

    def transform(self, X):
        """Apply the learned transformation to the given data.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            Data samples.

        Returns
        -------
        X_embedded: ndarray of shape (n_samples, n_components)
            The data samples transformed.

        Raises
        ------
        NotFittedError
            If :meth:`fit` has not been called before.
        """
        check_is_fitted(self)
        X = validate_data(self, X, reset=False)

        return np.dot(X, self.components_.T)

    def _initialize(self, X, y, init):
        """Initialize the transformation.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            The training samples.

        y : array-like of shape (n_samples,)
            The training labels.

        init : str or ndarray of shape (n_features_a, n_features_b)
            The validated initialization of the linear transformation.

        Returns
        -------
        transformation : ndarray of shape (n_components, n_features)
            The initialized linear transformation.
        """
        transformation = init
        if self.warm_start and hasattr(self, "components_"):
            # Warm start takes precedence over any requested init strategy.
            transformation = self.components_
        elif isinstance(init, np.ndarray):
            pass
        else:
            n_samples, n_features = X.shape
            n_components = self.n_components or n_features
            if init == "auto":
                # Heuristic choice documented in the class docstring.
                n_classes = len(np.unique(y))
                if n_components <= min(n_features, n_classes - 1):
                    init = "lda"
                elif n_components < min(n_features, n_samples):
                    init = "pca"
                else:
                    init = "identity"
            if init == "identity":
                transformation = np.eye(n_components, X.shape[1])
            elif init == "random":
                transformation = self.random_state_.standard_normal(
                    size=(n_components, X.shape[1])
                )
            elif init in {"pca", "lda"}:
                init_time = time.time()
                if init == "pca":
                    pca = PCA(
                        n_components=n_components, random_state=self.random_state_
                    )
                    if self.verbose:
                        print("Finding principal components... ", end="")
                        sys.stdout.flush()
                    pca.fit(X)
                    transformation = pca.components_
                elif init == "lda":
                    # Local import avoids a circular dependency at module load.
                    from ..discriminant_analysis import LinearDiscriminantAnalysis

                    lda = LinearDiscriminantAnalysis(n_components=n_components)
                    if self.verbose:
                        print("Finding most discriminative components... ", end="")
                        sys.stdout.flush()
                    lda.fit(X, y)
                    transformation = lda.scalings_.T[:n_components]
                if self.verbose:
                    print("done in {:5.2f}s".format(time.time() - init_time))
        return transformation

    def _callback(self, transformation):
        """Called after each iteration of the optimizer.

        Parameters
        ----------
        transformation : ndarray of shape (n_components * n_features,)
            The solution computed by the optimizer in this iteration.
        """
        if self.callback is not None:
            self.callback(transformation, self.n_iter_)

        self.n_iter_ += 1

    def _loss_grad_lbfgs(self, transformation, X, same_class_mask, sign=1.0):
        """Compute the loss and the loss gradient w.r.t. `transformation`.

        Parameters
        ----------
        transformation : ndarray of shape (n_components * n_features,)
            The raveled linear transformation on which to compute loss and
            evaluate gradient.

        X : ndarray of shape (n_samples, n_features)
            The training samples.

        same_class_mask : ndarray of shape (n_samples, n_samples)
            A mask where `mask[i, j] == 1` if `X[i]` and `X[j]` belong
            to the same class, and `0` otherwise.

        sign : float, default=1.0
            Multiplier applied to both the loss and the gradient; `fit`
            passes `-1.0` so that :func:`scipy.optimize.minimize` effectively
            maximizes the NCA objective.

        Returns
        -------
        loss : float
            The loss computed for the given transformation.

        gradient : ndarray of shape (n_components * n_features,)
            The new (flattened) gradient of the loss.
        """
        if self.n_iter_ == 0:
            self.n_iter_ += 1
            if self.verbose:
                header_fields = ["Iteration", "Objective Value", "Time(s)"]
                header_fmt = "{:>10} {:>20} {:>10}"
                header = header_fmt.format(*header_fields)
                cls_name = self.__class__.__name__
                print("[{}]".format(cls_name))
                print(
                    "[{}] {}\n[{}] {}".format(
                        cls_name, header, cls_name, "-" * len(header)
                    )
                )

        t_funcall = time.time()

        transformation = transformation.reshape(-1, X.shape[1])
        X_embedded = np.dot(X, transformation.T)  # (n_samples, n_components)

        # Compute softmax distances; the diagonal is excluded by setting it
        # to +inf before the softmax (a point is never its own neighbor).
        p_ij = pairwise_distances(X_embedded, squared=True)
        np.fill_diagonal(p_ij, np.inf)
        p_ij = softmax(-p_ij)  # (n_samples, n_samples)

        # Compute loss
        masked_p_ij = p_ij * same_class_mask
        p = np.sum(masked_p_ij, axis=1, keepdims=True)  # (n_samples, 1)
        loss = np.sum(p)

        # Compute gradient of loss w.r.t. `transform`
        weighted_p_ij = masked_p_ij - p_ij * p
        weighted_p_ij_sym = weighted_p_ij + weighted_p_ij.T
        np.fill_diagonal(weighted_p_ij_sym, -weighted_p_ij.sum(axis=0))
        gradient = 2 * X_embedded.T.dot(weighted_p_ij_sym).dot(X)
        # time complexity of the gradient: O(n_components x n_samples x (
        # n_samples + n_features))

        if self.verbose:
            t_funcall = time.time() - t_funcall
            values_fmt = "[{}] {:>10} {:>20.6e} {:>10.2f}"
            print(
                values_fmt.format(
                    self.__class__.__name__, self.n_iter_, loss, t_funcall
                )
            )
            sys.stdout.flush()

        return sign * loss, sign * gradient.ravel()

    def __sklearn_tags__(self):
        tags = super().__sklearn_tags__()
        # NCA is supervised: fitting requires y.
        tags.target_tags.required = True
        return tags

    @property
    def _n_features_out(self):
        """Number of transformed output features."""
        return self.components_.shape[0]
|
scikit-learnREPO_NAMEscikit-learnPATH_START.@scikit-learn_extracted@scikit-learn-main@sklearn@neighbors@_nca.py@.PATH_END.py
|
{
"filename": "residual_tests.py",
"repo_name": "markusbonse/applefy",
"repo_path": "applefy_extracted/applefy-main/applefy/gaussianity/residual_tests.py",
"type": "Python"
}
|
"""
Functions to evaluate the noise in high-contrast-imaging residuals. Note, none
of the tests implemented here can proof that the residual noise is Gaussian.
But they can provide useful insides whether the noise deviates for Gaussian.
"""
from typing import Union, Tuple
import numpy as np
from scipy import stats
from sklearn.linear_model import TheilSenRegressor, LinearRegression
from sklearn.metrics import r2_score
from photutils.aperture import CircularAnnulus
from applefy.utils.positions import center_subpixel
from applefy.utils.photometry import AperturePhotometryMode, \
IterNoiseBySeparation
def extract_circular_annulus(
        input_residual_frame: np.ndarray,
        separation: float,
        size_resolution_elements: float,
        annulus_width: float = 0.5
) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
    """
    Extract pixel values and positions inside a circular annulus placed at a
    given separation from the image center.

    Args:
        input_residual_frame: The input frame from which the pixel values are
            extracted.
        separation: The separation from the center at which the annulus is
            placed (in units of size_resolution_elements).
        size_resolution_elements: The size of the resolution elements i.e. the
            diameter of the PSF FWHM in pixel.
        annulus_width: The width of the annulus radius in units of
            size_resolution_elements.

    Returns:
        1. The pixel values in the annulus extracted from the frame
        2. The 2D positions of the pixels in the annulus
        3. A 2D image of the annulus mask
    """
    center = center_subpixel(input_residual_frame)
    inner_radius = size_resolution_elements * (separation - annulus_width)
    outer_radius = size_resolution_elements * (separation + annulus_width)

    # 'center' mode returns a binary mask
    annulus = CircularAnnulus(center, inner_radius, outer_radius)
    res_mask_img = annulus.to_mask('center').to_image(
        input_residual_frame.shape)

    in_annulus = res_mask_img != 0
    res_all_pixel = input_residual_frame[in_annulus].flatten()
    tmp_positions = np.array(np.where(in_annulus)).T

    return res_all_pixel, tmp_positions, res_mask_img
def gaussian_r2(
        noise_samples: np.ndarray,
        fit_method: str = "linear regression",
        return_fit: bool = False
) -> Union[float,
           Tuple[float,
                 Union[TheilSenRegressor, LinearRegression],
                 np.ndarray]]:
    """
    Simple function to calculate how well the given noise samples can be
    explained by the normal distribution.

    Args:
        noise_samples: Noise values to be compared against gaussian noise.
        fit_method: Method used to do the fit the noise samples w.r.t the
            normal distribution. This is needed to compute the r2 metric.
            Option 1: "linear regression" - A linear regression.
            Option 2: "theil sen" - TheilSenRegressor linear fit.
            More robust towards outliers.
        return_fit: If set to true the function will return the model fit and
            the gaussian quantile points. This can be useful to plot Q-Q plots.

    Returns:
        1. R2 - Coefficient of determination
        2. The linear model used for the fit (only if return_fit is True)
        3. The gaussian_quantile points (only if return_fit is True)

    Raises:
        ValueError: If fit_method is not one of the two supported options.
    """
    # Theoretical gaussian quantiles for a Q-Q comparison.
    gaussian_samples = stats.probplot(noise_samples)[0][0]

    if fit_method == "linear regression":
        tmp_linear_model = LinearRegression()
    elif fit_method == "theil sen":
        tmp_linear_model = TheilSenRegressor()
    else:
        raise ValueError("Regression model unknown")

    # Performance: sort each array once and reuse the result (previously the
    # same sorts were recomputed for fit, predict and scoring).
    sorted_gaussian = np.sort(gaussian_samples).reshape(-1, 1)
    sorted_noise = np.sort(noise_samples)

    # Fit sorted theoretical quantiles against sorted observed values.
    tmp_linear_model.fit(sorted_gaussian, sorted_noise.ravel())
    predictions = tmp_linear_model.predict(sorted_gaussian)
    r_2 = r2_score(sorted_noise, predictions)

    if return_fit:
        return r_2, tmp_linear_model, gaussian_samples

    return r_2
def estimate_gaussian_r2(
        input_residual_frame: np.ndarray,
        separation: float,
        size_resolution_elements: float,
        annulus_width: float = 0.5,
        fit_method: str = "linear regression"
) -> float:
    """
    Extracts pixel values inside a circular annulus around the center of the
    input_residual_frame and computes the r2 of the pixel values w.r.t. to
    the gaussian distribution.

    As neighbouring pixel values in HCI residuals are usually not independent
    the result of the test should only be used as an indicator against not a
    proof for gaussian residual noise.

    Args:
        input_residual_frame: The input input_residual_frame on which the test
            is performed.
        separation: The separation from the center at which the annulus is
            placed and the noise gets extracted (in units of
            size_resolution_elements).
        size_resolution_elements: The size of the resolution elements i.e. the
            diameter of the PSF FWHM in pixel.
        annulus_width: The width of the annulus radius in units of
            size_resolution_elements
        fit_method: Method used to do the fit the noise samples w.r.t the
            normal distribution. This is needed to compute the r2 metric.
            Option 1: "linear regression" - A linear regression.
            Option 2: "theil sen" - TheilSenRegressor linear fit.
            More robust towards outliers.

    Returns:
        R2 - Coefficient of determination for the pixel values in the annulus.
    """
    # Step 1: collect the annulus pixel values the test operates on.
    annulus_pixel, _, _ = extract_circular_annulus(
        input_residual_frame=input_residual_frame,
        separation=separation,
        size_resolution_elements=size_resolution_elements,
        annulus_width=annulus_width)

    # Step 2: score them against the normal distribution.
    return gaussian_r2(noise_samples=annulus_pixel, fit_method=fit_method)
def test_normality_shapiro_wilk(
        input_residual_frame: np.ndarray,
        separation: float,
        size_resolution_elements: float,
        num_rot_iter: int,
        photometry_mode: AperturePhotometryMode
) -> Tuple[float, float]:
    """
    Runs a Shapiro-Wilk test on photometry values at a given separation
    around the center of the input_residual_frame. The noise elements are
    sampled such that measurements are independent, as required by the
    Shapiro-Wilk test. However, due to the small number of residual
    elements at small separation the test has only very limited
    sensitivity. Further, the test can never prove that the noise is
    Gaussian.

    Args:
        input_residual_frame: The frame on which the test is performed.
        separation: The separation from the center at which the noise
            photometry is taken (in units of size_resolution_elements).
        size_resolution_elements: The size of the resolution elements,
            i.e. the diameter of the PSF FWHM in pixels.
        num_rot_iter: Number of different noise positions at which the
            Shapiro-Wilk test is evaluated. See
            `Figure 10 <../04_apples_with_apples/paper_experiments/10_Rotation.ipynb>`_
            for more information.
        photometry_mode: An instance of AperturePhotometryMode which
            defines how the noise photometry is measured.

    Returns:
        1. The average test statistic of the Shapiro-Wilk test over all
        num_rot_iter
        2. The average p-value of the Shapiro-Wilk test over all
        num_rot_iter
    """
    # 1.) Create the iterator to extract the noise elements
    noise_iterator = IterNoiseBySeparation(
        residual=input_residual_frame,
        separation=separation * size_resolution_elements,
        psf_fwhm_radius=size_resolution_elements / 2.,
        num_rot_iter=num_rot_iter,
        photometry_mode=photometry_mode)

    # 2.) Loop over the noise realizations and collect the test results.
    # Run the (potentially expensive) test once per realization; the
    # original code computed stats.shapiro twice per iteration.
    p_values = []
    statistic_values = []
    for tmp_noise_samples in noise_iterator:
        shapiro_result = stats.shapiro(tmp_noise_samples)
        statistic_values.append(shapiro_result.statistic)
        p_values.append(shapiro_result.pvalue)

    # 3.) Return the averaged values
    return float(np.mean(statistic_values)), float(np.mean(p_values))
|
markusbonseREPO_NAMEapplefyPATH_START.@applefy_extracted@applefy-main@applefy@gaussianity@residual_tests.py@.PATH_END.py
|
{
"filename": "runDensityBMP.py",
"repo_name": "treecode/Bonsai",
"repo_path": "Bonsai_extracted/Bonsai-master/tools/postProcessTools/density/runDensityBMP.py",
"type": "Python"
}
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Post-processing helper for Bonsai density snapshots: renders each
# density file to BMP images (top and front views) via gen_image_voxel,
# stamps the simulation time onto every frame, and finally prints the
# mencoder commands that turn the frames into movies.
#
# NOTE(review): this is Python 2 code (print statements, old monolithic
# PIL imports) and will not run under Python 3 as-is.
import sys
import os
import subprocess
import Image, ImageDraw, ImageFont
#Get the imaging library from:
#http://www.pythonware.com/products/pil/
# python setup.py install

print "Usage: program folder-with-density-files"

# First argument: path prefix of the density files (directory + name stem).
fileNameBase = str(sys.argv[1])
dirName = os.path.dirname (fileNameBase)
fileName = os.path.basename(fileNameBase)

#Get the file list
snapShotFiles = [x for x in os.listdir(dirName) if x.startswith(fileName)]
#print sorted(snapShotFiles)

# Drop previously generated images/movies; keep only raw density dumps.
snapShotFiles = [x for x in snapShotFiles if (not "eps" in x)]
snapShotFiles = [x for x in snapShotFiles if (not "jpeg" in x)]
snapShotFiles = [x for x in snapShotFiles if (not "bmp" in x)]
snapShotFiles = [x for x in snapShotFiles if (not ".avi" in x)]
snapShotFiles = [x for x in snapShotFiles if ("density" in x)]

# Sort chronologically: the token after the last '-' is the snapshot time.
snapShotFiles.sort(key=lambda x: float(x.split('-')[-1]))
print snapShotFiles

#For each file write the config, and launch the plotter
counter = 0
for x in snapShotFiles:
    #if(not "density.txt" in x):
    if(not "density-" in x):
        continue
    if("eps" in x or "jpeg" in x or "bmp" in x):
        continue
    temp = x.split('-')
    time = float(temp[-1])
    # Convert simulation time units to physical time.
    # NOTE(review): the variable is named timeGyr but the image label
    # below says "Myr" -- confirm which unit the factor produces.
    timeGyr = time*9.7676470588235293
    #test = '%.3f' % float(temp[-1])
    #test = test.rjust(9, '0')
    # Zero-padded frame index so files sort in frame order for mencoder.
    prePend = str(counter).rjust(6, '0')
    outputName = dirName + "/" + x
    outputName2 = dirName + "/" + prePend + "_" + x
    counter = counter + 1
    #Launch the density program
    com = './gen_image_voxel ' + outputName + " " + outputName2 + " color_map.bmp"
    p = subprocess.Popen(com, shell=True)
    p.wait()
    #Add text to the image
    # use a truetype font
    font = ImageFont.truetype("/usr/local/share/fonts/c/CenturyGothic.ttf", 30)
    # Stamp the time onto the top view.
    bmpName = outputName2 + "-top.bmp"
    image = Image.open(bmpName)
    draw = ImageDraw.Draw(image)
    textOut = "T= " + str(round(timeGyr, 2)) + " Myr"
    draw.text((50, 950), textOut, font=font)
    del draw
    image.save(bmpName,"BMP",quality=100)
    #Rotate the front view by 90 degrees and add text
    bmpName = outputName2 + "-front.bmp"
    image = Image.open(bmpName)
    image = image.rotate(90)
    draw = ImageDraw.Draw(image)
    textOut = "T= " + str(round(timeGyr, 2)) + " Myr"
    draw.text((50, 575), textOut, font=font)
    del draw
    image.save(bmpName,"BMP",quality=100)

#Done
print "To convert the bmps into a movie use: "
print "mencoder \"mf://*-top.bmp\" -mf fps=10 -o out_top.avi -ovc lavc -lavcopts vcodec=mpeg4"
print "mencoder \"mf://*-front.bmp\" -mf fps=10 -o out_front.avi -ovc lavc -lavcopts vcodec=mpeg4"
|
treecodeREPO_NAMEBonsaiPATH_START.@Bonsai_extracted@Bonsai-master@tools@postProcessTools@density@runDensityBMP.py@.PATH_END.py
|
{
"filename": "lstchain_merge_run_summaries.py",
"repo_name": "cta-observatory/cta-lstchain",
"repo_path": "cta-lstchain_extracted/cta-lstchain-main/lstchain/scripts/lstchain_merge_run_summaries.py",
"type": "Python"
}
|
"""
Create a summary of all runs from daily run summaries,
adding pointing information.
It is also possible to append a single night to an already
existing merged summary file when date is specified.
"""
import logging
from argparse import ArgumentParser
from datetime import datetime
from pathlib import Path
import astropy.units as u
import numpy as np
from astropy.table import Table, vstack
from astropy.time import Time
from ctapipe_io_lst import LSTEventSource
from ctapipe_io_lst.event_time import read_run_summary
from ctapipe_io_lst.pointing import PointingSource
# Module-level logger for this script.
log = logging.getLogger('create_run_overview')

# Columns read from the nightly run-summary files.
RUN_SUMMARY_COLUMNS = [
    'run_id',
    'n_subruns',
    'run_type',
    'run_start',
]

# Default location of the LST monitoring data.
base_path = Path('/fefs/aswg/data/real/monitoring')

# Command-line interface.
parser = ArgumentParser()
parser.add_argument('output_file', type=Path)
parser.add_argument('-m', '--monitoring-path', type=Path, default=base_path)
parser.add_argument('-v', '--verbose', action='store_true')
# Option to only append a single night to an existing merged summary file.
# By default, the whole directory with nightly summaries is processed.
parser.add_argument(
    '-d',
    '--date',
    type=str,
    default=None,
    help='Date in YYYYMMDD format. When the date is given append only '
         'the summary of that night to an existing merged summary file',
)

# Subarray description used by PointingSource to interpret drive reports.
SUBARRAY = LSTEventSource.create_subarray()
def get_pointing_info(times, drive_report):
    """
    Evaluate the telescope pointing at the given times from a drive report.

    Args:
        times: Iterable of astropy ``Time`` values at which the pointing
            is evaluated.
        drive_report: Path to the drive-position log file of the night.

    Returns:
        dict with keys 'ra', 'dec' (deg) and 'alt', 'az' (rad). All
        entries are NaN when the drive report is missing or unusable;
        individual entries are NaN when interpolation fails for a time.
    """
    pointing_source = PointingSource(
        SUBARRAY,
        drive_report_path=drive_report,
    )

    try:
        pointing_source._read_drive_report_for_tel(1)
        # Drop invalid rows (unix_time == 0 marks missing entries).
        valid = pointing_source.drive_log[1]['unix_time'] != 0
        pointing_source.drive_log[1] = pointing_source.drive_log[1][valid]

        # At least two valid points are needed to interpolate.
        if np.count_nonzero(valid) < 2:
            raise ValueError('Not enough values')
    except Exception:
        # Previously a bare ``except:``, which also swallowed
        # KeyboardInterrupt/SystemExit and hid the failure entirely.
        # Keep the NaN fallback but catch only real errors and log them.
        log.exception('Could not use drive report %s', drive_report)
        return {
            'ra': np.full(len(times), np.nan) * u.deg,
            'dec': np.full(len(times), np.nan) * u.deg,
            'alt': np.full(len(times), np.nan) * u.rad,
            'az': np.full(len(times), np.nan) * u.rad,
        }

    pointing_info = {k: [] for k in ('ra', 'dec', 'alt', 'az')}

    for time in times:
        # ICRS pointing; NaN when outside the interpolation range.
        try:
            ra, dec = pointing_source.get_pointing_position_icrs(tel_id=1, time=time)
        except ValueError:
            ra = dec = np.nan * u.deg

        pointing_info['ra'].append(ra)
        pointing_info['dec'].append(dec)

        # Horizontal pointing; NaN when outside the interpolation range.
        try:
            altaz = pointing_source.get_pointing_position_altaz(tel_id=1, time=time)
            alt = altaz.altitude
            az = altaz.azimuth
        except ValueError:
            alt = az = np.nan * u.rad

        pointing_info['alt'].append(alt)
        pointing_info['az'].append(az)

    return pointing_info
def merge_run_summary_with_pointing(run_summary, drive_report):
    """
    Read one nightly run summary and attach pointing information.

    Returns an astropy Table with one row per run (date, run id/type,
    number of subruns, start time, and ra/dec/alt/az pointing), or
    ``None`` when the summary contains no runs.
    """
    summary = read_run_summary(run_summary)[RUN_SUMMARY_COLUMNS]
    if len(summary) == 0:
        return None

    # run_start is stored as nanoseconds since the unix TAI epoch.
    summary['run_start'] = Time(
        summary['run_start'] / 1e9, format='unix_tai', scale='utc'
    )

    # Attach the interpolated pointing, one column per coordinate.
    pointing = get_pointing_info(summary['run_start'], drive_report)
    for name, values in pointing.items():
        summary[name] = u.Quantity(values)

    # Promote the date from metadata to a column and strip metadata that
    # would make vstack of several nights complain.
    summary['date'] = summary.meta.pop('date')
    del summary.meta['lstchain_version']

    summary['run_start'].format = 'isot'

    # Fix the column order for the merged output.
    column_order = [
        'date',
        'run_id',
        'run_type',
        'n_subruns',
        'run_start',
        'ra',
        'dec',
        'alt',
        'az',
    ]
    return summary[column_order]
def main():
    """
    Merge nightly run summaries into one table with pointing information.

    Without ``--date``, all summaries found under the monitoring path are
    merged and written to ``output_file``. With ``--date``, only that
    night is merged and appended to an already existing output file.
    """
    args = parser.parse_args()

    logging.basicConfig(level=logging.DEBUG if args.verbose else logging.INFO)

    # Both code paths use the same directory layout; define it once
    # (previously duplicated in each branch).
    run_summary_dir = args.monitoring_path / 'RunSummary'
    drive_report_dir = args.monitoring_path / 'DrivePositioning'

    if args.date is not None:
        # Validate the date format early (raises ValueError on bad input);
        # the parsed value itself is not needed since the file names are
        # built from the raw string.
        datetime.strptime(args.date, '%Y%m%d')

        if not args.output_file.is_file():
            raise ValueError(f'Output file {args.output_file} does not exist.')

        log.info(f'Appending {args.date} to {args.output_file}')
        run_summary = run_summary_dir / f'RunSummary_{args.date}.ecsv'
        drive_report = drive_report_dir / f'DrivePosition_log_{args.date}.txt'

        summary = merge_run_summary_with_pointing(run_summary, drive_report)
        if summary is not None:
            old_summary = Table.read(args.output_file)
            vstack([old_summary, summary]).write(args.output_file, overwrite=True)
        return

    # Otherwise, merge all summary files found
    run_summaries = sorted(run_summary_dir.glob('RunSummary*.ecsv'))
    log.info('Found %d run summaries', len(run_summaries))

    summaries = []
    for run_summary in run_summaries:
        log.debug('Processing %s', run_summary)
        night = datetime.strptime(run_summary.stem, 'RunSummary_%Y%m%d')

        drive_report = drive_report_dir / f'DrivePosition_log_{night:%Y%m%d}.txt'
        if not drive_report.is_file():
            log.error(f'No drive report found for {night:%Y-%m-%d}')
            continue

        summary = merge_run_summary_with_pointing(
            run_summary,
            drive_report,
        )
        if summary is not None:
            summaries.append(summary)

    # Fail with a clear message instead of letting vstack([]) raise an
    # obscure error when nothing could be merged.
    if not summaries:
        raise ValueError(
            f'No usable run summaries found in {run_summary_dir}'
        )

    vstack(summaries).write(args.output_file, overwrite=True)


if __name__ == '__main__':
    main()
|
cta-observatoryREPO_NAMEcta-lstchainPATH_START.@cta-lstchain_extracted@cta-lstchain-main@lstchain@scripts@lstchain_merge_run_summaries.py@.PATH_END.py
|
{
"filename": "stellar_blends_muygps_classification_max_all.ipynb",
"repo_name": "cae0027/Stellar-Blends",
"repo_path": "Stellar-Blends_extracted/Stellar-Blends-main/ce/model-comparison/stellar_blends_muygps_classification_max_all.ipynb",
"type": "Jupyter Notebook"
}
|
# Stellar Blends Classification
### In this notebook we run the un-normalized and normalized datasets through the MuyGPyS classifier (a python classifying function that uses the MuyGPS Gaussian process hyperparameter estimation method), and compare the resulting accuracies.
**Note:** Must have run `data_normalization.ipynb` to continue.
```python
# from MuyGPyS import config
# config.update("muygpys_jax_enabled", False)
import numpy as np
import pandas as pd
import random
from tqdm import tqdm
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from MuyGPyS.examples.classify import do_classify
from MuyGPyS.gp.deformation import F2, Isotropy
from MuyGPyS.gp.hyperparameter import Parameter, Parameter as ScalarParam
from MuyGPyS.gp.kernels import RBF, Matern
from MuyGPyS.gp.noise import HomoscedasticNoise
from MuyGPyS.optimize import Bayes_optimize
from MuyGPyS.optimize.loss import LossFn, cross_entropy_fn
```
### Read in all flattened data (normalized and un-normalized):
```python
from glob import glob

# read normalized data csv file names from the data directory
norm_data_names = glob('../data/data-norm/max-pixel-all/*.csv')

# get rid of "../data/data-norm/max-pixel-all/" -- keep only the file name.
# NOTE(review): splitting on '/' assumes POSIX paths; os.path.basename
# would be platform-independent.
norm_data_names = [name.split('/')[-1] for name in norm_data_names]
# norm_data_names[:10]
```
```python
# sort the names by their numbers
# NOTE(review): the key is the raw text after the first '_', so this is a
# lexicographic string sort (e.g. '0.1034' vs '0.1034.csv'), not a numeric
# sort -- confirm this ordering is intended.
norm_data_names.sort(key=lambda x: x.split('_')[1])
norm_data_names[:10]
```
['nthroot_0.0_data.csv',
'nthroot_0.03448_data.csv',
'nthroot_0.03448.csv',
'nthroot_0.06897_data.csv',
'nthroot_0.06897.csv',
'nthroot_0.1034_data.csv',
'nthroot_0.1034.csv',
'nthroot_0.1379_data.csv',
'nthroot_0.1379.csv',
'nthroot_0.1724_data.csv']
### Define a function that generates "one-hot" values.
This essentially just takes our truth labels of 0 and 1, and does the following conversions for use in the classifier:
- 0 to [1., -1.]
- 1 to [-1., 1.]
```python
def generate_onehot_value(values):
    """
    Convert binary truth labels to the one-hot encoding used by the
    MuyGPyS classifier: 0 -> [1., -1.] (single star), 1 -> [-1., 1.]
    (blended star).

    Raises:
        ValueError: if a label other than 0 or 1 is encountered.
    """
    onehot = []
    for val in values:
        if val == 0:
            onehot.append([1., -1.])
        elif val == 1:
            onehot.append([-1., 1.])
        else:
            # The original silently skipped unknown labels, which would
            # misalign the one-hot list with the input labels.
            raise ValueError(f"Unexpected label {val!r}; expected 0 or 1")
    return onehot
```
### Run the classifier on each dataset
For each dataset (un-normalized and normalized) in `data_files`, this for loop does the following:
- Separate labels from data
- Split up data between training and testing
- `test_size` is the fraction of the data you want to use for testing, where 0.5 means half of the data is used for testing and half for training.
- `random_state` makes each dataset get trained and tested on the same number of stars and galaxies.
- Gets the one-hot values for the testing and training labels
- Gets `train` and `test` into the proper format for the classifier, a dictionary with the keys:
- 'input':
- 'output':
- 'lookup':
- Does the classification (`do_classify`)
- Computes the accuracy of the classifier for the given dataset, by comparing predicted labels to truth labels.
```python
# Nearest-neighbour lookup settings for MuyGPyS:
# exact ball-tree search vs. approximate HNSW search.
nn_kwargs_exact = {"nn_method": "exact", "algorithm": "ball_tree"}
nn_kwargs_hnsw = {"nn_method": "hnsw"}

# RBF kernel with an isotropic F2 deformation; the length scale is
# optimized within the bounds (1e-2, 1e2).
k_kwargs_rbf = {
    "kernel": RBF(
        deformation=Isotropy(
            metric=F2,
            length_scale=Parameter(1.0, (1e-2, 1e2)),
        ),
    ),
    "noise": HomoscedasticNoise(1e-5),
}

# Matern kernel with fixed smoothness 0.5 and the same isotropic F2
# deformation / length-scale bounds.
k_kwargs_mattern = {
    "kernel": Matern(
        smoothness=ScalarParam(0.5),
        deformation=Isotropy(
            metric=F2,
            length_scale=Parameter(1.0, (1e-2, 1e2)),
        ),
    ),
    "noise": HomoscedasticNoise(1e-5),
}
```
```python
# Accumulators for the per-dataset results.
norm_name = []
my_accuracy = []

# Run the MuyGPyS classifier on every dataset and record its accuracy.
for path in tqdm(norm_data_names):
    path1 = '../data/data-norm/max-pixel-all/' + path
    data = pd.read_csv(path1, na_values='-')
    # Missing pixel values are treated as zero flux.
    data.fillna(0, inplace=True)
    data_label = ''.join(path.split('.')[:2])

    # First column holds the truth labels (0 = single, 1 = blended);
    # the remaining columns are the flattened image pixels.
    truth_labels = data.iloc[:, 0].values
    image_data = data.iloc[:, 1:].values

    # Fixed random_state so every dataset gets the same train/test split.
    X_train, X_test, y_train, y_test = train_test_split(image_data, truth_labels, test_size=0.2, random_state=42)

    print("=============== ", data_label, " ===============")
    print('Training data:', len(y_train[y_train==0]), 'single stars and', len(y_train[y_train==1]), 'blended stars')
    print('Testing data:', len(y_test[y_test==0]), 'single stars and', len(y_test[y_test==1]), 'blended stars')

    # One-hot encode the labels as required by do_classify.
    onehot_train, onehot_test = generate_onehot_value(y_train), generate_onehot_value(y_test)
    train = {'input': X_train, 'output': onehot_train, 'lookup': y_train}
    test = {'input': X_test, 'output': onehot_test, 'lookup': y_test}

    print("Running Classifier on", data_label)
    #Switch verbose to True for more output
    muygps, nbrs_lookup, surrogate_predictions = do_classify(
        test_features=np.array(test['input']),
        train_features=np.array(train['input']),
        train_labels=np.array(train['output']),
        nn_count=15,
        batch_count=200,
        loss_fn=cross_entropy_fn,
        opt_fn=Bayes_optimize,
        k_kwargs=k_kwargs_mattern,
        nn_kwargs=nn_kwargs_hnsw,
        verbose=False)

    # Predicted class = argmax over the two surrogate outputs; accuracy is
    # the fraction of test samples whose prediction matches the truth.
    predicted_labels = np.argmax(surrogate_predictions, axis=1)
    accur = np.around((np.sum(predicted_labels == np.argmax(test["output"], axis=1))/len(predicted_labels))*100, 3)

    norm_name.append(''.join(data_label.split('_')[-3:]))
    my_accuracy.append(accur)
    print("Total accuracy for", data_label, ":", accur, '%')
```
0%| | 0/79 [00:00<?, ?it/s]
=============== nthroot_00_data ===============
Training data: 12072 single stars and 9729 blended stars
Testing data: 3037 single stars and 2414 blended stars
Running Classifier on nthroot_00_data
1%|▏ | 1/79 [00:02<02:49, 2.17s/it]
Total accuracy for nthroot_00_data : 55.715 %
=============== nthroot_003448_data ===============
Training data: 12072 single stars and 9729 blended stars
Testing data: 3037 single stars and 2414 blended stars
Running Classifier on nthroot_003448_data
3%|▎ | 2/79 [00:04<02:43, 2.12s/it]
Total accuracy for nthroot_003448_data : 80.517 %
=============== nthroot_003448 ===============
Training data: 12122 single stars and 9679 blended stars
Testing data: 2987 single stars and 2464 blended stars
Running Classifier on nthroot_003448
4%|▍ | 3/79 [00:06<02:38, 2.09s/it]
Total accuracy for nthroot_003448 : 78.646 %
=============== nthroot_006897_data ===============
Training data: 12072 single stars and 9729 blended stars
Testing data: 3037 single stars and 2414 blended stars
Running Classifier on nthroot_006897_data
5%|▌ | 4/79 [00:08<02:37, 2.10s/it]
Total accuracy for nthroot_006897_data : 80.334 %
=============== nthroot_006897 ===============
Training data: 12122 single stars and 9679 blended stars
Testing data: 2987 single stars and 2464 blended stars
Running Classifier on nthroot_006897
6%|▋ | 5/79 [00:10<02:29, 2.01s/it]
Total accuracy for nthroot_006897 : 78.848 %
=============== nthroot_01034_data ===============
Training data: 12072 single stars and 9729 blended stars
Testing data: 3037 single stars and 2414 blended stars
Running Classifier on nthroot_01034_data
8%|▊ | 6/79 [00:12<02:23, 1.97s/it]
Total accuracy for nthroot_01034_data : 80.646 %
=============== nthroot_01034 ===============
Training data: 12122 single stars and 9679 blended stars
Testing data: 2987 single stars and 2464 blended stars
Running Classifier on nthroot_01034
9%|▉ | 7/79 [00:14<02:19, 1.94s/it]
Total accuracy for nthroot_01034 : 78.206 %
=============== nthroot_01379_data ===============
Training data: 12072 single stars and 9729 blended stars
Testing data: 3037 single stars and 2414 blended stars
Running Classifier on nthroot_01379_data
10%|█ | 8/79 [00:15<02:11, 1.85s/it]
Total accuracy for nthroot_01379_data : 80.646 %
=============== nthroot_01379 ===============
Training data: 12122 single stars and 9679 blended stars
Testing data: 2987 single stars and 2464 blended stars
Running Classifier on nthroot_01379
11%|█▏ | 9/79 [00:17<02:12, 1.90s/it]
Total accuracy for nthroot_01379 : 78.499 %
=============== nthroot_01724_data ===============
Training data: 12072 single stars and 9729 blended stars
Testing data: 3037 single stars and 2414 blended stars
Running Classifier on nthroot_01724_data
13%|█▎ | 10/79 [00:20<02:28, 2.15s/it]
Total accuracy for nthroot_01724_data : 79.435 %
=============== nthroot_01724 ===============
Training data: 12122 single stars and 9679 blended stars
Testing data: 2987 single stars and 2464 blended stars
Running Classifier on nthroot_01724
14%|█▍ | 11/79 [00:22<02:26, 2.15s/it]
Total accuracy for nthroot_01724 : 78.187 %
=============== nthroot_02069_data ===============
Training data: 12072 single stars and 9729 blended stars
Testing data: 3037 single stars and 2414 blended stars
Running Classifier on nthroot_02069_data
15%|█▌ | 12/79 [00:24<02:15, 2.02s/it]
Total accuracy for nthroot_02069_data : 80.664 %
=============== nthroot_02069 ===============
Training data: 12122 single stars and 9679 blended stars
Testing data: 2987 single stars and 2464 blended stars
Running Classifier on nthroot_02069
16%|█▋ | 13/79 [00:26<02:15, 2.05s/it]
Total accuracy for nthroot_02069 : 78.609 %
=============== nthroot_02414_data ===============
Training data: 12072 single stars and 9729 blended stars
Testing data: 3037 single stars and 2414 blended stars
Running Classifier on nthroot_02414_data
18%|█▊ | 14/79 [00:28<02:13, 2.06s/it]
Total accuracy for nthroot_02414_data : 80.279 %
=============== nthroot_02414 ===============
Training data: 12122 single stars and 9679 blended stars
Testing data: 2987 single stars and 2464 blended stars
Running Classifier on nthroot_02414
19%|█▉ | 15/79 [00:30<02:07, 2.00s/it]
Total accuracy for nthroot_02414 : 79.123 %
=============== nthroot_02759_data ===============
Training data: 12072 single stars and 9729 blended stars
Testing data: 3037 single stars and 2414 blended stars
Running Classifier on nthroot_02759_data
20%|██ | 16/79 [00:32<02:10, 2.07s/it]
Total accuracy for nthroot_02759_data : 79.618 %
=============== nthroot_02759 ===============
Training data: 12122 single stars and 9679 blended stars
Testing data: 2987 single stars and 2464 blended stars
Running Classifier on nthroot_02759
22%|██▏ | 17/79 [00:34<02:03, 1.99s/it]
Total accuracy for nthroot_02759 : 78.481 %
=============== nthroot_03103_data ===============
Training data: 12072 single stars and 9729 blended stars
Testing data: 3037 single stars and 2414 blended stars
Running Classifier on nthroot_03103_data
23%|██▎ | 18/79 [00:36<02:07, 2.10s/it]
Total accuracy for nthroot_03103_data : 74.684 %
=============== nthroot_03103 ===============
Training data: 12122 single stars and 9679 blended stars
Testing data: 2987 single stars and 2464 blended stars
Running Classifier on nthroot_03103
24%|██▍ | 19/79 [00:38<02:00, 2.00s/it]
Total accuracy for nthroot_03103 : 79.362 %
=============== nthroot_03448_data ===============
Training data: 12072 single stars and 9729 blended stars
Testing data: 3037 single stars and 2414 blended stars
Running Classifier on nthroot_03448_data
25%|██▌ | 20/79 [00:40<01:57, 2.00s/it]
Total accuracy for nthroot_03448_data : 80.04 %
=============== nthroot_03448 ===============
Training data: 12122 single stars and 9679 blended stars
Testing data: 2987 single stars and 2464 blended stars
Running Classifier on nthroot_03448
27%|██▋ | 21/79 [00:42<02:02, 2.12s/it]
Total accuracy for nthroot_03448 : 77.399 %
=============== nthroot_03793_data ===============
Training data: 12072 single stars and 9729 blended stars
Testing data: 3037 single stars and 2414 blended stars
Running Classifier on nthroot_03793_data
28%|██▊ | 22/79 [00:44<01:57, 2.07s/it]
Total accuracy for nthroot_03793_data : 80.059 %
=============== nthroot_03793 ===============
Training data: 12122 single stars and 9679 blended stars
Testing data: 2987 single stars and 2464 blended stars
Running Classifier on nthroot_03793
29%|██▉ | 23/79 [00:47<02:03, 2.20s/it]
Total accuracy for nthroot_03793 : 73.454 %
=============== nthroot_04138_data ===============
Training data: 12072 single stars and 9729 blended stars
Testing data: 3037 single stars and 2414 blended stars
Running Classifier on nthroot_04138_data
30%|███ | 24/79 [00:49<01:56, 2.12s/it]
Total accuracy for nthroot_04138_data : 80.407 %
=============== nthroot_04138 ===============
Training data: 12122 single stars and 9679 blended stars
Testing data: 2987 single stars and 2464 blended stars
Running Classifier on nthroot_04138
32%|███▏ | 25/79 [00:51<01:55, 2.14s/it]
Total accuracy for nthroot_04138 : 79.031 %
=============== nthroot_04483_data ===============
Training data: 12072 single stars and 9729 blended stars
Testing data: 3037 single stars and 2414 blended stars
Running Classifier on nthroot_04483_data
33%|███▎ | 26/79 [00:53<01:51, 2.10s/it]
Total accuracy for nthroot_04483_data : 79.857 %
=============== nthroot_04483 ===============
Training data: 12122 single stars and 9679 blended stars
Testing data: 2987 single stars and 2464 blended stars
Running Classifier on nthroot_04483
34%|███▍ | 27/79 [00:55<01:46, 2.05s/it]
Total accuracy for nthroot_04483 : 79.068 %
=============== nthroot_04828_data ===============
Training data: 12072 single stars and 9729 blended stars
Testing data: 3037 single stars and 2414 blended stars
Running Classifier on nthroot_04828_data
35%|███▌ | 28/79 [00:57<01:47, 2.10s/it]
Total accuracy for nthroot_04828_data : 76.041 %
=============== nthroot_04828 ===============
Training data: 12122 single stars and 9679 blended stars
Testing data: 2987 single stars and 2464 blended stars
Running Classifier on nthroot_04828
37%|███▋ | 29/79 [01:00<01:50, 2.20s/it]
Total accuracy for nthroot_04828 : 73.326 %
=============== nthroot_05172_data ===============
Training data: 12072 single stars and 9729 blended stars
Testing data: 3037 single stars and 2414 blended stars
Running Classifier on nthroot_05172_data
38%|███▊ | 30/79 [01:01<01:43, 2.11s/it]
Total accuracy for nthroot_05172_data : 77.16 %
=============== nthroot_05172 ===============
Training data: 12122 single stars and 9679 blended stars
Testing data: 2987 single stars and 2464 blended stars
Running Classifier on nthroot_05172
39%|███▉ | 31/79 [01:03<01:38, 2.06s/it]
Total accuracy for nthroot_05172 : 79.196 %
=============== nthroot_05517_data ===============
Training data: 12072 single stars and 9729 blended stars
Testing data: 3037 single stars and 2414 blended stars
Running Classifier on nthroot_05517_data
41%|████ | 32/79 [01:05<01:36, 2.05s/it]
Total accuracy for nthroot_05517_data : 71.583 %
=============== nthroot_05517 ===============
Training data: 12122 single stars and 9679 blended stars
Testing data: 2987 single stars and 2464 blended stars
Running Classifier on nthroot_05517
42%|████▏ | 33/79 [01:07<01:31, 1.98s/it]
Total accuracy for nthroot_05517 : 78.683 %
=============== nthroot_05862_data ===============
Training data: 12072 single stars and 9729 blended stars
Testing data: 3037 single stars and 2414 blended stars
Running Classifier on nthroot_05862_data
43%|████▎ | 34/79 [01:10<01:32, 2.05s/it]
Total accuracy for nthroot_05862_data : 74.17 %
=============== nthroot_05862 ===============
Training data: 12122 single stars and 9679 blended stars
Testing data: 2987 single stars and 2464 blended stars
Running Classifier on nthroot_05862
44%|████▍ | 35/79 [01:11<01:25, 1.95s/it]
Total accuracy for nthroot_05862 : 78.775 %
=============== nthroot_06207_data ===============
Training data: 12072 single stars and 9729 blended stars
Testing data: 3037 single stars and 2414 blended stars
Running Classifier on nthroot_06207_data
46%|████▌ | 36/79 [01:13<01:23, 1.93s/it]
Total accuracy for nthroot_06207_data : 74.39 %
=============== nthroot_06207 ===============
Training data: 12122 single stars and 9679 blended stars
Testing data: 2987 single stars and 2464 blended stars
Running Classifier on nthroot_06207
47%|████▋ | 37/79 [01:15<01:19, 1.90s/it]
Total accuracy for nthroot_06207 : 70.006 %
=============== nthroot_06552_data ===============
Training data: 12072 single stars and 9729 blended stars
Testing data: 3037 single stars and 2414 blended stars
Running Classifier on nthroot_06552_data
48%|████▊ | 38/79 [01:17<01:18, 1.91s/it]
Total accuracy for nthroot_06552_data : 79.857 %
=============== nthroot_06552 ===============
Training data: 12122 single stars and 9679 blended stars
Testing data: 2987 single stars and 2464 blended stars
Running Classifier on nthroot_06552
49%|████▉ | 39/79 [01:19<01:18, 1.97s/it]
Total accuracy for nthroot_06552 : 75.142 %
=============== nthroot_06897_data ===============
Training data: 12072 single stars and 9729 blended stars
Testing data: 3037 single stars and 2414 blended stars
Running Classifier on nthroot_06897_data
51%|█████ | 40/79 [01:21<01:16, 1.97s/it]
Total accuracy for nthroot_06897_data : 78.554 %
=============== nthroot_06897 ===============
Training data: 12122 single stars and 9679 blended stars
Testing data: 2987 single stars and 2464 blended stars
Running Classifier on nthroot_06897
52%|█████▏ | 41/79 [01:23<01:14, 1.95s/it]
Total accuracy for nthroot_06897 : 78.554 %
=============== nthroot_07241_data ===============
Training data: 12072 single stars and 9729 blended stars
Testing data: 3037 single stars and 2414 blended stars
Running Classifier on nthroot_07241_data
53%|█████▎ | 42/79 [01:25<01:13, 1.99s/it]
Total accuracy for nthroot_07241_data : 77.527 %
=============== nthroot_07241 ===============
Training data: 12122 single stars and 9679 blended stars
Testing data: 2987 single stars and 2464 blended stars
Running Classifier on nthroot_07241
54%|█████▍ | 43/79 [01:27<01:11, 1.98s/it]
Total accuracy for nthroot_07241 : 70.703 %
=============== nthroot_07586_data ===============
Training data: 12072 single stars and 9729 blended stars
Testing data: 3037 single stars and 2414 blended stars
Running Classifier on nthroot_07586_data
56%|█████▌ | 44/79 [01:29<01:10, 2.03s/it]
Total accuracy for nthroot_07586_data : 75.197 %
=============== nthroot_07586 ===============
Training data: 12122 single stars and 9679 blended stars
Testing data: 2987 single stars and 2464 blended stars
Running Classifier on nthroot_07586
57%|█████▋ | 45/79 [01:31<01:08, 2.02s/it]
Total accuracy for nthroot_07586 : 78.536 %
=============== nthroot_07931_data ===============
Training data: 12072 single stars and 9729 blended stars
Testing data: 3037 single stars and 2414 blended stars
Running Classifier on nthroot_07931_data
58%|█████▊ | 46/79 [01:33<01:04, 1.97s/it]
Total accuracy for nthroot_07931_data : 79.673 %
=============== nthroot_07931 ===============
Training data: 12122 single stars and 9679 blended stars
Testing data: 2987 single stars and 2464 blended stars
Running Classifier on nthroot_07931
59%|█████▉ | 47/79 [01:35<01:01, 1.93s/it]
Total accuracy for nthroot_07931 : 73.216 %
=============== nthroot_08276_data ===============
Training data: 12072 single stars and 9729 blended stars
Testing data: 3037 single stars and 2414 blended stars
Running Classifier on nthroot_08276_data
61%|██████ | 48/79 [01:37<01:00, 1.96s/it]
Total accuracy for nthroot_08276_data : 75.436 %
=============== nthroot_08276 ===============
Training data: 12122 single stars and 9679 blended stars
Testing data: 2987 single stars and 2464 blended stars
Running Classifier on nthroot_08276
62%|██████▏ | 49/79 [01:39<00:59, 1.97s/it]
Total accuracy for nthroot_08276 : 76.812 %
=============== nthroot_08621_data ===============
Training data: 12072 single stars and 9729 blended stars
Testing data: 3037 single stars and 2414 blended stars
Running Classifier on nthroot_08621_data
63%|██████▎ | 50/79 [01:40<00:55, 1.90s/it]
Total accuracy for nthroot_08621_data : 79.508 %
=============== nthroot_08621 ===============
Training data: 12122 single stars and 9679 blended stars
Testing data: 2987 single stars and 2464 blended stars
Running Classifier on nthroot_08621
65%|██████▍ | 51/79 [01:42<00:53, 1.91s/it]
Total accuracy for nthroot_08621 : 69.584 %
=============== nthroot_08966_data ===============
Training data: 12072 single stars and 9729 blended stars
Testing data: 3037 single stars and 2414 blended stars
Running Classifier on nthroot_08966_data
66%|██████▌ | 52/79 [01:44<00:51, 1.91s/it]
Total accuracy for nthroot_08966_data : 78.628 %
=============== nthroot_08966 ===============
Training data: 12122 single stars and 9679 blended stars
Testing data: 2987 single stars and 2464 blended stars
Running Classifier on nthroot_08966
67%|██████▋ | 53/79 [01:46<00:49, 1.92s/it]
Total accuracy for nthroot_08966 : 77.839 %
=============== nthroot_0931_data ===============
Training data: 12072 single stars and 9729 blended stars
Testing data: 3037 single stars and 2414 blended stars
Running Classifier on nthroot_0931_data
[91mData point [100.] is not unique. 1 duplicates registered. Continuing ...[0m
[91mData point [100.] is not unique. 2 duplicates registered. Continuing ...[0m
[91mData point [100.] is not unique. 3 duplicates registered. Continuing ...[0m
[91mData point [100.] is not unique. 4 duplicates registered. Continuing ...[0m
[91mData point [100.] is not unique. 5 duplicates registered. Continuing ...[0m
[91mData point [100.] is not unique. 6 duplicates registered. Continuing ...[0m
[91mData point [100.] is not unique. 7 duplicates registered. Continuing ...[0m
[91mData point [100.] is not unique. 8 duplicates registered. Continuing ...[0m
[91mData point [100.] is not unique. 9 duplicates registered. Continuing ...[0m
68%|██████▊ | 54/79 [01:48<00:49, 1.97s/it]
Total accuracy for nthroot_0931_data : 69.382 %
=============== nthroot_0931 ===============
Training data: 12122 single stars and 9679 blended stars
Testing data: 2987 single stars and 2464 blended stars
Running Classifier on nthroot_0931
70%|██████▉ | 55/79 [01:50<00:47, 1.98s/it]
Total accuracy for nthroot_0931 : 69.602 %
=============== nthroot_09655_data ===============
Training data: 12072 single stars and 9729 blended stars
Testing data: 3037 single stars and 2414 blended stars
Running Classifier on nthroot_09655_data
71%|███████ | 56/79 [01:52<00:46, 2.00s/it]
Total accuracy for nthroot_09655_data : 70.317 %
=============== nthroot_09655 ===============
Training data: 12122 single stars and 9679 blended stars
Testing data: 2987 single stars and 2464 blended stars
Running Classifier on nthroot_09655
72%|███████▏ | 57/79 [01:54<00:43, 1.96s/it]
Total accuracy for nthroot_09655 : 77.766 %
=============== norm_1_datacsv ===============
Training data: 12072 single stars and 9729 blended stars
Testing data: 3037 single stars and 2414 blended stars
Running Classifier on norm_1_datacsv
73%|███████▎ | 58/79 [01:56<00:39, 1.88s/it]
Total accuracy for norm_1_datacsv : 80.646 %
=============== nthroot_10_data ===============
Training data: 12072 single stars and 9729 blended stars
Testing data: 3037 single stars and 2414 blended stars
Running Classifier on nthroot_10_data
75%|███████▍ | 59/79 [01:58<00:38, 1.90s/it]
Total accuracy for nthroot_10_data : 75.142 %
=============== nthroot_10 ===============
Training data: 12122 single stars and 9679 blended stars
Testing data: 2987 single stars and 2464 blended stars
Running Classifier on nthroot_10
76%|███████▌ | 60/79 [02:00<00:36, 1.90s/it]
Total accuracy for nthroot_10 : 72.097 %
=============== norm_1csv ===============
Training data: 12122 single stars and 9679 blended stars
Testing data: 2987 single stars and 2464 blended stars
Running Classifier on norm_1csv
77%|███████▋ | 61/79 [02:02<00:35, 1.95s/it]
Total accuracy for norm_1csv : 80.866 %
=============== norm_2_datacsv ===============
Training data: 12072 single stars and 9729 blended stars
Testing data: 3037 single stars and 2414 blended stars
Running Classifier on norm_2_datacsv
78%|███████▊ | 62/79 [02:04<00:33, 1.98s/it]
Total accuracy for norm_2_datacsv : 79.472 %
=============== norm_2csv ===============
Training data: 12122 single stars and 9679 blended stars
Testing data: 2987 single stars and 2464 blended stars
Running Classifier on norm_2csv
80%|███████▉ | 63/79 [02:06<00:31, 1.94s/it]
Total accuracy for norm_2csv : 78.628 %
=============== norm_21_datacsv ===============
Training data: 12072 single stars and 9729 blended stars
Testing data: 3037 single stars and 2414 blended stars
Running Classifier on norm_21_datacsv
81%|████████ | 64/79 [02:08<00:28, 1.90s/it]
Total accuracy for norm_21_datacsv : 80.426 %
=============== norm_21csv ===============
Training data: 12122 single stars and 9679 blended stars
Testing data: 2987 single stars and 2464 blended stars
Running Classifier on norm_21csv
82%|████████▏ | 65/79 [02:10<00:27, 1.93s/it]
Total accuracy for norm_21csv : 80.444 %
=============== norm_3_datacsv ===============
Training data: 12072 single stars and 9729 blended stars
Testing data: 3037 single stars and 2414 blended stars
Running Classifier on norm_3_datacsv
84%|████████▎ | 66/79 [02:12<00:25, 1.93s/it]
Total accuracy for norm_3_datacsv : 74.904 %
=============== norm_3csv ===============
Training data: 12122 single stars and 9679 blended stars
Testing data: 2987 single stars and 2464 blended stars
Running Classifier on norm_3csv
85%|████████▍ | 67/79 [02:13<00:23, 1.94s/it]
Total accuracy for norm_3csv : 77.527 %
=============== norm_31_datacsv ===============
Training data: 12072 single stars and 9729 blended stars
Testing data: 3037 single stars and 2414 blended stars
Running Classifier on norm_31_datacsv
86%|████████▌ | 68/79 [02:15<00:20, 1.87s/it]
Total accuracy for norm_31_datacsv : 68.85 %
=============== norm_31csv ===============
Training data: 12122 single stars and 9679 blended stars
Testing data: 2987 single stars and 2464 blended stars
Running Classifier on norm_31csv
87%|████████▋ | 69/79 [02:17<00:18, 1.84s/it]
Total accuracy for norm_31csv : 69.712 %
=============== norm_4_datacsv ===============
Training data: 12072 single stars and 9729 blended stars
Testing data: 3037 single stars and 2414 blended stars
Running Classifier on norm_4_datacsv
89%|████████▊ | 70/79 [02:19<00:16, 1.84s/it]
Total accuracy for norm_4_datacsv : 78.334 %
=============== norm_4csv ===============
Training data: 12122 single stars and 9679 blended stars
Testing data: 2987 single stars and 2464 blended stars
Running Classifier on norm_4csv
90%|████████▉ | 71/79 [02:20<00:14, 1.79s/it]
Total accuracy for norm_4csv : 77.472 %
=============== norm_41_datacsv ===============
Training data: 12072 single stars and 9729 blended stars
Testing data: 3037 single stars and 2414 blended stars
Running Classifier on norm_41_datacsv
91%|█████████ | 72/79 [02:22<00:12, 1.74s/it]
Total accuracy for norm_41_datacsv : 68.96 %
=============== norm_41csv ===============
Training data: 12122 single stars and 9679 blended stars
Testing data: 2987 single stars and 2464 blended stars
Running Classifier on norm_41csv
92%|█████████▏| 73/79 [02:24<00:10, 1.72s/it]
Total accuracy for norm_41csv : 68.997 %
=============== norm_5_datacsv ===============
Training data: 12072 single stars and 9729 blended stars
Testing data: 3037 single stars and 2414 blended stars
Running Classifier on norm_5_datacsv
94%|█████████▎| 74/79 [02:26<00:08, 1.80s/it]
Total accuracy for norm_5_datacsv : 79.6 %
=============== norm_5csv ===============
Training data: 12122 single stars and 9679 blended stars
Testing data: 2987 single stars and 2464 blended stars
Running Classifier on norm_5csv
95%|█████████▍| 75/79 [02:28<00:07, 1.81s/it]
Total accuracy for norm_5csv : 80.499 %
=============== norm_51_datacsv ===============
Training data: 12072 single stars and 9729 blended stars
Testing data: 3037 single stars and 2414 blended stars
Running Classifier on norm_51_datacsv
96%|█████████▌| 76/79 [02:30<00:05, 1.86s/it]
Total accuracy for norm_51_datacsv : 76.518 %
=============== norm_51csv ===============
Training data: 12122 single stars and 9679 blended stars
Testing data: 2987 single stars and 2464 blended stars
Running Classifier on norm_51csv
[91mData point [100.] is not unique. 1 duplicates registered. Continuing ...[0m
[91mData point [100.] is not unique. 2 duplicates registered. Continuing ...[0m
[91mData point [100.] is not unique. 3 duplicates registered. Continuing ...[0m
97%|█████████▋| 77/79 [02:32<00:04, 2.04s/it]
Total accuracy for norm_51csv : 68.043 %
=============== nthroot_log_datacsv ===============
Training data: 12072 single stars and 9729 blended stars
Testing data: 3037 single stars and 2414 blended stars
Running Classifier on nthroot_log_datacsv
99%|█████████▊| 78/79 [02:34<00:02, 2.05s/it]
Total accuracy for nthroot_log_datacsv : 77.747 %
=============== nthroot_logcsv ===============
Training data: 12122 single stars and 9679 blended stars
Testing data: 2987 single stars and 2464 blended stars
Running Classifier on nthroot_logcsv
100%|██████████| 79/79 [02:36<00:00, 1.98s/it]
Total accuracy for nthroot_logcsv : 77.27 %
```python
accura = pd.DataFrame({'norm_name': norm_name, 'accuracy': my_accuracy})
accura.to_csv('../data/muygps-max-all-accuracy.csv', index=False)
accura = pd.read_csv('../data/muygps-max-all-accuracy.csv')
accura.sort_values(by=['accuracy'], inplace=True)
accura.T
```
<div>
<style scoped>
.dataframe tbody tr th:only-of-type {
vertical-align: middle;
}
.dataframe tbody tr th {
vertical-align: top;
}
.dataframe thead th {
text-align: right;
}
</style>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>0</th>
<th>76</th>
<th>67</th>
<th>71</th>
<th>72</th>
<th>53</th>
<th>50</th>
<th>54</th>
<th>68</th>
<th>36</th>
<th>...</th>
<th>23</th>
<th>63</th>
<th>64</th>
<th>74</th>
<th>1</th>
<th>5</th>
<th>57</th>
<th>7</th>
<th>11</th>
<th>60</th>
</tr>
</thead>
<tbody>
<tr>
<th>norm_name</th>
<td>nthroot00data</td>
<td>norm51csv</td>
<td>norm31datacsv</td>
<td>norm41datacsv</td>
<td>norm41csv</td>
<td>nthroot0931data</td>
<td>nthroot08621</td>
<td>nthroot0931</td>
<td>norm31csv</td>
<td>nthroot06207</td>
<td>...</td>
<td>nthroot04138data</td>
<td>norm21datacsv</td>
<td>norm21csv</td>
<td>norm5csv</td>
<td>nthroot003448data</td>
<td>nthroot01034data</td>
<td>norm1datacsv</td>
<td>nthroot01379data</td>
<td>nthroot02069data</td>
<td>norm1csv</td>
</tr>
<tr>
<th>accuracy</th>
<td>55.715</td>
<td>68.043</td>
<td>68.85</td>
<td>68.96</td>
<td>68.997</td>
<td>69.382</td>
<td>69.584</td>
<td>69.602</td>
<td>69.712</td>
<td>70.006</td>
<td>...</td>
<td>80.407</td>
<td>80.426</td>
<td>80.444</td>
<td>80.499</td>
<td>80.517</td>
<td>80.646</td>
<td>80.646</td>
<td>80.646</td>
<td>80.664</td>
<td>80.866</td>
</tr>
</tbody>
</table>
<p>2 rows × 79 columns</p>
</div>
```python
accura.nlargest(10, 'accuracy')
```
<div>
<style scoped>
.dataframe tbody tr th:only-of-type {
vertical-align: middle;
}
.dataframe tbody tr th {
vertical-align: top;
}
.dataframe thead th {
text-align: right;
}
</style>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>norm_name</th>
<th>accuracy</th>
</tr>
</thead>
<tbody>
<tr>
<th>60</th>
<td>norm1csv</td>
<td>80.866</td>
</tr>
<tr>
<th>11</th>
<td>nthroot02069data</td>
<td>80.664</td>
</tr>
<tr>
<th>5</th>
<td>nthroot01034data</td>
<td>80.646</td>
</tr>
<tr>
<th>57</th>
<td>norm1datacsv</td>
<td>80.646</td>
</tr>
<tr>
<th>7</th>
<td>nthroot01379data</td>
<td>80.646</td>
</tr>
<tr>
<th>1</th>
<td>nthroot003448data</td>
<td>80.517</td>
</tr>
<tr>
<th>74</th>
<td>norm5csv</td>
<td>80.499</td>
</tr>
<tr>
<th>64</th>
<td>norm21csv</td>
<td>80.444</td>
</tr>
<tr>
<th>63</th>
<td>norm21datacsv</td>
<td>80.426</td>
</tr>
<tr>
<th>23</th>
<td>nthroot04138data</td>
<td>80.407</td>
</tr>
</tbody>
</table>
</div>
<u>***Note:*** Each time you run the classifier will result in different accuracies.</u>
### As you can see, all 5 normalization techniques do much better than the un-normalized data, with some performing better than others.
### Things you can try, to see how they affect the classifier accuracy:
- Play around with different values of `test_size`. What does testing on more or less data do?
- Play around with different parameters that are passed to `do_classify`. Start with `nn_count` and `embed_dim` (for what those arguments are, and a full list of all of the arguments you can pass to `do_classify`, look at the function `do_classify` in `/MuyGPyS/examples/classify.py`).
- Try generating more cutouts using `generating_ZTF_cutouts_from_ra_dec.ipynb`. How does having more testing and training data affect the classifier?
- Play around with the parameters used to make the cutouts. What happens if you remove blend cuts? Can the classifier classify blends? What if you increase the seeing limit? Can the classifier classify images with bad atmospheric quality?
<hr style="border:2px solid gray"> </hr>
## <u>**Optional Step:**</u>
### Running each dataset through the classifier multiple times, testing and training on varying amounts of data, different random states, and plotting the accuracy outcomes
- Each time you run the following steps, you change:
- `test_size`: This is used in `train_test_split`, and changes the size of the testing and training datasets, which affects the accuracy of the classifier.
- `random_state`: This is used in `train_test_split`, and changes the ratio of how many stars-to-galaxies get tested on.
- You can set how many times to run the classifier with varying test sizes and random states by setting `num_runs`, and you can manually change the test_size values by editing `test_size_values`.
```python
test_size_values = [.2, .25, .33, .4, .5, .75]
num_runs = 3
```
```python
# def run_classifier(image_data, truth_labels, test_size, state):
# X_train, X_test, y_train, y_test = train_test_split(image_data, truth_labels, test_size=test_size, random_state=state)
# onehot_train, onehot_test = generate_onehot_value(y_train), generate_onehot_value(y_test)
# train = {'input': X_train, 'output': onehot_train, 'lookup': y_train}
# test = {'input': X_test, 'output': onehot_test, 'lookup': y_test}
# #Switch verbose to True for more output
# muygps, nbrs_lookup, surrogate_predictions= do_classify(
# test_features=np.array(test['input']),
# train_features=np.array(train['input']),
# train_labels=np.array(train['output']),
# nn_count=20,
# batch_count=200,
# loss_fn=cross_entropy_fn,
# opt_fn=Bayes_optimize,
# k_kwargs=k_kwargs_mattern,
# nn_kwargs=nn_kwargs_hnsw,
# verbose=False)
# predicted_labels = np.argmax(surrogate_predictions, axis=1)
# accuracy = (np.sum(predicted_labels == np.argmax(test["output"], axis=1))/len(predicted_labels))*100
# return accuracy
```
```python
# from time import perf_counter
# start = perf_counter()
# accuracies = pd.DataFrame({'test_size': test_size_values})
# # Setting progress bar for each time the classifier will be run during this step
# pbar = tqdm(total=len(norm_data_names)*num_runs*len(test_size_values), desc='Running classifier', leave=True)
# for path in norm_data_names:
# path1 = '../data/data-norm/max-pixel-all/' + path
# data = pd.read_csv(path1,na_values='-')
# data.fillna(0,inplace=True)
# data_label = ''.join(path.split('.')[:2])
# truth_labels = data.iloc[:, 0].values
# image_data = data.iloc[:, 1:].values
# all_acc_dataset = []
# for test_size in test_size_values:
# acc = []
# idx = 1
# while idx <= num_runs:
# accuracy = run_classifier(image_data, truth_labels, test_size, state=random.randint(0, 10000))
# acc.append(accuracy)
# pbar.update(1)
# idx += 1
# avg_acc = np.average(acc)
# all_acc_dataset.append(avg_acc)
# temp_df = pd.DataFrame({str(data_label): all_acc_dataset})
# accuracies = pd.concat([accuracies, temp_df], axis=1)
# end = perf_counter()
# print(f"Time taken to run the classifier on all datasets: {(end-start)/60} minutes")
# accuracies.to_csv('max-all-accuracies.csv', index=False)
# display(accuracies)
```
```python
# plt.figure(figsize=(12,4))
# for path in norm_data_names:
# path1 = '../data/data-norm/max-pixel-all/' + path
# data = pd.read_csv(path1,na_values='-')
# data.fillna(0,inplace=True)
# data_label = ''.join(path.split('.')[:2])
# # data_label = 'Normalized {} {}'.format(*path.split('_')[:2])
# plt.plot(accuracies['test_size'].values, accuracies[data_label].values, label=data_label)
# plt.title("MuyGPs Stellar Blending 2-class")
# plt.legend(fontsize=10)
# plt.tick_params(labelsize=10)
# plt.xlabel("Test size (as a ratio to full data size)", fontsize=10)
# plt.ylabel("Accuracy [%]", fontsize=10)
# plt.savefig("muygps_max_all_abs.png")
# plt.show()
```
```python
# accuracies = pd.read_csv('max-all-accuracies.csv')
# np.max(accuracies.values, axis=1)
```
```python
# idcs = np.argmax(accuracies.values, axis=1)
# accuracies.iloc[:, idcs]
```
There is no benefit to normalizing images by dividing by the maximum over the entire dataset. All of the best accuracies above occur where the max-all normalization is not applied. However, the neural-net model seems to favor max-all normalization over image-by-image max normalization.
|
cae0027REPO_NAMEStellar-BlendsPATH_START.@Stellar-Blends_extracted@Stellar-Blends-main@ce@model-comparison@stellar_blends_muygps_classification_max_all.ipynb@.PATH_END.py
|
{
"filename": "test_orbit_astrometry_HD10009.py",
"repo_name": "iancze/PSOAP",
"repo_path": "PSOAP_extracted/PSOAP-master/tests/test_orbit_astrometry_HD10009.py",
"type": "Python"
}
|
import pytest
import os
import pkg_resources
import numpy as np
from psoap import orbit_astrometry
from psoap import constants as C
from astropy.time import Time
import matplotlib.pyplot as plt
import matplotlib
# Create plots of all of the orbits
from astropy.io import ascii
# Create plots of all of the orbits
# If it doesn't already exist, create a "plots/basic/" directory
# Output directory for all diagnostic figures produced by these tests.
outdir = "tests/plots/HD10009/"
if not os.path.exists(outdir):
    print("Creating ", outdir)
    os.makedirs(outdir)
# Load the astrometric data set shipped with the psoap package.
# Missing values in the CSV are marked with "." and filled with 0.
astro_fname = pkg_resources.resource_filename("psoap", "data/HD10009/astro.txt")
astro_data = ascii.read(astro_fname, format="csv", fill_values=[(".", '0')])
# print(astro_data["date"].data)
# Convert the UT decimal-year dates to Julian Date.
astro_dates = Time(astro_data["date"].data, format="decimalyear")
astro_dates.format = 'jd'
astro_jds = astro_dates.value
# Separation ("rho") and position angle ("PA") columns with their
# uncertainties. Plot labels later use mas for rho -- TODO confirm units
# against the data file.
rho_data = astro_data["rho"]
rho_err = astro_data["rho_err"]
theta_data = astro_data["PA"]
theta_err = astro_data["PA_err"]
def test_data():
    """Plot the raw astrometric measurements on the sky plane."""
    # Convert the (rho, PA) separations into Cartesian sky offsets.
    angles_rad = theta_data * np.pi / 180
    north = rho_data * np.cos(angles_rad)
    east = rho_data * np.sin(angles_rad)
    figure, axis = plt.subplots(nrows=1)
    axis.plot(north, east, ".")
    axis.set_xlabel("North")
    axis.set_ylabel("East")
    axis.plot(0, 0, "k*")  # primary star at the origin
    axis.set_aspect("equal", "datalim")
    figure.savefig(outdir + "data_astro.png")
# Load the radial-velocity data for each component; dates are stored as
# truncated Julian Dates and shifted here to full JD.
rv1_fname = pkg_resources.resource_filename("psoap", "data/HD10009/rv1.txt")
rv_data_1 = ascii.read(rv1_fname, format="csv")
rv2_fname = pkg_resources.resource_filename("psoap", "data/HD10009/rv2.txt")
rv_data_2 = ascii.read(rv2_fname, format="csv")
rv_jds_A = rv_data_1["date"] + 2400000
vAs_data = rv_data_1["rv"]
vAs_err = rv_data_1["err"]
rv_jds_B = rv_data_2["date"] + 2400000
vBs_data = rv_data_2["rv"]
vBs_err = rv_data_2["err"]
# Distance to the system, used to convert angular to physical scales.
dpc = 37.03 # pc
# Orbital elements for HD 10009
a = 0.324 * dpc # semi-major axis: angular size [arcsec] times distance -> [AU]
e = 0.798 # eccentricity
i = 96.0 # inclination [deg]
omega_2 = 251.6 # argument of periastron [deg] -- original note said "omega_1"; verify
omega = omega_2 + 180.0 # argument of periastron of the other component [deg]
Omega = 159.6 # position angle of the ascending node [deg]
T0 = Time(1989.92, format="decimalyear")
T0.format = "jd"
T0 = T0.value # epoch of periastron [Julian Date]
M_2 = 1.0 # secondary mass [M_sun]
M_tot = 1.2 + M_2 # total mass [M_sun]
gamma = 47.8 # systemic velocity [km/s]
# Kepler's third law: period from total mass and semi-major axis, in days.
P = np.sqrt(4 * np.pi**2 / (C.G * M_tot * C.M_sun) * (a * C.AU)**3) / (24 * 3600) # [day]
print(P/365)
# Pick a span of dates covering exactly one orbital period
dates = np.linspace(T0, T0 + P, num=600)
# Initialize the orbit
orb = orbit_astrometry.Binary(a, e, i, omega, Omega, T0, M_tot, M_2, gamma, obs_dates=dates)
# Full Cartesian solution (sky-frame XYZ and orbital-plane xy positions).
full_dict = orb.get_full_orbit()
vAs, vBs, XYZ_As, XYZ_Bs, XYZ_ABs, xy_As, xy_Bs, xy_ABs = [full_dict[key] for key in ("vAs", "vBs", "XYZ_As", "XYZ_Bs", "XYZ_ABs", "xy_As", "xy_Bs", "xy_ABs")]
# Polar (rho, theta) solution used for comparison against the astrometry.
polar_dict = orb.get_orbit()
vAs, vBs, rho_ABs, theta_ABs = [polar_dict[key] for key in ("vAs", "vBs", "rhos", "thetas")]
# Convert physical separations to angular sky coordinates via the distance.
alpha_dec_As = XYZ_As/dpc # [arcsec]
alpha_dec_Bs = XYZ_Bs/dpc # [arcsec]
alpha_dec_ABs = XYZ_ABs/dpc # [arcsec]
rho_ABs = rho_ABs/dpc # [arcsec]
# Periastron and ascending-node positions for each component and for the
# relative (B about A) orbit, converted to angular units.
peri_A = orb._get_periastron_A()/dpc
peri_B = orb._get_periastron_B()/dpc
peri_BA = orb._get_periastron_BA()/dpc
asc_A = orb._get_node_A()/dpc
asc_B = orb._get_node_B()/dpc
asc_BA = orb._get_node_BA()/dpc
# Since each figure plots many epochs at once, color-code the points by
# date so the direction of motion along the orbit is visible.
cmap_primary = matplotlib.cm.get_cmap("Blues")
cmap_secondary = matplotlib.cm.get_cmap("Oranges")
norm = matplotlib.colors.Normalize(vmin=np.min(dates), vmax=np.max(dates))
# Helper below colors each plotted point by its observation date.
def plot_points(ax, dates, xs, ys, primary):
    """Scatter orbit samples on *ax*, color-coded by observation date.

    The primary component uses the blue colormap, the secondary the
    orange one.
    """
    cmap = cmap_primary if primary else cmap_secondary
    for date, x, y in zip(dates, xs, ys):
        ax.plot(x, y, "o", color=cmap(norm(date)), mew=0.1, ms=3, mec="k")
# Plot the relative orbit of B about A projected on the sky
# (secondary colormap; A fixed at the origin).
fig, ax = plt.subplots(nrows=1, figsize=(5,5))
plot_points(ax, dates, alpha_dec_ABs[:,0], alpha_dec_ABs[:,1], False)
ax.plot(0,0, "*k", ms=2)
# Mark periastron (black dot) and the ascending node (green dot).
ax.plot(peri_BA[0], peri_BA[1], "ko", ms=3)
ax.plot(asc_BA[0], asc_BA[1], "o", color="C2", ms=3)
ax.set_xlabel(r"$\Delta \delta$ mas")
ax.set_ylabel(r"$\Delta \alpha \cos \delta $ mas")
ax.set_aspect("equal", "datalim")
fig.savefig(outdir + "orbit_B_rel_A.png")
# Make a series of astrometric plots from different angles.
def test_AB_Z():
    """Plot components A and B together, projected along the Z axis."""
    figure, axis = plt.subplots(nrows=1, figsize=(5, 5))
    axis.plot(0, 0, "ok", ms=2)  # center of mass
    plot_points(axis, dates, alpha_dec_As[:, 0], alpha_dec_As[:, 1], True)
    plot_points(axis, dates, alpha_dec_Bs[:, 0], alpha_dec_Bs[:, 1], False)
    # Mark periastron passages (black dots) and ascending nodes (triangles).
    for point in (peri_A, peri_B):
        axis.plot(point[0], point[1], "ko", ms=3)
    for point, shade in ((asc_A, "C0"), (asc_B, "C1")):
        axis.plot(point[0], point[1], "^", color=shade, ms=3)
    axis.set_xlabel(r"$\Delta \delta$ mas")
    axis.set_ylabel(r"$\Delta \alpha \cos \delta$ mas")
    axis.set_aspect("equal", "datalim")
    figure.subplots_adjust(left=0.15, right=0.85, bottom=0.15, top=0.85)
    # This is the view seen by the observer (looking along -Z).
    figure.savefig(outdir + "orbit_AB_Z.png")
def test_AB_X():
    """Plot A and B together, viewed from the X axis.

    In this projection column 1 (labelled alpha) forms the horizontal
    axis and Z (towards the observer) the vertical axis; the dotted
    horizontal line marks the sky plane.
    """
    fig, ax = plt.subplots(nrows=1, figsize=(5, 5))
    ax.plot(0, 0, "ok", ms=2)  # center of mass
    plot_points(ax, dates, alpha_dec_As[:, 1], alpha_dec_As[:, 2], True)
    plot_points(ax, dates, alpha_dec_Bs[:, 1], alpha_dec_Bs[:, 2], False)
    # Periastron (black dots) and ascending-node (triangle) markers.
    ax.plot(peri_A[1], peri_A[2], "ko", ms=3)
    ax.plot(peri_B[1], peri_B[2], "ko", ms=3)
    ax.plot(asc_A[1], asc_A[2], "^", color="C0", ms=3)
    ax.plot(asc_B[1], asc_B[2], "^", color="C1", ms=3)
    # Fixed malformed LaTeX: "\cos delta" -> "\cos \delta" (matches test_AB_Z).
    ax.set_xlabel(r"$\Delta \alpha \cos \delta$ mas")
    ax.set_ylabel(r"$\Delta Z$ mas (towards observer)")
    ax.axhline(0, ls=":", color="k")
    ax.set_aspect("equal", "datalim")
    fig.savefig(outdir + "orbit_AB_X.png")
def test_AB_Y():
    """Plot A and B together, viewed from the Y axis.

    Z (towards the observer) forms the horizontal axis and column 0
    (labelled delta) the vertical axis.
    """
    figure, axis = plt.subplots(nrows=1, figsize=(5, 5))
    axis.plot(0, 0, "ok", ms=2)  # center of mass
    plot_points(axis, dates, alpha_dec_As[:, 2], alpha_dec_As[:, 0], True)
    plot_points(axis, dates, alpha_dec_Bs[:, 2], alpha_dec_Bs[:, 0], False)
    # Periastron (black dots) and ascending-node (triangle) markers.
    for point in (peri_A, peri_B):
        axis.plot(point[2], point[0], "ko", ms=3)
    for point, shade in ((asc_A, "C0"), (asc_B, "C1")):
        axis.plot(point[2], point[0], "^", color=shade, ms=3)
    axis.axvline(0, ls=":", color="k")  # sky plane
    axis.set_xlabel(r"$\Delta Z$ mas (towards observer)")
    axis.set_ylabel(r"$\Delta \delta$ mas")
    axis.set_aspect("equal", "datalim")
    figure.savefig(outdir + "orbit_AB_Y.png")
def test_vel_rho_theta_one_period():
    """Plot vA, vB, rho and theta versus time over one orbital period."""
    panels = [
        (vAs, r"$v_A$ km/s"),
        (vBs, r"$v_B$ km/s"),
        (rho_ABs, r"$\rho_\mathrm{AB}$ [mas]"),
        (theta_ABs, r"$\theta$ [deg]"),
    ]
    fig, axes = plt.subplots(nrows=4, sharex=True, figsize=(8, 8))
    for axis, (series, label) in zip(axes, panels):
        axis.plot(dates, series)
        axis.set_ylabel(label)
    axes[-1].set_xlabel("date")
    fig.savefig(outdir + "orbit_vel_rho_theta_one_period.png", dpi=400)
# Now make a 3D Orbit and pop it up
def test_B_rel_A_plane():
    """Plot the relative orbit of B about A in the orbital plane (AU)."""
    figure, axis = plt.subplots(nrows=1, figsize=(5, 5))
    plot_points(axis, dates, xy_ABs[:, 0], xy_ABs[:, 1], False)
    axis.plot(0, 0, "*k", ms=10)  # A at the origin
    axis.set_xlabel(r"$X$ [AU]")
    axis.set_ylabel(r"$Y$ [AU]")
    axis.set_aspect("equal", "datalim")
    figure.savefig(outdir + "orbit_B_rel_A_plane.png")
def test_AB_plane():
    """Plot both component orbits in the orbital plane (AU)."""
    figure, axis = plt.subplots(nrows=1, figsize=(5, 5))
    for track, is_primary in ((xy_As, True), (xy_Bs, False)):
        plot_points(axis, dates, track[:, 0], track[:, 1], is_primary)
    axis.plot(0, 0, "ko", ms=10)
    axis.set_xlabel(r"$X$ [AU]")
    axis.set_ylabel(r"$Y$ [AU]")
    axis.set_aspect("equal", "datalim")
    figure.savefig(outdir + "orbit_AB_plane.png")
# Recompute the orbit on a finer grid of dates spanning the full
# observational baseline, so the model can be overplotted on the data.
dates = np.linspace(2443500, 2452010, num=3000) # [day]
orb = orbit_astrometry.Binary(a, e, i, omega, Omega, T0, M_tot, M_2, gamma, obs_dates=dates)
polar_dict = orb.get_orbit()
vAs, vBs, rho_ABs, theta_ABs = [polar_dict[key] for key in ("vAs", "vBs", "rhos", "thetas")]
# Convert the physical separation to angular units using the distance.
rho_ABs = rho_ABs/dpc # [arcsec]
def test_vel_rho_theta():
    """Overlay the orbital model on the RV and astrometric data."""
    fig, panels = plt.subplots(nrows=4, sharex=True, figsize=(12, 8))
    # Radial-velocity panels: model curve, error bars, then data points.
    rv_specs = [
        (rv_jds_A, vAs_data, vAs_err, vAs, r"$v_A$ km/s"),
        (rv_jds_B, vBs_data, vBs_err, vBs, r"$v_B$ km/s"),
    ]
    for panel, (jds, data, err, model, label) in zip(panels[:2], rv_specs):
        panel.plot(dates, model)
        panel.errorbar(jds, data, yerr=err, ls="")
        panel.plot(jds, data, "k.")
        panel.set_ylabel(label)
    # Astrometry panels: separation and position angle (no error bars).
    astro_specs = [
        (rho_data, rho_ABs, r"$\rho_\mathrm{AB}$ [mas]"),
        (theta_data, theta_ABs, r"$\theta$ [deg]"),
    ]
    for panel, (data, model, label) in zip(panels[2:], astro_specs):
        panel.plot(dates, model)
        panel.plot(astro_jds, data, "k.")
        panel.set_ylabel(label)
    panels[-1].set_xlabel("date")
    fig.savefig(outdir + "orbit_vel_rho_theta.png", dpi=400)
    plt.close('all')
# Close any figures still open at module import time.
plt.close('all')
|
ianczeREPO_NAMEPSOAPPATH_START.@PSOAP_extracted@PSOAP-master@tests@test_orbit_astrometry_HD10009.py@.PATH_END.py
|
{
"filename": "analyzer.py",
"repo_name": "ML4GW/aframe",
"repo_path": "aframe_extracted/aframe-main/projects/plots/plots/vizapp/infer/analyzer.py",
"type": "Python"
}
|
from pathlib import Path
from typing import Dict, List, Sequence
import h5py
import numpy as np
import torch
from gwpy.timeseries import TimeSeries
from ledger.injections import InterferometerResponseSet, waveform_class_factory
from plots.vizapp.infer.utils import get_indices, get_strain_fname
from utils.preprocessing import BackgroundSnapshotter, BatchWhitener
class EventAnalyzer:
    """
    Class for performing on-the-fly inference around candidate events.

    Strain surrounding the event is read from disk, optionally injected
    with a simulated signal, whitened, and passed through the trained
    model in fixed-size batches.
    """

    def __init__(
        self,
        model: torch.nn.Module,
        strain_dir: Path,
        response_set: Path,
        psd_length: float,
        kernel_length: float,
        sample_rate: float,
        fduration: float,
        inference_sampling_rate: float,
        integration_length: float,
        batch_size: int,
        highpass: float,
        fftlength: float,
        device: str,
        ifos: List[str],
        padding: int = 3,
    ):
        self.model = model
        # Whitens the strain and slices it into batched inference kernels.
        self.whitener = BatchWhitener(
            kernel_length,
            sample_rate,
            inference_sampling_rate,
            batch_size,
            fduration,
            fftlength=fftlength,
            highpass=highpass,
            return_whitened=True,
        ).to(device)
        # Maintains the rolling snapshot state carried between
        # successive inference steps.
        self.snapshotter = BackgroundSnapshotter(
            psd_length=psd_length,
            kernel_length=kernel_length,
            fduration=fduration,
            sample_rate=sample_rate,
            inference_sampling_rate=inference_sampling_rate,
        ).to(device)
        self.response_set = response_set
        self.strain_dir = strain_dir
        self.ifos = ifos
        self.padding = padding
        self.sample_rate = sample_rate
        self.fduration = fduration
        self.psd_length = psd_length
        self.kernel_length = kernel_length
        self.highpass = highpass
        self.inference_sampling_rate = inference_sampling_rate
        self.integration_length = integration_length
        self.batch_size = batch_size
        self.device = device

    @property
    def waveform_class(self):
        # Waveform container class specialized to this set of interferometers.
        return waveform_class_factory(
            self.ifos, InterferometerResponseSet, "IfoWaveformSet"
        )

    @property
    def kernel_size(self):
        # Length of one analysis kernel in samples.
        return int(self.kernel_length * self.sample_rate)

    @property
    def state_shape(self):
        # Shape of the snapshotter's rolling state tensor.
        return (1, len(self.ifos), self.snapshotter.state_size)

    @property
    def inference_stride(self):
        # Samples between successive inference windows.
        return int(self.sample_rate / self.inference_sampling_rate)

    @property
    def step_size(self):
        # Samples consumed by one batched inference step.
        return int(self.batch_size * self.inference_stride)

    @property
    def integration_size(self):
        # Length of the moving-average window in inference samples.
        return int(self.integration_length * self.inference_sampling_rate)

    @property
    def window(self):
        # Boxcar window used to integrate the network output.
        return np.ones((self.integration_size,)) / self.integration_size

    @property
    def times(self):
        """
        Returns the time values relative to event time
        """
        start = (
            self.psd_length
            + self.kernel_length
            + (self.fduration / 2)
            + self.padding
        )
        stop = self.kernel_length + (self.fduration / 2) + self.padding
        return np.arange(-start, stop, 1 / self.sample_rate)

    @property
    def inference_times(self):
        # Times at which network outputs are produced.
        return self.times[:: self.inference_stride]

    @property
    def whitened_times(self):
        # Times corresponding to the whitened strain returned by `infer`;
        # the first samples are consumed by the snapshotter/whitening
        # burn-in. NOTE(review): offset arithmetic taken as-is -- verify.
        start = (
            self.step_size
            - self.inference_stride
            - int(self.sample_rate * self.fduration)
        )
        return self.times[start:]

    def find_strain(self, time: float, shifts: Sequence[float]):
        """Load strain around ``time``, applying per-ifo time shifts.

        Returns a stacked (n_ifos, n_samples) tensor and the start time
        of the returned segment.
        """
        # find strain file corresponding to requested time
        fname, t0, duration = get_strain_fname(self.strain_dir, time)
        # find indices of data needed for inference
        times = np.arange(t0, t0 + duration, 1 / self.sample_rate)
        start, stop = get_indices(
            times, time + self.times[0], time + self.times[-1]
        )
        strain = []
        with h5py.File(fname, "r") as f:
            for ifo, shift in zip(self.ifos, shifts):
                shift_size = int(shift * self.sample_rate)
                start_shifted, stop_shifted = (
                    start + shift_size,
                    stop + shift_size,
                )
                data = torch.tensor(f[ifo][start_shifted:stop_shifted])
                strain.append(data)
        return torch.stack(strain, axis=0), time + self.times[0]

    def find_waveform(self, time: float, shifts: np.ndarray):
        """
        find the closest injection that corresponds to event
        time and shifts from waveform dataset
        """
        waveform = self.waveform_class.read(
            self.response_set, time - 0.1, time + 0.1, shifts
        )
        return waveform

    def integrate(self, y):
        """Moving-average the network output over the integration window."""
        integrated = np.convolve(y, self.window, mode="full")
        return integrated[: -self.integration_size + 1]

    def infer(self, X: torch.Tensor):
        """Run the model over ``X`` in steps of ``step_size`` samples.

        Returns the raw network outputs and the whitened strain, with
        any right-padding trimmed off.
        """
        ys, strain = [], []
        start = 0
        state = torch.zeros(self.state_shape).to(self.device)
        # Pad X up to a multiple of the step size, remembering how much
        # padding was added so it can be stripped from the outputs.
        # Fix: previously `pad` and the output slice were undefined when
        # the input length was already a multiple of the step size,
        # raising a NameError below.
        remainder = X.shape[-1] % self.step_size
        pad = self.step_size - remainder if remainder else 0
        if pad:
            X = torch.nn.functional.pad(X, (0, pad))
        while start <= (X.shape[-1] - self.step_size):
            stop = start + self.step_size
            x = X[:, :, start:stop]
            with torch.no_grad():
                x, state = self.snapshotter(x, state)
                batch, whitened = self.whitener(x)
                y_hat = self.model(batch)[:, 0].cpu().numpy()
            strain.append(whitened.cpu().numpy())
            ys.append(y_hat)
            start += self.step_size
        whitened = np.concatenate(strain, axis=-1)
        ys = np.concatenate(ys)
        if pad:
            whitened = whitened[..., :-pad]
            ys = ys[: -(pad // self.inference_stride)]
        return ys, whitened

    def analyze(self, time, shifts, foreground):
        """Run the full analysis for one event.

        If ``foreground`` is truthy, the matching simulated waveform is
        injected into the strain first. Returns the raw network output,
        its integrated version, and the whitened strain.
        """
        strain, t0 = self.find_strain(time, shifts)
        if foreground:
            waveform = self.find_waveform(time, shifts)
            strain = waveform.inject(strain, t0)
        strain = strain[None]  # add batch dimension
        strain = torch.Tensor(strain).to(self.device)
        nn, whitened = self.infer(strain)
        integrated = self.integrate(nn)
        return nn, integrated, whitened

    def get_fft(self, strain: Dict[str, np.ndarray]):
        """Compute amplitude spectra of the whitened strain per ifo."""
        ffts = {}
        for ifo in self.ifos:
            data = strain[ifo]
            ts = TimeSeries(data, times=self.whitened_times)
            ts = ts.crop(-3, 5)
            # Keep only frequencies above the highpass cutoff.
            fft = ts.fft().crop(start=self.highpass)
            freqs = fft.frequencies.value
            ffts[ifo] = np.abs(fft.value)
        return freqs, ffts

    def qscan(self, strain: Dict[str, np.ndarray]):
        """Compute a Q-transform spectrogram of the whitened strain per ifo."""
        qscans = []
        for ifo in self.ifos:
            data = strain[ifo]
            ts = TimeSeries(data, times=self.whitened_times)
            ts = ts.crop(-3, 3)
            # Strain is already whitened, so whiten=False here.
            qscan = ts.q_transform(
                logf=True, frange=(32, 1024), whiten=False, outseg=(-1, 1)
            )
            qscans.append(qscan)
        return qscans
|
ML4GWREPO_NAMEaframePATH_START.@aframe_extracted@aframe-main@projects@plots@plots@vizapp@infer@analyzer.py@.PATH_END.py
|
{
"filename": "binpaper2020.py",
"repo_name": "AstroSheppard/WFC3-analysis",
"repo_path": "WFC3-analysis_extracted/WFC3-analysis-main/LATE/verification/binpaper2020.py",
"type": "Python"
}
|
from __future__ import print_function
import sys
import numpy as np
import pandas as pd
import scipy.stats as st
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from matplotlib.ticker import ScalarFormatter
sys.path.append('./MCcubed')
import MCcubed as mc3
def comp_methods(visit, binsize, bin, wave):
    """Overlay the marginalization and MCMC fits of the same spectral bin.

    Loads the saved light-curve products of both methods from the
    bin-analysis CSVs and plots the binned data with both best-fit models
    plus the depth difference, so the two systematic-removal methods can
    be compared by eye.

    Parameters
    ----------
    visit : str
        Observation identifier (e.g. 'planet/visitXX/direction').
    binsize : int
        Number of spectral channels per bin.
    bin : int
        Index of the bin to compare.
    wave : float
        Central wavelength of the bin [micron]; currently unused here.
    """
    # --- Marginalization products ---
    modelfile='../bin_analysis/bin_smooth2.csv'
    datafile='../bin_analysis/bin_data2.csv'
    pfile='../bin_analysis/bin_params2.csv'
    params=pd.read_csv(pfile, index_col=[0,1,2,3]).sort_index()
    data=pd.read_csv(datafile, index_col=[0,1,2]).sort_index()
    data=data.loc[(visit, binsize, bin)]
    params=params.loc[(visit, binsize, bin, 'Values')]
    smooth=pd.read_csv(modelfile, index_col=[0,1,2]).sort_index()
    smooth=smooth.loc[(visit, binsize, bin)]
    # Transit depth in ppm.
    margdepth=params['Depth']*1e6
    margflux=data['Norm Flux'].values
    margerror=data['Norm Flux Error'].values
    margnflux=data['Flux'].values
    margnerror=data['Flux Error'].values
    # Fit residuals in ppm.
    margresids=data['Residuals'].values*1e6
    margbinphase=data['Model Phase'].values
    margcor=data['Corrected Flux'].values
    margcorerr=data['Corrected Flux Error'].values
    # NOTE(review): if `params` is a Series at this point,
    # params['Slope'].values raises AttributeError (unlike the guarded
    # lookups below) -- confirm this .loc returns a DataFrame row here.
    margslope=(params['Slope'].values*margbinphase+1.0)*params['Zero-flux'].values
    margmodel=smooth['Model'].values
    margmodelx=smooth['Phase'].values
    # Scalar extraction: .values[0] for a DataFrame row, plain value for a Series.
    try:
        margnorm=params['Zero-flux'].values[0]
    except AttributeError:
        margnorm=params['Zero-flux']
    try:
        margphoton_error=params['Photon Error'].values[0]
    except AttributeError:
        margphoton_error=params['Photon Error']
    # Reconstruct the systematics-only and full (transit+systematics) models.
    marg_sys_model=margflux/margcor/margnorm
    marg_full_model=margflux-margresids*margnorm/1e6
    xmin=np.min(margbinphase)-0.02
    xmax=np.max(margbinphase)+.02
    margmod=data['Model'].values
    print(params.T)
    # --- MCMC products (same layout, different CSV prefix) ---
    dfile='../bin_analysis/binmcmc_data.csv'
    pfile='../bin_analysis/binmcmc_params.csv'
    sfile='../bin_analysis/binmcmc_smooth.csv'
    binsmooth=pd.read_csv(sfile, index_col=[0,1,2]).sort_index()
    smooth=binsmooth.loc[(visit, binsize, bin)]
    data=pd.read_csv(dfile, index_col=[0,1,2]).sort_index()
    data=data.loc[(visit, binsize, bin)]
    params=pd.read_csv(pfile, index_col=[0,1,2,3]).sort_index()
    params=params.loc[(visit,binsize, bin, 'Values')]
    mcmcdepth=params['Depth']*1e6
    mcmcflux=data['Norm Flux'].values
    mcmcerror=data['Norm Flux Error'].values
    mcmcnflux=data['Flux'].values
    mcmcnerror=data['Flux Error'].values
    mcmcresids=data['Residuals'].values*1e6
    mcmcbinphase=data['Model Phase'].values
    mcmccor=data['Corrected Flux'].values
    mcmccorerr=data['Corrected Flux Error'].values
    mcmcslope=(params['Slope']*mcmcbinphase+1.0)*params['Zero-flux']
    mcmcmod=data['Model'].values
    print(params)
    try:
        mcmcphoton_error=params['Photon Error'].values[0]
    except AttributeError:
        mcmcphoton_error=params['Photon Error']
    try:
        mcmcnorm=params['Zero-flux'].values[0]
    except AttributeError:
        mcmcnorm=params['Zero-flux']
    mcmcmodel=smooth['Model'].values
    mcmcmodelx=smooth['Phase'].values
    mcmc_sys_model=mcmcflux/mcmccor
    mcmc_full_model=mcmcflux#-mcmcresids*mcmcnorm/1e6
    # Disabled 3-panel comparison figure, kept for reference.
    """f=plt.figure(figsize=(8,12))
plt.subplot(311)
plt.errorbar(margbinphase, margflux, margerror, color='b', ls='', marker='o',
ecolor='g', label='Error ')
plt.plot(mcmcbinphase, mcmc_full_model, label='MCMC', color='g')
plt.plot(margbinphase, marg_full_model, label='Marg', color='r')
plt.xlim([xmin, xmax])
plt.ylabel('Normalized Flux')
plt.text(-0.02, 1.0, 'Raw Light Curve')
#plt.text(0.0,.99, 'Error ')"""
    #plt.subplot(312)
    # Overlay both methods' binned models on the raw data, annotated with
    # the depth difference (ppm) and normalization difference.
    plt.errorbar(mcmcbinphase, mcmcmod, mcmccorerr, color='r', ls='', marker='o', ecolor='r', label='mcmc')
    #plt.errorbar(mcmcbinphase, mcmcresids/1e6, mcmccorerr, color='r', ls='', marker='o', ecolor='r', label='MCMC')
    #plt.plot(mcmcmodelx, mcmcmodel, color='b', label='MCMC')
    #plt.plot(mcmcbinphase, mcmc_sys_model, 'go', label='MCMC', ls='')
    #plt.plot(mcmcbinphase, mcmcslope, 'go', label='MCMC',ls='')
    plt.xlim([mcmcbinphase[0]-.01, mcmcbinphase[-1]+.01])
    #plt.plot(margmodelx, margmodel, color='r', label='Marg')
    #plt.plot(margbinphase, marg_sys_model, 'ro', label='Marg',ls='')
    #plt.plot(margbinphase, margslope, 'ro', label='Marg',ls='')
    plt.errorbar(margbinphase, margmod, margerror, color='g', ls='', marker='o',
                 ecolor='g', label='Marg')
    #plt.plot(margbinphase, np.zeros_like(margbinphase))
    plt.ylabel('Normalized Flux')
    #plt.text(-.02, 1.0, 'Systematics removed')
    #plt.text(.0, 0.000, 'Marg: %.1f' % (np.median(margresids[36:55])))
    #plt.text(.0, -0.001, 'MCMC: %.1f' % (np.median(mcmcresids[36:55])))
    plt.text(.0, 1-0.002, 'Marg-MCMC: %.1f' % (margdepth-mcmcdepth))
    plt.text(.0, 1-0.003, 'Norm dif: %.1f' % ((margnorm-mcmcnorm)*margdepth))
    plt.errorbar(margbinphase, margflux, margerror, color='b', ls='', marker='o',
                 ecolor='b', label='Data')
    #plt.text(-.2,.998, 'Error ')
    plt.legend()
    plt.show()
    # Disabled residual-comparison panel, kept for reference.
    """flat=np.zeros_like(mcmcresids)
mcmccorerr*=1e6
margcorerr*=1e6
plt.subplot(313)
p3=plt.errorbar(mcmcbinphase, mcmcresids, mcmccorerr, color='g', ls='', marker='o'
, ecolor='g', label='Residuals')
p3=plt.errorbar(margbinphase, margresids, margcorerr, color='r', ls='', marker='o'
, ecolor='r', label='Residuals')
plt.xlim([xmin, xmax])
plt.xlabel('Phase')
plt.ylabel('Obs - Model [ppm]')
plt.plot(mcmcbinphase, flat)
plt.text(-.23, np.max(mcmcresids), 'Residuals')
plt.legend()
plt.show()"""
def bin_op(input, size, op='mean'):
    """Bin a 1-D series into consecutive, non-overlapping chunks of `size`.

    Parameters
    ----------
    input : array-like
        Series to bin. Trailing elements that do not fill a complete
        chunk are dropped (original behavior).
    size : int
        Number of elements per chunk.
    op : str
        Reduction per chunk: 'mean', 'sum', or 'sqsum' (root-sum-square,
        i.e. quadrature sum, useful for combining errors).

    Returns
    -------
    numpy.ndarray of length len(input) // size.

    Raises
    ------
    ValueError if `op` is not one of the supported reductions.
    """
    # Bug fix: the original used true division (len(input)/size), which is
    # a float under Python 3 and crashes np.zeros.
    nbins = len(input) // size
    out = np.zeros(nbins)
    for i in range(nbins):
        chunk = np.asarray(input[size * i:size * (i + 1)])
        if op == 'mean':
            out[i] = chunk.mean()
        elif op == 'sum':
            out[i] = chunk.sum()
        elif op == 'sqsum':
            out[i] = np.sqrt(np.sum(np.square(chunk)))
        else:
            # Original silently left the slot at 0 for unknown ops.
            raise ValueError("op must be 'mean', 'sum', or 'sqsum'")
    return out
def correlated(resids, wave, axes, color):
    """Plot the binned-RMS (red noise) curve for one bin's residuals.

    Uses MCcubed's binrms to compute the residual RMS as a function of
    bin size, compares it to the white-noise expectation, and plots both
    (normalized to the unbinned RMS) on `axes`.

    Parameters
    ----------
    resids : array-like
        Fit residuals for one spectral bin.
    wave : float
        Central wavelength [micron]; 0 labels the band-integrated panel.
    axes : matplotlib Axes
        Panel to draw into.
    color : any matplotlib color
        Color for the data curve.

    Returns
    -------
    max_beta : float
        Largest significant RMS excess over the white-noise expectation
        (the "beta" red-noise inflation factor); 1.0 if no bin size shows
        a >2-sigma excess.
    """
    n = 26  # largest bin size (exposures per bin) to test
    rms, rmslo, rmshi, expected, binsize = mc3.rednoise.binrms(resids, n)
    # Bin sizes where the measured RMS exceeds the expectation by more
    # than twice its lower error bar.
    significant = np.where(rms / expected - 1 > 2 * rmslo / expected)[0]
    print(significant)
    if len(significant) == 0:
        max_beta = 1.0
    else:
        max_beta = np.max(rms[significant] / expected[significant])
        ind = np.argmax(rms[significant] / expected[significant])
        print(rms[significant[ind]] / expected[significant[ind]] -
              rmslo[significant[ind]] / expected[significant[ind]])
    # Bug fix: the original drew on the module-global `ax` instead of the
    # axes passed in, which only worked by accident when called from the
    # __main__ loop (where a global `ax` happened to alias the argument).
    axes.plot(binsize, expected / rms[0], color='black', label='Expected')
    axes.errorbar(binsize, rms / rms[0], yerr=[rmslo / rms[0], rmshi / rms[0]],
                  color=color, label='Data RMS')
    axes.set_xscale('log')
    axes.set_yscale('log')
    # Show plain numbers (not powers of ten) on both log axes.
    axes.xaxis.set_major_formatter(ScalarFormatter())
    axes.get_xaxis().get_major_formatter().labelOnlyBase = False
    axes.yaxis.set_major_formatter(ScalarFormatter())
    axes.set_yticks([.1, 1])
    axes.set_xticks([1, 2, 3, 4, 5, 6, 7, 8, 9])
    label = '%.03f $\mu$m' % wave
    if wave == 0:
        label = 'Band-integrated'
    axes.text(.05, .2, label, transform=axes.transAxes)
    return max_beta
def adtest(resids, photon_error, norm=False):
    """Test fit residuals for Gaussianity.

    Prints Shapiro-Wilk and D'Agostino (normaltest) p-values plus two
    Anderson-Darling statistics:
      * ad_0: against N(0, photon_error) -- "is the noise photon-limited?"
      * ad_3: against N(mean, std) estimated from the residuals -- "is the
        noise Gaussian at all, even if inflated?" (finite-sample corrected,
        since its parameters were estimated from the data).

    Parameters
    ----------
    resids : array-like
        Fit residuals (same units as `photon_error`).
    photon_error : float
        Theoretical photon-noise sigma.
    norm : bool
        If True, also display a probability plot.

    Returns
    -------
    res, cdf1, gauss_resids_0, gauss_resids_3, gauss_cdf :
        Sorted residuals, their empirical CDF, and smooth reference
        Gaussian curves (case 0 and case 3) for plotting.
    """
    if norm:
        st.probplot(resids, plot=plt)
        plt.show()
    shapiro = st.shapiro(resids)
    pearson = st.normaltest(resids)
    # Empirical CDF of the sorted residuals.
    nres = len(resids)
    res = np.sort(resids)
    num = np.ones(nres) / nres
    cdf1 = np.cumsum(num)
    # Case 3: mean/sigma estimated from the data (tests for any Gaussian,
    # i.e. white but possibly inflated noise).
    avg_3 = np.mean(res)
    sig_3 = np.std(res)
    # Case 0: fully specified reference -- zero-mean photon-noise Gaussian.
    avg_0 = 0
    sig_0 = photon_error
    # Standardize against each reference (see the A-D definition).
    data_0 = (res - avg_0) / sig_0
    data_3 = (res - avg_3) / sig_3
    cdf_0 = st.norm.cdf(data_0)
    cdf_3 = st.norm.cdf(data_3)
    # Smooth reference curves over +-3 sigma for plotting.
    gauss = np.arange(180) / 30. - 3
    gauss_cdf = st.norm.cdf(gauss)
    gauss_resids_0 = sig_0 * gauss + avg_0
    gauss_resids_3 = sig_3 * gauss + avg_3
    # Anderson-Darling statistic:
    #   A^2 = -n - (1/n) * sum_{j=1}^{n} (2j-1)[ln F(x_j) + ln(1 - F(x_{n+1-j}))]
    # Bug fix: the original looped over range(1, nres), which drops the
    # j = nres term of the sum and biases both statistics low.
    sum_0 = 0
    sum_3 = 0
    for j in range(1, nres + 1):
        sum_0 += (2 * j - 1) * (np.log(cdf_0[j - 1]) + np.log(1.0 - cdf_0[nres - j]))
        sum_3 += (2 * j - 1) * (np.log(cdf_3[j - 1]) + np.log(1.0 - cdf_3[nres - j]))
    ad_0 = -nres - sum_0 / nres
    ad_3 = -nres - sum_3 / nres
    # Finite-sample correction, applied only when the Gaussian parameters
    # were estimated from the data (case 3).
    ad_3 *= (1 + 4. / nres - 25. / nres / nres)
    print('Compared to theory-limited: %f' % ad_0)
    print('Compared to Gaussian: %f' % ad_3)
    print('Shapiro p-value: %f' % shapiro[1])
    print('Pearson p-value: %f' % pearson[1])
    return res, cdf1, gauss_resids_0, gauss_resids_3, gauss_cdf
def _param_scalar(params, key):
    """Return params[key] as a scalar whether `params` is a DataFrame row
    (has .values) or a plain Series (scalar lookup)."""
    try:
        return params[key].values[0]
    except AttributeError:
        return params[key]


def _load_bin_fit(method, visit, binsize, bin):
    """Load the saved fit products for one spectral bin of one method.

    The three fitting methods write identically structured CSVs under
    different prefixes; returns (data, params, smooth) already sliced to
    the requested (visit, binsize, bin).
    """
    prefix = {'marg': 'bin_', 'ramp': 'binramp_', 'mcmc': 'binmcmc_'}[method]
    base = '../bin_analysis/' + prefix
    data = pd.read_csv(base + 'data.csv', index_col=[0, 1, 2]).sort_index()
    data = data.loc[(visit, binsize, bin)]
    params = pd.read_csv(base + 'params.csv', index_col=[0, 1, 2, 3]).sort_index()
    params = params.loc[(visit, binsize, bin, 'Values')]
    smooth = pd.read_csv(base + 'smooth.csv', index_col=[0, 1, 2]).sort_index()
    smooth = smooth.loc[(visit, binsize, bin)]
    return data, params, smooth


def binpaper(visit, binsize, bin, wave, method='marg'):
    """Summarize the quality of fit for one spectral bin in one figure.

    Produces a 3-panel figure -- raw light curve with full model,
    systematics-corrected light curve with transit model, and residuals
    annotated with the RMS/photon-noise ratio -- and saves it as
    'bin_lightcurves<method>.png'.

    Parameters
    ----------
    visit : str
        Observation identifier.
    binsize : int
        Number of spectral channels per bin.
    bin : int
        Index of the bin to plot.
    wave : float
        Central wavelength of the bin [micron], used in the title.
    method : str
        Fitting method whose outputs to plot: 'marg', 'ramp', or 'mcmc'.

    Raises
    ------
    ValueError for an unknown `method` (the original fell through and
    crashed later with a NameError).
    """
    if method not in ('marg', 'ramp', 'mcmc'):
        raise ValueError("method must be 'marg', 'ramp', or 'mcmc'")
    # The original duplicated this loading code in three near-identical
    # branches; the CSV layout is the same for every method.
    data, params, smooth = _load_bin_fit(method, visit, binsize, bin)
    flux = data['Norm Flux'].values
    error = data['Norm Flux Error'].values
    resids = data['Residuals'].values * 1e6  # residuals in ppm
    binphase = data['Model Phase'].values
    cor = data['Corrected Flux'].values
    corerr = data['Corrected Flux Error'].values
    model = smooth['Model'].values
    modelx = smooth['Phase'].values
    norm = _param_scalar(params, 'Zero-flux')
    photon_error = _param_scalar(params, 'Photon Error')
    # Full (transit + systematics) model reconstructed from the residuals.
    full_model = flux - resids * norm / 1e6
    xmin = np.min(binphase) - 0.02
    xmax = np.max(binphase) + .02

    plt.figure(figsize=(8, 12))
    # Panel 1: raw light curve with the full model.
    plt.subplot(311)
    plt.errorbar(binphase, flux, error, color='b', ls='', marker='o',
                 ecolor='g', label='Error ')
    plt.plot(binphase, full_model)
    plt.xlim([xmin, xmax])
    plt.ylabel('Normalized Flux')
    plt.text(-0.02, 1.0, 'Raw Light Curve')
    # Panel 2: systematics-corrected light curve with the smooth model.
    plt.subplot(312)
    plt.errorbar(binphase, cor, corerr, color='b', ls='', marker='o', ecolor='purple')
    plt.plot(modelx, model)
    plt.xlim([xmin, xmax])
    plt.ylabel('Normalized Flux')
    plt.text(-.02, 1.0, 'Systematics removed')
    # Panel 3: residuals [ppm] with the RMS vs photon-noise annotation.
    flat = np.zeros_like(resids)
    corerr *= 1e6
    plt.subplot(313)
    plt.errorbar(binphase, resids, corerr, color='r', ls='', marker='o',
                 ecolor='red', label='Residuals')
    plt.xlim([xmin, xmax])
    plt.xlabel('Phase')
    plt.ylabel('Obs - Model [ppm]')
    plt.plot(binphase, flat)
    plt.text(-.23, np.max(resids), 'Residuals')
    std_res = np.std(resids)
    std_err = std_res / np.sqrt(2 * len(resids))
    ratio = std_res / photon_error
    # ''photon error'' is really the theory limit for bins, and should be
    # the same as flux_error/flux*1e6.
    plt.text(.01, np.min(resids) + 100,
             'RMS/photon: %.3f +- %.3f' % (ratio, std_err / photon_error))
    plt.title('%.03f $\mu$m' % wave, size=12)
    plt.savefig('bin_lightcurves' + method + '.png')
    plt.show()
def binvis(visit, binsize, wave, method='marg'):
    """Plot the corrected light curve of every spectral bin in one figure.

    Each bin's systematics-corrected flux and best-fit model are drawn
    with a constant vertical offset between bins, labelled with the bin
    wavelength and reduced chi-squared.

    Parameters
    ----------
    visit : str
        Observation identifier.
    binsize : int
        Number of spectral channels per bin.
    wave : array-like
        Central wavelength of each bin [micron].
    method : str
        Fitting method whose outputs to plot: 'marg' or 'mcmc' (the
        'ramp' branch appears broken -- see the note there).

    NOTE(review): this function reads the module-global `spec` (set in
    the __main__ block) for the number of bins -- confirm it is defined
    before calling from anywhere else.
    """
    if method=='marg':
        modelfile='../bin_analysis/bin_smooth2.csv'
        datafile='../bin_analysis/bin_data2.csv'
        data=pd.read_csv(datafile, index_col=[0,1,2]).sort_index()
        data=data.loc[(visit, binsize)]
        smooth=pd.read_csv(modelfile, index_col=[0,1,2]).sort_index()
        smooth=smooth.loc[(visit, binsize)]
        # chi squared stuff
        pfile='../bin_analysis/bin_params2.csv'
        params=pd.read_csv(pfile, index_col=[0,1,2,3]).sort_index()
        params=params.loc[(visit, binsize)]
        #flux=data['Norm Flux'].values
        #error=data['Norm Flux Error'].values
        #nflux=data['Flux'].values
        #nerror=data['Flux Error'].values
        #resids=data['Residuals'].values*1e6
        fig = plt.figure(figsize=(7, 12))
        colors = iter(cm.inferno(np.linspace(0.1, .8, len(spec))))
        # Plot bins [start_bin, end_bin); currently the first 14.
        start_bin = 0
        #start_bin = 14
        #end_bin=len(spec)
        end_bin=14
        # Advance the colormap past any skipped leading bins so colors
        # stay consistent with the full bin set.
        for i in range(start_bin):
            c=next(colors)
        for i in range(end_bin-start_bin):
            c = next(colors)
            dat = data.loc[start_bin]
            smoo = smooth.loc[start_bin]
            binphase=dat['Model Phase'].values
            cor=dat['Corrected Flux'].values
            corerr=dat['Corrected Flux Error'].values
            #print np.median(corerr)*1e6
            model=smoo['Model'].values
            modelx=smoo['Phase'].values
            # Get reduced chi-squared
            mresids=data.loc[start_bin,'Residuals'].values
            errors = data.loc[start_bin, 'Corrected Flux Error'].values
            #errors = errors/1.1
            # Free parameters = those with a non-zero entry in the
            # 'Errors' row between the rprs and WL Coeff columns.
            nfree = (params.loc[(start_bin, 'Errors'),'rprs':'WL Coeff'].values != 0).sum()
            dof = len(mresids) - nfree
            chi2 = np.sum(mresids*mresids/errors/errors)
            rchi2 = chi2/dof
            print(rchi2)
            # Plot
            xmin=np.min(binphase)-0.005
            xmax=np.max(binphase)+.02
            # Vertical offset between successive bins.
            con = 0.005
            plt.errorbar(binphase, cor-i*con, corerr, color=c,
                         ls='', marker='o', ecolor=c, markersize = 3)
            plt.plot(modelx, model-i*con, color=c)
            plt.xlim([xmin, xmax])
            plt.ylabel('Normalized Flux - Constant')
            plt.xlabel('Orbital Phase')
            plt.text(.045, 1.002-i*con, r'%.2f$\mu$m' % wave[start_bin], color=c)
            plt.text(-.06, 1.002-i*con, r'$\chi^2_{red}$=%.2f' % rchi2, color=c)
            start_bin += 1
        plt.savefig('../../hat41_bincurves1.pdf')
        #plt.show()
    elif method=='mcmc':
        dfile='../bin_analysis/binmcmc_data.csv'
        pfile='../bin_analysis/binmcmc_params.csv'
        sfile='../bin_analysis/binmcmc_smooth.csv'
        binsmooth=pd.read_csv(sfile, index_col=[0,1,2]).sort_index()
        smooth=binsmooth.loc[(visit, binsize)]
        data=pd.read_csv(dfile, index_col=[0,1,2]).sort_index()
        data=data.loc[(visit, binsize)]
        params=pd.read_csv(pfile, index_col=[0,1,2,3]).sort_index()
        # NOTE(review): the params index levels are (visit, binsize, bin,
        # label); this 3-tuple lookup treats 'Values' as the bin level --
        # confirm it selects what is intended.
        params=params.loc[(visit,binsize, 'Values')]
        flux=data['Norm Flux'].values
        error=data['Norm Flux Error'].values
        nflux=data['Flux'].values
        nerror=data['Flux Error'].values
        resids=data['Residuals'].values*1e6
        binphase=data['Model Phase'].values
        cor=data['Corrected Flux'].values
        corerr=data['Corrected Flux Error'].values
        # Scalar extraction: .values[0] for a DataFrame row, else Series.
        try:
            photon_error=params['Photon Error'].values[0]
        except AttributeError:
            photon_error=params['Photon Error']
        try:
            norm=params['Zero-flux'].values[0]
        except AttributeError:
            norm=params['Zero-flux']
        model=smooth['Model'].values
        modelx=smooth['Phase'].values
        sys_model=flux/cor/norm
        full_model=flux-resids*norm/1e6
        xmin=np.min(binphase)-0.02
        xmax=np.max(binphase)+.02
        fig = plt.figure(figsize=(7, 12))
        colors = iter(cm.inferno(np.linspace(0.1, .8, len(spec))))
        bin = 0
        for i in range(len(spec)):
            c = next(colors)
            dat = data.loc[bin]
            smoo = smooth.loc[bin]
            binphase=dat['Model Phase'].values
            cor=dat['Corrected Flux'].values
            corerr=dat['Corrected Flux Error'].values
            #print np.median(corerr)*1e6
            model=smoo['Model'].values
            modelx=smoo['Phase'].values
            # Get reduced chi-squared
            mresids=data.loc[i,'Residuals'].values
            errors = data.loc[i, 'Corrected Flux Error'].values
            #errors = errors/1.1
            nfree = (params.loc[(i, 'Errors'),'rprs':'WL Coeff'].values != 0).sum()
            dof = len(mresids) - nfree
            chi2 = np.sum(mresids*mresids/errors/errors)
            rchi2 = chi2/dof
            print(rchi2)
            # Plot
            xmin=np.min(binphase)-0.005
            xmax=np.max(binphase)+.01
            # Vertical offset between successive bins.
            con = 0.003
            plt.errorbar(binphase, cor-i*con, corerr, color=c,
                         ls='', marker='o', ecolor=c, markersize = 3)
            plt.plot(modelx, model-i*con, color=c)
            plt.xlim([xmin, xmax])
            plt.ylabel('Normalized Flux - Constant')
            plt.xlabel('Orbital Phase')
            plt.text(.03, 1.00012-i*con, r'%.2f$\mu$m' % wave[bin], color=c)
            plt.text(-.05, 1.0005-i*con, r'$\chi^2_{red}$=%.2f' % rchi2, color=c)
            bin += 1
        #plt.savefig('../../l9859c_bincurves.pdf')
        plt.show()
    ### HERE: need to make binvis and bincorrelated work for both new bin_data2 and for mcmc stuff.
    ## Hopefully, this will make difference in depths more obvious. Probably the slope though.
    ## I would like the prove that ramp misses slope with just a linear fit, since first two orbits
    ## are flat. Can also look more at acor_resids. Can also test ramp with quad or log slope or something.
    ## Finally, calculate evidences and chi squareds for both too.
    elif method=='ramp':
        # NOTE(review): this branch appears broken/dead -- it indexes the
        # CSVs with `bin`, which is not a parameter of binvis (so it
        # resolves to the builtin function), and the savename line below
        # references `start_bin`, which is only defined in the 'marg'
        # branch. Confirm before using.
        dfile='../bin_analysis/binramp_data.csv'
        pfile='../bin_analysis/binramp_params.csv'
        sfile='../bin_analysis/binramp_smooth.csv'
        binsmooth=pd.read_csv(sfile, index_col=[0,1,2]).sort_index()
        smooth=binsmooth.loc[(visit, binsize, bin)]
        data=pd.read_csv(dfile, index_col=[0,1,2]).sort_index()
        data=data.loc[(visit, binsize, bin)]
        params=pd.read_csv(pfile, index_col=[0,1,2,3]).sort_index()
        params=params.loc[(visit,binsize, bin, 'Values')]
        flux=data['Norm Flux'].values
        error=data['Norm Flux Error'].values
        nflux=data['Flux'].values
        nerror=data['Flux Error'].values
        resids=data['Residuals'].values*1e6
        binphase=data['Model Phase'].values
        cor=data['Corrected Flux'].values
        corerr=data['Corrected Flux Error'].values
        try:
            photon_error=params['Photon Error'].values[0]
        except AttributeError:
            photon_error=params['Photon Error']
        try:
            norm=params['Zero-flux'].values[0]
        except AttributeError:
            norm=params['Zero-flux']
        model=smooth['Model'].values
        modelx=smooth['Phase'].values
        sys_model=flux/cor/norm
        full_model=flux-resids*norm/1e6
        xmin=np.min(binphase)-0.02
        xmax=np.max(binphase)+.02
        #plt.subplot(312)
        #plt.text(-.2,.998, 'Error ')
        """ flat=np.zeros_like(resids)
corerr*=1e6
plt.subplot(313)
p3=plt.errorbar(binphase, resids, corerr, color='r', ls='', marker='o'
, ecolor='red', label='Residuals')
plt.xlim([xmin, xmax])
plt.xlabel('Phase')
plt.ylabel('Obs - Model [ppm]')
plt.plot(binphase, flat)
plt.text(-.23, np.max(resids), 'Residuals')
std_res=np.std(resids)
std_err=std_res/np.sqrt(2*len(resids))
ratio=std_res/photon_error
# ''photon error'' is really theory limit for bins, and should be same as flux_error/flux*1e6
#plt.text(-.2,np.min(resids)+100, 'RMS: %03d +- %03d' % (std_res, std_err))
plt.text(.01,np.min(resids)+100, 'RMS/photon: %.3f +- %.3f' % (ratio, std_err/photon_error))"""
        #plt.title('%.03f $\mu$m' % wave, size=12)
        #plt.savefig('bin_lightcurves'+method+'.png')
        savename='binpaper_'+visit.replace('/','_')+'_'+method+'.pdf'
        savename='bin%03d_outlier.pdf' % start_bin
        #f.savefig(savename)
        #f.clf()
        #plt.close(f)
        #plt.savefig('bin_resids_cdf'+method+'.png')
if __name__=='__main__':
    # Command line: obs_dir1 obs_dir2 obs_dir3 binsize bin [save]
    visit=sys.argv[1]+'/'+sys.argv[2]+'/'+sys.argv[3]
    binsize=int(sys.argv[4])
    bin=int(sys.argv[5])
    if len(sys.argv) == 7:
        save=bool(int(sys.argv[6]))
    else:
        save=False
    print(save)
    datafile='../bin_analysis/bin_data2.csv'
    rampfile='../bin_analysis/binramp_data.csv'
    spectra='../bin_analysis/spectra.csv'
    pfile='../bin_analysis/bin_params2.csv'
    # Theory-limited (photon) noise for the selected bin; the try/except
    # handles DataFrame-row vs Series lookups.
    phot_error=pd.read_csv(pfile, index_col=[0,1,2,3]).sort_index()
    try:
        phot_error=phot_error.loc[(visit, binsize, bin, 'Values')
                                  , 'Photon Error'].values[0]
    except AttributeError:
        phot_error=phot_error.loc[(visit, binsize, bin, 'Values')
                                  , 'Photon Error']
    # Central wavelength of every bin for the marginalization method.
    sp=pd.read_csv(spectra, index_col=[0,1,2]).sort_index()
    spec=sp.loc[(visit, 'marg', binsize), 'Central Wavelength'].values
    wave=spec[bin]
    #nbin=29
    #ratio_marg=np.zeros(nbin)
    #ratio_ramp=np.zeros(nbin)
    #for i in range(nbin):
    # print i
    #binvis(visit, binsize, spec, method='marg')
    #comp_methods(visit, binsize, bin, wave)
    #sys.exit()
    #binpaper(visit, binsize, bin, wave, method='mcmc')
    #binpaper(visit, binsize, bin, wave, method='ramp')
    #datafile='../bin_analysis/binmcmc_data.csv'
    #pfile='../bin_analysis/binmcmc_params.csv'
    marg=pd.read_csv(datafile, index_col=[0,1,2]).sort_index()
    marg=marg.loc[(visit, binsize)]
    # Grid of red-noise panels: one per spectral bin (3 columns), plus
    # one for the band-integrated white-light residuals.
    nrow=int(np.ceil(len(spec)/3.))
    f, axes=plt.subplots(nrow, 3, sharex='col', sharey='row', figsize=(12,12))
    # Invisible full-figure axes used only for the shared x/y labels.
    ax=f.add_subplot(111, frame_on=False)
    ax.set_xlabel('Exposures Per Bin', labelpad=30, fontsize=15)
    ax.set_ylabel('Normalized RMS', labelpad=40, fontsize=15)
    s=visit.split('/')
    #ax.set_title('WASP-19b Correlated Noise Analysis\n', fontsize=18)
    #ax.set_title('%s %s %s Correlated Noise Analysis\n' % (s[0],s[1],s[2]) , fontsize=18)
    #ax.set_title('Marginalization Correlated Noise Analysis\n', fontsize=18)
    ax.tick_params(bottom=False, labelbottom=False, top=False, labeltop=False
                   ,left=False, labelleft=False, right=False, labelright=False)
    plt.subplots_adjust(hspace=0)
    plt.subplots_adjust(wspace=0)
    colors = iter(cm.rainbow(np.linspace(0.2, .9, len(np.ravel(axes)))))
    # Maximum red-noise beta factor per bin.
    beta=np.empty(len(spec))
    # Get free params
    params=pd.read_csv(pfile, index_col=[0,1,2,3]).sort_index()
    params=params.loc[(visit, binsize)]
    for i, ax in enumerate(np.ravel(axes)):
        # After the last spectral bin, use one panel for the white-light
        # residuals of the best (highest-weight) systematic model.
        if i == len(spec):
            #ax.set_xscale('log')
            #ax.set_yscale('log')
            #ax.xaxis.set_major_formatter(ScalarFormatter())
            #ax.get_xaxis().get_major_formatter().labelOnlyBase = False
            #ax.yaxis.set_major_formatter(ScalarFormatter())
            #ax.minorticks_off()
            #ax.set_yticks([.1,1])
            #ax.set_xticks([1,2,3,4,5,6,7,8,9])
            modelfile='../wl_preprocess/wl_models_info.csv'
            model_info=pd.read_csv(modelfile, index_col=[0,1]).loc[visit]
            best_model=model_info.loc['Weight'].iloc[:-1].astype(float).idxmax()
            best= model_info.loc[:,best_model]
            resids=best.loc['Residuals'].values*1e6
            correlated(resids, 0, ax, 'grey')
            break
        # Per-bin goodness of fit for the marginalization residuals.
        mresids=marg.loc[i,'Residuals'].values
        errors = marg.loc[i, 'Corrected Flux Error'].values
        nfree = (params.loc[(i, 'Errors'),'rprs':'WL Coeff'].values != 0).sum()
        dof = len(mresids) - nfree
        chi2 = np.sum(mresids*mresids/errors/errors)
        rchi2 = chi2/dof
        print('Bin %2d' % i)
        print('Chi squared: %.2f' % chi2)
        print('Reduced Chi squared: %.2f' % rchi2)
        print('DOF: %d' % dof)
        wave=spec[i]
        adtest(mresids, phot_error/1e6)
        beta[i]=correlated(mresids, wave, ax, next(colors))
    name='rednoise_'+visit.replace('/','_')+'_marg.pdf'
    name = '../../rednoise_'+visit.replace('/','_')+'_marg.pdf'
    axes[0,0].legend()
    # NOTE(review): hard-coded override of the command-line `save` flag --
    # looks like a debugging leftover; confirm before relying on argv[6].
    save=True
    if save==True:
        f.savefig(name)
        f.clf()
        plt.close(f)
    else:
        plt.show()
    #f.close()
    # NOTE(review): everything after this exit (the 'ramp' red-noise
    # figure and the Beta Max write-back to spectra.csv) is unreachable.
    sys.exit()
    #sp.loc[(visit, 'marg', binsize), 'Beta Max']=beta
    #sp.to_csv('../bin_analysis/spectra.csv', index_label=['Obs', 'Method', 'Bin Size'])
    print(beta)
    print(np.mean(beta))
    print(np.median(beta))
    rspec=sp.loc[(visit, 'ramp', binsize), 'Central Wavelength'].values
    rwave=rspec[bin]
    ramp=pd.read_csv(rampfile, index_col=[0,1,2]).sort_index()
    ramp=ramp.loc[(visit, binsize)]
    f, axes=plt.subplots(nrow, 3, sharex='col', sharey='row', figsize=(12,12))
    ax=f.add_subplot(111, frame_on=False)
    ax.set_xlabel('Points Per Bin', labelpad=30, fontsize=15)
    ax.set_ylabel('Normalized RMS', labelpad=40, fontsize=15)
    s=visit.split('/')
    #ax.set_title('%s %s %s Correlated Noise Analysis\n' % (s[0],s[1],s[2]) , fontsize=18)
    ax.set_title('Ramp Correlated Noise Analysis\n' , fontsize=18)
    ax.tick_params(bottom=False, labelbottom=False, top=False, labeltop=False
                   ,left=False, labelleft=False, right=False, labelright=False)
    plt.subplots_adjust(hspace=0)
    plt.subplots_adjust(wspace=0)
    colors = iter(cm.rainbow(np.linspace(0.2, .9, len(np.ravel(axes)))))
    beta=np.empty(len(spec))
    for i, ax in enumerate(np.ravel(axes)):
        if i == len(rspec):
            break
        rresids=ramp.loc[i,'Residuals'].values
        wave=rspec[i]
        adtest(rresids, phot_error/1e6)
        beta[i]=correlated(rresids, wave, ax, next(colors))
    name='rednoise_'+visit.replace('/','_')+'_ramp.pdf'
    print(beta)
    print(np.mean(beta))
    print(np.median(beta))
    axes[0,0].legend(loc=3)
    if save==True:
        f.savefig(name)
        f.clf()
        plt.close(f)
    else:
        f.show()
    sys.exit()
    sp.loc[(visit, 'ramp', binsize), 'Beta Max']=beta
    sp.to_csv('../bin_analysis/spectra.csv', index_label=['Obs', 'Method', 'Bin Size'])
|
AstroSheppardREPO_NAMEWFC3-analysisPATH_START.@WFC3-analysis_extracted@WFC3-analysis-main@LATE@verification@binpaper2020.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "duvall3/rat-pac",
"repo_path": "rat-pac_extracted/rat-pac-master/python/SCons/__init__.py",
"type": "Python"
}
|
"""SCons
The main package for the SCons software construction utility.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# Build-stamp metadata injected by the SCons release process.
__revision__ = "src/engine/SCons/__init__.py 4043 2009/02/23 09:06:45 scons"
__version__ = "1.2.0.d20090223"
__build__ = "r4043"
__buildsys__ = "scons-dev"
__date__ = "2009/02/23 09:06:45"
__developer__ = "scons"

# make sure compatibility is always in place
import SCons.compat
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
duvall3REPO_NAMErat-pacPATH_START.@rat-pac_extracted@rat-pac-master@python@SCons@__init__.py@.PATH_END.py
|
{
"filename": "simulation.py",
"repo_name": "nu-radio/NuRadioMC",
"repo_path": "NuRadioMC_extracted/NuRadioMC-master/NuRadioReco/eventbrowser/apps/simulation.py",
"type": "Python"
}
|
from __future__ import absolute_import, division, print_function # , unicode_literals
from dash import html
import NuRadioReco.eventbrowser.apps.simulation_plots.sim_electric_field_trace
import NuRadioReco.eventbrowser.apps.simulation_plots.sim_electric_field_spectrum
import NuRadioReco.eventbrowser.apps.simulation_plots.sim_event_overview
import logging
# NOTE(review): logger is named 'traces' although this module renders the
# simulation tab -- presumably copied from the traces app; confirm intent.
logger = logging.getLogger('traces')
# Inherit the verbosity configured on the top-level NuRadioReco logger.
parent_logger = logging.getLogger('NuRadioReco')
logger.setLevel(parent_logger.level)

# Dash layout for the "Simulation" tab: two side-by-side panels showing
# the simulated electric-field trace and spectrum, plus a full-width
# panel with the simulated event overview.
layout = html.Div([
    html.Div([
        html.Div([
            html.Div('Sim Traces', className='panel-heading'),
            html.Div(NuRadioReco.eventbrowser.apps.simulation_plots.sim_electric_field_trace.layout,
                     className='panel-body', style={'min-height': '500px'})
        ], className='panel panel-default mb-2', style={'flex': '1'}),
        html.Div([
            html.Div('Sim Spectrum', className='panel-heading'),
            html.Div(NuRadioReco.eventbrowser.apps.simulation_plots.sim_electric_field_spectrum.layout,
                     className='panel-body', style={'min-height': '500px'})
        ], className='panel panel-default mb-2', style={'flex': '1'})
    ], style={'display': 'flex'}),
    html.Div([
        html.Div([
            html.Div('Simulated Event', className='panel-heading'),
            html.Div(NuRadioReco.eventbrowser.apps.simulation_plots.sim_event_overview.layout,
                     className='panel-body', style={'display': 'flex'})
        ], className='panel panel-default', style={'flex': '1'})
    ], style={'display': 'flex'})
])
|
nu-radioREPO_NAMENuRadioMCPATH_START.@NuRadioMC_extracted@NuRadioMC-master@NuRadioReco@eventbrowser@apps@simulation.py@.PATH_END.py
|
{
"filename": "move_template.py",
"repo_name": "plazar/TOASTER",
"repo_path": "TOASTER_extracted/TOASTER-master/toolkit/templates/move_template.py",
"type": "Python"
}
|
#!/usr/bin/env python
"""
move_template.py
Move a template and update the database accordingly.
Patrick Lazarus, Mar 10, 2014
"""
import os
import shutil
from toaster import utils
from toaster.toolkit.templates import general
from toaster.utils import notify
from toaster import database
from toaster import errors
# Subcommand name and help text -- presumably consumed by the toaster
# CLI's command registry; confirm against toaster's loader.
SHORTNAME = 'move'
DESCRIPTION = 'Move a template and update the database accordingly.'
def add_arguments(parser):
    """Register this subcommand's command-line options on *parser*.

    Adds the required template ID and the optional destination path.
    """
    parser.add_argument(
        '-t', '--template-id',
        dest='template_id', type=int, required=True,
        help="ID of ephemeris to move.")
    parser.add_argument(
        "--dest",
        dest='dest', type=str,
        help="Where template will be moved to.")
def update_template_entry(template_id, dest, existdb=None):
    """Point the database entry for a template at its new location.

    Input:
        template_id: The ID number of the template that was moved.
        dest: The new on-disk destination of the template.
        existdb: A (optional) existing database connection object.
            (Default: Establish a db connection)

    Outputs:
        None
    """
    db = existdb or database.Database()
    db.connect()
    # Rewrite the stored directory and file name to match the new path.
    new_location = {'filepath': os.path.dirname(dest),
                    'filename': os.path.basename(dest)}
    stmt = db.templates.update().\
        where(db.templates.c.template_id == template_id)
    db.execute(stmt, new_location).close()
    if not existdb:
        # We opened this connection ourselves, so close it.
        db.close()
def main(args):
    """Entry point: move the template specified by the parsed arguments."""
    move_template(template_id=args.template_id, dest=args.dest)
def move_template(template_id, dest, existdb=None):
    """Move a template file to `dest` and update the database to match.

    The file is copied to the destination and the DB row updated inside a
    transaction; the original file is deleted only after the transaction
    commits, so a failure leaves the original file (and, now, the DB
    entry) untouched.

    Inputs:
        template_id: The ID number of the template to move.
        dest: Destination path (a file name, or an existing directory in
            which case the original file name is kept).
        existdb: A (optional) existing database connection object.
            (Default: Establish a db connection)

    Raises:
        errors.FileError if the destination file already exists.
    """
    db = existdb or database.Database()
    db.connect()
    trans = db.begin()
    try:
        template = general.get_template_from_id(template_id, existdb=db)
        dest = os.path.abspath(dest)
        if os.path.isdir(dest):
            dest = os.path.join(dest, os.path.basename(template))
        elif os.path.isfile(dest):
            raise errors.FileError("Template destination file (%s) "
                                   "already exists!" % dest)
        # Deal with the template file itself.
        notify.print_info("Moving template %s to %s" % (template, dest))
        shutil.copyfile(template, dest)
        # Bug fix: update the DB entry on the SAME connection so the
        # change participates in this transaction. The original omitted
        # existdb=db, so the update ran on a second connection and a
        # rollback could not undo it.
        update_template_entry(template_id, dest, existdb=db)
    except:
        # Failure: undo the DB change; the copied file may remain but the
        # original is untouched.
        trans.rollback()
        raise
    else:
        # Success: now it is safe to remove the original file.
        os.remove(template)
        trans.commit()
    finally:
        if not existdb:
            # Bug fix: only close connections we opened ourselves; the
            # original unconditionally closed a caller-supplied one.
            db.close()
if __name__ == "__main__":
    # Stand-alone invocation: build the parser, parse the command line,
    # and run the move.
    parser = utils.DefaultArguments(prog='move_template.py',
                                    description=DESCRIPTION)
    add_arguments(parser)
    args = parser.parse_args()
    main(args)
|
plazarREPO_NAMETOASTERPATH_START.@TOASTER_extracted@TOASTER-master@toolkit@templates@move_template.py@.PATH_END.py
|
{
"filename": "command.py",
"repo_name": "jax-ml/jax",
"repo_path": "jax_extracted/jax-main/build/tools/command.py",
"type": "Python"
}
|
# Copyright 2024 The JAX Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# Helper script for the JAX build CLI for running subprocess commands.
import asyncio
import dataclasses
import datetime
import os
import logging
from typing import Dict, Optional
logger = logging.getLogger(__name__)
class CommandBuilder:
    """Accumulates a command line one token at a time.

    ``append`` returns ``self`` so calls can be chained, e.g.
    ``CommandBuilder("ls").append("-l").get_command_as_string()``.
    """

    def __init__(self, base_command: str):
        # Tokens are kept in order so both list and string views work.
        self.command = [base_command]

    def append(self, parameter: str):
        """Add one token; return self to allow chaining."""
        self.command.append(parameter)
        return self

    def get_command_as_string(self) -> str:
        """Return all tokens joined into a single space-separated string."""
        return " ".join(self.command)

    def get_command_as_list(self) -> list[str]:
        """Return the underlying token list."""
        return self.command
@dataclasses.dataclass
class CommandResult:
    """
    Represents the result of executing a subprocess command.
    """

    # The exact command line that was (or would be) executed.
    command: str
    # Process exit status. Initialised to 2 so an unfinished or failed
    # run is never mistaken for success.
    return_code: int = 2  # Defaults to not successful
    # Accumulated stdout/stderr text; only filled when the caller asked
    # for detailed logging (see SubprocessExecutor.run).
    logs: str = ""
    # Wall-clock bracketing of the run; end_time stays None until the
    # process has finished.
    start_time: datetime.datetime = dataclasses.field(
        default_factory=datetime.datetime.now
    )
    end_time: Optional[datetime.datetime] = None
async def _process_log_stream(stream, result: CommandResult):
    """Forward a subprocess output stream to the logger line by line.

    Each line is also accumulated on ``result.logs`` so the complete
    output remains available after the process exits.
    """
    while True:
        line_bytes = await stream.readline()
        if not line_bytes:
            # EOF: the child closed this pipe.
            break
        line = line_bytes.decode().rstrip()
        # BUG FIX: re-append the newline stripped above so successive
        # lines do not run together in the accumulated log text.
        result.logs += line + "\n"
        logger.info("%s", line)
class SubprocessExecutor:
    """
    Manages execution of subprocess commands with reusable environment and logging.
    """

    def __init__(self, environment: Optional[Dict[str, str]] = None):
        """
        Args:
          environment: Mapping of environment variables for spawned
            commands. Defaults to a copy of the current ``os.environ``.
        """
        self.environment = environment or dict(os.environ)

    async def run(self, cmd: str, dry_run: bool = False, detailed_timestamped_log: bool = False) -> CommandResult:
        """
        Executes a subprocess command.

        Args:
          cmd: The command to execute.
          dry_run: If True, prints the command instead of executing it.
          detailed_timestamped_log: If True, capture stdout/stderr and
            mirror every line through the logger as it arrives.

        Returns:
          A CommandResult instance.
        """
        result = CommandResult(command=cmd)
        if dry_run:
            logger.info("[DRY RUN] %s", cmd)
            result.return_code = 0  # Dry run is a success
            return result
        logger.info("[EXECUTING] %s", cmd)
        # Only attach pipes when we intend to consume them; otherwise the
        # child inherits our stdout/stderr directly.
        process = await asyncio.create_subprocess_shell(
            cmd,
            stdout=asyncio.subprocess.PIPE if detailed_timestamped_log else None,
            stderr=asyncio.subprocess.PIPE if detailed_timestamped_log else None,
            env=self.environment,
        )
        if detailed_timestamped_log:
            # Drain both pipes concurrently so a full pipe buffer cannot
            # deadlock the child.
            await asyncio.gather(
                _process_log_stream(process.stdout, result), _process_log_stream(process.stderr, result)
            )
        result.return_code = await process.wait()
        result.end_time = datetime.datetime.now()
        logger.debug("Command finished with return code %s", result.return_code)
        return result
|
jax-mlREPO_NAMEjaxPATH_START.@jax_extracted@jax-main@build@tools@command.py@.PATH_END.py
|
{
"filename": "_xside.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/layout/grid/_xside.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class XsideValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    def __init__(self, plotly_name="xside", parent_name="layout.grid", **kwargs):
        """Enumerated validator for layout.grid.xside."""
        # Allowed placements of the shared x axis relative to the grid.
        allowed = kwargs.pop("values", ["bottom", "bottom plot", "top plot", "top"])
        super(XsideValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=kwargs.pop("edit_type", "plot"),
            role=kwargs.pop("role", "info"),
            values=allowed,
            **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@layout@grid@_xside.py@.PATH_END.py
|
{
"filename": "text-processing__feature_calcers.md",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/catboost/docs/en/references/text-processing__feature_calcers.md",
"type": "Markdown"
}
|
# Feature calcers and corresponding options
The following is a list of options for the `--feature-calcers` Command-line version parameter (these options are set in `option_name`):
- {{ dictionary__feature-calcers__BoW }} (Bag of words) — Boolean (0/1) features reflecting whether the object contains the token_id. The number of features is equal to the dictionary size.
Supported options:
- top_tokens_count — The maximum number of features to create. If set, the specified number of top tokens is taken into account and the corresponding number of new features is created.
- {{ dictionary__feature-calcers__NaiveBayes }} — Multinomial naive bayes model, the number of created features is equal to the number of classes. To avoid target leakage, this model is computed online on several dataset permutations (similarly to the estimation of CTRs).
- {{ dictionary__feature-calcers__BM25 }} — A function that is used for ranking purposes by search engines to estimate the relevance of documents. To avoid target leakage, this model is computed online on several dataset permutations (similarly to the estimation of CTRs).
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@catboost@docs@en@references@text-processing__feature_calcers.md@.PATH_END.py
|
{
"filename": "test_regrid.py",
"repo_name": "sherpa/sherpa",
"repo_path": "sherpa_extracted/sherpa-main/sherpa/models/tests/test_regrid.py",
"type": "Python"
}
|
# Copyright 2018, 2020, 2021, 2022, 2023
# Smithsonian Astrophysical Observatory
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
# following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following
# disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import numpy as np
import pytest
from sherpa.astro.data import DataIMG, DataIMGInt
from sherpa.astro.ui.utils import Session
from sherpa.data import Data1DInt, Data1D
from sherpa.fit import Fit
from sherpa.models.basic import Box1D
from sherpa.models import Const1D, RegriddableModel1D, Parameter, Const2D, \
RegriddableModel2D, ArithmeticModel, Gauss2D, basic, model
from sherpa.optmethods import LevMar
from sherpa.stats import LeastSq
from sherpa.utils.err import ModelErr
from sherpa.utils import neville, linear_interp
from sherpa.utils import akima
@pytest.fixture
def setup():
    """Return a (Session, MyModel, Const1D) triple for the 1D regrid tests."""
    const = Const1D("const")
    # Freeze the constant at 0 so it cannot influence the fits.
    const.c0 = 0
    const.c0.freeze()
    my_model = MyModel("my_model")
    my_model.integrate = False
    return Session(), my_model, const
@pytest.fixture
def setup2d():
    """Return a (Session, MyModel2D, Const2D, data) tuple for the 2D tests."""
    const = Const2D("const")
    # Freeze the constant at 0 so it cannot influence the fits.
    const.c0 = 0
    const.c0.freeze()
    x = [2, 3, 2, 3]
    y = [2, 2, 3, 3]
    xhi = [2.1, 3.5, 2.1, 3.5]
    yhi = [2.1, 2.1, 3.5, 3.5]
    # This is the result when rebinning [100, ] * 4
    z = [225, ] * 4
    my_model = MyModel2D("my_model")
    return Session(), my_model, const, (x, y, xhi, yhi, z)
def test_evaluate_model_on_arbitrary_grid_point_list(setup):
    """
    The idea of the test is that the model will evaluate differently depending on the grid it is evaluated on.
    This is really arbitrary, it just exercises the high level API for a common workflow while making sure the results
    are the expected ones.
    """
    ui, my_model, const = setup
    # Load data
    ui.load_arrays(1, [1, 2, 3], [100, 100, 100])
    # Get a model that evaluates on a different grid
    # This is the important part.
    regrid_model = my_model.regrid([1, 2, 2.5, 4, 5])
    # The model will usually be part of a complex model expression, so let's pretend we add another component,
    # although that component is muted.
    ui.set_source(regrid_model + const)
    # Fit and check the result: the regrid grid contains 2.5, so
    # MyModel only matches the data (100s) with has_25 == 1.
    assert_fit_1d(ui, my_model, 1)
    # Now fit with a different grid.
    # This is also the important part.
    regrid_model.grid = [1, 2, 3, 4, 5]
    # Without 2.5 on the grid the best fit is has_25 == 0.
    assert_fit_1d(ui, my_model, 0)
def test_evaluate_model_on_arbitrary_grid_point_list_2d(setup2d):
    """
    The idea of the test is that the model will evaluate differently depending on the grid it is evaluated on.
    This is really arbitrary, it just exercises the high level API for a common workflow while making sure the results
    are the expected ones.
    """
    ui, my_model, const, data = setup2d
    x, y, _, _, z = data
    # Load data
    ui.load_arrays(1, x, y, z, DataIMG)
    # Get a model that evaluates on a different grid
    # This is the important part.
    regrid_model = my_model.regrid([2, 2.5, 3], [2, 2.5, 3])
    # The model will usually be part of a complex model expression, so let's pretend we add another component,
    # although that component is muted.
    ui.set_source(regrid_model + const)
    # Fit and check the result: both regrid axes contain 2.5.
    assert_fit_2d(ui, my_model, (1, 1))
    # Now fit with a different grid.
    # This is also the important part.
    regrid_model.grid = [2, 3], [2, 3]
    assert_fit_2d(ui, my_model, (0, 0))
def test_evaluate_model_on_arbitrary_grid_integrated_list(setup):
    """
    Same as above, but with integrated models.
    """
    ui, my_model, const = setup
    # Load data: (lo, hi, counts) triples define the integrated bins.
    ui.load_arrays(1, [1.5, 2.5, 3.5], [2.5, 3.5, 4.5], [100, 100, 100], Data1DInt)
    # Get a model that evaluates on a different grid
    # This is the important part.
    regrid_model = my_model.regrid([0, 1, 2], [1, 2, 3])
    # The model will be part of a complex model expression, so let's pretend we add another component
    ui.set_source(regrid_model + const)
    # Fit and check the result
    assert_fit_1d(ui, my_model, 1)
    # Now fit with a different grid (matching the data bins).
    # This is also the important part.
    regrid_model.grid = [1.5, 2.5, 3.5], [2.5, 3.5, 4.5]
    assert_fit_1d(ui, my_model, 0)
def test_evaluate_model_on_arbitrary_grid_integrated_list_2d(setup2d):
    """
    Same as above, but with integrated models
    """
    ui, my_model, const, data = setup2d
    x, y, xhi, yhi, z = data
    # Load data
    ui.load_arrays(1, x, y, xhi, yhi, z, DataIMGInt)
    # The model here is "clever" in that the response depends on whether
    # the array contains the value 2.5 and the value of the x_has_25/y_has_25
    # parameters. This means that you can get away with using an integrated
    # grid which doesn't make sense (lo == hi), as the code used to do.
    # However, this has been changed so that the setup is more realistic,
    # so that we do not trigger any validation checks.
    #
    regrid_lo = [2, 2.5, 3]
    regrid_hi = np.array([2.2, 2.6, 3.5])
    # Get a model that evaluates on a different grid
    # This is the important part.
    regrid_model = my_model.regrid(regrid_lo, regrid_lo, regrid_hi, regrid_hi)
    # The model will usually be part of a complex model expression, so let's pretend we add another component,
    # although that component is muted.
    ui.set_source(regrid_model + const)
    # Fit and check the result
    assert_fit_2d(ui, my_model, (1, 1))
    # Now fit with a different grid (the data's own bin edges).
    # This is also the important part.
    regrid_model.grid = x, y, xhi, yhi
    assert_fit_2d(ui, my_model, (0, 0))
def test_evaluate_model_on_arbitrary_grid_point_ndarray(setup):
    """
    The idea of the test is that the model will evaluate differently depending on the grid it is evaluated on.
    This is really arbitrary, it just exercises the high level API for a common workflow while making sure the results
    are the expected ones.

    Same as the list variant above, but the grids are numpy arrays.
    """
    ui, my_model, const = setup
    # Load data
    ui.load_arrays(1, [1, 2, 3], [100, 100, 100])
    # Get a model that evaluates on a different grid
    # This is the important part.
    regrid_model = my_model.regrid(np.array([1, 2, 2.5, 4, 5]))
    # The model will be part of a complex model expression, so let's pretend we add another component
    ui.set_source(regrid_model + const)
    # Fit and check the result
    assert_fit_1d(ui, my_model, 1)
    # Now fit with a different regrid.
    # This is also the important part.
    regrid_model.grid = np.array([1, 2, 3, 4, 5])
    assert_fit_1d(ui, my_model, 0)
def test_evaluate_model_on_arbitrary_grid_integrated_ndarray(setup):
    """
    Same as above, but with integrated models.

    Mixed list/ndarray grids are passed deliberately to check both are
    accepted.
    """
    ui, my_model, const = setup
    # Load data
    ui.load_arrays(1, [1.5, 2.5, 3.5], [2.5, 3.5, 4.5], [100, 100, 100], Data1DInt)
    # Get a model that evaluates on a different grid
    # This is the important part.
    regrid_model = my_model.regrid(np.array([0, 1, 2]), [1, 2, 3])
    # The model will be part of a complex model expression, so let's pretend we add another component
    ui.set_source(regrid_model + const)
    # Fit and check the result
    assert_fit_1d(ui, my_model, 1)
    # Now fit with a different grid.
    # This is also the important part.
    regrid_model.grid = [1.5, 2.5, 3.5], np.array([2.5, 3.5, 4.5])
    assert_fit_1d(ui, my_model, 0)
def test_evaluate_model_on_arbitrary_grid_no_overlap(setup):
    """
    Requesting an integrated (lo, hi) regrid of the non-integrated test
    model raises ModelErr with the 'needsint' message.
    """
    ui, my_model, _ = setup
    # Get a model that evaluates on a different grid
    # This is the important part. Note that there is overlap, but
    # the start and end points are different.
    with pytest.raises(ModelErr) as excinfo:
        my_model.regrid([2, 2.5], [2, 2.5])
    assert ModelErr.dict['needsint'] in str(excinfo.value)
def test_evaluate_model_on_arbitrary_grid_no_overlap_2d(setup2d):
    """
    In the 2D case, the overlap is way more stringent than in the 1D case, due to the complexity of rebinning
    """
    ui, my_model, _, data = setup2d
    x, y, _, _, _ = data
    my_model.x_has_25 = 1  # To force the model to evaluate to something other than 0.
    # Get a model that evaluates on a different grid
    # This is the important part. Note that there is overlap, but
    # the start and end points are different.
    regrid_model = my_model.regrid([2, 2.5], [2, 2.5])
    # Insufficient overlap: a warning is issued and zeros are returned.
    with pytest.warns(UserWarning):
        np.testing.assert_array_equal(regrid_model(x, y), [0, 0, 0, 0])
def test_runtime_interp():
    """Check the interp= option of regrid with several interpolators."""
    def tst_runtime_interp(model, requested, interp):
        # NOTE(review): 'model' is unused; the closure evaluates 'mdl'.
        regrid_model = mdl.regrid(requested, interp=interp)
        yregrid = regrid_model(xgrid)
        return yregrid
    xgrid = np.arange(2, 6, 0.1)
    requested = np.arange(2.5, 5.1, 0.075)
    mdl = Box1D()
    mdl.xlow = 3.1
    mdl.xhi = 4.2
    mdl.ampl = 0.4
    yregrid = tst_runtime_interp(mdl, requested, akima.akima)
    assert 4.4 == pytest.approx(yregrid.sum())
    yregrid = tst_runtime_interp(mdl, requested, linear_interp)
    assert 4.4 == pytest.approx(yregrid.sum())
    # Neville polynomial interpolation is expected to diverge badly here.
    yregrid = tst_runtime_interp(mdl, requested, neville)
    assert - 5.0e6 > yregrid.sum()
    # A regrid covering the data range should reproduce the direct
    # evaluation through the data object.
    d = Data1D('tst', xgrid, np.ones_like(xgrid))
    yexpected = d.eval_model(mdl)
    requested = np.arange(2.5, 7, 0.2)
    rmdl = mdl.regrid(requested)
    ygot = d.eval_model(rmdl)
    assert ygot == pytest.approx(yexpected)
class MyConst1D(RegriddableModel1D):
    """Constant model that counts how many grid points it evaluates."""

    def __init__(self, name='myconst1d'):
        self.c0 = Parameter(name, 'c0', 3.1)
        # Incremented by the number of elements of each evaluation grid.
        self.counter = 0
        ArithmeticModel.__init__(self, name, (self.c0,))

    def calc(self, par, *args, **kwargs):
        x = args[0]
        self.counter += x.size
        return par[0]
class MyGauss(RegriddableModel1D):
    """Gaussian model that counts how many grid points it evaluates."""

    def __init__(self, name='mygauss'):
        self.sigma = Parameter(name, 'sigma', 10, min=0, max=10)
        self.pos = Parameter(name, 'pos', 0, min=-10, max=10)
        self.ampl = Parameter(name, 'ampl', 5)
        # Incremented by the number of elements of each evaluation grid.
        self.counter = 0
        ArithmeticModel.__init__(self, name, (self.sigma, self.pos, self.ampl))

    def calc(self, par, *args, **kwargs):
        sigma, pos, ampl = par[0], par[1], par[2]
        x = args[0]
        self.counter += x.size
        return ampl * np.exp(-0.5 * (args[0] - pos)**2 / sigma**2)
def test_regrid_binaryop_1d():
    """issue #762, Cannot regrid a composite model (BinaryOpModel)"""
    rng = np.random.RandomState(0)
    leastsq = LeastSq()
    levmar = LevMar()
    mygauss = MyGauss()
    myconst = MyConst1D()
    mymodel = mygauss + myconst
    x = np.linspace(-5., 5., 5)
    err = 0.25
    y = mymodel(x) + rng.normal(mygauss.pos.val, err, x.shape)
    # Reset the evaluation counters before fitting.
    mygauss.counter = 0
    myconst.counter = 0
    data = Data1D('one', x, y)
    fit = Fit(data, mymodel, leastsq, levmar)
    result = fit.fit()
    assert result.numpoints == x.size
    assert result.statval < 1.0
    # Both components of the composite must see identical grids.
    assert mygauss.counter == myconst.counter
    assert (result.nfev + 4) * x.size == mygauss.counter
    # Reset and repeat with a regridded composite.
    mygauss.counter = 0
    myconst.counter = 0
    x_regrid = np.linspace(-5., 5., 25)
    mymodel_regrid = mymodel.regrid(x_regrid)
    fit = Fit(data, mymodel_regrid, leastsq, levmar)
    result = fit.fit()
    assert result.numpoints == x.size
    assert result.statval < 1.0
    assert mygauss.counter == myconst.counter
    # Evaluations now happen on the (larger) regrid grid.
    assert (result.nfev + 4) * x_regrid.size == mygauss.counter
def test_regrid_binaryop_2d():
    """A 2D composite regridded onto a matching grid must agree with
    the direct evaluation of each component and of their sum."""
    y0, x0 = np.mgrid[20:29, 10:20]
    y0 = y0.flatten()
    x0 = x0.flatten()
    gmdl = Gauss2D()
    gmdl.fwhm = 14
    gmdl.xpos = 15
    gmdl.ypos = 24
    gmdl.ampl = 10
    cmdl = Const2D()
    cmdl.c0 = 4
    xr1 = np.arange(10, 20, 1)
    yr1 = np.arange(20, 29, 1)
    rmdlg = gmdl.regrid(xr1, yr1)
    rmdlc = cmdl.regrid(xr1, yr1)
    shape = y0.shape
    truthg = gmdl(x0, y0).reshape(shape)
    truthc = cmdl(x0, y0).reshape(shape)
    truth = truthg + truthc
    ans1 = rmdlg(x0, y0).reshape(shape)
    ans2 = rmdlc(x0, y0).reshape(shape)
    assert (ans1 == truthg).all()
    assert (ans2 == truthc).all()
    # The same must hold for the composite (BinaryOp) model.
    rmdl = (gmdl + cmdl).regrid(xr1, yr1)
    ans3 = rmdl(x0, y0).reshape(shape)
    assert (ans3 == truth).all()
class Wrappable1D(model.RegriddableModel1D):
    """Wrap a 1D model class, recording the grid used by each calc call."""

    def __init__(self, cls, name):
        self.ncalled = []  # record the number of elements
        self.baseclass = cls
        self.baseclass.__init__(self, name)

    def calc(self, pars, xlo, *args, **kwargs):
        xlo = np.asarray(xlo)
        # Record (first, last, size) of the evaluation grid, then defer
        # to the wrapped class for the actual calculation.
        self.ncalled.append((xlo[0], xlo[-1], xlo.size))
        return self.baseclass.calc(self, pars, xlo, *args, **kwargs)
def test_regrid_call_behavior():
    """Both parts of a regridded composite are called once, on the regrid grid."""
    m1 = Wrappable1D(basic.Const1D, 'm1')
    m2 = Wrappable1D(basic.Gauss1D, 'm2')
    m2.pos = 5
    xregrid = np.arange(0, 20, 0.2)
    xdata = np.arange(1.5, 12.5, 0.5)
    morig = m1 + m2
    mwrap = morig.regrid(xregrid)
    # evaluate the model, we do not check the return value
    _ = mwrap(xdata)
    # Check both components were called with the same grid
    assert m1.ncalled == m2.ncalled
    # Check that m1 was called with the expected grid (ie that
    # it is larger than xdata).
    got = m1.ncalled
    assert len(got) == 1
    minval, maxval, nbins = m1.ncalled[0]
    assert minval == pytest.approx(0)
    assert maxval == pytest.approx(19.8)
    assert nbins > xdata.size
    assert nbins == 111
class MyModel(RegriddableModel1D):
    """
    A model that returns [100, ] * len(x) if 2.5 is in the input array x
    and the has_25 parameter is 1; the returned values depend on both
    the grid contents and has_25, so fits probe which grid was used.
    """

    def __init__(self, name):
        # has_25 is the single fittable parameter checked by the tests.
        self.has_25 = Parameter(name, "has_25", 0, min=0, max=1)
        ArithmeticModel.__init__(self, name, (self.has_25,))

    def guess(self, dep, *args, **kwargs):
        raise NotImplementedError()

    def get_center(self):
        raise NotImplementedError()

    def set_center(self, *args, **kwargs):
        raise NotImplementedError()

    def calc(self, p, *args, **kwargs):
        x = args[0]
        # Grid without 2.5: best match to 100s is p[0] == 0.
        if 2.5 not in x:
            if p[0] == 0:
                return [100, ] * len(x)
            return [100-p[0] * 100, ] * len(x)
        # Grid with 2.5: best match to 100s is p[0] == 1.
        if p[0] == 1:
            return [100, ] * len(x)
        return [p[0]*100, ] * len(x)
class MyModel2D(RegriddableModel2D):
    """
    A 2D model that returns [100, ] * len(x) * len(y) if 2.5 is in the input arrays x and y
    """

    def __init__(self, name):
        # One fittable parameter per axis, probed by the 2D tests.
        self.x_has_25 = Parameter(name, "x_has_25", 0, min=0, max=1)
        self.y_has_25 = Parameter(name, "y_has_25", 0, min=0, max=1)
        RegriddableModel2D.__init__(self, name, (self.x_has_25, self.y_has_25))

    def guess(self, dep, *args, **kwargs):
        raise NotImplementedError()

    def get_center(self):
        raise NotImplementedError()

    def set_center(self, *args, **kwargs):
        raise NotImplementedError()

    def calc(self, p, *args, **kwargs):
        x, y, x_has_25, y_has_25 = args[0], args[1], p[0], p[1]
        # Evaluate each axis with the same 1D rule and average them.
        x_eval = np.array(self._eval(x, x_has_25))
        y_eval = np.array(self._eval(y, y_has_25))
        return (x_eval + y_eval)/2

    def _eval(self, array, has_25):
        # Same rule as MyModel.calc, applied per axis.
        if 2.5 not in array:
            if has_25 == 0:
                return [100, ] * len(array)
            return [100 - has_25 * 100, ] * len(array)
        if has_25 == 1:
            return [100, ] * len(array)
        return [has_25 * 100, ] * len(array)
def assert_fit_1d(ui, model, value):
    """Fit the current source and check the fitted has_25 value."""
    ui.fit()
    assert model.has_25.val == pytest.approx(value)
def assert_fit_2d(ui, model, value):
    """Fit the current source and check both fitted *_has_25 values."""
    ui.fit()
    assert model.x_has_25.val == pytest.approx(value[0])
    assert model.y_has_25.val == pytest.approx(value[1])
|
sherpaREPO_NAMEsherpaPATH_START.@sherpa_extracted@sherpa-main@sherpa@models@tests@test_regrid.py@.PATH_END.py
|
{
"filename": "detector_stacks.py",
"repo_name": "PlasmaPy/PlasmaPy",
"repo_path": "PlasmaPy_extracted/PlasmaPy-main/src/plasmapy/diagnostics/charged_particle_radiography/detector_stacks.py",
"type": "Python"
}
|
"""
Objects representing stacks of film and/or filter layers for charged particle
detectors.
"""
__all__ = [
"Stack",
"Layer",
]
import astropy.units as u
import numpy as np
from scipy.interpolate import interp1d
class Layer:
    r"""
    A layer in a detector film stack.

    The layer could either be an active layer (the actual film medium) or
    an inactive layer (a filter or inactive part of the film, such as
    a substrate.)

    Tabulated stopping powers for protons and electrons can be found in the
    `NIST PSTAR database
    <https://physics.nist.gov/PhysRefData/Star/Text/PSTAR.html>`_
    and the
    `NIST ESTAR database
    <https://physics.nist.gov/PhysRefData/Star/Text/ESTAR.html>`_.

    Parameters
    ----------
    thickness : `~astropy.units.Quantity`
        The thickness of the layer, in units convertible to meters.

    energy_axis : `~astropy.units.Quantity`
        The energies corresponding to the stopping power array.

    stopping_power : `~astropy.units.Quantity`
        The stopping power in the material. Either the linear stopping
        power (units of J/m) or the mass stopping power
        (units convertible to J m\ :sup:`2` / kg) can be provided. If the
        mass stopping power is provided, the material_density keyword
        is required.

    mass_density : `~astropy.units.Quantity`, optional
        The material mass density in units convertible to kg/m\ :sup:`3`.
        This keyword is required if the provided stopping power is the
        mass stopping power.

    active : `bool`, default: `True`
        If `True`, this layer is marked as an active layer.

    name : `str`, optional
        An optional name for the layer.
    """

    def __init__(
        self,
        thickness: u.Quantity[u.m],
        energy_axis: u.Quantity[u.J],
        stopping_power: u.Quantity[u.J / u.m, u.J * u.m**2 / u.kg],
        mass_density: u.Quantity[u.kg / u.m**3] | None = None,
        active: bool = True,
        name: str = "",
    ) -> None:
        self.thickness = thickness
        self.energy_axis = energy_axis
        self.active = active
        self.name = name
        # Handle stopping power provided as either linear or
        # mass stopping power: everything is normalised to the linear
        # stopping power in J/m for use by Stack.deposition_curves.
        if stopping_power.unit.is_equivalent(u.J / u.m):
            self.linear_stopping_power = stopping_power.to(u.J / u.m)
        elif stopping_power.unit.is_equivalent(u.J * u.m**2 / u.kg):
            if mass_density is None:
                raise ValueError(
                    "mass_density keyword is required if "
                    "stopping power is not provided in units "
                    "convertible to J/m"
                )
            # Ensure the mass density has the right units
            try:
                mass_density = mass_density.to(u.kg / u.m**3)
            except u.UnitConversionError as e:
                raise ValueError(
                    "mass_density keyword must have units convertible to kg/m^3."
                ) from e
            # Linear stopping power = mass stopping power * mass density.
            self.linear_stopping_power = (stopping_power * mass_density).to(u.J / u.m)
        else:
            raise ValueError(
                "Units of stopping_power keyword not recognized:"
                f"{stopping_power.unit}"
            )
class Stack:
    r"""
    An ordered list of |Layer| objects.

    Parameters
    ----------
    layers : list of |Layer|
        The objects that make up the film stack.
    """

    def __init__(self, layers: list[Layer]) -> None:
        self._layers = layers
        # Cached result of the most recent energy_bands() call.
        self._energy_bands = None

    @property
    def num_layers(self):
        r"""
        The number of layers in the stack.
        """
        return len(self._layers)

    @property
    def num_active(self):
        r"""
        The number of layers in the stack marked ``active``.
        """
        return len([layer for layer in self._layers if layer.active])

    @property
    def thickness(self):
        r"""
        The total thickness of the stack.
        """
        thickness = np.array([layer.thickness.to(u.m).value for layer in self._layers])
        return np.sum(thickness) * u.m

    def deposition_curves(
        self, energies: u.Quantity[u.J], dx=1 * u.um, return_only_active: bool = True
    ):
        """
        Calculate the deposition of an ensemble of particles over a range of
        energies in a stack of films and filters.

        Parameters
        ----------
        energies : (``nenergies``,) `~astropy.units.Quantity` array
            Energies axis over which to calculate the deposition. Units
            convertible to J.

        dx : `~astropy.units.Quantity`, optional
            The spatial resolution of the numerical integration of the
            stopping power. Defaults to 1 μm.

        return_only_active : `bool`, default: `True`
            If `True`, only the energy bands of layers in which the
            active property is `True` will be returned. This is usually
            desirable, since particles captured in other layers will not
            be measured. If `False`, energy bands in all layers of the
            stack are returned. The default is `True`.

        Returns
        -------
        deposited : (``nlayers``, ``nenergies``) `~numpy.ndarray`
            The fraction of particles at each energy that will be deposited in
            each layer of the film. The array is normalized such that the sum
            along the first dimension (all of the layers) for each population
            is unity.
        """
        energies = energies.to(u.J).value
        deposited_energy = np.zeros([len(self._layers), energies.size])
        for i, layer in enumerate(self._layers):
            # Interpolate the linear stopping power (J/m) for each energy.
            # Below the tabulated range particles deposit nothing; above
            # it the infinite fill value stops them immediately.
            sp_fcn = interp1d(
                layer.energy_axis.to(u.J).value,
                layer.linear_stopping_power.to(u.J / u.m).value,
                fill_value=(0, np.inf),
                bounds_error=False,
            )
            # Slice the layer into sublayers each dx thick
            nsublayers = int(np.floor(layer.thickness.to(u.m).value / dx.to(u.m).value))
            sublayers = np.ones(nsublayers) * dx.to(u.m)
            # Include any remainder in the last sublayer
            sublayers[-1] += layer.thickness.to(u.m) % dx.to(u.m)
            # Calculate the energy deposited in each sublayer
            # This is essentially numerically integrating the stopping power
            for ds in sublayers:
                # Interpolate the stopping power at the current energies
                interpolated_stopping_power = sp_fcn(energies)
                # dE is in J
                dE = interpolated_stopping_power * ds.to(u.m).value
                # If dE > E for a given energy, set dE=E (stop the particle)
                dE = np.where(dE > energies, energies, dE)
                energies += -dE
                deposited_energy[i, :] += dE
        # Normalize the deposited energy array so that each number represents
        # the fraction of a population of particles of that energy stopped
        # in that layer.
        deposited_energy /= np.sum(deposited_energy, axis=0)
        # If this flag is set, return only the layers that correspond to active
        # medium, ignoring the filter and substrate layers
        if return_only_active:
            active_ind = [i for i in range(len(self._layers)) if self._layers[i].active]
            deposited_energy = deposited_energy[active_ind, :]
        return deposited_energy

    def energy_bands(
        self,
        energy_range: u.Quantity[u.J],
        dE: u.Quantity[u.J],
        dx=1e-6 * u.m,
        return_only_active: bool = True,
    ):
        """
        Calculate the energy bands in each of the active layers of a film
        stack.

        Parameters
        ----------
        energy_range : (2,) `~astropy.units.Quantity` array
            A range of energies to include in the calculation. Units
            convertible to eV.

        dE : `~astropy.units.Quantity`
            Spacing between energy bins in the calculation. Units convertible
            to J.

        dx : `~astropy.units.Quantity`, default: 1 μm
            The spatial resolution of the numerical integration of the stopping
            power. Passed directly to the `~deposition_curves` method.

        return_only_active : `bool`, default: `True`
            If `True`, only the energy bands of layers in which the active
            property is `True` will be returned. This is usually desirable,
            since particles captured in other layers will not be measured.
            If `False`, energy bands in all layers of the stack are returned.

        Returns
        -------
        energy_bands : (``nlayers``, 2) `~astropy.units.Quantity`
            The full-width-half-max energy range of the Bragg peak in each
            active layer of the film stack, in J.
        """
        energies = (
            np.arange(
                *energy_range.to(u.J).value,
                dE.to(u.J).value,
            )
            * u.J
        )
        # BUG FIX: forward dx as documented; it was previously accepted
        # but silently ignored, so the integration always ran at the
        # deposition_curves default resolution.
        deposited = self.deposition_curves(
            energies, dx=dx, return_only_active=return_only_active
        )
        energy_bands = np.zeros([deposited.shape[0], 2]) * u.J
        for i in range(deposited.shape[0]):
            bragg_curve = deposited[i, :]
            # Find the indices corresponding to half the maximum value
            # on either side of the peak
            halfmax = np.max(bragg_curve) / 2
            inds = np.argwhere(bragg_curve > halfmax)
            # Store those energies
            energy_bands[i, 0] = energies[inds[0][0]]
            energy_bands[i, 1] = energies[inds[-1][0]]
        self._energy_bands = energy_bands
        return energy_bands
|
PlasmaPyREPO_NAMEPlasmaPyPATH_START.@PlasmaPy_extracted@PlasmaPy-main@src@plasmapy@diagnostics@charged_particle_radiography@detector_stacks.py@.PATH_END.py
|
{
"filename": "_cmax.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/scattercarpet/marker/line/_cmax.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class CmaxValidator(_plotly_utils.basevalidators.NumberValidator):
    def __init__(
        self, plotly_name="cmax", parent_name="scattercarpet.marker.line", **kwargs
    ):
        """Number validator for the upper bound of the color scale."""
        # Setting cmax explicitly implies turning automatic scaling off.
        super(CmaxValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            implied_edits=kwargs.pop("implied_edits", {"cauto": False}),
            edit_type=kwargs.pop("edit_type", "plot"),
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@scattercarpet@marker@line@_cmax.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "lenstronomy/lenstronomy",
"repo_path": "lenstronomy_extracted/lenstronomy-main/test/test_ImSim/test_Numerics/__init__.py",
"type": "Python"
}
|
lenstronomyREPO_NAMElenstronomyPATH_START.@lenstronomy_extracted@lenstronomy-main@test@test_ImSim@test_Numerics@__init__.py@.PATH_END.py
|
|
{
"filename": "conversion.py",
"repo_name": "jobovy/galpy",
"repo_path": "galpy_extracted/galpy-main/galpy/util/conversion.py",
"type": "Python"
}
|
###############################################################################
#
# conversion: utilities to convert from galpy 'natural units' to physical
# units
#
###############################################################################
import copy
import math as m
import numbers
import warnings
from functools import wraps
from typing import Any, Tuple
import numpy
from ..util._optional_deps import _APY_LOADED, _APY_UNITS
from ..util.config import __config__
# Physical constants used by the conversion factors below.  When astropy is
# available the values are taken from astropy.constants/astropy.units so they
# stay consistent with the astropy ecosystem; otherwise hard-coded fallbacks
# (same conventions and powers of ten) are used.
if not _APY_LOADED:
    _G = 4.302 * 10.0**-3.0  # pc / Msolar (km/s)^2
    _kmsInPcMyr = 1.0227121655399913  # 1 km/s expressed in pc/Myr
    _PCIN10p18CM = 3.08567758  # 10^18 cm
    _CIN10p5KMS = 2.99792458  # 10^5 km/s
    _MSOLAR10p30KG = 1.9891  # 10^30 kg
    _EVIN10m19J = 1.60217657  # 10^-19 J
else:
    from astropy import constants, units

    _G = constants.G.to(units.pc / units.Msun * units.km**2 / units.s**2).value
    _kmsInPcMyr = (units.km / units.s).to(units.pc / units.Myr)
    _PCIN10p18CM = units.pc.to(units.cm) / 10.0**18.0  # 10^18 cm
    _CIN10p5KMS = constants.c.to(units.km / units.s).value / 10.0**5.0  # 10^5 km/s
    _MSOLAR10p30KG = units.Msun.to(units.kg) / 10.0**30.0  # 10^30 kg
    _EVIN10m19J = units.eV.to(units.J) * 10.0**19.0  # 10^-19 J
_MyrIn1013Sec = 3.65242198 * 0.24 * 3.6  # use tropical year, like for pms
_TWOPI = 2.0 * m.pi  # 2*pi, used in the 2piG surface-density conversion
def dens_in_criticaldens(vo, ro, H=70.0):
    """
    Conversion factor from internal density units to the critical density.

    Parameters
    ----------
    vo : float
        Velocity unit in km/s.
    ro : float
        Length unit in kpc.
    H : float, optional
        Hubble constant in km/s/Mpc (default 70.0).

    Returns
    -------
    float
        Multiply an internal density (units where vo=1. at ro=1.) by this
        factor to express it in units of rho_crit = 3 H^2 / (8 pi G).

    Notes
    -----
    - 2014-01-28 - Written - Bovy (IAS)
    """
    # (vo/ro)^2 is the internal density scale (up to G); the 10^6 bridges the
    # kpc (ro) vs Mpc (H) length-unit mismatch.
    density_scale = vo**2.0 / ro**2.0
    return density_scale * 10.0**6.0 / H**2.0 * 8.0 * numpy.pi / 3.0
def dens_in_meanmatterdens(vo, ro, H=70.0, Om=0.3):
    """
    Conversion factor from internal density units to the mean matter density.

    Parameters
    ----------
    vo : float
        Velocity unit in km/s.
    ro : float
        Length unit in kpc.
    H : float, optional
        Hubble constant in km/s/Mpc (default 70.0).
    Om : float, optional
        Omega matter (default 0.3).

    Returns
    -------
    float
        Conversion from units where vo=1. at ro=1. to units of the mean
        matter density.

    Notes
    -----
    - 2014-01-28 - Written - Bovy (IAS)
    """
    # The mean matter density is Om times the critical density.
    critical = dens_in_criticaldens(vo, ro, H=H)
    return critical / Om
def dens_in_gevcc(vo, ro):
    """
    Conversion factor from internal density units to GeV / cm^3.

    Parameters
    ----------
    vo : float
        Velocity unit in km/s.
    ro : float
        Length unit in kpc.

    Returns
    -------
    float
        Conversion from units where vo=1. at ro=1. to GeV/cm^3.

    Notes
    -----
    - 2014-06-16 - Written - Bovy (IAS)
    """
    # Start from the internal Msun/pc^3-style density scale, convert mass to
    # energy with E = m c^2, and rescale to eV and cm^3 using the module
    # constants (each expressed in a convenient power of ten, hence the
    # final 10^-4 that collects the leftover exponents).
    conversion = vo**2.0 / ro**2.0 / _G
    conversion = conversion * _MSOLAR10p30KG * _CIN10p5KMS**2.0
    conversion = conversion / _EVIN10m19J / _PCIN10p18CM**3.0
    return conversion * 10.0**-4.0
def dens_in_msolpc3(vo, ro):
    """
    Conversion factor from internal density units to Msolar / pc^3.

    Parameters
    ----------
    vo : float
        Velocity unit in km/s.
    ro : float
        Length unit in kpc.

    Returns
    -------
    float
        Conversion from units where vo=1. at ro=1. to Msolar/pc^3.

    Notes
    -----
    - 2013-09-01 - Written - Bovy (IAS)
    """
    # 10^-6 converts the kpc-based length scale of ro to pc^-3.
    density_scale = vo**2.0 / ro**2.0 / _G
    return density_scale * 10.0**-6.0
def force_in_2piGmsolpc2(vo, ro):
    """
    Conversion factor from internal force units to 2piG x Msolar / pc^2.

    Parameters
    ----------
    vo : float
        Velocity unit in km/s.
    ro : float
        Length unit in kpc.

    Returns
    -------
    float
        Conversion from units where vo=1. at ro=1.

    Notes
    -----
    - 2013-09-01 - Written - Bovy (IAS)
    """
    # Same scale as surfdens_in_msolpc2, with the conventional 2piG pulled out.
    surface_density = vo**2.0 / ro / _G * 10.0**-3.0
    return surface_density / _TWOPI
def force_in_pcMyr2(vo, ro):
    """
    Conversion factor from internal force units to pc/Myr^2.

    Parameters
    ----------
    vo : float
        Velocity unit in km/s.
    ro : float
        Length unit in kpc.

    Returns
    -------
    float
        Conversion from units where vo=1. at ro=1.

    Notes
    -----
    - 2013-09-01 - Written - Bovy (IAS)
    """
    # vo^2/ro is the internal acceleration scale; _kmsInPcMyr^2 converts both
    # velocity factors to pc/Myr, and 10^-3 converts the kpc in ro to pc.
    accel = vo**2.0 / ro * _kmsInPcMyr**2.0
    return accel * 10.0**-3.0
def force_in_kmsMyr(vo, ro):
    """
    Conversion factor from internal force units to km/s/Myr.

    Parameters
    ----------
    vo : float
        Velocity unit in km/s.
    ro : float
        Length unit in kpc.

    Returns
    -------
    float
        Conversion from units where vo=1. at ro=1.

    Notes
    -----
    - 2013-09-01 - Written - Bovy (IAS)
    """
    # One factor of _kmsInPcMyr turns the 1/time part into 1/Myr.
    accel = vo**2.0 / ro * _kmsInPcMyr
    return accel * 10.0**-3.0
def force_in_10m13kms2(vo, ro):
    """
    Conversion factor from internal force units to 10^(-13) km/s^2.

    Parameters
    ----------
    vo : float
        Velocity unit in km/s.
    ro : float
        Length unit in kpc.

    Returns
    -------
    float
        Conversion from units where vo=1. at ro=1.

    Notes
    -----
    - 2014-01-22 - Written - Bovy (IAS)
    """
    # Start from km/s/Myr and convert the Myr to 10^13 s.
    kms_per_myr = vo**2.0 / ro * _kmsInPcMyr * 10.0**-3.0
    return kms_per_myr / _MyrIn1013Sec
def freq_in_Gyr(vo, ro):
    """
    Conversion factor from internal frequency units to 1/Gyr.

    Parameters
    ----------
    vo : float
        Velocity unit in km/s.
    ro : float
        Length unit in kpc.

    Returns
    -------
    float
        Conversion from units where vo=1. at ro=1.

    Notes
    -----
    - 2013-09-01 - Written - Bovy (IAS)
    """
    # vo/ro in km/s/kpc equals (vo/ro)*_kmsInPcMyr in 1/Gyr, because the
    # kpc->pc and Myr->Gyr factors of 10^3 cancel.
    internal_freq = vo / ro
    return internal_freq * _kmsInPcMyr
def freq_in_kmskpc(vo, ro):
    """
    Conversion factor from internal frequency units to km/s/kpc.

    Parameters
    ----------
    vo : float
        Velocity unit in km/s.
    ro : float
        Length unit in kpc.

    Returns
    -------
    float
        Conversion from units where vo=1. at ro=1.

    Notes
    -----
    - 2013-09-01 - Written - Bovy (IAS)
    """
    # In km/s/kpc the frequency unit is simply the velocity unit over the
    # length unit.
    ratio = vo / ro
    return ratio
def surfdens_in_msolpc2(vo, ro):
    """
    Conversion factor from internal surface-density units to Msolar / pc^2.

    Parameters
    ----------
    vo : float
        Velocity unit in km/s.
    ro : float
        Length unit in kpc.

    Returns
    -------
    float
        Conversion from units where vo=1. at ro=1.

    Notes
    -----
    - 2013-09-01 - Written - Bovy (IAS)
    """
    # 10^-3 converts the kpc in ro to pc.
    surface_scale = vo**2.0 / ro / _G
    return surface_scale * 10.0**-3.0
def mass_in_msol(vo, ro):
    """
    Conversion factor from internal mass units to Msolar.

    Parameters
    ----------
    vo : float
        Velocity unit in km/s.
    ro : float
        Length unit in kpc.

    Returns
    -------
    float
        Conversion from units where vo=1. at ro=1.

    Notes
    -----
    - 2013-09-01 - Written - Bovy (IAS)
    """
    # 10^3 converts the kpc in ro to the pc used in _G's units.
    mass_scale = vo**2.0 * ro / _G
    return mass_scale * 10.0**3.0
def mass_in_1010msol(vo, ro):
    """
    Conversion factor from internal mass units to 10^10 x Msolar.

    Parameters
    ----------
    vo : float
        Velocity unit in km/s.
    ro : float
        Length unit in kpc.

    Returns
    -------
    float
        Conversion from units where vo=1. at ro=1.

    Notes
    -----
    - 2013-09-01 - Written - Bovy (IAS)
    """
    # Same as mass_in_msol, shifted down by 10^10.
    mass_scale = vo**2.0 * ro / _G
    return mass_scale * 10.0**-7.0
def time_in_Gyr(vo, ro):
    """
    Conversion factor from internal time units to Gyr.

    Parameters
    ----------
    vo : float
        Velocity unit in km/s.
    ro : float
        Length unit in kpc.

    Returns
    -------
    float
        Conversion from units where vo=1. at ro=1.

    Notes
    -----
    - 2013-09-01 - Written - Bovy (IAS)
    """
    # Inverse of the frequency conversion: ro/vo in kpc/(km/s) is a Gyr-scale
    # time once divided by _kmsInPcMyr (the 10^3 factors cancel).
    dynamical_time = ro / vo
    return dynamical_time / _kmsInPcMyr
def velocity_in_kpcGyr(vo, ro):
    """
    Conversion factor from internal velocity units to kpc/Gyr.

    Parameters
    ----------
    vo : float
        Velocity unit in km/s.
    ro : float
        Length unit in kpc.

    Returns
    -------
    float
        Conversion from units where vo=1. at ro=1.

    Notes
    -----
    - 2014-12-19 - Written - Bovy (IAS)
    """
    # km/s and kpc/Gyr differ by exactly the pc/Myr factor (10^3's cancel);
    # ro does not enter for a pure velocity.
    return _kmsInPcMyr * vo
def get_physical(obj: Any, include_set: bool = False) -> dict:
    """
    Return the velocity and length units for converting between physical and internal units as a dictionary for any galpy object, so they can easily be fed to galpy routines.

    Parameters
    ----------
    obj : Any
        A galpy object or list of such objects (e.g., a Potential, list of Potentials, Orbit, actionAngle instance, DF instance).
    include_set : bool, optional
        If True, also include roSet and voSet, flags of whether the unit is explicitly set in the object. Default is False.

    Returns
    -------
    dict
        A dictionary {'ro':length unit in kpc,'vo':velocity unit in km/s}; note that this routine will *always* return these conversion units, even if the obj you provide does not have units turned on.

    Notes
    -----
    - 2019-08-03 - Written - Bovy (UofT)
    """
    # Try flattening the object in case it's a nested list of Potentials
    from ..potential import Force
    from ..potential import flatten as flatten_pot
    from ..potential import linearPotential, planarPotential

    try:
        new_obj = flatten_pot(obj)
    # Narrowed from a bare ``except:`` so KeyboardInterrupt/SystemExit are
    # not swallowed; flattening remains best-effort only.
    except Exception:  # pragma: no cover
        pass  # hope for the best!
    else:  # only apply flattening for potentials
        if isinstance(new_obj, (Force, planarPotential, linearPotential)) or (
            isinstance(new_obj, list)
            and len(new_obj) > 0
            and isinstance(new_obj[0], (Force, planarPotential, linearPotential))
        ):
            obj = new_obj
    # A list (of Potentials) carries its units on the individual elements
    out_obj = obj[0] if isinstance(obj, list) else obj
    out = {"ro": out_obj._ro, "vo": out_obj._vo}
    if include_set:
        out.update({"roSet": out_obj._roSet, "voSet": out_obj._voSet})
    return out
def extract_physical_kwargs(kwargs: dict) -> dict:
    """
    Pop the unit-related keyword arguments out of a kwargs dictionary.

    Parameters
    ----------
    kwargs : dict
        A dictionary of kwargs; modified in place (the physical kwargs are
        removed from it).

    Returns
    -------
    dict
        A dictionary with just the physical kwargs that were present.

    Notes
    -----
    - 2023-04-24 - Written - Bovy (UofT)
    """
    physical_keys = ("use_physical", "ro", "vo", "quantity")
    extracted = {}
    # Iterate over a snapshot of the keys because we pop from kwargs while
    # looping; insertion order of the input dict is preserved.
    for key in list(kwargs):
        if key in physical_keys:
            extracted[key] = kwargs.pop(key)
    return extracted
def physical_compatible(obj: Any, other_obj: Any) -> bool:
    """
    Test whether the physical<->internal conversion units of two galpy objects agree.

    Parameters
    ----------
    obj : galpy object or list of such objects
        A galpy object or list of such objects (e.g., a Potential, list of Potentials, Orbit, actionAngle instance, DF instance)
    other_obj : galpy object or list of such objects
        Another galpy object or list of such objects

    Returns
    -------
    bool
        True if the units are compatible, False if not (compatible means that
        the units are the same when they are set for both objects).

    Notes
    -----
    - 2020-04-22 - Written - Bovy (UofT)
    """
    # A missing object is trivially compatible
    if obj is None or other_obj is None:
        return True
    these = get_physical(obj, include_set=True)
    those = get_physical(other_obj, include_set=True)
    # Units only have to agree when both objects have them explicitly set;
    # compare with a relative tolerance to allow for unit round-tripping
    if these["roSet"] and those["roSet"]:
        if not m.fabs((these["ro"] - those["ro"]) / these["ro"]) < 1e-8:
            return False
    if these["voSet"] and those["voSet"]:
        if not m.fabs((these["vo"] - those["vo"]) / these["vo"]) < 1e-8:
            return False
    return True
# Parsers of different inputs with units
def check_parser_input_type(func):
    """
    Decorator to check the inputs to a parse_ function; should be either:
    a) a number
    b) an array of numbers
    c) an astropy Quantity (incl. arrays)
    Also parses ro/vo if they are provided and converts them to the correct
    internal representation
    """
    @wraps(func)
    def parse_x_wrapper(x, **kwargs):
        # Accept None, plain numbers, numeric arrays (incl. empty ones), and
        # astropy Quantities; anything else is rejected with an explanatory
        # error before the wrapped parser ever sees it
        if (
            not x is None
            and not isinstance(x, numbers.Number)
            and not (
                isinstance(x, numpy.ndarray)
                and (x.size == 0 or isinstance(x.flatten()[0], numbers.Number))
            )
            and not (_APY_LOADED and isinstance(x, units.Quantity))
        ):
            raise RuntimeError(
                f"Input '{x}' not understood; should either be a number or an astropy Quantity"
            )
        # Also parse ro and vo inputs
        if "ro" in kwargs:
            if (
                not kwargs["ro"] is None
                and not isinstance(kwargs["ro"], numbers.Number)
                and not (_APY_LOADED and isinstance(kwargs["ro"], units.Quantity))
            ):
                raise RuntimeError(
                    f"Input 'ro={kwargs['ro']}' not understood; should either be a number or an astropy Quantity"
                )
            else:
                # Convert an ro Quantity to its plain value in kpc
                kwargs["ro"] = (
                    kwargs["ro"].to(units.kpc).value
                    if _APY_LOADED and isinstance(kwargs["ro"], units.Quantity)
                    else kwargs["ro"]
                )
        if "vo" in kwargs:
            if (
                not kwargs["vo"] is None
                and not isinstance(kwargs["vo"], numbers.Number)
                and not (_APY_LOADED and isinstance(kwargs["vo"], units.Quantity))
            ):
                raise RuntimeError(
                    f"Input 'vo={kwargs['vo']}' not understood; should either be a number or an astropy Quantity"
                )
            else:
                # Convert a vo Quantity to its plain value in km/s
                kwargs["vo"] = (
                    kwargs["vo"].to(units.km / units.s).value
                    if _APY_LOADED and isinstance(kwargs["vo"], units.Quantity)
                    else kwargs["vo"]
                )
        return func(x, **kwargs)
    return parse_x_wrapper
@check_parser_input_type
def parse_length(x, ro=None, vo=None):
    """Convert a length input to internal units by dividing by ro; astropy
    Quantities are first converted to kpc, plain numbers pass through."""
    if _APY_LOADED and isinstance(x, units.Quantity):
        return x.to(units.kpc).value / ro
    return x
@check_parser_input_type
def parse_length_kpc(x):
    """Convert a length input to kpc; plain numbers pass through unchanged."""
    if _APY_LOADED and isinstance(x, units.Quantity):
        return x.to(units.kpc).value
    return x
@check_parser_input_type
def parse_velocity(x, ro=None, vo=None):
    """Convert a velocity input to internal units by dividing by vo; astropy
    Quantities are first converted to km/s, plain numbers pass through."""
    if _APY_LOADED and isinstance(x, units.Quantity):
        return x.to(units.km / units.s).value / vo
    return x
@check_parser_input_type
def parse_velocity_kms(x):
    """Convert a velocity input to km/s; plain numbers pass through
    unchanged."""
    if _APY_LOADED and isinstance(x, units.Quantity):
        return x.to(units.km / units.s).value
    return x
@check_parser_input_type
def parse_angle(x):
    """Convert an angle input to radians; plain numbers pass through
    unchanged."""
    if _APY_LOADED and isinstance(x, units.Quantity):
        return x.to(units.rad).value
    return x
@check_parser_input_type
def parse_time(x, ro=None, vo=None):
    """Convert a time input to internal units; astropy Quantities are
    converted via Gyr, plain numbers pass through."""
    if _APY_LOADED and isinstance(x, units.Quantity):
        return x.to(units.Gyr).value / time_in_Gyr(vo, ro)
    return x
@check_parser_input_type
def parse_mass(x, ro=None, vo=None):
    """Convert a mass input to internal units; accepts either G x mass
    Quantities (pc km^2/s^2) or plain mass Quantities, plain numbers pass
    through."""
    if not (_APY_LOADED and isinstance(x, units.Quantity)):
        return x
    try:
        # G x mass units take precedence
        return (
            x.to(units.pc * units.km**2 / units.s**2).value / mass_in_msol(vo, ro) / _G
        )
    except units.UnitConversionError:
        pass
    # Fall back to a plain mass in units of 10^10 Msun
    return x.to(1e10 * units.Msun).value / mass_in_1010msol(vo, ro)
@check_parser_input_type
def parse_energy(x, ro=None, vo=None):
    """Convert a (specific) energy input to internal units by dividing by
    vo^2; plain numbers pass through."""
    if _APY_LOADED and isinstance(x, units.Quantity):
        return x.to(units.km**2 / units.s**2).value / vo**2.0
    return x
@check_parser_input_type
def parse_angmom(x, ro=None, vo=None):
    """Convert an angular-momentum input to internal units by dividing by
    ro*vo; plain numbers pass through."""
    if _APY_LOADED and isinstance(x, units.Quantity):
        return x.to(units.kpc * units.km / units.s).value / ro / vo
    return x
@check_parser_input_type
def parse_frequency(x, ro=None, vo=None):
    """Convert a frequency input to internal units; astropy Quantities are
    converted via km/s/kpc, plain numbers pass through."""
    if _APY_LOADED and isinstance(x, units.Quantity):
        return x.to(units.km / units.s / units.kpc).value / freq_in_kmskpc(vo, ro)
    return x
@check_parser_input_type
def parse_force(x, ro=None, vo=None):
    """Convert a force input to internal units; accepts either acceleration
    Quantities (pc/Myr^2) or G x surface-density Quantities (Msun/pc^2),
    plain numbers pass through."""
    if not (_APY_LOADED and isinstance(x, units.Quantity)):
        return x
    try:
        return x.to(units.pc / units.Myr**2).value / force_in_pcMyr2(vo, ro)
    except units.UnitConversionError:
        pass
    # Fall back to G x surface-density units
    return x.to(units.Msun / units.pc**2).value / force_in_2piGmsolpc2(vo, ro)
@check_parser_input_type
def parse_dens(x, ro=None, vo=None):
    """Convert a density input to internal units; accepts either mass-density
    Quantities (Msun/pc^3) or G x density Quantities (km^2/s^2/pc^2), plain
    numbers pass through."""
    if not (_APY_LOADED and isinstance(x, units.Quantity)):
        return x
    try:
        return x.to(units.Msun / units.pc**3).value / dens_in_msolpc3(vo, ro)
    except units.UnitConversionError:
        pass
    # Try Gxdens
    return (
        x.to(units.km**2 / units.s**2 / units.pc**2).value
        / dens_in_msolpc3(vo, ro)
        / _G
    )
@check_parser_input_type
def parse_surfdens(x, ro=None, vo=None):
    """Convert a surface-density input to internal units; accepts either
    Msun/pc^2 Quantities or G x surface-density Quantities (km^2/s^2/pc),
    plain numbers pass through."""
    if not (_APY_LOADED and isinstance(x, units.Quantity)):
        return x
    try:
        return x.to(units.Msun / units.pc**2).value / surfdens_in_msolpc2(vo, ro)
    except units.UnitConversionError:
        pass
    # Try Gxsurfdens
    return (
        x.to(units.km**2 / units.s**2 / units.pc).value
        / surfdens_in_msolpc2(vo, ro)
        / _G
    )
@check_parser_input_type
def parse_numdens(x, ro=None, vo=None):
    """Convert a number-density input to internal units by multiplying by
    ro^3; astropy Quantities are first converted to 1/kpc^3, plain numbers
    pass through."""
    if _APY_LOADED and isinstance(x, units.Quantity):
        return x.to(1 / units.kpc**3).value * ro**3
    return x
# Decorator to apply these transformations
# NOTE: names with underscores in them signify return values that *always* have
# units, which is depended on in the Orbit returns (see issue #326)
# Lookup table used by physical_output: does converting the named quantity to
# physical units require ro (the length unit) to be known?
_roNecessary = {
    "time": True,
    "position": True,
    "position_kpc": True,
    "velocity": False,
    "velocity2": False,
    "velocity2surfacendensity": False,
    "velocity_kms": False,
    "energy": False,
    "density": True,
    "numberdensity": True,
    "force": True,
    "velocity2surfacedensity": True,
    "surfacedensity": True,
    "numbersurfacedensity": True,
    "surfacedensitydistance": True,
    "mass": True,
    "action": True,
    "frequency": True,
    "frequency-kmskpc": True,
    "forcederivative": True,
    "angle": True,
    "angle_deg": True,
    "proper-motion_masyr": True,
    "phasespacedensity": True,
    "phasespacedensity2d": True,
    "phasespacedensityvelocity": True,
    "phasespacedensityvelocity2": True,
    "massphasespacedensity": True,
    "massenergydensity": False,
    "dimensionless": False,
}
# Same lookup for vo: start from the ro table and flip the entries where the
# requirement differs (purely positional/angular quantities need no vo;
# velocity/energy-like quantities do)
_voNecessary = copy.copy(_roNecessary)
_voNecessary["position"] = False
_voNecessary["position_kpc"] = False
_voNecessary["angle"] = False
_voNecessary["angle_deg"] = False
_voNecessary["velocity"] = True
_voNecessary["velocity2"] = True
_voNecessary["velocity_kms"] = True
_voNecessary["energy"] = True
_voNecessary["massenergydensity"] = True
# Determine whether or not outputs will be physical or not
def physical_output(obj: Any, kwargs: dict, quantity: str) -> Tuple[bool, float, float]:
    """
    Determine whether or not outputs will be physical or not
    Parameters
    ----------
    obj : galpy object (or list in case of potentials)
        galpy object.
    kwargs : dict
        Kwargs passed to the method.
    quantity : str
        Quantity to be returned.
    Returns
    -------
    tuple
        A tuple containing:
        - boolean that indicates whether or not to use physical units.
        - ro.
        - vo.
    Notes
    -----
    - 2023-04-24 - Written - Bovy (UofT).
    """
    # log=True disables physical output regardless of use_physical
    use_physical = kwargs.get("use_physical", True) and not kwargs.get("log", False)
    # Parse whether ro or vo should be considered to be set, because
    # the return value will have units anyway
    # (like in Orbit methods that return numbers with units, like ra)
    roSet = "_" in quantity  # _ in quantity name means always units
    voSet = "_" in quantity  # _ in quantity name means always units
    use_physical = (
        use_physical or "_" in quantity
    )  # _ in quantity name means always units
    # ro resolution order: explicit kwarg > the object's own unit > the unit
    # of the first element when obj is a list of Potentials
    ro = kwargs.get("ro", None)
    if ro is None and (roSet or (hasattr(obj, "_roSet") and obj._roSet)):
        ro = obj._ro
    if (
        ro is None
        and isinstance(obj, list)
        and hasattr(obj[0], "_roSet")
        and obj[0]._roSet
    ):
        # For lists of Potentials
        ro = obj[0]._ro
    if _APY_LOADED and isinstance(ro, units.Quantity):
        ro = ro.to(units.kpc).value
    # vo resolved the same way as ro
    vo = kwargs.get("vo", None)
    if vo is None and (voSet or (hasattr(obj, "_voSet") and obj._voSet)):
        vo = obj._vo
    if (
        vo is None
        and isinstance(obj, list)
        and hasattr(obj[0], "_voSet")
        and obj[0]._voSet
    ):
        # For lists of Potentials
        vo = obj[0]._vo
    if _APY_LOADED and isinstance(vo, units.Quantity):
        vo = vo.to(units.km / units.s).value
    # Physical output additionally requires that every unit the quantity
    # needs (per _roNecessary/_voNecessary) was actually resolved
    return (
        (
            use_physical
            and not (_voNecessary[quantity.lower()] and vo is None)
            and not (_roNecessary[quantity.lower()] and ro is None)
        ),
        ro,
        vo,
    )
def physical_conversion(quantity, pop=False):
    """Decorator to convert to physical coordinates:
    quantity = [position,velocity,time]

    The decorated method's output is multiplied by the conversion factor for
    ``quantity`` (resolved from the ro/vo units of the object the method is
    bound to, see physical_output); when astropy-quantity output is enabled
    the result is additionally wrapped in an astropy Quantity.  If pop=True,
    the unit-related kwargs are removed before calling the method."""
    def wrapper(method):
        @wraps(method)
        def wrapped(*args, **kwargs):
            # Determine whether or not to return outputs in physical units
            use_physical_output, ro, vo = physical_output(args[0], kwargs, quantity)
            # Determine whether physical outputs were explicitly asked for
            use_physical_explicitly_set = kwargs.get("use_physical", False)
            # Determine whether or not to return outputs as quantities
            _apy_units = kwargs.get("quantity", _APY_UNITS)
            # Remove ro, vo, use_physical, and quantity kwargs if necessary
            if pop:
                _ = extract_physical_kwargs(kwargs)
            if use_physical_output:
                # NOTE(review): this Orbit import appears unused within this
                # function — confirm whether it is needed before removing
                from ..orbit import Orbit

                # Pick the conversion factor ``fac`` (and, when quantity
                # output is requested, the astropy unit ``u``) for this
                # quantity; quantities marked "already in ..." use fac = 1
                if quantity.lower() == "time":
                    fac = time_in_Gyr(vo, ro)
                    if _apy_units:
                        u = units.Gyr
                elif quantity.lower() == "position":
                    fac = ro
                    if _apy_units:
                        u = units.kpc
                elif quantity.lower() == "position_kpc":  # already in kpc
                    fac = 1.0
                    if _apy_units:
                        u = units.kpc
                elif quantity.lower() == "velocity":
                    fac = vo
                    if _apy_units:
                        u = units.km / units.s
                elif quantity.lower() == "velocity2":
                    fac = vo**2.0
                    if _apy_units:
                        u = (units.km / units.s) ** 2
                elif quantity.lower() == "velocity_kms":  # already in km/s
                    fac = 1.0
                    if _apy_units:
                        u = units.km / units.s
                elif quantity.lower() == "frequency":
                    if kwargs.get("kmskpc", False) and not _apy_units:
                        fac = freq_in_kmskpc(vo, ro)
                    else:
                        fac = freq_in_Gyr(vo, ro)
                    if _apy_units:
                        u = units.Gyr**-1.0
                elif quantity.lower() == "frequency-kmskpc":
                    fac = freq_in_kmskpc(vo, ro)
                    if _apy_units:
                        u = units.km / units.s / units.kpc
                elif quantity.lower() == "action":
                    fac = ro * vo
                    if _apy_units:
                        u = units.kpc * units.km / units.s
                elif quantity.lower() == "energy":
                    fac = vo**2.0
                    if _apy_units:
                        u = units.km**2.0 / units.s**2.0
                elif quantity.lower() == "angle":  # in rad
                    fac = 1.0
                    if _apy_units:
                        u = units.rad
                elif quantity.lower() == "angle_deg":  # already in deg
                    fac = 1.0
                    if _apy_units:
                        u = units.deg
                elif quantity.lower() == "proper-motion_masyr":  # already in mas/yr
                    fac = 1.0
                    if _apy_units:
                        u = units.mas / units.yr
                elif quantity.lower() == "force":
                    fac = force_in_kmsMyr(vo, ro)
                    if _apy_units:
                        u = units.km / units.s / units.Myr
                elif quantity.lower() == "density":
                    fac = dens_in_msolpc3(vo, ro)
                    if _apy_units:
                        u = units.Msun / units.pc**3
                elif quantity.lower() == "numberdensity":
                    fac = 1 / ro**3.0
                    if _apy_units:
                        u = 1 / units.kpc**3
                elif quantity.lower() == "velocity2surfacedensity":
                    fac = surfdens_in_msolpc2(vo, ro) * vo**2
                    if _apy_units:
                        u = units.Msun / units.pc**2 * (units.km / units.s) ** 2
                elif quantity.lower() == "surfacedensity":
                    fac = surfdens_in_msolpc2(vo, ro)
                    if _apy_units:
                        u = units.Msun / units.pc**2
                elif quantity.lower() == "numbersurfacedensity":
                    fac = 1.0 / ro**2.0
                    if _apy_units:
                        u = 1 / units.kpc**2
                elif quantity.lower() == "surfacedensitydistance":
                    fac = surfdens_in_msolpc2(vo, ro) * ro * 1000.0
                    if _apy_units:
                        u = units.Msun / units.pc
                elif quantity.lower() == "mass":
                    fac = mass_in_msol(vo, ro)
                    if _apy_units:
                        u = units.Msun
                elif quantity.lower() == "forcederivative":
                    fac = freq_in_Gyr(vo, ro) ** 2.0
                    if _apy_units:
                        u = units.Gyr**-2.0
                elif quantity.lower() == "phasespacedensity":
                    fac = 1.0 / vo**3.0 / ro**3.0
                    if _apy_units:
                        u = 1 / (units.km / units.s) ** 3 / units.kpc**3
                elif quantity.lower() == "phasespacedensity2d":
                    fac = 1.0 / vo**2.0 / ro**2.0
                    if _apy_units:
                        u = 1 / (units.km / units.s) ** 2 / units.kpc**2
                elif quantity.lower() == "phasespacedensityvelocity":
                    fac = 1.0 / vo**2.0 / ro**3.0
                    if _apy_units:
                        u = 1 / (units.km / units.s) ** 2 / units.kpc**3
                elif quantity.lower() == "phasespacedensityvelocity2":
                    fac = 1.0 / vo / ro**3.0
                    if _apy_units:
                        u = 1 / (units.km / units.s) / units.kpc**3
                elif quantity.lower() == "massphasespacedensity":
                    fac = mass_in_msol(vo, ro) / vo**3.0 / ro**3.0
                    if _apy_units:
                        u = units.Msun / (units.km / units.s) ** 3 / units.kpc**3
                elif quantity.lower() == "massenergydensity":
                    fac = mass_in_msol(vo, ro) / vo**2.0
                    if _apy_units:
                        u = units.Msun / (units.km / units.s) ** 2
                elif quantity.lower() == "dimensionless":
                    fac = 1.0
                    if _apy_units:
                        u = units.dimensionless_unscaled
                out = method(*args, **kwargs)
                if out is None:
                    return out
                if _apy_units:
                    return units.Quantity(out * fac, unit=u)
                else:
                    # complicated logic for dealing with ro and vo arrays
                    return out * (
                        fac[:, numpy.newaxis]
                        if isinstance(fac, numpy.ndarray) and len(out.shape) > 1
                        else fac
                    )
            else:
                # Warn when the caller explicitly asked for physical output
                # but the needed unit(s) could not be resolved
                if use_physical_explicitly_set:
                    warnings.warn(
                        "Returning output(s) in internal units even though use_physical=True, because ro and/or vo not set"
                    )
                return method(*args, **kwargs)
        return wrapped
    return wrapper
def physical_conversion_tuple(quantities, pop=False):
    """Decorator to convert to physical coordinates for tuple outputs.
    So outputs are a tuple of quantities that each need to be converted,
    with possibly different conversions, e.g., (R,vR)"""
    def wrapper(method):
        @wraps(method)
        def wrapped(*args, **kwargs):
            raw = method(*args, **kwargs)
            converted = ()
            for index, value in enumerate(raw):
                # Wrap each raw value in a dummy function (binding the value
                # as a default to avoid late binding) so that the scalar
                # physical_conversion decorator can perform the conversion
                dummy = lambda _obj, _value=value, **_kw: _value
                converted = converted + (
                    physical_conversion(quantities[index])(dummy)(args[0], **kwargs),
                )
            return converted
        return wrapped
    return wrapper
def potential_physical_input(method):
    """Decorator to convert inputs to Potential functions from physical
    to internal coordinates

    Positional arguments after the Potential and the common coordinate/time
    kwargs are divided by the appropriate unit when they are astropy
    Quantities; plain numbers are passed through unchanged."""
    @wraps(method)
    def wrapper(*args, **kwargs):
        from ..potential import flatten as flatten_potential

        # Normalize nested lists of Potentials into a flat representation
        Pot = flatten_potential(args[0])
        # ro: explicit kwarg > Potential attribute > first element of a list
        ro = kwargs.get("ro", None)
        if ro is None and hasattr(Pot, "_ro"):
            ro = Pot._ro
        if ro is None and isinstance(Pot, list) and hasattr(Pot[0], "_ro"):
            # For lists of Potentials
            ro = Pot[0]._ro
        if _APY_LOADED and isinstance(ro, units.Quantity):
            ro = ro.to(units.kpc).value
        # vo is only needed (and only resolved) for time and mass inputs.
        # NOTE(review): the 'v' conversion further down also uses vo, so it
        # appears to assume 't' or 'M' is always passed alongside 'v' —
        # confirm against the dissipative-force callers
        if "t" in kwargs or "M" in kwargs:
            vo = kwargs.get("vo", None)
            if vo is None and hasattr(Pot, "_vo"):
                vo = Pot._vo
            if vo is None and isinstance(Pot, list) and hasattr(Pot[0], "_vo"):
                # For lists of Potentials
                vo = Pot[0]._vo
            if _APY_LOADED and isinstance(vo, units.Quantity):
                vo = vo.to(units.km / units.s).value
        # Loop through args
        newargs = (Pot,)
        for ii in range(1, len(args)):
            if _APY_LOADED and isinstance(args[ii], units.Quantity):
                # positional args are treated as lengths (kpc)
                newargs = newargs + (args[ii].to(units.kpc).value / ro,)
            else:
                newargs = newargs + (args[ii],)
        args = newargs
        # phi and t kwargs, also do R, z, and x in case these are given as kwargs
        if (
            "phi" in kwargs
            and _APY_LOADED
            and isinstance(kwargs["phi"], units.Quantity)
        ):
            kwargs["phi"] = kwargs["phi"].to(units.rad).value
        if "t" in kwargs and _APY_LOADED and isinstance(kwargs["t"], units.Quantity):
            kwargs["t"] = kwargs["t"].to(units.Gyr).value / time_in_Gyr(vo, ro)
        if "R" in kwargs and _APY_LOADED and isinstance(kwargs["R"], units.Quantity):
            kwargs["R"] = kwargs["R"].to(units.kpc).value / ro
        if "z" in kwargs and _APY_LOADED and isinstance(kwargs["z"], units.Quantity):
            kwargs["z"] = kwargs["z"].to(units.kpc).value / ro
        if "x" in kwargs and _APY_LOADED and isinstance(kwargs["x"], units.Quantity):
            kwargs["x"] = kwargs["x"].to(units.kpc).value / ro
        # v kwarg for dissipative forces
        if "v" in kwargs and _APY_LOADED and isinstance(kwargs["v"], units.Quantity):
            kwargs["v"] = kwargs["v"].to(units.km / units.s).value / vo
        # Mass kwarg for rtide
        if "M" in kwargs and _APY_LOADED and isinstance(kwargs["M"], units.Quantity):
            try:
                kwargs["M"] = kwargs["M"].to(units.Msun).value / mass_in_msol(vo, ro)
            except units.UnitConversionError:
                # fall back to G x mass units
                kwargs["M"] = (
                    kwargs["M"].to(units.pc * units.km**2 / units.s**2).value
                    / mass_in_msol(vo, ro)
                    / _G
                )
        # kwargs that come up in quasiisothermaldf
        # z done above
        if "dz" in kwargs and _APY_LOADED and isinstance(kwargs["dz"], units.Quantity):
            kwargs["dz"] = kwargs["dz"].to(units.kpc).value / ro
        if "dR" in kwargs and _APY_LOADED and isinstance(kwargs["dR"], units.Quantity):
            kwargs["dR"] = kwargs["dR"].to(units.kpc).value / ro
        if (
            "zmax" in kwargs
            and _APY_LOADED
            and isinstance(kwargs["zmax"], units.Quantity)
        ):
            kwargs["zmax"] = kwargs["zmax"].to(units.kpc).value / ro
        return method(*args, **kwargs)
    return wrapper
def physical_conversion_actionAngle(quantity, pop=False):
    """Decorator to convert to physical coordinates for the actionAngle methods:
    quantity= call, actionsFreqs, or actionsFreqsAngles (or EccZmaxRperiRap for actionAngleStaeckel)

    Builds per-element lists of conversion factors ``fac`` (and astropy units
    ``u``) matching the structure of the wrapped method's tuple output and
    applies them element-wise; a scalar output falls back to fac[0]."""
    def wrapper(method):
        @wraps(method)
        def wrapped(*args, **kwargs):
            use_physical = kwargs.get("use_physical", True)
            # ro/vo: explicit kwarg > the actionAngle object's own unit
            ro = kwargs.get("ro", None)
            if ro is None and hasattr(args[0], "_roSet") and args[0]._roSet:
                ro = args[0]._ro
            if _APY_LOADED and isinstance(ro, units.Quantity):
                ro = ro.to(units.kpc).value
            vo = kwargs.get("vo", None)
            if vo is None and hasattr(args[0], "_voSet") and args[0]._voSet:
                vo = args[0]._vo
            if _APY_LOADED and isinstance(vo, units.Quantity):
                vo = vo.to(units.km / units.s).value
            # Remove ro and vo kwargs if necessary
            if pop and "use_physical" in kwargs:
                kwargs.pop("use_physical")
            if pop and "ro" in kwargs:
                kwargs.pop("ro")
            if pop and "vo" in kwargs:
                kwargs.pop("vo")
            if use_physical and not vo is None and not ro is None:
                out = method(*args, **kwargs)
                # Actions convert with ro*vo; fewer than 4 outputs means a
                # 1D system with a single action
                if "call" in quantity or "actions" in quantity:
                    if "actions" in quantity and len(out) < 4:  # 1D system
                        fac = [ro * vo]
                        if _APY_UNITS:
                            u = [units.kpc * units.km / units.s]
                    else:
                        fac = [ro * vo, ro * vo, ro * vo]
                        if _APY_UNITS:
                            u = [
                                units.kpc * units.km / units.s,
                                units.kpc * units.km / units.s,
                                units.kpc * units.km / units.s,
                            ]
                # Frequencies convert to 1/Gyr and are appended after actions
                if "Freqs" in quantity:
                    FreqsFac = freq_in_Gyr(vo, ro)
                    if len(out) < 4:  # 1D system
                        fac.append(FreqsFac)
                        if _APY_UNITS:
                            Freqsu = units.Gyr**-1.0
                            u.append(Freqsu)
                    else:
                        fac.extend([FreqsFac, FreqsFac, FreqsFac])
                        if _APY_UNITS:
                            Freqsu = units.Gyr**-1.0
                            u.extend([Freqsu, Freqsu, Freqsu])
                # Angles are already in rad, so their factor is 1
                if "Angles" in quantity:
                    if len(out) < 4:  # 1D system
                        fac.append(1.0)
                        if _APY_UNITS:
                            Freqsu = units.Gyr**-1.0
                            u.append(units.rad)
                    else:
                        fac.extend([1.0, 1.0, 1.0])
                        if _APY_UNITS:
                            Freqsu = units.Gyr**-1.0
                            u.extend([units.rad, units.rad, units.rad])
                # (ecc, zmax, rperi, rap): ecc is dimensionless, rest are lengths
                if "EccZmaxRperiRap" in quantity:
                    fac = [1.0, ro, ro, ro]
                    if _APY_UNITS:
                        u = [1.0, units.kpc, units.kpc, units.kpc]
                if _APY_UNITS:
                    newOut = ()
                    try:
                        for ii in range(len(out)):
                            newOut = newOut + (
                                units.Quantity(out[ii] * fac[ii], unit=u[ii]),
                            )
                    except TypeError:  # happens if out = scalar
                        newOut = units.Quantity(out * fac[0], unit=u[0])
                else:
                    newOut = ()
                    try:
                        for ii in range(len(out)):
                            newOut = newOut + (out[ii] * fac[ii],)
                    except TypeError:  # happens if out = scalar
                        newOut = out * fac[0]
                return newOut
            else:
                return method(*args, **kwargs)
        return wrapped
    return wrapper
def actionAngle_physical_input(method):
    """Decorator to convert inputs to actionAngle functions from physical
    to internal coordinates

    Each positional Quantity argument is tried as a length (kpc / ro), then
    a velocity (km/s / vo), then an angle (rad); plain numbers pass through."""
    @wraps(method)
    def wrapper(*args, **kwargs):
        # Fewer than 3 positional args means the input is an Orbit, which
        # carries its own units and is handled downstream
        if len(args) < 3:  # orbit input
            return method(*args, **kwargs)
        ro = kwargs.get("ro", None)
        if ro is None and hasattr(args[0], "_ro"):
            ro = args[0]._ro
        if _APY_LOADED and isinstance(ro, units.Quantity):
            ro = ro.to(units.kpc).value
        vo = kwargs.get("vo", None)
        if vo is None and hasattr(args[0], "_vo"):
            vo = args[0]._vo
        if _APY_LOADED and isinstance(vo, units.Quantity):
            vo = vo.to(units.km / units.s).value
        # Loop through args
        newargs = ()
        for ii in range(len(args)):
            if _APY_LOADED and isinstance(args[ii], units.Quantity):
                # Try length, then velocity, then angle units in turn
                try:
                    targ = args[ii].to(units.kpc).value / ro
                except units.UnitConversionError:
                    try:
                        targ = args[ii].to(units.km / units.s).value / vo
                    except units.UnitConversionError:
                        try:
                            targ = args[ii].to(units.rad).value
                        except units.UnitConversionError:
                            raise units.UnitConversionError(
                                "Input units not understood"
                            )
                newargs = newargs + (targ,)
            else:
                newargs = newargs + (args[ii],)
        args = newargs
        return method(*args, **kwargs)
    return wrapper
def physical_conversion_actionAngleInverse(quantity, pop=False):
    """Decorator to convert to physical coordinates for the actionAngleInverse methods:
    quantity= call, xvFreqs, or Freqs

    Builds per-element conversion-factor lists matching the wrapped method's
    tuple output (phase-space coordinates and/or frequencies) and applies
    them element-wise; a scalar output falls back to fac[0]."""
    def wrapper(method):
        @wraps(method)
        def wrapped(*args, **kwargs):
            use_physical = kwargs.get("use_physical", True)
            # ro/vo: explicit kwarg > the actionAngleInverse object's own unit
            ro = kwargs.get("ro", None)
            if ro is None and hasattr(args[0], "_roSet") and args[0]._roSet:
                ro = args[0]._ro
            if _APY_LOADED and isinstance(ro, units.Quantity):
                ro = ro.to(units.kpc).value
            vo = kwargs.get("vo", None)
            if vo is None and hasattr(args[0], "_voSet") and args[0]._voSet:
                vo = args[0]._vo
            if _APY_LOADED and isinstance(vo, units.Quantity):
                vo = vo.to(units.km / units.s).value
            # Remove ro and vo kwargs if necessary
            if pop and "use_physical" in kwargs:
                kwargs.pop("use_physical")
            if pop and "ro" in kwargs:
                kwargs.pop("ro")
            if pop and "vo" in kwargs:
                kwargs.pop("vo")
            if use_physical and not vo is None and not ro is None:
                fac = []
                u = []
                out = method(*args, **kwargs)
                # Phase-space output: (x, v) for 1D, (R, vR, vT, z, vz, phi)
                # for a full 3D system
                if "call" in quantity or "xv" in quantity:
                    if "xv" in quantity and len(out) < 4:  # 1D system
                        fac.extend([ro, vo])
                        if _APY_UNITS:
                            u.extend([units.kpc, units.km / units.s])
                    else:
                        fac.extend([ro, vo, vo, ro, vo, 1.0])
                        if _APY_UNITS:
                            u.extend(
                                [
                                    units.kpc,
                                    units.km / units.s,
                                    units.km / units.s,
                                    units.kpc,
                                    units.km / units.s,
                                    units.rad,
                                ]
                            )
                # Frequencies convert to 1/Gyr; a float output marks a 1D system
                if "Freqs" in quantity:
                    FreqsFac = freq_in_Gyr(vo, ro)
                    if isinstance(out, float):  # 1D system
                        fac.append(FreqsFac)
                        if _APY_UNITS:
                            Freqsu = units.Gyr**-1.0
                            u.append(Freqsu)
                    else:
                        fac.extend([FreqsFac, FreqsFac, FreqsFac])
                        if _APY_UNITS:
                            Freqsu = units.Gyr**-1.0
                            u.extend([Freqsu, Freqsu, Freqsu])
                if _APY_UNITS:
                    newOut = ()
                    try:
                        for ii in range(len(out)):
                            newOut = newOut + (
                                units.Quantity(out[ii] * fac[ii], unit=u[ii]),
                            )
                    except TypeError:  # Happens when out == scalar
                        newOut = units.Quantity(out * fac[0], unit=u[0])
                else:
                    newOut = ()
                    try:
                        for ii in range(len(out)):
                            newOut = newOut + (out[ii] * fac[ii],)
                    except TypeError:  # Happens when out == scalar
                        newOut = out * fac[0]
                return newOut
            else:
                return method(*args, **kwargs)
        return wrapped
    return wrapper
def actionAngleInverse_physical_input(method):
    """Decorator to convert inputs to actionAngleInverse functions from
    physical to internal coordinates

    Each positional Quantity argument is tried as an action (kpc km/s,
    divided by ro*vo), then as an angle (rad); plain numbers pass through."""
    @wraps(method)
    def wrapper(*args, **kwargs):
        # ro/vo: explicit kwarg > the object's own unit
        ro = kwargs.get("ro", None)
        if ro is None and hasattr(args[0], "_ro"):
            ro = args[0]._ro
        if _APY_LOADED and isinstance(ro, units.Quantity):
            ro = ro.to(units.kpc).value
        vo = kwargs.get("vo", None)
        if vo is None and hasattr(args[0], "_vo"):
            vo = args[0]._vo
        if _APY_LOADED and isinstance(vo, units.Quantity):
            vo = vo.to(units.km / units.s).value
        # Loop through args
        newargs = ()
        for ii in range(len(args)):
            if _APY_LOADED and isinstance(args[ii], units.Quantity):
                # Try action units first, then angle units
                try:
                    targ = args[ii].to(units.kpc * units.km / units.s).value / ro / vo
                except units.UnitConversionError:
                    try:
                        targ = args[ii].to(units.rad).value
                    except units.UnitConversionError:
                        raise units.UnitConversionError("Input units not understood")
                newargs = newargs + (targ,)
            else:
                newargs = newargs + (args[ii],)
        args = newargs
        return method(*args, **kwargs)
    return wrapper
|
jobovyREPO_NAMEgalpyPATH_START.@galpy_extracted@galpy-main@galpy@util@conversion.py@.PATH_END.py
|
{
"filename": "overscanLickObs.py",
"repo_name": "ishivvers/TheKastShiv",
"repo_path": "TheKastShiv_extracted/TheKastShiv-master/tools/overscanLickObs.py",
"type": "Python"
}
|
#!/usr/bin/python
# Version 1.0 -- Elinor Gates, 2015 Nov 24
# - obtained from https://mthamilton.ucolick.org/techdocs/instruments/kast/kast_obsHints.html
#
# Modified by Isaac Shivvers, 2016 Oct 4
# - no procedural changes, only made it useable by KastShiv
from astropy.io import fits,ascii
import numpy as np
import sys, getopt
def overscan_bias( ifilelist, ofilelist, fit='yes' ):
    """
    Given lists of input files and output files, will
    perform an overscan-informed bias correction for Kast frames.
    Written by E.Gates.
    If fit == 'yes', will fit a Legendre polynomial to the overscan values (recommended)

    Parameters
    ----------
    ifilelist : sequence of input FITS file names (one per frame)
    ofilelist : sequence of output FITS file names, same length as ifilelist
    fit : 'yes' smooths each overscan's column medians with a cubic
        Legendre fit before subtraction; any other value subtracts the
        raw medians.

    Notes
    -----
    Only column overscans are handled; a row overscan, a missing overscan,
    or more than two amplifiers aborts via sys.exit.
    """
    # for each file in ifilelist, read in file, figure out overscan and data regions, fit
    # overscan with desired function (if any), and subtract from data.
    # Write data to ofilelist value.
    for i in range( len(ifilelist) ):
        ifile=ifilelist[i]
        ofile=ofilelist[i]
        data, header = fits.getdata(ifile,header=True)
        # read necessary keywords from fits header
        xsize = header['NAXIS1']
        ysize = header['NAXIS2']
        xorig = header['CRVAL1U']
        yorig = header['CRVAL2U']
        cdelt1 = header['CDELT1U']
        cdelt2 = header['CDELT2U']
        rover = header['ROVER']
        cover = header['COVER']
        inxsize = header['DNAXIS1']
        inysize = header['DNAXIS2']
        ampsx = header['AMPSCOL']
        ampsy = header['AMPSROW']
        # determine number and sizes of overscan and data regions
        namps = ampsx*ampsy
        if rover > 0:
            over=rover
            sys.exit('Program does not yet deal with row overscans. Exiting.')
        else:
            over = cover
        if over == 0:
            sys.exit('No overscan region specified in FITS header. Exiting.')
        # single amplifier mode
        if namps == 1:
            # overscan columns sit at the right edge of the frame
            biassec = data[:,xsize-cover:xsize]
            datasec = data[0:,0:xsize-cover]
            # median overscan section
            bias=np.median(biassec, axis=1)
            # legendre fit
            if fit == 'yes':
                # fit
                lfit = np.polynomial.legendre.legfit(range(0,len(bias)),bias,3)
                bias = np.polynomial.legendre.legval(range(0,len(bias)),lfit)
            # subtract overscan
            # NOTE(review): datasec is a view of `data` and datanew aliases it,
            # so the loop below also modifies `data` in place; harmless here
            # because `data` is reloaded for every file.
            datanew = datasec
            # NOTE(review): this inner loop reuses the outer loop variable `i`;
            # safe only because the outer loop reassigns `i` each iteration.
            for i in range(datasec.shape[1]):
                datanew[:,i] = datasec[:,i]-bias
        # two amplifier mode
        if namps == 2:
            # each amplifier has its own overscan strip at the right edge
            biasseca = data[:,xsize-cover*2:xsize-cover]
            biassecb = data[:,xsize-cover:xsize]
            # median overscan sections
            biasa=np.median(biasseca,axis=1)
            biasb=np.median(biassecb,axis=1)
            # legendre fit
            if fit == 'yes':
                lfita = np.polynomial.legendre.legfit(range(0,len(biasa)),biasa,3)
                lfitb = np.polynomial.legendre.legfit(range(0,len(biasb)),biasb,3)
                biasa = np.polynomial.legendre.legval(range(0,len(biasa)),lfita)
                biasb = np.polynomial.legendre.legval(range(0,len(biasb)),lfitb)
            # extract data regions
            #determine size of binned data region
            # NOTE(review): the divisions below appear to rely on Python 2
            # integer division when the header values are ints; under Python 3
            # they produce floats and the slice indices below would raise —
            # confirm the intended interpreter.
            hsize=abs(inxsize/cdelt1)
            # calculate x origin of readout in binned units if cdelt1 negative or positive
            if cdelt1 < 0:
                xorig=(xorig-(xsize-2*cover)*abs(cdelt1))/abs(cdelt1)
            else:
                xorig=xorig/cdelt1
            x0=xorig+xsize-1-cover*2 # need to test is need -1 because starting counting at 0
            # determine which columns are on which amplifier and subtract proper overscan region
            if x0 < hsize/2: # all data on left amplifier
                datanew=data[:,0:xsize-cover*2]
                m=datanew.shape[1]
                for i in range(0,m):
                    datanew[:,i]=datanew[:,i]-biasa
            if xorig >= hsize/2: # all data on right amplifier
                datanew=data[:,0:xsize-cover*2]
                m=datanew.shape[1]
                for i in range(0,m):
                    datanew[:,i]=datanew[:,i]-biasb
            if xorig < hsize/2 and x0 > hsize/2:
                # data straddles both amplifiers: split at the midpoint column
                x1=hsize/2-xorig
                dataa=data[:,0:x1]
                datab=data[:,x1:-cover*2]
                ma=dataa.shape[1]
                mb=datab.shape[1]
                for i in range(0,ma):
                    dataa[:,i]=dataa[:,i]-biasa
                for i in range(0,mb):
                    datab[:,i]=datab[:,i]-biasb
                # merge dataa and datab into single image
                datanew=np.hstack([dataa,datab])
        if namps > 2:
            sys.exit('Program does not yet deal with more than two overscan regions. Exiting.')
        # add info to header
        header['HISTORY'] = 'Overscan subtracted'
        # write new fits file
        # `clobber` is the pre-astropy-2.0 spelling of `overwrite`.
        fits.writeto(ofile,datanew,header,clobber=True)
def main(argv):
    """Command-line driver for the overscan bias subtraction.

    Options:
        -i <inputfilelist>   file listing the input FITS frames
        -o <outputfilelist>  file listing the matching output names
        -f                   fit a Legendre polynomial to the overscan
        -h                   print usage and exit

    NOTE(review): the bare ``print`` statements mean this script runs only
    under Python 2.
    """
    inputfilelist = ''
    outputfilelist = ''
    fit = 'no'
    try:
        opts, args = getopt.getopt(argv,"hfi:o:",["ifilelist=","ofilelist="])
    except getopt.GetoptError:
        print 'overscanLickObs.py -f -i <inputfilelist> -o <outputfilelist>'
        print '-f indicates do a Legendre fit to overscan'
        sys.exit(2)
    for opt, arg in opts:
        if opt == '-h':
            print 'overscanLickObs.py -f -i <inputfilelist> -o <outputfilelist>'
            print '-f indicates do a Legendre fit to overscan'
            sys.exit(2)
        elif opt in ("-i", "--ifilelist"):
            inputfilelist = arg
        elif opt in ("-o", "--ofilelist"):
            outputfilelist = arg
        elif opt == '-f':
            fit = 'yes'
    print 'Input filelist is ', inputfilelist
    print 'Output filelist is ', outputfilelist
    print 'Fit is ', fit
    # open input and output filelists
    # NOTE(review): ascii.read returns an astropy Table, so overscan_bias
    # receives table rows rather than plain strings — verify the list-file
    # format makes ifilelist[i] usable as a path.
    ifilelist = ascii.read(inputfilelist, header_start=-1)
    ofilelist = ascii.read(outputfilelist, header_start=-1)
    # how many files
    numifiles = len(ifilelist)
    numofiles = len(ofilelist)
    if numifiles != numofiles:
        sys.exit('Input and output file lists have different numbers of files. Exiting.')
    overscan_bias( ifilelist, ofilelist, fit )
# Script entry point: forward only the user-supplied arguments (drop argv[0]).
if __name__ == "__main__":
    main(sys.argv[1:])
|
ishivversREPO_NAMETheKastShivPATH_START.@TheKastShiv_extracted@TheKastShiv-master@tools@overscanLickObs.py@.PATH_END.py
|
{
"filename": "test_ccd.py",
"repo_name": "wfirst-cgi/emccd_detect",
"repo_path": "emccd_detect_extracted/emccd_detect-master/arcticpy_folder/test_arcticpy/test_ccd.py",
"type": "Python"
}
|
import numpy as np
import pytest
import arcticpy as ac
class TestCCD:
    def test__single_electron_fractional_height_from_electrons(
        self,
    ):
        """Scalar electron counts map to fractional cloud volumes."""
        # Linear well (fill power 1, no notch): volume = n / full_well_depth.
        linear_ccd = ac.CCD(
            full_well_depth=10000.0, well_notch_depth=0.0, well_fill_power=1.0
        )
        for n_electrons, expected in [
            (100.0, 0.01),
            (1000.0, 0.1),
            (1000000.0, 1.0),
        ]:
            volume = (
                linear_ccd.cloud_fractional_volumes_from_n_electrons_and_phase(
                    n_electrons=n_electrons
                )
            )
            assert volume == expected
        # Sub-linear well (fill power 0.5): volume = (n / full_well) ** 0.5.
        sqrt_ccd = ac.CCD(
            full_well_depth=10000.0, well_notch_depth=0.0, well_fill_power=0.5
        )
        for n_electrons, expected in [
            (100.0, 0.01 ** 0.5),
            (1000.0, 0.1 ** 0.5),
        ]:
            volume = (
                sqrt_ccd.cloud_fractional_volumes_from_n_electrons_and_phase(
                    n_electrons=n_electrons
                )
            )
            assert volume == expected
        # Deep notch: a full well gives volume 1, a cloud below the notch 0.
        notched_ccd = ac.CCD(
            full_well_depth=100.0, well_notch_depth=90.0, well_fill_power=1.0
        )
        for n_electrons, expected in [(100.0, 1.0), (9.0, 0.0)]:
            volume = (
                notched_ccd.cloud_fractional_volumes_from_n_electrons_and_phase(
                    n_electrons=n_electrons
                )
            )
            assert volume == expected

    def test__electron_fractional_heights_from_electrons(
        self,
    ):
        """A list of electron counts maps element-wise to volumes."""
        ccd = ac.CCD(
            full_well_depth=10000.0, well_notch_depth=0.0, well_fill_power=1.0
        )
        volumes = ccd.cloud_fractional_volumes_from_n_electrons_and_phase(
            n_electrons=[10.0, 100.0, 1000.0]
        )
        assert volumes == pytest.approx([0.001, 0.01, 0.1])
class TestMultiPhase:
    def test__mutli_phase_initialisation(self):
        """Scalar CCD parameters are broadcast across all phases."""
        n_phases = 4
        # All scalars: every parameter is duplicated per phase.
        ccd = ac.CCD(
            well_notch_depth=0.01,
            well_fill_power=0.8,
            full_well_depth=84700,
            fraction_of_traps_per_phase=[0.5, 0.2, 0.2, 0.1],
        )
        assert ccd.well_notch_depth == [0.01] * n_phases
        assert ccd.well_fill_power == [0.8] * n_phases
        assert ccd.full_well_depth == [84700] * n_phases
        # Mixed: a per-phase list is kept as given, scalars still broadcast.
        ccd = ac.CCD(
            well_notch_depth=0.01,
            well_fill_power=0.8,
            full_well_depth=[84700, 1e5, 2e5, 3e5],
            fraction_of_traps_per_phase=[0.5, 0.2, 0.2, 0.1],
        )
        assert ccd.well_notch_depth == [0.01] * n_phases
        assert ccd.well_fill_power == [0.8] * n_phases
        assert ccd.full_well_depth == [84700, 1e5, 2e5, 3e5]

    def test__extract_phase(self):
        """CCDPhase picks out one phase's parameters from a multi-phase CCD."""
        ccd = ac.CCD(
            well_notch_depth=0.01,
            well_fill_power=0.8,
            full_well_depth=[84700, 1e5, 2e5, 3e5],
            fraction_of_traps_per_phase=[0.5, 0.2, 0.2, 0.1],
        )
        for phase, full_well in enumerate([84700, 1e5, 2e5, 3e5]):
            ccd_phase = ac.CCDPhase(ccd, phase)
            assert ccd_phase.well_notch_depth == 0.01
            assert ccd_phase.full_well_depth == full_well
|
wfirst-cgiREPO_NAMEemccd_detectPATH_START.@emccd_detect_extracted@emccd_detect-master@arcticpy_folder@test_arcticpy@test_ccd.py@.PATH_END.py
|
{
"filename": "index_lookup.py",
"repo_name": "keras-team/keras",
"repo_path": "keras_extracted/keras-master/keras/src/layers/preprocessing/index_lookup.py",
"type": "Python"
}
|
import collections
import numpy as np
from keras.src import backend
from keras.src.layers.layer import Layer
from keras.src.utils import argument_validation
from keras.src.utils import numerical_utils
from keras.src.utils import tf_utils
from keras.src.utils.module_utils import tensorflow as tf
class IndexLookup(Layer):
"""Maps values from a vocabulary to integer indices.
This layer translates a set of arbitrary hashables into an integer output
via a table-based lookup, with optional out-of-vocabulary handling. This is
the basis layer for both IntegerLookup and StringLookup; it holds the common
logic but is not intended to be exported as part of the Keras API.
Args:
max_tokens: The maximum size of the vocabulary for this layer.
If `None`, there is no cap on the size of the vocabulary.
Note that this size includes the OOV and mask tokens.
num_oov_indices: The number of out-of-vocabulary tokens to use.
If this value is more than 1, OOV inputs are hashed to determine
their OOV value. If this value is 0,
OOV inputs will cause an error when calling the layer.
mask_token: A token that represents masked inputs.
When `output_mode` is `"int"`,
the token is included in vocabulary and mapped to index 0.
In other output modes, the token will not appear in the vocabulary
and instances of the mask token in the input will be dropped.
If set to `None`, no mask term will be added.
oov_token: Only used when `invert` is `True`.
The token to return for OOV indices.
vocabulary: Optional. Either an array or a string path to a text file.
If passing an array, can pass a tuple, list, 1D numpy array,
            or 1D tensor containing the vocabulary terms.
If passing a file path, the file should contain one line per term
in the vocabulary. If this argument is set,
there is no need to `adapt` the layer.
vocabulary_dtype: The dtype of the vocabulary terms.
For example, `"int64"` or `"string"`.
idf_weights: Only valid when `output_mode` is `"tf_idf"`.
A tuple, list, 1D numpy array, or 1D tensor or the same length
as the vocabulary, containing the floating point
inverse document frequency weights, which will be multiplied
by per sample term counts for the final TF-IDF
weight. If the `vocabulary` argument is set, and `output_mode`
is `"tf_idf"`, this argument must be supplied.
invert: Only valid when `output_mode` is `"int"`.
If `True`, this layer will map indices to vocabulary items
instead of mapping vocabulary items to indices.
Defaults to `False`.
output_mode: Specification for the output of the layer. Values can be
`"int"`, `"one_hot"`, `"multi_hot"`, `"count"`, or `"tf_idf"`
configuring the layer as follows:
- `"int"`: Return the raw integer indices of the input tokens.
- `"one_hot"`: Encodes each individual element in the input into an
array the same size as the vocabulary, containing a 1
at the element index. If the last dimension is size 1,
will encode on that dimension.
If the last dimension is not size 1,
will append a new dimension for the encoded output.
- `"multi_hot"`: Encodes each sample in the input into
a single array the same size as the vocabulary,
containing a 1 for each vocabulary term present in the sample.
Treats the last dimension as the sample dimension,
if input shape is `(..., sample_length)`, output shape will
be `(..., num_tokens)`.
- `"count"`: As `"multi_hot"`, but the int array contains a count
of the number of times the token at that index appeared
in the sample.
- `"tf_idf"`: As `"multi_hot"`, but the TF-IDF algorithm
is applied to find the value in each token slot.
Defaults to `"int"`.
pad_to_max_tokens: Only valid when `output_mode` is `"multi_hot"`,
`"count"`, or `"tf_idf"`. If `True`, the output will have its
feature axis padded to `max_tokens` even if the number
of unique tokens in the vocabulary is less than max_tokens,
resulting in a tensor of shape `(batch_size, max_tokens)`
regardless of vocabulary size. Defaults to `False`.
sparse: Boolean. Only applicable to `"one_hot"`, `"multi_hot"`,
`"count"` and `"tf-idf"` output modes.
If `True`, returns a `SparseTensor` instead of a dense `Tensor`.
Defaults to `False`.
"""
    def __init__(
        self,
        max_tokens,
        num_oov_indices,
        mask_token,
        oov_token,
        vocabulary_dtype,
        vocabulary=None,
        idf_weights=None,
        invert=False,
        output_mode="int",
        sparse=False,
        pad_to_max_tokens=False,
        name=None,
        **kwargs,
    ):
        """Validate configuration, set key/value dtypes and special-token
        mapping, then either install the given vocabulary or create the
        mutable tables used by `adapt`."""
        # --- Argument validation ---
        # If max_tokens is set, the value must be greater than 1 - otherwise we
        # are creating a 0-element vocab, which doesn't make sense.
        if max_tokens is not None and max_tokens <= 1:
            raise ValueError(
                "If set, `max_tokens` must be greater than 1. "
                f"Received: max_tokens={max_tokens}"
            )
        if pad_to_max_tokens and max_tokens is None:
            raise ValueError(
                "If pad_to_max_tokens is True, must set `max_tokens`. "
                f"Received: max_tokens={max_tokens}"
            )
        if num_oov_indices < 0:
            raise ValueError(
                "`num_oov_indices` must be greater than or equal to 0. "
                f"Received: num_oov_indices={num_oov_indices}"
            )
        # Support deprecated names for output_modes.
        if output_mode == "binary":
            output_mode = "multi_hot"
        if output_mode == "tf-idf":
            output_mode = "tf_idf"
        argument_validation.validate_string_arg(
            output_mode,
            allowable_strings=(
                "int",
                "one_hot",
                "multi_hot",
                "count",
                "tf_idf",
            ),
            caller_name=self.__class__.__name__,
            arg_name="output_mode",
        )
        if invert and output_mode != "int":
            raise ValueError(
                "`output_mode` must be `'int'` when `invert` is true. "
                f"Received: output_mode={output_mode}"
            )
        if sparse and output_mode == "int":
            raise ValueError(
                "`sparse` may only be true if `output_mode` is "
                "`'one_hot'`, `'multi_hot'`, `'count'` or `'tf_idf'`. "
                f"Received: sparse={sparse} and "
                f"output_mode={output_mode}"
            )
        if idf_weights is not None and output_mode != "tf_idf":
            raise ValueError(
                "`idf_weights` should only be set if `output_mode` is "
                f"`'tf_idf'`. Received: idf_weights={idf_weights} and "
                f"output_mode={output_mode}"
            )
        # --- Store configuration ---
        super().__init__(name=name)
        self._convert_input_args = False
        self._allow_non_tensor_positional_args = True
        self.supports_jit = False
        self.invert = invert
        self.max_tokens = max_tokens
        self.num_oov_indices = num_oov_indices
        self.mask_token = mask_token
        self.oov_token = oov_token
        self.output_mode = output_mode
        self.sparse = sparse
        self.pad_to_max_tokens = pad_to_max_tokens
        self.vocabulary_dtype = tf.as_dtype(vocabulary_dtype).name
        self._frozen_vocab_size = kwargs.pop("vocabulary_size", None)
        self.input_vocabulary = vocabulary
        self.input_idf_weights = idf_weights
        # We set this hidden attr to
        # persist the fact that we have a non-adaptable layer with a
        # manually set vocab.
        self._has_input_vocabulary = kwargs.pop(
            "has_input_vocabulary", (vocabulary is not None)
        )
        kwargs.pop("trainable", None)
        kwargs.pop("dtype", None)
        if kwargs:
            raise ValueError(f"Unrecognized keyword argument(s): {kwargs}")
        # --- Key/value dtypes and special-token mapping ---
        if invert:
            # Inverse lookup: int64 indices in, vocabulary terms out.
            self._key_dtype = "int64"
            self._value_dtype = self.vocabulary_dtype
            mask_key = 0
            mask_value = mask_token
            self._default_value = self.oov_token
        else:
            self._key_dtype = self.vocabulary_dtype
            self._value_dtype = "int64"
            mask_key = mask_token
            # Masks should map to 0 for int output and be dropped otherwise. Max
            # ints will be dropped from the bincount op.
            mask_value = (
                0
                if self.output_mode == "int"
                else tf.as_dtype(self._value_dtype).max
            )
            if self.num_oov_indices == 0:
                # If there are no OOV indices, we map OOV tokens to -1 and error
                # out during call if we find a negative index.
                self._default_value = -1
            elif self.num_oov_indices == 1:
                # If there is only one OOV index, we can set that index as the
                # default value of the index_lookup table.
                self._default_value = self._oov_start_index()
            else:
                # If we have multiple OOV values, we need to do a further
                # hashing step; to make this easier, we set the OOV value to -1.
                # (This lets us do a vectorized add and cast to boolean to
                # determine locations where we need to do extra hashing.)
                self._default_value = -1
        if self.mask_token is not None:
            self._mask_key = tf.convert_to_tensor(mask_key, self._key_dtype)
            self._mask_value = tf.convert_to_tensor(
                mask_value, self._value_dtype
            )
        # --- Optional TF-IDF weights supplied up front ---
        if self.output_mode == "tf_idf":
            if self._has_input_vocabulary and idf_weights is None:
                raise ValueError(
                    "When specifying the `vocabulary` argument, "
                    "in TF-IDF output mode, the `idf_weights` argument "
                    "must also be provided."
                )
            if idf_weights is not None:
                self.idf_weights = tf.Variable(
                    idf_weights,
                    dtype=backend.floatx(),
                    trainable=False,
                )
                self.idf_weights_const = self.idf_weights.value()
        # --- Static vocabulary, or uninitialized table for later restore ---
        if vocabulary is not None:
            self.set_vocabulary(vocabulary, idf_weights)
        else:
            # When restoring from a keras SavedModel, the loading code will
            # expect to find and restore a lookup_table attribute on the layer.
            # This table needs to be uninitialized as a StaticHashTable cannot
            # be initialized twice.
            self.lookup_table = self._uninitialized_lookup_table()
        # Only set up adapt state if we did not receive a vocab on construction.
        if not self._has_input_vocabulary:
            # Set adapt state.
            self.token_counts = tf.lookup.experimental.MutableHashTable(
                key_dtype=vocabulary_dtype,
                value_dtype="int64",
                default_value=0,
            )
            if self.output_mode == "tf_idf":
                self.token_document_counts = (
                    tf.lookup.experimental.MutableHashTable(
                        key_dtype=vocabulary_dtype,
                        value_dtype="int64",
                        default_value=0,
                    )
                )
                self.num_documents = tf.Variable(
                    0, dtype="int64", trainable=False
                )
    def get_vocabulary(self, include_special_tokens=True):
        """Returns the current vocabulary of the layer.

        Args:
            include_special_tokens: If `True`, the returned vocabulary
                will include mask and OOV tokens,
                and a term's index in the vocabulary
                will equal the term's index when calling the layer.
                If `False`, the returned vocabulary will not include
                any mask or OOV tokens.
        """
        # The lookup table data will not be sorted, so we will create an
        # inverted lookup here, and use that to lookup a range of indices
        # [0, vocab_size).
        if self.lookup_table.size() == 0:
            vocab, indices = [], []
        else:
            keys, values = self.lookup_table.export()
            vocab, indices = (values, keys) if self.invert else (keys, values)
            vocab, indices = (
                self._tensor_vocab_to_numpy(vocab),
                indices.numpy(),
            )
        # Any index missing from the table falls back to the OOV token.
        lookup = collections.defaultdict(
            lambda: self.oov_token, zip(indices, vocab)
        )
        vocab = [lookup[x] for x in range(self.vocabulary_size())]
        if self.mask_token is not None and self.output_mode == "int":
            # Index 0 is reserved for the mask token in "int" mode.
            vocab[0] = self.mask_token
        if not include_special_tokens:
            vocab = vocab[self._token_start_index() :]
        if self.vocabulary_dtype == "string":
            # String tables export bytes; decode to Python strings.
            return [
                i.decode("utf-8") if isinstance(i, bytes) else i for i in vocab
            ]
        else:
            return vocab
def vocabulary_size(self):
"""Gets the current size of the layer's vocabulary.
Returns:
The integer size of the vocabulary, including optional mask and oov
indices.
"""
if tf.executing_eagerly():
return (
int(self.lookup_table.size().numpy())
+ self._token_start_index()
)
else:
return self.lookup_table.size() + self._token_start_index()
def get_config(self):
config = {
"invert": self.invert,
"max_tokens": self.max_tokens,
"num_oov_indices": self.num_oov_indices,
"oov_token": self.oov_token,
"mask_token": self.mask_token,
"output_mode": self.output_mode,
"sparse": self.sparse,
"pad_to_max_tokens": self.pad_to_max_tokens,
"vocabulary_dtype": self.vocabulary_dtype,
"idf_weights": listify_tensors(self.input_idf_weights),
"vocabulary": listify_tensors(self.input_vocabulary),
"vocabulary_size": self._frozen_vocab_size,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
    def _record_vocabulary_size(self):
        # Snapshot the current vocabulary size into _frozen_vocab_size.
        # NOTE(review): _ensure_vocab_size_unchanged is defined outside this
        # chunk — presumably it errors if an already-frozen size would change.
        self._ensure_vocab_size_unchanged()
        with tf.init_scope():
            self._frozen_vocab_size = self.vocabulary_size()
    def set_vocabulary(self, vocabulary, idf_weights=None):
        """Sets vocabulary (and optionally document frequency) for this layer.

        This method sets the vocabulary and idf weights for this layer directly,
        instead of analyzing a dataset through `adapt`. It should be used
        whenever the vocab (and optionally document frequency) information is
        already known. If vocabulary data is already present in the layer, this
        method will replace it.

        Args:
            vocabulary: Either an array or a string path to a text file.
                If passing an array, can pass a tuple, list,
                1D numpy array, or 1D tensor containing the vocabulary terms.
                If passing a file path, the file should contain one line
                per term in the vocabulary.
            idf_weights: A tuple, list, 1D numpy array, or 1D tensor
                of inverse document frequency weights with equal
                length to vocabulary. Must be set if `output_mode`
                is `"tf_idf"`. Should not be set otherwise.

        Raises:
            ValueError: If the vocabulary/idf_weights combination is invalid
                for the configured `output_mode`, the vocabulary is empty or
                has repeats, or special tokens appear out of place.
            RuntimeError: If tensor inputs are passed outside eager execution.
        """
        if self.output_mode == "tf_idf":
            if idf_weights is None:
                raise ValueError(
                    "`idf_weights` must be set if output_mode is 'tf_idf'."
                )
        elif idf_weights is not None:
            raise ValueError(
                "`idf_weights` should only be set if output_mode is "
                f"`'tf_idf'`. Received: output_mode={self.output_mode} "
                f"and idf_weights={idf_weights}"
            )
        # File path: build a file-backed static table and return early.
        if isinstance(vocabulary, str):
            if not tf.io.gfile.exists(vocabulary):
                raise ValueError(
                    f"Vocabulary file {vocabulary} does not exist."
                )
            if self.output_mode == "tf_idf":
                raise ValueError(
                    "output_mode `'tf_idf'` does not support loading a "
                    "vocabulary from file."
                )
            self.lookup_table = self._lookup_table_from_file(vocabulary)
            self._record_vocabulary_size()
            return
        if not tf.executing_eagerly() and (
            tf.is_tensor(vocabulary) or tf.is_tensor(idf_weights)
        ):
            raise RuntimeError(
                f"Cannot set a tensor vocabulary on layer {self.name} "
                "when not executing eagerly. "
                "Create this layer or call `set_vocabulary()` "
                "outside of any traced function."
            )
        # TODO(mattdangerw): for better performance we should rewrite this
        # entire function to operate on tensors and convert vocabulary to a
        # tensor here.
        if tf.is_tensor(vocabulary):
            vocabulary = self._tensor_vocab_to_numpy(vocabulary)
        elif isinstance(vocabulary, (list, tuple)):
            vocabulary = np.array(vocabulary)
        if tf.is_tensor(idf_weights):
            idf_weights = idf_weights.numpy()
        elif isinstance(idf_weights, (list, tuple)):
            idf_weights = np.array(idf_weights)
        if vocabulary.size == 0:
            raise ValueError(
                "Cannot set an empty vocabulary. "
                f"Received: vocabulary={vocabulary}"
            )
        # Detect whether the user already prefixed the vocabulary with the
        # expected [mask] + [oov] * num_oov_indices special tokens.
        oov_start = self._oov_start_index()
        token_start = self._token_start_index()
        special_tokens = [self.mask_token] * oov_start + [
            self.oov_token
        ] * self.num_oov_indices
        found_special_tokens = np.array_equal(
            special_tokens, vocabulary[:token_start]
        )
        if found_special_tokens:
            tokens = vocabulary[token_start:]
        else:
            tokens = vocabulary
        repeated_tokens = self._find_repeated_tokens(tokens)
        if repeated_tokens:
            raise ValueError(
                "The passed vocabulary has at least one repeated "
                "term. Please uniquify your dataset. The repeated terms "
                f"are: {repeated_tokens}"
            )
        if self.mask_token is not None and self.mask_token in tokens:
            mask_index = np.argwhere(vocabulary == self.mask_token)[-1]
            raise ValueError(
                "Found reserved mask token at unexpected location in "
                "`vocabulary`. Note that passed `vocabulary` does not need to "
                "include the OOV and mask tokens. Either remove all mask and "
                "OOV tokens, or include them only at the start of the "
                f"vocabulary in precisely this order: {special_tokens}. "
                f"Received: mask_token={self.mask_token} at "
                f"vocabulary index {mask_index}"
            )
        # Only error out for oov_token when invert=True. When invert=False,
        # oov_token is unused during lookup.
        if (
            self.oov_token is not None
            and self.invert
            and self.oov_token in tokens
        ):
            oov_index = np.argwhere(vocabulary == self.oov_token)[-1]
            raise ValueError(
                "Found reserved OOV token at unexpected location in "
                "`vocabulary`. Note that passed `vocabulary` does not need to "
                "include the OOV and mask tokens. Either remove all mask and "
                "OOV tokens, or include them only at the start of the "
                f"vocabulary in precisely this order: {special_tokens}. "
                f"Received: oov_token={self.oov_token} at "
                f"vocabulary index {oov_index}"
            )
        new_vocab_size = token_start + len(tokens)
        if self.max_tokens is not None and (new_vocab_size > self.max_tokens):
            raise ValueError(
                "Attempted to set a vocabulary larger than the maximum vocab "
                f"size. Received vocabulary size is {new_vocab_size}; "
                f"`max_tokens` is {self.max_tokens}."
            )
        self.lookup_table = self._lookup_table_from_tokens(tokens)
        self._record_vocabulary_size()
        if self.output_mode == "tf_idf" and idf_weights is not None:
            if len(vocabulary) != len(idf_weights):
                raise ValueError(
                    "`idf_weights` must be the same length as vocabulary. "
                    f"len(idf_weights) is {len(idf_weights)}; "
                    f"len(vocabulary) is {len(vocabulary)}"
                )
            idf_weights = self._convert_to_ndarray(idf_weights)
            if idf_weights.ndim != 1:
                raise ValueError(
                    "TF-IDF data must be a 1-index array. "
                    f"Received: type(idf_weights)={type(idf_weights)}"
                )
            # If the passed vocabulary has no special tokens, we need to pad the
            # front of idf_weights. We don't have real document frequencies for
            # these tokens so we will use an average of all idf_weights passed
            # in as a reasonable default.
            if found_special_tokens:
                front_padding = 0
                front_padding_value = 0
            else:
                front_padding = token_start
                front_padding_value = np.average(idf_weights)
            # If pad_to_max_tokens is true, and max_tokens is greater than our
            # total vocab size, we need to pad the back of idf_weights with
            # zeros as well.
            back_padding_value = 0
            if self.pad_to_max_tokens and self.max_tokens is not None:
                back_padding = (
                    self.max_tokens - front_padding - len(idf_weights)
                )
            else:
                back_padding = 0
            weights = np.pad(
                idf_weights,
                (front_padding, back_padding),
                "constant",
                constant_values=(front_padding_value, back_padding_value),
            )
            weights = tf.convert_to_tensor(weights, dtype=backend.floatx())
            self.idf_weights = tf.Variable(
                weights,
                trainable=False,
            )
            self.idf_weights_const = self.idf_weights.value()
    def build(self):
        # No weights to create here — lookup state is set up in __init__ /
        # set_vocabulary — so building just marks the layer as built.
        self.built = True
    def get_build_config(self):
        # No shape information is needed to rebuild this layer.
        return {}
    def build_from_config(self, config):
        # Build ignores shape info entirely, so the config is unused.
        self.build()
    @property
    def compute_dtype(self):
        # The layer operates on vocabulary terms, so its compute dtype is the
        # vocabulary dtype rather than a float dtype.
        return self.vocabulary_dtype
    @property
    def variable_dtype(self):
        # Layer variables share the vocabulary dtype as well.
        return self.vocabulary_dtype
def compute_output_shape(self, input_shape):
if self.output_mode == "int":
return input_shape
depth = (
self.max_tokens
if self.pad_to_max_tokens
else self._frozen_vocab_size
)
return (input_shape[0], depth)
def compute_output_spec(self, inputs):
if self.output_mode == "int":
output_dtype = "int64"
else:
output_dtype = backend.floatx()
output_shape = self.compute_output_shape(inputs.shape)
return backend.KerasTensor(output_shape, dtype=output_dtype)
def adapt(self, data, steps=None):
self.reset_state()
if isinstance(data, tf.data.Dataset):
if steps is not None:
data = data.take(steps)
for batch in data:
self.update_state(batch)
else:
data = tf_utils.ensure_tensor(data, dtype=self.vocabulary_dtype)
if data.shape.rank == 1:
# A plain list of strings
# is treated as as many documents
data = tf.expand_dims(data, -1)
self.update_state(data)
self.finalize_state()
    def update_state(self, data):
        """Accumulate token counts (and, for tf_idf, per-document counts)
        from one batch of data."""
        if self._has_input_vocabulary:
            raise ValueError(
                f"Cannot adapt layer '{self.name}' after setting a static "
                "vocabulary via `vocabulary` argument or "
                "`set_vocabulary()` method."
            )
        data = tf_utils.ensure_tensor(data, dtype=self.vocabulary_dtype)
        if data.shape.rank == 0:
            data = tf.expand_dims(data, 0)
        if data.shape.rank == 1:
            # Expand dims on axis 0 for tf-idf. A 1-d tensor
            # is a single document.
            data = tf.expand_dims(data, 0)
        # Running token totals: new counts are added onto the stored counts.
        tokens, counts = self._num_tokens(data)
        self.token_counts.insert(
            tokens, counts + self.token_counts.lookup(tokens)
        )
        if self.output_mode == "tf_idf":
            # Dedupe each row of our dataset.
            if isinstance(data, tf.RaggedTensor):
                deduped_doc_data = tf.map_fn(lambda x: tf.unique(x)[0], data)
            else:
                deduped_doc_data = [tf.unique(x)[0] for x in data]
            deduped_doc_data = tf.concat(deduped_doc_data, axis=0)
            # Flatten and count tokens.
            tokens, counts = self._num_tokens(deduped_doc_data)
            self.token_document_counts.insert(
                tokens, counts + self.token_document_counts.lookup(tokens)
            )
            # Track the total number of documents seen so far.
            if isinstance(data, tf.RaggedTensor):
                self.num_documents.assign_add(data.nrows())
            else:
                self.num_documents.assign_add(
                    tf.shape(data, out_type="int64")[0]
                )
    def finalize_state(self):
        """Freeze the adapted counts into the lookup table and idf weights."""
        if self._has_input_vocabulary or tf.equal(self.token_counts.size(), 0):
            # Finalize idf_weights to a const for call even if we don't need to
            # compute a new vocabulary.
            if self.output_mode == "tf_idf":
                self.idf_weights_const = self.idf_weights.value()
            self._record_vocabulary_size()
            return
        # Remove special tokens from our counts.
        if self.mask_token is not None:
            self.token_counts.remove(
                tf.convert_to_tensor([self.mask_token], self.vocabulary_dtype)
            )
        if self.oov_token is not None:
            self.token_counts.remove(
                tf.convert_to_tensor([self.oov_token], self.vocabulary_dtype)
            )
        tokens, counts = self.token_counts.export()
        # To keep vocabs deterministic, we sort our tokens by count and break
        # ties by sorting the tokens themselves. Tensorflow has no ops for
        # sorting strings, so we need to use numpy for the sort.
        sorted_indices = np.lexsort((tokens.numpy(), counts.numpy()))[::-1]
        token_start = self._token_start_index()
        if self.max_tokens:
            # Keep only the most frequent tokens that fit under max_tokens.
            max_learned_tokens = self.max_tokens - token_start
            sorted_indices = sorted_indices[:max_learned_tokens]
        tokens = tf.gather(tokens, sorted_indices)
        self.lookup_table = self._lookup_table_from_tokens(tokens)
        if self.output_mode == "tf_idf":
            token_document_counts = self.token_document_counts.lookup(tokens)
            idf_weights = self._inverse_document_frequency(
                token_document_counts, self.num_documents
            )
            idf_weights = tf.cast(idf_weights, backend.floatx())
            # Pad the front of idf_weights with the average idf weight for OOV
            # tokens. We cannot compute the real idf weight of OOV in a single
            # pass.
            idf_weights = tf.pad(
                idf_weights,
                [[self._token_start_index(), 0]],
                constant_values=tf.reduce_mean(idf_weights),
            )
            if self.pad_to_max_tokens and self.max_tokens is not None:
                # Pad the back of idf_weights with zeros.
                idf_weights = tf.pad(
                    idf_weights,
                    [[0, self.max_tokens - tf.size(idf_weights)]],
                    constant_values=0,
                )
            self.idf_weights = tf.Variable(
                idf_weights,
                dtype=backend.floatx(),
                trainable=False,
            )
            self.idf_weights_const = self.idf_weights.value()
        # We call this here to save memory, now that we've built our vocabulary,
        # we don't want to keep every token we've seen in separate lookup
        # tables.
        self.reset_state()
        self._record_vocabulary_size()
def reset_state(self):
if self._has_input_vocabulary:
return
self.token_counts.remove(self.token_counts.export()[0])
if self.output_mode == "tf_idf":
self.token_document_counts.remove(
self.token_document_counts.export()[0]
)
self.num_documents.assign(0)
    def call(self, inputs):
        """Look up `inputs` in the table and encode per `output_mode`."""
        from keras.src.backend import tensorflow as tf_backend
        self._ensure_known_vocab_size()
        inputs = tf_utils.ensure_tensor(inputs, dtype=self._key_dtype)
        original_shape = inputs.shape
        # Some ops will not handle scalar input, so uprank to rank 1.
        if inputs.shape.rank == 0:
            inputs = self._expand_dims(inputs, -1)
        # Sparse/ragged inputs: look up only the stored values, preserving
        # the original structure.
        if isinstance(inputs, tf.SparseTensor):
            lookups = tf.SparseTensor(
                inputs.indices,
                self._lookup_dense(inputs.values),
                inputs.dense_shape,
            )
        elif isinstance(inputs, tf.RaggedTensor):
            lookups = tf.ragged.map_flat_values(self._lookup_dense, inputs)
        else:
            lookups = self._lookup_dense(inputs)
        if self.output_mode == "int":
            # If we received a scalar input, downrank back to a scalar.
            if original_shape.rank == 0:
                lookups = tf.squeeze(lookups, -1)
            return lookups
        depth = (
            self.max_tokens
            if self.pad_to_max_tokens
            else self._frozen_vocab_size
        )
        idf_weights = (
            self.idf_weights_const if self.output_mode == "tf_idf" else None
        )
        # tf_idf is computed as a raw count encoding scaled by idf weights.
        output = numerical_utils.encode_categorical_inputs(
            lookups,
            output_mode=(
                "count" if self.output_mode == "tf_idf" else self.output_mode
            ),
            depth=depth,
            dtype=self._value_dtype,
            sparse=self.sparse,
            backend_module=tf_backend,
        )
        if self.output_mode == "tf_idf":
            if idf_weights is None:
                raise ValueError(
                    "When `output_mode` is `'tf_idf'`, `idf_weights` must be "
                    "provided."
                )
            output = tf_backend.numpy.multiply(
                tf_backend.core.cast(output, idf_weights.dtype), idf_weights
            )
        return output
def _lookup_dense(self, inputs):
    """Lookup table values for a dense Tensor, handling masking and OOV."""
    # When executing eagerly and tracing keras.Input objects,
    # do not call lookup.
    # This is critical for restoring SavedModel, which will first trace
    # layer.call and then attempt to restore the table. We need the table to
    # be uninitialized for the restore to work, but calling the table
    # uninitialized would error.
    if tf.executing_eagerly() and backend.is_keras_tensor(inputs):
        lookups = tf.zeros_like(inputs, dtype=self._value_dtype)
    else:
        lookups = self.lookup_table.lookup(inputs)
    if self.mask_token is not None:
        # Force mask-token positions to their reserved output value.
        mask_locations = tf.equal(inputs, self._mask_key)
        lookups = tf.where(mask_locations, self._mask_value, lookups)
    if self.invert:
        # Index -> token mode: no OOV hashing or validation needed.
        return lookups
    lookup_checks = []
    if self.num_oov_indices == 0:
        # If we have zero oov indices, we need to check for oov inputs.
        oov_indices = tf.where(tf.equal(lookups, -1))
        oov_inputs = tf.gather_nd(inputs, oov_indices)
        msg = tf.strings.format(
            "When `num_oov_indices=0` all inputs should be in vocabulary, "
            "found OOV values {}, consider setting `num_oov_indices=1`.",
            (oov_inputs,),
        )
        assertion = tf.Assert(tf.equal(tf.size(oov_indices), 0), [msg])
        lookup_checks.append(assertion)
    elif self.num_oov_indices > 1:
        # If we have multiple oov indices, we need a further hashing step.
        if tf.as_dtype(self._key_dtype).is_integer:
            # Integer keys: cheap modulo instead of a string hash.
            oov_indices = tf.math.floormod(inputs, self.num_oov_indices)
        else:
            oov_indices = tf.strings.to_hash_bucket_fast(
                inputs, num_buckets=self.num_oov_indices
            )
        oov_indices = oov_indices + self._oov_start_index()
        # Table misses return the default value; replace them with the
        # hashed OOV slot computed above.
        oov_locations = tf.equal(lookups, self._default_value)
        lookups = tf.where(oov_locations, oov_indices, lookups)
    # Attach the (possibly empty) validity assertions to the result.
    with tf.control_dependencies(lookup_checks):
        return tf.identity(lookups)
def save_own_variables(self, store):
    """Persist the layer's extra variables into `store`.

    Only the "tf_idf" output mode carries extra state (the IDF weight
    vector); every other mode has nothing to save.
    """
    if self.output_mode != "tf_idf":
        return
    store["idf_weights"] = self.idf_weights_const.numpy()
def load_own_variables(self, store):
    """Restore the extra variables written by `save_own_variables`."""
    if self.output_mode == "tf_idf":
        self.idf_weights.assign(store["idf_weights"])
        # Keep the constant snapshot in sync with the variable.
        self.idf_weights_const = self.idf_weights.value()
def save_assets(self, dir_path):
    """Write the adapted vocabulary to `vocabulary.txt` under `dir_path`.

    No-op when the vocabulary was passed to the constructor, since it is
    already serialized in the layer config.
    """
    if self.input_vocabulary is not None:
        # Vocab saved in config.
        # TODO: consider unifying both paths.
        return
    vocabulary = self.get_vocabulary(include_special_tokens=True)
    vocabulary_filepath = tf.io.gfile.join(dir_path, "vocabulary.txt")
    # Write UTF-8 explicitly: tokens may contain non-ASCII characters and
    # the platform-default encoding is not portable (e.g. cp1252 on
    # Windows would fail or corrupt the vocabulary file).
    with open(vocabulary_filepath, "w", encoding="utf-8") as f:
        f.write("\n".join([str(w) for w in vocabulary]))
def load_assets(self, dir_path):
    """Reload the vocabulary written by `save_assets` from `dir_path`.

    No-op when the vocabulary was passed to the constructor, since it is
    restored from the layer config instead.
    """
    if self.input_vocabulary is not None:
        # Vocab saved in config.
        # TODO: consider unifying both paths.
        return
    vocabulary_filepath = tf.io.gfile.join(dir_path, "vocabulary.txt")
    # TODO: fix bug with include_special_tokens and set reload from file.
    # Read with explicit UTF-8 to mirror save_assets and stay portable
    # across platform-default encodings.
    with open(vocabulary_filepath, "r", encoding="utf-8") as f:
        lines = f.read().split("\n")
        if tf.as_dtype(self.vocabulary_dtype) == tf.string:
            values = [str(line) for line in lines]
        else:
            values = [int(line) for line in lines]
        if self.output_mode == "tf_idf":
            self.set_vocabulary(values, idf_weights=False)
        else:
            self.set_vocabulary(values)
def _uninitialized_lookup_table(self):
    """Build an empty placeholder table, used before adapt/restore.

    init_scope lifts the table creation out of any surrounding
    tf.function trace so it is created eagerly, once.
    """
    with tf.init_scope():
        initializer = get_null_initializer(
            self._key_dtype, self._value_dtype
        )
        return tf.lookup.StaticHashTable(initializer, self._default_value)
def _lookup_table_from_tokens(self, tokens):
    """Build a static token<->index table from an in-memory token list."""
    with tf.init_scope():
        # Real tokens are offset past the reserved mask and OOV slots.
        token_start = self._token_start_index()
        token_end = token_start + tf.size(tokens)
        indices_dtype = (
            self._key_dtype if self.invert else self._value_dtype
        )
        indices = tf.range(token_start, token_end, dtype=indices_dtype)
        # Inverted layers map index -> token instead of token -> index.
        keys, values = (
            (indices, tokens) if self.invert else (tokens, indices)
        )
        initializer = tf.lookup.KeyValueTensorInitializer(
            keys, values, self._key_dtype, self._value_dtype
        )
        return tf.lookup.StaticHashTable(initializer, self._default_value)
def _lookup_table_from_file(self, filename):
    """Build a static token<->index table backed by a vocabulary file.

    Each line of the file is one token; its line number (offset past the
    reserved mask/OOV slots) is its index.
    """
    if self.invert:
        # index -> token: key is the line number, value is the line text.
        key_index = tf.lookup.TextFileIndex.LINE_NUMBER
        value_index = tf.lookup.TextFileIndex.WHOLE_LINE
    else:
        # token -> index: key is the line text, value is the line number.
        key_index = tf.lookup.TextFileIndex.WHOLE_LINE
        value_index = tf.lookup.TextFileIndex.LINE_NUMBER
    with tf.init_scope():
        initializer = tf.lookup.TextFileInitializer(
            filename=filename,
            key_dtype=self._key_dtype,
            key_index=key_index,
            value_dtype=self._value_dtype,
            value_index=value_index,
            # Shift indices so line 0 maps past mask/OOV slots.
            value_index_offset=self._token_start_index(),
        )
        return tf.lookup.StaticHashTable(initializer, self._default_value)
def _convert_to_ndarray(self, x):
return np.array(x) if isinstance(x, (list, tuple)) else x
def _expand_dims(self, inputs, axis):
    """Add a dimension at `axis`, dispatching on sparse vs dense input."""
    # tf.expand_dims does not accept SparseTensor; use the sparse variant.
    if isinstance(inputs, tf.SparseTensor):
        return tf.sparse.expand_dims(inputs, axis)
    else:
        return tf.expand_dims(inputs, axis)
def _oov_start_index(self):
return (
1
if self.mask_token is not None and self.output_mode == "int"
else 0
)
def _token_start_index(self):
return self._oov_start_index() + self.num_oov_indices
def _ensure_known_vocab_size(self):
if self.output_mode == "int" or self.pad_to_max_tokens:
return
if self._frozen_vocab_size is None:
raise RuntimeError(
f"When using `output_mode={self.output_mode}` "
"and `pad_to_max_tokens=False`, "
"you must set the layer's vocabulary before calling it. Either "
"pass a `vocabulary` argument to the layer, or call `adapt` "
"with some sample data."
)
def _ensure_vocab_size_unchanged(self):
    """Raise if the vocabulary size changed after it was frozen.

    Once the layer has been called with a size-dependent `output_mode`,
    the encoding depth is fixed; growing or shrinking the vocabulary
    afterwards would silently change output shapes.
    """
    if self.output_mode == "int" or self.pad_to_max_tokens:
        return
    with tf.init_scope():
        new_vocab_size = self.vocabulary_size()
        if (
            self._frozen_vocab_size is not None
            and new_vocab_size != self._frozen_vocab_size
        ):
            raise RuntimeError(
                f"When using `output_mode={self.output_mode}` "
                "and `pad_to_max_tokens=False`, "
                "the vocabulary size cannot be changed after the layer is "
                f"called. Old vocab size is {self._frozen_vocab_size}, "
                f"new vocab size is {new_vocab_size}"
            )
def _find_repeated_tokens(self, vocabulary):
"""Return all repeated tokens in a vocabulary."""
vocabulary_set = set(vocabulary)
if len(vocabulary) != len(vocabulary_set):
return [
item
for item, count in collections.Counter(vocabulary).items()
if count > 1
]
else:
return []
def _num_tokens(self, data):
    """Count the number of tokens in a ragged, sparse or dense tensor."""
    # Flatten whichever container we got into a 1-D values tensor.
    if isinstance(data, tf.SparseTensor):
        flat_values = data.values
    elif isinstance(data, tf.RaggedTensor):
        flat_values = data.flat_values
    else:
        flat_values = tf.reshape(data, [-1])
    # (unique tokens, per-token counts); the inverse indices are unused.
    tokens, _, counts = tf.unique_with_counts(flat_values, out_idx="int64")
    return tokens, counts
def _inverse_document_frequency(self, token_document_counts, num_documents):
    """Computes the inverse-document-frequency (IDF) component of "tf_idf".

    Args:
        token_document_counts: An array of the # of documents each token
            appears in.
        num_documents: An int representing the total number of documents

    Returns:
        An array of "inverse document frequency" weights.
    """
    # Smoothed IDF: both numerator and denominator are offset by 1 so
    # unseen tokens and empty corpora stay finite.
    ratio = num_documents / (1 + token_document_counts)
    return tf.math.log(1 + ratio)
# Override points for IntegerLookup and StringLookup.
def _tensor_vocab_to_numpy(self, vocabulary):
    """Converts a tensor vocabulary to a numpy vocabulary.

    Subclasses override this to post-process tokens (e.g. decode bytes).
    """
    return vocabulary.numpy()
def get_null_initializer(key_dtype, value_dtype):
    """Return a no-op table initializer for the given dtypes.

    Used to construct an empty StaticHashTable whose contents will be
    filled in later (e.g. restored from a SavedModel).
    """

    class NullInitializer(tf.lookup.KeyValueTensorInitializer):
        """A placeholder initializer for restoring from a SavedModel."""

        def __init__(self, key_dtype, value_dtype):
            """Construct a table initializer object.

            Args:
                key_dtype: Type of the table keys.
                value_dtype: Type of the table values.
            """
            # Deliberately does not call super().__init__: there is no
            # key/value data to initialize from.
            self._key_dtype = key_dtype
            self._value_dtype = value_dtype

        @property
        def key_dtype(self):
            """The expected table key dtype."""
            return self._key_dtype

        @property
        def value_dtype(self):
            """The expected table value dtype."""
            return self._value_dtype

        def initialize(self, table):
            """Returns the table initialization op."""
            # Intentionally a no-op: the table starts empty.
            pass

    return NullInitializer(key_dtype, value_dtype)
def listify_tensors(x):
    """Convert any tensors or numpy arrays to lists for config serialization."""
    if tf.is_tensor(x):
        x = x.numpy()
    return x.tolist() if isinstance(x, np.ndarray) else x
|
keras-teamREPO_NAMEkerasPATH_START.@keras_extracted@keras-master@keras@src@layers@preprocessing@index_lookup.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "sdss/marvin",
"repo_path": "marvin_extracted/marvin-main/tests/misc/__init__.py",
"type": "Python"
}
|
sdssREPO_NAMEmarvinPATH_START.@marvin_extracted@marvin-main@tests@misc@__init__.py@.PATH_END.py
|
|
{
"filename": "run_Gaussian_beam.py",
"repo_name": "JLBLine/WODEN",
"repo_path": "WODEN_extracted/WODEN-master/docs/sphinx/operating_principles/run_Gaussian_beam.py",
"type": "Python"
}
|
from astropy.io import fits
import numpy as np
from astropy.wcs import WCS
from copy import deepcopy
import erfa
from subprocess import call
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
def add_colourbar(fig=None,ax=None,im=None,label=False,top=False):
    """
    Adds a colourbar (colorbar, fine) in a nice way to a subplot

    Parameters
    ----------
    fig : matplotlib.pyplot.figure instance
        The figure that the plot lives on
    ax : matplotlib.pyplot.figure.add_subplot instance
        The axis to append a colorbar to
    im : ax.imshow output
        The output of imshow to base the colourbar on
    label : string
        Optional - add a label to the colorbar
    top : Bool
        Optional - put the colorbar above the axis instead of to the right
    """
    divider = make_axes_locatable(ax)
    # `top` is documented as a bool, so test truthiness directly
    # rather than the non-idiomatic `top == True`.
    if top:
        # Horizontal bar above the axis, with ticks/label on top.
        cax = divider.append_axes("top", size="5%", pad=0.05)
        cbar = fig.colorbar(im, cax=cax, orientation='horizontal')
        cax.xaxis.set_ticks_position('top')
        cax.xaxis.set_label_position('top')
    else:
        cax = divider.append_axes("right", size="5%", pad=0.05)
        cbar = fig.colorbar(im, cax=cax)
    if label:
        cbar.set_label(label)
D2R = np.pi / 180.0

##Number of pixels per image axis; 301 seems to cover the full sky nicely
nside = 301
##Local sidereal time of the observation (deg)
lst_deg = 0.0

##Setup a dummy FITS header with appropriate settings
header = fits.Header()
cpix = int(nside // 2)
##Pixel scale chosen so the image spans the full sky
##(the previous `cdelt = 0.25` was dead code, immediately overwritten)
cdelt = 125 / nside

header['NAXIS'] = 2
header['NAXIS1'] = nside
header['NAXIS2'] = nside
header['CTYPE1'] = 'RA---SIN'
header['CRPIX1'] = cpix
header['CRVAL1'] = lst_deg
header['CDELT1'] = cdelt
header['CUNIT1'] = 'deg '
header['CTYPE2'] = 'DEC--SIN'
header['CRPIX2'] = cpix
header['CRVAL2'] = -26.7
header['CDELT2'] = cdelt
header['CUNIT2'] = 'deg '

##Make a world coord system
wcs = WCS(header)

##Set up x/y pixels that cover the whole image
x_mesh, y_mesh = np.meshgrid(np.arange(nside), np.arange(nside))
x_pixels = x_mesh.flatten()
y_pixels = y_mesh.flatten()

##convert to ra, dec
ras, decs = wcs.all_pix2world(x_pixels, y_pixels, 0.0)

##Then use erfa to convert these values into azs, els
has = lst_deg - ras

##use this erfa function to convert to azimuth and elevation
##were using degrees, but erfa uses rads, so convert here
az_grid, els = erfa.hd2ae(has*D2R, decs*D2R, -26.7*D2R)

##convert elevation to zenith angle
za_grid = np.pi/2 - els

##Only feed az/za above the horizon to save on CUDA memory
##write out the az/za to feed into the C/CUDA code
##BUGFIX: use ONE horizon mask for both arrays - the original used
##`<=` for `has` but `<` for `decs`, which could write mismatched pairs
above_horizon = za_grid <= np.pi/2
with open('ha-dec_values.txt','w') as outfile:
    for ha, dec in zip(has[above_horizon], decs[above_horizon]):
        outfile.write("{:.8f} {:.8f}\n".format(ha*D2R, dec*D2R))
def reshape_and_plot(data, ax, label, fig, vmin=False, vmax=False):
    """Scatter `data` (flattened above-horizon pixels) back onto the full
    nside x nside grid and imshow it on `ax`; below-horizon pixels stay NaN.

    Relies on the module-level `nside` and `za_grid`. `vmin`/`vmax`
    default to False (not None) to mean "autoscale with the gnuplot cmap".
    """
    square_plot = np.zeros(nside*nside)*np.nan
    # Place the 1-D data back at the above-horizon positions.
    square_plot[za_grid <= np.pi/2] = data
    square_plot.shape = (nside, nside)
    if vmin is not False and vmax is not False:
        im = ax.imshow(square_plot, origin='lower', vmin=vmin, vmax=vmax)
    else:
        im = ax.imshow(square_plot, origin='lower',cmap='gnuplot')
    add_colourbar(ax=ax, fig=fig, im=im)
    ax.set_title(label)
    # Pixel coordinates are not meaningful here; hide the ticks.
    ax.set_xticks([])
    ax.set_yticks([])
##Plot the real parts of the zenith-pointing Gaussian beam Jones terms
gx_re, gy_re = np.loadtxt('Gaussian_beam_zenith_100MHz.txt',unpack=True)
fig, axs = plt.subplots(1, 2, figsize=(6,3))
reshape_and_plot(gx_re, axs[0], 'Real $g_x$', fig)
reshape_and_plot(gy_re, axs[1], 'Real $g_y$', fig)
plt.tight_layout()
fig.savefig('Gaussian_jones_zenith.png',bbox_inches='tight')
plt.close()

##Repeat for the off-zenith pointing
gx_re, gy_re = np.loadtxt('Gaussian_beam_offzenith_100MHz.txt',unpack=True)
fig, axs = plt.subplots(1, 2, figsize=(6,3))
reshape_and_plot(gx_re, axs[0], 'Real $g_x$', fig)
reshape_and_plot(gy_re, axs[1], 'Real $g_y$', fig)
plt.tight_layout()
fig.savefig('Gaussian_jones_offzenith.png',bbox_inches='tight')
plt.close()
|
JLBLineREPO_NAMEWODENPATH_START.@WODEN_extracted@WODEN-master@docs@sphinx@operating_principles@run_Gaussian_beam.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "astropy/astropy",
"repo_path": "astropy_extracted/astropy-main/astropy/wcs/wcsapi/wrappers/__init__.py",
"type": "Python"
}
|
from .base import BaseWCSWrapper
from .sliced_wcs import *
|
astropyREPO_NAMEastropyPATH_START.@astropy_extracted@astropy-main@astropy@wcs@wcsapi@wrappers@__init__.py@.PATH_END.py
|
{
"filename": "asdf_cutouts.py",
"repo_name": "spacetelescope/astrocut",
"repo_path": "astrocut_extracted/astrocut-main/astrocut/asdf_cutouts.py",
"type": "Python"
}
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""This module implements cutout functionality similar to fitscut, but for the ASDF file format."""
import copy
import pathlib
from typing import Union, Tuple
import requests
import asdf
import astropy
import gwcs
import numpy as np
import s3fs
from s3path import S3Path
from astropy.coordinates import SkyCoord
from astropy.modeling import models
from . import log
from .utils.utils import _handle_verbose
def _get_cloud_http(s3_uri: Union[str, S3Path], key: str = None, secret: str = None,
                    token: str = None, verbose: bool = False) -> str:
    """
    Get the HTTP URI of a cloud resource from an S3 URI.

    Parameters
    ----------
    s3_uri : string | S3Path
        the S3 URI of the cloud resource
    key : string
        Default None. Access key ID for S3 file system.
    secret : string
        Default None. Secret access key for S3 file system.
    token : string
        Default None. Security token for S3 file system.
    verbose : bool
        Default False. If true intermediate information is printed.
    """
    # Normalize to an S3Path, then probe the public HTTP endpoint with a
    # HEAD request to decide whether anonymous access works.
    if isinstance(s3_uri, str):
        s3_path = S3Path.from_uri(s3_uri)
    else:
        s3_path = s3_uri
    url = f'https://{s3_path.bucket}.s3.amazonaws.com/{s3_path.key}'
    resp = requests.head(url, timeout=10)
    is_anon = resp.status_code != 403  # 403 => credentials required
    if not is_anon:
        log.debug('Attempting to access private S3 bucket: %s', s3_path.bucket)

    # open the object via s3fs and return its HTTP URL
    fs = s3fs.S3FileSystem(anon=is_anon, key=key, secret=secret, token=token)
    with fs.open(s3_uri, 'rb') as f:
        return f.url()
def get_center_pixel(gwcsobj: gwcs.wcs.WCS, ra: float, dec: float) -> tuple:
    """
    Get the center pixel from a Roman 2D science image.

    For an input RA, Dec sky coordinate, get the closest pixel location
    on the input Roman image.

    Parameters
    ----------
    gwcsobj : gwcs.wcs.WCS
        The Roman GWCS object.
    ra : float
        The input right ascension.
    dec : float
        The input declination.

    Returns
    -------
    tuple
        The pixel position, FITS wcs object
    """
    # Convert the gwcs object to an astropy FITS WCS header
    header = gwcsobj.to_fits_sip()

    # Update WCS header with some keywords that it's missing.
    # Otherwise, it won't work with astropy.wcs tools (TODO: Figure out why. What are these keywords for?)
    for k in ['cpdis1', 'cpdis2', 'det2im1', 'det2im2', 'sip']:
        if k not in header:
            header[k] = 'na'

    # New WCS object with updated header
    wcs_updated = astropy.wcs.WCS(header)

    # Turn input RA, Dec into a SkyCoord object
    coordinates = SkyCoord(ra, dec, unit='deg')

    # Map the coordinates to a pixel's location on the Roman 2d array (row, col)
    # NOTE(review): gwcs.invert returns pixel coordinates; confirm the
    # (row, col) vs (x, y) ordering matches the downstream Cutout2D usage.
    row, col = gwcsobj.invert(coordinates)

    return (row, col), wcs_updated
def _get_cutout(data: asdf.tags.core.ndarray.NDArrayType, coords: Union[tuple, SkyCoord],
                wcs: astropy.wcs.wcs.WCS = None, size: int = 20, outfile: str = "example_roman_cutout.fits",
                write_file: bool = True, fill_value: Union[int, float] = np.nan,
                gwcsobj: gwcs.wcs.WCS = None) -> astropy.nddata.Cutout2D:
    """
    Get a Roman image cutout.

    Cut out a square section from the input image data array. The ``coords`` can either be a tuple of x, y
    pixel coordinates or an astropy SkyCoord object, in which case, a wcs is required. Writes out a
    new output file containing the image cutout of the specified ``size``. Default is 20 pixels.

    Parameters
    ----------
    data : asdf.tags.core.ndarray.NDArrayType
        the input Roman image data array
    coords : Union[tuple, SkyCoord]
        the input pixel or sky coordinates
    wcs : astropy.wcs.wcs.WCS, Optional
        the astropy FITS wcs object
    size : int, optional
        the image cutout pizel size, by default 20
    outfile : str, optional
        the name of the output cutout file, by default "example_roman_cutout.fits"
    write_file : bool, by default True
        Flag to write the cutout to a file or not
    fill_value: int | float, by default np.nan
        The fill value for pixels outside the original image.
    gwcsobj : gwcs.wcs.WCS, Optional
        the original gwcs object for the full image, needed only when writing cutout as asdf file

    Returns
    -------
    astropy.nddata.Cutout2D:
        an image cutout object

    Raises
    ------
    ValueError:
        when a wcs is not present when coords is a SkyCoord object
    RuntimeError:
        when the requested cutout does not overlap with the original image
    ValueError:
        when no gwcs object is provided when writing to an asdf file
    """
    # check for correct inputs
    if isinstance(coords, SkyCoord) and not wcs:
        raise ValueError('wcs must be input if coords is a SkyCoord.')

    # create the cutout; "partial" mode pads out-of-bounds pixels with fill_value
    try:
        cutout = astropy.nddata.Cutout2D(data, position=coords, wcs=wcs, size=(size, size), mode='partial',
                                         fill_value=fill_value)
    except astropy.nddata.utils.NoOverlapError as e:
        raise RuntimeError('Could not create 2d cutout. The requested cutout does not overlap with the '
                           'original image.') from e

    # NOTE: the Quantity-unwrapping previously done here was dead code;
    # the writers below extract the plain array themselves.

    # write the cutout to the output file
    if write_file:
        # check the output file type; default to FITS when no suffix is given
        out = pathlib.Path(outfile)
        write_as = out.suffix or '.fits'
        outfile = outfile if out.suffix else str(out) + write_as

        # write out the file
        if write_as == '.fits':
            _write_fits(cutout, outfile)
        elif write_as == '.asdf':
            if not gwcsobj:
                raise ValueError('The original gwcs object is needed when writing to asdf file.')
            _write_asdf(cutout, gwcsobj, outfile)

    return cutout
def _write_fits(cutout: astropy.nddata.Cutout2D, outfile: str = "example_roman_cutout.fits"):
    """
    Write cutout as FITS file.

    Parameters
    ----------
    cutout : astropy.nddata.Cutout2D
        the 2d cutout
    outfile : str, optional
        the name of the output cutout file, by default "example_roman_cutout.fits"
    """
    # strip units if the cutout carries an astropy Quantity
    data = cutout.data
    if isinstance(data, astropy.units.Quantity):
        data = data.value

    # serialize the cutout WCS into the FITS header alongside the pixels
    astropy.io.fits.writeto(outfile, data=data, header=cutout.wcs.to_header(relax=True), overwrite=True)
def _slice_gwcs(gwcsobj: gwcs.wcs.WCS, slices: Tuple[slice, slice]) -> gwcs.wcs.WCS:
    """
    Slice the original gwcs object.

    "Slices" the original gwcs object down to the cutout shape.  This is a hack
    until proper gwcs slicing is in place a la fits WCS slicing.  The ``slices``
    keyword input is a tuple with the x, y cutout boundaries in the original image
    array, e.g. ``cutout.slices_original``.  Astropy Cutout2D slices are in the form
    ((ymin, ymax, None), (xmin, xmax, None))

    Parameters
    ----------
    gwcsobj : gwcs.wcs.WCS
        the original gwcs from the input image
    slices : Tuple[slice, slice]
        the cutout x, y slices as ((ymin, ymax), (xmin, xmax))

    Returns
    -------
    gwcs.wcs.WCS
        The sliced gwcs object
    """
    # Work on a copy so the caller's gwcs is untouched.
    tmp = copy.deepcopy(gwcsobj)

    # get the cutout array bounds and create a new shift transform to the cutout
    # add the new transform to the gwcs
    xmin, xmax = slices[1].start, slices[1].stop
    ymin, ymax = slices[0].start, slices[0].stop
    shape = (ymax - ymin, xmax - xmin)
    # Shift cutout pixel coords back to full-frame coords before the
    # original detector->sky transform runs.
    offsets = models.Shift(xmin, name='cutout_offset1') & models.Shift(ymin, name='cutout_offset2')
    tmp.insert_transform('detector', offsets, after=True)

    # modify the gwcs bounding box to the cutout shape
    # NOTE(review): bounding_box here is ((0, ny-1), (0, nx-1)) while
    # pixel_shape is (nx, ny) — confirm the axis ordering matches the
    # gwcs convention in use.
    tmp.bounding_box = ((0, shape[0] - 1), (0, shape[1] - 1))
    tmp.pixel_shape = shape[::-1]
    tmp.array_shape = shape
    return tmp
def _write_asdf(cutout: astropy.nddata.Cutout2D, gwcsobj: gwcs.wcs.WCS, outfile: str = "example_roman_cutout.asdf"):
    """
    Write cutout as ASDF file.

    Parameters
    ----------
    cutout : astropy.nddata.Cutout2D
        the 2d cutout
    gwcsobj : gwcs.wcs.WCS
        the original gwcs object for the full image
    outfile : str, optional
        the name of the output cutout file, by default "example_roman_cutout.asdf"
    """
    # slice the original gwcs to the cutout footprint
    sliced_gwcs = _slice_gwcs(gwcsobj, cutout.slices_original)

    # create the asdf tree, mirroring the Roman layout (roman.meta.wcs / roman.data)
    tree = {'roman': {'meta': {'wcs': sliced_gwcs}, 'data': cutout.data}}
    af = asdf.AsdfFile(tree)

    # Write the data to a new file
    af.write_to(outfile)
def asdf_cut(input_file: Union[str, pathlib.Path, S3Path], ra: float, dec: float, cutout_size: int = 25,
             output_file: Union[str, pathlib.Path] = "example_roman_cutout.fits",
             write_file: bool = True, fill_value: Union[int, float] = np.nan, key: str = None,
             secret: str = None, token: str = None, verbose: bool = False) -> astropy.nddata.Cutout2D:
    """
    Takes a single ASDF input file (`input_file`) and generates a cutout of designated size `cutout_size`
    around the given coordinates (`coordinates`).

    Parameters
    ----------
    input_file : str | Path | S3Path
        The input ASDF file.
    ra : float
        The right ascension of the central cutout.
    dec : float
        The declination of the central cutout.
    cutout_size : int
        Optional, default 25. The image cutout pixel size.
        Note: Odd values for `cutout_size` generally result in a cutout that is more accurately
        centered on the target coordinates compared to even values, due to the symmetry of the
        pixel grid.
    output_file : str | Path
        Optional, default "example_roman_cutout.fits". The name of the output cutout file.
    write_file : bool
        Optional, default True. Flag to write the cutout to a file or not.
    fill_value: int | float
        Optional, default `np.nan`. The fill value for pixels outside the original image.
    key : string
        Default None. Access key ID for S3 file system. Only applicable if `input_file` is a
        cloud resource.
    secret : string
        Default None. Secret access key for S3 file system. Only applicable if `input_file` is a
        cloud resource.
    token : string
        Default None. Security token for S3 file system. Only applicable if `input_file` is a
        cloud resource.
    verbose : bool
        Default False. If True, intermediate information is printed.

    Returns
    -------
    astropy.nddata.Cutout2D:
        An image cutout object.
    """
    # Log messages based on verbosity
    _handle_verbose(verbose)

    # if file comes from AWS cloud bucket, get HTTP URL to open with asdf
    file = input_file
    if (isinstance(input_file, str) and input_file.startswith('s3://')) or isinstance(input_file, S3Path):
        file = _get_cloud_http(input_file, key, secret, token, verbose)

    # get the 2d image data and its gwcs from the Roman tree
    with asdf.open(file) as f:
        data = f['roman']['data']
        gwcsobj = f['roman']['meta']['wcs']

        # get the center pixel
        pixel_coordinates, wcs = get_center_pixel(gwcsobj, ra, dec)

        # create the 2d image cutout
        return _get_cutout(data, pixel_coordinates, wcs, size=cutout_size, outfile=output_file,
                           write_file=write_file, fill_value=fill_value, gwcsobj=gwcsobj)
|
spacetelescopeREPO_NAMEastrocutPATH_START.@astrocut_extracted@astrocut-main@astrocut@asdf_cutouts.py@.PATH_END.py
|
{
"filename": "Results_ft.ipynb",
"repo_name": "astromer-science/main-code",
"repo_path": "main-code_extracted/main-code-main/presentation/notebooks/Results_ft.ipynb",
"type": "Jupyter Notebook"
}
|
```python
cd /home
```
/home
```python
import matplotlib.pyplot as plt
import tensorflow as tf
import pandas as pd
import numpy as np
import pickle
import json
import os
from sklearn.metrics import r2_score, mean_squared_error
from core.astromer import get_ASTROMER, train, predict
from core.data import pretraining_records
from tensorboard.backend.event_processing import event_accumulator
from tensorflow.core.util import event_pb2
from tensorflow.python.lib.io import tf_record
%load_ext autoreload
%autoreload 2
os.environ["CUDA_VISIBLE_DEVICES"] = "2"
plt.rcParams.update({
"text.usetex": True,
"font.family": "sans-serif",
"font.sans-serif": ["Helvetica"]})
# for Palatino and other serif fonts use:
plt.rcParams.update({
"text.usetex": True,
"font.family": "serif",
"font.serif": ["Palatino"],
})
```
[H[2J
```python
def my_summary_iterator(path):
    # Iterate raw TF event records, decoding each into an Event proto.
    for r in tf_record.tf_record_iterator(path):
        yield event_pb2.Event.FromString(r)

def get_metrics(path_logs):
    """Load the 'mse' scalar series from the newest .v2 event file in
    `path_logs` into a (wall_time, step, value) DataFrame."""
    # [-1] picks the last (alphabetically; presumably newest — TODO
    # confirm) .v2 event file in the directory.
    train_logs = [x for x in os.listdir(path_logs) if x.endswith('.v2')][-1]
    path_train = os.path.join(path_logs, train_logs)
    # size_guidance 0 for tensors disables TensorBoard's downsampling.
    ea = event_accumulator.EventAccumulator(path_train, size_guidance={'tensors': 0})
    ea.Reload()
    metrics = pd.DataFrame([(w,s,tf.make_ndarray(t))for w,s,t in ea.Tensors('mse')],
                           columns=['wall_time', 'step', 'value'])
    return metrics
```
```python
exp_path = './runs/astromer_256/'
```
```python
fig, axes = plt.subplots(5,3, figsize=(12,12), dpi=300, gridspec_kw={'hspace': 0.4, 'wspace': 0.3})
ls = ['-', ':', '--']
for col, dataset in enumerate(['alcock','ogle', 'atlas']):
values = []
for row, subset in enumerate([20, 50, 100, 500, -1]):
for fold in range(3):
if subset != -1:
subset_name = '{}_{}'.format(dataset, subset)
else:
subset_name = dataset
if '_' in subset_name:
path = os.path.join(exp_path, 'ab',dataset, 'fold_{}'.format(fold), subset_name, 'logs', 'valid')
else:
path = os.path.join(exp_path, 'c',dataset, 'fold_{}'.format(fold), subset_name, 'logs', 'valid')
metrc = get_metrics(path)
lowest_rmse = metrc['value'].min()
x = metrc['step']
axes[row][col].plot(x, metrc['value'], label='fold_{}'.format(fold), color='k', linestyle=ls[fold])
axes[row][col].set_yscale('log')
if col == 2:
if subset == -1:
ax2 = axes[row][col].twinx()
ax2.set_ylabel('All samples', fontsize=12)
ax2.set_yticks([])
else:
ax2 = axes[row][col].twinx()
ax2.set_ylabel('{} samples per class'.format(subset), fontsize=12)
ax2.set_yticks([])
if row == 0 and dataset == 'alcock':
axes[row][col].set_title('MACHO')
if row == 0 and dataset == 'ogle':
axes[row][col].set_title('OGLE-III')
if row == 0 and dataset == 'atlas':
axes[row][col].set_title('ATLAS')
values.append([metrc['value'].min(), metrc['value'].max()])
axes[0][1].legend(ncol=3, bbox_to_anchor = (1.2, 1.5), fontsize=12)
fig.text(0.5, 0.08, 'Number of Epochs', ha='center', fontsize=12)
fig.text(0.04, 0.5, 'Root-mean-square error', va='center', rotation='vertical', fontsize=12)
```
Text(0.04, 0.5, 'Root-mean-square error')

```python
fig.savefig('presentation/figures/ft_val_curves.pdf', format='pdf', bbox_inches='tight')
```
## Training Times
```python
# Unit lengths in seconds, largest first.
intervals = (
    ('weeks', 604800),    # 60 * 60 * 24 * 7
    ('days', 86400),      # 60 * 60 * 24
    ('hours', 3600),      # 60 * 60
    ('minutes', 60),
    ('seconds', 1),
)

def display_time(seconds, granularity=2):
    """Render a duration in seconds as text, keeping only the
    `granularity` largest non-zero units (e.g. '1 day, 8 hours')."""
    parts = []
    remaining = seconds
    for unit, length in intervals:
        amount = remaining // length
        if not amount:
            continue
        remaining -= amount * length
        # Singular form when exactly one of the unit.
        label = unit.rstrip('s') if amount == 1 else unit
        parts.append("{} {}".format(amount, label))
    return ', '.join(parts[:granularity])
```
```python
for col, dataset in enumerate(['alcock','ogle', 'atlas']):
values = []
for row, subset in enumerate([ -1]):
times = []
rmse_list = []
for fold in range(3):
if subset != -1:
subset_name = '{}_{}'.format(dataset, subset)
else:
subset_name = dataset
if '_' in subset_name:
path = os.path.join(exp_path, 'ab',dataset, 'fold_{}'.format(fold), subset_name, 'logs', 'valid')
else:
path = os.path.join(exp_path, 'c',dataset, 'fold_{}'.format(fold), subset_name, 'logs', 'valid')
metrc = get_metrics(path)
time = metrc['wall_time'].iloc[-1] - metrc['wall_time'].iloc[0]
rmse_list.append(metrc['value'].min())
times.append(time)
print(dataset, display_time(np.mean(times)), '{:.2f}'.format(np.mean(rmse_list)))
```
alcock 23.0 minutes, 3.0 seconds 0.11
ogle 1.0 day, 8.0 hours 0.03
atlas 9.0 hours, 58.0 minutes 0.05
## Testing RMSE on finetuned models
Testing all the models is time consuming! `subset_ft.pkl` already has all the results
```python
try:
with open('./presentation/backup/subset_ft.pkl', 'rb') as handle:
res_dic = pickle.load(handle)
except Exception as e:
print(e)
res_dic = {'alcock':{'mean':[], 'std':[]}, 'ogle':{'mean':[], 'std':[]}, 'atlas':{'mean':[], 'std':[]}}
for col, dataset in enumerate(['alcock','ogle', 'atlas']):
values, values_std = [], []
for row, subset in enumerate([20, 50, 100, 500, -1]):
times = []
rmse_list = []
for fold in range(3):
if subset != -1:
subset_name = '{}_{}'.format(dataset, subset)
else:
subset_name = dataset
if '_' in subset_name:
path = os.path.join(exp_path, 'ab',dataset, 'fold_{}'.format(fold), subset_name)
else:
path = os.path.join(exp_path, 'c',dataset, 'fold_{}'.format(fold), subset_name)
conf_file = os.path.join(path, 'conf.json')
with open(conf_file, 'r') as handle:
conf = json.load(handle)
dataset_path = os.path.join('./data/records/', dataset, 'fold_{}'.format(fold), '{}_20'.format(dataset))
test_batches = pretraining_records(os.path.join(dataset_path, 'test'),
256,
max_obs=conf['max_obs'],
msk_frac=conf['msk_frac'],
rnd_frac=conf['rnd_frac'],
same_frac=conf['same_frac'],
sampling=False, shuffle=False)
astromer = get_ASTROMER(num_layers=conf['layers'],
d_model=conf['head_dim'],
num_heads=conf['heads'],
dff=conf['dff'],
base=conf['base'],
use_leak=conf['use_leak'],
dropout=conf['dropout'],
maxlen=conf['max_obs'])
weights_path = os.path.join(path, 'weights')
astromer.load_weights(weights_path)
result = predict(astromer, test_batches, conf)
rmse_list.append(result['mse'])
values.append(np.mean(rmse_list))
values_std.append(np.std(rmse_list))
res_dic[dataset]['mean'] = values
res_dic[dataset]['std'] = values_std
with open('./presentation/backup/subset_ft.pkl', 'wb') as h:
pickle.dump(res_dic, h)
```
```python
markers = ['o', 's', '^']
ls = ['--', ':', '-']
plt.figure(figsize=(4,2.5), dpi=300)
for col, dataset in enumerate(['alcock','ogle', 'atlas']):
values = res_dic[dataset]['mean']
values_std = res_dic[dataset]['std']
if dataset == 'alcock':
ds_name = 'MACHO'
if dataset == 'ogle':
ds_name = 'OGLE-III'
if dataset == 'atlas':
ds_name = 'ATLAS'
values = np.array(values)
print(dataset, values)
values_std = np.array(values_std)
x_range = np.arange(len(values))
plt.plot(x_range, values, marker=markers[col], color='k', label=ds_name, linestyle=ls[col])
plt.fill_between(x_range, values+values_std, values-values_std, color='k', alpha=0.2)
plt.xticks(x_range, ['20', '50', '100', '500', 'All'])
plt.ylabel('RMSE')
plt.xlabel('Samples per class')
plt.yscale('log')
plt.legend(ncol=3, bbox_to_anchor = (.95, 1.15), fontsize=8)
# plt.savefig('./presentation/figures/subset_ft_rmse.pdf', format='pdf',bbox_inches='tight')
```
alcock [0.1097377 0.10666344 0.10878623 0.10471207 0.09476293]
ogle [0.087459 0.08507907 0.0828257 0.07447947 0.06177234]
atlas [0.2053613 0.18972848 0.17527956 0.1496656 0.07305571]
<matplotlib.legend.Legend at 0x7f9daf775dc0>

## Comparing Head-dim
```python
wights_path = './weights/macho_{}'
```
```python
for head_dim in [64, 128, 256]:
path = os.path.join(wights_path.format(head_dim), 'logs', 'valid')
metrc = get_metrics(path)
print(metrc['value'].min())
```
0.15185417
0.15613385
0.1474126
```python
```
|
astromer-scienceREPO_NAMEmain-codePATH_START.@main-code_extracted@main-code-main@presentation@notebooks@Results_ft.ipynb@.PATH_END.py
|
{
"filename": "orbit_conventional.py",
"repo_name": "iancze/PSOAP",
"repo_path": "PSOAP_extracted/PSOAP-master/attic/orbit_conventional.py",
"type": "Python"
}
|
import numpy as np
from scipy.optimize import fsolve, minimize
from psoap import constants as C
class Binary:
'''
A binary orbit that delivers astrometric position, relative astrometric position (B relative to A), and radial velocities of A and B.
'''
def __init__(self, a, e, i, omega, Omega, T0, M_tot, M_2, gamma, obs_dates=None, **kwargs):
    """Store the orbital elements and masses, then derive RV quantities.

    Angles are in degrees, semi-major axis in AU, masses in solar
    masses, T0 in JD, and gamma (systemic velocity) in km/s.
    """
    self.a = a # [AU] semi-major axis
    self.e = e # eccentricity
    self.i = i # [deg] inclination
    self.omega = omega # [deg] argument of periastron
    self.Omega = Omega # [deg] east of north
    self.T0 = T0 # [JD]
    self.M_tot = M_tot # [M_sun]
    self.M_2 = M_2 # [M_sun]
    self.gamma = gamma # [km/s]

    # Update the RV quantities (q, P, K derived from the elements above)
    self.recalculate()

    # If we are going to be repeatedly predicting the orbit at a sequence of dates,
    # just store them to the object.
    self.obs_dates = obs_dates

    # Keyed snapshot of the parameters; used by update_parameters().
    self.param_dict = {"a":self.a, "e":self.e, "i":self.i, "omega":self.omega,
    "Omega":self.Omega, "T0":self.T0, "M_tot":self.M_tot, "M_2":self.M_2, "gamma":self.gamma}
def recalculate(self):
'''
Recalculates derivative RV quantities when other parameters are updated.
'''
# Calculate the following RV quantities
self.q = self.M_2 / (self.M_tot - self.M_2) # [M2/M1]
self.P = np.sqrt(4 * np.pi**2 / (C.G * self.M_tot * C.M_sun) * (self.a * C.AU)**3) / (60 * 60 * 24)# [days]
self.K = np.sqrt(C.G/(1 - self.e**2)) * self.M_2 * C.M_sun * np.sin(self.i * np.pi/180.) / np.sqrt(self.M_tot * C.M_sun * self.a * C.AU) * 1e-5 # [km/s]
def update_parameters(self, param_values, param_list):
'''
param_values is numpy array of values
param_list is list of strings of the names of the parameters
'''
for (value, key) in zip(param_values, param_list):
self.param_dict[key] = value
def theta(self, t):
'''Calculate the true anomoly for the A-B orbit.
Input is in days.'''
# t is input in seconds
# Take a modulus of the period
t = (t - self.T0) % self.P
f = lambda E: E - self.e * np.sin(E) - 2 * np.pi * t/self.P
E0 = 2 * np.pi * t / self.P
E = fsolve(f, E0)[0]
th = 2 * np.arctan(np.sqrt((1 + self.e)/(1 - self.e)) * np.tan(E/2.))
if E < np.pi:
return th
else:
return th + 2 * np.pi
def v1_f(self, f):
'''Calculate the component of A's velocity based on only the inner orbit.
f is the true anomoly of this inner orbit.'''
return self.K * (np.cos(self.omega * np.pi/180 + f) + self.e * np.cos(self.omega * np.pi/180))
def v2_f(self, f):
'''Calculate the component of B's velocity based on only the inner orbit.
f is the true anomoly of this inner orbit.'''
return -self.K/self.q * (np.cos(self.omega * np.pi/180 + f) + self.e * np.cos(self.omega * np.pi/180))
# Get the position of A in the plane of the orbit
def xy_A(self, f):
# find the reduced radius
r = self.a * (1 - self.e**2) / (1 + self.e * np.cos(f)) # [AU]
r1 = r * self.M_2 / self.M_tot # [AU]
x = r1 * np.cos(f)
y = r1 * np.sin(f)
return (x,y)
# Get the position of B in the plane of the orbit
def xy_B(self, f):
# find the reduced radius
r = self.a * (1 - self.e**2) / (1 + self.e * np.cos(f)) # [AU]
r2 = -r * (self.M_tot - self.M_2) / self.M_tot # [AU]
x = r2 * np.cos(f)
y = r2 * np.sin(f)
return (x,y)
def xy_AB(self, f):
r = self.a * (1 - self.e**2) / (1 + self.e * np.cos(f)) # [AU]
x = r * np.cos(f)
y = r * np.sin(f)
return (x,y)
# position of A relative to center of mass
def XY_A(self, f):
Omega = self.Omega * np.pi / 180
omega = self.omega * np.pi / 180 # add in pi to swap the periapse
i = self.i * np.pi / 180
# find the reduced semi-major axis
a1 = self.a * self.M_2 / self.M_tot
r1 = a1 * (1 - self.e**2) / (1 + self.e * np.cos(f)) # [AU]
x = r1 / a1 * np.cos(f)
y = r1 / a1 * np.sin(f)
# Calculate Thiele-Innes elements
A = a1 * (np.cos(omega) * np.cos(Omega) - np.sin(omega) * np.sin(Omega) * np.cos(i))
B = a1 * (np.cos(omega) * np.sin(Omega) + np.sin(omega) * np.cos(Omega) * np.cos(i))
F = a1 * (-np.sin(omega) * np.cos(Omega) - np.cos(omega) * np.sin(Omega) * np.cos(i))
G = a1 * (-np.sin(omega) * np.sin(Omega) + np.cos(omega) * np.cos(Omega) * np.cos(i))
X = A * x + F * y
Y = B * x + G * y
return (X, Y) # [AU]
# position of B relative to center of mass
def XY_B(self, f):
Omega = self.Omega * np.pi / 180
omega = self.omega * np.pi / 180
i = self.i * np.pi / 180
# find the reduced radius
a2 = self.a * (self.M_tot - self.M_2) / self.M_tot
r2 = a2 * (1 - self.e**2) / (1 + self.e * np.cos(f)) # [AU]
x = r2 / a2 * np.cos(f)
y = r2 / a2 * np.sin(f)
# Calculate Thiele-Innes elements
A = a2 * (np.cos(omega) * np.cos(Omega) - np.sin(omega) * np.sin(Omega) * np.cos(i))
B = a2 * (np.cos(omega) * np.sin(Omega) + np.sin(omega) * np.cos(Omega) * np.cos(i))
F = a2 * (-np.sin(omega) * np.cos(Omega) - np.cos(omega) * np.sin(Omega) * np.cos(i))
G = a2 * (-np.sin(omega) * np.sin(Omega) + np.cos(omega) * np.cos(Omega) * np.cos(i))
X = A * x + F * y
Y = B * x + G * y
return (X, Y) # [AU]
def XY_AB(self, f):
Omega = self.Omega * np.pi / 180
omega = self.omega * np.pi / 180
i = self.i * np.pi / 180
r = self.a * (1 - self.e**2) / (1 + self.e * np.cos(f))
x = r / self.a * np.cos(f)
y = r / self.a * np.sin(f)
# Calculate Thiele-Innes elements
A = self.a * (np.cos(omega) * np.cos(Omega) - np.sin(omega) * np.sin(Omega) * np.cos(i))
B = self.a * (np.cos(omega) * np.sin(Omega) + np.sin(omega) * np.cos(Omega) * np.cos(i))
F = self.a * (-np.sin(omega) * np.cos(Omega) - np.cos(omega) * np.sin(Omega) * np.cos(i))
G = self.a * (-np.sin(omega) * np.sin(Omega) + np.cos(omega) * np.cos(Omega) * np.cos(i))
X = A * x + F * y
Y = B * x + G * y
# X is north, Y is east.
return (X, Y) # [AU]
def get_orbit(self, t):
'''
Given a time, calculate all of the orbital quantaties we might be interseted in.
returns (v_A, v_B, (x,y) of A, (x,y) of B, and x,y of B relative to A)
'''
# Get the true anomoly "f" from time
f = self.theta(t)
# Feed this into the orbit equation and add the systemic velocity
vA = self.v1_f(f) + self.gamma
vB = self.v2_f(f) + self.gamma
XY_A = self.XY_A(f)
XY_B = self.XY_B(f)
XY_AB = self.XY_AB(f)
xy_A = self.xy_A(f)
xy_B = self.xy_B(f)
xy_AB = self.xy_AB(f)
return (vA, vB, XY_A, XY_B, XY_AB, xy_A, xy_B, xy_AB)
def get_component_orbits(self, dates=None):
'''
Return both vA and vB for all dates provided.
'''
if dates is None and self.obs_dates is None:
raise RuntimeError("Must provide input dates or specify observation dates upon creation of orbit object.")
if dates is None and self.obs_dates is not None:
dates = self.obs_dates
dates = np.atleast_1d(dates)
N = len(dates)
vAs = np.empty(N, dtype=np.float64)
vBs = np.empty(N, dtype=np.float64)
XY_As = np.empty((N, 2), dtype=np.float64)
XY_Bs = np.empty((N, 2), dtype=np.float64)
XY_ABs = np.empty((N, 2), dtype=np.float64)
xy_As = np.empty((N, 2), dtype=np.float64)
xy_Bs = np.empty((N, 2), dtype=np.float64)
xy_ABs = np.empty((N, 2), dtype=np.float64)
for i,date in enumerate(dates):
vA, vB, XY_A, XY_B, XY_AB, xy_A, xy_B, xy_AB = self.get_orbit(date)
vAs[i] = vA
vBs[i] = vB
XY_As[i] = np.array(XY_A)
XY_Bs[i] = np.array(XY_B)
XY_ABs[i] = np.array(XY_AB)
xy_As[i] = np.array(xy_A)
xy_Bs[i] = np.array(xy_B)
xy_ABs[i] = np.array(xy_AB)
return (vAs, vBs, XY_As, XY_Bs, XY_ABs, xy_As, xy_Bs, xy_ABs)
def get_component_fits(self, dates=None):
'''
Return both vA, vB, rho_AB, and theta_AB, for all dates provided.
These are mainly as inputs to a fit.
'''
if dates is None and self.obs_dates is None:
raise RuntimeError("Must provide input dates or specify observation dates upon creation of orbit object.")
if dates is None and self.obs_dates is not None:
dates = self.obs_dates
dates = np.atleast_1d(dates)
N = len(dates)
vAs = np.empty(N, dtype=np.float64)
vBs = np.empty(N, dtype=np.float64)
rho_ABs = np.empty(N, dtype=np.float64)
theta_ABs = np.empty(N, dtype=np.float64)
for i,date in enumerate(dates):
vA, vB, XY_A, XY_B, XY_AB, xy_A, xy_B, xy_AB = self.get_orbit(date)
vAs[i] = vA
vBs[i] = vB
# Calculate rho, theta from XY_AB
X, Y = XY_AB
rho = np.sqrt(X**2 + Y**2) # [AU]
theta = np.arctan2(Y, X) * 180/np.pi # [Deg]
if theta < 0: # ensure that 0 <= theta <= 360
theta += 360.
rho_ABs[i] = rho
theta_ABs[i] = theta
return (vAs, vBs, rho_ABs, theta_ABs)
class Triple:
    '''
    A hierarchical triple-star orbit: an inner (A-B) pair plus an outer
    ((A-B)-C) orbit. Delivers radial velocities for A, B, C and projected
    sky positions for all components.

    Conventions: semi-major axes in AU, angles in degrees, masses in solar
    masses, velocities in km/s, epochs/dates in Julian days.
    '''
    def __init__(self, a_in, e_in, i_in, omega_in, Omega_in, T0_in, a_out, e_out, i_out, omega_out, Omega_out, T0_out, M_1, M_2, M_3, gamma, obs_dates=None, **kwargs):
        self.a_in = a_in # [AU]
        self.e_in = e_in #
        self.i_in = i_in # [deg]
        self.omega_in = omega_in # [deg]
        self.Omega_in = Omega_in # [deg]
        self.T0_in = T0_in # [JD]
        self.a_out = a_out # [AU]
        self.e_out = e_out
        self.i_out = i_out # [deg]
        self.omega_out = omega_out # [deg]
        self.Omega_out = Omega_out # [deg]
        self.T0_out = T0_out # [JD]
        self.M_1 = M_1 # [M_sun]
        self.M_2 = M_2 # [M_sun]
        self.M_3 = M_3 # [M_sun]
        self.gamma = gamma # [km/s] systemic velocity
        self.recalculate()
        # If we are going to be repeatedly predicting the orbit at a sequence of dates,
        # just store them to the object.
        self.obs_dates = obs_dates
        self.param_dict = {"a_in":self.a_in, "e_in":self.e_in, "i_in":self.i_in, "omega_in":self.omega_in, "Omega_in":self.Omega_in, "T0_in":self.T0_in, "a_out":self.a_out, "e_out":self.e_out, "i_out":self.i_out, "omega_out":self.omega_out, "Omega_out":self.Omega_out, "T0_out":self.T0_out, "M_1":self.M_1, "M_2":self.M_2, "M_3":self.M_3, "gamma":self.gamma}
    def recalculate(self):
        '''
        Update all of the derived quantities (inner/outer periods and RV
        semi-amplitudes).
        '''
        # Calculate the following RV quantities
        self.P_in = np.sqrt(4 * np.pi**2 / (C.G * (self.M_1 + self.M_2) * C.M_sun) * (self.a_in * C.AU)**3) / (60 * 60 * 24)# [days]
        self.K_in = np.sqrt(C.G/(1 - self.e_in**2)) * self.M_2 * C.M_sun * np.sin(self.i_in * np.pi/180.) / np.sqrt((self.M_1 + self.M_2) * C.M_sun * self.a_in * C.AU) * 1e-5 # [km/s]
        self.P_out = np.sqrt(4 * np.pi**2 / (C.G * (self.M_1 + self.M_2 + self.M_3) * C.M_sun) * (self.a_out * C.AU)**3) / (60 * 60 * 24) # [days]
        self.K_out = np.sqrt(C.G/(1 - self.e_out**2)) * self.M_3 * C.M_sun * np.sin(self.i_out * np.pi/180.) / np.sqrt((self.M_1 + self.M_2 + self.M_3) * C.M_sun * self.a_out * C.AU) * 1e-5 # [km/s]
    def update_parameters(self, param_values, param_list):
        '''
        Update orbital parameters by name.

        param_values is a numpy array of values
        param_list is a list of strings of the names of the parameters

        Fix: previously only ``self.param_dict`` was updated, so the actual
        attributes (and the derived quantities P_in, K_in, P_out, K_out)
        silently kept their old values. Now the attributes are updated in
        lockstep and the derived quantities are recalculated.
        '''
        for (value, key) in zip(param_values, param_list):
            self.param_dict[key] = value
            setattr(self, key, value)
        self.recalculate()
    def theta_in(self, t):
        '''Calculate the true anomaly for the A-B orbit.
        t is in days (same unit as P_in and T0_in).'''
        # Take a modulus of the period
        t = (t - self.T0_in) % self.P_in
        # Solve Kepler's equation for the eccentric anomaly
        f = lambda E: E - self.e_in * np.sin(E) - 2 * np.pi * t/self.P_in
        E0 = 2 * np.pi * t / self.P_in
        E = fsolve(f, E0)[0]
        th = 2 * np.arctan(np.sqrt((1 + self.e_in)/(1 - self.e_in)) * np.tan(E/2.))
        # unwrap to keep theta in [0, 2 pi)
        if E < np.pi:
            return th
        else:
            return th + 2 * np.pi
    def theta_out(self, t):
        '''Calculate the true anomaly for the (A-B) - C orbit.
        t is in days (same unit as P_out and T0_out).'''
        # Take a modulus of the period
        t = (t - self.T0_out) % self.P_out
        # Solve Kepler's equation for the eccentric anomaly
        f = lambda E: E - self.e_out * np.sin(E) - 2 * np.pi * t/self.P_out
        E0 = 2 * np.pi * t / self.P_out
        E = fsolve(f, E0)[0]
        th = 2 * np.arctan(np.sqrt((1 + self.e_out)/(1 - self.e_out)) * np.tan(E/2.))
        # unwrap to keep theta in [0, 2 pi)
        if E < np.pi:
            return th
        else:
            return th + 2 * np.pi
    def v1_f(self, f):
        '''Calculate the component of A's velocity based on only the inner orbit.
        f is the true anomaly of this inner orbit.'''
        return self.K_in * (np.cos(self.omega_in * np.pi/180 + f) + self.e_in * np.cos(self.omega_in * np.pi/180))
    def v2_f(self, f):
        '''Calculate the component of B's velocity based on only the inner orbit.
        f is the true anomaly of this inner orbit.'''
        return -self.K_in * self.M_1/self.M_2 * (np.cos(self.omega_in * np.pi/180 + f) + self.e_in * np.cos(self.omega_in * np.pi/180))
    def v3_f(self, f):
        '''Calculate the velocity of (A-B) based only on the outer orbit.
        f is the true anomaly of the outer orbit'''
        return self.K_out * (np.cos(self.omega_out * np.pi/180 + f) + self.e_out * np.cos(self.omega_out * np.pi/180))
    def v3_f_C(self, f):
        '''Calculate the velocity of C based only on the outer orbit.
        f is the true anomaly of the outer orbit
        '''
        return -self.K_out * (self.M_1 + self.M_2)/ self.M_3 * (np.cos(self.omega_out * np.pi/180 + f) + self.e_out * np.cos(self.omega_out * np.pi/180))
    # absolute position of the AB center of mass in the plane of the orbit
    def xy_AB(self, f):
        # find the reduced radius
        r = self.a_out * (1 - self.e_out**2) / (1 + self.e_out * np.cos(f)) # [AU]
        r1 = r * self.M_3 / (self.M_1 + self.M_2 + self.M_3) # [AU]
        x = r1 * np.cos(f)
        y = r1 * np.sin(f)
        return (x,y)
    # absolute position of C in the plane of the orbit
    def xy_C(self, f):
        # find the reduced radius
        r = self.a_out * (1 - self.e_out**2) / (1 + self.e_out * np.cos(f)) # [AU]
        r2 = -r * (self.M_1 + self.M_2) / (self.M_1 + self.M_2 + self.M_3) # [AU]
        x = r2 * np.cos(f)
        y = r2 * np.sin(f)
        return (x,y)
    # absolute position of AB center of mass, projected on the sky
    def XY_AB(self, f):
        # find the reduced radius
        r = self.a_out * (1 - self.e_out**2) / (1 + self.e_out * np.cos(f)) # [AU]
        r1 = r * self.M_3 / (self.M_1 + self.M_2 + self.M_3) # [AU]
        Omega = self.Omega_out * np.pi / 180
        omega = self.omega_out * np.pi / 180 # add in pi to swap the periapse
        i = self.i_out * np.pi / 180
        X = r1 * (np.cos(Omega) * np.cos(omega + f) - np.sin(Omega) * np.sin(omega + f) * np.cos(i))
        Y = r1 * (np.sin(Omega) * np.cos(omega + f) + np.cos(Omega) * np.sin(omega + f) * np.cos(i))
        return (X, Y) # [AU]
    # absolute position of C, projected on the sky
    def XY_C(self, f):
        # find the reduced radius
        r = self.a_out * (1 - self.e_out**2) / (1 + self.e_out * np.cos(f)) # [AU]
        r2 = -r * (self.M_1 + self.M_2) / (self.M_1 + self.M_2 + self.M_3) # [AU]
        Omega = self.Omega_out * np.pi / 180
        omega = self.omega_out * np.pi / 180
        i = self.i_out * np.pi / 180
        X = r2 * (np.cos(Omega) * np.cos(omega + f) - np.sin(Omega) * np.sin(omega + f) * np.cos(i))
        Y = r2 * (np.sin(Omega) * np.cos(omega + f) + np.cos(Omega) * np.sin(omega + f) * np.cos(i))
        return (X, Y) # [AU]
    # position of A relative to center of mass of AB in the plane of the orbit
    def xy_A_loc(self, f):
        # find the reduced radius
        r = self.a_in * (1 - self.e_in**2) / (1 + self.e_in * np.cos(f)) # [AU]
        r1 = r * self.M_2 / (self.M_1 + self.M_2) # [AU]
        x = r1 * np.cos(f)
        y = r1 * np.sin(f)
        return (x,y)
    # position of B relative to center of mass of AB in the plane of the orbit
    def xy_B_loc(self, f):
        # find the reduced radius
        r = self.a_in * (1 - self.e_in**2) / (1 + self.e_in * np.cos(f)) # [AU]
        r2 = -r * self.M_1 / (self.M_1 + self.M_2) # [AU]
        x = r2 * np.cos(f)
        y = r2 * np.sin(f)
        return (x,y)
    # position of A relative to center of mass of AB (projected)
    def XY_A_loc(self, f):
        # find the reduced radius
        r = self.a_in * (1 - self.e_in**2) / (1 + self.e_in * np.cos(f)) # [AU]
        r1 = r * self.M_2 / (self.M_1 + self.M_2) # [AU]
        Omega = self.Omega_in * np.pi / 180
        omega = self.omega_in * np.pi / 180 # add in pi to swap the periapse
        i = self.i_in * np.pi / 180
        X = r1 * (np.cos(Omega) * np.cos(omega + f) - np.sin(Omega) * np.sin(omega + f) * np.cos(i))
        Y = r1 * (np.sin(Omega) * np.cos(omega + f) + np.cos(Omega) * np.sin(omega + f) * np.cos(i))
        return (X, Y) # [AU]
    # position of B relative to center of mass of AB (projected)
    def XY_B_loc(self, f):
        # find the reduced radius
        r = self.a_in * (1 - self.e_in**2) / (1 + self.e_in * np.cos(f)) # [AU]
        r2 = -r * self.M_1 / (self.M_1 + self.M_2) # [AU]
        Omega = self.Omega_in * np.pi / 180
        omega = self.omega_in * np.pi / 180
        i = self.i_in * np.pi / 180
        X = r2 * (np.cos(Omega) * np.cos(omega + f) - np.sin(Omega) * np.sin(omega + f) * np.cos(i))
        Y = r2 * (np.sin(Omega) * np.cos(omega + f) + np.cos(Omega) * np.sin(omega + f) * np.cos(i))
        return (X, Y) # [AU]
    def get_orbit(self, t):
        '''
        Given a time, calculate all of the orbital quantities we might be interested in.

        Returns a tuple
        (vA, vB, vC, XY_A, XY_B, XY_C, XY_AB, XY_A_loc, XY_B_loc,
         xy_C, xy_AB, xy_A_loc, xy_B_loc)
        of velocities [km/s] and positions [AU].
        '''
        # Get the true anomaly "f" from time for both orbits
        f_in = self.theta_in(t)
        f_out = self.theta_out(t)
        # Feed this into the orbit equation and add the systemic velocity
        vA = self.v1_f(f_in) + self.v3_f(f_out) + self.gamma
        vB = self.v2_f(f_in) + self.v3_f(f_out) + self.gamma
        vC = self.v3_f_C(f_out) + self.gamma
        # Absolute positions of AB center of mass, and C component.
        XY_AB = self.XY_AB(f_out)
        XY_C = self.XY_C(f_out)
        # Positions of A and B relative to AB center of mass.
        XY_A_loc = self.XY_A_loc(f_in)
        XY_B_loc = self.XY_B_loc(f_in)
        # Absolute positions of A and B
        XY_A = np.array(XY_A_loc) + np.array(XY_AB)
        XY_B = np.array(XY_B_loc) + np.array(XY_AB)
        # Orbital positions in the plane
        xy_AB = self.xy_AB(f_out)
        xy_C = self.xy_C(f_out)
        xy_A_loc = self.xy_A_loc(f_in)
        xy_B_loc = self.xy_B_loc(f_in)
        return (vA, vB, vC, XY_A, XY_B, XY_C, XY_AB, XY_A_loc, XY_B_loc, xy_C, xy_AB, xy_A_loc, xy_B_loc)
    def get_component_orbits(self, dates=None):
        '''
        Return all orbital quantities (velocities and positions) for all
        dates provided, as arrays of shape (N,) / (N, 2).
        '''
        if dates is None and self.obs_dates is None:
            raise RuntimeError("Must provide input dates or specify observation dates upon creation of orbit object.")
        if dates is None and self.obs_dates is not None:
            dates = self.obs_dates
        dates = np.atleast_1d(dates)
        N = len(dates)
        vAs = np.empty(N, dtype=np.float64)
        vBs = np.empty(N, dtype=np.float64)
        vCs = np.empty(N, dtype=np.float64)
        XY_As = np.empty((N, 2), dtype=np.float64)
        XY_Bs = np.empty((N, 2), dtype=np.float64)
        XY_Cs = np.empty((N, 2), dtype=np.float64)
        XY_ABs = np.empty((N, 2), dtype=np.float64)
        XY_A_locs = np.empty((N, 2), dtype=np.float64)
        XY_B_locs = np.empty((N, 2), dtype=np.float64)
        xy_Cs = np.empty((N, 2), dtype=np.float64)
        xy_ABs = np.empty((N, 2), dtype=np.float64)
        xy_A_locs = np.empty((N, 2), dtype=np.float64)
        xy_B_locs = np.empty((N, 2), dtype=np.float64)
        for i,date in enumerate(dates):
            vA, vB, vC, XY_A, XY_B, XY_C, XY_AB, XY_A_loc, XY_B_loc, xy_C, xy_AB, xy_A_loc, xy_B_loc = self.get_orbit(date)
            vAs[i] = vA
            vBs[i] = vB
            vCs[i] = vC
            XY_As[i] = np.array(XY_A)
            XY_Bs[i] = np.array(XY_B)
            XY_Cs[i] = np.array(XY_C)
            XY_ABs[i] = np.array(XY_AB)
            XY_A_locs[i] = np.array(XY_A_loc)
            XY_B_locs[i] = np.array(XY_B_loc)
            xy_Cs[i] = np.array(xy_C)
            xy_ABs[i] = np.array(xy_AB)
            xy_A_locs[i] = np.array(xy_A_loc)
            xy_B_locs[i] = np.array(xy_B_loc)
        return (vAs, vBs, vCs, XY_As, XY_Bs, XY_Cs, XY_ABs, XY_A_locs, XY_B_locs, xy_Cs, xy_ABs, xy_A_locs, xy_B_locs)
    def get_component_fits(self, dates=None):
        '''
        Return the vA, vB, vC, rho_AB, theta_AB, rho_AC, theta_AC for all dates provided.
        Separations are in AU and position angles in degrees in [0, 360).
        These are mainly as inputs to a fit.
        '''
        if dates is None and self.obs_dates is None:
            raise RuntimeError("Must provide input dates or specify observation dates upon creation of orbit object.")
        if dates is None and self.obs_dates is not None:
            dates = self.obs_dates
        dates = np.atleast_1d(dates)
        N = len(dates)
        vAs = np.empty(N, dtype=np.float64)
        vBs = np.empty(N, dtype=np.float64)
        vCs = np.empty(N, dtype=np.float64)
        rho_ABs = np.empty(N, dtype=np.float64)
        theta_ABs = np.empty(N, dtype=np.float64)
        rho_ACs = np.empty(N, dtype=np.float64)
        theta_ACs = np.empty(N, dtype=np.float64)
        for i,date in enumerate(dates):
            vA, vB, vC, XY_A, XY_B, XY_C, XY_AB, XY_A_loc, XY_B_loc, xy_C, xy_AB, xy_A_loc, xy_B_loc = self.get_orbit(date)
            vAs[i] = vA
            vBs[i] = vB
            vCs[i] = vC
            # For AB pair
            # Calculate rho, theta from XY_A, XY_B, and XY_C
            X_A, Y_A = XY_A
            X_B, Y_B = XY_B
            X_C, Y_C = XY_C
            rho_AB = np.sqrt((X_B - X_A)**2 + (Y_B - Y_A)**2) # [AU]
            theta_AB = np.arctan2((Y_B - Y_A), (X_B - X_A)) * 180/np.pi # [Deg]
            if theta_AB < 0: # ensure that 0 <= theta <= 360
                theta_AB += 360.
            rho_ABs[i] = rho_AB
            theta_ABs[i] = theta_AB
            rho_AC = np.sqrt((X_C - X_A)**2 + (Y_C - Y_A)**2) # [AU]
            theta_AC = np.arctan2((Y_C - Y_A), (X_C - X_A)) * 180/np.pi # [Deg]
            if theta_AC < 0:
                theta_AC += 360.
            rho_ACs[i] = rho_AC
            theta_ACs[i] = theta_AC
        return (vAs, vBs, vCs, rho_ABs, theta_ABs, rho_ACs, theta_ACs)
# Registry of the available orbit models, keyed by class name.
models = {"Binary":Binary, "Triple":Triple}
|
ianczeREPO_NAMEPSOAPPATH_START.@PSOAP_extracted@PSOAP-master@attic@orbit_conventional.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "exoplanet-dev/celerite2",
"repo_path": "celerite2_extracted/celerite2-main/python/celerite2/pymc3/__init__.py",
"type": "Python"
}
|
# -*- coding: utf-8 -*-
__all__ = ["terms", "GaussianProcess"]
def __set_compiler_flags():
    """Append narrowing/exception-related flags to theano's gcc cxxflags.

    Each flag is only appended if it is not already present, so the
    function is idempotent.
    """
    import theano

    wanted = (
        "-Wno-c++11-narrowing",
        "-fno-exceptions",
        "-fno-unwind-tables",
        "-fno-asynchronous-unwind-tables",
    )
    flags = theano.config.gcc__cxxflags
    for flag in wanted:
        if flag not in flags:
            flags = f"{flags} {flag}"
    theano.config.gcc__cxxflags = flags


__set_compiler_flags()
from celerite2.pymc3 import terms
from celerite2.pymc3.celerite2 import GaussianProcess
|
exoplanet-devREPO_NAMEcelerite2PATH_START.@celerite2_extracted@celerite2-main@python@celerite2@pymc3@__init__.py@.PATH_END.py
|
{
"filename": "_textsrc.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/waterfall/_textsrc.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class TextsrcValidator(_plotly_utils.basevalidators.SrcValidator):
    """Validator for the ``textsrc`` property of ``waterfall`` traces."""

    def __init__(self, plotly_name="textsrc", parent_name="waterfall", **kwargs):
        # Default the edit type unless the caller overrides it.
        kwargs.setdefault("edit_type", "none")
        super().__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@waterfall@_textsrc.py@.PATH_END.py
|
{
"filename": "style.py",
"repo_name": "macrocosme/shwirl",
"repo_path": "shwirl_extracted/shwirl-master/shwirl/extern/vispy/util/svg/style.py",
"type": "Python"
}
|
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright (c) 2014, Nicolas P. Rougier
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
# -----------------------------------------------------------------------------
from . color import Color
from . number import Number
from . length import Length
# Maps SVG presentation-attribute names to the value-converter class used
# when parsing inline style strings (see Style.update below).
# The commented-out entries are recognized SVG attributes that are not
# handled yet.
_converters = {
    "fill": Color,
    "fill-opacity": Number,
    "stroke": Color,
    "stroke-opacity": Number,
    "opacity": Number,
    "stroke-width": Length,
    # "stroke-miterlimit": Number,
    # "stroke-dasharray": Lengths,
    # "stroke-dashoffset": Length,
}
class Style(object):
    """Holds a subset of SVG presentation attributes parsed from an inline
    ``style="..."`` string. Supported attributes are those listed in
    ``_converters``; each is exposed as an attribute with ``-`` replaced by
    ``_`` (e.g. ``fill_opacity``)."""

    def __init__(self):
        # Until update() receives real content, xml output stays empty.
        self._unset = True
        for name in _converters:
            setattr(self, name.replace("-", "_"), None)

    def update(self, content):
        """Parse a ``key:value;key:value`` style string into attributes."""
        if not content:
            return
        self._unset = False
        entries = [item.strip().split(":") for item in content.strip().split(";") if item]
        for key, value in dict(entries).items():
            converter = _converters.get(key)
            if converter is not None:
                setattr(self, key.replace("-", "_"), converter(value))

    @property
    def xml(self):
        return self._xml()

    def _xml(self, prefix=""):
        # Serialize back to an inline style attribute; empty if never set.
        if self._unset:
            return ""
        chunks = ['style="']
        for key in _converters:
            value = getattr(self, key.replace("-", "_"))
            if value is not None:
                chunks.append('%s:%s ' % (key, value))
        chunks.append('"')
        return "".join(chunks)
|
macrocosmeREPO_NAMEshwirlPATH_START.@shwirl_extracted@shwirl-master@shwirl@extern@vispy@util@svg@style.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "davidharvey1986/pyRRG",
"repo_path": "pyRRG_extracted/pyRRG-master/unittests/bugFixPyRRG/lib/python3.7/site-packages/pip/_internal/commands/__init__.py",
"type": "Python"
}
|
"""
Package containing all pip commands
"""
# The following comment should be removed at some point in the future.
# mypy: disallow-untyped-defs=False
# There is currently a bug in python/typeshed mentioned at
# https://github.com/python/typeshed/issues/3906 which causes the
# return type of difflib.get_close_matches to be reported
# as List[Sequence[str]] whereas it should have been List[str]
from __future__ import absolute_import
import importlib
from collections import OrderedDict, namedtuple
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
if MYPY_CHECK_RUNNING:
from typing import Any
from pip._internal.cli.base_command import Command
# Registry entry: import path of the command module, name of the Command
# subclass inside it, and the one-line help summary.
CommandInfo = namedtuple('CommandInfo', 'module_path, class_name, summary')
# The ordering matters for help display.
# Also, even though the module path starts with the same
# "pip._internal.commands" prefix in each case, we include the full path
# because it makes testing easier (specifically when modifying commands_dict
# in test setup / teardown by adding info for a FakeCommand class defined
# in a test-related module).
# Finally, we need to pass an iterable of pairs here rather than a dict
# so that the ordering won't be lost when using Python 2.7.
commands_dict = OrderedDict([
    ('install', CommandInfo(
        'pip._internal.commands.install', 'InstallCommand',
        'Install packages.',
    )),
    ('download', CommandInfo(
        'pip._internal.commands.download', 'DownloadCommand',
        'Download packages.',
    )),
    ('uninstall', CommandInfo(
        'pip._internal.commands.uninstall', 'UninstallCommand',
        'Uninstall packages.',
    )),
    ('freeze', CommandInfo(
        'pip._internal.commands.freeze', 'FreezeCommand',
        'Output installed packages in requirements format.',
    )),
    ('list', CommandInfo(
        'pip._internal.commands.list', 'ListCommand',
        'List installed packages.',
    )),
    ('show', CommandInfo(
        'pip._internal.commands.show', 'ShowCommand',
        'Show information about installed packages.',
    )),
    ('check', CommandInfo(
        'pip._internal.commands.check', 'CheckCommand',
        'Verify installed packages have compatible dependencies.',
    )),
    ('config', CommandInfo(
        'pip._internal.commands.configuration', 'ConfigurationCommand',
        'Manage local and global configuration.',
    )),
    ('search', CommandInfo(
        'pip._internal.commands.search', 'SearchCommand',
        'Search PyPI for packages.',
    )),
    ('cache', CommandInfo(
        'pip._internal.commands.cache', 'CacheCommand',
        "Inspect and manage pip's wheel cache.",
    )),
    ('wheel', CommandInfo(
        'pip._internal.commands.wheel', 'WheelCommand',
        'Build wheels from your requirements.',
    )),
    ('hash', CommandInfo(
        'pip._internal.commands.hash', 'HashCommand',
        'Compute hashes of package archives.',
    )),
    ('completion', CommandInfo(
        'pip._internal.commands.completion', 'CompletionCommand',
        'A helper command used for command completion.',
    )),
    ('debug', CommandInfo(
        'pip._internal.commands.debug', 'DebugCommand',
        'Show information useful for debugging.',
    )),
    ('help', CommandInfo(
        'pip._internal.commands.help', 'HelpCommand',
        'Show help for commands.',
    )),
])  # type: OrderedDict[str, CommandInfo]
def create_command(name, **kwargs):
    # type: (str, **Any) -> Command
    """
    Create an instance of the Command class registered under ``name``.
    """
    module_path, class_name, summary = commands_dict[name]
    command_class = getattr(importlib.import_module(module_path), class_name)
    return command_class(name=name, summary=summary, **kwargs)
def get_similar_commands(name):
    """Return the closest known command name to *name*, or False if none."""
    from difflib import get_close_matches

    matches = get_close_matches(name.lower(), commands_dict.keys())
    return matches[0] if matches else False
|
davidharvey1986REPO_NAMEpyRRGPATH_START.@pyRRG_extracted@pyRRG-master@unittests@bugFixPyRRG@lib@python3.7@site-packages@pip@_internal@commands@__init__.py@.PATH_END.py
|
{
"filename": "proc_daemon.py",
"repo_name": "icrar/daliuge",
"repo_path": "daliuge_extracted/daliuge-master/daliuge-engine/dlg/manager/proc_daemon.py",
"type": "Python"
}
|
#
# ICRAR - International Centre for Radio Astronomy Research
# (c) UWA - The University of Western Australia, 2016
# Copyright by UWA (in the framework of the ICRAR)
# All rights reserved
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307 USA
#
"""
Module containing the DALiuGE Daemon class and command-line entry point to use it
"""
import functools
import json
import logging
import signal
import socket
import sys
import threading
import bottle
import zeroconf as zc
from dlg import constants
from .. import utils
from ..restserver import RestServer
from dlg.nm_dim_assigner import NMAssigner
logger = logging.getLogger(__name__)
def get_tool():
    """Return the dlg ``tool`` module.

    Imported lazily because the tool module imports this module at import
    time (to expose it as a 'dlg' command), which would otherwise create a
    circular dependency.
    """
    from ..common import tool

    return tool
def _get_address(zeroconf_service_info):
if tuple(map(int, zc.__version__.split(".")))[:2] >= (0, 23):
return zeroconf_service_info.addresses[0]
return zeroconf_service_info.address
class DlgDaemon(RestServer):
"""
The DALiuGE Daemon
The DALiuGE Daemon is the long-running process that we assume is always
available for contacting, and that acts as the bootstrapping of the whole
system. It exposes a REST API through which users can start the different
Drop Managers (node, dataisland and master) and query their status.
Optionally it can also start automatically the node manager (default: yes)
and the master manager (default: no) at creation time.
"""
    def __init__(self, master=False, noNM=False, disable_zeroconf=False, verbosity=0):
        """Create the daemon and register its REST routes.

        master: if True, start a Master Manager immediately.
        noNM: if True, do NOT start a Node Manager immediately.
        disable_zeroconf: if True, skip all zeroconf registration/discovery.
        verbosity: verbosity level forwarded to the spawned managers.
        """
        super(DlgDaemon, self).__init__()
        self._shutting_down = False
        self._verbosity = verbosity
        # The three processes we run (Node, Data Island and Master managers)
        self._nm_proc = None
        self._dim_proc = None
        self._mm_proc = None
        # Zeroconf for NM and MM
        self._zeroconf = None if disable_zeroconf else zc.Zeroconf()
        self._nm_info = None
        self._dim_info = None
        self._mm_nm_browser = None
        self._mm_dim_browser = None
        # Starting managers
        app = self.app
        app.post("/managers/node/start", callback=self.rest_startNM)
        app.post("/managers/node/stop", callback=self.rest_stopNM)
        app.post("/managers/island/start", callback=self.rest_startDIM)
        app.post("/managers/island/stop", callback=self.rest_stopDIM)
        app.post("/managers/master/start", callback=self.rest_startMM)
        app.post("/managers/master/stop", callback=self.rest_stopMM)
        # Querying about managers
        app.get("/", callback=self.rest_getMgrs)
        app.get("/managers", callback=self.rest_getMgrs)
        app.get("/managers/master", callback=self.rest_getMMInfo)
        app.get("/managers/island", callback=self.rest_getDIMInfo)
        app.get("/managers/node", callback=self.rest_getNMInfo)
        # Automatically start those that we need
        if master:
            self.startMM()
        if not noNM:
            self.startNM()
    def stop(self, timeout=None):
        """
        Stops this DALiuGE Daemon, terminating all its child processes and its REST
        server.

        The REST server is stopped first so no new start/stop requests can
        arrive while the managers are being torn down; zeroconf is closed
        last since the stop* methods still use it to deregister services.
        """
        # Flag consulted by the zeroconf callbacks to avoid talking to a
        # manager that is already going away.
        self._shutting_down = True
        super(DlgDaemon, self).stop(timeout)
        self.stopNM(timeout)
        self.stopDIM(timeout)
        self.stopMM(timeout)
        self._stop_zeroconf()
        logger.info("DALiuGE Daemon stopped")
def _stop_zeroconf(self):
if not self._zeroconf:
return
# Stop the MM service browser, the NM registration, and ZC itself
if self._mm_nm_browser:
self._mm_nm_browser.cancel()
self._mm_nm_browser.join()
if self._mm_dim_browser:
self._mm_dim_browser.cancel()
self._mm_dim_browser.join()
self._zeroconf.close()
logger.info("Zeroconf stopped")
    def _stop_rest_server(self, timeout):
        # Gracefully shut down the REST server: close the app, then ask the
        # IOLoop (which runs in a different thread) to stop itself, waiting
        # up to `timeout` seconds for confirmation before stopping the
        # underlying HTTP server.
        if self._ioloop:
            self.app.close()
            # Submit a callback to the IOLoop to stop itself and wait until it's
            # done with it
            logger.debug("Stopping the web server")
            ioloop_stopped = threading.Event()
            def stop_ioloop():
                self._ioloop.stop()
                ioloop_stopped.set()
            self._ioloop.add_callback(stop_ioloop)
            if not ioloop_stopped.wait(timeout):
                logger.warning("Timed out while waiting for the server to stop")
            self._server.stop()
        self._started = False
def _stop_manager(self, name, timeout):
proc = getattr(self, name)
logger.debug("Stopping manager %s", name)
if proc:
utils.terminate_or_kill(proc, timeout)
pid = proc.pid
setattr(self, name, None)
return {"terminated": pid}
else:
logger.warning("No %s manager found!", name)
return {}
def stopNM(self, timeout=10):
if self._nm_info:
utils.deregister_service(self._zeroconf, self._nm_info)
return self._stop_manager("_nm_proc", timeout)
def stopDIM(self, timeout=10):
if self._dim_info:
utils.deregister_service(self._zeroconf, self._dim_info)
self._stop_manager("_dim_proc", timeout)
def stopMM(self, timeout=10):
self._stop_manager("_mm_proc", timeout)
# Methods to start and stop the individual managers
    def startNM(self):
        """Spawn a Node Manager child process (listening on all interfaces)
        and, when zeroconf is enabled, register it so the Master Manager can
        discover it."""
        tool = get_tool()
        args = ["--host", "0.0.0.0"]
        args += self._verbosity_as_cmdline()
        logger.info("Starting Node Drop Manager with args: %s", (" ".join(args)))
        self._nm_proc = tool.start_process("nm", args)
        logger.info("Started Node Drop Manager with PID %d", self._nm_proc.pid)
        # Registering the new NodeManager via zeroconf so it gets discovered
        # by the Master Manager
        if self._zeroconf:
            addrs = utils.get_local_ip_addr()
            logger.info("Registering this NM with zeroconf: %s", addrs)
            # NOTE(review): only the first local address is registered
            self._nm_info = utils.register_service(
                self._zeroconf,
                "NodeManager",
                socket.gethostname(),
                addrs[0][0],
                constants.NODE_DEFAULT_REST_PORT,
            )
        return
def startDIM(self, nodes):
    """Start a Data Island Manager subprocess and advertise it via zeroconf.

    :param nodes: iterable of node hostnames the DIM should manage; when
        falsy no ``--nodes`` argument is passed to the subprocess.
    """
    tool = get_tool()
    args = ["--host", "0.0.0.0"]
    args += self._verbosity_as_cmdline()
    if nodes:
        args += ["--nodes", ",".join(nodes)]
    logger.info("Starting Data Island Drop Manager with args: %s", (" ".join(args)))
    self._dim_proc = tool.start_process("dim", args)
    logger.info("Started Data Island Drop Manager with PID %d", self._dim_proc.pid)
    # Registering the new DIM via zeroconf so it gets discovered
    # by the Master Manager
    if self._zeroconf:
        addrs = utils.get_local_ip_addr()
        logger.info("Registering this DIM with zeroconf: %s", addrs)
        self._dim_info = utils.register_service(
            self._zeroconf,
            "DIM",
            socket.gethostname(),
            addrs[0][0],
            constants.ISLAND_DEFAULT_REST_PORT,
        )
    return
def startMM(self):
    """Start a Master Manager subprocess and wire zeroconf discovery.

    Besides launching the MM process, this subscribes to zeroconf
    announcements from NodeManagers and DIMs so newly appearing (or
    disappearing) managers are added to / removed from the MM.
    """
    tool = get_tool()
    args = ["--host", "0.0.0.0"]
    args += self._verbosity_as_cmdline()
    logger.info("Starting Master Drop Manager with args: %s", (" ".join(args)))
    self._mm_proc = tool.start_process("mm", args)
    logger.info("Started Master Drop Manager with PID %d", self._mm_proc.pid)
    # Also subscribe to zeroconf events coming from NodeManagers and feed
    # the Master Manager with the new hosts we find
    if self._zeroconf:
        nm_assigner = NMAssigner()

        # Shared handler for both NM and DIM service events; the
        # adder/remover/accessor callables are bound per service type below.
        def _callback(
            zeroconf, service_type, name, state_change, adder, remover, accessor
        ):
            info = zeroconf.get_service_info(service_type, name)
            if state_change is zc.ServiceStateChange.Added:
                server = socket.inet_ntoa(_get_address(info))
                port = info.port
                adder(name, server, port)
                logger.info(
                    "Found a new %s on %s:%d, will add it to the MM",
                    service_type,
                    server,
                    port,
                )
            elif state_change is zc.ServiceStateChange.Removed:
                server, port = accessor(name)
                logger.info(
                    "%s on %s:%d disappeared, removing it from the MM",
                    service_type,
                    server,
                    port,
                )
                # Don't bother to remove it if we're shutting down. This way
                # we avoid hanging in here if the MM is down already but
                # we are trying to remove our NM who has just disappeared
                if not self._shutting_down:
                    remover(name)

        nm_callback = functools.partial(
            _callback,
            adder=nm_assigner.add_nm,
            remover=nm_assigner.remove_nm,
            accessor=nm_assigner.get_nm,
        )
        dim_callback = functools.partial(
            _callback,
            adder=nm_assigner.add_dim,
            remover=nm_assigner.remove_dim,
            accessor=nm_assigner.get_dim,
        )
        self._mm_nm_browser = utils.browse_service(
            self._zeroconf, "NodeManager", "tcp", nm_callback
        )
        self._mm_dim_browser = utils.browse_service(
            self._zeroconf,
            "DIM",
            "tcp",
            dim_callback,  # DIM since name must be < 15 bytes
        )
        logger.info("Zeroconf started")
    return
def _verbosity_as_cmdline(self):
    """Translate the numeric verbosity into a ``-v...``/``-q...`` flag list.

    :return: a one-element list with the flag, or an empty tuple when the
        verbosity is 0 (no flag needed).
    """
    level = self._verbosity
    if level > 0:
        return ["-" + "v" * level]
    if level < 0:
        return ["-" + "q" * (-level)]
    return ()
# Rest interface
def _rest_start_manager(self, proc, start_method):
    """REST helper: start a manager unless its process already exists.

    Aborts the request with HTTP 409 (Conflict) when *proc* is set.
    """
    if proc is not None:
        # Refuse to start a second instance of the same manager
        bottle.abort(409, "The Drop Manager is already running")
    start_method()
def _rest_stop_manager(self, proc, stop_method):
    """REST helper: stop a running manager and return its result as JSON.

    Aborts the request with HTTP 409 (Conflict) when *proc* is None.
    """
    if proc is None:
        bottle.abort(409, "The Drop Manager is not running")
    logger.debug("Calling %s", stop_method)
    result = stop_method()
    return json.dumps(result)
def _rest_get_manager_info(self, proc):
    """REST helper: return a JSON payload with the manager's PID.

    :param proc: the manager subprocess, or None when not running.
    :return: JSON string ``{"pid": <pid-or-null>}``.
    """
    # Always declare the JSON content type; previously it was only set on
    # the running branch, so the "pid": null response had no content type.
    bottle.response.content_type = "application/json"
    payload = json.dumps({"pid": proc.pid if proc else None})
    if proc:
        logger.info("Sending response: %s", payload)
    return payload
def rest_getMgrs(self):
    """REST endpoint: JSON map of manager type -> PID (or null if stopped)."""
    procs = {
        "master": self._mm_proc,
        "island": self._dim_proc,
        "node": self._nm_proc,
    }
    # Replace each running process object with its PID; keep the falsy
    # placeholder (None) for managers that are not running.
    mgrs = {kind: (proc.pid if proc else proc) for kind, proc in procs.items()}
    logger.info("Sending response: %s", json.dumps(mgrs))
    return json.dumps(mgrs)
def rest_startNM(self):
    """REST endpoint: start the Node Manager and return its info as JSON."""
    self._rest_start_manager(self._nm_proc, self.startNM)
    return self.rest_getNMInfo()
def rest_stopNM(self):
    """REST endpoint: stop the Node Manager.

    :return: the JSON body produced by ``_rest_stop_manager``; it used to
        be discarded, yielding an empty HTTP response.
    """
    return self._rest_stop_manager(self._nm_proc, self.stopNM)
def rest_startDIM(self):
    """REST endpoint: start the DIM with the node list from the request body."""
    payload = bottle.request.json
    # When the body is absent or has no "nodes" entry we pass an empty
    # mapping, in which case the DIM simply falls back to this host.
    nodes = payload["nodes"] if payload and "nodes" in payload else {}
    self._rest_start_manager(
        self._dim_proc, functools.partial(self.startDIM, nodes)
    )
    return self.rest_getDIMInfo()
def rest_stopDIM(self):
    """REST endpoint: stop the Data Island Manager.

    :return: the JSON body produced by ``_rest_stop_manager``; it used to
        be discarded, yielding an empty HTTP response.
    """
    return self._rest_stop_manager(self._dim_proc, self.stopDIM)
def rest_startMM(self):
    """REST endpoint: start the Master Manager and return its info as JSON."""
    self._rest_start_manager(self._mm_proc, self.startMM)
    return self.rest_getMMInfo()
def rest_stopMM(self):
    """REST endpoint: stop the Master Manager.

    :return: the JSON body produced by ``_rest_stop_manager``; it used to
        be discarded, yielding an empty HTTP response.
    """
    return self._rest_stop_manager(self._mm_proc, self.stopMM)
def rest_getNMInfo(self):
    """REST endpoint: JSON info (PID) for the Node Manager."""
    return self._rest_get_manager_info(self._nm_proc)
def rest_getDIMInfo(self):
    """REST endpoint: JSON info (PID) for the Data Island Manager."""
    return self._rest_get_manager_info(self._dim_proc)
def rest_getMMInfo(self):
    """REST endpoint: JSON info (PID) for the Master Manager."""
    return self._rest_get_manager_info(self._mm_proc)
terminating = False
def run_with_cmdline(parser, args):
    """Parse command-line options, start a DlgDaemon and block until a signal.

    :param parser: an optparse-style parser to which options are added.
    :param args: the command-line argument list to parse.
    """
    parser.add_option(
        "-m",
        "--master",
        action="store_true",
        dest="master",
        help="Start this DALiuGE daemon as the master daemon",
        default=False,
    )
    parser.add_option(
        "--no-nm",
        action="store_true",
        dest="noNM",
        help="Don't start a NodeDropManager by default",
        default=False,
    )
    parser.add_option(
        "--no-zeroconf",
        action="store_true",
        dest="noZC",
        help="Don't enable zeroconf on this DALiuGE daemon",
        default=False,
    )
    parser.add_option(
        "-v",
        "--verbose",
        action="count",
        dest="verbose",
        help="Become more verbose. The more flags, the more verbose",
        default=0,
    )
    parser.add_option(
        "-q",
        "--quiet",
        action="count",
        dest="quiet",
        help="Be less verbose. The more flags, the quieter",
        default=0,
    )
    (opts, args) = parser.parse_args(args)

    # -v and -q are exclusive
    if opts.verbose and opts.quiet:
        parser.error("-v and -q cannot be specified together")

    logging.basicConfig(stream=sys.stdout, level=logging.INFO)

    # Net verbosity is the difference between the two counters
    daemon = DlgDaemon(opts.master, opts.noNM, opts.noZC, opts.verbose - opts.quiet)

    # Signal handling, which stops the daemon
    def handle_signal(signalNo, stack_frame):
        global terminating
        if terminating:
            # A previous signal already triggered the shutdown
            return
        logger.info("Received signal %d, will stop the daemon now", signalNo)
        terminating = True
        daemon.stop(10)

    signal.signal(signal.SIGINT, handle_signal)
    signal.signal(signal.SIGTERM, handle_signal)

    # Go, go, go!
    # The daemon serves from a secondary thread; the main thread just waits
    # for signals (signal handlers only run in the main thread).
    t = threading.Thread(
        target=daemon.start, args=("0.0.0.0", constants.DAEMON_DEFAULT_REST_PORT)
    )
    t.start()
    signal.pause()
|
icrarREPO_NAMEdaliugePATH_START.@daliuge_extracted@daliuge-master@daliuge-engine@dlg@manager@proc_daemon.py@.PATH_END.py
|
{
"filename": "_bgcolor.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/waterfall/hoverlabel/_bgcolor.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class BgcolorValidator(_plotly_utils.basevalidators.ColorValidator):
    """Validator for the ``waterfall.hoverlabel.bgcolor`` plotly property."""

    def __init__(
        self, plotly_name="bgcolor", parent_name="waterfall.hoverlabel", **kwargs
    ):
        super(BgcolorValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            # Defaults below can be overridden by the caller through kwargs
            array_ok=kwargs.pop("array_ok", True),
            edit_type=kwargs.pop("edit_type", "none"),
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@waterfall@hoverlabel@_bgcolor.py@.PATH_END.py
|
{
"filename": "psf_estimation.py",
"repo_name": "torluca/morphofit",
"repo_path": "morphofit_extracted/morphofit-master/morphofit/psf_estimation.py",
"type": "Python"
}
|
#! /usr/bin/env python
# Copyright (C) 2019 ETH Zurich, Institute for Particle Physics and Astrophysics
# Author: Luca Tortorelli
# System imports
from __future__ import (print_function, division, absolute_import,
unicode_literals)
# External modules
import os
from astropy.io import fits
from astropy.table import Table
import numpy as np
from astropy.nddata import Cutout2D
from scipy.optimize import curve_fit
# from scipy import linalg
from astropy.stats import sigma_clipped_stats
from astropy.stats import SigmaClip
from photutils import Background2D, MedianBackground
from astropy.nddata import NDData
from photutils.psf import extract_stars
from photutils import EPSFBuilder
# from sklearn import preprocessing
from sklearn.decomposition import PCA
# morphofit imports
from morphofit.utils import match_sources_with_star_catalogue, ra_dec_to_pixels
from morphofit.utils import get_logger
logger = get_logger(__file__)
def two_dim_moffat_profile(x_y, sky, amplt, x0, y0, alpha, beta):
    """
    Two dimensional circular Moffat profile function.
    https://www.aspylib.com/doc/aspylib_fitting.html

    :param x_y: tuple of (x, y) position coordinate arrays.
    :param sky: sky background value.
    :param amplt: amplitude of the profile.
    :param x0: x central coordinate.
    :param y0: y central coordinate.
    :param alpha: alpha parameter related to FWHM.
    :param beta: beta parameter of the Moffat profile.
    :return: contiguous flattened Moffat profile array.
    """
    x, y = x_y
    # Squared radial distance from the profile centre
    r_sq = (x - x0) ** 2 + (y - y0) ** 2
    profile = sky + (amplt / ((1 + r_sq / (alpha ** 2)) ** beta))
    return profile.ravel()
def create_cutout_image(x_star, y_star, size, data):
    """
    Cut a square ``size`` x ``size`` stamp out of ``data`` around a star.

    :param x_star: x pixel coordinate of the star.
    :param y_star: y pixel coordinate of the star.
    :param size: side length of the square cutout in pixels.
    :param data: 2D image array to cut from.
    :return: astropy ``Cutout2D`` object.
    """
    # NOTE(review): only the x coordinate is shifted by 1 here (presumably
    # FITS 1-based -> numpy 0-based); callers elsewhere additionally pass
    # fractional offsets (-0.5, -1.5) — confirm the intended convention.
    position = (x_star - 1, y_star)
    sizeimg = (size, size)  # pixels
    cutout = Cutout2D(data, position, sizeimg)
    return cutout
def subtract_background_from_image(data, sigma):
    """
    Subtract the sigma-clipped median background level from an image.

    :param data: 2D image array.
    :param sigma: clipping threshold passed to ``sigma_clipped_stats``.
    :return: background-subtracted copy of ``data``.
    """
    # Only the clipped median is used as the background estimate
    _, median_level, _ = sigma_clipped_stats(data, sigma=sigma)
    return data - median_level
def subtract_2d_background_from_image(data, size=(30, 30), sigma=2, filter_size=(5, 5), iters=10):
    """
    Subtract a spatially varying 2D background model from an image.

    :param data: 2D image array.
    :param size: box size used by ``Background2D``.
    :param sigma: sigma-clipping threshold.
    :param filter_size: median filter size applied to the background boxes.
    :param iters: maximum number of sigma-clipping iterations.
    :return: background-subtracted copy of ``data``.
    """
    clipper = SigmaClip(sigma=sigma, maxiters=iters)
    estimator = MedianBackground()
    model = Background2D(data, size, filter_size=filter_size,
                         sigma_clip=clipper, bkg_estimator=estimator)
    return data - model.background
def fit_2d_moffat_profile(data, starting_point, size):
    """
    Fit a 2D circular Moffat profile to a star cutout.

    :param data: 2D cutout array of shape (size, size).
    :param starting_point: initial guess [sky, amplt, x0, y0, alpha, beta].
    :param size: side length of the cutout in pixels.
    :return: (popt, fwhm, beta) — best-fit parameters, FWHM in pixels and
        the Moffat beta parameter.
    """
    grid_x, grid_y = np.meshgrid(np.arange(0, size), np.arange(0, size))
    flattened = data.ravel()
    popt, pcov = curve_fit(two_dim_moffat_profile, (grid_x, grid_y), flattened, starting_point)
    best_alpha = popt[4]
    best_beta = popt[5]
    # FWHM of a Moffat profile: 2 * alpha * sqrt(2^(1/beta) - 1)
    fwhm = 2 * best_alpha * np.sqrt(2 ** (1 / best_beta) - 1)
    return popt, fwhm, best_beta
def beta_seeing_evaluation(sci_image_filename, catalogue, ext_star_catalogue, pixscale,
                           background_noise_amp, psf_fwhm_init_guess, catalogue_ra_keyword, catalogue_dec_keyword,
                           ext_stars_ra_keyword, ext_stars_dec_keyword):
    """
    Estimate image seeing (FWHM, arcsec) and Moffat beta from matched stars.

    Stars from an external star catalogue are matched to the source
    catalogue, a 2D Moffat profile is fitted to a 51x51 cutout of each,
    and the medians of the plausible FWHM/beta values are returned.

    :param sci_image_filename: science image FITS filename.
    :param catalogue: source catalogue to match against the star catalogue.
    :param ext_star_catalogue: external star catalogue FITS filename.
    :param pixscale: pixel scale in arcsec/pixel.
    :param background_noise_amp: initial guess for the sky level.
    :param psf_fwhm_init_guess: initial FWHM guess in arcsec.
    :param catalogue_ra_keyword: RA column name in the source catalogue.
    :param catalogue_dec_keyword: Dec column name in the source catalogue.
    :param ext_stars_ra_keyword: RA column name in the star catalogue.
    :param ext_stars_dec_keyword: Dec column name in the star catalogue.
    :return: (fwhm, beta) — median FWHM in arcsec and median Moffat beta.
    :raises ValueError: when no matched star falls inside the image.
    """
    max_dist_arcsec = 1.0
    data = fits.getdata(sci_image_filename)
    select_in_star_cat, not_select_in_star_cat = match_sources_with_star_catalogue(catalogue, ext_star_catalogue,
                                                                                   catalogue_ra_keyword,
                                                                                   catalogue_dec_keyword,
                                                                                   ext_stars_ra_keyword,
                                                                                   ext_stars_dec_keyword,
                                                                                   max_dist_arcsec)
    with fits.open(ext_star_catalogue) as f:
        starcat = f[1].data
        ra = np.array(starcat[ext_stars_ra_keyword], dtype=float)[select_in_star_cat]
        dec = np.array(starcat[ext_stars_dec_keyword], dtype=float)[select_in_star_cat]
    x_stars, y_stars = ra_dec_to_pixels(ra, dec, sci_image_filename)
    # The image dimensions may live in the primary header or in ext 1
    try:
        h = fits.getheader(sci_image_filename, ext=0)
        mask = np.where((x_stars > 0) & (x_stars < h['NAXIS1']) & (y_stars > 0) & (y_stars < h['NAXIS2']))
    except Exception as e:
        logger.info(e)
        h = fits.getheader(sci_image_filename, ext=1)
        mask = np.where((x_stars > 0) & (x_stars < h['NAXIS1']) & (y_stars > 0) & (y_stars < h['NAXIS2']))
    if mask[0].size != 0:
        x_stars = x_stars[mask]
        y_stars = y_stars[mask]
    else:
        logger.info('No stars found, raise error...')
        raise ValueError
    fwhm_array = []
    beta_array = []
    for i in range(len(x_stars)):
        size = 51
        cutout = create_cutout_image(x_stars[i], y_stars[i], size, data)
        if np.isnan(np.min(cutout.data)):  # avoids nan in cutouts
            continue
        # BUG FIX: the original condition was
        #   len(cutout.data) == size & len(cutout.data[0]) == size
        # where bitwise & binds tighter than ==, silently changing the
        # comparison; use an explicit, parenthesised "and" instead.
        if (len(cutout.data) == size) and (len(cutout.data[0]) == size):  # avoids stars at edges
            starting_point = [background_noise_amp, 1., int(size / 2), int(size / 2),
                              psf_fwhm_init_guess / pixscale, 3.5]
            try:
                popt, fwhm, beta = fit_2d_moffat_profile(cutout.data, starting_point, size)
                fwhm_array.append(fwhm)
                beta_array.append(beta)
            except Exception as e:
                # Failed fits are simply skipped
                logger.info(e)
        else:
            continue
    fwhm_array = np.array(fwhm_array)
    beta_array = np.array(beta_array)
    # Keep only physically plausible values before taking the medians
    mask_mean = np.where((fwhm_array * pixscale < 1.) & (fwhm_array * pixscale > 0.05))
    mask_beta = np.where((beta_array > 0) & (beta_array < 10))
    fwhm = np.nanmedian(fwhm_array[mask_mean]) * pixscale
    beta = np.nanmedian(beta_array[mask_beta])
    logger.info('Image: {}, FWHM: {}, Beta Moffat: {}'.format(sci_image_filename, fwhm, beta))
    return fwhm, beta
def get_hst_seeing(sci_image_name, catalogue, external_star_catalogue, pixel_scale, background_noise_amp,
                   psf_fwhm_init_guess, catalogue_ra_keyword, catalogue_dec_keyword, ext_stars_ra_keyword,
                   ext_stars_dec_keyword):
    """
    Seeing estimate for HST images.

    Falls back to (psf_fwhm_init_guess, 3.5) when the star-based
    estimation fails for any reason.

    :return: (fwhm, beta) tuple.
    """
    try:
        return beta_seeing_evaluation(sci_image_name, catalogue, external_star_catalogue, pixel_scale,
                                      background_noise_amp, psf_fwhm_init_guess, catalogue_ra_keyword,
                                      catalogue_dec_keyword, ext_stars_ra_keyword, ext_stars_dec_keyword)
    except Exception as err:
        logger.info(err)
        return psf_fwhm_init_guess, 3.5
def get_omegacam_seeing(sci_image_name, catalogue, external_star_catalogue, pixel_scale, background_noise_amp,
                        psf_fwhm_init_guess, catalogue_ra_keyword, catalogue_dec_keyword, ext_stars_ra_keyword,
                        ext_stars_dec_keyword):
    """
    Seeing estimate for OmegaCAM images.

    The FWHM is always taken from the image header PSF_RAD keyword; beta
    comes from the Moffat fits when they succeed and defaults to 3.5
    otherwise.

    :return: (fwhm, beta) tuple.
    """
    try:
        # The fitted FWHM is intentionally discarded in favour of PSF_RAD
        _, beta = beta_seeing_evaluation(sci_image_name, catalogue, external_star_catalogue, pixel_scale,
                                         background_noise_amp, psf_fwhm_init_guess, catalogue_ra_keyword,
                                         catalogue_dec_keyword, ext_stars_ra_keyword, ext_stars_dec_keyword)
        fwhm = fits.getheader(sci_image_name)['PSF_RAD']
    except Exception as err:
        logger.info(err)
        fwhm = fits.getheader(sci_image_name)['PSF_RAD']
        beta = 3.5
    return fwhm, beta
def get_jwst_seeing(sci_image_name, catalogue, external_star_catalogue, pixel_scale, background_noise_amp,
                    psf_fwhm_init_guess, catalogue_ra_keyword, catalogue_dec_keyword, ext_stars_ra_keyword,
                    ext_stars_dec_keyword):
    """
    Seeing estimate for JWST images.

    Falls back to (psf_fwhm_init_guess, 3.5) when the star-based
    estimation fails for any reason.

    :return: (fwhm, beta) tuple.
    """
    fallback = (psf_fwhm_init_guess, 3.5)
    try:
        fwhm, beta = beta_seeing_evaluation(sci_image_name, catalogue, external_star_catalogue, pixel_scale,
                                            background_noise_amp, psf_fwhm_init_guess, catalogue_ra_keyword,
                                            catalogue_dec_keyword, ext_stars_ra_keyword, ext_stars_dec_keyword)
    except Exception as err:
        logger.info(err)
        fwhm, beta = fallback
    return fwhm, beta
def get_seeings(telescope_name, sci_images, wavebands, catalogues, ext_star_cat, pixel_scale,
                background_noise_amps, psf_fwhm_init_guesses, catalogue_ra_keyword, catalogue_dec_keyword,
                ext_stars_ra_keyword, ext_stars_dec_keyword):
    """
    This function computes the fwhm and beta of stars by fitting a 2D circular Moffat profile.
    Stars are found by matching with an external star catalogue.

    :param telescope_name: one of 'HST', 'OCAM', 'JWST'.
    :param sci_images: list of science image filenames, one per waveband.
    :param wavebands: list of waveband names, parallel to sci_images.
    :param catalogues: list of source catalogues, parallel to sci_images.
    :param ext_star_cat: external star catalogue filename.
    :param pixel_scale: pixel scale in arcsec/pixel.
    :param background_noise_amps: dict of per-waveband sky-level guesses.
    :param psf_fwhm_init_guesses: list of initial FWHM guesses (arcsec).
    :param catalogue_ra_keyword: RA column name in the source catalogues.
    :param catalogue_dec_keyword: Dec column name in the source catalogues.
    :param ext_stars_ra_keyword: RA column name in the star catalogue.
    :param ext_stars_dec_keyword: Dec column name in the star catalogue.
    :return fwhms, betas: dict, dictionaries of fwhms and betas of the 2D Moffat profiles.
    :raises NotImplementedError: for an unknown telescope name.
    """
    seeing_switcher = {'HST': get_hst_seeing, 'OCAM': get_omegacam_seeing, 'JWST': get_jwst_seeing}
    # BUG FIX: the previous fallback was a zero-argument lambda returning a
    # string; calling it with the eleven arguments below raised a confusing
    # TypeError. Fail explicitly for unsupported telescopes instead.
    try:
        seeing_function = seeing_switcher[telescope_name]
    except KeyError:
        raise NotImplementedError(
            'Seeing estimation for telescope {} is not implemented'.format(telescope_name))
    fwhms = {}
    betas = {}
    # enumerate avoids repeated list.index lookups, which are O(n) and
    # wrong when the same filename appears twice
    for idx_name, name in enumerate(sci_images):
        waveband = wavebands[idx_name]
        background_noise_amp = background_noise_amps[waveband]
        psf_fwhm_init_guess = psf_fwhm_init_guesses[idx_name]
        fwhms[waveband], betas[waveband] = seeing_function(name, catalogues[idx_name],
                                                           ext_star_cat, pixel_scale,
                                                           background_noise_amp,
                                                           psf_fwhm_init_guess,
                                                           catalogue_ra_keyword,
                                                           catalogue_dec_keyword,
                                                           ext_stars_ra_keyword,
                                                           ext_stars_dec_keyword)
    return fwhms, betas
def estimate_cutout_background(cutout, seg_cutout, sigma):
    """
    Sigma-clipped statistics of the background pixels of a cutout.

    Background pixels are those with no segmentation label and a non-zero
    image value.

    :return: (mean, median, std) of the clipped background pixels.
    """
    background_pixels = cutout[(seg_cutout == 0) & (cutout != 0)]
    return sigma_clipped_stats(background_pixels, sigma=sigma)
def substitute_sources_with_background(cutout, seg_cutout, star_number):
    """
    Replace every segmented source except *star_number* with local background.

    :param cutout: 2D image cutout (left unmodified; a copy is returned).
    :param seg_cutout: matching segmentation map cutout.
    :param star_number: segmentation label of the star to preserve.
    :return: copy of *cutout* with contaminating sources painted over.
    """
    segmentation = seg_cutout.copy()
    patched = cutout.copy()
    # Zero out the target star's label so it is not treated as a contaminant
    segmentation[np.where(segmentation == star_number)] = 0
    contaminant_mask = np.where(segmentation != 0)
    clipper = SigmaClip(sigma=4, maxiters=1)
    estimator = MedianBackground()
    local_bkg = Background2D(patched, (10, 10), filter_size=(2, 2),
                             sigma_clip=clipper, bkg_estimator=estimator)
    patched[contaminant_mask] = local_bkg.background[contaminant_mask]
    return patched
def create_moffat_psf_image_per_target(root_path, target_name, target_star_positions, target_stars_x_keyword,
                                       target_stars_y_keyword, sci_images, psf_image_size, wavebands,
                                       pixel_scale, target_param_table):
    """
    Build a median Moffat-model PSF image per waveband for a single target.

    A 2D Moffat profile is fitted to each star cutout; the PSF image is
    generated from the medians of the per-star best-fit parameters and
    peak-normalised before being written to disk.

    :param root_path: directory where the PSF FITS images are written.
    :param target_name: target name used in the output filenames.
    :param target_star_positions: per-band FITS tables of star positions.
    :param target_stars_x_keyword: column name of the star x positions.
    :param target_stars_y_keyword: column name of the star y positions.
    :param sci_images: list of science image filenames, one per waveband.
    :param psf_image_size: side length of the output PSF image in pixels.
    :param wavebands: list of waveband names, parallel to sci_images.
    :param pixel_scale: pixel scale in arcsec/pixel.
    :param target_param_table: table with per-band 'bkg_amps' and 'fwhms'.
    :return: None; writes 'moffat_psf_<target>_<band>.fits' files.
    """
    for name in sci_images:
        idx_name = sci_images.index(name)
        target_stars_table = Table.read(target_star_positions[idx_name], format='fits')
        x_stars = target_stars_table[target_stars_x_keyword]
        y_stars = target_stars_table[target_stars_y_keyword]
        idx_target_param_table = np.where(target_param_table['wavebands'] == wavebands[idx_name])
        background_noise_amp_initial_guess = target_param_table['bkg_amps'][idx_target_param_table][0]
        seeing_pixel_initial_guess = target_param_table['fwhms'][idx_target_param_table][0] / pixel_scale
        data, head = fits.getdata(name, header=True)
        # NOTE(review): rows for stars skipped by the shape check below stay
        # uninitialised (np.empty, not NaN) and may leak into the nanmedians
        # — confirm whether they should be NaN-initialised.
        popts = np.empty((len(x_stars), 6))
        for i in range(len(x_stars)):
            cutout = create_cutout_image(x_stars[i], y_stars[i], psf_image_size, data)
            back_sub_cutout = subtract_background_from_image(cutout.data, sigma=2)
            starting_point = [background_noise_amp_initial_guess, 1., int(psf_image_size / 2),
                              int(psf_image_size / 2), seeing_pixel_initial_guess, 3.5]
            if back_sub_cutout.shape == (psf_image_size, psf_image_size):
                try:
                    popt, fwhm, beta = fit_2d_moffat_profile(back_sub_cutout, starting_point, psf_image_size)
                    popts[i, :] = popt
                except RuntimeError:
                    # curve_fit failed to converge for this star
                    popts[i, :] = np.full(len(starting_point), np.nan)
            else:
                continue
        # Build the model PSF from the medians of the per-star parameters
        x_img = np.arange(0, psf_image_size)
        y_img = np.arange(0, psf_image_size)
        x_y = np.meshgrid(x_img, y_img)
        sky = np.nanmedian(popts[:, 0])
        amplt = np.nanmedian(popts[:, 1])
        x0 = psf_image_size / 2
        y0 = psf_image_size / 2
        alpha = np.nanmedian(popts[:, 4])
        beta = np.nanmedian(popts[:, 5])
        moffat_psf = two_dim_moffat_profile(x_y, sky, amplt, x0, y0, alpha, beta)
        moffat_psf = moffat_psf / np.nanmax(moffat_psf)  # peak-normalise
        moffat_psf = moffat_psf.reshape((psf_image_size, psf_image_size))
        fits.writeto(os.path.join(root_path, 'moffat_psf_{}_{}.fits'.format(target_name, wavebands[idx_name])),
                     moffat_psf, head, overwrite=True)
def create_moffat_psf_image(root_path, target_star_positions, target_stars_id_keyword, target_stars_x_keyword,
                            target_stars_y_keyword, sci_images, seg_images, psf_image_size, wavebands,
                            pixel_scale, target_param_tables):
    """
    Build a median Moffat-model PSF image per waveband from multiple images.

    Like :func:`create_moffat_psf_image_per_target`, but iterating over
    several science images per waveband and cleaning each star cutout of
    contaminating sources via the segmentation map before fitting. The
    final PSF per waveband is the nanmedian of the per-image model PSFs.

    :param root_path: directory where the PSF FITS images are written.
    :param target_star_positions: nested [waveband][image] star tables.
    :param target_stars_id_keyword: column name of the star segmentation IDs.
    :param target_stars_x_keyword: column name of the star x positions.
    :param target_stars_y_keyword: column name of the star y positions.
    :param sci_images: nested [waveband] lists of science image filenames.
    :param seg_images: nested [waveband] lists of segmentation image filenames.
    :param psf_image_size: side length of the output PSF image in pixels.
    :param wavebands: list of waveband names.
    :param pixel_scale: pixel scale in arcsec/pixel.
    :param target_param_tables: nested [waveband][image] parameter tables.
    :return: None; writes 'moffat_psf_<band>.fits' files.
    """
    for waveband in wavebands:
        idx_waveband = wavebands.index(waveband)
        psf_cutouts = []
        for name in sci_images[idx_waveband]:
            idx_name = sci_images[idx_waveband].index(name)
            target_stars_table = Table.read(target_star_positions[idx_waveband][idx_name], format='fits')
            x_stars = target_stars_table[target_stars_x_keyword]
            y_stars = target_stars_table[target_stars_y_keyword]
            stars_number = target_stars_table[target_stars_id_keyword]
            target_param_table = Table.read(target_param_tables[idx_waveband][idx_name], format='fits')
            idx_target_param_table = np.where(target_param_table['wavebands'] == waveband)
            background_noise_amp_initial_guess = target_param_table['bkg_amps'][idx_target_param_table][0]
            seeing_pixel_initial_guess = target_param_table['fwhms'][idx_target_param_table][0] / pixel_scale
            data, head = fits.getdata(name, header=True)
            seg_data, seg_head = fits.getdata(seg_images[idx_waveband][idx_name], header=True)
            # NOTE(review): rows for skipped stars stay uninitialised
            # (np.empty, not NaN) and may leak into the nanmedians below.
            popts = np.empty((len(x_stars), 6))
            for i in range(len(x_stars)):
                cutout = create_cutout_image(x_stars[i], y_stars[i], psf_image_size, data)
                # NOTE(review): segmentation cutout uses a slightly shifted
                # centre (x-0.5, y-1.5) w.r.t. the science cutout — confirm.
                seg_cutout = create_cutout_image(x_stars[i] - 0.5, y_stars[i] - 1.5, psf_image_size, seg_data)
                # Paint over contaminating sources, then remove the local
                # mean background before fitting
                back_sub_cutout = substitute_sources_with_background(cutout.data, seg_cutout.data, stars_number[i])
                mean_back, median_back, std_back = estimate_cutout_background(back_sub_cutout, seg_cutout.data,
                                                                              sigma=2)
                back_sub_cutout = back_sub_cutout - mean_back
                starting_point = [background_noise_amp_initial_guess, 1., int(psf_image_size / 2),
                                  int(psf_image_size / 2), seeing_pixel_initial_guess, 3.5]
                if back_sub_cutout.shape == (psf_image_size, psf_image_size):
                    try:
                        popt, fwhm, beta = fit_2d_moffat_profile(back_sub_cutout, starting_point, psf_image_size)
                        popts[i, :] = popt
                    except RuntimeError:
                        # curve_fit failed to converge for this star
                        popts[i, :] = np.full(len(starting_point), np.nan)
                else:
                    continue
            # Build this image's model PSF from the median parameters
            x_img = np.arange(0, psf_image_size)
            y_img = np.arange(0, psf_image_size)
            x_y = np.meshgrid(x_img, y_img)
            sky = np.nanmedian(popts[:, 0])
            amplt = np.nanmedian(popts[:, 1])
            x0 = psf_image_size / 2
            y0 = psf_image_size / 2
            alpha = np.nanmedian(popts[:, 4])
            beta = np.nanmedian(popts[:, 5])
            moffat_psf = two_dim_moffat_profile(x_y, sky, amplt, x0, y0, alpha, beta)
            moffat_psf = moffat_psf / np.nanmax(moffat_psf)  # peak-normalise
            moffat_psf = moffat_psf.reshape((psf_image_size, psf_image_size))
            psf_cutouts.append(moffat_psf)
        fits.writeto(os.path.join(root_path, 'moffat_psf_{}.fits'.format(waveband)),
                     np.nanmedian(psf_cutouts, axis=0), overwrite=True)
def create_observed_psf_image_per_target(root_path, target_name, target_star_positions, target_stars_x_keyword,
                                         target_stars_y_keyword, sci_images, psf_image_size, wavebands):
    """
    Build an empirical PSF per waveband as the median of observed star stamps.

    Possible improvements: different kind of normalization, median mean or
    sum? align them by the brightest pixel.

    :param root_path: directory where the PSF FITS images are written.
    :param target_name: target name used in the output filenames.
    :param target_star_positions: per-band FITS tables of star positions.
    :param target_stars_x_keyword: column name of the star x positions.
    :param target_stars_y_keyword: column name of the star y positions.
    :param sci_images: list of science image filenames, one per waveband.
    :param psf_image_size: side length of the output PSF image in pixels.
    :param wavebands: list of waveband names, parallel to sci_images.
    :return: None; writes 'observed_psf_<target>_<band>.fits' files.
    """
    for name in sci_images:
        idx_name = sci_images.index(name)
        target_stars_table = Table.read(target_star_positions[idx_name], format='fits')
        x_stars = target_stars_table[target_stars_x_keyword]
        y_stars = target_stars_table[target_stars_y_keyword]
        # NOTE(review): slices for stars skipped below remain uninitialised
        # (np.empty, not NaN) and feed into the nanmedian — confirm.
        psf_cutout = np.empty((len(x_stars), psf_image_size, psf_image_size))
        data, head = fits.getdata(name, header=True)
        for i in range(len(x_stars)):
            cutout = create_cutout_image(x_stars[i]-0.5, y_stars[i]-1.5, psf_image_size, data)
            if (len(cutout.data) == psf_image_size) & (len(cutout.data[0]) == psf_image_size):
                psf_cutout[i] = subtract_background_from_image(cutout.data, sigma=2)
                psf_cutout[i] = psf_cutout[i] / np.nanmax(psf_cutout[i])  # peak-normalise
            else:
                continue
        fits.writeto(os.path.join(root_path, 'observed_psf_{}_{}.fits'.format(target_name, wavebands[idx_name])),
                     np.nanmedian(psf_cutout, axis=0),
                     head, overwrite=True)
def create_observed_psf_image(root_path, target_star_positions, target_stars_id_keyword, target_stars_x_keyword,
                              target_stars_y_keyword, sci_images, seg_images,
                              psf_image_size, wavebands):
    """
    Build an empirical PSF per waveband from star stamps across many images.

    Each stamp is cleaned of contaminating sources via the segmentation
    map, background-subtracted and peak-normalised; the final PSF is the
    nanmedian over all stamps of all images of a waveband.

    :param root_path: directory where the PSF FITS images are written.
    :param target_star_positions: nested [waveband][image] star tables.
    :param target_stars_id_keyword: column name of the star segmentation IDs.
    :param target_stars_x_keyword: column name of the star x positions.
    :param target_stars_y_keyword: column name of the star y positions.
    :param sci_images: nested [waveband] lists of science image filenames.
    :param seg_images: nested [waveband] lists of segmentation image filenames.
    :param psf_image_size: side length of the output PSF image in pixels.
    :param wavebands: list of waveband names.
    :return: None; writes 'observed_psf_<band>.fits' files.
    """
    for waveband in wavebands:
        idx_waveband = wavebands.index(waveband)
        psf_cutouts = []
        for name in sci_images[idx_waveband]:
            idx_name = sci_images[idx_waveband].index(name)
            target_stars_table = Table.read(target_star_positions[idx_waveband][idx_name], format='fits')
            x_stars = target_stars_table[target_stars_x_keyword]
            y_stars = target_stars_table[target_stars_y_keyword]
            stars_number = target_stars_table[target_stars_id_keyword]
            # NOTE(review): slices for skipped stars remain uninitialised
            # (np.empty, not NaN) and feed into the nanmedian — confirm.
            psf_cutout = np.empty((len(x_stars), psf_image_size, psf_image_size))
            data, head = fits.getdata(name, header=True)
            seg_data, seg_head = fits.getdata(seg_images[idx_waveband][idx_name], header=True)
            for i in range(len(x_stars)):
                cutout = create_cutout_image(x_stars[i] - 0.5, y_stars[i] - 1.5, psf_image_size, data)
                seg_cutout = create_cutout_image(x_stars[i] - 0.5, y_stars[i] - 1.5, psf_image_size, seg_data)
                if (len(cutout.data) == psf_image_size) & (len(cutout.data[0]) == psf_image_size):
                    psf_cutout[i] = substitute_sources_with_background(cutout.data, seg_cutout.data, stars_number[i])
                    mean_back, median_back, std_back = estimate_cutout_background(psf_cutout[i], seg_cutout.data,
                                                                                  sigma=2)
                    psf_cutout[i] = psf_cutout[i] - mean_back
                    psf_cutout[i] = psf_cutout[i] / np.nanmax(psf_cutout[i])  # peak-normalise
                else:
                    continue
            psf_cutouts.append(psf_cutout)
        psf_cutouts = np.concatenate(psf_cutouts)
        fits.writeto(os.path.join(root_path, 'observed_psf_{}.fits'.format(waveband)),
                     np.nanmedian(psf_cutouts, axis=0), overwrite=True)
def create_pca_psf_image_per_target(root_path, target_name, target_star_positions, target_stars_x_keyword,
                                    target_stars_y_keyword, sci_images, psf_image_size, wavebands):
    """
    Build a PCA-based PSF image per waveband for a single target.

    Star cutouts are background-subtracted, flattened into a matrix,
    mean-subtracted and decomposed with PCA; the first principal component
    (reshaped to 2D) is written out as the PSF image.

    :param root_path: directory where the PSF FITS images are written.
    :param target_name: target name used in the output filenames.
    :param target_star_positions: per-band FITS tables of star positions.
    :param target_stars_x_keyword: column name of the star x positions.
    :param target_stars_y_keyword: column name of the star y positions.
    :param sci_images: list of science image filenames, one per waveband.
    :param psf_image_size: side length of the output PSF image in pixels.
    :param wavebands: list of waveband names, parallel to sci_images.
    :return: None; writes 'pca_psf_<target>_<band>.fits' files.
    """
    # For the PCA, the images must be centred, normalised and then
    # mean-subtracted.
    # principal_components = np.empty((len(wavebands), psf_image_size ** 2, psf_image_size, psf_image_size))
    for name in sci_images:
        idx_name = sci_images.index(name)
        target_stars_table = Table.read(target_star_positions[idx_name], format='fits')
        x_stars = target_stars_table[target_stars_x_keyword]
        y_stars = target_stars_table[target_stars_y_keyword]
        psf_cutout = np.empty((len(x_stars), psf_image_size, psf_image_size))
        data, head = fits.getdata(name, header=True)
        # NOTE(review): rows for stars skipped below remain uninitialised
        # (np.empty, not NaN), so the NaN filter further down may let
        # garbage rows through — confirm.
        S = np.empty((len(x_stars), psf_image_size ** 2))
        for i in range(len(x_stars)):
            cutout = create_cutout_image(x_stars[i]-0.5, y_stars[i]-1.5, psf_image_size, data)
            if (len(cutout.data) == psf_image_size) & (len(cutout.data[0]) == psf_image_size):
                psf_cutout[i] = subtract_background_from_image(cutout.data, sigma=2)
                # psf_cutout[i] = psf_cutout[i] / np.nanmax(psf_cutout[i])
                # psf_cutout_raveled = np.ravel(psf_cutout[i].transpose())
                # psf_cutout_norm = preprocessing.normalize(np.reshape(psf_cutout_raveled, (1,
                #                                                      len(psf_cutout_raveled))))
                S[i, :] = np.ravel(psf_cutout[i].transpose())
                # S[i, :] = psf_cutout_norm
            else:
                continue
        # Drop rows containing NaNs before the decomposition
        goodindices = []
        for k in range(len(S)):
            if np.isnan(S[k, :]).any():
                continue
            else:
                goodindices.append(k)
        S_clean = S[goodindices]
        S_clean_mean_sub = S_clean - np.mean(S_clean, axis=0)
        # U, s, VT = linalg.svd(S_clean_mean_sub, full_matrices=True)
        # principal_components[idx_name, :, :, :] = VT.reshape((psf_image_size ** 2, psf_image_size, psf_image_size))
        # pca_star = principal_components[idx_name, 0, :, :]  # + principal_components[idx_band,1,:,:] + ...
        # pca_star = pca_star / np.nanmin(pca_star)
        n_components = min(len(x_stars), 5)
        pca_model = PCA(n_components=n_components)
        pca_model.fit(S_clean_mean_sub)
        pca_pc = pca_model.components_
        # Only the first principal component is used as the PSF estimate
        pca_star = np.reshape(pca_pc[0, :], (psf_image_size, psf_image_size))
        # + np.reshape(pca_pc[1,:],(psf_image_size, psf_image_size)) + \
        # np.reshape(pca_pc[2,:],(psf_image_size, psf_image_size)) +
        # np.reshape(pca_pc[3,:],(psf_image_size, psf_image_size)) + \
        # np.reshape(pca_pc[4,:],(psf_image_size, psf_image_size))
        head['NAXIS1'] = psf_image_size
        head['NAXIS2'] = psf_image_size
        fits.writeto(os.path.join(root_path, 'pca_psf_{}_{}.fits'.format(target_name, wavebands[idx_name])),
                     pca_star, head, overwrite=True)
def create_pca_psf_image(root_path, target_star_positions, target_stars_id_keyword, target_stars_x_keyword,
                         target_stars_y_keyword, sci_images, seg_images,
                         psf_image_size, wavebands):
    """
    Build a PCA-based PSF image per waveband from stars across many images.

    Each star cutout is cleaned of contaminating sources, background
    subtracted, flattened, and stacked; the first principal component of
    the mean-subtracted stack is written out as the PSF image.

    :param root_path: directory where the PSF FITS images are written.
    :param target_star_positions: nested [waveband][image] star tables.
    :param target_stars_id_keyword: column name of the star segmentation IDs.
    :param target_stars_x_keyword: column name of the star x positions.
    :param target_stars_y_keyword: column name of the star y positions.
    :param sci_images: nested [waveband] lists of science image filenames.
    :param seg_images: nested [waveband] lists of segmentation image filenames.
    :param psf_image_size: side length of the output PSF image in pixels.
    :param wavebands: list of waveband names.
    :return: None; writes 'pca_psf_<band>.fits' files.
    """
    for waveband in wavebands:
        idx_waveband = wavebands.index(waveband)
        psf_cutouts = []
        for name in sci_images[idx_waveband]:
            idx_name = sci_images[idx_waveband].index(name)
            target_stars_table = Table.read(target_star_positions[idx_waveband][idx_name], format='fits')
            x_stars = target_stars_table[target_stars_x_keyword]
            y_stars = target_stars_table[target_stars_y_keyword]
            stars_number = target_stars_table[target_stars_id_keyword]
            psf_cutout = np.empty((len(x_stars), psf_image_size, psf_image_size))
            data, head = fits.getdata(name, header=True)
            seg_data, seg_head = fits.getdata(seg_images[idx_waveband][idx_name], header=True)
            # BUG FIX: initialise with NaN (np.empty leaves arbitrary memory)
            # so stars skipped below are reliably removed by the NaN filter.
            S = np.full((len(x_stars), psf_image_size ** 2), np.nan)
            for i in range(len(x_stars)):
                cutout = create_cutout_image(x_stars[i], y_stars[i], psf_image_size, data)
                seg_cutout = create_cutout_image(x_stars[i], y_stars[i], psf_image_size, seg_data)
                if (len(cutout.data) == psf_image_size) & (len(cutout.data[0]) == psf_image_size):
                    psf_cutout[i] = substitute_sources_with_background(cutout.data, seg_cutout.data, stars_number[i])
                    mean_back, median_back, std_back = estimate_cutout_background(psf_cutout[i], seg_cutout.data,
                                                                                  sigma=2)
                    psf_cutout[i] = psf_cutout[i] - mean_back
                    S[i, :] = np.ravel(psf_cutout[i].transpose())
                else:
                    continue
            psf_cutouts.append(S)
        psf_cutouts = np.concatenate(psf_cutouts)
        # BUG FIX: the NaN filter used to scan only the *last* image's S
        # matrix while indexing the concatenated stack with the result;
        # filter the concatenated stack itself.
        goodindices = [k for k in range(len(psf_cutouts))
                       if not np.isnan(psf_cutouts[k, :]).any()]
        S_clean = psf_cutouts[goodindices]
        S_clean_mean_sub = S_clean - np.mean(S_clean, axis=0)
        # n_components = min(len(x_stars), 5)
        pca_model = PCA(n_components=1)
        pca_model.fit(S_clean_mean_sub)
        pca_pc = pca_model.components_
        # Only the first principal component is used as the PSF estimate
        pca_star = np.reshape(pca_pc[0, :], (psf_image_size, psf_image_size))
        fits.writeto(os.path.join(root_path, 'pca_psf_{}.fits'.format(waveband)),
                     pca_star, overwrite=True)
def create_effective_psf_image_per_target(root_path, target_name, target_star_positions, target_stars_x_keyword,
                                          target_stars_y_keyword, sci_images, psf_image_size, wavebands):
    """
    Build an effective PSF (ePSF) per waveband using photutils' EPSFBuilder.

    :param root_path: directory where the PSF FITS images are written.
    :param target_name: target name used in the output filenames.
    :param target_star_positions: per-band FITS tables of star positions.
    :param target_stars_x_keyword: column name of the star x positions.
    :param target_stars_y_keyword: column name of the star y positions.
    :param sci_images: list of science image filenames, one per waveband.
    :param psf_image_size: star extraction stamp size in pixels.
    :param wavebands: list of waveband names, parallel to sci_images.
    :return: None; writes 'effective_psf_<target>_<band>.fits' files.
    """
    for name in sci_images:
        idx_name = sci_images.index(name)
        data, head = fits.getdata(name, header=True)
        back_sub_data = subtract_background_from_image(data, sigma=2)
        nddata = NDData(data=back_sub_data)
        target_stars_table = Table.read(target_star_positions[idx_name], format='fits')
        stars_tbl = Table()
        stars_tbl['x'] = target_stars_table[target_stars_x_keyword]
        stars_tbl['y'] = target_stars_table[target_stars_y_keyword]
        stars = extract_stars(nddata, stars_tbl, size=psf_image_size)
        epsf_builder = EPSFBuilder(oversampling=2, progress_bar=False,
                                   smoothing_kernel='quadratic',
                                   recentering_maxiters=20, maxiters=10,
                                   norm_radius=5.5, shift_val=0.5,
                                   recentering_boxsize=(5, 5), center_accuracy=1.0e-3)
        epsf, fitted_stars = epsf_builder(stars)
        fits.writeto(os.path.join(root_path, 'effective_psf_{}_{}.fits'.format(target_name, wavebands[idx_name])),
                     epsf.data, head, overwrite=True)
def create_psf_image_for_sextractor(root_path, target_name, target_star_positions, target_stars_x_keyword,
                                    target_stars_y_keyword, sci_images, sextractor_psf_filename,
                                    psf_image_size, wavebands, fwhms):
    """
    Create a SExtractor-compatible PSF file per science image from star cutouts.

    For each science image, cutouts around catalogued stars are
    background-subtracted, peak-normalised and median-stacked; the resulting
    PSF image is injected into a copy of an existing SExtractor PSF file and
    written to ``sextractor_psf_<target_name>_<waveband>.fits``.

    :param root_path: directory where the output PSF FITS files are written.
    :param target_name: target identifier used in the output filenames.
    :param target_star_positions: FITS-table filenames with star positions,
        one per science image (same order as ``sci_images``).
    :param target_stars_x_keyword: table column holding the stars' x positions.
    :param target_stars_y_keyword: table column holding the stars' y positions.
    :param sci_images: list of science-image FITS filenames.
    :param sextractor_psf_filename: existing SExtractor PSF file used as a template.
    :param psf_image_size: size in pixels of the square star cutouts.
    :param wavebands: waveband labels, one per science image.
    :param fwhms: mapping waveband -> FWHM, stored in the 'PSF_FWHM' header key.
    :return: None; one FITS file per science image is written to disk.
    """
    # enumerate() instead of sci_images.index(name): O(1) per iteration and
    # correct even when a filename is duplicated in the list.
    for idx_name, name in enumerate(sci_images):
        target_stars_table = Table.read(target_star_positions[idx_name], format='fits')
        x_stars = target_stars_table[target_stars_x_keyword]
        y_stars = target_stars_table[target_stars_y_keyword]
        # BUG FIX: np.empty left uninitialised memory in slots that are
        # skipped below (cutouts falling off the image edge); pre-filling
        # with NaN makes np.nanmedian ignore those slots instead of folding
        # arbitrary garbage values into the stack.
        psf_cutout = np.full((len(x_stars), psf_image_size, psf_image_size), np.nan)
        data, head = fits.getdata(name, header=True)
        for i in range(len(x_stars)):
            # -0.5 / -1.5 offsets convert the catalogue convention to the
            # cutout's pixel convention (assumed from the original code —
            # TODO confirm against create_cutout_image)
            cutout = create_cutout_image(x_stars[i] - 0.5, y_stars[i] - 1.5, psf_image_size, data)
            if (len(cutout.data) == psf_image_size) & (len(cutout.data[0]) == psf_image_size):
                psf_cutout[i] = subtract_background_from_image(cutout.data, sigma=2)
                psf_cutout[i] = psf_cutout[i] / np.nanmax(psf_cutout[i])
        # pixel-wise median of the peak-normalised star stamps; a discarded
        # no-op reshape of this (already square) array has been removed
        psf_image = np.nanmedian(psf_cutout, axis=0)
        # reuse the template PSF file's data structure and header, overwriting
        # the stored PSF plane and the FWHM keyword
        psf, head = fits.getdata(sextractor_psf_filename, header=True)
        head['PSF_FWHM'] = fwhms[wavebands[idx_name]]
        psf[0][0][0] = psf_image
        fits.writeto(os.path.join(root_path, 'sextractor_psf_{}_{}.fits'.format(target_name, wavebands[idx_name])),
                     psf, head, overwrite=True)
|
torlucaREPO_NAMEmorphofitPATH_START.@morphofit_extracted@morphofit-master@morphofit@psf_estimation.py@.PATH_END.py
|
{
"filename": "python-reference_catboost_plot_predictions.md",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/catboost/docs/en/concepts/python-reference_catboost_plot_predictions.md",
"type": "Markdown"
}
|
# plot_predictions
{% include [plot_predictions-plot_predictions__desc__full](../_includes/work_src/reusage-python/plot_predictions__desc__full.md) %}
## {{ dl--parameters }} {#parameters}
### data
#### Description
The data to plot predictions for.
For example, use a two-document slice of the original dataset (refer to the example below).
**Possible types**
- numpy.ndarray
- pandas.DataFrame
- {{ python_type__pandas-SparseDataFrame }}
- scipy.sparse.spmatrix (all subclasses except dia_matrix)
- catboost.Pool
**Default value**
Required parameter
### features_to_change
#### Description
The list of numerical features to vary the prediction value for.
For example, choose the required features by selecting top N most important features that impact the prediction results for a pair of objects according to [PredictionDiff](fstr.md#fstr__prediction-diff) (refer to the example below).
**Possible types**
- list of int
- string
- combination of list of int & string
**Default value**
Required parameter
### plot
#### Description
Plot a Jupyter Notebook chart based on the calculated predictions.
**Possible types**
bool
**Default value**
True
### plot_file
#### Description
The name of the output HTML-file to save the chart to.
**Possible types**
string
**Default value**
None (the file is not saved)
## {{ dl__return-value }} {#output-format}
Dict with two fields:
A list of dictionaries with predictions for all objects in the data `float feature index -> [prediction for the object with corresponding feature values in the bucket : for all buckets used in the model]`
## {{ dl--example }} {#example}
```python
import numpy as np
from catboost import Pool, CatBoost
train_data = np.random.randint(0, 100, size=(100, 10))
train_label = np.random.randint(0, 1000, size=(100))
train_pool = Pool(train_data, train_label)
train_pool_slice = train_pool.slice([2, 3])
model = CatBoost()
model.fit(train_pool)
prediction_diff = model.get_feature_importance(train_pool_slice,
type='PredictionDiff',
prettified=True)
model.plot_predictions(data=train_pool_slice,
features_to_change=prediction_diff["Feature Id"][:2],
plot=True,
plot_file="plot_predictions_file.html")
```
An example of the first plotted chart:

|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@catboost@docs@en@concepts@python-reference_catboost_plot_predictions.md@.PATH_END.py
|
{
"filename": "try_ifort.py",
"repo_name": "CosmoLike/cocoa",
"repo_path": "cocoa_extracted/cocoa-main/Cocoa/external_modules/code/planck/code/spt_clik/waf_tools/try_ifort.py",
"type": "Python"
}
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
import sys
from waflib import Options
import os.path as osp
from waflib import Logs
from waflib import Context
from waflib import Errors
### common tools
def show_linkline(ctx, flavour, wafname):
    """Print the stored link line (-L paths and -l libs) for *wafname*."""
    lib_dirs = ctx.env["LIBPATH_" + wafname]
    libs = ctx.env["LIB_" + wafname]
    path_part = " ".join("-L%s" % d for d in lib_dirs)
    lib_part = " ".join("-l%s" % name for name in libs)
    ctx.start_msg("%s link line" % flavour)
    ctx.end_msg(path_part + " " + lib_part)
def retrieve_intel_linkline(ctx, flavour, wafname, execnm, flags, rl0=[], optionname=""):
    """
    Recover the link line needed to link Intel-Fortran objects with a C linker.

    Runs the compiler with ``-dryrun`` on a dummy file and parses its stderr
    to locate the ``ld`` invocation, from which the Intel runtime library
    paths (-L) and libraries (-l) are extracted and stored in ``ctx.env``
    under ``LIBPATH_<wafname>``, ``RPATH_<wafname>`` and ``LIB_<wafname>``.

    :param ctx: waf configuration context.
    :param flavour: human-readable compiler name for messages (e.g. "ifort").
    :param wafname: waf uselib suffix for the stored variables (e.g. "fc_runtime").
    :param execnm: compiler executable to invoke.
    :param flags: compiler flag string passed to the dry run.
    :param rl0: extra libraries always added to the link line (only read here,
        so the mutable default is harmless; kept for interface compatibility).
    :param optionname: name of a ctx.options attribute; when that option is
        set by the user the automatic retrieval is skipped entirely.
    """
    import waflib
    import os
    if optionname and not getattr(ctx.options, optionname, ""):
        ctx.start_msg("retrieve %s link line" % flavour)
        try:
            # do a dry run to retrieve the full link command
            magic_cmd = "%s %s -dryrun -dynamiclib -shared-intel -no-cxxlib dummy.f90" % (execnm, flags)
            llgo, llge = ctx.cmd_and_log(magic_cmd, output=waflib.Context.BOTH)
            # now try to parse the hell out of it!
            # first, look for the location of the ld command, assuming it is
            # spelled plain "ld" at the start of a line
            parts = re.split("^\s*ld\s*\\\\", llge, flags=re.M)
            if len(parts) != 2:
                # it is not called ld, or there is a specific path!
                # BUG FIX: stillnot is now initialised before the -Qlocation
                # branch, so an empty `gotcha` no longer raises NameError
                stillnot = True
                # is the linker location set by a -Qlocation option?
                gotcha = re.findall("-Qlocation,ld,.+?\s", llge)
                if gotcha:
                    # pick the last -Qlocation occurrence
                    nloc = gotcha[-1].strip()
                    # BUG FIX: strings are immutable — the original
                    # `nloc[-1] = " "` raised TypeError; drop a trailing '\'
                    if nloc[-1] == "\\":
                        nloc = nloc[:-1]
                    nloc = nloc.strip()
                    # the location is after the last comma
                    ldpath = nloc.split(",")[-1]
                    ldname = re.escape(ldpath + "/ld")
                    parts = re.split("^\s*%s\s*\\\\" % ldname, llge, flags=re.M)
                    if len(parts) == 2:
                        stillnot = False
                if stillnot:
                    # getting desperate: grab any line starting in column 0
                    # that ends with "/ld", some spaces and a trailing '\'
                    parts = re.split("^\S.+?/ld\s+\\\\", llge, flags=re.M)
                    if len(parts) == 2:
                        stillnot = False
                if stillnot:
                    # last resort: take everything between the first
                    # blank-line-separated paragraph and the trailing "rm"
                    # cleanup lines as the link command
                    llgeli = llge.split("\n")
                    i = len(llgeli)
                    while True:
                        i = i - 1
                        if not (re.match("^\s*rm\s+", llgeli[i]) or llgeli[i].strip() == ''):
                            break
                    last = i + 1
                    i = 0
                    first = False
                    while True:
                        if llgeli[i].strip() != '':
                            first = True
                        if llgeli[i].strip() == '' and first:
                            break
                        i = i + 1
                    # BUG FIX: the original `[""] + ["\n"].join(...)` called
                    # .join on a *list* (AttributeError); build the two-element
                    # list so parts[1] holds the reconstructed link command
                    parts = ["", "\n".join(llgeli[i + 2:last])]
            # parts[1] now contains the link command!
            # grab the -L paths (keeping only those mentioning intel or ifort)
            L = set([ll.strip() for ll in re.findall("^\s+-L(.+)\s*\\\\", parts[1], flags=re.M) if ("ifort" in ll.lower()) or ("intel" in ll.lower())])
            # grab the -l libraries
            l = set([ll.strip() for ll in re.findall("^\s+-l(.+)\s*\\\\", parts[1], flags=re.M)])
            # reduce to what's really relevant: paths that actually contain
            # one of the shared libraries named on the link line
            rL = set()
            rl = set(rl0)
            for Li in L:
                if osp.exists(Li):
                    oli = os.listdir(Li)
                    for li in l:
                        if ctx.env.cshlib_PATTERN % li in oli:
                            rl.add(li)
                            rL.add(Li)
        except Exception as e:
            ctx.end_msg(False)
            Logs.pprint("RED", "Cannot retrieve the %s link line" % flavour)
            if optionname:
                Logs.pprint("RED", "Please run manually")
                Logs.pprint("RED", "  '%s'" % magic_cmd)
                Logs.pprint("RED", "build the link line and pass it using the option")
                Logs.pprint("RED", "  '%s'" % optionname)
                ctx.fatal('The configuration failed')
            raise
        for pth in list(rL) + ["/lib", "/lib64"]:
            if osp.exists(pth):
                ctx.env.append_value("LIBPATH_%s" % wafname, pth)
                ctx.env.append_value("RPATH_%s" % wafname, pth)
        ctx.env.append_value("LIB_%s" % wafname, list(rl) + ["pthread"])
        ctx.end_msg(True)
    show_linkline(ctx, flavour, wafname)
def options(ctx):
    """Register the Fortran compiler command-line options on ctx's parser.

    Reuses the existing "--gcc" option group when present, otherwise creates
    a fresh "compiler options" group, and adds the gfortran/ifort selection
    switches plus the manual link-line override.
    """
    import optparse
    group = ctx.parser.get_option_group("--gcc")
    if group is None:
        group = optparse.OptionGroup(ctx.parser, "compiler options")
    option_specs = [
        ("--gfortran", dict(action="store_true", default=False,
                            help="Do not test for ifort and only use gfortran")),
        ("--ifort", dict(action="store_true", default=False,
                         help="Do not test for gfortran and only use ifort")),
        ("--fortran_flagline", dict(action="store", default="",
                                    help="flagline to link fortran object to c using ld")),
    ]
    for opt_name, kwargs in option_specs:
        group.add_option(opt_name, **kwargs)
    ctx.add_option_group(group)
def configure_(ctx):
    """Pick a Fortran compiler: try ifort first, fall back to gfortran.

    Honours the --gfortran / --ifort command-line overrides and an optional
    user-supplied link line (--fortran_flagline).  Sets ctx.env.has_ifort.
    """
    if ctx.options.fortran_flagline:
        # BUG FIX: was `conf.parse_flags(...)` — `conf` is undefined in this
        # scope; the configuration context is `ctx`
        ctx.parse_flags(ctx.options.fortran_flagline, uselib="fc_runtime")
    if sys.platform.lower() == "darwin":
        ctx.env.fcshlib_PATTERN = 'lib%s.dylib'
    ctx.env.has_ifort = False
    if not Options.options.gfortran:
        try:
            ifort_conf(ctx)
            return
        except Exception as e:
            # only hard-fail when the user explicitly demanded ifort
            if Options.options.ifort:
                raise
            Logs.pprint("PINK", "ifort not found, defaulting to gfortran (cause: '%s')" % e)
    gfortran_conf(ctx)
def configure(ctx):
    """Top-level Fortran configuration entry point.

    Reports an FC environment-variable override, runs the compiler selection
    (configure_) and derives the shared-library / PIC flag variables.
    """
    import os
    fc_override = os.environ.get("FC")
    if fc_override is not None:
        ctx.start_msg("Using fortran compiler path from 'FC' environment variable")
        ctx.end_msg(fc_override)
    configure_(ctx)
    # shared-library link flags are also needed at compile time
    ctx.env.append_value("FCFLAGS_fcshlib", ctx.env.LINKFLAGS_fcshlib)
    # collect the position-independent-code flags into their own variable
    pic_flags = [flag for flag in ctx.env.FCFLAGS_fcshlib if "-fpic" in flag.lower()]
    ctx.env["FCFLAGS_fpic"] = []
    ctx.env.append_value("FCFLAGS_fpic", pic_flags)
def ifort_conf(ctx):
    """Detect and configure the Intel Fortran compiler (ifort).

    Loads waf's ifort tool, parses the compiler version, picks the matching
    OpenMP flag, compile-checks a hello-world program, and retrieves the
    runtime link line into the 'fc_runtime' uselib variables.  Sets
    ``ctx.env.has_ifort = True`` on success; any failure propagates so the
    caller (configure_) can fall back to gfortran.
    """
    import waflib
    import os
    ctx.env.FC=[]
    ctx.load('ifort')
    if sys.platform.lower()=="darwin":
        ctx.env.LINKFLAGS_fcshlib = ['-dynamiclib']
    ctx.env.append_value('FCFLAGS',ctx.env.mopt)
    ctx.env["FCFLAGS_fc_omp"]=[]
    ctx.env.FCSHLIB_MARKER = [""]
    ctx.env.FCSTLIB_MARKER = [""]
    ctx.start_msg("Check ifort version")
    # first line of `ifort --version`; the third whitespace-separated token
    # is taken as the version number (assumes the usual "ifort (IFORT) X.Y.Z"
    # banner layout — TODO confirm for newer oneAPI releases)
    v90 = ctx.cmd_and_log(" ".join(ctx.env.FC)+" --version",quiet=Context.STDOUT).split("\n")[0].strip()
    v90 = v90.split("\n")[0].strip().split(" ")[2]
    ctx.end_msg(v90)
    ctx.env.IFORT_VERSION = v90
    majver = int(v90.split(".")[0])
    rl0 = []
    if majver>13:
        # newer releases need libirc explicitly on the link line
        rl0 = ["irc"]
    if majver>15:
        # ifort 16+ uses -qopenmp; older versions only accept -openmp
        ctx.env.append_value("FCFLAGS_fc_omp","-qopenmp")
    else:
        ctx.env.append_value("FCFLAGS_fc_omp","-openmp")
    ctx.check_cc(
        errmsg="failed",msg='Compile a test code with ifort',
        mandatory=1,fragment = "program test\n WRITE(*,*) 'hello world'\n end program test\n",compile_filename='test.f90',features='fc fcprogram')
    retrieve_intel_linkline(ctx,"ifort","fc_runtime"," ".join(ctx.env.FC)," ".join(ctx.env.FCFLAGS+ctx.env.FCFLAGS_fc_omp),rl0,"fortran_flagline")
    # legacy inline version of the link-line retrieval, superseded by
    # retrieve_intel_linkline above; kept for reference
    #if not ctx.options.fortran_flagline:
    #  ctx.start_msg("retrieve ifort link line")
    #  try:
    #    #print "%s %s -dryrun -dynamiclib -shared-intel -no-cxxlib dummy.f90"%(ctx.env.FC," ".join(ctx.env.FCFLAGS))
    #    llgo,llge = ctx.cmd_and_log("%s %s -dryrun -dynamiclib -shared-intel -no-cxxlib dummy.f90"%(" ".join(ctx.env.FC)," ".join(ctx.env.FCFLAGS+ctx.env.FCFLAGS_fc_omp)), output=waflib.Context.BOTH)
    #    #print "RET",llgo,llge
    #    L = set([ll.strip() for ll in re.findall("^\s+-L(.+)\s*\\\\", re.split("^\s*ld\s*\\\\",llge,flags=re.M)[1],flags=re.M) if ("ifort" in ll.lower()) or ("intel" in ll.lower())])
    #    l = set([ll.strip() for ll in re.findall("^\s+-l(.+)\s*\\\\", re.split("^\s*ld\s*\\\\",llge,flags=re.M)[1],flags=re.M)])
    #    rL = set()
    #    rl = set(rl0)
    #    for Li in L:
    #      if osp.exists(Li):
    #        oli = os.listdir(Li)
    #        for li in l:
    #          if ctx.env.cshlib_PATTERN%li in oli:
    #            rl.add(li)
    #            rL.add(Li)
    #  except:
    #    ctx.end_msg(False)
    #    raise
    #  for pth in list(rL) + ["/lib","/lib64"]:
    #    if osp.exists(pth):
    #      ctx.env.append_value("LIBPATH_fc_runtime",pth)
    #      ctx.env.append_value("RPATH_fc_runtime",pth)
    #
    #  ctx.env.append_value("LIB_fc_runtime",list(rl)+["pthread"])
    #  ctx.end_msg(True)
    #show_linkline(ctx)
    ctx.env.has_ifort = True
def gfortran_conf(ctx):
    """Detect and configure the GNU Fortran compiler (gfortran).

    Loads waf's gfortran tool, sets compile flags (including -m32/-m64 on
    macOS), checks the compiler version is at least 4.3, compile-checks a
    hello-world program, and stores the runtime link line (libgfortran,
    libgomp) in the 'fc_runtime' uselib variables.

    :raises Errors.WafError: when the detected gfortran is older than 4.3.
    """
    ctx.env.FC = []
    ctx.env.FCFLAGS = []
    ctx.load('gfortran')
    ctx.env["FCFLAGS_fc_omp"] = []
    ctx.env.append_value("FCFLAGS_fc_omp", "-fopenmp")
    ctx.env.append_value("FCFLAGS", "-DGFORTRAN")
    ctx.env.append_value("FCFLAGS", "-ffixed-line-length-0")
    ctx.env.append_value("FCFLAGS", "-ffree-line-length-0")
    mopt = ctx.env.mopt
    if sys.platform.lower() == "darwin":
        if "i386" in ctx.env.mopt:
            ctx.env.append_value('FCFLAGS', '-m32')
            mopt = ["-m32"]
        else:
            ctx.env.append_value('FCFLAGS', '-m64')
            mopt = ["-m64"]
    else:
        ctx.env.append_value('FCFLAGS', ctx.env.mopt)
    ctx.start_msg("Check gfortran version")
    v90 = ctx.cmd_and_log(" ".join(ctx.env.FC) + " --version", quiet=Context.STDOUT).split("\n")[0].strip()
    version90 = re.findall("([0-9]+\.[0-9]+\.[0-9]+)", v90)
    if len(version90) < 1:
        ctx.end_msg("not found, let's hope for the best...", color="PINK")
    else:
        version90 = version90[0]
        vtrio = [int(v) for v in version90.split(".")]
        # BUG FIX: `vmid` was never defined (NameError on old compilers);
        # the minor version is vtrio[1]
        if (vtrio[0] < 4) or (vtrio[0] == 4 and vtrio[1] < 3):
            ctx.end_msg(v90, color="YELLOW")
            raise Errors.WafError("gfortran version need to be above 4.3 got %s" % version90)
        ctx.end_msg(v90)
    # kludge !
    ctx.env.FCSHLIB_MARKER = [""]
    ctx.env.FCSTLIB_MARKER = mopt
    ctx.check_cc(
        errmsg="failed", msg='Compile a test code with gfortran',
        mandatory=1, fragment="program test\n WRITE(*,*) 'hello world'\n end program test\n", compile_filename='test.f90', features='fc fcprogram')
    ctx.start_msg("retrieve gfortran link line")
    # ask the compiler itself where its runtime libraries live
    lgfpath = ctx.cmd_and_log(" ".join(ctx.env.FC) + " %s -print-file-name=libgfortran.dylib" % (" ".join(mopt)), quiet=Context.STDOUT)
    lpath = [osp.dirname(osp.realpath(lgfpath))]
    lgfpath = ctx.cmd_and_log(" ".join(ctx.env.FC) + " %s -print-file-name=libgomp.dylib" % (" ".join(mopt)), quiet=Context.STDOUT)
    lpath += [osp.dirname(osp.realpath(lgfpath))]
    lpath = set(lpath)
    ctx.env.append_value("LIB_fc_runtime", ["gfortran", "gomp"])
    ctx.env.append_value("LIBPATH_fc_runtime", list(lpath))
    ctx.env.append_value("RPATH_fc_runtime", list(lpath))
    ctx.end_msg(True)
    show_linkline(ctx, "gfortran", "fc_runtime")
def NOTUSEDANYMORE_OLD_ifort_conf_(ctx):
    """Obsolete ifort configuration kept for reference only (dead code).

    Superseded by ifort_conf/retrieve_intel_linkline.  This version parsed
    the ifortvars_*.sh environment scripts shipped next to the compiler to
    recover the runtime library paths.

    NOTE(review): this function is never called; it also has a latent bug —
    the final ``show_linkline(ctx)`` call is missing the required
    ``flavour``/``wafname`` arguments and would raise TypeError if revived.
    """
    ctx.env.FC=[]
    ctx.load('ifort')
    if sys.platform.lower()=="darwin":
        ctx.env.LINKFLAGS_fcshlib = ['-dynamiclib']
    ctx.env.append_value('FCFLAGS',ctx.env.mopt)
    ctx.env.append_value("FCFLAGS_fc_omp","-openmp")
    ctx.env.FCSHLIB_MARKER = [""]
    ctx.env.FCSTLIB_MARKER = [""]
    ctx.check_cc(
        errmsg="failed",msg='Compile a test code with ifort',
        mandatory=1,fragment = "program test\n WRITE(*,*) 'hello world'\n end program test\n",compile_filename='test.f90',features='fc fcprogram')
    if not ctx.options.fortran_flagline:
        ctx.start_msg("retrieve ifort link line")
        if "/" not in ctx.env.FC[0]:
            ctx.env.FC = ctx.cmd_and_log("which %s"%ctx.env.FC[0]).strip()
        #print ctx.env.FC
        ifort_path = osp.dirname(osp.realpath(ctx.env.FC[0]))
        #print ifort_path
        if ctx.options.m32:
            try:
                f=open(osp.join(ifort_path,'ifortvars_ia32.sh'))
            except:
                ctx.end_msg(False)
                raise Errors.WafError("Can't locate ifort configuration file")
        else:
            try:
                f=open(osp.join(ifort_path,'ifortvars_intel64.sh'))
            except:
                ctx.end_msg(False)
                raise Errors.WafError("Can't locate ifort configuration file")
        txt = f.read()
        f.close()
        #print txt
        # the runtime library directories are listed in the script's
        # (DY)LD_LIBRARY_PATH assignment
        if sys.platform.lower()=="darwin":
            sp = "DYLD_LIBRARY_PATH"
        else:
            sp = "LD_LIBRARY_PATH"
        res = re.findall("\s"+sp+"\s*=\s*\"(.+)\"",txt)[0]
        for pth in res.split(":"):
            ctx.env.append_value("LIBPATH_fc_runtime",pth)
            ctx.env.append_value("RPATH_fc_runtime",pth)
        ctx.env.append_value("LIB_fc_runtime",["ifcore","intlc","ifport","imf","irc","svml","iomp5","pthread"])
        ctx.end_msg(True)
        show_linkline(ctx)
|
CosmoLikeREPO_NAMEcocoaPATH_START.@cocoa_extracted@cocoa-main@Cocoa@external_modules@code@planck@code@spt_clik@waf_tools@try_ifort.py@.PATH_END.py
|
{
"filename": "ex_mixed_lls_timecorr.py",
"repo_name": "statsmodels/statsmodels",
"repo_path": "statsmodels_extracted/statsmodels-main/statsmodels/sandbox/examples/ex_mixed_lls_timecorr.py",
"type": "Python"
}
|
"""Example using OneWayMixed with within group intertemporal correlation
Created on Sat Dec 03 10:15:55 2011
Author: Josef Perktold
This example constructs a linear model with individual specific random
effects, and uses OneWayMixed to estimate it.
This is a variation on ex_mixed_lls_0.py.
Here we use time dummies as random effects (all except 1st time period).
I think, this should allow for (almost) arbitrary intertemporal correlation.
The assumption is that each unit can have different constants, however the
intertemporal covariance matrix is the same for all units. One caveat, to
avoid singular matrices, we have to treat one time period differently.
Estimation requires that the number of units is larger than the number of
time periods. Also, it requires that we have the same number of periods for
each unit.
I needed to remove the first observation from the time dummies to avoid a
singular matrix. So, interpretation of time effects should be relative to
first observation. (I did not check the math.)
TODO:
Note, I do not already have constant in X. Constant for first
time observation is missing.
Do I need all dummies in exog_fe, Z, but not in exog_re, Z? Tried this and
it works.
In the error decomposition we also have the noise variable, I guess this works
like constant, so we get full rank (square) with only T-1 time dummies.
But we do not get correlation with the noise, or do we? conditional?
-> sample correlation of estimated random effects looks a bit high,
upward bias? or still some problems with initial condition?
correlation from estimated cov_random looks good.
Since we include the time dummies also in the fixed effect, we can have
arbitrary trends, different constants in each period.
Intertemporal correlation in data generating process, DGP, to see if
the results correctly estimate it.
used AR(1) as example, but only starting at second period. (?)
Note: we do not impose AR structure in the estimation
"""
import numpy as np
from statsmodels.sandbox.panel.mixed import OneWayMixed, Unit
examples = ['ex1']
if 'ex1' in examples:
    #np.random.seed(54321)
    #np.random.seed(978326)
    # --- simulation setup: units, observations per unit, model dimensions ---
    nsubj = 200
    units = []
    nobs_i = 8 #number of observations per unit, changed below
    nx = 1 #number fixed effects
    nz = nobs_i - 1 ##number random effects
    beta = np.ones(nx)
    gamma = 0.5 * np.ones(nz) #mean of random effect
    #gamma[0] = 0
    gamma_re_true = []
    # --- generate one Unit per subject with AR(1)-correlated random effects ---
    for i in range(nsubj):
        #create data for one unit
        #random effect/coefficient
        use_correlated = True
        if not use_correlated:
            gamma_re = gamma + 0.2 * np.random.standard_normal(nz)
        else:
            #coefficients are AR(1) for all but first time periods
            from scipy import linalg as splinalg
            rho = 0.6
            corr_re = splinalg.toeplitz(rho**np.arange(nz))
            rvs = np.random.multivariate_normal(np.zeros(nz), corr_re)
            gamma_re = gamma + 0.2 * rvs
        #store true parameter for checking
        gamma_re_true.append(gamma_re)
        #generate exogenous variables
        X = np.random.standard_normal((nobs_i, nx))
        #try Z should be time dummies
        time_dummies = (np.arange(nobs_i)[:, None] == np.arange(nobs_i)[None, :]).astype(float)
        # drop the first period's dummy to avoid a singular design (see module docstring)
        Z = time_dummies[:,1:]
        #Z = np.random.standard_normal((nobs_i, nz-1))
        #Z = np.column_stack((np.ones(nobs_i), Z))
        noise = 0.1 * np.random.randn(nobs_i) #sig_e = 0.1
        #generate endogenous variable
        Y = np.dot(X, beta) + np.dot(Z, gamma_re) + noise
        #add random effect design matrix also to fixed effects to
        #capture the mean
        #this seems to be necessary to force mean of RE to zero !?
        #(It's not required for estimation but interpretation of random
        #effects covariance matrix changes - still need to check details.
        #X = np.hstack((X,Z))
        X = np.hstack((X, time_dummies))
        # create units and append to list
        new_unit = Unit(Y, X, Z)
        units.append(new_unit)
    # --- estimate the mixed model and time the fit ---
    m = OneWayMixed(units)
    import time
    t0 = time.time()
    m.initialize()
    res = m.fit(maxiter=100, rtol=1.0e-5, params_rtol=1e-6, params_atol=1e-6)
    t1 = time.time()
    print('time for initialize and fit', t1-t0)
    print('number of iterations', m.iterations)
    #print dir(m)
    #print vars(m)
    print('\nestimates for fixed effects')
    print(m.a)
    print(m.params)
    bfixed_cov = m.cov_fixed()
    print('beta fixed standard errors')
    print(np.sqrt(np.diag(bfixed_cov)))
    print(m.bse)
    b_re = m.params_random_units
    print('RE mean:', b_re.mean(0))
    print('RE columns std', b_re.std(0))
    print('np.cov(b_re, rowvar=0), sample statistic')
    print(np.cov(b_re, rowvar=0))
    print('sample correlation of estimated random effects')
    print(np.corrcoef(b_re, rowvar=0))
    print('std of above')
    #need atleast_1d or diag raises exception
    print(np.sqrt(np.diag(np.atleast_1d(np.cov(b_re, rowvar=0)))))
    print('m.cov_random()')
    print(m.cov_random())
    print('correlation from above')
    print(res.cov_random()/ res.std_random()[:,None] /res.std_random())
    print('std of above')
    print(res.std_random())
    print(np.sqrt(np.diag(m.cov_random())))
    print('\n(non)convergence of llf')
    print(m.history['llf'][-4:])
    print('convergence of parameters')
    #print np.diff(np.vstack(m.history[-4:])[:,1:],axis=0)
    print(np.diff(np.vstack(m.history['params'][-4:]),axis=0))
    print('convergence of D')
    print(np.diff(np.array(m.history['D'][-4:]), axis=0))
    #zdotb = np.array([np.dot(unit.Z, unit.b) for unit in m.units])
    zb = np.array([(unit.Z * unit.b[None,:]).sum(0) for unit in m.units])
    '''if Z is not included in X:
    >>> np.dot(b_re.T, b_re)/100
    array([[ 0.03270611, -0.00916051],
           [-0.00916051,  0.26432783]])
    >>> m.cov_random()
    array([[ 0.0348722 , -0.00909159],
           [-0.00909159,  0.26846254]])
    >>> #note cov_random does not subtract mean!
    '''
    # --- compare estimated random effects against the simulated truth ---
    print('\nchecking the random effects distribution and prediction')
    gamma_re_true = np.array(gamma_re_true)
    print('mean of random effect true', gamma_re_true.mean(0))
    print('mean from fixed effects ', m.params[-2:])
    print('mean of estimated RE ', b_re.mean(0))
    print()
    absmean_true = np.abs(gamma_re_true).mean(0)
    mape = ((m.params[-nz:] + b_re) / gamma_re_true - 1).mean(0)*100
    mean_abs_perc = np.abs((m.params[-nz:] + b_re) - gamma_re_true).mean(0) \
                    / absmean_true*100
    median_abs_perc = np.median(np.abs((m.params[-nz:] + b_re) - gamma_re_true), 0) \
                      / absmean_true*100
    rmse_perc = ((m.params[-nz:] + b_re) - gamma_re_true).std(0) \
                / absmean_true*100
    print('mape ', mape)
    print('mean_abs_perc ', mean_abs_perc)
    print('median_abs_perc', median_abs_perc)
    print('rmse_perc (std)', rmse_perc)
    #from numpy.testing import assert_almost_equal
    #assert is for n_units=100 in original example
    #I changed random number generation, so this will not work anymore
    #assert_almost_equal(rmse_perc, [ 34.14783884, 11.6031684 ], decimal=8)
    #now returns res
    print('llf', res.llf) #based on MLE, does not include constant
    print('tvalues', res.tvalues)
    print('pvalues', res.pvalues)
    # --- hypothesis tests: are the random-effect means jointly zero? ---
    rmat = np.zeros(len(res.params))
    rmat[-nz:] = 1
    print('t_test mean of random effects variables are zero')
    print(res.t_test(rmat))
    print('f_test mean of both random effects variables is zero (joint hypothesis)')
    print(res.f_test(rmat))
    plots = res.plot_random_univariate() #(bins=50)
    fig = res.plot_scatter_all_pairs()
    import matplotlib.pyplot as plt
    plt.show()
statsmodelsREPO_NAMEstatsmodelsPATH_START.@statsmodels_extracted@statsmodels-main@statsmodels@sandbox@examples@ex_mixed_lls_timecorr.py@.PATH_END.py
|
{
"filename": "plotMag3Dsmooth.py",
"repo_name": "folsomcp/ZDIpy",
"repo_path": "ZDIpy_extracted/ZDIpy-main/utils/plotMag3Dsmooth.py",
"type": "Python"
}
|
#!/usr/bin/python3
#
#Plot a magnetic field from set of magnetic spherical harmonic coefficients
#cofficiants are read in from a file, and plotted in 3D using matplotlib (if possible)
import numpy as np
try:
import core.geometryStellar as geometryStellar
import core.magneticGeom as magneticGeom
import core.mainFuncs as mf
except ImportError:
#If this is run from the utils sub-directory, try adding the path
#to the main ZDIpy directory, containing the core sub-directory/module.
#(There may be a better way to do this?)
import sys
sys.path += [sys.path[0]+'/../']
import core.geometryStellar as geometryStellar
import core.magneticGeom as magneticGeom
import core.mainFuncs as mf
#incDeg = 60.
#inc = incDeg/180.*np.pi
# rotation phases to plot (only the first entry is used below)
phaseList = [0]
# input files: spherical-harmonic magnetic coefficients and ZDI model parameters
magCoeff = 'outMagCoeff.dat'
fileModelParams = 'inzdi.dat'
#the number of latitudinal grid points to use, for the spherical star
nGridLatSph = 90
nGridLatVec = 20
#use ~30-40 for a star with a total of ~1000-2000 surface elements
print('reading i, P, M, and R from {:}'.format(fileModelParams))
# CONSISTENCY FIX: read from fileModelParams instead of a second hard-coded
# 'inzdi.dat' literal, so changing the variable above actually takes effect
# (and matches the filename announced by the print just before)
par = mf.readParamsZDI(fileModelParams)
incDeg = par.inclination
incRad = par.incRad
#initialise the stellar grid
sGrid = geometryStellar.starGrid(nGridLatSph, par.period, par.mass, par.radius)
sGridVec = geometryStellar.starGrid(nGridLatVec, par.period, par.mass, par.radius)
#initialise the magnetic geometry spherical harmonics from a file of coefficients
inMagGeom = magneticGeom.magSphHarmoicsFromFile(magCoeff)
##########################################################
#actual plotting...
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.tri as mtri
import matplotlib
# embed fonts as editable text in PDF/PS output
matplotlib.rcParams['pdf.fonttype'] = 42
matplotlib.rcParams['ps.fonttype'] = 42
##########################################################
##Magnetic stuff for the spherical surface
#generate a new copy of the magnetic vector map (probably not necessary)
magGeom = magneticGeom.magSphHarmoics(inMagGeom.nl)
magGeom.alpha = inMagGeom.alpha
magGeom.beta = inMagGeom.beta
magGeom.gamma = inMagGeom.gamma
magGeom.initMagGeom(sGrid.clat, sGrid.long)
Bxc, Byc, Bzc = magGeom.getAllMagVectorsCart()
Bmag = np.sqrt(Bxc**2 + Byc**2 + Bzc**2) #abs magnetic strength at cell center
xp, yp, zp = sGrid.GetCartesianCells()
#Set up a grid in colatitude and longitude for interpolating onto
npSphGrid = 100
tmpClat = np.linspace(0., np.pi, npSphGrid)
tmpLong = np.linspace(0., 2*np.pi, 2*npSphGrid)
gridClat, gridLong = np.meshgrid(tmpClat, tmpLong)
#Pad the edges of the ZDI stellar grid, so that the linear interpolation routine can handle the edges
sGridPadClat = sGrid.clat
sGridPadLong = sGrid.long
radiusPad = sGrid.radius
BmagPad = Bmag
#pad clat < 0 (duplicate the first colatitude ring just above the pole)
indCMin = np.where(sGrid.clat == np.min(sGrid.clat))
sGridPadClat = np.append(sGrid.clat[indCMin] - sGrid.dClat[indCMin], sGridPadClat)
sGridPadLong = np.append(sGrid.long[indCMin], sGridPadLong)
radiusPad = np.append(sGrid.radius[indCMin], radiusPad)
BmagPad = np.append(Bmag[indCMin], BmagPad)
#pad clat > pi (duplicate the last colatitude ring just below the pole)
indCMax = np.where(sGrid.clat == np.max(sGrid.clat))
sGridPadClat = np.append(sGridPadClat, sGrid.clat[indCMax] + sGrid.dClat[indCMax])
sGridPadLong = np.append(sGridPadLong, sGrid.long[indCMax])
radiusPad = np.append(radiusPad, sGrid.radius[indCMax])
BmagPad = np.append(BmagPad, Bmag[indCMax])
#pad long>0, long < 2pi (wrap each colatitude ring around in longitude)
lastClat = -100.
indLastEdge = 0
for i in range(sGrid.numPoints):
    if sGrid.clat[i] > lastClat:
        lastClat = sGrid.clat[i]
        #the long > 2pi edge
        sGridPadClat = np.append(sGridPadClat, sGrid.clat[i-1])
        sGridPadLong = np.append(sGridPadLong, sGrid.long[i-1]+sGrid.dLong[i-1])
        radiusPad = np.append(radiusPad, sGrid.radius[i-1])
        #Use the pixel at clat[i-1] and long=0 (for clat[i-1] long>2pi)
        BmagPad = np.append(BmagPad, Bmag[indLastEdge])
        #find the pixel at this (i) clat and long = 2pi
        j = i
        while j < sGrid.numPoints-1 and sGrid.clat[j] <= lastClat:
            j += 1
        indNextEdge = j - 1
        #the long < 0 edge
        sGridPadClat = np.append(sGridPadClat, sGrid.clat[i])
        sGridPadLong = np.append(sGridPadLong, sGrid.long[i]-sGrid.dLong[i])
        radiusPad = np.append(radiusPad, sGrid.radius[i])
        BmagPad = np.append(BmagPad, Bmag[indNextEdge])
        indLastEdge = i
##Interpolate using scipy multi-dimensional interpolation from unstructured data
##(This seems to be flexible if possibly a little imprecise)
from scipy.interpolate import griddata
#Bgrid = griddata( (sGrid.clat,sGrid.long), Bmag, (gridClat, gridLong), method='nearest')
#Bgrid = griddata( (sGrid.clat,sGrid.long), Bmag, (gridClat, gridLong), method='linear', fill_value=0.0)
radiusGrid = griddata( (sGridPadClat,sGridPadLong), radiusPad, (gridClat, gridLong), method='linear', fill_value=0.0)
Bgrid = griddata( (sGridPadClat,sGridPadLong), BmagPad, (gridClat, gridLong), method='linear', fill_value=-10.0)
#Convert the grid to Cartesian coordinates for the plotting routine
xgrid = radiusGrid*np.sin(gridClat)*np.cos(gridLong)
ygrid = radiusGrid*np.sin(gridClat)*np.sin(gridLong)
zgrid = radiusGrid*np.cos(gridClat)
##########################################################
##Magnetic stuff for the vector arrows
#offset above the surface of the sphere for displaying magnetic vectors
#used to keep the vectors from disappearing below the surface of the plot
offsetBvec = 0.05
#generate a new copy of the magnetic vector map (on the coarser vector grid)
magGeomV = magneticGeom.magSphHarmoics(inMagGeom.nl)
magGeomV.alpha = inMagGeom.alpha
magGeomV.beta = inMagGeom.beta
magGeomV.gamma = inMagGeom.gamma
magGeomV.initMagGeom(sGridVec.clat, sGridVec.long)
BxcV, BycV, BzcV = magGeomV.getAllMagVectorsCart()
BrV, BclatV, BlonV = magGeomV.getAllMagVectors()
cbBmagV = np.sqrt(BxcV**2 + BycV**2 + BzcV**2) #abs magnetic strength at cell center
centers = sGridVec.GetCartesianCells()
#position values for the plot of magnetic vectors ("3D quiver plot")
xc = centers[0,:] + offsetBvec*centers[0,:]
yc = centers[1,:] + offsetBvec*centers[1,:]
zc = centers[2,:] + offsetBvec*centers[2,:]
##########################################################
##Plotting setup details
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
#scale arrow lengths so the strongest field vector has length 0.2 stellar radii
quivNorm = 0.2/np.max(cbBmagV)
for i in range(xc.shape[0]):
    #lenV = cbBmagV[i] / np.max(cbBmagV)*0.3+0.03 #not necessary in newer versions of mplot3d, can just set the normalize keyword to False. Otherwise use length=lenV
    # red arrows for outward (positive radial) field, blue for inward
    if BrV[i] > 0.0:
        quiv = ax.quiver(xc[i], yc[i], zc[i], BxcV[i]*quivNorm, BycV[i]*quivNorm, BzcV[i]*quivNorm, normalize=False, color=(1,0,0), pivot='tail')
    else:
        quiv = ax.quiver(xc[i], yc[i], zc[i], BxcV[i]*quivNorm, BycV[i]*quivNorm, BzcV[i]*quivNorm, normalize=False, color=(0,0,1), pivot='tip')
    #quiv.set_sort_zpos(zc[i]) #probably not necessary, should be roughly set automatically
#get colors from a colormap, for optional later use
#First get a colormap object. Mostly you could just use cmap='xxx' in functions, exept for the line below.
#colormap = plt.cm.afmhot
#colormap = plt.cm.binary
#colormap = plt.cm.Spectral
#colormap = plt.cm.inferno
#colormap = plt.cm.inferno_r
#colormap = plt.cm.magma
#colormap = plt.cm.magma_r
#colormap = plt.cm.cividis_r
#colormap = plt.cm.Purples
colormap = plt.cm.BuPu
#colormap = plt.cm.viridis_r
tmpFrac = Bgrid/np.max(Bgrid)
tmpcolors=colormap(tmpFrac)
tmpsurf= ax.plot_surface(xgrid, ygrid, zgrid, facecolors=tmpcolors, rstride=1, cstride=1, shade=False)
#set the image z position of the sphere to be 0, otherwise mplot3d seems to do something odd/wrong for some quiver plots.
#Hypothesis: Things (quiver points) in the image deeper than 0.0 should be beyond the limb of the sphere and not drawn, while things shallower than that should be on the visible surface and drawn on top of the sphere. So setting the sort_zpos to 0 is correct.
tmpsurf.set_sort_zpos(0.0)
##Plot the rotation axis, this only works when viewed from the northern hemisphere
##This dosen't interact well with the 3D quiver, seems to get z sorting wrong
#tmpLineax1 = ax.plot3D([0,0],[0,0],[1.0,1.3], color='k', linewidth=2, zorder=+1.3)
#tmpLineax2 = ax.plot3D([0,0],[0,0],[-1.0,-1.3], color='k', linewidth=2, zorder=-1.3)
#Instead build the rotaion axis from quiver objects
tmpLineax1 = ax.quiver(0.0, 0.0, 1.0, 0.0, 0.0, 0.8, normalize=False, linewidth=2, color=(0,0,0), pivot='tail',arrow_length_ratio=0.0)
tmpLineax2 = ax.quiver(0.0, 0.0,-1.0, 0.0, 0.0,-0.8, normalize=False, linewidth=2, color=(0,0,0), pivot='tail',arrow_length_ratio=0.0)
#And try to build an equator out of quiver objects too
eqR = np.max(sGrid.radius)
for i in range(100):
    # draw the equator as 100 short arrowless segments around the star
    theta = 2*np.pi*(i/100.)
    dtheta = 2*np.pi*(1./100.)
    circ_x1 = eqR*np.sin(theta)
    circ_y1 = eqR*np.cos(theta)
    circ_dx = eqR*(np.sin(theta+dtheta) - np.sin(theta))
    circ_dy = eqR*(np.cos(theta+dtheta) - np.cos(theta))
    tmpCirc = ax.quiver(circ_x1, circ_y1, 0.0, circ_dx, circ_dy, 0.0, normalize=False, linewidth=1, color=(0.0,0.0,0.0), pivot='tail',arrow_length_ratio=0.0)
#And label this rotation phase
ax.text2D(0.1, 0.95, 'Phase {:0.2f}'.format(phaseList[0]), transform=ax.transAxes, fontsize='large')
#work around to make a color bar work for the plot
#Need to make a "mappable" colormap object, as a proxy for the plot_surface
#(the 2D color array and 2D data arrays are not "mappable", and seem not to generate one automatically,
# unlike most plotting functions. Probably because I have to color things manualy in plot_surface.)
tmpCBmap = plt.cm.ScalarMappable(norm=None, cmap=colormap)
tmpCBmap.set_array(Bgrid)
colorbar = fig.colorbar(tmpCBmap) #shrink=0.5, aspect=5)
colorbar.set_label('|B| (G)')
ax.set_xlim(-1.0, 1.0)
ax.set_ylim(-1.0, 1.0)
ax.set_zlim(-1.0, 1.0)
#remove position axes with tickmarks etc.
ax.set_axis_off()
# view the star from the observer's inclination and the chosen rotation phase
ax.view_init(90-incDeg, -phaseList[0]*360.)
plt.show()
|
folsomcpREPO_NAMEZDIpyPATH_START.@ZDIpy_extracted@ZDIpy-main@utils@plotMag3Dsmooth.py@.PATH_END.py
|
{
"filename": "TensorMetadataAddStats.md",
"repo_name": "tensorflow/tensorflow",
"repo_path": "tensorflow_extracted/tensorflow-master/tensorflow/lite/g3doc/api_docs/python/tflite_support/metadata_schema_py_generated/TensorMetadataAddStats.md",
"type": "Markdown"
}
|
page_type: reference
<link rel="stylesheet" href="/site-assets/css/style.css">
<!-- DO NOT EDIT! Automatically generated file. -->
<div itemscope itemtype="http://developers.google.com/ReferenceObject">
<meta itemprop="name" content="tflite_support.metadata_schema_py_generated.TensorMetadataAddStats" />
<meta itemprop="path" content="Stable" />
</div>
# tflite_support.metadata_schema_py_generated.TensorMetadataAddStats
<!-- Insert buttons and diff -->
<table class="tfo-notebook-buttons tfo-api nocontent" align="left">
<td>
<a target="_blank" href="https://github.com/tensorflow/tflite-support/blob/v0.4.4/tensorflow_lite_support/metadata/metadata_schema_py_generated.py#L2232-L2233">
<img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />
View source on GitHub
</a>
</td>
</table>
<pre class="devsite-click-to-copy prettyprint lang-py tfo-signature-link">
<code>tflite_support.metadata_schema_py_generated.TensorMetadataAddStats(
builder, stats
)
</code></pre>
<!-- Placeholder for "Used in" -->
|
tensorflowREPO_NAMEtensorflowPATH_START.@tensorflow_extracted@tensorflow-master@tensorflow@lite@g3doc@api_docs@python@tflite_support@metadata_schema_py_generated@TensorMetadataAddStats.md@.PATH_END.py
|
{
"filename": "README.md",
"repo_name": "jona2510/BCNN",
"repo_path": "BCNN_extracted/BCNN-main/README.md",
"type": "Markdown"
}
|
Bayesian and Convolutional Neural Networks (BCNN)
|
jona2510REPO_NAMEBCNNPATH_START.@BCNN_extracted@BCNN-main@README.md@.PATH_END.py
|
{
"filename": "metrics_utils.py",
"repo_name": "keras-team/keras",
"repo_path": "keras_extracted/keras-master/keras/src/metrics/metrics_utils.py",
"type": "Python"
}
|
from enum import Enum
import numpy as np
from keras.src import backend
from keras.src import ops
from keras.src.losses.loss import squeeze_or_expand_to_same_rank
from keras.src.utils.python_utils import to_list
# Large-magnitude negative sentinel used by `_filter_top_k` to mask out
# entries that are not among the top-k, so they never exceed any threshold.
NEG_INF = -1e10
def assert_thresholds_range(thresholds):
    """Validate that every entry of `thresholds` lies in the interval [0, 1].

    Args:
        thresholds: A list of threshold values, or `None`. `None` as a whole
            is accepted (nothing to check), but a `None` *entry* is invalid.

    Raises:
        ValueError: If any entry is `None` or falls outside [0, 1].
    """
    if thresholds is None:
        return
    # Collect every offending value so the error message shows them all.
    out_of_range = [t for t in thresholds if t is None or not 0 <= t <= 1]
    if out_of_range:
        raise ValueError(
            "Threshold values must be in [0, 1]. "
            f"Received: {out_of_range}"
        )
def parse_init_thresholds(thresholds, default_threshold=0.5):
    """Validate `thresholds` and normalize them to a list.

    Args:
        thresholds: A scalar, list, or `None`. When `None`,
            `default_threshold` is used instead.
        default_threshold: Fallback threshold when `thresholds` is `None`.

    Returns:
        A list of validated threshold values.

    Raises:
        ValueError: If any supplied threshold is outside [0, 1].
    """
    if thresholds is None:
        return to_list(default_threshold)
    # Only user-supplied thresholds are range-checked; the default is trusted.
    assert_thresholds_range(to_list(thresholds))
    return to_list(thresholds)
class ConfusionMatrix(Enum):
    """Keys identifying the four confusion-matrix quantities.

    Used as dictionary keys in `variables_to_update` by
    `update_confusion_matrix_variables` and its optimized variant.
    """

    TRUE_POSITIVES = "tp"
    FALSE_POSITIVES = "fp"
    TRUE_NEGATIVES = "tn"
    FALSE_NEGATIVES = "fn"
class AUCCurve(Enum):
    """Type of AUC Curve (ROC or PR)."""

    ROC = "ROC"
    PR = "PR"

    @staticmethod
    def from_str(key):
        """Return the `AUCCurve` member matching a string key.

        Args:
            key: One of `"pr"`, `"PR"`, `"roc"`, `"ROC"`.

        Returns:
            The corresponding `AUCCurve` member.

        Raises:
            ValueError: If `key` does not name a supported curve type.
        """
        # Alias table: lower- and upper-case spellings map to the same member.
        for aliases, curve in (
            (("pr", "PR"), AUCCurve.PR),
            (("roc", "ROC"), AUCCurve.ROC),
        ):
            if key in aliases:
                return curve
        raise ValueError(
            f'Invalid AUC curve value: "{key}". '
            'Expected values are ["PR", "ROC"]'
        )
class AUCSummationMethod(Enum):
    """Type of AUC summation method.

    https://en.wikipedia.org/wiki/Riemann_sum)

    Contains the following values:

    * 'interpolation': Applies mid-point summation scheme for `ROC` curve. For
      `PR` curve, interpolates (true/false) positives but not the ratio that
      is precision (see Davis & Goadrich 2006 for details).
    * 'minoring': Applies left summation for increasing intervals and right
      summation for decreasing intervals.
    * 'majoring': Applies right summation for increasing intervals and left
      summation for decreasing intervals.
    """

    INTERPOLATION = "interpolation"
    MAJORING = "majoring"
    MINORING = "minoring"

    @staticmethod
    def from_str(key):
        """Return the `AUCSummationMethod` member matching a string key.

        Args:
            key: One of `"interpolation"`, `"majoring"`, `"minoring"`
                (capitalized first letter also accepted).

        Returns:
            The corresponding `AUCSummationMethod` member.

        Raises:
            ValueError: If `key` does not name a supported method.
        """
        # Alias table: lower-case and capitalized spellings are equivalent.
        for aliases, method in (
            (
                ("interpolation", "Interpolation"),
                AUCSummationMethod.INTERPOLATION,
            ),
            (("majoring", "Majoring"), AUCSummationMethod.MAJORING),
            (("minoring", "Minoring"), AUCSummationMethod.MINORING),
        ):
            if key in aliases:
                return method
        raise ValueError(
            f'Invalid AUC summation method value: "{key}". '
            'Expected values are ["interpolation", "majoring", "minoring"]'
        )
def _update_confusion_matrix_variables_optimized(
    variables_to_update,
    y_true,
    y_pred,
    thresholds,
    multi_label=False,
    sample_weights=None,
    label_weights=None,
    thresholds_with_epsilon=False,
):
    """Update confusion matrix variables with memory efficient alternative.

    Note that the thresholds need to be evenly distributed within the list,
    eg, the diff between consecutive elements are the same.

    To compute TP/FP/TN/FN, we are measuring a binary classifier
      C(t) = (predictions >= t)
    at each threshold 't'. So we have
      TP(t) = sum( C(t) * true_labels )
      FP(t) = sum( C(t) * false_labels )

    But, computing C(t) requires computation for each t. To make it fast,
    observe that C(t) is a cumulative integral, and so if we have
      thresholds = [t_0, ..., t_{n-1}]; t_0 < ... < t_{n-1}
    where n = num_thresholds, and if we can compute the bucket function
      B(i) = Sum( (predictions == t), t_i <= t < t{i+1} )
    then we get
      C(t_i) = sum( B(j), j >= i )
    which is the reversed cumulative sum in ops.cumsum().

    We can compute B(i) efficiently by taking advantage of the fact that
    our thresholds are evenly distributed, in that
      width = 1.0 / (num_thresholds - 1)
      thresholds = [0.0, 1*width, 2*width, 3*width, ..., 1.0]

    Given a prediction value p, we can map it to its bucket by
      bucket_index(p) = floor( p * (num_thresholds - 1) )
    so we can use ops.segment_sum() to update the buckets in one pass.

    Consider following example:
      y_true = [0, 0, 1, 1]
      y_pred = [0.1, 0.5, 0.3, 0.9]
      thresholds = [0.0, 0.5, 1.0]
      num_buckets = 2   # [0.0, 1.0], (1.0, 2.0]
      bucket_index(y_pred) = ops.floor(y_pred * num_buckets)
                           = ops.floor([0.2, 1.0, 0.6, 1.8])
                           = [0, 0, 0, 1]
      # The meaning of this bucket is that if any of the label is true,
      # then 1 will be added to the corresponding bucket with the index.
      # Eg, if the label for 0.2 is true, then 1 will be added to bucket 0.
      # If the label for 1.8 is true, then 1 will be added to bucket 1.
      #
      # Note the second item "1.0" is floored to 0, since the value need to
      # be strictly larger than the bucket lower bound.
      # In the implementation, we use ops.ceil() - 1 to achieve this.
      tp_bucket_value = ops.segment_sum(true_labels, bucket_indices,
                                        num_segments=num_thresholds)
                      = [1, 1, 0]
      # For [1, 1, 0] here, it means there is 1 true value contributed by
      # bucket 0, and 1 value contributed by bucket 1. When we aggregate them
      # together, the result become [a + b + c, b + c, c], since large
      # thresholds will always contribute to the value for smaller thresholds.
      true_positive = ops.cumsum(tp_bucket_value, reverse=True)
                    = [2, 1, 0]

    This implementation exhibits a run time and space complexity of O(T + N),
    where T is the number of thresholds and N is the size of predictions.
    Metrics that rely on standard implementation instead exhibit a complexity
    of O(T * N).

    Args:
        variables_to_update: Dictionary with 'tp', 'fn', 'tn', 'fp' as valid
            keys and corresponding variables to update as values.
        y_true: A floating point `Tensor` whose shape matches `y_pred`. Will
            be cast to `bool`.
        y_pred: A floating point `Tensor` of arbitrary shape and whose values
            are in the range `[0, 1]`.
        thresholds: A sorted floating point `Tensor` with value in `[0, 1]`.
            It need to be evenly distributed (the diff between each element
            need to be the same).
        multi_label: Optional boolean indicating whether multidimensional
            prediction/labels should be treated as multilabel responses, or
            flattened into a single label. When True, the values of
            `variables_to_update` must have a second dimension equal to the
            number of labels in y_true and y_pred, and those tensors must not
            be RaggedTensors.
        sample_weights: Optional `Tensor` whose rank is either 0, or the same
            rank as `y_true`, and must be broadcastable to `y_true` (i.e., all
            dimensions must be either `1`, or the same as the corresponding
            `y_true` dimension).
        label_weights: Optional tensor of non-negative weights for multilabel
            data. The weights are applied when calculating TP, FP, FN, and TN
            without explicit multilabel handling (i.e. when the data is to be
            flattened).
        thresholds_with_epsilon: Optional boolean indicating whether the
            leading and tailing thresholds has any epsilon added for floating
            point imprecisions. It will change how we handle the leading and
            tailing bucket.

    Returns:
        None. The variables in `variables_to_update` are updated in place.
    """
    num_thresholds = ops.shape(thresholds)[0]
    # Broadcast the per-sample and per-label weights to y_pred's shape, then
    # combine them into a single elementwise weight tensor.
    if sample_weights is None:
        sample_weights = 1.0
    else:
        sample_weights = ops.broadcast_to(
            ops.cast(sample_weights, dtype=y_pred.dtype), ops.shape(y_pred)
        )
        if not multi_label:
            sample_weights = ops.reshape(sample_weights, [-1])
    if label_weights is None:
        label_weights = 1.0
    else:
        label_weights = ops.expand_dims(label_weights, 0)
        label_weights = ops.broadcast_to(label_weights, ops.shape(y_pred))
        if not multi_label:
            label_weights = ops.reshape(label_weights, [-1])
    weights = ops.cast(
        ops.multiply(sample_weights, label_weights), y_true.dtype
    )
    # We shouldn't need this, but in case there are predict value that is out
    # of the range of [0.0, 1.0]
    y_pred = ops.clip(y_pred, x_min=0.0, x_max=1.0)
    # Squash labels to exact {0, 1} before weighting.
    y_true = ops.cast(ops.cast(y_true, "bool"), y_true.dtype)
    if not multi_label:
        y_true = ops.reshape(y_true, [-1])
        y_pred = ops.reshape(y_pred, [-1])
    # Weighted indicator masses for positive and negative ground truth.
    true_labels = ops.multiply(y_true, weights)
    false_labels = ops.multiply((1.0 - y_true), weights)
    # Compute the bucket indices for each prediction value.
    # Since the predict value has to be strictly greater than the thresholds,
    # eg, buckets like [0, 0.5], (0.5, 1], and 0.5 belongs to first bucket.
    # We have to use math.ceil(val) - 1 for the bucket.
    bucket_indices = (
        ops.ceil(y_pred * (ops.cast(num_thresholds, dtype=y_pred.dtype) - 1))
        - 1
    )
    if thresholds_with_epsilon:
        # In this case, the first bucket should actually take into account
        # since the any prediction between [0.0, 1.0] should be larger than
        # the first threshold. We change the bucket value from -1 to 0.
        bucket_indices = ops.relu(bucket_indices)
    bucket_indices = ops.cast(bucket_indices, "int32")
    if multi_label:
        # We need to run bucket segment sum for each of the label class. In
        # the multi_label case, the rank of the label is 2. We first transpose
        # it so that the label dim becomes the first and we can parallel run
        # though them.
        true_labels = ops.transpose(true_labels)
        false_labels = ops.transpose(false_labels)
        bucket_indices = ops.transpose(bucket_indices)

        def gather_bucket(label_and_bucket_index):
            # Segment-sum one label column's weighted mass into its buckets.
            label, bucket_index = (
                label_and_bucket_index[0],
                label_and_bucket_index[1],
            )
            return ops.segment_sum(
                data=label,
                segment_ids=bucket_index,
                num_segments=num_thresholds,
            )

        tp_bucket_v = backend.vectorized_map(
            gather_bucket,
            (true_labels, bucket_indices),
        )
        fp_bucket_v = backend.vectorized_map(
            gather_bucket, (false_labels, bucket_indices)
        )
        # flip/cumsum/flip implements a reversed cumulative sum over buckets.
        tp = ops.transpose(ops.flip(ops.cumsum(ops.flip(tp_bucket_v), axis=1)))
        fp = ops.transpose(ops.flip(ops.cumsum(ops.flip(fp_bucket_v), axis=1)))
    else:
        tp_bucket_v = ops.segment_sum(
            data=true_labels,
            segment_ids=bucket_indices,
            num_segments=num_thresholds,
        )
        fp_bucket_v = ops.segment_sum(
            data=false_labels,
            segment_ids=bucket_indices,
            num_segments=num_thresholds,
        )
        tp = ops.flip(ops.cumsum(ops.flip(tp_bucket_v)))
        fp = ops.flip(ops.cumsum(ops.flip(fp_bucket_v)))
    # fn = sum(true_labels) - tp
    # tn = sum(false_labels) - fp
    if (
        ConfusionMatrix.TRUE_NEGATIVES in variables_to_update
        or ConfusionMatrix.FALSE_NEGATIVES in variables_to_update
    ):
        if multi_label:
            total_true_labels = ops.sum(true_labels, axis=1)
            total_false_labels = ops.sum(false_labels, axis=1)
        else:
            total_true_labels = ops.sum(true_labels)
            total_false_labels = ops.sum(false_labels)
    # Accumulate into whichever confusion-matrix variables were requested.
    if ConfusionMatrix.TRUE_POSITIVES in variables_to_update:
        variable = variables_to_update[ConfusionMatrix.TRUE_POSITIVES]
        variable.assign(variable + tp)
    if ConfusionMatrix.FALSE_POSITIVES in variables_to_update:
        variable = variables_to_update[ConfusionMatrix.FALSE_POSITIVES]
        variable.assign(variable + fp)
    if ConfusionMatrix.TRUE_NEGATIVES in variables_to_update:
        variable = variables_to_update[ConfusionMatrix.TRUE_NEGATIVES]
        tn = total_false_labels - fp
        variable.assign(variable + tn)
    if ConfusionMatrix.FALSE_NEGATIVES in variables_to_update:
        variable = variables_to_update[ConfusionMatrix.FALSE_NEGATIVES]
        fn = total_true_labels - tp
        variable.assign(variable + fn)
def is_evenly_distributed_thresholds(thresholds):
    """Check if the thresholds list is evenly distributed.

    We could leverage evenly distributed thresholds to use less memory when
    calculating metrics like AUC where each individual threshold needs to be
    evaluated.

    Args:
        thresholds: A python list or tuple, or 1D numpy array whose values
            range in [0, 1].

    Returns:
        boolean, whether the values in the inputs are evenly distributed.
    """
    count = len(thresholds)
    # Fewer than 3 points cannot use the optimized bucketing path.
    if count < 3:
        return False
    # Reference grid of `count` evenly spaced points over [0, 1].
    reference = np.arange(count, dtype=np.float32) / (count - 1)
    return np.allclose(thresholds, reference, atol=backend.epsilon())
def update_confusion_matrix_variables(
    variables_to_update,
    y_true,
    y_pred,
    thresholds,
    top_k=None,
    class_id=None,
    sample_weight=None,
    multi_label=False,
    label_weights=None,
    thresholds_distributed_evenly=False,
):
    """Updates the given confusion matrix variables.

    For every pair of values in y_true and y_pred:

    true_positive: y_true == True and y_pred > thresholds
    false_negatives: y_true == True and y_pred <= thresholds
    true_negatives: y_true == False and y_pred <= thresholds
    false_positive: y_true == False and y_pred > thresholds

    The results will be weighted and added together. When multiple thresholds
    are provided, we will repeat the same for every threshold.

    For estimation of these metrics over a stream of data, the function
    creates an `update_op` operation that updates the given variables.

    If `sample_weight` is `None`, weights default to 1.
    Use weights of 0 to mask values.

    Args:
        variables_to_update: Dictionary with 'tp', 'fn', 'tn', 'fp' as valid
            keys and corresponding variables to update as values.
        y_true: A `Tensor` whose shape matches `y_pred`. Will be cast to
            `bool`.
        y_pred: A floating point `Tensor` of arbitrary shape and whose values
            are in the range `[0, 1]`.
        thresholds: A float value, float tensor, python list, or tuple of
            float thresholds in `[0, 1]`, or NEG_INF (used when top_k is set).
        top_k: Optional int, indicates that the positive labels should be
            limited to the top k predictions.
        class_id: Optional int, limits the prediction and labels to the class
            specified by this argument.
        sample_weight: Optional `Tensor` whose rank is either 0, or the same
            rank as `y_true`, and must be broadcastable to `y_true` (i.e., all
            dimensions must be either `1`, or the same as the corresponding
            `y_true` dimension).
        multi_label: Optional boolean indicating whether multidimensional
            prediction/labels should be treated as multilabel responses, or
            flattened into a single label. When True, the values of
            `variables_to_update` must have a second dimension equal to the
            number of labels in y_true and y_pred, and those tensors must not
            be RaggedTensors.
        label_weights: (optional) tensor of non-negative weights for
            multilabel data. The weights are applied when calculating TP, FP,
            FN, and TN without explicit multilabel handling (i.e. when the
            data is to be flattened).
        thresholds_distributed_evenly: Boolean, whether the thresholds are
            evenly distributed within the list. An optimized method will be
            used if this is the case. See
            _update_confusion_matrix_variables_optimized() for more details.

    Raises:
        ValueError: If `y_pred` and `y_true` have mismatched shapes, or if
            `sample_weight` is not `None` and its shape doesn't match
            `y_pred`, or if `variables_to_update` contains invalid keys.
    """
    if multi_label and label_weights is not None:
        raise ValueError(
            "`label_weights` for multilabel data should be handled "
            "outside of `update_confusion_matrix_variables` when "
            "`multi_label` is True."
        )
    if variables_to_update is None:
        return
    if not any(
        key for key in variables_to_update if key in list(ConfusionMatrix)
    ):
        raise ValueError(
            "Please provide at least one valid confusion matrix "
            "variable to update. Valid variable key options are: "
            f'"{list(ConfusionMatrix)}". '
            f'Received: "{variables_to_update.keys()}"'
        )
    variable_dtype = list(variables_to_update.values())[0].dtype
    y_true = ops.cast(y_true, dtype=variable_dtype)
    y_pred = ops.cast(y_pred, dtype=variable_dtype)
    if thresholds_distributed_evenly:
        # Check whether the thresholds has any leading or tailing epsilon
        # added for floating point imprecision. The leading and tailing
        # threshold will be handled bit differently as the corner case. At
        # this point, thresholds should be a list/array with more than 2
        # items, and ranged between [0, 1]. See
        # is_evenly_distributed_thresholds() for more details.
        thresholds_with_epsilon = thresholds[0] < 0.0 or thresholds[-1] > 1.0
    thresholds = ops.convert_to_tensor(thresholds, dtype=variable_dtype)
    num_thresholds = ops.shape(thresholds)[0]
    if multi_label:
        # one_thresh: whether a single threshold vector is shared across all
        # labels (rank-1 thresholds) rather than one row per label.
        one_thresh = ops.equal(
            np.array(1, dtype="int32"),
            len(thresholds.shape),
        )
    else:
        one_thresh = np.array(True, dtype="bool")
    invalid_keys = [
        key for key in variables_to_update if key not in list(ConfusionMatrix)
    ]
    if invalid_keys:
        raise ValueError(
            f'Invalid keys: "{invalid_keys}". '
            f'Valid variable key options are: "{list(ConfusionMatrix)}"'
        )
    y_pred, y_true = squeeze_or_expand_to_same_rank(y_pred, y_true)
    if sample_weight is not None:
        sample_weight = ops.expand_dims(
            ops.cast(sample_weight, dtype=variable_dtype), axis=-1
        )
        _, sample_weight = squeeze_or_expand_to_same_rank(
            y_true, sample_weight, expand_rank_1=False
        )
    if top_k is not None:
        # Mask out everything but the top-k predictions per sample.
        y_pred = _filter_top_k(y_pred, top_k)
    if class_id is not None:
        if len(y_pred.shape) == 1:
            raise ValueError(
                "When class_id is provided, y_pred must be a 2D array "
                "with shape (num_samples, num_classes), found shape: "
                f"{y_pred.shape}"
            )
        # Preserve dimension to match with sample_weight
        y_true = y_true[..., class_id, None]
        y_pred = y_pred[..., class_id, None]
    if thresholds_distributed_evenly:
        # Delegate to the O(T + N) bucketized implementation.
        return _update_confusion_matrix_variables_optimized(
            variables_to_update,
            y_true,
            y_pred,
            thresholds,
            multi_label=multi_label,
            sample_weights=sample_weight,
            label_weights=label_weights,
            thresholds_with_epsilon=thresholds_with_epsilon,
        )
    # Two branches: dynamic shapes need tensor ops to compute tile counts,
    # fully static shapes can use plain numpy.
    if None in y_pred.shape:
        pred_shape = ops.shape(y_pred)
        num_predictions = pred_shape[0]
        if len(y_pred.shape) == 1:
            num_labels = 1
        else:
            num_labels = ops.cast(
                ops.prod(ops.array(pred_shape[1:]), axis=0), "int32"
            )
        thresh_label_tile = ops.where(one_thresh, num_labels, 1)
    else:
        pred_shape = ops.shape(y_pred)
        num_predictions = pred_shape[0]
        if len(y_pred.shape) == 1:
            num_labels = 1
        else:
            num_labels = np.prod(pred_shape[1:], axis=0).astype("int32")
        thresh_label_tile = np.where(one_thresh, num_labels, 1)
    # Reshape predictions and labels, adding a dim for thresholding.
    if multi_label:
        predictions_extra_dim = ops.expand_dims(y_pred, 0)
        labels_extra_dim = ops.expand_dims(ops.cast(y_true, dtype="bool"), 0)
    else:
        # Flatten predictions and labels when not multilabel.
        predictions_extra_dim = ops.reshape(y_pred, [1, -1])
        labels_extra_dim = ops.reshape(ops.cast(y_true, dtype="bool"), [1, -1])
    # Tile the thresholds for every prediction.
    if multi_label:
        thresh_pretile_shape = [num_thresholds, 1, -1]
        thresh_tiles = [1, num_predictions, thresh_label_tile]
        data_tiles = [num_thresholds, 1, 1]
    else:
        thresh_pretile_shape = [num_thresholds, -1]
        thresh_tiles = [1, num_predictions * num_labels]
        data_tiles = [num_thresholds, 1]
    thresh_tiled = ops.tile(
        ops.reshape(thresholds, thresh_pretile_shape), thresh_tiles
    )
    # Tile the predictions for every threshold.
    preds_tiled = ops.tile(predictions_extra_dim, data_tiles)
    # Compare predictions and threshold.
    pred_is_pos = ops.greater(preds_tiled, thresh_tiled)
    # Tile labels by number of thresholds
    label_is_pos = ops.tile(labels_extra_dim, data_tiles)
    if sample_weight is not None:
        sample_weight = ops.broadcast_to(
            ops.cast(sample_weight, dtype=y_pred.dtype), ops.shape(y_pred)
        )
        weights_tiled = ops.tile(
            ops.reshape(sample_weight, thresh_tiles), data_tiles
        )
    else:
        weights_tiled = None
    if label_weights is not None and not multi_label:
        label_weights = ops.expand_dims(label_weights, 0)
        label_weights = ops.broadcast_to(label_weights, ops.shape(y_pred))
        label_weights_tiled = ops.tile(
            ops.reshape(label_weights, thresh_tiles), data_tiles
        )
        if weights_tiled is None:
            weights_tiled = label_weights_tiled
        else:
            weights_tiled = ops.multiply(weights_tiled, label_weights_tiled)

    def weighted_assign_add(label, pred, weights, var):
        # Accumulate the (optionally weighted) count of label∧pred into var.
        label_and_pred = ops.cast(ops.logical_and(label, pred), dtype=var.dtype)
        if weights is not None:
            label_and_pred *= ops.cast(weights, dtype=var.dtype)
        var.assign(var + ops.sum(label_and_pred, 1))

    # Map each confusion-matrix entry to its defining (label, pred) masks;
    # negations are computed lazily, only when some requested entry needs them.
    loop_vars = {
        ConfusionMatrix.TRUE_POSITIVES: (label_is_pos, pred_is_pos),
    }
    update_tn = ConfusionMatrix.TRUE_NEGATIVES in variables_to_update
    update_fp = ConfusionMatrix.FALSE_POSITIVES in variables_to_update
    update_fn = ConfusionMatrix.FALSE_NEGATIVES in variables_to_update
    if update_fn or update_tn:
        pred_is_neg = ops.logical_not(pred_is_pos)
        loop_vars[ConfusionMatrix.FALSE_NEGATIVES] = (label_is_pos, pred_is_neg)
    if update_fp or update_tn:
        label_is_neg = ops.logical_not(label_is_pos)
        loop_vars[ConfusionMatrix.FALSE_POSITIVES] = (label_is_neg, pred_is_pos)
    if update_tn:
        loop_vars[ConfusionMatrix.TRUE_NEGATIVES] = (
            label_is_neg,
            pred_is_neg,
        )
    for matrix_cond, (label, pred) in loop_vars.items():
        if matrix_cond in variables_to_update:
            weighted_assign_add(
                label, pred, weights_tiled, variables_to_update[matrix_cond]
            )
def _filter_top_k(x, k):
    """Keep the top-k entries along the last dim of `x`; mask out the rest.

    Entries that are not among the k largest values in the last dimension are
    replaced with NEG_INF; top-k entries keep their original values. Used for
    computing top-k prediction values in dense labels (which has the same
    shape as predictions) for recall and precision top-k metrics.

    Args:
        x: tensor with any dimensions.
        k: the number of values to keep.

    Returns:
        tensor with same shape and dtype as x.
    """
    num_classes = ops.shape(x)[-1]
    _, top_indices = ops.top_k(x, k)
    # One-hot every selected index, then sum over the k picks to get a
    # {0, 1} mask aligned with the last dimension of `x`.
    mask = ops.sum(ops.one_hot(top_indices, num_classes, axis=-1), axis=-2)
    # Pass masked-in entries through; drive masked-out entries to NEG_INF.
    return x * mask + NEG_INF * (1 - mask)
def confusion_matrix(
    labels,
    predictions,
    num_classes,
    weights=None,
    dtype="int32",
):
    """Computes the confusion matrix from predictions and labels.

    The matrix columns represent the prediction labels and the rows represent
    the real labels. The confusion matrix is always a 2-D array of shape
    `(n, n)`, where `n` is the number of valid labels for a given
    classification task. Both prediction and labels must be 1-D arrays of the
    same shape in order for this function to work.

    Class labels are expected to start at 0. For example, if `num_classes` is
    3, then the possible labels would be `[0, 1, 2]`.

    If `weights` is not `None`, then each prediction contributes its
    corresponding weight to the total value of the confusion matrix cell.

    For example:

    ```python
    keras.metrics.metrics_utils.confusion_matrix([1, 2, 4], [2, 2, 4]) ==>
        [[0 0 0 0 0]
         [0 0 1 0 0]
         [0 0 1 0 0]
         [0 0 0 0 0]
         [0 0 0 0 1]]
    ```

    Note that the possible labels are assumed to be `[0, 1, 2, 3, 4]`,
    resulting in a 5x5 confusion matrix.

    Args:
        labels: 1-D tensor of real labels for the classification task.
        predictions: 1-D tensor of predictions for a given classification.
        num_classes: The possible number of labels the classification
            task can have.
        weights: An optional tensor whose shape matches `predictions`.
        dtype: Data type of the confusion matrix.

    Returns:
        A tensor of type `dtype` with shape `(n, n)` representing the
        confusion matrix, where `n` is the number of possible labels in the
        classification task.
    """
    label_t = ops.convert_to_tensor(labels, dtype)
    pred_t = ops.convert_to_tensor(predictions, dtype)
    # Align ranks before pairing labels with predictions.
    label_t, pred_t = squeeze_or_expand_to_same_rank(label_t, pred_t)
    pred_t = ops.cast(pred_t, dtype)
    label_t = ops.cast(label_t, dtype)
    # Each (label, prediction) pair addresses one cell; its contribution is
    # 1 unless explicit weights are supplied.
    if weights is None:
        cell_values = ops.ones_like(pred_t, dtype)
    else:
        cell_values = ops.convert_to_tensor(weights, dtype)
    pair_indices = ops.cast(
        ops.stack([label_t, pred_t], axis=1), dtype="int64"
    )
    cell_values = ops.cast(cell_values, dtype=dtype)
    side = int(num_classes)
    # Scatter-add every pair's weight into the (row=label, col=pred) cell.
    return ops.scatter(pair_indices, cell_values, (side, side))
|
keras-teamREPO_NAMEkerasPATH_START.@keras_extracted@keras-master@keras@src@metrics@metrics_utils.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/splom/hoverlabel/font/__init__.py",
"type": "Python"
}
|
import sys

# Auto-generated Plotly validator package initializer.
# On Python < 3.7 every validator class is imported eagerly; on 3.7+ the
# classes are registered for lazy loading via module-level __getattr__
# (PEP 562), so each validator submodule is imported only on first access.
if sys.version_info < (3, 7):
    from ._sizesrc import SizesrcValidator
    from ._size import SizeValidator
    from ._familysrc import FamilysrcValidator
    from ._family import FamilyValidator
    from ._colorsrc import ColorsrcValidator
    from ._color import ColorValidator
else:
    from _plotly_utils.importers import relative_import

    __all__, __getattr__, __dir__ = relative_import(
        __name__,
        [],
        [
            "._sizesrc.SizesrcValidator",
            "._size.SizeValidator",
            "._familysrc.FamilysrcValidator",
            "._family.FamilyValidator",
            "._colorsrc.ColorsrcValidator",
            "._color.ColorValidator",
        ],
    )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@splom@hoverlabel@font@__init__.py@.PATH_END.py
|
{
"filename": "getdist_test.py",
"repo_name": "cmbant/CosmoMC",
"repo_path": "CosmoMC_extracted/CosmoMC-master/python/getdist/tests/getdist_test.py",
"type": "Python"
}
|
import tempfile
import os
import numpy as np
import unittest
import subprocess
import shutil
from getdist import loadMCSamples, plots, IniFile
from getdist.tests.test_distributions import Test2DDistributions, Gaussian1D, Gaussian2D
from getdist.mcsamples import MCSamples
from getdist.styles.tab10 import style_name as tab10
from getdist.styles.planck import style_name as planck
from getdist.parampriors import ParamBounds
from matplotlib import rcParams
import matplotlib.pyplot as plt
class GetDistFileTest(unittest.TestCase):
    """test reading files, convergence routines and getdist script"""

    def setUp(self):
        # Fixed seed so the simulated chains — and the exact numeric
        # assertions below — are reproducible across runs.
        random_state = np.random.default_rng(10)
        # Simulate some chain files
        prob = Test2DDistributions().bimodal[0]
        self.tempdir = os.path.join(tempfile.gettempdir(), 'gettdist_tests')
        if not os.path.exists(self.tempdir):
            os.mkdir(self.tempdir)
        self.root = os.path.join(self.tempdir, 'testchain')
        # Write three chain files sharing the same root name.
        for n in range(3):
            mcsamples = prob.MCSamples(4000, logLikes=True, random_state=random_state)
            mcsamples.saveAsText(self.root, chain_index=n)

    def tearDown(self):
        # Step out of the temp dir before removing it (testGetDist chdirs
        # into it, and the cwd cannot be deleted on some platforms).
        os.chdir(tempfile.gettempdir())
        shutil.rmtree(self.tempdir)

    def testFileLoadPlot(self):
        # Loading via loadMCSamples and via the plotter cache must agree.
        samples = loadMCSamples(self.root, settings={'ignore_rows': 0.1})
        g = plots.get_single_plotter(chain_dir=self.tempdir, analysis_settings={'ignore_rows': 0.1})
        self.assertEqual(g.samples_for_root('testchain').numrows, samples.numrows,
                         "Inconsistent chain loading")
        self.assertEqual(g.samples_for_root('testchain').getTable().tableTex(),
                         samples.getTable().tableTex(), 'Inconsistent load result')
        samples.getConvergeTests(0.95)
        # Regression value for the seeded chains above.
        self.assertAlmostEqual(0.00052997, samples.GelmanRubin, 4, 'Gelman Rubin error, got ' + str(samples.GelmanRubin))
        g = plots.get_single_plotter()
        g.plot_3d(samples, ['x', 'y', 'x'])
        g.export(self.root + '_plot.pdf')
        g = plots.get_single_plotter(chain_dir=self.tempdir,
                                     analysis_settings={'ignore_rows': 0.1, 'contours': [0.68, 0.95, 0.99]})
        g.settings.num_plot_contours = 3
        g.plot_2d('testchain', ['x', 'y'])

    def testGetDist(self):
        # Exercise the command-line `getdist` entry point end to end:
        # analysis run, parameter-file generation, and plot-script output.
        from getdist.command_line import getdist_command
        os.chdir(self.tempdir)
        res = getdist_command([self.root])
        # Note this can fail if your local analysis defaults changes the default ignore_rows
        self.assertTrue('-Ln(mean like) = 2.31' in res, res)
        fname = 'testchain_pars.ini'
        getdist_command(['--make_param_file', fname])
        ini = IniFile(fname)
        ini.params['no_plots'] = False
        ini.params['plot_2D_num'] = 1
        ini.params['plot1'] = 'x y'
        ini.params['num_3D_plots'] = 1
        ini.params['3D_plot1'] = 'x y x'
        ini.params['triangle_params'] = '*[xy]*'
        ini.saveFile(fname)
        res = getdist_command([fname, self.root])
        self.assertTrue('-Ln(mean like) = 2.31' in res)

        def check_run():
            # Each generated plot script must execute and produce its PDF.
            for f in ['.py', '_2D.py', '_3D.py', '_tri.py']:
                pyname = self.root + f
                self.assertTrue(os.path.isfile(pyname))
                subprocess.check_output(['python', pyname])
                pdf = self.root + f.replace('py', 'pdf')
                self.assertTrue(os.path.isfile(pdf))
                os.remove(pdf)
                os.remove(pyname)

        check_run()
class GetDistTest(unittest.TestCase):
"""test some getdist routines and plotting"""
def setUp(self):
self.testdists = Test2DDistributions()
def testBestFit(self):
samples = self.testdists.bimodal[0].MCSamples(12000, logLikes=True, random_state=10)
bestSample = samples.getParamBestFitDict(best_sample=True)
self.assertAlmostEqual(bestSample['loglike'], 1.708, 2)
def testTables(self):
self.samples = self.testdists.bimodal[0].MCSamples(12000, logLikes=True, random_state=10)
self.assertEqual(str(self.samples.getLatex(limit=2)),
"(['x', 'y'], ['0.0^{+2.1}_{-2.1}', '0.0^{+1.3}_{-1.3}'])", "MCSamples.getLatex error")
table = self.samples.getTable(columns=1, limit=1, paramList=['x'])
self.assertTrue(r'0.0\pm 1.2' in table.tableTex(), "Table tex error")
def testPCA(self):
samples = self.testdists.bending.MCSamples(12000, logLikes=True, random_state=10)
self.assertTrue('e-value: 0.10' in samples.PCA(['x', 'y']))
def testLimits(self):
samples = self.testdists.cut_correlated.MCSamples(12000, logLikes=False, random_state=10)
stats = samples.getMargeStats()
lims = stats.parWithName('x').limits
self.assertAlmostEqual(lims[0].lower, 0.2077, 3)
self.assertAlmostEqual(lims[1].lower, 0.0574, 3)
self.assertTrue(lims[2].onetail_lower)
# check some analytics (note not very accurate actually)
samples = Gaussian1D(0, 1, xmax=1).MCSamples(1500000, logLikes=False, random_state=10)
stats = samples.getMargeStats()
lims = stats.parWithName('x').limits
self.assertAlmostEqual(lims[0].lower, -0.78828, 2)
self.assertAlmostEqual(lims[0].upper, 0.7954, 2)
self.assertAlmostEqual(lims[1].lower, -1.730, 2)
def testDensitySymmetries(self):
# check flipping samples gives flipped density
samps = Gaussian1D(0, 1, xmin=-1, xmax=3).MCSamples(12000, random_state=10)
d = samps.get1DDensity('x')
samps.samples[:, 0] *= -1
samps = MCSamples(samples=samps.samples, names=['x'], ranges={'x': [-3, 1]})
d2 = samps.get1DDensity('x')
self.assertTrue(np.allclose(d.P, d2.P[::-1]))
samps = Gaussian2D([0, 0], np.diagflat([1, 2]), xmin=-1, xmax=2, ymin=0, ymax=3).MCSamples(12000,
random_state=10)
d = samps.get2DDensity('x', 'y')
samps.samples[:, 0] *= -1
samps = MCSamples(samples=samps.samples, names=['x', 'y'], ranges={'x': [-2, 1], 'y': [0, 3]})
d2 = samps.get2DDensity('x', 'y')
self.assertTrue(np.allclose(d.P, d2.P[:, ::-1]))
samps.samples[:, 0] *= -1
samps.samples[:, 1] *= -1
samps = MCSamples(samples=samps.samples, names=['x', 'y'], ranges={'x': [-1, 2], 'y': [-3, 0]})
d2 = samps.get2DDensity('x', 'y')
self.assertTrue(np.allclose(d.P, d2.P[::-1, ::], atol=1e-5))
def testLoads(self):
    """Constructing MCSamples from several chain arrays should give the
    weight-averaged mean of the individual chains."""
    # test initiating from multiple chain arrays
    samps = []
    for i in range(3):
        samps.append(Gaussian2D([1.5, -2], np.diagflat([1, 2])).MCSamples(1001 + i * 10, names=['x', 'y'],
                                                                          random_state=10))
    fromChains = MCSamples(samples=[s.samples for s in samps], names=['x', 'y'])
    # Expected combined mean: norm-weighted average over the chains.
    mean = np.sum([s.norm * s.mean('x') for s in samps]) / np.sum([s.norm for s in samps])
    meanChains = fromChains.mean('x')
    self.assertAlmostEqual(mean, meanChains)
    self.assertAlmostEqual(mean, float(np.mean(fromChains['x'])))
def testMixtures(self):
    """Gaussian-mixture conditional/marginal pdfs and their plotting."""
    from getdist.gaussian_mixtures import Mixture2D, GaussianND
    cov1 = [[0.001 ** 2, 0.0006 * 0.05], [0.0006 * 0.05, 0.05 ** 2]]
    cov2 = [[0.01 ** 2, -0.005 * 0.03], [-0.005 * 0.03, 0.03 ** 2]]
    mean1 = [0.02, 0.2]
    mean2 = [0.023, 0.09]
    mixture = Mixture2D([mean1, mean2], [cov1, cov2], names=['zobs', 't'], labels=[r'z_{\rm obs}', 't'],
                        label='Model')
    tester = 0.03
    cond = mixture.conditionalMixture(['zobs'], [tester])
    marge = mixture.marginalizedMixture(['zobs'])
    # test P(x,y) = P(y)P(x|y)
    self.assertAlmostEqual(mixture.pdf([tester, 0.15]), marge.pdf([tester]) * cond.pdf([0.15]))
    samples = mixture.MCSamples(3000, label='Samples', random_state=10)
    # Smoke-test plotting of samples, analytic mixture, and conditional.
    g = plots.get_subplot_plotter()
    g.triangle_plot([samples, mixture], filled=False)
    g.new_plot()
    g.plot_1d(cond, 't')
    s1 = 0.0003
    covariance = [[s1 ** 2, 0.6 * s1 * 0.05, 0], [0.6 * s1 * 0.05, 0.05 ** 2, 0.2 ** 2], [0, 0.2 ** 2, 2 ** 2]]
    mean = [0.017, 1, -2]
    gauss = GaussianND(mean, covariance)
    g = plots.get_subplot_plotter()
    g.triangle_plot(gauss, filled=True)
def testPlots(self):
    """Smoke-test the main plotting entry points (1D/2D/3D, triangle,
    rectangle, bands) and check subplot-axis lookup helpers."""
    self.samples = self.testdists.bimodal[0].MCSamples(12000, logLikes=True, random_state=10)
    g = plots.get_single_plotter(auto_close=True)
    samples = self.samples
    p = samples.getParams()
    # Derived parameters, including dotted names to exercise name-pattern matching.
    samples.addDerived(p.x + (5 + p.y) ** 2, name='z')
    samples.addDerived(p.x, name='x.yx', label='forPattern')
    samples.addDerived(p.y, name='x.2', label='x_2')
    samples.updateBaseStatistics()
    g.plot_1d(samples, 'x')
    g.new_plot()
    g.plot_1d(samples, 'y', normalized=True, marker=0.1, marker_color='b')
    g.new_plot()
    g.plot_2d(samples, 'x', 'y')
    g.new_plot()
    g.plot_2d(samples, 'x', 'y', filled=True)
    g.new_plot()
    g.plot_2d(samples, 'x', 'y', shaded=True)
    g.new_plot()
    g.plot_2d_scatter(samples, 'x', 'y', color='red', colors=['blue'])
    g.new_plot()
    g.plot_3d(samples, ['x', 'y', 'z'])
    g = plots.get_subplot_plotter(width_inch=8.5, auto_close=True)
    g.plots_1d(samples, ['x', 'y'], share_y=True)
    g.new_plot()
    g.triangle_plot(samples, ['x', 'y', 'z'])
    # Axis lookup by parameter name(s), ordered and unordered.
    self.assertTrue(g.get_axes_for_params('x', 'z') == g.subplots[2, 0])
    self.assertTrue(g.get_axes_for_params('z', 'x', ordered=False) == g.subplots[2, 0])
    self.assertTrue(g.get_axes_for_params('x') == g.subplots[0, 0])
    self.assertTrue(g.get_axes_for_params('x', 'p', 'q') is None)
    self.assertTrue(g.get_axes(ax=('x', 'z')) == g.subplots[2, 0])
    self.assertTrue(g.get_axes(ax=(2, 0)) == g.subplots[2, 0])
    g.new_plot()
    g.triangle_plot(samples, ['x', 'y'], plot_3d_with_param='z')
    g.new_plot()
    g.rectangle_plot(['x', 'y'], ['z'], roots=samples, filled=True)
    prob2 = self.testdists.bimodal[1]
    samples2 = prob2.MCSamples(12000, random_state=10)
    g.new_plot()
    g.triangle_plot([samples, samples2], ['x', 'y'])
    g.new_plot()
    g.plots_2d([samples, samples2], param_pairs=[['x', 'y'], ['x', 'z']])
    g.new_plot()
    g.plots_2d([samples, samples2], 'x', ['z', 'y'])
    g.new_plot()
    # Wildcard parameter-name patterns should match the dotted derived names.
    self.assertEqual([name.name for name in samples.paramNames.parsWithNames('x.*')], ['x.yx', 'x.2'])
    g.triangle_plot(samples, 'x.*')
    samples.updateSettings({'contours': '0.68 0.95 0.99'})
    g.settings.num_plot_contours = 3
    g.plot_2d(samples, 'x', 'y', filled=True)
    g.add_y_bands(0.2, 1.5)
    g.add_x_bands(-0.1, 1.2, color='red')
    g.new_plot()
    omm = np.arange(0.1, 0.7, 0.01)
    g.add_bands(omm, 0.589 * omm ** (-0.25), 0.03 * omm ** (-0.25), nbands=3)
    g = plots.get_subplot_plotter()
    import copy
    # Triangle plot with distinct upper/lower contours: off-diagonal panels
    # must share limits with their mirror and with the diagonal.
    for upper in [False, True]:
        g.triangle_plot([samples, samples2], ['x', 'y', 'z'], filled=True,
                        upper_roots=[copy.deepcopy(samples)], upper_kwargs={'contour_colors': ['green']},
                        legend_labels=['1', '2', '3'], upper_label_right=upper)
        for i in range(3):
            for j in range(i):
                self.assertTrue(g.subplots[i, j].get_xlim() == g.subplots[j, i].get_ylim())
                self.assertTrue(g.subplots[i, j].get_ylim() == g.subplots[j, i].get_xlim())
                self.assertTrue(g.subplots[i, j].get_xlim() == g.subplots[j, j].get_xlim())
def test_styles(self):
    """Switching active plot styles updates plotter settings and must leave
    matplotlib rcParams unchanged afterwards."""
    tmp = rcParams.copy()
    plots.set_active_style(tab10)
    g = plots.get_single_plotter()
    self.assertEqual(g.settings.line_styles.name, 'tab10')
    plots.set_active_style(planck)
    g = plots.get_single_plotter()
    self.assertTrue(g.settings.prob_y_ticks)
    plots.set_active_style(tab10)
    g = plots.get_single_plotter()
    self.assertEqual(g.settings.line_styles.name, 'tab10')
    # Reset to default style.
    plots.set_active_style()
    g = plots.get_single_plotter()
    self.assertFalse(g.settings.prob_y_ticks)
    # Style can also be selected per-plotter by name.
    g = plots.get_single_plotter(style='tab10')
    self.assertEqual(g.settings.line_styles.name, 'tab10')
    plots.set_active_style('planck')
    plots.set_active_style()
    # rcParams must be restored exactly after style switching.
    self.assertDictEqual(tmp, rcParams)
class UtilTest(unittest.TestCase):
    """test bounded and unbounded tick assignment"""

    def _plot_with_params(self, scale, x, off, prune, default=False):
        """Make a 1-axes figure of width ``x`` inches spanning
        ``off +/- scale``, using BoundedMaxNLocator unless ``default``."""
        from getdist.matplotlib_ext import BoundedMaxNLocator
        fig, axs = plt.subplots(1, 1, figsize=(x, 1))
        axs.plot([off - scale, off + scale], [0, 1])
        axs.set_yticks([])
        if not default:
            axs.xaxis.set_major_locator(BoundedMaxNLocator(prune=prune))
        axs.xaxis.get_major_formatter().useOffset = False
        fig.suptitle("%s: scale %g, size %g, offset %g" % ('Default' if default else 'Bounded', scale, x, off),
                     fontsize=6)
        return fig, axs

    def test_one_locator(self):
        """Single locator smoke test on a narrow axis."""
        self._plot_with_params(0.01, 1, 0.05, True)
        plt.draw()

    def test_y(self):
        """BoundedMaxNLocator applied to a y axis."""
        from getdist.matplotlib_ext import BoundedMaxNLocator
        fig, ax = plt.subplots(1, 1, figsize=(3, 3))
        ax.plot([0, 1], [0, 1])
        ax.yaxis.set_major_locator(BoundedMaxNLocator(prune=True))

        def check_ticks(bounds, expected):
            # Helper: set limits and require the exact tick values.
            ax.set_ylim(bounds)
            ticks = ax.get_yticks()
            if len(ticks) != len(expected) or not np.allclose(ticks, expected):
                raise self.failureException("Wrong ticks %s for bounds %s" % (ticks, bounds))

        check_ticks([0.0253, 0.02915], [0.026, 0.027, 0.028])

    def test_specifics(self):
        """Exact tick choices for a set of known-tricky axis ranges."""
        testdists = Test2DDistributions()
        samples = testdists.bimodal[0].MCSamples(1000, logLikes=True, random_state=10)
        g = plots.get_subplot_plotter(auto_close=True)
        g.settings.prob_label = r'$P$'
        g.settings.prob_y_ticks = True
        g.plot_1d(samples, 'x', _no_finish=True)
        ax = g.get_axes()
        self.assertTrue(np.allclose(ax.get_yticks(), [0, 0.5, 1]), "Wrong probability ticks")

        def check_ticks(bounds, expected):
            ax.set_xlim(bounds)
            ticks = ax.get_xticks()
            if len(ticks) != len(expected) or not np.allclose(ticks, expected):
                raise self.failureException("Wrong ticks %s for bounds %s" % (ticks, bounds))

        check_ticks([-5.2, 5.2], [-4, -2, 0, 2, 4])
        check_ticks([0, 8.2], [0, 2, 4, 6, 8])
        check_ticks([0.0219, 0.02232], [0.022, 0.0222])
        check_ticks([-0.009, 0.009], [-0.008, 0., 0.008])
        # Shared-y layout changes available width, hence the tick choices.
        g.make_figure(nx=2, ny=1, sharey=True)
        ax = g.get_axes()
        g._set_main_axis_properties(ax.xaxis, True)
        ax.set_yticks([])
        check_ticks([-0.009, 0.009], [-0.006, 0., 0.006])
        check_ticks([1, 1.0004], [1.0001, 1.0003])

    def test_locator(self):
        """Sweep figure sizes/scales/offsets; require at least two ticks
        whenever the axis is wide enough (writes a PDF for visual inspection)."""
        import matplotlib.backends.backend_pdf
        # Set TMPSMALL env variable to save the output PDF for inspection
        local = os.environ.get('TMPSMALL')
        temp = os.path.join(local or tempfile.gettempdir(), 'output.pdf')
        pdf = matplotlib.backends.backend_pdf.PdfPages(temp)
        fails = []
        for x in np.arange(1, 5, 0.5):
            for scale in [1e-4, 0.9e-2, 1e-1, 1, 14, 3000]:
                for off in [scale / 3, 1, 7.4 * scale]:
                    for prune in [True, False]:
                        fig, ax = self._plot_with_params(scale, x, off, prune)
                        pdf.savefig(fig, bbox_inches='tight')
                        if not len(ax.get_xticks()) or x >= 2 > len(ax.get_xticks()) and scale > 1e-4:
                            fails.append([scale, x, off, prune])
                        plt.close(fig)
                        if local:
                            # Also save the default-locator version for comparison.
                            fig, ax = self._plot_with_params(scale, x, off, True, True)
                            pdf.savefig(fig, bbox_inches='tight')
                            plt.close(fig)
        pdf.close()
        if not local:
            os.remove(temp)
        self.assertFalse(len(fails), "Too few ticks for %s" % fails)
class CobayaTest(unittest.TestCase):
    """Load Cobaya-format chains from the getdist_testchains checkout, if present.

    Tests are effectively skipped (the body is a no-op) when the chain
    directory does not exist locally.
    """

    def setUp(self):
        # Work inside a scratch directory so chain caches don't pollute cwd.
        self.tempdir = os.path.join(tempfile.gettempdir(), 'gettdist_tests')
        if not os.path.exists(self.tempdir):
            os.mkdir(self.tempdir)
        os.chdir(self.tempdir)
        # Locate the getdist_testchains/cobaya data, under TRAVIS_BUILD_DIR on CI
        # or relative to this file otherwise.
        self.path = os.getenv('TRAVIS_BUILD_DIR', os.path.join(os.path.dirname(__file__), '..', '..', '..'))
        self.path = os.path.normpath(os.path.join(self.path, 'getdist_testchains', 'cobaya'))

    def tearDown(self):
        # Leave the scratch directory before deleting it.
        os.chdir(tempfile.gettempdir())
        shutil.rmtree(self.tempdir)

    def test_chains(self):
        """DES_shear chain: means, bounds, sample dict, and the CLI output."""
        if os.path.exists(self.path):
            root = os.path.join(self.path, 'DES_shear')
            samples = loadMCSamples(root, settings={'ignore_rows': 0.3}, no_cache=True)
            self.assertAlmostEqual(samples.mean('ombh2'), 0.02764592190482377, 6)
            pars = samples.getParamSampleDict(10)
            self.assertAlmostEqual(0.06, pars['mnu'], 6)
            self.assertAlmostEqual(samples.getUpper('ns'), 1.07, 6)
            self.assertAlmostEqual(samples.getLower('ns'), 0.87, 6)
            self.assertEqual(samples.getLower('DES_DzS2'), None)
            self.assertAlmostEqual(0, pars['omk'])
            # The command-line entry point should report the best-fit likelihood.
            from getdist.command_line import getdist_command
            res = getdist_command([root])
            self.assertTrue('-log(Like) = 95.49' in res, res)

    def test_planck_chains(self):
        """Planck chain: stats, then round-trip bounds through saveAsText/ranges."""
        if os.path.exists(self.path):
            root = os.path.join(self.path, 'compare_devel_drag')
            samples = loadMCSamples(root, settings={'ignore_rows': 0.3}, no_cache=True)
            self.assertAlmostEqual(samples.mean('ombh2'), 0.0223749, 6)
            self.assertAlmostEqual(samples.getUpper('H0'), 100, 6)
            self.assertEqual(samples.getLower('sigma8'), None)
            samples.saveAsText(r'planck_test')
            # Saved .ranges file must reproduce every parameter's bounds.
            ranges = ParamBounds('planck_test.ranges')
            for par in samples.paramNames.names:
                self.assertEqual(samples.getUpper(par.name), ranges.getUpper(par.name))
|
cmbantREPO_NAMECosmoMCPATH_START.@CosmoMC_extracted@CosmoMC-master@python@getdist@tests@getdist_test.py@.PATH_END.py
|
{
"filename": "RS_2014_liquids.py",
"repo_name": "geodynamics/burnman",
"repo_path": "burnman_extracted/burnman-main/burnman/minerals/RS_2014_liquids.py",
"type": "Python"
}
|
# This file is part of BurnMan - a thermoelastic and thermodynamic toolkit for
# the Earth and Planetary Sciences
# Copyright (C) 2012 - 2021 by the BurnMan team, released under the GNU
# GPL v2 or later.
"""
RS_2014_liquids
^^^^^^^^^^^^^^^
Liquids from Ramo and Stixrude (2014) FPMD simulations.
There are some typos in the article which have been corrected where marked
with the help of David Munoz Ramo.
"""
import numpy as np
from ..classes.mineral import Mineral
from ..utils.chemistry import dictionarize_formula, formula_mass
class Fe2SiO4_liquid(Mineral):
    """Fe2SiO4 liquid from the Ramo and Stixrude (2014) FPMD simulations.

    Pure data container: fills ``self.params`` with fitted coefficients for
    the "dks_l" equation of state. Values marked "corrected" fix typos in
    the published article (per the module docstring, confirmed with
    D. Munoz Ramo); do not "tidy" them back to the paper's values.
    """

    def __init__(self):
        formula = "Fe2SiO4"
        formula = dictionarize_formula(formula)
        self.params = {
            "name": "Fe2SiO4_liquid",
            "formula": formula,
            "equation_of_state": "dks_l",
            "V_0": 59.7717e-6,  # modified for T_0
            "T_0": 1900.0,  # corrected
            "O_theta": 1,
            "O_f": 4,
            "m": 0.6,
            # Coefficient matrix for the EOS expansion; the row order below is
            # the corrected one (the published table had rows out of order).
            "a": np.array(
                [
                    [-4252948.0, 997810.188],
                    [-599315.125, 12032.8936],
                    [12572739.0, 7299239.5],
                    [53442800.0, -26791676.0],
                    [52981912.0, 0.0],
                ]
            ),  # corrected order
            "zeta_0": 0.0161350928,  # 0.0166734, # the comment is a refit to David's dataset
            "xi": 0.34431043,  # 0.34431053, # the comment is a refit to David's dataset
            "Tel_0": 1919.3553,  # 1921.6813, # the comment is a refit to David's dataset
            "eta": 0.0127067110,  # 0.0127067, # the comment is a refit to David's dataset
            "spin_a": [-0.00011134, 0.00010863],
            "spin_b": [3.53793, -3.81421, 2.83703, -0.676241],
            "n": sum(formula.values()),  # total atoms per formula unit
            "molar_mass": formula_mass(formula),
        }
        Mineral.__init__(self)
|
geodynamicsREPO_NAMEburnmanPATH_START.@burnman_extracted@burnman-main@burnman@minerals@RS_2014_liquids.py@.PATH_END.py
|
{
"filename": "quality_control.py",
"repo_name": "transientskp/lpf",
"repo_path": "lpf_extracted/lpf-main/lpf/quality_control/quality_control.py",
"type": "Python"
}
|
import numpy as np
import torch
from lpf.bolts.torch import nanmax, nanmean
class QualityControl:
    """Zero out corrupted or RFI-affected images in a (time, height, width) stack.

    Two filters are applied, each replacing a bad image with zeros in place:

    * corruption: the image's peak absolute pixel value exceeds
      ``corruption_threshold``;
    * RFI: the image's mean squared deviation from the per-pixel temporal
      median image is a robust (MAD-based) outlier beyond ``rfi_threshold``
      "sigma".

    Torch and NumPy implementations are provided; on CPU the NumPy path is
    used for speed.
    """

    def __init__(
        self, rfi_threshold: float = 5, corruption_threshold: float = 1e5,
    ) -> None:
        """
        Args:
            rfi_threshold: robust z-score above which an image is zeroed.
            corruption_threshold: peak |pixel| above which an image is zeroed.
        """
        self.rfi_threshold = rfi_threshold
        self.corruption_threshold = corruption_threshold

    def filter_corruption(self, images: torch.Tensor) -> torch.Tensor:
        """Zero images whose maximum absolute pixel exceeds the corruption threshold."""
        assert len(images.shape) == 3
        abs_images: torch.Tensor = abs(images)
        max_values: torch.Tensor = nanmax(abs_images, axis=(-1, -2))
        for i, peak in enumerate(max_values):  # type:ignore
            if peak > self.corruption_threshold:
                images[i] = torch.zeros_like(images[i])
        return images

    def filter_rfi(self, images: torch.Tensor) -> torch.Tensor:
        """Zero images that are robust outliers w.r.t. the temporal median image."""
        # ``keepdim`` is torch's documented spelling (numpy's alias ``keepdims``
        # also works at runtime but trips static type checkers).
        median_image: torch.Tensor = torch.median(images, dim=0, keepdim=True)[0]
        scores: np.ndarray = nanmean((images - median_image) ** 2, axis=(-1, -2)).to("cpu").numpy()  # type: ignore
        median = np.median(scores)
        # 1.4826... rescales the median absolute deviation to a std-dev estimate.
        mad_std = 1.482602218505602 * np.median(abs(scores - median))
        z_scores = (scores - median) / mad_std
        # Distinct loop variable: the original shadowed the z-score array.
        for i, z in enumerate(z_scores):
            if z > self.rfi_threshold:
                images[i] = torch.zeros_like(images[i])
        return images

    def filter_corruption_np(self, images: np.ndarray) -> np.ndarray:
        """NumPy version of :meth:`filter_corruption`."""
        max_values = np.nanmax(abs(images), axis=(-1, -2))
        for i, peak in enumerate(max_values):
            if peak > self.corruption_threshold:
                images[i] = np.zeros_like(images[i])
        return images

    def filter_rfi_np(self, images: np.ndarray) -> np.ndarray:
        """NumPy version of :meth:`filter_rfi`."""
        median_image = np.median(images, axis=0, keepdims=True)
        scores = np.nanmean((images - median_image) ** 2, axis=(-1, -2))
        median = np.median(scores)
        mad_std = 1.482602218505602 * np.median(abs(scores - median))
        z_scores = (scores - median) / mad_std
        for i, z in enumerate(z_scores):
            if z > self.rfi_threshold:
                images[i] = np.zeros_like(images[i])
        return images

    def filter_bad_images(self, images: torch.Tensor) -> torch.Tensor:
        """Run the corruption filter then the RFI filter on the stack."""
        if images.device == torch.device('cpu'):
            print("WARNING: Running on CPU, switching to NumPy for quality control due to performance issues.")
            arr = self.filter_corruption_np(images.numpy())
            arr = self.filter_rfi_np(arr)
            return torch.from_numpy(arr).float()
        images = self.filter_corruption(images)
        return self.filter_rfi(images)

    def __call__(self, images: torch.Tensor):
        return self.filter_bad_images(images)
|
transientskpREPO_NAMElpfPATH_START.@lpf_extracted@lpf-main@lpf@quality_control@quality_control.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "smsharma/fermi-gce-flows",
"repo_path": "fermi-gce-flows_extracted/fermi-gce-flows-main/sbi/utils/__init__.py",
"type": "Python"
}
|
# flake8: noqa
from sbi.user_input.user_input_checks import (
check_estimator_arg,
validate_theta_and_x,
)
from sbi.user_input.user_input_checks_utils import MultipleIndependent
from sbi.utils.conditional_density import (
conditional_corrcoeff,
eval_conditional_density,
)
from sbi.utils.get_nn_models import posterior_nn
from sbi.utils.io import get_data_root, get_log_root, get_project_root
from sbi.utils.plot import conditional_pairplot, pairplot
from sbi.utils.restriction_estimator import RestrictedPrior, RestrictionEstimator
from sbi.utils.sbiutils import (
batched_mixture_mv,
batched_mixture_vmv,
clamp_and_warn,
del_entries,
get_simulations_since_round,
handle_invalid_x,
mask_sims_from_prior,
sample_posterior_within_prior,
standardizing_net,
standardizing_transform,
warn_on_invalid_x,
warn_on_invalid_x_for_snpec_leakage,
x_shape_from_simulation,
)
from sbi.utils.torchutils import (
BoxUniform,
cbrt,
create_alternating_binary_mask,
create_mid_split_binary_mask,
create_random_binary_mask,
gaussian_kde_log_eval,
get_num_parameters,
get_temperature,
logabsdet,
merge_leading_dims,
random_orthogonal,
repeat_rows,
searchsorted,
split_leading_dim,
sum_except_batch,
tensor2numpy,
tile,
)
from sbi.utils.typechecks import (
is_bool,
is_int,
is_nonnegative_int,
is_positive_int,
is_power_of_two,
)
|
smsharmaREPO_NAMEfermi-gce-flowsPATH_START.@fermi-gce-flows_extracted@fermi-gce-flows-main@sbi@utils@__init__.py@.PATH_END.py
|
{
"filename": "conf.py",
"repo_name": "toros-astro/corral",
"repo_path": "corral_extracted/corral-master/docs/source/conf.py",
"type": "Python"
}
|
# -*- coding: utf-8 -*-
#
# Corral documentation build configuration file, created by
# sphinx-quickstart on Wed Nov 18 17:22:43 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('.'))
# Make the corral package (two levels up) importable for version extraction.
sys.path.insert(0, os.path.abspath(os.path.join('..', "..")))

import corral

#~ from recommonmark.parser import CommonMarkParser

# on_rtd is whether we are on readthedocs.org
ON_RTD = os.environ.get('READTHEDOCS', None) == 'True'

# -- General configuration ------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'

#~ source_parsers = {
#~ '.md': CommonMarkParser,
#~ }

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.doctest',
    'sphinx.ext.coverage',
    'sphinx.ext.mathjax',
]

# Add any paths that contain templates here, relative to this directory.
templates_path = ['.templates']

# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
source_suffix = ['.rst', '.md']
#source_suffix = '.rst'

# The encoding of source files.
#source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'Corral'
copyright = u'2015, Corral Team'
author = u'Juan B Cabral, Bruno Sanchez, Martín Beroiz'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = ".".join(corral.__version__)
# The full version, including alpha/beta/rc tags.
release = version

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = "_static/favicon.ico"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'Corraldoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'Corral.tex', u'Corral Documentation',
u'Juan B Cabral', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'corral', u'Corral Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'Corral', u'Corral Documentation',
author, 'Corral', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright
# The basename for the epub file. It defaults to the project name.
#epub_basename = project
# The HTML theme for the epub output. Since the default themes are not optimized
# for small screen space, using the same theme for HTML and epub output is
# usually not wise. This defaults to 'epub', a theme designed to save visual
# space.
#epub_theme = 'epub'
# The language of the text. It defaults to the language option
# or 'en' if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# A sequence of (type, uri, title) tuples for the guide element of content.opf.
#epub_guide = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files shat should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
# Choose between 'default' and 'includehidden'.
#epub_tocscope = 'default'
# Fix unsupported image types using the Pillow.
#epub_fix_images = False
# Scale large images.
#epub_max_image_width = 0
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#epub_show_urls = 'inline'
# If false, no index is generated.
#epub_use_index = True
import subs

# only import and set the theme if we're building docs locally; on Read the
# Docs the default theme is used instead.
if not ON_RTD:
    import sphinx_rtd_theme
    html_theme = 'sphinx_rtd_theme'
    html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
else:
    from mock import Mock as MagicMock

    class Mock(MagicMock):
        # Returns another Mock for any attribute access, so autodoc can import
        # modules listed in subs.MOCK_MODULES without their real dependencies.
        @classmethod
        def __getattr__(cls, name):
            return Mock()

    sys.modules.update((mod_name, Mock()) for mod_name in subs.MOCK_MODULES)

# Link targets and |substitutions| appended to every document.
rst_epilog = "\n".join(
    [".. _{}: {}".format(k, v) for k, v in subs.TARGETS.items()] +
    [".. |{}| replace:: {}".format(k, v) for k, v in subs.SUBSTITUTIONS.items()]
)
|
toros-astroREPO_NAMEcorralPATH_START.@corral_extracted@corral-master@docs@source@conf.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/treemap/outsidetextfont/__init__.py",
"type": "Python"
}
|
# Auto-generated plotly validator re-exports for treemap.outsidetextfont.
# Do not hand-edit the validator list: it is produced by plotly's codegen.
import sys
from typing import TYPE_CHECKING

# Eager imports for old Pythons (< 3.7, no module __getattr__) and for static
# type checkers; otherwise validators are loaded lazily on first access.
if sys.version_info < (3, 7) or TYPE_CHECKING:
    from ._weightsrc import WeightsrcValidator
    from ._weight import WeightValidator
    from ._variantsrc import VariantsrcValidator
    from ._variant import VariantValidator
    from ._textcasesrc import TextcasesrcValidator
    from ._textcase import TextcaseValidator
    from ._stylesrc import StylesrcValidator
    from ._style import StyleValidator
    from ._sizesrc import SizesrcValidator
    from ._size import SizeValidator
    from ._shadowsrc import ShadowsrcValidator
    from ._shadow import ShadowValidator
    from ._linepositionsrc import LinepositionsrcValidator
    from ._lineposition import LinepositionValidator
    from ._familysrc import FamilysrcValidator
    from ._family import FamilyValidator
    from ._colorsrc import ColorsrcValidator
    from ._color import ColorValidator
else:
    from _plotly_utils.importers import relative_import
    __all__, __getattr__, __dir__ = relative_import(
        __name__,
        [],
        [
            "._weightsrc.WeightsrcValidator",
            "._weight.WeightValidator",
            "._variantsrc.VariantsrcValidator",
            "._variant.VariantValidator",
            "._textcasesrc.TextcasesrcValidator",
            "._textcase.TextcaseValidator",
            "._stylesrc.StylesrcValidator",
            "._style.StyleValidator",
            "._sizesrc.SizesrcValidator",
            "._size.SizeValidator",
            "._shadowsrc.ShadowsrcValidator",
            "._shadow.ShadowValidator",
            "._linepositionsrc.LinepositionsrcValidator",
            "._lineposition.LinepositionValidator",
            "._familysrc.FamilysrcValidator",
            "._family.FamilyValidator",
            "._colorsrc.ColorsrcValidator",
            "._color.ColorValidator",
        ],
    )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@treemap@outsidetextfont@__init__.py@.PATH_END.py
|
{
"filename": "_name.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/layout/polar/angularaxis/tickformatstop/_name.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class NameValidator(_plotly_utils.basevalidators.StringValidator):
    """Auto-generated validator for the ``name`` string property of
    ``layout.polar.angularaxis.tickformatstop`` (plotly codegen output)."""

    def __init__(
        self,
        plotly_name="name",
        parent_name="layout.polar.angularaxis.tickformatstop",
        **kwargs
    ):
        super(NameValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            # pop() lets callers override these defaults via kwargs.
            edit_type=kwargs.pop("edit_type", "plot"),
            role=kwargs.pop("role", "style"),
            **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@layout@polar@angularaxis@tickformatstop@_name.py@.PATH_END.py
|
{
"filename": "the_gp_sfh_module.ipynb",
"repo_name": "kartheikiyer/dense_basis",
"repo_path": "dense_basis_extracted/dense_basis-master/docs/_build/html/_sources/tutorials/the_gp_sfh_module.ipynb",
"type": "Jupyter Notebook"
}
|
# The GP-SFH module
## Creating different shapes using SFH-tuples
The `dense_basis` code contains a module for creating smooth star formation history from a tuple consisting of (M$_*$, SFR, {$t_X$}) - the stellar mass, star formation rate, and a set of lookback times at which the galaxy forms N equally spaced quantiles of its stellar mass.
This parametrization comes with a lot of flexibility, and allows us to create a large range of SFH shapes even with a small number of parameters. Here we show a few examples of how we can create a variety of different SFH shapes with just 2 free parameters - the SFR and the t$_{50}$.
```python
import dense_basis as db
import numpy as np
import matplotlib.pyplot as plt
```
Starting dense_basis. please wait ~ a minute for the FSPS backend to initialize.
Initialized stellar population with FSPS.
```python
Nparam = 1
redshift = 1.0
logMstar = 10.0
```
Let's start with an `SFH that is rising` throughout a galaxy's lifetime, such as may be expected for high-redshift star forming galaxies. Since we are considering a galaxy with $M_* = 10^{10}M_\odot$ at z=1, we choose a reasonably high SFR of 10 $M_\odot/yr$. Since the SFR is rising, we also choose a short $t_{50}$, since the galaxy is rapidly forming its stars. Running this through the model, we get:
```python
logSFR = 1.0
t50 = 0.6 # t50, lookback time, in Gyr
sfh_tuple = np.hstack([logMstar, logSFR, Nparam, db.scale_t50(t50,redshift)])
sfh, timeax = db.tuple_to_sfh(sfh_tuple, redshift)
fig = db.plot_sfh(timeax, sfh, lookback=True)
plt.title('Rising SFH')
plt.show()
```

We next consider the case of reasonably `steady star formation`. This is different from constant star formation, because SFR goes to 0 smoothly as we approach the big bang. In this case, we choose an SFR closer to the expected lifetime average for a massive galaxy at z=1, and a $t_{50}$ close to half the age of the universe at the redshift of observation. Doing this gives us:
```python
logSFR = 0.335
t50 = 2.3 # t50, lookback time, in Gyr
sfh_tuple = np.hstack([logMstar, logSFR, Nparam, db.scale_t50(t50,redshift)])
sfh, timeax = db.tuple_to_sfh(sfh_tuple, redshift)
fig = db.plot_sfh(timeax, sfh, lookback=True)
plt.title('Steady SFH')
plt.show()
```

We now look at the class of quenched and quenching galaxies.
For the `post-starburst SFH`, we create a similar setup to the rising SFH, but with a low SFR at the time of observation. Since the galaxy still formed a lot of stars in the recent past but is not doing so now, this creates the distinctive post-starburst shape.
```python
logSFR = 0.5
t50 = 0.6 # t50, lookback time, in Gyr
sfh_tuple = np.hstack([logMstar, logSFR, Nparam, db.scale_t50(t50,redshift)])
sfh, timeax = db.tuple_to_sfh(sfh_tuple, redshift)
fig = db.plot_sfh(timeax, sfh, lookback=True)
plt.title('Post-starburst SFH')
plt.show()
```

We also consider two simple types of `quenched galaxies`, obtained easily by setting the recent SFR to a very low value. To consider the different possible shapes for a quenched SFH, we use a recent and an older value for the $t_{50}$, to obtain SFHs that quenched either gradually or abruptly.
```python
logSFR = -3.0
t50 = 4.6 # t50, lookback time, in Gyr
sfh_tuple = np.hstack([logMstar, logSFR, Nparam, db.scale_t50(t50,redshift)])
sfh, timeax = db.tuple_to_sfh(sfh_tuple, redshift)
fig = db.plot_sfh(timeax, sfh, lookback=True)
plt.title('Old Quiescent SFH')
plt.show()
```

```python
logSFR = -3.0
t50 = 1.7 # t50, lookback time, in Gyr
sfh_tuple = np.hstack([logMstar, logSFR, Nparam, db.scale_t50(t50,redshift)])
sfh, timeax = db.tuple_to_sfh(sfh_tuple, redshift)
fig = db.plot_sfh(timeax, sfh, lookback=True)
plt.title('Young Quiescent SFH')
plt.show()
```

Finally, we also consider the case of a `rejuvenated SFH`, which had a significant lull between two periods of active star formation. To create an example of this kind of SFH, we use a reasonably large $t_{50}$, which tells the GP-SFH module that the galaxy formed 50% of its stars early on. Coupled with an SFR that indicates active star formation at the time of observation, this means that there had to be a period between these two when the galaxy did not form a lot of mass, leading to this distinctive shape.
```python
logSFR = 0.5
t50 = 4.0 # t50, lookback time, in Gyr
sfh_tuple = np.hstack([logMstar, logSFR, Nparam, db.scale_t50(t50,redshift)])
sfh, timeax = db.tuple_to_sfh(sfh_tuple, redshift)
fig = db.plot_sfh(timeax, sfh, lookback=True)
plt.title('Rejuvenated SFH')
plt.show()
```

```python
```
|
kartheikiyerREPO_NAMEdense_basisPATH_START.@dense_basis_extracted@dense_basis-master@docs@_build@html@_sources@tutorials@the_gp_sfh_module.ipynb@.PATH_END.py
|
{
"filename": "_metasrc.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/heatmapgl/_metasrc.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class MetasrcValidator(_plotly_utils.basevalidators.SrcValidator):
    """Src validator for the ``metasrc`` property of ``heatmapgl``."""

    def __init__(self, plotly_name="metasrc", parent_name="heatmapgl", **kwargs):
        # Resolve the defaults up front so caller-supplied kwargs win.
        edit_type = kwargs.pop("edit_type", "none")
        role = kwargs.pop("role", "info")
        super(MetasrcValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            role=role,
            **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@heatmapgl@_metasrc.py@.PATH_END.py
|
{
"filename": "test_quinoa.py",
"repo_name": "telegraphic/fits2hdf",
"repo_path": "fits2hdf_extracted/fits2hdf-master/aadnc_benchmarks/quinoa_idea/test_quinoa.py",
"type": "Python"
}
|
# -*- coding: utf-8 -*-
"""
test_quinoa.py
"""
from quinoa import *
def generate_data():
    """Build a noisy 100x100 test array with a large dynamic range."""
    axis = np.linspace(1e4, 1e5, 100)
    amplitude = 1e9
    noise_ratio = 1e3
    # Rapidly oscillating signal scaled up to the full dynamic range.
    signal = np.sin(np.outer(axis, axis)) * amplitude
    # Uniform noise, 1000x weaker than the signal.
    noise = np.random.random((100, 100)) * amplitude / noise_ratio
    return signal + noise
def test_dither():
    """Dithering then un-dithering with the same seed must round-trip."""
    seed = 12345
    original = generate_data()
    roundtrip = unapply_dither(apply_dither(original, seed), seed)
    assert np.allclose(original, roundtrip)
def test_scaling():
    """Round-trip quinoa scaling and check the reconstruction error is small."""
    data = generate_data()
    scale_dict = quinoa_scale(data, q=4)
    print(scale_dict)
    print("Max value in scaled data: %s" % np.max(scale_dict["data"]))
    restored = quinoa_unscale(scale_dict)
    # Mean absolute error, expressed as a percentage of the data maximum.
    precision = np.mean(np.abs(data - restored) / np.nanmax(data)) * 100
    print("Precision: %2.2f%%" % precision)
    assert np.allclose(data, restored, rtol=1)
# Allow running the checks directly as a script (pytest discovers them too).
if __name__ == "__main__":
    test_dither()
    test_scaling()
|
telegraphicREPO_NAMEfits2hdfPATH_START.@fits2hdf_extracted@fits2hdf-master@aadnc_benchmarks@quinoa_idea@test_quinoa.py@.PATH_END.py
|
{
"filename": "interpolation.py",
"repo_name": "spicy-oil/hfs_fit",
"repo_path": "hfs_fit_extracted/hfs_fit-master/hfs_fit/interpolation.py",
"type": "Python"
}
|
import numpy as np
import matplotlib.pyplot as plt
import LU as lu
def Gaussian(x, mu, sig, A):
    '''
    Unnormalised Gaussian with amplitude A, centre mu and width sig.
    '''
    arg = (x - mu)**2 / (2 * sig**2)
    return A * np.exp(-arg)
#%% The coefficients for linear interpolation
def A(x, x_i1, x_i):
    '''
    Weight of the left-hand sample f_i in linear interpolation.
    '''
    return (x_i1 - x) / (x_i1 - x_i)

def B(x, x_i1, x_i):
    '''
    Weight of the right-hand sample f_i1 in linear interpolation.
    '''
    return (x - x_i) / (x_i1 - x_i)

#%%
def lin_interp(x, f_d, x_d):
    '''
    1D linear interpolation.

    Parameters
    ----------
    x : iterable of float
        Points at which to estimate the function. Should lie within
        [x_d[0], x_d[-1]]; out-of-range points are not handled and would
        silently pick wrap-around neighbours via negative indexing.
    f_d : 1D numpy array
        Function values at the data points.
    x_d : 1D numpy array
        Data abscissae, assumed sorted in increasing order.

    Returns
    -------
    list of float
        Interpolated values, one per entry of x.
    '''
    f = []
    for pnt in x:
        if np.any(x_d == pnt):
            # Point coincides with a data sample: return the tabulated
            # value directly. The [0][0] extracts a scalar instead of a
            # 1-element array (bug fix; now consistent with CS.interp).
            f.append(f_d[np.where(x_d == pnt)[0][0]])
        else:
            # Number of data points <= pnt is the index of the
            # right-hand neighbour in the sorted abscissae.
            right = int(np.count_nonzero(x_d <= pnt))
            x_i, x_i1 = x_d[right - 1], x_d[right]
            f_i, f_i1 = f_d[right - 1], f_d[right]
            f.append(A(pnt, x_i1, x_i) * f_i + B(pnt, x_i1, x_i) * f_i1)
    return f
#%%
def validate_lin_interp():
    '''
    Visual check: linearly interpolated points should follow sin(x).
    '''
    plt.clf()
    # Sparse samples of sin(x) used as the interpolation data.
    x_data = np.linspace(0, 10, 10)
    f_data = np.sin(x_data)
    # Dense grid standing in for the continuous true function.
    x_fine = np.linspace(0, 10, 10000)
    f_fine = np.sin(x_fine)
    # Query points for the interpolator.
    x_query = np.linspace(0, 10, 500)
    f_query = lin_interp(x_query, f_data, x_data)
    plt.plot(x_fine, f_fine, 'r-', label = 'real function')
    plt.plot(x_query, f_query, 'b-.', label = 'interpolation from data')
    plt.plot(x_data, f_data, 'gx', label = 'data')
    plt.legend()
    plt.grid()
#%%Coefficients required for cubic spline
def C(x, x_i1, x_i):
    '''
    Cubic-spline weight on the second derivative at the left node x_i.
    '''
    a_w = A(x, x_i1, x_i)
    return (1./6) * (a_w ** 3 - a_w) * (x_i1 - x_i) ** 2

def D(x, x_i1, x_i):
    '''
    Cubic-spline weight on the second derivative at the right node x_i1.
    '''
    b_w = B(x, x_i1, x_i)
    return (1./6) * (b_w ** 3 - b_w) * (x_i1 - x_i) ** 2
def a(x_i, x_i_1):
    '''Sub-diagonal coefficient of the tridiagonal spline system.'''
    h = x_i - x_i_1
    return h / 6

def b(x_i1, x_i_1):
    '''Diagonal coefficient of the tridiagonal spline system.'''
    h = x_i1 - x_i_1
    return h / 3

def c(x_i1, x_i):
    '''Super-diagonal coefficient of the tridiagonal spline system.'''
    h = x_i1 - x_i
    return h / 6

def d(f_i1, f_i, f_i_1, x_i1, x_i, x_i_1):
    '''Right-hand side: forward minus backward divided difference.'''
    fwd = (f_i1 - f_i) / (x_i1 - x_i)
    bwd = (f_i - f_i_1) / (x_i - x_i_1)
    return fwd - bwd
#%% Cubic spline class
class CS:
    '''
    1D natural cubic spline.

    The second derivatives at the data points are computed once at
    construction time and reused by every interp() call.
    '''

    def __init__(self, f_d, x_d):
        self.data_size = len(f_d)
        self.xdata = x_d
        self.fdata = f_d
        self.second_derivatives = self.second_deriv()

    def second_deriv(self):
        '''
        Solve the tridiagonal linear system for the second derivatives of
        the spline at the data points and return them as a flat array.
        '''
        n = self.data_size
        matrix = np.zeros((n, n))
        # Right-hand side built from the d() divided differences.
        vector = np.zeros((n, 1))
        for row in range(1, n - 1):
            matrix[row, row - 1] = a(self.xdata[row], self.xdata[row - 1])
            matrix[row, row] = b(self.xdata[row + 1], self.xdata[row - 1])
            matrix[row, row + 1] = c(self.xdata[row + 1], self.xdata[row])
            vector[row] = d(self.fdata[row + 1], self.fdata[row], self.fdata[row - 1],
                            self.xdata[row + 1], self.xdata[row], self.xdata[row - 1])
        # Identity rows at both ends, together with the zero right-hand
        # side there, pin the end second derivatives to zero (natural
        # boundary conditions).
        matrix[0, 0] = 1
        matrix[-1, -1] = 1
        return lu.solve(matrix, vector).flatten()  # LU-decomposition solver

    def interp(self, x):
        '''
        Cubic-spline estimate of the function at each point of x.
        Returns a numpy array of interpolated values.
        '''
        out = []
        for pnt in x:
            if np.any(pnt == self.xdata):
                # Exactly on a data point: return the tabulated value.
                out.append(self.fdata[np.where(self.xdata == pnt)[0][0]])
                continue
            # Number of data points <= pnt gives the right-hand neighbour.
            idx = int(np.sum(self.xdata <= pnt))
            x_lo, x_hi = self.xdata[idx - 1], self.xdata[idx]
            f_lo, f_hi = self.fdata[idx - 1], self.fdata[idx]
            d2_lo, d2_hi = self.second_derivatives[idx - 1], self.second_derivatives[idx]
            out.append(A(pnt, x_hi, x_lo) * f_lo + B(pnt, x_hi, x_lo) * f_hi
                       + C(pnt, x_hi, x_lo) * d2_lo + D(pnt, x_hi, x_lo) * d2_hi)
        return np.array(out)
#%%
def validate_cubic_sp():
    '''
    Visual check: cubic-spline interpolated points should follow sin(x).
    '''
    plt.clf()
    # Sparse samples of sin(x) used as the interpolation data.
    x_data = np.linspace(0, 10, 8)
    f_data = np.sin(x_data)
    # Dense grid standing in for the continuous true function.
    x_fine = np.linspace(0, 10, 10000)
    f_fine = np.sin(x_fine)
    # Query points for the spline.
    x_query = np.linspace(0, 10, 500)
    spline = CS(f_data, x_data)
    f_query = spline.interp(x_query)
    plt.plot(x_fine, f_fine, 'r-', label = 'real function')
    plt.plot(x_query, f_query, 'b-.', label = 'interpolation from data')
    plt.plot(x_data, f_data, 'gx', label = 'data')
    plt.legend()
    plt.grid()
|
spicy-oilREPO_NAMEhfs_fitPATH_START.@hfs_fit_extracted@hfs_fit-master@hfs_fit@interpolation.py@.PATH_END.py
|
{
"filename": "sinusoid.py",
"repo_name": "LucaMalavolta/PyORBIT",
"repo_path": "PyORBIT_extracted/PyORBIT-main/pyorbit/common/sinusoid.py",
"type": "Python"
}
|
from pyorbit.subroutines.common import *
from pyorbit.common.abstract_common import *
class CommonSinusoid(AbstractCommon):
    """
    Parameter definitions for a sinusoidal signal model, inherited from
    AbstractCommon.

    Each entry of ``parameters_dictionary`` declares, for one model
    parameter: its sampling bounds, default prior, sampling space, the
    value used when the parameter is kept fixed, and (optionally) a unit.
    """

    model_class = 'sinusoid'

    parameters_dictionary = {
        'sine_period':  # Period of the sinusoid
            {
                'bounds': [0.4, 100000.0],
                'priors': ['Uniform', []],
                'spaces': 'Log_Base2',
                'fixed' : None,
                'unit': 'days',
            },
        'sine_amp':  # Amplitude of the sinusoid
            {
                'bounds': [-1e09, 1e09],
                'priors': ['Uniform', []],
                'spaces': 'Linear',
                'fixed' : None,
            },
        'sine_phase':  # Phase of the sinusoid; bounds suggest degrees
            {
                'bounds': [0.0, 360.0],
                'priors': ['Uniform', []],
                'spaces': 'Linear',
                'fixed' : 0.0000,
            },
        'sine_offset':  # Phase-like offset; bounds suggest degrees -- TODO confirm
            {
                'bounds': [0.0, 360.],
                'priors': ['Uniform', []],
                'spaces': 'Linear',
                'fixed' : 0.0000,
            },
        'x_zero':  # reference value of the polynomial
            {
                'bounds': [-1e09, 1e09],
                'priors': ['Uniform', []],
                'spaces': 'Linear',
                'fixed' : 0.00,
                'unit': 'as input',
            },
        'x_offset':  # offset of the independent variable -- presumably an additive shift; verify against the model code
            {
                'bounds': [-1e09, 1e09],
                'priors': ['Uniform', []],
                'spaces': 'Linear',
                'fixed' : 0.00,
                'unit': 'as input',
            },
        'poly_factor':  # multiplicative factor of the polynomial term -- TODO confirm against the model code
            {
                'bounds': [-1e09, 1e09],
                'priors': ['Uniform', []],
                'spaces': 'Linear',
                'fixed' : 0.00,
                'unit': 'as input',
            }
    }

    # Polynomial coefficients poly_c0 .. poly_c9 all share the same template.
    # NOTE(review): this loop runs in the class body, so ``i_pol`` also ends
    # up as a class attribute (value 9) -- harmless, but worth knowing.
    for i_pol in range(0,10):
        # Coefficient of the i_pol order of the polynomial
        parameters_dictionary['poly_c'+repr(i_pol)] = {
            'bounds': [-1e06, 1e06],
            'priors': ['Uniform', []],
            'spaces': 'Linear',
            'fixed' : 0.00,
            'unit': 'poly order '+repr(i_pol),
        }

    # Angle-like parameters, presumably re-centered around their best-fit
    # values during posterior analysis -- confirm against AbstractCommon.
    recenter_pams = {'sine_phase', 'sine_offset'}
|
LucaMalavoltaREPO_NAMEPyORBITPATH_START.@PyORBIT_extracted@PyORBIT-main@pyorbit@common@sinusoid.py@.PATH_END.py
|
{
"filename": "visu.py",
"repo_name": "antoinemarchal/ROHSA",
"repo_path": "ROHSA_extracted/ROHSA-master/publication/SIMU/visu.py",
"type": "Python"
}
|
import numpy as np
from matplotlib.ticker import FuncFormatter
import matplotlib.pyplot as plt
from matplotlib.ticker import ScalarFormatter
from mpl_toolkits.axes_grid1 import make_axes_locatable
import matplotlib.colors as colors
from astropy.io import fits
from astropy import units
from astropy import constants as const
import scipy.integrate as integrate
from matplotlib import animation
from numpy.random import randint
import matplotlib.gridspec as gridspec
from astropy import wcs
from scipy.stats import moment
import colorcet as cc
import turbulence as tb
plt.ion()
cm_coolwarm = cc.cm["coolwarm"]
cm_coolwarm.set_bad(color='black')
imkw_coolwarm = dict(origin='lower', interpolation='none', cmap=cm_coolwarm)
cm_inferno = plt.get_cmap('inferno')
cm_inferno.set_bad(color='black')
cm_inferno.set_under(color='black')
imkw_inferno = dict(origin='lower', interpolation='none', cmap=cm_inferno)
cm_viridis = plt.get_cmap('viridis')
cm_viridis.set_bad(color='black')
imkw_viridis = dict(origin='lower', interpolation='none', cmap=cm_viridis)
cm_cubehelix = plt.get_cmap('cubehelix')
cm_cubehelix.set_bad(color='black')
imkw_cubehelix = dict(origin='lower', interpolation='none', cmap=cm_cubehelix)
def gaussian(x, A, mu, sigma):
    '''Unnormalised Gaussian with amplitude A, centre mu and width sigma.'''
    arg = (x - mu)**2 / (2. * sigma**2)
    return A * np.exp(-arg)
def mean2vel(CRVAL, CDELT, CRPIX, mean):
    '''Channel index -> velocity through the linear spectral WCS.'''
    # VLSR [km.s-1] #FIXME
    return [CRVAL + CDELT * (m - CRPIX) for m in mean]
def vel2mean(CRVAL, CDELT, CRPIX, vel):
    '''Velocity -> channel index; inverse of mean2vel.'''
    return [(v_ - CRVAL) / CDELT + CRPIX for v_ in vel]
def cm2inch(*tupl):
    '''Convert centimetres to inches; accepts either values or one tuple.'''
    cm_per_inch = 2.54
    values = tupl[0] if isinstance(tupl[0], tuple) else tupl
    return tuple(v / cm_per_inch for v in values)
C = 1.82243e18
#SIMU SAURY
hdu = fits.open("/data/amarchal/ROHSA_paper/data/synthetic_obs/Tb_reso_0.8km.s-1_Tmin_0_Tmax_inf_ROHSA_noise_0.05_K_beam_0_2_2.fits")
hdr = hdu[0].header
data = np.genfromtxt("/data/amarchal/ROHSA_paper/ROHSA/Tb_reso_0.8km.s-1_Tmin_0_Tmax_inf_ROHSA_noise_0.05_K_beam_0_2_2_gauss_run_34.dat") # 23 now 34 with new ROHSA
cube = hdu[0].data
cube[np.where(cube != cube)] = 0.
dim_x = cube.shape[2]
dim_y = cube.shape[1]
dim_v = cube.shape[0]
CDELT = hdr['CDELT3'] #*1.e-3 #km.s-1
CRVAL = hdr['CRVAL3'] #*1.e-3 #km.s-1
CRPIX = hdr['CRPIX3']
reso = np.abs(CDELT)
id1 = data[:, 0]
id2 = data[:, 1]
sigma = data[:, 4] * reso
amp = data[:, 2]
mean = data[:, 3] - 1
reconstructed_cube = np.zeros((dim_v, dim_y, dim_x))
for i in range(len(amp)):
gauss = gaussian(np.arange(dim_v), amp[i], mean[i], sigma[i]/reso)
reconstructed_cube[:, int(id1[i]), int(id2[i])] += gauss
n_gauss = len(id1) / (dim_y*dim_x)
params = np.zeros((3*n_gauss, dim_y, dim_x))
v = mean2vel(CRVAL, CDELT, CRPIX, np.arange(cube.shape[0]))
vmean = mean2vel(CRVAL, CDELT, CRPIX, mean)
i__ = 0
for i in range(dim_y):
for j in range(dim_x):
for k in range(n_gauss):
params[0+(3*k),i,j] = amp[i__]
params[1+(3*k),i,j] = vmean[i__]
params[2+(3*k),i,j] = sigma[i__]
i__ += 1
fields = [np.sqrt(2.*np.pi) * params[0+(3*k)] * params[2+(3*k)] for k in np.arange(n_gauss)]
field = [f * C / 1.e18 for f in fields]
ampfield = [params[0+(3*k)] for k in np.arange(n_gauss)]
vfield = [params[1+(3*k)] for k in np.arange(n_gauss)]
sigfield = [params[2+(3*k)] for k in np.arange(n_gauss)]
cube_INTER_ROHSA = np.zeros(cube.shape)
for i in np.arange(cube.shape[1]):
for j in np.arange(cube.shape[2]):
cube_INTER_ROHSA[:,i,j] = gaussian(np.arange(dim_v), params[0,i,j], vel2mean(CRVAL, CDELT, CRPIX, [params[1,i,j]])[0], params[2,i,j]/reso) + gaussian(np.arange(dim_v), params[3,i,j], vel2mean(CRVAL, CDELT, CRPIX, [params[4,i,j]])[0], params[5,i,j]/reso)
CV_INTER_ROHSA = np.tensordot(v, cube_INTER_ROHSA, axes=([0],[0])) / np.sum(cube_INTER_ROHSA, axis=0)
mean_v = [np.around(np.mean(vfields), decimals=1) for vfields in vfield]
mean_sig = [np.around(np.mean(sigfields), decimals=1) for sigfields in sigfield]
print mean_v
print mean_sig
print "max WNM", np.max(sigfield[0])
print "max LNM", np.max([np.max(sigfield[2]), np.max(sigfield[5])])
print "max CNM", np.max([np.max(sigfield[1]), np.max(sigfield[3]), np.max(sigfield[4])])
stop
# PLOT HIST SIGMA / AMP
fig = plt.figure(figsize=(cm2inch((18.,18.))))
ax = fig.add_subplot(111)
ax.tick_params(labelsize=14)
ax.xaxis.set_major_formatter(ScalarFormatter())
ax.yaxis.set_major_formatter(ScalarFormatter())
bins = np.linspace(np.min(sigma), np.max(sigma), 400)
ax.hist(sigma, weights=np.sqrt(2.*np.pi)*amp*sigma*C/(np.sum(field)*1.e18), bins=bins, log=False, histtype='step', color='k', normed=False,
label=r'$\sigma weighted \, by \, A$', linewidth=2.5)
ax.set_xlim([0., 10])
# ax.set_ylim([0., 4.])
ax.set_xlabel(r'$\sigma$ [km s$^{-1}$]', fontsize = 14)
ax.set_ylabel(r'PDF [fraction of total emission]', fontsize = 14)
plt.savefig('plot/PDF_sigma_over_A.pdf', format='pdf', bbox_inches='tight', pad_inches=0.02)
#Good one
NHI_WNM = np.sqrt(2.*np.pi) * ampfield[0] * sigfield[0] * C / 1.e18
NHI_LNM = np.sqrt(2.*np.pi) * ampfield[1]*sigfield[1] * C / 1.e18
NHI_CNM = np.sqrt(2.*np.pi) * (ampfield[2]*sigfield[2] + ampfield[3]*sigfield[3] + ampfield[4]*sigfield[4]
+ ampfield[5]*sigfield[5] + ampfield[6]*sigfield[6] + ampfield[7]*sigfield[7]) * C / 1.e18
NHI_INTER = NHI_LNM + NHI_WNM
np.save("NHI_WNM_ROHSA.npy", NHI_WNM)
np.save("NHI_LNM_ROHSA.npy", NHI_LNM)
np.save("NHI_CNM_ROHSA.npy", NHI_CNM)
NHI_TOT = NHI_CNM + NHI_LNM + NHI_WNM
cube_CNM_simu = fits.open("/data/amarchal/Saury2014/synthetic_obs/Tb_reso_0.8km.s-1_Tmin_0_Tmax_500_ROHSA_noise_0.0_K_beam_0_2_2.fits")[0].data
cube_LNM_simu = fits.open("/data/amarchal/Saury2014/synthetic_obs/Tb_reso_0.8km.s-1_Tmin_500_Tmax_5000_ROHSA_noise_0.0_K_beam_0_2_2.fits")[0].data
cube_WNM_simu = fits.open("/data/amarchal/Saury2014/synthetic_obs/Tb_reso_0.8km.s-1_Tmin_5000_Tmax_inf_ROHSA_noise_0.0_K_beam_0_2_2.fits")[0].data
cube_INTER_simu = fits.open("/data/amarchal/Saury2014/synthetic_obs/Tb_reso_0.8km.s-1_Tmin_500_Tmax_inf_ROHSA_noise_0.0_K_beam_0_2_2.fits")[0].data
NHI_CNM_simu = np.sum(cube_CNM_simu,axis=0)
NHI_LNM_simu = np.sum(cube_LNM_simu,axis=0)
NHI_WNM_simu = np.sum(cube_WNM_simu,axis=0)
NHI_INTER_simu = np.sum(cube_INTER_simu,axis=0)
NHI_TOT_simu = NHI_CNM_simu + NHI_LNM_simu + NHI_WNM_simu
NHI_TOT_simu = NHI_TOT_simu * reso * C / 1.e18
NHI_CNM_simu = NHI_CNM_simu * reso * C / 1.e18
NHI_LNM_simu = NHI_LNM_simu * reso * C / 1.e18
NHI_WNM_simu = NHI_WNM_simu * reso * C / 1.e18
NHI_INTER_simu = NHI_INTER_simu * reso * C / 1.e18
# NHI_CNM_simu = np.load("NHI_CNM_paper_AM.npy")
# NHI_LNM_simu = np.load("NHI_LNM_paper_AM.npy")
# NHI_WNM_simu = np.load("NHI_WNM_paper_AM.npy")
# NHI_TOT_simu = NHI_CNM_simu + NHI_LNM_simu + NHI_WNM_simu
er_NHI = np.abs((np.sum(NHI_TOT_simu) - np.sum(NHI_TOT)) / np.sum(NHI_TOT_simu)) * 100.
original_map = np.sum(cube, axis=0) * reso * C / 1.e18
reconstructed_map = np.sum(reconstructed_cube, axis=0) * reso * C / 1.e18
mu3 = np.abs(moment((reconstructed_cube - cube).ravel(), moment=3, nan_policy='propagate'))
print "mu3 = ", mu3
#SPS1D
stat_cnm = tb.PowerS(NHI_CNM)
stat_inter = tb.PowerS(NHI_INTER)
stat_cnm_simu = tb.PowerS(NHI_CNM_simu)
stat_inter_simu = tb.PowerS(NHI_INTER_simu)
ks = stat_inter_simu.get_ks(unit_length=1)
sps1d_cnm = stat_cnm.sps1d(return_log=False)
sps1d_inter = stat_inter.sps1d(return_log=False)
sps1d_cnm_simu = stat_cnm_simu.sps1d(return_log=False)
sps1d_inter_simu = stat_inter_simu.sps1d(return_log=False)
ksup = np.where(np.array(ks) > 0.4)[0][0]
# #SPS1D CENTROID VELOCITY
# CV_WNM_simu = fits.open("/data/amarchal/Saury2014/synthetic_obs/CV_WNM_paper.fits")[0].data
CV_INTER_simu = np.tensordot(v, cube_INTER_simu, axes=([0],[0])) / np.sum(cube_INTER_simu, axis=0)
stat_CV_inter = tb.PowerS(CV_INTER_ROHSA)
stat_CV_inter_simu = tb.PowerS(CV_INTER_simu)
sps1d_CV_inter = stat_CV_inter.sps1d(return_log=False)
sps1d_CV_inter_simu = stat_CV_inter_simu.sps1d(return_log=False)
stop
def plot_spect(x,y,velocity,cube,id1,id2,amp,mean,sigma,reso):
    # Overlay, for the pixel (x, y): the observed spectrum, the full
    # reconstructed model, and each fitted Gaussian component; then print
    # the component parameters.
    # NOTE(review): besides its arguments this relies on the module-level
    # globals `reconstructed_cube`, `v` and `vmean`.
    clr = ['y', 'm', 'g', 'b', 'orange', 'cyan', 'y', 'm', 'g', 'b', 'orange', 'cyan',
    'y', 'm', 'g', 'b', 'orange', 'cyan', 'y', 'm', 'g', 'b', 'orange', 'cyan']
    fig = plt.figure()
    ax = fig.add_subplot(111)
    # Observed spectrum (steps) and total model (red line).
    ax.step(velocity, cube[:,x,y], color='cornflowerblue')
    ax.plot(velocity, reconstructed_cube[:,x,y], color='r')
    # Indices of the Gaussian components fitted at this pixel.
    iid = np.where((id1 == x) & (id2 == y))[0]
    for i in range(len(iid)):
        ax.plot(velocity, gaussian(np.arange(len(v)), amp[iid[i]], mean[iid[i]], sigma[iid[i]]/reso), color=clr[i])
    for i in range(len(iid)):
        print amp[iid[i]], vmean[iid[i]], sigma[iid[i]]
#Plot integrated column density field
fig = plt.figure(figsize=(10, 10))
ax = fig.add_axes([0.1,0.1,0.74,0.8])
ax.set_xlabel(r"x", fontsize=18.)
ax.set_ylabel(r"y", fontsize=18.)
ax.axes.xaxis.set_ticklabels([])
ax.axes.yaxis.set_ticklabels([])
# ax.axis('off')
ax.tick_params(labelsize=16)
img = ax.imshow(original_map, aspect='auto', vmin=np.min(original_map), vmax=np.max(original_map), **imkw_inferno)
divider = make_axes_locatable(ax)
cax = divider.new_vertical(size="3%", pad=0.5, pack_start=True)
fig.add_axes(cax)
cbar = fig.colorbar(img, cax=cax, orientation="horizontal", extend='both')
cbar.ax.tick_params(labelsize=16.)
cbar.set_label(r"N$_{HI}$ / [10$^{18}$ cm$^{-2}$]", fontsize=18.)
plt.savefig("plot/" + 'NHI_TOT.pdf', format='pdf', bbox_inches='tight', pad_inches=0.02)
#Plot mosaic field
lh = 4; lw = 2
fig, axs = plt.subplots(lh, lw, sharex=True, sharey=True, figsize=(10.,23.))
fig.subplots_adjust(top=1., bottom=0.03, left=0.01, right=0.99, hspace=0.01, wspace=0.02)
k = 0
for i in np.arange(lh):
for j in np.arange(lw):
im = axs[i][j].imshow(field[k], **imkw_inferno)
if j == 0: axs[i][j].set_ylabel(r'y')
axs[i][j].set_xlabel(r'x')
# axs[i][j].set_title(r"$\overline{\mu}$ = " + str(np.around(np.mean(vfield[k]), decimals=1)) +
# ", $\overline{\sigma}$ = " + str(np.around(np.mean(sigfield[k]), decimals=1)), fontsize=10.)
axs[i][j].axes.xaxis.set_ticklabels([])
axs[i][j].axis('off')
divider = make_axes_locatable(axs[i][j])
cax = divider.append_axes('bottom', size='4%', pad=0.05)
cbar = fig.colorbar(im, cax=cax, orientation='horizontal', format='%d', extend='both')
cbar.ax.tick_params(labelsize=16.)
if i == lh-1 : cbar.set_label(r"N$_{HI}$ / [10$^{18}$ cm$^{-2}$]", fontsize=18.)
k += 1
plt.savefig('plot/mosaic_field.pdf', format='pdf')
#Plot mosaic vfield
# lh = 3; lw = 2
# fig, axs = plt.subplots(lh, lw, sharex=True, sharey=True, figsize=(10.,17.))
lh = 4; lw = 2
fig, axs = plt.subplots(lh, lw, sharex=True, sharey=True, figsize=(10.,23.))
fig.subplots_adjust(top=1., bottom=0.03, left=0.01, right=0.99, hspace=0.01, wspace=0.02)
k = 0
mincbar = [None, None, None, None, None, None, None, None]
maxcbar = [None, None, None, None, None, None, None, None]
for i in np.arange(lh):
for j in np.arange(lw):
# im = axs[i][j].imshow(field[k], origin="lower", interpolation=None, cmap="gist_gray")
im1 = axs[i][j].imshow(vfield[k], vmin=mincbar[k] , vmax=maxcbar[k], **imkw_coolwarm)
# im2 = axs[i][j].contour(field[k], colors='k', linestyles='-', linewidths=0.2)
if j == 0: axs[i][j].set_ylabel(r'y')
axs[i][j].set_xlabel(r'x')
axs[i][j].axes.xaxis.set_ticklabels([])
axs[i][j].axis('off')
divider = make_axes_locatable(axs[i][j])
cax = divider.append_axes('bottom', size='4%', pad=0.05)
cbar = fig.colorbar(im1, cax=cax, orientation='horizontal', format='%.1f', extend='both')
cbar.ax.tick_params(labelsize=16.)
if i == lh-1 : cbar.set_label(r"v [km s$^{-1}$]", fontsize=18.)
k += 1
plt.savefig('plot/mosaic_vfield.pdf', format='pdf')
#Plot mosaic sigfield
# lh = 3; lw = 2
# fig, axs = plt.subplots(lh, lw, sharex=True, sharey=True, figsize=(10.,17.))
lh = 4; lw = 2
fig, axs = plt.subplots(lh, lw, sharex=True, sharey=True, figsize=(10.,23.))
fig.subplots_adjust(top=1., bottom=0.03, left=0.01, right=0.99, hspace=0.01, wspace=0.02)
k = 0
# mincbar = [None, 1.1, 5., 1.6, None, 4.65]
# maxcbar = [None, 2.3, 5.6, None, None, None]
mincbar = [None, None, None, None, None, None, None, None]
maxcbar = [None, None, None, None, None, None, None, None]
for i in np.arange(lh):
for j in np.arange(lw):
# im = axs[i][j].imshow(field[k], origin="lower", interpolation=None, cmap="gist_gray")
im1 = axs[i][j].imshow(sigfield[k], vmin=mincbar[k] , vmax=maxcbar[k], **imkw_cubehelix)
# im2 = axs[i][j].contour(field[k], colors='k', linestyles='-', linewidths=0.2)
if j == 0: axs[i][j].set_ylabel(r'y')
axs[i][j].set_xlabel(r'x')
axs[i][j].axes.xaxis.set_ticklabels([])
axs[i][j].axis('off')
divider = make_axes_locatable(axs[i][j])
cax = divider.append_axes('bottom', size='4%', pad=0.05)
cbar = fig.colorbar(im1, cax=cax, orientation='horizontal', format='%.1f', extend='both')
cbar.ax.tick_params(labelsize=16.)
if i == lh-1 : cbar.set_label(r"$\sigma_v}$ [km s$^{-1}$]", fontsize=18.)
k += 1
plt.savefig('plot/mosaic_sigfield.pdf', format='pdf')
#Plot mosaic spectra
pvalues = np.logspace(-1, 0, 10)
pmin = pvalues[0]
pmax = pvalues[-1]
def norm(pval):
return (pval - pmin) / float(pmax - pmin)
ny = 4; nx = 4
center_y = 36; center_x = 42
cb = "magenta"
cw = "crimson"
fig, axs = plt.subplots(4, 4, sharex=True, sharey=True, figsize=(cm2inch((18.,14.))))
fig.subplots_adjust(hspace=0, wspace=0, left=0, right=1, top=1, bottom=0)
for i in np.arange(ny):
for j in np.arange(nx):
# axs[i][j].set_yticks(np.arange(0., 18., 5.))
# axs[i][j].set_xticks(np.arange(-10, 20., 10.))
# axs[i][j].set_ylim([-1,20])
axs[i][j].set_xlim([-25,25])
axs[i][j].tick_params(labelsize=8)
axs[i][j].step(v, cube[:,center_y+i,center_x+j], color='cornflowerblue', linewidth=2.)
axs[i][j].plot(v, reconstructed_cube[:,center_y+i,center_x+j], linestyle="-", linewidth=2., color="k")
iid = np.where((id1 == center_y+i) & (id2 == center_x+j))[0]
for k in range(len(iid)):
axs[i][j].plot(v, gaussian(np.arange(len(v)), amp[iid[k]], mean[iid[k]], np.sqrt((sigma[iid[k]]/reso)**2. + 1.**2)), linewidth=2., color=plt.cm.inferno(norm(pvalues[k])))
if j == 0: axs[i][j].set_ylabel(r'T [k]', fontsize=8)
axs[i][j].set_xlabel(r'v [km s$^{-1}$]', fontsize=8)
plt.savefig("plot/" + 'mosaic_spectra_all.pdf', format='pdf', bbox_inches='tight', pad_inches=0.02)
#HEATMAP
x_bins = np.linspace(np.min(vmean), np.max(vmean), 800)
y_bins = np.logspace(np.log(0.1), np.log(np.max(sigma)), 800)
H, xedges, yedges = np.histogram2d(vmean, sigma, weights=np.sqrt(2.*np.pi)*amp*sigma, bins=[x_bins, y_bins])
H = np.ma.masked_invalid(np.atleast_2d(H))
fig = plt.figure(figsize=(16.,8.))
ax = fig.add_subplot(111)
ax.set_yscale('log')
ax.set_ylim([1., 20.])
ax.set_xlim([np.min(vmean),np.max(vmean)])
ax.set_xlabel(r'$v_r [km.s^{-1}]$', fontsize = 16)
ax.set_ylabel(r'$\sigma [km.s^{-1}]$', fontsize = 16)
ax.yaxis.set_major_formatter(ScalarFormatter())
divider = make_axes_locatable(ax)
cax = divider.append_axes('right', size='2%', pad=0.05)
im = ax.pcolormesh(xedges, yedges, np.log(H.T), vmin=0, vmax=np.max(np.log(H.T)), cmap=cm_inferno)
cbar = fig.colorbar(im, cax=cax, orientation='vertical')
cbar.set_label(r'$log(W_{HI}) [K.km.s^{-1}]$', fontsize = 16)
plt.savefig("plot/" + 'heatmap.png', format='png', bbox_inches='tight', pad_inches=0.02)
#Plot mosaic field
lh = 3; lw = 2
fig, axs = plt.subplots(lh, lw, sharex=True, sharey=True, figsize=(10.,17.))
fig.subplots_adjust(top=1., bottom=0.03, left=0.01, right=0.99, hspace=0.01, wspace=0.02)
# fig.subplots_adjust(top=1.015, bottom=0.01, left=0., right=1., hspace=0., wspace=0.02)
im = axs[0][0].imshow(NHI_WNM, vmin=np.min(NHI_WNM_simu), vmax=np.max(NHI_WNM_simu), **imkw_inferno)
axs[0][0].axes.xaxis.set_ticklabels([])
axs[0][0].axis('off')
divider = make_axes_locatable(axs[0][0])
cax = divider.append_axes('bottom', size='4%', pad=0.05)
cbar = fig.colorbar(im, cax=cax, orientation='horizontal', extend='both')
cbar.ax.tick_params(labelsize=16)
im = axs[1][0].imshow(NHI_LNM, vmin=np.min(NHI_LNM_simu), vmax=np.max(NHI_LNM_simu), **imkw_inferno)
axs[1][0].axes.xaxis.set_ticklabels([])
axs[1][0].axis('off')
divider = make_axes_locatable(axs[1][0])
cax = divider.append_axes('bottom', size='4%', pad=0.05)
cbar = fig.colorbar(im, cax=cax, orientation='horizontal', extend='both')
cbar.ax.tick_params(labelsize=16)
im = axs[2][0].imshow(NHI_CNM, vmin=np.min(NHI_CNM_simu), vmax=np.max(NHI_CNM_simu), **imkw_inferno)
axs[2][0].axes.xaxis.set_ticklabels([])
axs[2][0].axis('off')
divider = make_axes_locatable(axs[2][0])
cax = divider.append_axes('bottom', size='4%', pad=0.05)
cbar = fig.colorbar(im, cax=cax, orientation='horizontal', extend='both')
cbar.ax.tick_params(labelsize=16)
cbar.set_label(r"N$_{HI}$ / [10$^{18}$ cm$^{-2}$]", fontsize=18.)
im = axs[0][1].imshow(NHI_WNM_simu, **imkw_inferno)
axs[0][1].axes.xaxis.set_ticklabels([])
axs[0][1].axis('off')
divider = make_axes_locatable(axs[0][1])
cax = divider.append_axes('bottom', size='4%', pad=0.05)
cbar = fig.colorbar(im, cax=cax, orientation='horizontal', extend='both')
cbar.ax.tick_params(labelsize=16)
im = axs[1][1].imshow(NHI_LNM_simu, **imkw_inferno)
axs[1][1].axes.xaxis.set_ticklabels([])
axs[1][1].axis('off')
divider = make_axes_locatable(axs[1][1])
cax = divider.append_axes('bottom', size='4%', pad=0.05)
cbar = fig.colorbar(im, cax=cax, orientation='horizontal', extend='both')
cbar.ax.tick_params(labelsize=16)
im = axs[2][1].imshow(NHI_CNM_simu, **imkw_inferno)
axs[2][1].axes.xaxis.set_ticklabels([])
axs[2][1].axis('off')
divider = make_axes_locatable(axs[2][1])
cax = divider.append_axes('bottom', size='4%', pad=0.05)
cbar = fig.colorbar(im, cax=cax, orientation='horizontal', extend='both')
cbar.ax.tick_params(labelsize=16)
cbar.set_label(r"N$_{HI}$ / [10$^{18}$ cm$^{-2}$]", fontsize=18.)
plt.savefig('plot/mosaic_field_comparison.pdf', format='pdf')
stop
# fig = plt.figure(figsize=(cm2inch((18.,18.))))
# ax = fig.add_subplot(111)
# ax.set_xscale('log')
# ax.set_yscale('log')
# ax.xaxis.set_major_formatter(ScalarFormatter())
# ax.yaxis.set_major_formatter(ScalarFormatter())
# bins_wnm = np.logspace(np.log10(0.01), np.log10(np.nanmax(NHI_WNM.ravel())), 200)
# ax.hist(NHI_WNM.ravel(), bins=bins_wnm, log=True, histtype='step', color='r', normed=False, label='', linewidth=1.5)
# ax.hist(NHI_WNM_simu.ravel(), bins=bins_wnm, log=True, histtype='step', color='o', normed=False, label='', linewidth=1.5)
# bins_lnm = np.logspace(np.log10(0.01), np.log10(np.nanmax(NHI_LNM.ravel())), 200)
# ax.hist(NHI_LNM.ravel(), bins=bins_lnm, log=True, histtype='step', color='g', normed=False, label='', linewidth=1.5)
# ax.hist(NHI_LNM_simu.ravel(), bins=bins_lnm, log=True, histtype='step', color='k', normed=False, label='', linewidth=1.5)
# bins_cnm = np.logspace(np.log10(0.01), np.log10(np.nanmax(NHI_CNM.ravel())), 200)
# ax.hist(NHI_CNM.ravel(), bins=bins_cnm, log=True, histtype='step', color='g', normed=False, label='', linewidth=1.5)
# ax.hist(NHI_CNM_simu.ravel(), bins=bins_cnm, log=True, histtype='step', color='k', normed=False, label='', linewidth=1.5)
# # ax.set_xlim([0.01, np.max(NHI_theo_list)])
# # ax.set_ylim([1., 1.e5])
# for axis in [ax.xaxis]:
# formatter = FuncFormatter(lambda y, _: '{:.16g}'.format(y))
# axis.set_major_formatter(formatter)
# plt.setp(ax.get_yticklabels()[1], visible=False)
# plt.setp(ax.get_xticklabels()[1], visible=False)
# ax.set_xlabel(r'$NHI$ [1.83 10$^{18}$ cm$^{-2}$]', fontsize=14)
# ax.set_ylabel(r'Normalized $PDF$', fontsize=14)
# plt.savefig('plot/PDF_NHI.pdf', format='pdf')
#SPS1D NHI
# 1D spatial power spectra of the column-density maps: solid lines are the
# simulation inputs, dots are the ROHSA reconstructions.
# NOTE(review): unreachable at runtime -- the script halts at the ``stop``
# statement above.
fig = plt.figure(figsize=(cm2inch((18.,18.))))
ax = fig.add_subplot(111)
# ax.plot(ks[:ksup], sps1d_wnm[:ksup], linestyle='-', color="red", linewidth=3.5)
# ax.plot(ks[:ksup], sps1d_wnm_simu[:ksup], '.', color='orange', markersize=8)
# ax.plot(ks[:ksup], sps1d_lnm[:ksup], linestyle='-', color="k", linewidth=3.5)
# ax.plot(ks[:ksup], sps1d_lnm_simu[:ksup], '.k', markersize=8)
ax.plot(ks[:ksup], sps1d_inter[:ksup], linestyle='-', color="red", linewidth=3.5)
ax.plot(ks[:ksup], sps1d_inter_simu[:ksup], '.', color="orange", markersize=8)
ax.plot(ks[:ksup], sps1d_cnm[:ksup], linestyle='-', color="blue", linewidth=3.5)
ax.plot(ks[:ksup], sps1d_cnm_simu[:ksup], '.', color="cyan", markersize=8)
# ax.plot(ks[:ksup], sps1d_tot[:ksup], linestyle='-', color="black", linewidth=3.5)
# ax.plot(ks[:ksup], sps1d_tot_simu[:ksup], '.', color="black", markersize=8)
ax.set_yscale('log')
ax.set_xscale('log')
ax.set_xlim([0.005, 0.5])
ax.set_ylim([1.e4, 1.e12])
# Force plain (non-scientific) tick labels on the logarithmic x axis.
for axis in [ax.xaxis]:
    formatter = FuncFormatter(lambda y, _: '{:.16g}'.format(y))
    axis.set_major_formatter(formatter)
plt.setp(ax.get_yticklabels()[1], visible=False)
plt.setp(ax.get_xticklabels()[1], visible=False)
ax.set_xlabel(r'k [pixel$^{-1}$]', fontsize = 16)
ax.set_ylabel(r'P(k) [Arbitrary unit]', fontsize = 16)
plt.subplots_adjust(left=0, right=1, top=1, bottom=0)
for ticklabel in ax.yaxis.get_ticklabels():
    ticklabel.set_rotation("vertical")
ax.xaxis.set_tick_params(labelsize=12)
ax.yaxis.set_tick_params(labelsize=12)
plt.savefig("plot/sps1d_NHI.pdf", format="pdf", bbox_inches='tight', pad_inches=0.02)
#ER SPS1D NHI
# Relative residual (SIMU - ROHSA)/SIMU of the NHI power spectra, per phase.
fig = plt.figure(figsize=(cm2inch((18.,18.))))
ax = fig.add_subplot(111)
ax.set_ylim([-1., 1.])
ax.plot(ks[:ksup], (np.array(sps1d_wnm_simu[:ksup])-np.array(sps1d_wnm[:ksup]))/np.array(sps1d_wnm_simu[:ksup]), linestyle='-', color="red", linewidth=3.5)
ax.plot(ks[:ksup], (np.array(sps1d_lnm_simu[:ksup])-np.array(sps1d_lnm[:ksup]))/np.array(sps1d_lnm_simu[:ksup]), linestyle='-', color="black", linewidth=3.5)
ax.plot(ks[:ksup], (np.array(sps1d_cnm_simu[:ksup])-np.array(sps1d_cnm[:ksup]))/np.array(sps1d_cnm_simu[:ksup]), linestyle='-', color="blue", linewidth=3.5)
ax.set_xlabel(r'k [pixel$^{-1}$]', fontsize = 16)
ax.set_ylabel(r'P$_{SIMU}$(k) - P$_{ROHSA}$(k) / P$_{SIMU}$(k)', fontsize = 16)
plt.subplots_adjust(left=0, right=1, top=1, bottom=0)
for ticklabel in ax.yaxis.get_ticklabels():
    ticklabel.set_rotation("vertical")
ax.xaxis.set_tick_params(labelsize=14)
ax.yaxis.set_tick_params(labelsize=14)
plt.savefig("plot/sps1d_NHI_rapport.pdf", format="pdf", bbox_inches='tight', pad_inches=0.02)
#Plot centroid velocity field INTER simu
# Map of the simulated centroid-velocity field of the intermediate (INTER) phase.
fig = plt.figure(figsize=(10, 10))
ax = fig.add_axes([0.1,0.1,0.74,0.8])
ax.set_xlabel(r"x", fontsize=18.)
ax.set_ylabel(r"y", fontsize=18.)
ax.axes.xaxis.set_ticklabels([])
ax.axes.yaxis.set_ticklabels([])
ax.tick_params(labelsize=16)
img = ax.imshow(CV_INTER_simu, aspect='auto', **imkw_coolwarm)
divider = make_axes_locatable(ax)
cax = divider.new_vertical(size="3%", pad=0.5, pack_start=True)
fig.add_axes(cax)
cbar = fig.colorbar(img, cax=cax, orientation="horizontal", extend='both')
cbar.ax.tick_params(labelsize=16.)
cbar.set_label(r"Velocity Centroid [km s$^{-1}$] SIMU", fontsize=18.)
plt.savefig("plot/" + 'CV_INTER_simu.pdf', format='pdf', bbox_inches='tight', pad_inches=0.02)
#Plot centroid velocity field INTER ROHSA
# Same map as recovered by ROHSA, displayed on the simulation's color scale
# (vmin/vmax taken from CV_INTER_simu) so the two figures are comparable.
fig = plt.figure(figsize=(10, 10))
ax = fig.add_axes([0.1,0.1,0.74,0.8])
ax.set_xlabel(r"x", fontsize=18.)
ax.set_ylabel(r"y", fontsize=18.)
ax.axes.xaxis.set_ticklabels([])
ax.axes.yaxis.set_ticklabels([])
ax.tick_params(labelsize=16)
img = ax.imshow(CV_INTER_ROHSA, aspect='auto', vmin=np.min(CV_INTER_simu), vmax=np.max(CV_INTER_simu), **imkw_coolwarm)
divider = make_axes_locatable(ax)
cax = divider.new_vertical(size="3%", pad=0.5, pack_start=True)
fig.add_axes(cax)
cbar = fig.colorbar(img, cax=cax, orientation="horizontal", extend='both')
cbar.ax.tick_params(labelsize=16.)
cbar.set_label(r"Velocity Centroid [km s$^{-1}$] ROHSA", fontsize=18.)
plt.savefig("plot/" + 'CV_INTER_ROHSA.pdf', format='pdf', bbox_inches='tight', pad_inches=0.02)
# Side-by-side (1x2) ROHSA vs SIMU comparison, landscape layout.
lh = 1; lw = 2
fig, axs = plt.subplots(lh, lw, sharex=True, sharey=True, figsize=(10.,5.8))
fig.subplots_adjust(top=1.1, bottom=0.0, left=0.01, right=0.99, hspace=0.01, wspace=0.02)
mincbar = [np.min(CV_INTER_simu), None]
maxcbar = [np.max(CV_INTER_simu), None]
im = axs[0].imshow(CV_INTER_ROHSA, vmin=mincbar[0] , vmax=maxcbar[0], **imkw_coolwarm)
axs[0].set_ylabel(r'y')
axs[0].set_xlabel(r'x')
axs[0].axes.xaxis.set_ticklabels([])
axs[0].axis('off')
divider = make_axes_locatable(axs[0])
cax = divider.append_axes('bottom', size='4%', pad=0.05)
cbar = fig.colorbar(im, cax=cax, orientation='horizontal', format='%.1f', extend='both')
cbar.ax.tick_params(labelsize=16.)
cbar.set_label(r"Velocity Centroid [km s$^{-1}$] ROHSA", fontsize=16.)
im = axs[1].imshow(CV_INTER_simu, vmin=mincbar[1] , vmax=maxcbar[1], **imkw_coolwarm)
axs[1].set_ylabel(r'y')
axs[1].set_xlabel(r'x')
axs[1].axes.xaxis.set_ticklabels([])
axs[1].axis('off')
divider = make_axes_locatable(axs[1])
cax = divider.append_axes('bottom', size='4%', pad=0.05)
cbar = fig.colorbar(im, cax=cax, orientation='horizontal', format='%.1f', extend='both')
cbar.ax.tick_params(labelsize=16.)
cbar.set_label(r"Velocity Centroid [km s$^{-1}$] SIMU", fontsize=16.)
plt.savefig('plot/CV_INTER_long.pdf', format='pdf')
# Same comparison stacked vertically (2x1), portrait layout; a single shared
# colorbar label is attached under the bottom panel only.
lh = 2; lw = 1
fig, axs = plt.subplots(lh, lw, sharex=True, sharey=True, figsize=(5,11.))
fig.subplots_adjust(top=1.05, bottom=0.04, left=0.01, right=0.99, hspace=0.0, wspace=0.02)
mincbar = [np.min(CV_INTER_simu), None]
maxcbar = [np.max(CV_INTER_simu), None]
im = axs[0].imshow(CV_INTER_ROHSA, vmin=mincbar[0] , vmax=maxcbar[0], **imkw_coolwarm)
axs[0].set_ylabel(r'y')
axs[0].set_xlabel(r'x')
axs[0].axes.xaxis.set_ticklabels([])
axs[0].axis('off')
divider = make_axes_locatable(axs[0])
cax = divider.append_axes('bottom', size='4%', pad=0.05)
cbar = fig.colorbar(im, cax=cax, orientation='horizontal', format='%.1f', extend='both')
cbar.ax.tick_params(labelsize=16.)
im = axs[1].imshow(CV_INTER_simu, vmin=mincbar[1] , vmax=maxcbar[1], **imkw_coolwarm)
axs[1].set_ylabel(r'y')
axs[1].set_xlabel(r'x')
axs[1].axes.xaxis.set_ticklabels([])
axs[1].axis('off')
divider = make_axes_locatable(axs[1])
cax = divider.append_axes('bottom', size='4%', pad=0.05)
cbar = fig.colorbar(im, cax=cax, orientation='horizontal', format='%.1f', extend='both')
cbar.ax.tick_params(labelsize=16.)
cbar.set_label(r"Velocity Centroid [km s$^{-1}$]", fontsize=16.)
plt.savefig('plot/CV_INTER_lat.pdf', format='pdf')
#__________________________________________________________________________________________________________
# 2x2 mosaic: NHI maps (top row) and centroid-velocity maps (bottom row),
# ROHSA reconstruction on the left, simulation on the right.
lh = 2; lw = 2
fig, axs = plt.subplots(lh, lw, sharex=True, sharey=True, figsize=(10,12.))
fig.subplots_adjust(top=1.02, bottom=0.03, left=0.01, right=0.99, hspace=0.01, wspace=0.02)
im = axs[0][0].imshow(NHI_INTER, vmin=np.min(NHI_INTER_simu) , vmax=np.max(NHI_INTER_simu), **imkw_inferno)
axs[0][0].set_ylabel(r'y')
axs[0][0].set_xlabel(r'x')
axs[0][0].axes.xaxis.set_ticklabels([])
axs[0][0].axis('off')
divider = make_axes_locatable(axs[0][0])
cax = divider.append_axes('bottom', size='4%', pad=0.05)
cbar = fig.colorbar(im, cax=cax, orientation='horizontal', format='%d', extend='both')
cbar.ax.tick_params(labelsize=16.)
cbar.set_label(r"N$_{HI}$ / [10$^{18}$ cm$^{-2}$]", fontsize=18.)
im = axs[0][1].imshow(NHI_INTER_simu, **imkw_inferno)
axs[0][1].set_ylabel(r'y')
axs[0][1].set_xlabel(r'x')
axs[0][1].axes.xaxis.set_ticklabels([])
axs[0][1].axis('off')
divider = make_axes_locatable(axs[0][1])
cax = divider.append_axes('bottom', size='4%', pad=0.05)
cbar = fig.colorbar(im, cax=cax, orientation='horizontal', format='%d', extend='both')
cbar.ax.tick_params(labelsize=16.)
cbar.set_label(r"N$_{HI}$ / [10$^{18}$ cm$^{-2}$]", fontsize=18.)
im1 = axs[1][0].imshow(CV_INTER_ROHSA, vmin=np.min(CV_INTER_simu) , vmax=np.max(CV_INTER_simu), **imkw_coolwarm)
# im2 = axs[1][0].contour(NHI_INTER, colors='k', linestyles='-', linewidths=0.2)
axs[1][0].set_ylabel(r'y')
axs[1][0].set_xlabel(r'x')
axs[1][0].axes.xaxis.set_ticklabels([])
axs[1][0].axis('off')
divider = make_axes_locatable(axs[1][0])
cax = divider.append_axes('bottom', size='4%', pad=0.05)
cbar = fig.colorbar(im1, cax=cax, orientation='horizontal', format='%.1f', extend='both')
cbar.ax.tick_params(labelsize=16.)
cbar.set_label(r"Velocity Centroid [km s$^{-1}$]", fontsize=16.)
im1 = axs[1][1].imshow(CV_INTER_simu, **imkw_coolwarm)
# im2 = axs[1][1].contour(NHI_INTER_simu, colors='k', linestyles='-', linewidths=0.2)
axs[1][1].set_ylabel(r'y')
axs[1][1].set_xlabel(r'x')
axs[1][1].axes.xaxis.set_ticklabels([])
axs[1][1].axis('off')
divider = make_axes_locatable(axs[1][1])
cax = divider.append_axes('bottom', size='4%', pad=0.05)
cbar = fig.colorbar(im1, cax=cax, orientation='horizontal', format='%.1f', extend='both')
cbar.ax.tick_params(labelsize=16.)
cbar.set_label(r"Velocity Centroid [km s$^{-1}$]", fontsize=16.)
plt.savefig('plot/NHI_CV_INTER.pdf', format='pdf')
#SPS1D CV
# Power spectrum of the centroid-velocity field (ROHSA dots vs SIMU line).
fig = plt.figure(figsize=(cm2inch((18.,18.))))
ax = fig.add_subplot(111)
ax.set_yscale('log')
ax.set_xscale('log')
ax.plot(ks[:ksup], np.array(sps1d_CV_inter[:ksup]), '.', color="orange", markersize=8)
ax.plot(ks[:ksup], np.array(sps1d_CV_inter_simu[:ksup]), linestyle='-', color='red', linewidth=3.5)
ax.set_xlabel(r'k [pixel$^{-1}$]', fontsize = 16)
ax.set_ylabel(r'P(k) [Arbitrary unit]', fontsize = 16)
plt.subplots_adjust(left=0, right=1, top=1, bottom=0)
for ticklabel in ax.yaxis.get_ticklabels():
    ticklabel.set_rotation("vertical")
ax.xaxis.set_tick_params(labelsize=14)
ax.yaxis.set_tick_params(labelsize=14)
plt.savefig("plot/sps1d_CV_INTER.pdf", format="pdf", bbox_inches='tight', pad_inches=0.02)
# Relative residual of the WNM centroid-velocity power spectrum.
fig = plt.figure(figsize=(cm2inch((18.,18.))))
ax = fig.add_subplot(111)
ax.set_ylim([-1., 1.])
ax.plot(ks[:ksup], (np.array(sps1d_CV_wnm_simu[:ksup]) - np.array(sps1d_CV_wnm[:ksup]))/np.array(sps1d_CV_wnm_simu[:ksup]), linestyle='-', color='r', linewidth=3.5)
ax.set_xlabel(r'k [pixel$^{-1}$]', fontsize = 16)
ax.set_ylabel(r'P$_{SIMU}$(k) - P$_{ROHSA}$(k) / P$_{SIMU}$(k)', fontsize = 16)
plt.subplots_adjust(left=0, right=1, top=1, bottom=0)
for ticklabel in ax.yaxis.get_ticklabels():
    ticklabel.set_rotation("vertical")
ax.xaxis.set_tick_params(labelsize=14)
ax.yaxis.set_tick_params(labelsize=14)
plt.savefig("plot/sps1d_CV_rapport.pdf", format="pdf", bbox_inches='tight', pad_inches=0.02)
#OBJ FUNCTION LAST ITERATION
# Convergence history of the fit: column index 9 of the iterate file is
# plotted as log10 of the objective J (see the y-axis label below).
obj_f = np.loadtxt("iterate_Tb_reso_0.807km.s-1_noise_0.01K_1024_beam_2_256_gauss_run_13.dat")
fig = plt.figure(figsize=(cm2inch((18.,18.))))
ax = fig.add_subplot(111)
ax.plot(np.arange(len(obj_f[:,9])), np.log10(obj_f[:,9]), linewidth=3.5)
ax.set_xlabel(r'Iteration', fontsize = 16)
ax.set_ylabel(r'log$_{10}$ J($\bf \theta, \bf b$)', fontsize = 16)
plt.subplots_adjust(left=0, right=1, top=1, bottom=0)
for ticklabel in ax.yaxis.get_ticklabels():
    ticklabel.set_rotation("vertical")
ax.xaxis.set_tick_params(labelsize=12)
ax.yaxis.set_tick_params(labelsize=12)
plt.savefig("plot/obf_f.pdf", format="pdf", bbox_inches='tight', pad_inches=0.02)
|
antoinemarchalREPO_NAMEROHSAPATH_START.@ROHSA_extracted@ROHSA-master@publication@SIMU@visu.py@.PATH_END.py
|
{
"filename": "_method.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/layout/slider/step/_method.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class MethodValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    """Validator for the ``method`` property of ``layout.slider.step``."""
    def __init__(
        self, plotly_name="method", parent_name="layout.slider.step", **kwargs
    ):
        # Defaults below may be overridden by the caller through **kwargs.
        edit_type = kwargs.pop("edit_type", "arraydraw")
        allowed_values = kwargs.pop(
            "values", ["restyle", "relayout", "animate", "update", "skip"]
        )
        super(MethodValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            values=allowed_values,
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@layout@slider@step@_method.py@.PATH_END.py
|
{
"filename": "__main__.py",
"repo_name": "waynebhayes/SpArcFiRe",
"repo_path": "SpArcFiRe_extracted/SpArcFiRe-master/scripts/SpArcFiRe-pyvenv/lib/python2.7/site-packages/pip/__main__.py",
"type": "Python"
}
|
import sys
from .runner import run
# Entry point for ``python -m pip``: run the CLI and propagate a nonzero
# exit status to the shell.
if __name__ == '__main__':
    # Renamed from ``exit`` to ``status`` to avoid shadowing the builtin.
    status = run()
    if status:
        sys.exit(status)
|
waynebhayesREPO_NAMESpArcFiRePATH_START.@SpArcFiRe_extracted@SpArcFiRe-master@scripts@SpArcFiRe-pyvenv@lib@python2.7@site-packages@pip@__main__.py@.PATH_END.py
|
{
"filename": "plot_test.py",
"repo_name": "micbia/serenet",
"repo_path": "serenet_extracted/serenet-main/utils_plot/plot_test.py",
"type": "Python"
}
|
import numpy as np, matplotlib.pyplot as plt, os
import matplotlib.gridspec as gridspec
import tools21cm as t2c
from matplotlib import colors
import sys
sys.path.append('/jmain02/home/J2AD005/jck02/mxb47-jck02/SegU-Net')
from utils_network.data_generator import LightConeGenerator_SegRec
class MidpointNormalize(colors.Normalize):
    """
    Created by Joe Kington.
    Normalise the colorbar so that diverging bars work their way out on
    either side of a prescribed midpoint value, e.g.
    im = ax1.imshow(array, norm=MidpointNormalize(midpoint=0., vmin=-100, vmax=100))
    """
    # Map [vmin, midpoint, vmax] linearly onto [0, 0.5, 1] so the colormap
    # is centred on ``midpoint``.
    def __init__(self, vmin=None, vmax=None, midpoint=None, clip=False):
        self.midpoint = midpoint
        colors.Normalize.__init__(self, vmin, vmax, clip)
    def __call__(self, value, clip=None):
        # I'm ignoring masked values and all kinds of edge cases to make a
        # simple example... NOTE(review): the ``clip`` argument is ignored
        # here; NaNs in ``value`` are masked in the returned array.
        x, y = [self.vmin, self.midpoint, self.vmax], [0, 0.5, 1]
        return np.ma.masked_array(np.interp(value, x, y), np.isnan(value))
class adjust_axis:
    """Put ticks on one axis of the current pyplot figure at the indices of
    ``varr`` closest to round data values, labelling them with those values.

    ``xy`` selects which axis ('x' or 'y') is modified.
    """
    def __init__(self, axis, varr, xy, to_round=10, step=5, fmt=int):
        self.axis = axis
        self.varr = varr
        self.to_round = to_round
        self.step = step
        self.fmt = fmt
        tick_locs = self.get_axis_locs()
        labels = [int(round(varr[loc])) for loc in tick_locs]
        if xy == 'x':
            plt.xticks(tick_locs)
            axis.set_xticklabels(labels)
        elif xy == 'y':
            plt.yticks(tick_locs)
            axis.set_yticklabels(labels)
    def get_axis_locs(self):
        """Return the indices into ``varr`` nearest to each multiple of
        ``step`` between the data extrema rounded inward to ``to_round``."""
        arr = self.varr
        # Round the extrema to the nearest multiple of ``to_round``, then
        # pull them back inside the data range if rounding overshot.
        hi = int(round(arr.max() / self.to_round) * self.to_round)
        if hi > arr.max():
            hi -= self.to_round
        lo = int(round(arr.min() / self.to_round) * self.to_round)
        if lo < arr.min():
            lo += self.to_round
        targets = np.arange(lo, hi + self.step, self.step)
        nearest = [np.argmin(abs(arr - t)) for t in targets]
        return np.array(nearest).astype(self.fmt)
# --- Load one batch from the lightcone generator and plot dT / xn slices ---
path_input = '/jmain02/home/J2AD005/jck02/mxb47-jck02/data/inputs/dataLC_128_train_060921_untar/'
redshifts = np.loadtxt(path_input+'lc_redshifts.txt')
dg = LightConeGenerator_SegRec(path=path_input, data_temp=np.arange(4), data_shape=(128, 128), batch_size=2, shuffle=True)
data = dg.__getitem__(0)
dT2 = data[0]
mask_xn = data[1]
dT3 = data[2]
# Keep only the first sample of the batch and drop the singleton channel axis.
dT2, dT3, mask_xn = dT2[0].squeeze(), dT3[0].squeeze(), mask_xn[0].squeeze()
print(dT2.shape, mask_xn.shape, dT3.shape)
#mask_xn = t2c.read_cbin(path_input+'data/xH_21cm_i0.bin')
#dT2 = t2c.read_cbin(path_input+'data/dT2_21cm_i0.bin')
#dT3 = t2c.read_cbin(path_input+'data/dT3_21cm_i0.bin')
#dT3_wdg = t2c.read_cbin(path_input+'data/dT3wdg_21cm_i0.bin')
#i_plot = lightcone.brightness_temp.shape[-1]//2
# Index of the slice along the line-of-sight axis that gets its own panel.
i_plot=64
my_ext1 = [redshifts.min(), redshifts.max(), 0, 256.]
my_ext2 = [0, 256., 0, 256.]
#my_ext1 =[t2c.z_to_nu(redshifts).max(), t2c.z_to_nu(redshifts).min(), -1.6402513488058277/2, 1.6402513488058277/2]
#my_ext2 = [-1.6402513488058277/2, 1.6402513488058277/2, -1.6402513488058277/2, 1.6402513488058277/2]
fig = plt.figure(figsize=(28, 18))
gs = gridspec.GridSpec(nrows=3, ncols=2, width_ratios=[3,1], height_ratios=[1, 1, 1])
# FIRST LC PLOT
# NOTE(review): ``params`` is never defined in this script -- every
# ``params['HII_DIM']`` lookup below raises NameError as written; it should
# presumably be the cube size (128) or come from a config import. TODO confirm.
ax0 = fig.add_subplot(gs[0,0])
im = ax0.imshow(dT3[:,params['HII_DIM']//2,:], cmap='jet', aspect='auto', origin='lower', extent=my_ext1, norm=MidpointNormalize(vmin=dT3[:,:,i_plot].min(), vmax=dT3[:,:,i_plot].max(), midpoint=0))
ax0.contour(mask_xn[:,params['HII_DIM']//2,:], extent=my_ext1)
ax01 = fig.add_subplot(gs[0,1])
ax01.set_title('$z$ = %.3f $x^v_{HI}$=%.3f' %(redshifts[i_plot], np.mean(mask_xn[:,:,i_plot])), fontsize=18)
#ax01.set_title(r'$\nu_{obs}$ = %d MHz $SNR^{noise}$=%.3f' %(t2c.z_to_nu(redshifts[i_plot]), (np.std(dT2[:,:,i_plot])/np.std(dT3[:,:,i_plot]))**2), fontsize=18)
ax01.imshow(dT3[:,:,i_plot], cmap='jet', origin='lower', extent=my_ext2, norm=MidpointNormalize(vmin=dT3[:,:,i_plot].min(), vmax=dT3[:,:,i_plot].max(), midpoint=0))
ax01.contour(mask_xn[:,:,i_plot], extent=my_ext2)
fig.colorbar(im, ax=ax01, pad=0.01, fraction=0.048)
# SECOND LC PLOT
# NOTE(review): ``dT3_wdg`` is only produced by the commented-out read_cbin
# call above, so this section fails with NameError as written.
ax1 = fig.add_subplot(gs[1,0])
#ax1.imshow(dT3_wdg[:,params['HII_DIM']//2,:], cmap='jet', aspect='auto', origin='lower', extent=my_ext1, norm=MidpointNormalize(vmin=dT2_smt.min(), vmax=dT2_smt.max(), midpoint=0))
ax1.imshow(dT3_wdg[:,params['HII_DIM']//2,:], cmap='jet', aspect='auto', origin='lower', extent=my_ext1, norm=MidpointNormalize(vmin=dT3_wdg[:,:,i_plot].min(), vmax=dT3_wdg[:,:,i_plot].max(), midpoint=0))
ax1.contour(mask_xn[:,params['HII_DIM']//2,:], extent=my_ext1)
ax11 = fig.add_subplot(gs[1,1])
#ax11.set_title(r'$\nu_{obs}$ = %d MHz $SNR^{wedge}$=%.3f' %(t2c.z_to_nu(redshifts[i_plot]), (np.std(dT2[:,:,i_plot])/np.std(dT3_wdg[:,:,i_plot]))**2), fontsize=18)
#ax11.imshow(mask_xn[:,:,i_plot], cmap='jet', extent=my_ext, origin='lower', vmin=mask_xn.min(), vmax=mask_xn.max())
#im = ax11.imshow(dT3_wdg[:,:,i_plot], cmap='jet', origin='lower', extent=my_ext2, norm=MidpointNormalize(vmin=dT2_smt.min(), vmax=dT2_smt.max(), midpoint=0))
im = ax11.imshow(dT3_wdg[:,:,i_plot], cmap='jet', origin='lower', extent=my_ext2, norm=MidpointNormalize(vmin=dT3_wdg[:,:,i_plot].min(), vmax=dT3_wdg[:,:,i_plot].max(), midpoint=0))
ax11.contour(mask_xn[:,:,i_plot], extent=my_ext2)
fig.colorbar(im, ax=ax11, pad=0.01, fraction=0.048)
# THIRD LC PLOT
ax2 = fig.add_subplot(gs[2,0])
ax2.imshow(dT2[:,params['HII_DIM']//2,:], cmap='jet', origin='lower', aspect='auto', extent=my_ext1, norm=MidpointNormalize(vmin=dT2.min(), vmax=dT2.max(), midpoint=0))
#ax2.contour(mask_xn[:,:,i_plot], extent=my_ext2)
#im = ax2.imshow(lightcone.xH_box[:,params['HII_DIM']//2,:], cmap='jet', origin='lower')
ax21 = fig.add_subplot(gs[2,1])
#ax21.set_title(r'$\nu_{obs}$ = %d MHz $<\delta T_b>^{1/2}$ = %.3f mK' %(t2c.z_to_nu(redshifts[i_plot]), np.std(dT2[:,:,i_plot])), fontsize=18)
#ax21.set_title('$z$ = %.3f $x^v_{HI}$=%.3f' %(redshifts[i_plot], np.mean(lightcone.xH_box[:,:,i_plot])), fontsize=18)
im = ax21.imshow(dT2[:,:,i_plot], cmap='jet', extent=my_ext2, origin='lower', norm=MidpointNormalize(vmin=dT2[:,:,i_plot].min(), vmax=dT2[:,:,i_plot].max(), midpoint=0))
#ax21.contour(mask_xn[:,:,i_plot], extent=my_ext2)
#ax21.imshow(lightcone.xH_box[:,:,i_plot], cmap='jet', extent=my_ext, origin='lower', vmin=lightcone.xH_box.min(), vmax=lightcone.xH_box.max())
fig.colorbar(im, ax=ax21, pad=0.01, fraction=0.048)
# Shared axis labels for the wide (lightcone) and square (slice) panels.
for ax in [ax0, ax1, ax2]:
    #ax.set_ylabel('Dec [deg]', size=20)
    #ax.set_xlabel(r'$\nu_{\rm obs}$ [MHz]', size=20)
    ax.set_xlabel('z', size=16)
    ax.set_ylabel('x [Mpc]', size=16)
for ax in [ax01, ax11, ax21]:
    #ax.set_ylabel('Dec [deg]', size=20)
    #ax.set_xlabel('RA [deg]', size=20)
    ax.set_ylabel('y [Mpc]', size=16)
    ax.set_xlabel('x [Mpc]', size=16)
plt.rcParams['font.size'] = 16
plt.rcParams['axes.linewidth'] = 1.2
plt.subplots_adjust(hspace=0.3, wspace=0.01)
plt.savefig('lc_256Mpc_128.png' , bbox_inches='tight')
|
micbiaREPO_NAMEserenetPATH_START.@serenet_extracted@serenet-main@utils_plot@plot_test.py@.PATH_END.py
|
{
"filename": "_line.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/graph_objs/layout/shape/_line.py",
"type": "Python"
}
|
from plotly.basedatatypes import BaseLayoutHierarchyType as _BaseLayoutHierarchyType
import copy as _copy
class Line(_BaseLayoutHierarchyType):
    """Outline styling of a ``layout.shape``: color, dash pattern, width."""
    # class properties
    # --------------------
    _parent_path_str = "layout.shape"
    _path_str = "layout.shape.line"
    _valid_props = {"color", "dash", "width"}
    @property
    def color(self):
        """
        Sets the line color.
        The 'color' property is a color: a hex string (e.g. '#ff0000'),
        an rgb/rgba, hsl/hsla or hsv/hsva string, or any named CSS color.
        Returns
        -------
        str
        """
        return self["color"]
    @color.setter
    def color(self, val):
        self["color"] = val
    @property
    def dash(self):
        """
        Sets the dash style of lines. Set to a dash type string
        ("solid", "dot", "dash", "longdash", "dashdot", or
        "longdashdot") or a dash length list in px or percentages
        (e.g. '5px 10px 2px 2px', '5, 10, 2, 2', '10% 20% 40%').
        Returns
        -------
        str
        """
        return self["dash"]
    @dash.setter
    def dash(self, val):
        self["dash"] = val
    @property
    def width(self):
        """
        Sets the line width (in px).
        The 'width' property is a number in the interval [0, inf].
        Returns
        -------
        int|float
        """
        return self["width"]
    @width.setter
    def width(self, val):
        self["width"] = val
    @property
    def _prop_descriptions(self):
        # NOTE: this exact text is surfaced in generated help and error
        # messages, so it is kept verbatim.
        return """\
        color
            Sets the line color.
        dash
            Sets the dash style of lines. Set to a dash type string
            ("solid", "dot", "dash", "longdash", "dashdot", or
            "longdashdot") or a dash length list in px (eg
            "5px,10px,2px,2px").
        width
            Sets the line width (in px).
        """
    def __init__(self, arg=None, color=None, dash=None, width=None, **kwargs):
        """
        Construct a new Line object.
        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of :class:`plotly.graph_objs.layout.shape.Line`
        color
            Sets the line color.
        dash
            Sets the dash style of lines (dash type string or dash
            length list in px, e.g. "5px,10px,2px,2px").
        width
            Sets the line width (in px).
        Returns
        -------
        Line
        """
        super(Line, self).__init__("line")
        # Internal shortcut used when building an object graph top-down.
        if "_parent" in kwargs:
            self._parent = kwargs["_parent"]
            return
        # Normalize ``arg`` into a plain dict we can consume destructively.
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            arg = _copy.copy(arg)
        else:
            raise ValueError(
                """\
The first argument to the plotly.graph_objs.layout.shape.Line
constructor must be a dict or
an instance of :class:`plotly.graph_objs.layout.shape.Line`"""
            )
        self._skip_invalid = kwargs.pop("skip_invalid", False)
        self._validate = kwargs.pop("_validate", True)
        # Populate properties: an explicit keyword argument takes precedence
        # over the corresponding entry in ``arg``.
        for prop, explicit in (("color", color), ("dash", dash), ("width", width)):
            value = arg.pop(prop, None)
            if explicit is not None:
                value = explicit
            if value is not None:
                self[prop] = value
        # Remaining entries (unknown keys) are handled by the base class.
        self._process_kwargs(**dict(arg, **kwargs))
        self._skip_invalid = False
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@graph_objs@layout@shape@_line.py@.PATH_END.py
|
{
"filename": "test_image_dask.py",
"repo_name": "astropy/astropy",
"repo_path": "astropy_extracted/astropy-main/astropy/io/fits/tests/test_image_dask.py",
"type": "Python"
}
|
# Tests related to writing dask arrays to FITS files in an efficient way
import numpy as np
import pytest
from astropy.io import fits
from astropy.io.fits import ImageHDU, PrimaryHDU
# Skip the whole module when dask is unavailable.
da = pytest.importorskip("dask.array")
@pytest.fixture
def dask_array_in_mem():
    """Return a 1322x755 dask array of uniform random floats, chunked (59, 55)."""
    return da.random.uniform(-1000, 1000, (1322, 755)).rechunk((59, 55))
def test_construct_image_hdu(dask_array_in_mem):
    """An ImageHDU keeps a dask array as ``.data`` without materializing it."""
    hdu = ImageHDU(data=dask_array_in_mem)
    assert isinstance(hdu.data, da.Array)
def test_construct_hdulist(dask_array_in_mem):
    """Wrapping such an HDU in an HDUList also leaves the data lazy."""
    hdu = ImageHDU(data=dask_array_in_mem)
    hdulist = fits.HDUList([hdu])
    assert isinstance(hdulist[0].data, da.Array)
def test_save_primary_hdu(dask_array_in_mem, tmp_path):
    """Writing a PrimaryHDU backed by dask round-trips the data to disk."""
    # Saving a Primary HDU directly
    filename = tmp_path / "test.fits"
    hdu = PrimaryHDU(data=dask_array_in_mem)
    hdu.writeto(filename)
    with fits.open(filename) as hdulist_new:
        assert isinstance(hdulist_new[0].data, np.ndarray)
        np.testing.assert_allclose(hdulist_new[0].data, dask_array_in_mem.compute())
def test_save_image_hdu(dask_array_in_mem, tmp_path):
    """Same round-trip via an ImageHDU (extension 1)."""
    # Saving an image HDU directly
    filename = tmp_path / "test.fits"
    hdu = ImageHDU(data=dask_array_in_mem)
    hdu.writeto(filename)
    with fits.open(filename) as hdulist_new:
        assert isinstance(hdulist_new[1].data, np.ndarray)
        np.testing.assert_allclose(hdulist_new[1].data, dask_array_in_mem.compute())
def test_save_hdulist(dask_array_in_mem, tmp_path):
    """An HDUList mixing dask- and numpy-backed HDUs writes all of them."""
    # Saving an HDUList
    filename = tmp_path / "test.fits"
    hdu1 = PrimaryHDU(data=dask_array_in_mem)
    hdu2 = ImageHDU(data=np.random.random((128, 128)))
    hdu3 = ImageHDU(data=dask_array_in_mem * 2)
    hdulist = fits.HDUList([hdu1, hdu2, hdu3])
    assert isinstance(hdulist[0].data, da.Array)
    hdulist.writeto(filename)
    with fits.open(filename) as hdulist_new:
        assert isinstance(hdulist_new[0].data, np.ndarray)
        np.testing.assert_allclose(hdulist_new[0].data, dask_array_in_mem.compute())
        assert isinstance(hdulist_new[1].data, np.ndarray)
        np.testing.assert_allclose(hdulist_new[1].data, hdu2.data)
        assert isinstance(hdulist_new[2].data, np.ndarray)
        np.testing.assert_allclose(hdulist_new[2].data, dask_array_in_mem.compute() * 2)
def test_long_header(dask_array_in_mem, tmp_path):
    """A multi-block header must not corrupt the dask-backed data payload."""
    # Make sure things work correctly if there is a long header in the HDU.
    filename = tmp_path / "test.fits"
    # NOTE: we deliberately set up a long header here rather than add the
    # keys one by one to hdu.header as adding the header in one go used to
    # cause issues, so this acts as a regression test.
    header = fits.Header()
    for index in range(2048):
        header[f"KEY{index:x}"] = 0.0
    hdu = PrimaryHDU(data=dask_array_in_mem, header=header)
    hdu.writeto(filename)
    with fits.open(filename) as hdulist_new:
        # 2048 user keys plus the mandatory structural keywords.
        assert len(hdulist_new[0].header) == 2053
        assert isinstance(hdulist_new[0].data, np.ndarray)
        np.testing.assert_allclose(hdulist_new[0].data, dask_array_in_mem.compute())
# Byte orders and widths accepted for dask-backed FITS image data.
VALID_DTYPES = (">i2", "<i2", ">i4", "<i4", ">i8", "<i8", ">f4", "<f4", ">f8", "<f8")
@pytest.mark.parametrize("dtype", VALID_DTYPES)
def test_dtypes(dask_array_in_mem, tmp_path, dtype):
    """Every supported dtype/endianness round-trips through writeto."""
    filename = tmp_path / "test.fits"
    array = dask_array_in_mem.astype(dtype)
    hdu = PrimaryHDU(data=array)
    hdu.writeto(filename)
    with fits.open(filename) as hdulist_new:
        assert isinstance(hdulist_new[0].data, np.ndarray)
        np.testing.assert_allclose(hdulist_new[0].data, array.compute())
def test_scaled(dask_array_in_mem, tmp_path):
    """BSCALE/BZERO scaling applies to dask data (quantized to ~1e-6)."""
    filename = tmp_path / "test.fits"
    hdu = PrimaryHDU(data=dask_array_in_mem)
    hdu.scale("int32", bzero=-1000, bscale=1e-6)
    hdu.writeto(filename)
    with fits.open(filename) as hdulist_new:
        assert isinstance(hdulist_new[0].data, np.ndarray)
        np.testing.assert_allclose(
            hdulist_new[0].data, dask_array_in_mem.compute(), atol=1e-5
        )
def test_scaled_minmax(dask_array_in_mem, tmp_path):
    """Automatic min/max scaling also works with a dask-backed HDU."""
    filename = tmp_path / "test.fits"
    hdu = PrimaryHDU(data=dask_array_in_mem)
    hdu.scale("int32", option="minmax")
    hdu.writeto(filename)
    with fits.open(filename) as hdulist_new:
        assert isinstance(hdulist_new[0].data, np.ndarray)
        np.testing.assert_allclose(
            hdulist_new[0].data, dask_array_in_mem.compute(), atol=1e-5
        )
def test_append(dask_array_in_mem, tmp_path):
    """fits.append accepts a dask array and appends further HDUs after it."""
    # Test append mode
    filename = tmp_path / "test.fits"
    fits.append(filename, dask_array_in_mem)
    fits.append(filename, np.arange(10))
    with fits.open(filename) as hdulist_new:
        assert isinstance(hdulist_new[0].data, np.ndarray)
        np.testing.assert_allclose(hdulist_new[0].data, dask_array_in_mem.compute())
        assert isinstance(hdulist_new[1].data, np.ndarray)
        np.testing.assert_allclose(hdulist_new[1].data, np.arange(10))
# @pytest.mark.parametrize('mode', ['rb+', 'ab', 'ab+', 'wb', 'wb+'])
@pytest.mark.parametrize("mode", ["wb", "wb+"])
def test_file_handle(mode, dask_array_in_mem, tmp_path):
    """Writing through an already-open binary file handle works too."""
    filename = tmp_path / "test.fits"
    hdu1 = PrimaryHDU(data=dask_array_in_mem)
    hdu2 = ImageHDU(data=np.arange(10))
    hdulist = fits.HDUList([hdu1, hdu2])
    with filename.open(mode=mode) as fp:
        hdulist.writeto(fp)
    with fits.open(filename) as hdulist_new:
        assert isinstance(hdulist_new[0].data, np.ndarray)
        np.testing.assert_allclose(hdulist_new[0].data, dask_array_in_mem.compute())
        assert isinstance(hdulist_new[1].data, np.ndarray)
        np.testing.assert_allclose(hdulist_new[1].data, np.arange(10))
|
astropyREPO_NAMEastropyPATH_START.@astropy_extracted@astropy-main@astropy@io@fits@tests@test_image_dask.py@.PATH_END.py
|
{
"filename": "_unselected.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/violin/_unselected.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class UnselectedValidator(_plotly_utils.basevalidators.CompoundValidator):
    # Auto-generated validator for the ``violin.unselected`` compound property.
    def __init__(self, plotly_name="unselected", parent_name="violin", **kwargs):
        super(UnselectedValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            # Target data class and its docs; callers may override via kwargs.
            data_class_str=kwargs.pop("data_class_str", "Unselected"),
            data_docs=kwargs.pop(
                "data_docs",
                """
            marker
                :class:`plotly.graph_objects.violin.unselected.
                Marker` instance or dict with compatible
                properties
""",
            ),
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@violin@_unselected.py@.PATH_END.py
|
{
"filename": "testMedialGenerator2d.py",
"repo_name": "LLNL/spheral",
"repo_path": "spheral_extracted/spheral-main/tests/functional/Generators/testMedialGenerator2d.py",
"type": "Python"
}
|
import mpi
from Spheral2d import *
from MedialGenerator import *
from CompositeNodeDistribution import *
from SpheralTestUtilities import *
from VoronoiDistributeNodes import distributeNodes2d as distributeNodes
from siloPointmeshDump import *
#-------------------------------------------------------------------------------
# Command-line configurable parameters (defaults below are used unless
# overridden on the command line via Spheral's commandLine helper).
#-------------------------------------------------------------------------------
commandLine(hmin = 1e-5,
            hmax = 1e6,
            rhoscale = 0.5,        # length scale of the Gaussian density profile
            n1 = 200,              # target node counts for the four NodeLists
            n2 = 100,
            n3 = 50,
            n4 = 400,
            nPerh = 2.01,
            maxIterations = 200,   # medial generator relaxation iterations
            fracTol = 1e-3)

#-------------------------------------------------------------------------------
# Material properties.
#-------------------------------------------------------------------------------
gamma = 1.4
mu = 2.0
eos = GammaLawGasMKS(gamma, mu)

#-------------------------------------------------------------------------------
# Interpolation kernels.
#-------------------------------------------------------------------------------
WT = TableKernel(BSplineKernel(), 1000)
output("WT")

#-------------------------------------------------------------------------------
# Make the NodeList.
# All four NodeLists share the same EOS and smoothing-scale bounds.
#-------------------------------------------------------------------------------
nodes1 = makeFluidNodeList("nodes1", eos,
                           hmin = hmin,
                           hmax = hmax,
                           nPerh = nPerh,
                           topGridCellSize = 100,
                           xmin = Vector.one * -100.0,
                           xmax = Vector.one * 100.0)
nodes2 = makeFluidNodeList("nodes2", eos,
                           hmin = hmin,
                           hmax = hmax,
                           nPerh = nPerh,
                           topGridCellSize = 100,
                           xmin = Vector.one * -100.0,
                           xmax = Vector.one * 100.0)
nodes3 = makeFluidNodeList("nodes3", eos,
                           hmin = hmin,
                           hmax = hmax,
                           nPerh = nPerh,
                           topGridCellSize = 100,
                           xmin = Vector.one * -100.0,
                           xmax = Vector.one * 100.0)
nodes4 = makeFluidNodeList("nodes4", eos,
                           hmin = hmin,
                           hmax = hmax,
                           nPerh = nPerh,
                           topGridCellSize = 100,
                           xmin = Vector.one * -100.0,
                           xmax = Vector.one * 100.0)
nodeSet = [nodes1, nodes2, nodes3, nodes4]

# Echo the configuration of each NodeList (output() evaluates the string).
for nodes in nodeSet:
    output("nodes.name")
    output("  nodes.hmin")
    output("  nodes.hmax")
    output("  nodes.nodesPerSmoothingScale")
#-------------------------------------------------------------------------------
# Make some interesting boundaries for each of our NodeLists and generators.
#-------------------------------------------------------------------------------

# An H of height & width 3: 12 vertices traversed counter-clockwise, with one
# facet (edge) per consecutive vertex pair, wrapping around at the end.
bcpoints = vector_of_Vector()
bcfacets = vector_of_vector_of_unsigned()
for p in [(0,0), (1,0), (1,1), (2,1), (2,0), (3,0),
          (3,3), (2,3), (2,2), (1,2), (1,3), (0,3)]:
    bcpoints.append(Vector(*p))
for i in range(len(bcpoints)):
    bcfacets.append(vector_of_unsigned(2))
    bcfacets[-1][0] = i
    bcfacets[-1][1] = (i + 1) % len(bcpoints)
Hboundary = Polygon(bcpoints, bcfacets)

# A couple of concentric circles centered at (5.0, 1.5), radii 1.5 and 0.5,
# each discretized with n segments.  The same facet topology serves both.
n = 30
bcpoints1 = vector_of_Vector(n)
bcpoints2 = vector_of_Vector(n)
bcfacets = vector_of_vector_of_unsigned(n, vector_of_unsigned(2))
for i in range(n):
    bcpoints1[i] = Vector(5.0 + 1.5*cos(i*2.0*pi/n), 1.5 + 1.5*sin(i*2.0*pi/n))
    bcpoints2[i] = Vector(5.0 + 0.5*cos(i*2.0*pi/n), 1.5 + 0.5*sin(i*2.0*pi/n))
    bcfacets[i][0] = i
    bcfacets[i][1] = (i + 1) % n
outerCircle = Polygon(bcpoints1, bcfacets)
innerCircle = Polygon(bcpoints2, bcfacets)
# In the no holes case we need to generate a split version of the outer circle.
# Each half is an annulus half: nhalf points along the outer arc followed by
# nhalf points back along the inner arc, reusing the n-edge facet topology.
# NOTE: integer division is required -- under Python 3 `n/2` is a float, which
# would break both range(nhalf) and the i+nhalf indexing below.
nhalf = n//2
bcpoints1_left = vector_of_Vector(n)
bcpoints1_right = vector_of_Vector(n)
for i in range(nhalf):
    theta = i*pi/(nhalf - 1) - pi/2.0
    # Right half: outer arc from -pi/2 to +pi/2, inner arc traversed back.
    bcpoints1_right[i] = Vector(5.0 + 1.5*cos(theta), 1.5 + 1.5*sin(theta))
    bcpoints1_right[i+nhalf] = Vector(5.0 + 0.5*cos(-theta), 1.5 + 0.5*sin(-theta))
    # Left half: the mirror image about the circle center.
    bcpoints1_left[i] = Vector(5.0 + 1.5*cos(pi + theta), 1.5 + 1.5*sin(pi + theta))
    bcpoints1_left[i+nhalf] = Vector(5.0 + 0.5*cos(pi - theta), 1.5 + 0.5*sin(pi - theta))
outerCircle_left = Polygon(bcpoints1_left, bcfacets)
outerCircle_right = Polygon(bcpoints1_right, bcfacets)
# A box surrounding the whole thing.
bcpoints = vector_of_Vector()
bcfacets = vector_of_vector_of_unsigned()
for p in [(-1,-1), (8,-1), (8,4), (-1,4)]:
    bcpoints.append(Vector(*p))
for i in range(len(bcpoints)):
    bcfacets.append(vector_of_unsigned(2))
    bcfacets[-1][0] = i
    bcfacets[-1][1] = (i + 1) % len(bcpoints)
outerBox = Polygon(bcpoints, bcfacets)

# And the no hole version of the surrounding box, broken into three pieces.
# Left piece: the box boundary with the H shape carved out of its right edge.
bcpoints = vector_of_Vector()
bcfacets = vector_of_vector_of_unsigned()
for p in [(-1,-1), (3,-1),
          (3,0), (2,0), (2,1), (1,1), (1,0), (0,0),
          (0,3), (1,3), (1,2), (2,2), (2,3), (3,3),
          (3,4), (-1,4)]:
    bcpoints.append(Vector(*p))
for i in range(len(bcpoints)):
    bcfacets.append(vector_of_unsigned(2))
    bcfacets[-1][0] = i
    bcfacets[-1][1] = (i + 1) % len(bcpoints)
outerBox_left = Polygon(bcpoints, bcfacets)

# Middle piece: carved along the left half of the outer circle.
bcpoints = vector_of_Vector()
bcfacets = vector_of_vector_of_unsigned()
for p in [(3,-1), (5, -1)]:
    bcpoints.append(Vector(*p))
for i in range(nhalf):
    theta = 1.5*pi - i*pi/(nhalf - 1)
    bcpoints.append(Vector(5.0 + 1.5*cos(theta), 1.5 + 1.5*sin(theta)))
for p in [(5, 4), (3,4)]:
    bcpoints.append(Vector(*p))
for i in range(len(bcpoints)):
    bcfacets.append(vector_of_unsigned(2))
    bcfacets[-1][0] = i
    bcfacets[-1][1] = (i + 1) % len(bcpoints)
outerBox_mid = Polygon(bcpoints, bcfacets)

# Right piece: carved along the right half of the outer circle.
bcpoints = vector_of_Vector()
bcfacets = vector_of_vector_of_unsigned()
for p in [(5, -1), (8,-1), (8,4), (5,4)]:
    bcpoints.append(Vector(*p))
for i in range(nhalf):
    theta = 0.5*pi - i*pi/(nhalf - 1)
    bcpoints.append(Vector(5.0 + 1.5*cos(theta), 1.5 + 1.5*sin(theta)))
for i in range(len(bcpoints)):
    bcfacets.append(vector_of_unsigned(2))
    bcfacets[-1][0] = i
    bcfacets[-1][1] = (i + 1) % len(bcpoints)
outerBox_right = Polygon(bcpoints, bcfacets)
#-------------------------------------------------------------------------------
# Generate them nodes.
#-------------------------------------------------------------------------------
def rhoprofile1(posi):
    """Gaussian density profile centered at (1.5, 1.5) with width rhoscale."""
    r = (posi - Vector(1.5,1.5)).magnitude()
    return exp(-r*r/(rhoscale*rhoscale))

# Gaussian density profile inside the H shape.
print("Generator 1")
generator1 = MedialGenerator2d(n = n1,
                               rho = rhoprofile1,
                               boundary = Hboundary,
                               maxIterations = maxIterations,
                               fracTol = fracTol,
                               #tessellationFileName = "test_medial_nodes1_maxiter=%i_tol=%g" % (maxIterations, fracTol),
                               nNodePerh = nPerh)

# Constant density in the annulus (outer circle minus inner hole).
print("Generator 2")
generator2 = MedialGenerator2d(n = n2,
                               rho = 1.0,
                               boundary = outerCircle,
                               holes = [innerCircle],
                               maxIterations = maxIterations,
                               fracTol = fracTol,
                               #tessellationFileName = "test_medial_nodes2_maxiter=%i_tol=%g" % (maxIterations, fracTol),
                               nNodePerh = nPerh)

# Low constant density filling the inner circle.
print("Generator 3")
generator3 = MedialGenerator2d(n = n3,
                               rho = 0.1,
                               boundary = innerCircle,
                               maxIterations = maxIterations,
                               fracTol = fracTol,
                               #tessellationFileName = "test_medial_nodes3_maxiter=%i_tol=%g" % (maxIterations, fracTol),
                               nNodePerh = nPerh)

# Background region: the bounding box with the H and circle cut out as holes.
print("Generator 4")
generator4 = MedialGenerator2d(n = n4,
                               rho = 0.1,
                               boundary = outerBox,
                               holes = [Hboundary, outerCircle],
                               maxIterations = maxIterations,
                               fracTol = fracTol,
                               #tessellationFileName = "test_medial_nodes4_maxiter=%i_tol=%g" % (maxIterations, fracTol),
                               nNodePerh = nPerh)

# Distribute all four generators' nodes across MPI domains at once.
distributeNodes((nodes1, generator1),
                (nodes2, generator2),
                (nodes3, generator3),
                (nodes4, generator4))
#-------------------------------------------------------------------------------
# Drop a viz file for inspection.
#-------------------------------------------------------------------------------
# NOTE(review): `nodes` below is the loop variable left over from the output
# loop above, so HfieldInv and domainField are built for the *last* NodeList
# (nodes4) only -- confirm this is intentional.
Hfield = nodes.Hfield()
HfieldInv = SymTensorField("H inverse", nodes)
domainField = IntField("Domain", nodes)
for i in range(nodes.numNodes):
    HfieldInv[i] = Hfield[i].Inverse()
    domainField[i] = mpi.rank
vizfile = siloPointmeshDump(baseName = "test_medial_maxiter=%i_tol=%g" % (maxIterations, fracTol),
                            baseDirectory = "test_medial",
                            fields = ([x.massDensity() for x in nodeSet] +
                                      [x.mass() for x in nodeSet] +
                                      [x.velocity() for x in nodeSet] +
                                      [x.specificThermalEnergy() for x in nodeSet] +
                                      [x.Hfield() for x in nodeSet] +
                                      [HfieldInv, domainField])
                            )
|
LLNLREPO_NAMEspheralPATH_START.@spheral_extracted@spheral-main@tests@functional@Generators@testMedialGenerator2d.py@.PATH_END.py
|
{
"filename": "_cone.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/_cone.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class ConeValidator(_plotly_utils.basevalidators.CompoundValidator):
    """Compound validator for the top-level `cone` trace
    (empty parent_name => attached at the figure's data level).

    The `data_docs` literal below is generated documentation passed through
    to the Cone graph object; it is runtime data, not a docstring.
    """

    def __init__(self, plotly_name="cone", parent_name="", **kwargs):
        super(ConeValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            data_class_str=kwargs.pop("data_class_str", "Cone"),
            data_docs=kwargs.pop(
                "data_docs",
                """
            anchor
                Sets the cones' anchor with respect to their
                x/y/z positions. Note that "cm" denote the
                cone's center of mass which corresponds to 1/4
                from the tail to tip.
            autocolorscale
                Determines whether the colorscale is a default
                palette (`autocolorscale: true`) or the palette
                determined by `colorscale`. In case
                `colorscale` is unspecified or `autocolorscale`
                is true, the default palette will be chosen
                according to whether numbers in the `color`
                array are all positive, all negative or mixed.
            cauto
                Determines whether or not the color domain is
                computed with respect to the input data (here
                u/v/w norm) or the bounds set in `cmin` and
                `cmax` Defaults to `false` when `cmin` and
                `cmax` are set by the user.
            cmax
                Sets the upper bound of the color domain. Value
                should have the same units as u/v/w norm and if
                set, `cmin` must be set as well.
            cmid
                Sets the mid-point of the color domain by
                scaling `cmin` and/or `cmax` to be equidistant
                to this point. Value should have the same units
                as u/v/w norm. Has no effect when `cauto` is
                `false`.
            cmin
                Sets the lower bound of the color domain. Value
                should have the same units as u/v/w norm and if
                set, `cmax` must be set as well.
            coloraxis
                Sets a reference to a shared color axis.
                References to these shared color axes are
                "coloraxis", "coloraxis2", "coloraxis3", etc.
                Settings for these shared color axes are set in
                the layout, under `layout.coloraxis`,
                `layout.coloraxis2`, etc. Note that multiple
                color scales can be linked to the same color
                axis.
            colorbar
                :class:`plotly.graph_objects.cone.ColorBar`
                instance or dict with compatible properties
            colorscale
                Sets the colorscale. The colorscale must be an
                array containing arrays mapping a normalized
                value to an rgb, rgba, hex, hsl, hsv, or named
                color string. At minimum, a mapping for the
                lowest (0) and highest (1) values are required.
                For example, `[[0, 'rgb(0,0,255)'], [1,
                'rgb(255,0,0)']]`. To control the bounds of the
                colorscale in color space, use`cmin` and
                `cmax`. Alternatively, `colorscale` may be a
                palette name string of the following list: Grey
                s,YlGnBu,Greens,YlOrRd,Bluered,RdBu,Reds,Blues,
                Picnic,Rainbow,Portland,Jet,Hot,Blackbody,Earth
                ,Electric,Viridis,Cividis.
            customdata
                Assigns extra data each datum. This may be
                useful when listening to hover, click and
                selection events. Note that, "scatter" traces
                also appends customdata items in the markers
                DOM elements
            customdatasrc
                Sets the source reference on Chart Studio Cloud
                for  customdata .
            hoverinfo
                Determines which trace information appear on
                hover. If `none` or `skip` are set, no
                information is displayed upon hovering. But, if
                `none` is set, click and hover events are still
                fired.
            hoverinfosrc
                Sets the source reference on Chart Studio Cloud
                for  hoverinfo .
            hoverlabel
                :class:`plotly.graph_objects.cone.Hoverlabel`
                instance or dict with compatible properties
            hovertemplate
                Template string used for rendering the
                information that appear on hover box. Note that
                this will override `hoverinfo`. Variables are
                inserted using %{variable}, for example "y:
                %{y}". Numbers are formatted using d3-format's
                syntax %{variable:d3-format}, for example
                "Price: %{y:$.2f}".
                https://github.com/d3/d3-3.x-api-
                reference/blob/master/Formatting.md#d3_format
                for details on the formatting syntax. Dates are
                formatted using d3-time-format's syntax
                %{variable|d3-time-format}, for example "Day:
                %{2019-01-01|%A}".
                https://github.com/d3/d3-time-
                format#locale_format for details on the date
                formatting syntax. The variables available in
                `hovertemplate` are the ones emitted as event
                data described at this link
                https://plotly.com/javascript/plotlyjs-
                events/#event-data. Additionally, every
                attributes that can be specified per-point (the
                ones that are `arrayOk: true`) are available.
                variable `norm` Anything contained in tag
                `<extra>` is displayed in the secondary box,
                for example "<extra>{fullData.name}</extra>".
                To hide the secondary box completely, use an
                empty tag `<extra></extra>`.
            hovertemplatesrc
                Sets the source reference on Chart Studio Cloud
                for  hovertemplate .
            hovertext
                Same as `text`.
            hovertextsrc
                Sets the source reference on Chart Studio Cloud
                for  hovertext .
            ids
                Assigns id labels to each datum. These ids for
                object constancy of data points during
                animation. Should be an array of strings, not
                numbers or any other type.
            idssrc
                Sets the source reference on Chart Studio Cloud
                for  ids .
            legendgroup
                Sets the legend group for this trace. Traces
                part of the same legend group hide/show at the
                same time when toggling legend items.
            lighting
                :class:`plotly.graph_objects.cone.Lighting`
                instance or dict with compatible properties
            lightposition
                :class:`plotly.graph_objects.cone.Lightposition
                ` instance or dict with compatible properties
            meta
                Assigns extra meta information associated with
                this trace that can be used in various text
                attributes. Attributes such as trace `name`,
                graph, axis and colorbar `title.text`,
                annotation `text` `rangeselector`,
                `updatemenues` and `sliders` `label` text all
                support `meta`. To access the trace `meta`
                values in an attribute in the same trace,
                simply use `%{meta[i]}` where `i` is the index
                or key of the `meta` item in question. To
                access trace `meta` in layout attributes, use
                `%{data[n[.meta[i]}` where `i` is the index or
                key of the `meta` and `n` is the trace index.
            metasrc
                Sets the source reference on Chart Studio Cloud
                for  meta .
            name
                Sets the trace name. The trace name appear as
                the legend item and on hover.
            opacity
                Sets the opacity of the surface. Please note
                that in the case of using high `opacity` values
                for example a value greater than or equal to
                0.5 on two surfaces (and 0.25 with four
                surfaces), an overlay of multiple transparent
                surfaces may not perfectly be sorted in depth
                by the webgl API. This behavior may be improved
                in the near future and is subject to change.
            reversescale
                Reverses the color mapping if true. If true,
                `cmin` will correspond to the last color in the
                array and `cmax` will correspond to the first
                color.
            scene
                Sets a reference between this trace's 3D
                coordinate system and a 3D scene. If "scene"
                (the default value), the (x,y,z) coordinates
                refer to `layout.scene`. If "scene2", the
                (x,y,z) coordinates refer to `layout.scene2`,
                and so on.
            showlegend
                Determines whether or not an item corresponding
                to this trace is shown in the legend.
            showscale
                Determines whether or not a colorbar is
                displayed for this trace.
            sizemode
                Determines whether `sizeref` is set as a
                "scaled" (i.e unitless) scalar (normalized by
                the max u/v/w norm in the vector field) or as
                "absolute" value (in the same units as the
                vector field).
            sizeref
                Adjusts the cone size scaling. The size of the
                cones is determined by their u/v/w norm
                multiplied a factor and `sizeref`. This factor
                (computed internally) corresponds to the
                minimum "time" to travel across two successive
                x/y/z positions at the average velocity of
                those two successive positions. All cones in a
                given trace use the same factor. With
                `sizemode` set to "scaled", `sizeref` is
                unitless, its default value is 0.5 With
                `sizemode` set to "absolute", `sizeref` has the
                same units as the u/v/w vector field, its the
                default value is half the sample's maximum
                vector norm.
            stream
                :class:`plotly.graph_objects.cone.Stream`
                instance or dict with compatible properties
            text
                Sets the text elements associated with the
                cones. If trace `hoverinfo` contains a "text"
                flag and "hovertext" is not set, these elements
                will be seen in the hover labels.
            textsrc
                Sets the source reference on Chart Studio Cloud
                for  text .
            u
                Sets the x components of the vector field.
            uid
                Assign an id to this trace, Use this to provide
                object constancy between traces during
                animations and transitions.
            uirevision
                Controls persistence of some user-driven
                changes to the trace: `constraintrange` in
                `parcoords` traces, as well as some `editable:
                true` modifications such as `name` and
                `colorbar.title`. Defaults to
                `layout.uirevision`. Note that other user-
                driven trace attribute changes are controlled
                by `layout` attributes: `trace.visible` is
                controlled by `layout.legend.uirevision`,
                `selectedpoints` is controlled by
                `layout.selectionrevision`, and
                `colorbar.(x|y)` (accessible with `config:
                {editable: true}`) is controlled by
                `layout.editrevision`. Trace changes are
                tracked by `uid`, which only falls back on
                trace index if no `uid` is provided. So if your
                app can add/remove traces before the end of the
                `data` array, such that the same trace has a
                different index, you can still preserve user-
                driven changes if you give each trace a `uid`
                that stays with it as it moves.
            usrc
                Sets the source reference on Chart Studio Cloud
                for  u .
            v
                Sets the y components of the vector field.
            visible
                Determines whether or not this trace is
                visible. If "legendonly", the trace is not
                drawn, but can appear as a legend item
                (provided that the legend itself is visible).
            vsrc
                Sets the source reference on Chart Studio Cloud
                for  v .
            w
                Sets the z components of the vector field.
            wsrc
                Sets the source reference on Chart Studio Cloud
                for  w .
            x
                Sets the x coordinates of the vector field and
                of the displayed cones.
            xsrc
                Sets the source reference on Chart Studio Cloud
                for  x .
            y
                Sets the y coordinates of the vector field and
                of the displayed cones.
            ysrc
                Sets the source reference on Chart Studio Cloud
                for  y .
            z
                Sets the z coordinates of the vector field and
                of the displayed cones.
            zsrc
                Sets the source reference on Chart Studio Cloud
                for  z .
""",
            ),
            # Python 2-era generated code: no trailing comma after **kwargs.
            **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@_cone.py@.PATH_END.py
|
{
"filename": "_legendgrouptitle.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/barpolar/_legendgrouptitle.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class LegendgrouptitleValidator(_plotly_utils.basevalidators.CompoundValidator):
    """Compound validator for the `barpolar.legendgrouptitle` property."""

    def __init__(
        self, plotly_name="legendgrouptitle", parent_name="barpolar", **kwargs
    ):
        # Resolve the overridable pieces before delegating to the base class.
        data_class = kwargs.pop("data_class_str", "Legendgrouptitle")
        docs = kwargs.pop(
            "data_docs",
            """
            font
                Sets this legend group's title font.
            text
                Sets the title of the legend group.
""",
        )
        super(LegendgrouptitleValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            data_class_str=data_class,
            data_docs=docs,
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@barpolar@_legendgrouptitle.py@.PATH_END.py
|
{
"filename": "README.md",
"repo_name": "andresmegias/radex-python",
"repo_path": "radex-python_extracted/radex-python-main/README.md",
"type": "Markdown"
}
|
# RADEX Line Fitter
Python interface for using the software RADEX to fit observed transition lines. It consists of a Python 3 script, `radexfit.py`, which is used with RADEX locally.
You can check the user guide ([`userguide.pdf`](https://github.com/andresmegias/radex-python/blob/main/userguide.pdf)) and the example files (inside the `examples` folder) to learn how to use it.
|
andresmegiasREPO_NAMEradex-pythonPATH_START.@radex-python_extracted@radex-python-main@README.md@.PATH_END.py
|
{
"filename": "Trajectory.py",
"repo_name": "wmpg/WesternMeteorPyLib",
"repo_path": "WesternMeteorPyLib_extracted/WesternMeteorPyLib-master/wmpl/Trajectory/Trajectory.py",
"type": "Python"
}
|
""" PyLIG trajectory solver
Estimates meteor trajectory from given observed points.
"""
from __future__ import print_function, division, absolute_import
import time
import copy
import sys
import os
import datetime
import collections
import pickle
import json
from operator import attrgetter
import base64
import hashlib
try:
import git
HAS_GITPYTHON = True
except ImportError:
HAS_GITPYTHON = False
import numpy as np
import scipy.optimize
import scipy.interpolate
import scipy.stats
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.pyplot import cm
from wmpl.Utils.OSTools import importBasemap
Basemap = importBasemap()
try:
    # If Numba is available, use the jit decorator with specified options
    from numba import njit

except ImportError:
    # If Numba is not available, define a no-op replacement.  Unlike the
    # naive `def njit(func): return func`, this also supports parametrized
    # use (e.g. @njit(cache=True)), matching numba's dual calling convention.
    def njit(*args, **kwargs):
        """No-op stand-in for numba.njit when Numba is not installed.

        Supports both bare use (@njit) and parametrized use (@njit(...)),
        returning the function unchanged in either case.
        """

        # Bare decorator: called directly with the function.
        if (len(args) == 1) and callable(args[0]) and (not kwargs):
            return args[0]

        # Parametrized form: return a pass-through decorator.
        def _decorator(func):
            return func

        return _decorator
import wmpl
from wmpl.Trajectory.Orbit import calcOrbit
from wmpl.Utils.Math import vectNorm, vectMag, meanAngle, findClosestPoints, RMSD, \
angleBetweenSphericalCoords, angleBetweenVectors, lineFunc, normalizeAngleWrap, confidenceInterval
from wmpl.Utils.Misc import valueFormat
from wmpl.Utils.OSTools import mkdirP
from wmpl.Utils.Pickling import savePickle
from wmpl.Utils.Plotting import savePlot
from wmpl.Utils.PlotOrbits import plotOrbits
from wmpl.Utils.PlotCelestial import CelestialPlot
from wmpl.Utils.PlotMap import GroundMap
from wmpl.Utils.TrajConversions import EARTH, G, ecef2ENU, enu2ECEF, geo2Cartesian, geo2Cartesian_vect, \
cartesian2Geo, altAz2RADec_vect, raDec2AltAz, raDec2AltAz_vect, raDec2ECI, eci2RaDec, jd2Date, datetime2JD
from wmpl.Utils.PyDomainParallelizer import parallelComputeGenerator
# Text size of image legends
LEGEND_TEXT_SIZE = 6
class ObservedPoints(object):
    def __init__(self, jdt_ref, meas1, meas2, time_data, lat, lon, ele, meastype, station_id=None, \
        excluded_time=None, ignore_list=None, ignore_station=False, magnitudes=None, fov_beg=None, \
        fov_end=None, obs_id=None, comment=""):
        """ Structure for containing data of observations from invidiual stations.

        Arguments:
            jdt_ref: [float] reference Julian date for the measurements. Add provided times should be given
                relative to this number. This is user selectable and can be the time of the first camera, or
                the first measurement, or some average time for the meteor, but should be close to the time of
                the meteor. This same reference date/time will be used on all camera measurements for the
                purposes of computing local sidereal time and making geocentric coordinate transformations,
                thus it is good that this time corresponds to the beginning of the meteor.
            meas1: [list or ndarray] First measurement array (azimuth or R.A., depending on meastype, see
                meastype documentation for more information). Measurements should be given in radians.
            meas2: [list or ndarray] Second measurement array (altitude, zenith angle or declination,
                depending on meastype, see meastype documentation for more information), in radians.
            time_data: [list or ndarray] Time in seconds from the reference Julian date.
            lat: [float] Latitude +N of station in radians.
            lon: [float] Longitude +E of station in radians.
            ele: [float] Elevation of station in meters.
            meastype: [float] Flag indicating the type of angle measurements the user is providing for meas1
                and meas2 below. The following are all in radians:
                    1 = Right Ascension for meas1, Declination for meas2.
                    2 = Azimuth +east of due north for meas1, Elevation angle
                        above the horizon for meas2
                    3 = Azimuth +west of due south for meas1, Zenith angle for meas2
                    4 = Azimuth +north of due east for meas1, Zenith angle for meas2

        Keyword arguments:
            station_id: [str] Identification of the station. None by default, in which case a number will be
                assigned to the station by the program.
            excluded_time: [list] [excluded_time_min, excluded_time_max] A range of minimum and maximum
                observation time which should be excluded from the optimization because the measurements are
                missing in that portion of the time.
            ignore_list: [list or ndarray] A list of 0s and 1s which should be of the equal length as
                the input data. If a particular data point is to be ignored, number 1 should be put,
                otherwise (if the point should be used) 0 should be used. E.g. the this should could look
                like this: [0, 0, 0, 1, 1, 0, 0], which would mean that the fourth and the fifth points
                will be ignored in trajectory estimation.
            ignore_station: [bool] If True, all data from the given station will not be taken into
                consideration upon trajectory fitting, but they will still be shown on the graphs.
            magnitudes: [list] A list of apparent magnitudes of the meteor. None by default.
            fov_beg: [bool] True if the meteor began inside the FOV, False otherwise. None by default.
            fov_end: [bool] True if the meteor ended inside the FOV, False otherwise. None by default.
            obs_id: [int] Unique ID of the observation. This is to differentiate different observations from
                the same station.
            comment: [str] A comment about the observations. May be used to store RMS FF file number on which
                the meteor was observed.

        """

        ### INPUT DATA ###
        ######################################################################################################

        self.meas1 = meas1
        self.meas2 = meas2

        # reference Julian date
        self.jdt_ref = jdt_ref

        # NOTE(review): the code below assumes time_data is an ndarray (it is
        # indexed with a boolean mask and divided elementwise) -- confirm
        # callers always pass arrays, not plain lists.
        self.time_data = time_data

        self.ignore_station = ignore_station

        # Set all points to be ignored if the station is ignored
        if self.ignore_station:
            ignore_list = np.ones(len(time_data), dtype=np.uint8)

        # Init the ignore list
        if ignore_list is None:
            self.ignore_list = np.zeros(len(time_data), dtype=np.uint8)

        else:
            self.ignore_list = np.array(ignore_list, dtype=np.uint8)

        # If all points are ignored, set this station as ignored
        # NOTE(review): this tests the *argument*, not self.ignore_list; it
        # works when ignore_list is None only because np.all(None) is False.
        if np.all(ignore_list):
            self.ignore_station = True

        # Store the number of measurement
        self.kmeas = len(self.time_data)

        # Calculate JD of each point
        self.JD_data = self.jdt_ref + self.time_data/86400.0

        # Station info
        self.lat = lat
        self.lon = lon
        self.ele = ele
        self.station_id = station_id

        # Observed points
        # - azim_data: azimuth +west of due north
        # - elev_data: elevation angle (altitude)
        self.azim_data = None
        self.elev_data = None

        # Equatorial coordinates
        self.ra_data = None
        self.dec_data = None

        # Apparent magnitude
        self.magnitudes = magnitudes

        # Meteor began/ended inside the FOV flags
        self.fov_beg = fov_beg
        self.fov_end = fov_end

        # Unique observation ID
        self.obs_id = obs_id

        # Observations comment (may be the FF file name)
        self.comment = comment

        ######################################################################################################


        ### CALCULATED DATA ###
        ######################################################################################################

        # Angle between the station, the state vector, and the trajectory
        self.incident_angle = None

        # Weight for the station
        self.weight = None

        # Residuals from the fit
        self.h_residuals = None
        self.h_res_rms = None
        self.v_residuals = None
        self.v_res_rms = None

        # Calculated point to point velocities (in m/s)
        self.velocities = None

        # Average velocities including all previous points up to the current point (for first 4 points the
        # velocity corresponds to the average velocity through those 4 points)
        self.velocities_prev_point = None

        # Calculated length along the path (meters)
        self.length = None

        # Distance from state vector (meters)
        self.state_vect_dist = None

        # Calculated lag (meters)
        self.lag = None

        # Line parameters used for lag calculation (first element is the line slope, i.e. velocity in m/s)
        self.lag_line = None

        # Initial velocity
        self.v_init = None

        # Direct fit standard deviation of the initial velocity
        self.v_init_stddev = None

        # Jacchia fit parameters for these observations
        self.jacchia_fit = None

        # Modelled RA and Dec
        self.model_ra = None
        self.model_dec = None

        # Modelled azimuth and elevation
        self.model_azim = None
        self.model_elev = None

        # Modelled values for the input type data
        self.model_fit1 = None
        self.model_fit2 = None

        # ECI coordinates of observed CPA to the radiant line, with the station fixed in time at jdt_ref
        self.meas_eci = None

        # ECI vector of observed CPA to the radiant line, with the station moving in time
        self.meas_eci_los = None

        # ECI coordinates of radiant CPA to the observed line of sight
        self.model_eci = None

        # Arrays for geo coords of closest points of approach of observed lines of sight to the radiant line
        # (i.e. points on the LoS lines)
        self.meas_lat = None
        self.meas_lon = None
        self.meas_ht = None
        self.meas_range = None

        # Arrays for geo coords of closest points of approach of the radiant line to the observed lines of
        # sight (i.e. points on the trajectory)
        self.model_lat = None
        self.model_lon = None
        self.model_ht = None
        self.model_range = None

        # Coordinates of the first point (observed)
        self.rbeg_lat = None
        self.rbeg_lon = None
        self.rbeg_ele = None
        self.rbeg_jd = None

        # Coordinates of the last point (observed)
        self.rend_lat = None
        self.rend_lon = None
        self.rend_ele = None
        self.rend_jd = None

        # Coordinates of the lowest point (observed)
        self.htmin_lat = None
        self.htmin_lon = None
        self.htmin_ele = None
        self.htmin_jd = None

        # Absolute magntiudes
        self.absolute_magnitudes = None

        ######################################################################################################


        # Normalize the input measurements to azimuth/elevation.
        # If inputs are RA and Dec
        if meastype == 1:
            self.ra_data = meas1
            self.dec_data = meas2

            # Calculate azimuthal coordinates
            self.calcAzimuthal()

        # If inputs are azimuth +east of due north, and elevation angle
        elif meastype == 2:
            self.azim_data = meas1
            self.elev_data = meas2

        # If inputs are azimuth +west of due south, and zenith angle
        elif meastype == 3:
            self.azim_data = (meas1 + np.pi)%(2*np.pi)
            self.elev_data = np.pi/2.0 - meas2

        # If input are azimuth +north of due east, and zenith angle
        elif meastype == 4:
            self.azim_data = (np.pi/2.0 - meas1)%(2*np.pi)
            self.elev_data = np.pi/2.0 - meas2

        else:
            print("Measurement type 'meastype' =", meastype, 'invalid!')
            sys.exit()

        # Calculate equatorial coordinates
        self.calcEquatorial()

        # Calculate the Earth-centered interial coordinates of observed points
        self.calcECI()

        # Calculate position of the station in ECI coordinates (only for the reference JD, used for
        # intersecting planes solution)
        self.x_stat, self.y_stat, self.z_stat = geo2Cartesian(self.lat, self.lon, self.ele, self.jdt_ref)
        self.stat_eci = np.array([self.x_stat, self.y_stat, self.z_stat])

        # Calculate positions of the station in ECI coordinates, for each JD of individual measurements
        # (used for the lines of sight least squares approach)
        self.stat_eci_los = np.ascontiguousarray(
            np.array(geo2Cartesian_vect(self.lat, self.lon, self.ele, self.JD_data)).T
        )

        # Fit a plane through the given points
        self.plane_N = self.planeFit()


        ### EXCLUDED POINTS ###
        ######################################################################################################

        self.excluded_time = excluded_time
        self.excluded_indx_range = []

        # Get the indices of measurements between which there is an excluded part of the trajectory
        if self.excluded_time is not None:

            # Get minimum and maximum excluded times
            excluded_time_min, excluded_time_max = min(self.excluded_time), max(self.excluded_time)

            # Make sure the excluded time is within the observations
            if (excluded_time_min >= np.min(self.time_data)) and (excluded_time_max <= np.max(time_data)):

                excluded_indx_min = 0
                excluded_indx_max = len(self.time_data) - 1

                # Find indices of excluded times, taking the ignored points into account
                for i, t in enumerate(self.time_data[self.ignore_list == 0]):
                    if t <= excluded_time_min:
                        excluded_indx_min = i

                    if t >= excluded_time_max:
                        excluded_indx_max = i
                        break

                self.excluded_indx_range = [excluded_indx_min, excluded_indx_max]

            else:
                print('Excluded time range', self.excluded_time, 'is outside the observation times!')

        ######################################################################################################

        # ### PLOT RESULTS

        # fig = plt.figure()
        # ax = fig.add_subplot(111, projection='3d')

        # # Plot station position
        # ax.scatter(self.x_stat, self.y_stat, self.z_stat, s=50)

        # # Plot line of sight
        # #ax.scatter(self.x_stat + self.x_eci, self.y_stat + self.y_eci, self.z_stat + self.z_eci, c='red')
        # ax.quiver(self.x_stat, self.y_stat, self.z_stat, self.x_eci, self.y_eci, self.z_eci, length=1.0,
        #     normalize=True, arrow_length_ratio=0.1)

        # # ax.scatter(0, 0, 0, s=50)
        # # ax.scatter(self.x_eci, self.y_eci, self.z_eci, c='red')

        # d = -np.array([self.x_stat, self.y_stat, self.z_stat]).dot(self.plane_N)
        # #d = -np.array([0, 0, 0]).dot(self.plane_N)
        # print('d', d)

        # # create x,y
        # xx, yy = np.meshgrid(np.arange(self.x_stat - 1, self.x_stat + 2), np.arange(self.y_stat - 1, self.y_stat + 2))
        # #xx, yy = np.meshgrid(np.arange(-1, 2), np.arange(-1, 2))

        # # calculate corresponding z
        # z = (-self.plane_N[0]*xx - self.plane_N[1]*yy - d)*1.0/self.plane_N[2]

        # # Plot plane normal
        # ax.scatter(*(self.plane_N + self.stat_eci))

        # print('N:', self.plane_N)

        # print(z)

        # # plot the surface
        # ax.plot_surface(xx, yy, z, color='green', alpha=0.5)

        # ax.set_xlim([-1 + self.x_stat, 1 + self.x_stat])
        # ax.set_ylim([-1 + self.y_stat, 1 + self.y_stat])
        # ax.set_zlim([-1 + self.z_stat, 1 + self.z_stat])

        # plt.show()

        # ###
def calcAzimuthal(self):
    """ Compute the azimuth/elevation data from the stored right ascension and declination. """

    # Pin the Julian date of every measurement to the reference time. This is done because for CAMS
    # data the azimuthal to equatorial conversion was done without considering the flow of time
    # during the meteor's appearance.
    # NOTE: If your data does account for the changing time, then jd_fixed should be:
    #   jd_fixed = self.JD_data
    jd_fixed = np.ones_like(self.ra_data)*self.jdt_ref

    # Convert equatorial coordinates to horizontal (azimuth, elevation)
    self.azim_data, self.elev_data = raDec2AltAz_vect(self.ra_data, self.dec_data, jd_fixed, self.lat, \
        self.lon)
def calcEquatorial(self):
    """ Compute equatorial coordinates (RA, Dec) from the stored azimuthal coordinates. """

    # RA/Dec computed at the fixed reference time - used by the plane intersection method
    eq_fixed = altAz2RADec_vect(self.azim_data, self.elev_data, self.jdt_ref, self.lat, self.lon)
    self.ra_data, self.dec_data = eq_fixed

    # RA/Dec computed at the per-point observation times - used by the line of sight method
    eq_los = altAz2RADec_vect(self.azim_data, self.elev_data, self.JD_data, self.lat, self.lon)
    self.ra_data_los, self.dec_data_los = eq_los
def calcECI(self):
    """ Compute Earth-centered inertial (ECI) unit vectors from RA and Dec. """

    # ECI line-of-sight unit vectors at the fixed reference time (planes intersection method)
    eci_plane = np.array(raDec2ECI(self.ra_data, self.dec_data)).T
    self.meas_eci = eci_plane
    self.x_eci, self.y_eci, self.z_eci = eci_plane.T

    # ECI line-of-sight unit vectors at per-point times (line of sight method). Contiguous layout
    # is enforced for faster downstream vector operations.
    eci_los = np.ascontiguousarray(np.array(raDec2ECI(self.ra_data_los, self.dec_data_los)).T)
    self.meas_eci_los = eci_los
    self.x_eci_los, self.y_eci_los, self.z_eci_los = eci_los.T
def planeFit(self):
    """ Fit a plane through the station position and the observed line-of-sight points.

    Return:
        [ndarray] Unit normal vector of the fitted plane.
    """

    # Use only non-ignored points, and append the station itself (at the local origin) so the
    # plane is forced through the station position
    use = self.ignore_list == 0
    xs = np.append(self.x_eci[use], 0)
    ys = np.append(self.y_eci[use], 0)
    zs = np.append(self.z_eci[use], 0)

    # Design matrix for the linear plane model: z = a*x + b*y + d
    design = np.column_stack([xs, ys, np.ones(len(xs))])

    # Least-squares solution for the plane parameters
    coeffs = scipy.linalg.lstsq(design, zs)[0]

    # Plane normal for the form a*X + b*Y - Z + d = 0
    normal = np.array([coeffs[0], coeffs[1], -1.0])

    # Return the unit-length normal
    return vectNorm(normal)
class PlaneIntersection(object):
    """ Radiant solution from the intersection of two observation planes (Ceplecha-style planes
    method). Each station contributes a plane through its position and lines of sight; the meteor
    trajectory lies along the intersection of the two planes.
    """

    def __init__(self, obs1, obs2):
        """ Calculate the plane intersection between two stations.

        Arguments:
            obs1: [ObservedPoints] Observations from the first station.
            obs2: [ObservedPoints] Observations from the second station.
        """

        self.obs1 = obs1
        self.obs2 = obs2

        # Calculate the observed angular length of the track from the first station
        # (meas_eci entries are assumed to be unit vectors, so the dot product is the cosine)
        obsangle1 = np.arccos(np.dot(self.obs1.meas_eci[0], self.obs1.meas_eci[-1]))

        # Calculate the observed angular length of the track from the second station
        obsangle2 = np.arccos(np.dot(self.obs2.meas_eci[0], self.obs2.meas_eci[-1]))


        ### Calculate the angle between the pair of planes (convergence angle) ###
        ######################################################################################################

        # Calculate the cosine of the convergence angle
        ang_cos = np.dot(self.obs1.plane_N, self.obs2.plane_N)

        # Make sure the cosine is in the proper range; the abs() folds the angle into [0, 90] deg,
        # since the normals' mutual orientation is arbitrary
        self.conv_angle = np.arccos(np.abs(np.clip(ang_cos, -1, 1)))

        ######################################################################################################


        # The radiant direction is along the intersection line of the two planes, i.e. perpendicular
        # to both plane normals
        self.radiant_eci = np.cross(self.obs1.plane_N, self.obs2.plane_N)
        self.radiant_eci = vectNorm(self.radiant_eci)

        # If the last measurement is closer to the radiant than the first point, reverse signs
        # (the meteor moves away from the radiant)
        if np.dot(self.obs1.meas_eci[0], self.radiant_eci) < np.dot(self.obs1.meas_eci[-1], self.radiant_eci):
            self.radiant_eci = -self.radiant_eci

        # Calculate the radiant position in RA and Dec
        self.radiant_eq = eci2RaDec(self.radiant_eci)


        ###### Calculate the closest point of approach (CPA) from the stations to the radiant line,
        ###### that is, a vector pointing from each station to the radiant line, which magnitude
        ###### corresponds to the distance to the radiant line

        ### Calculate the unit vector pointing from the 1st station to the radiant line ###
        ######################################################################################################

        # w1 is perpendicular to both the radiant line and plane 1's normal, i.e. it lies in plane 1
        self.w1 = np.cross(self.radiant_eci, self.obs1.plane_N)

        # Normalize the vector
        self.w1 = vectNorm(self.w1)

        # Invert vector orientation if pointing towards the station, not the radiant line
        if np.dot(self.w1, self.obs1.meas_eci[0]) < 0:
            self.w1 = -self.w1

        ######################################################################################################

        ### Calculate the unit vector pointing from the 2nd station to the radiant line ###
        ######################################################################################################

        self.w2 = np.cross(self.radiant_eci, self.obs2.plane_N)

        # Normalize the vector
        self.w2 = vectNorm(self.w2)

        # Invert vector orientation if pointing towards the station, not the radiant line
        if np.dot(self.w2, self.obs2.meas_eci[0]) < 0:
            self.w2 = -self.w2

        ######################################################################################################


        ### Calculate the range from stations to the radiant line ###
        ######################################################################################################

        # Calculate the difference in position of the two stations
        stat_diff = self.obs1.stat_eci - self.obs2.stat_eci

        # Calculate the angle between the pointings to the radiant line
        stat_cosangle = np.dot(self.w1, self.w2)

        # Calculate the range from the 1st station to the radiant line
        # (closed-form solution of the two-ray closest-approach problem)
        stat_range1 = (stat_cosangle*np.dot(stat_diff, self.w2) - np.dot(stat_diff, self.w1))/(1.0 \
            - stat_cosangle**2)

        # Calculate the CPA vector for the 1st station
        self.rcpa_stat1 = stat_range1*self.w1

        # Calculate the range from the 2nd station to the radiant line
        stat_range2 = (np.dot(stat_diff, self.w2) - stat_cosangle*np.dot(stat_diff, self.w1))/(1.0 \
            - stat_cosangle**2)

        # Calculate the CPA vector for the 2nd station
        self.rcpa_stat2 = stat_range2*self.w2

        # Calculate the position of the CPA with respect to the first camera, in ECI coordinates
        self.cpa_eci = obs1.stat_eci + self.rcpa_stat1

        ######################################################################################################

        # Calculate the statistical weight of the radiant solution: longer tracks and larger
        # convergence angles give more reliable intersections
        self.weight = obsangle1*obsangle2*np.sin(self.conv_angle)**2


    def show(self):
        """ Shows the intersection of the two planes in 3D (stations, lines of sight, fitted planes
        and the radiant direction), with all distances plotted in kilometers. """

        fig = plt.figure()
        ax = fig.add_subplot(111, projection='3d')

        observations = [self.obs1, self.obs2]

        # Calculate one point on the meteor trajectory
        traj_point, _, _ = findClosestPoints(self.obs1.stat_eci, self.obs1.meas_eci[0], self.cpa_eci, \
            self.radiant_eci)

        # Calculate the plot limits
        x_min = min([self.obs1.x_stat, self.obs2.x_stat, traj_point[0]])
        x_max = max([self.obs1.x_stat, self.obs2.x_stat, traj_point[0]])
        y_min = min([self.obs1.y_stat, self.obs2.y_stat, traj_point[1]])
        y_max = max([self.obs1.y_stat, self.obs2.y_stat, traj_point[1]])
        z_min = min([self.obs1.z_stat, self.obs2.z_stat, traj_point[2]])
        z_max = max([self.obs1.z_stat, self.obs2.z_stat, traj_point[2]])

        # Normalize the plot limits so they are rectangular (equal span on all axes)
        delta_x = x_max - x_min
        delta_y = y_max - y_min
        delta_z = z_max - z_min
        delta_max = max([delta_x, delta_y, delta_z])

        x_diff = delta_max - delta_x
        x_min -= x_diff/2
        x_max += x_diff/2

        y_diff = delta_max - delta_y
        y_min -= y_diff/2
        y_max += y_diff/2

        z_diff = delta_max - delta_z
        z_min -= z_diff/2
        z_max += z_diff/2

        # Convert meters to km
        x_min /= 1000
        x_max /= 1000
        y_min /= 1000
        y_max /= 1000
        z_min /= 1000
        z_max /= 1000

        # Calculate the quiver arrow length as a fraction of the plot diagonal
        arrow_len = 0.2*np.sqrt((x_min - x_max)**2 + (y_min - y_max)**2 + (z_min - z_max)**2)

        # Plot stations and observations
        for obs in observations:

            # Station positions
            ax.scatter(obs.x_stat/1000, obs.y_stat/1000, obs.z_stat/1000, s=50)

            # Lines of sight
            ax.quiver(obs.x_stat/1000, obs.y_stat/1000, obs.z_stat/1000, obs.x_eci/1000, obs.y_eci/1000, \
                obs.z_eci/1000, length=arrow_len, normalize=True, arrow_length_ratio=0.1, color='blue')

            # Plane offset term d in the plane equation a*X + b*Y + c*Z + d = 0
            d = -np.array([obs.x_stat/1000, obs.y_stat/1000, obs.z_stat/1000]).dot(obs.plane_N)

            # Create x,y
            xx, yy = np.meshgrid(np.linspace(x_min, x_max, 10), np.linspace(y_min, y_max, 10))

            # Calculate corresponding z
            z = (-obs.plane_N[0]*xx - obs.plane_N[1]*yy - d)*1.0/obs.plane_N[2]

            # Plot plane normal
            ax.quiver(obs.x_stat/1000, obs.y_stat/1000, obs.z_stat/1000, *obs.plane_N, length=arrow_len/2,
                normalize=True, arrow_length_ratio=0.1, color='green')

            # Plot the plane
            ax.plot_surface(xx, yy, z, alpha=0.25)


        # Plot the radiant state vector (negated so the arrow points towards the radiant)
        rad_x, rad_y, rad_z = -self.radiant_eci/1000
        rst_x, rst_y, rst_z = traj_point/1000
        ax.quiver(rst_x, rst_y, rst_z, rad_x, rad_y, rad_z, length=arrow_len, normalize=True, color='red', \
            arrow_length_ratio=0.1)

        ax.set_xlim([x_min, x_max])
        ax.set_ylim([y_min, y_max])
        ax.set_zlim([z_min, z_max])

        ax.set_xlabel('X (km)')
        ax.set_ylabel('Y (km)')
        ax.set_zlabel('Z (km)')

        # Change the size of ticks (make them smaller)
        ax.tick_params(axis='both', which='major', labelsize=8)

        plt.show()
def numStationsNotIgnored(observations):
    """ Take a list of ObservedPoints and returns the number of stations that are actually to be used and
        are not ignored in the solution.

    Arguments:
        observations: [list] A list of ObservedPoints objects.

    Return:
        [int] Number of stations that are used in the solution.
    """

    # Count only stations whose ignore flag is not set
    return sum(1 for obs in observations if not obs.ignore_station)
def angleSumMeasurements2Line(observations, state_vect, radiant_eci, weights=None, gravity=False,
    gravity_factor=1.0, v0z=None):
    """ Sum all angles between the radiant line and measurement lines of sight.

        This function is used as a cost function for the least squares radiant solution of Borovicka et
        al. (1990). The difference from the original approach is that the distances from the radiant line
        have been replaced with angles.

    Arguments:
        observations: [list] A list of ObservedPoints objects which are containing meteor observations.
        state_vect: [3 element ndarray] Estimated position of the initial state vector in ECI coordinates.
        radiant_eci: [3 element ndarray] Unit 3D vector of the radiant in ECI coordinates.

    Keyword arguments:
        weights: [list] A list of statistical weights for every station. None by default.
        gravity: [bool] If True, the gravity drop will be taken into account.
        gravity_factor: [float] Factor by which the gravity correction will be multiplied. 1.0 by default.
        v0z: [float] Initial vertical velocity of the meteor. If None, 0.0 will be used.

    Return:
        angle_sum: [float] Weighted mean of angles between the estimated trajectory line and individual
            lines of sight.
    """

    # Validate the weights (fills in defaults and zeroes out ignored stations). This replaces an
    # inline duplicate of the checkWeights logic.
    weights = checkWeights(observations, weights)

    # Move the state vector to the beginning of the trajectory
    state_vect = moveStateVector(state_vect, radiant_eci, observations)

    # Make sure that the radiant vector is a contiguous array for faster calculations
    radiant_eci = np.ascontiguousarray(radiant_eci)

    # Find the earliest point in time
    t0 = min([obs.time_data[0] for obs in observations])

    # Default the initial vertical velocity to zero (hoisted out of the loop - v0z is only read below)
    if v0z is None:
        v0z = 0.0

    angle_sum = 0.0
    weights_sum = 1e-10  # avoids division by zero if all weights end up 0

    # Go through all observations from all stations
    for i, obs in enumerate(observations):

        # Go through all measured positions
        for t, meas_eci, stat_eci, ignore in zip(obs.time_data, obs.meas_eci_los, obs.stat_eci_los, \
            obs.ignore_list):

            # Skip the point if it is to be ignored
            if ignore:
                continue

            # Get the ECI coordinates of the projection of the measurement line of sight on the radiant line
            _, rad_cpa, _ = findClosestPoints(stat_eci, meas_eci, state_vect, radiant_eci)

            # Take the gravity drop into account
            # Note: here we assume that the acceleration due to gravity is fixed at the given height,
            #   which might cause an offset of a few meters for events longer than 5 seconds
            if gravity:

                # Calculate the time in seconds from the beginning of the meteor
                t_rel = t - t0

                # Get the magnitude of the radiant vector
                rad_cpa_mag = vectMag(rad_cpa)

                # If the magnitude is 0, set it to 1 to avoid a division by zero issue
                if rad_cpa_mag == 0:
                    rad_cpa_mag = 1.0

                # Compute the model point modified due to gravity and re-project the line of sight
                # onto the gravity-corrected trajectory
                rad_cpa_grav = applyGravityDrop(rad_cpa, t_rel, rad_cpa_mag, gravity_factor, v0z)
                _, rad_cpa, _ = findClosestPoints(stat_eci, meas_eci, rad_cpa_grav, radiant_eci)

            # Calculate the unit vector pointing from the station to the point on the trajectory
            station_ray = vectNorm(rad_cpa - stat_eci)

            # Calculate the angle between the observed LoS as seen from the station and the radiant line
            cosangle = np.dot(meas_eci, station_ray)

            # Make sure the cosine is within limits and accumulate the weighted angle
            angle_sum += weights[i]*np.arccos(np.clip(cosangle, -1, 1))
            weights_sum += weights[i]

    return angle_sum/weights_sum
def minimizeAngleCost(params, observations, weights=None, gravity=False, gravity_factor=1.0, v0z=None):
    """ A helper function for minimization of angle deviations. The first half of `params` is the
        state vector position, the second half is the radiant direction. """

    # Split the packed parameter vector into the two 3-vectors
    sv_part, radiant_part = np.hsplit(params, 2)

    return angleSumMeasurements2Line(observations, sv_part, radiant_part, weights=weights, \
        gravity=gravity, gravity_factor=gravity_factor, v0z=v0z)
def calcSpatialResidual(jdt_ref, jd, state_vect, radiant_eci, stat, meas, gravity=False, gravity_factor=1.0,
    v0z=None):
    """ Calculate horizontal and vertical residuals from the radiant line, for the given observed point.

    Arguments:
        jdt_ref: [float] Reference Julian date of the event (t = 0 for the gravity correction).
        jd: [float] Julian date of the observed point.
        state_vect: [3 element ndarray] ECI position of the state vector
        radiant_eci: [3 element ndarray] radiant direction vector in ECI
        stat: [3 element ndarray] position of the station in ECI
        meas: [3 element ndarray] line of sight from the station, in ECI

    Keyword arguments:
        gravity: [bool] Apply the correction for Earth's gravity.
        gravity_factor: [float] Factor by which the gravity correction will be multiplied. 1.0 by default.
        v0z: [float] Initial vertical velocity of the meteor. If None, 0.0 will be used.

    Return:
        (hres, vres): [tuple of floats] residuals in horizontal and vertical direction from the radiant line
    """

    # Note:
    #   This function has been tested (without the gravity influence part) and it produces good results

    # Work with a unit line-of-sight vector
    meas = vectNorm(meas)

    # Calculate closest points of approach (observed line of sight to radiant line) from the state vector
    obs_cpa, rad_cpa, d = findClosestPoints(stat, meas, state_vect, radiant_eci)

    # Apply the gravity drop
    if gravity:

        # Compute the relative time
        t_rel = 86400*(jd - jdt_ref)

        # Correct the point on the trajectory for gravity
        if v0z is None:
            v0z = 0.0
        rad_cpa_grav = applyGravityDrop(rad_cpa, t_rel, vectMag(rad_cpa), gravity_factor, v0z)

        # ###########################

        # # Calculate closest points of approach (observed line of sight to radiant line) from the gravity corrected
        # # point
        obs_cpa, rad_cpa, d = findClosestPoints(stat, meas, rad_cpa_grav, radiant_eci)

        # Works by creating a dummy point ON the gravity dropped trajectory,
        # then re-doing the find closest points fit to get better CPA vectors.
        # Note that the find closest points algorithm doesn't care whether it uses the state vector or a random point
        # further down the trajectory AS LONG AS it's gravity corrected.

    # Vector pointing from the point on the trajectory to the point on the line of sight
    p = obs_cpa - rad_cpa

    # # Calculate geographical coordinates of the point on the trajectory
    # lat, lon, elev = cartesian2Geo(jd, *rad_cpa)

    # Calculate geographical coordinates of the state vector
    lat, lon, elev = cartesian2Geo(jd, *state_vect)

    # Calculate ENU (East, North, Up) vector at the position of the state vector, and direction of the radiant
    nn = np.array(ecef2ENU(lat, lon, *radiant_eci))

    # Convert the vector to polar coordinates
    theta = np.arctan2(nn[1], nn[0])
    phi = np.arccos(nn[2]/vectMag(nn))

    # Local reference frame unit vectors (built from the radiant direction in the ENU frame)
    hx = np.array([            -np.cos(theta),              np.sin(theta), 0.0])
    vz = np.array([-np.cos(phi)*np.sin(theta), -np.cos(phi)*np.cos(theta), np.sin(phi)])
    hy = np.array([ np.sin(phi)*np.sin(theta),  np.sin(phi)*np.cos(theta), np.cos(phi)])

    # Calculate local reference frame unit vectors in ECEF coordinates
    ehorzx = enu2ECEF(lat, lon, *hx)
    ehorzy = enu2ECEF(lat, lon, *hy)
    evert  = enu2ECEF(lat, lon, *vz)

    ehx = np.dot(p, ehorzx)
    ehy = np.dot(p, ehorzy)

    # Calculate vertical residuals
    # NOTE(review): vres is built from the "horz" basis vectors and hres from the "vert" one - the
    # hres/vres naming looks swapped relative to the basis-vector names. Behavior kept as-is;
    # confirm against callers before relabeling.
    vres = np.sign(ehx)*np.hypot(ehx, ehy)

    # Calculate horizontal residuals
    hres = np.dot(p, evert)

    return hres, vres
def lineFuncLS(params, x, y, weights):
    """ Weighted residual of a line model, for use with least-squares solvers.

    Arguments:
        params: [list] Line parameters
        x: [float] Independant variable
        y: [float] Estimated values

    Keyword arguments:
        weight: [float] Weight of the residual.

    Return:
        [float]: weighted residual of the line given by `params` evaluated at x
    """

    # Model prediction minus the data
    residual = lineFunc(x, *params) - y

    # The square root of the weight is taken because the LS function will square the residual
    return residual*np.sqrt(weights)
def jacchiaLagFunc(t, a1, a2):
    """ Jacchia (1955) model for modeling lengths along the trail of meteors, modified to fit the lag (length
        along the trail minus the linear part, estimated by fitting a line to the first part of observations,
        where the length is still linear) instead of the length along the trail.

    Arguments:
        t: [float] time in seconds at which the Jacchia function will be evaluated
        a1: [float] 1st acceleration term
        a2: [float] 2nd acceleration term

    Return:
        [float] Jacchia model defined by a1 and a2, estimated at point in time t
    """

    # Both deceleration terms are taken as magnitudes; the lag is always negative
    decel_mag = np.abs(a1)
    growth = np.exp(np.abs(a2)*t)

    return -decel_mag*growth
def jacchiaLengthFunc(t, a1, a2, v_init, k):
    """ Jacchia (1955) model for modelling lengths along the trail of meteors.

    Arguments:
        t: [float] Time in seconds at which the Jacchia function will be evaluated.
        a1: [float] 1st decelerationn term.
        a2: [float] 2nd deceleration term.
        v_init: [float] Initial velocity in m/s.
        k: [float] Initial offset in length.

    Return:
        [float] Jacchia model defined by a1 and a2, estimated at point in time t.
    """

    # Constant-velocity (linear) part of the motion
    linear_part = k + v_init*t

    # Exponential deceleration (lag) part
    lag_part = np.abs(a1)*np.exp(np.abs(a2)*t)

    return linear_part - lag_part
def jacchiaVelocityFunc(t, a1, a2, v_init):
    """ Derivation of the Jacchia (1955) model, used for calculating velocities from the fitted model.

    Arguments:
        t: [float] Time in seconds at which the Jacchia function will be evaluated.
        a1: [float] 1st decelerationn term.
        a2: [float] 2nd deceleration term.
        v_init: [float] Initial velocity in m/s.

    Return:
        [float] velocity at time t
    """

    # Note: the docstring previously documented a `k` parameter which is not part of the signature -
    # the length offset vanishes when the length model is differentiated with respect to time.
    return v_init - np.abs(a1*a2)*np.exp(np.abs(a2)*t)
def checkWeights(observations, weights):
    """ Check weight values and make sure they can be used.

    Defaults missing weights to 1, zeroes out the weights of ignored stations, and falls back to
    unit weights (again masked) if everything was zeroed out.
    """

    def _maskIgnored(w_arr):
        # Zero out entries belonging to stations that are not used in the solution
        return np.array([w if not observations[i].ignore_station else 0 \
            for i, w in enumerate(w_arr)])

    # If the weights were not given, use 1 for every weight
    if weights is None:
        weights = np.ones(len(observations))

    weights = _maskIgnored(weights)

    # If nothing positive remains, fall back to unit weights (still masking ignored stations)
    if sum(weights) <= 0:
        weights = _maskIgnored(np.ones(len(observations)))

    return weights
def timingResiduals(params, observations, time_dict, weights=None, ret_stddev=False):
    """ Calculate the sum of absolute differences between timings of given stations using the length from
        respective stations.

    Arguments:
        params: [ndarray] Timing differences from the reference station (NOTE: reference station should NOT be
            in this list).
        observations: [list] A list of ObservedPoints objects.
        time_dict: [dict] A dictionary keyed by station ID. A numeric value is treated as a fixed
            timing offset for that station; a boolean value means the offset is free and is taken
            from `params` instead.

    Keyword arguments:
        weights: [list] A list of statistical weights for every station.
        ret_stddev: [bool] Returns the standard deviation instead of the cost function.

    Return:
        [float] Average absolute difference between the timings from all stations using the length for
            matching.

    Side effects:
        When more than two stations are present, stations whose lengths do not overlap any other
        station are marked as ignored (ignore_station set, ignore_list filled) on the
        ObservedPoints objects themselves.
    """

    # Make sure weight values are OK
    weights = checkWeights(observations, weights)

    # Counter into `params` for stations whose time offset is being estimated
    stat_count = 0

    state_vect_distances = []

    # Go through observations from all stations
    for i, obs in enumerate(observations):

        # Check if the station has a given fixed time offset and assign it
        if not isinstance(time_dict[str(obs.station_id)], bool):
            t_diff = time_dict[str(obs.station_id)]

        else:

            # Take the estimated time difference for all other stations
            t_diff = params[stat_count]
            stat_count += 1

        # Calculate the shifted time
        time_shifted = obs.time_data + t_diff

        # Add length to length list
        state_vect_distances.append([time_shifted, obs.state_vect_dist])


    cost_sum = 0
    cost_point_count = 0
    weights_sum = 1e-10  # avoids division by zero when no pairs contribute

    # Keep track of stations with confirmed overlaps
    confirmed_overlaps = []

    # Go through all pairs of observations (i.e. stations)
    for i in range(len(observations)):

        # Skip ignored stations
        if observations[i].ignore_station:
            continue

        for j in range(len(observations)):

            # Skip ignored stations
            if observations[j].ignore_station:
                continue

            # Skip pairing the same observations again
            if j <= i:
                continue

            # Extract times and lengths from both stations
            time1, len1 = state_vect_distances[i]
            time2, len2 = state_vect_distances[j]

            # Exclude ignored points
            time1 = time1[observations[i].ignore_list == 0]
            len1 = len1[observations[i].ignore_list == 0]
            time2 = time2[observations[j].ignore_list == 0]
            len2 = len2[observations[j].ignore_list == 0]

            # Find common points in length between both stations
            common_pts = np.where((len2 >= np.min(len1)) & (len2 <= np.max(len1)))

            # Continue without fitting the timing is there is no, or almost no overlap
            if len(common_pts[0]) < 4:
                continue

            # Keep track of stations with confirmed overlaps
            confirmed_overlaps.append(observations[i].station_id)
            confirmed_overlaps.append(observations[j].station_id)

            # Take only the common points
            time2 = time2[common_pts]
            len2 = len2[common_pts]

            # If there are any excluded points in the reference observations, do not take their
            # pairs from the other site into consideration
            if observations[i].excluded_indx_range:

                # Extract excluded indices
                excluded_indx_min, excluded_indx_max = observations[i].excluded_indx_range

                # Get the range of lengths inside the exclusion zone
                len1_excluded_min = len1[excluded_indx_min]
                len1_excluded_max = len1[excluded_indx_max]

                # Select only those lengths in the other station which are outside the exclusion zone
                temp_arr = np.c_[time2, len2]
                temp_arr = temp_arr[~((temp_arr[:, 1] >= len1_excluded_min) \
                    & (temp_arr[:, 1] <= len1_excluded_max))]

                time2, len2 = temp_arr.T

            # Interpolate the first (i.e. reference length) as a length -> time mapping
            len1_interpol = scipy.interpolate.interp1d(len1, time1)

            # Calculate the residuals using smooth approximation of L1 (absolute value) cost
            # (a soft-L1 loss: 2*(sqrt(1 + z) - 1) ~ z for small z, ~ 2*sqrt(z) for large z)
            z = (len1_interpol(len2) - time2)**2

            # Calculate the cost function sum
            cost_sum += weights[i]*weights[j]*np.sum(2*(np.sqrt(1 + z) - 1))

            # Add the weight sum
            weights_sum += weights[i]*weights[j]

            # Add the total number of points to the cost counter
            cost_point_count += len(z)


    # Exclude stations with no time overlap with other stations
    # NOTE: this mutates the observation objects passed in (see docstring)
    if (len(observations) > 2):
        confirmed_overlaps = list(set(confirmed_overlaps))
        for obs in observations:
            if obs.station_id not in confirmed_overlaps:
                obs.ignore_station = True
                obs.ignore_list = np.ones(len(obs.time_data), dtype=np.uint8)


    # If no points were compared, return infinite
    if cost_point_count == 0:
        return np.inf

    # Calculate the standard deviation of the fit
    dist_stddev = np.sqrt(cost_sum/weights_sum/cost_point_count)

    if ret_stddev:

        # Returned for reporting the goodness of fit
        return dist_stddev

    else:

        # Returned for minimization
        return cost_sum/weights_sum/cost_point_count
def moveStateVector(state_vect, radiant_eci, observations):
    """ Moves the state vector position along the radiant line until it is before any points which are
        projected on it. This is used to make sure that lengths and lags are properly calculated.

    Arguments:
        state_vect: [ndarray] (x, y, z) ECI coordinates of the initial state vector (meters).
        radiant_eci: [ndarray] (x, y, z) components of the unit radiant direction vector.
        observations: [list] A list of ObservationPoints objects which hold measurements from individual
            stations.

    Return:
        rad_cpa_beg: [ndarray] (x, y, z) ECI coordinates of the beginning point of the trajectory.
    """

    rad_cpa_list = []
    radiant_ang_dist_list = []

    # Go through all non-ignored observations from all stations
    nonignored_observations = [obstmp for obstmp in observations if not obstmp.ignore_station]
    for obs in nonignored_observations:

        # Calculate closest points of approach (observed line of sight to radiant line) of the first point
        # on the trajectory across all stations
        _, rad_cpa, _ = findClosestPoints(obs.stat_eci_los[0], obs.meas_eci_los[0], state_vect,
            radiant_eci)
        rad_cpa_list.append(rad_cpa)

        # Compute angular distance from the first point to the radiant
        rad_ang_dist = angleBetweenVectors(radiant_eci, vectNorm(rad_cpa))
        radiant_ang_dist_list.append(rad_ang_dist)


    # # Choose the state vector with the largest height
    # rad_cpa_beg = rad_cpa_list[np.argmax([vectMag(rad_cpa_temp) for rad_cpa_temp in rad_cpa_list])]

    # Choose the state vector as the point of initial observation closest to the radiant
    # (fixed: the previous identity comprehension inside argmin was redundant)
    rad_cpa_beg = rad_cpa_list[np.argmin(radiant_ang_dist_list)]

    return np.ascontiguousarray(rad_cpa_beg)
class MCUncertainties(object):
    def __init__(self, mc_traj_list):
        """ Container for standard deviations and confidence intervals of trajectory parameters calculated
            using Monte Carlo.

        All attributes are initialized to None and are filled in later; for every quantity `x` there
        is a matching `x_ci` confidence-interval attribute.
        """

        # Confidence interval value (95%)
        self.ci = 95

        # A list with all trajectory objects calculated via Monte Carlo
        self.mc_traj_list = mc_traj_list

        # State vector position
        self.state_vect_mini = None
        self.state_vect_mini_ci = None
        self.x = None
        self.x_ci = None
        self.y = None
        self.y_ci = None
        self.z = None
        self.z_ci = None

        # Velocity state vector
        self.vx = None
        self.vx_ci = None
        self.vy = None
        self.vy_ci = None
        self.vz = None
        self.vz_ci = None

        # Radiant vector
        self.radiant_eci_mini = None
        self.radiant_eci_mini_ci = None

        # Beginning/ending points
        self.rbeg_lon = None
        self.rbeg_lon_ci = None
        self.rbeg_lon_m = None
        self.rbeg_lat = None
        self.rbeg_lat_ci = None
        self.rbeg_lat_m = None
        self.rbeg_ele = None
        self.rbeg_ele_ci = None
        self.rbeg_ele_wgs84 = None
        self.rbeg_ele_wgs84_ci = None

        self.rend_lon = None
        self.rend_lon_ci = None
        self.rend_lon_m = None
        self.rend_lat = None
        self.rend_lat_ci = None
        self.rend_lat_m = None
        self.rend_ele = None
        self.rend_ele_ci = None
        self.rend_ele_wgs84 = None
        self.rend_ele_wgs84_ci = None

        # Lowest height point (used for grazers)
        self.htmin_lon = None
        self.htmin_lon_ci = None
        self.htmin_lon_m = None
        self.htmin_lat = None
        self.htmin_lat_ci = None
        self.htmin_lat_m = None
        self.htmin_ele = None
        self.htmin_ele_ci = None
        self.htmin_ele_wgs84 = None
        self.htmin_ele_wgs84_ci = None

        # Apparent radiant position (radians)
        self.ra = None
        self.ra_ci = None
        self.dec = None
        self.dec_ci = None

        # Apparent azimuth and altitude
        self.azimuth_apparent = None
        self.azimuth_apparent_ci = None
        self.elevation_apparent = None
        self.elevation_apparent_ci = None

        # Estimated average velocity
        self.v_avg = None
        self.v_avg_ci = None

        # Estimated initial velocity
        self.v_init = None
        self.v_init_ci = None

        # Longitude of the reference point on the trajectory (rad)
        self.lon_ref = None
        self.lon_ref_ci = None

        # Latitude of the reference point on the trajectory (rad)
        self.lat_ref = None
        self.lat_ref_ci = None

        # Height of the reference point on the trajectory (meters)
        self.ht_ref = None
        self.ht_ref_ci = None

        # Geocentric latitude of the reference point (rad)
        self.lat_geocentric = None
        self.lat_geocentric_ci = None

        # Apparent zenith angle (before the correction for Earth's gravity)
        self.zc = None
        self.zc_ci = None

        # Zenith distance of the geocentric radiant (after the correction for Earth's gravity)
        self.zg = None
        self.zg_ci = None

        # Velocity at infinity
        self.v_inf = None
        self.v_inf_ci = None

        # Geocentric velocity (m/s)
        self.v_g = None
        self.v_g_ci = None

        # Geocentric radiant position (radians)
        self.ra_g = None
        self.ra_g_ci = None
        self.dec_g = None
        self.dec_g_ci = None

        # Ecliptic coordinates of the radiant (radians)
        self.L_g = None
        self.L_g_ci = None
        self.B_g = None
        self.B_g_ci = None

        # Sun-centered ecliptic rectangular coordinates of the average position on the meteor's trajectory
        # (in kilometers)
        self.meteor_pos = None
        self.meteor_pos_ci = None

        # Heliocentric velocity of the meteor (m/s)
        self.v_h = None
        self.v_h_ci = None

        # Corrected heliocentric velocity vector of the meteoroid using the method of Sato & Watanabe (2014)
        self.v_h_x = None
        self.v_h_x_ci = None
        self.v_h_y = None
        self.v_h_y_ci = None
        self.v_h_z = None
        self.v_h_z_ci = None

        # Corrected ecliptic coordinates of the meteor using the method of Sato & Watanabe (2014)
        self.L_h = None
        self.L_h_ci = None
        self.B_h = None
        self.B_h_ci = None

        # Solar longitude (radians)
        self.la_sun = None
        self.la_sun_ci = None

        # Semi-major axis (AU)
        self.a = None
        self.a_ci = None

        # Eccentricity
        self.e = None
        self.e_ci = None

        # Inclination (radians)
        self.i = None
        self.i_ci = None

        # Argument of perihelion (radians)
        self.peri = None
        self.peri_ci = None

        # Ascending node (radians)
        self.node = None
        self.node_ci = None

        # Longitude of perihelion (radians)
        self.pi = None
        self.pi_ci = None

        # Latitude of perihelion (radians)
        self.b = None
        self.b_ci = None

        # Perihelion distance (AU)
        self.q = None
        self.q_ci = None

        # Aphelion distance (AU)
        self.Q = None
        self.Q_ci = None

        # True anomaly at the moment of contact with Earth (radians)
        self.true_anomaly = None
        self.true_anomaly_ci = None

        # Eccentric anomaly (radians)
        self.eccentric_anomaly = None
        self.eccentric_anomaly_ci = None

        # Mean anomaly (radians)
        self.mean_anomaly = None
        self.mean_anomaly_ci = None

        # Calculate the date and time of the last perihelion passage (datetime object)
        self.last_perihelion = None
        self.last_perihelion_ci = None

        # Mean motion in the orbit (rad/day)
        self.n = None
        self.n_ci = None

        # Orbital period
        self.T = None
        self.T_ci = None

        # Tisserand's parameter with respect to Jupiter
        self.Tj = None
        self.Tj_ci = None
# Preserve compatibility with pickle files generated before the typo fix (the misspelled class
# name must remain importable for old pickles to unpickle)
MCUncertanties = MCUncertainties
def calcMCUncertainties(traj_list, traj_best):
    """ Takes a list of trajectory objects and returns the standard deviation of every parameter.

    For every parameter "x" stored on the returned object, a matching "x_ci" confidence interval is
    stored as well. Angular quantities (longitudes, right ascensions, orbital angles) use the circular
    standard deviation (scipy.stats.circstd) and confidenceInterval(..., angle=True); all other
    quantities use the plain standard deviation. Orbit-related uncertainties are only filled in when
    traj_best has a computed orbit.

    Arguments:
        traj_list: [list] A list of Trajectory objects, each is the result of an individual Monte Carlo run.
        traj_best: [Trajectory object] Trajectory which is chosen to the be the best of all MC runs.

    Return:
        un: [MCUncertainties object] Object containing the uncertainty of every calculated parameter.
    """

    # Init a new container for uncertainties
    un = MCUncertainties(traj_list)

    # Initial velocity
    un.v_init = np.std([traj.v_init for traj in traj_list])
    un.v_init_ci = confidenceInterval([traj.v_init for traj in traj_list], un.ci)

    # State vector (ECI position, per-component spread in meters)
    un.x = np.std([traj.state_vect_mini[0] for traj in traj_list])
    un.x_ci = confidenceInterval([traj.state_vect_mini[0] for traj in traj_list], un.ci)
    un.y = np.std([traj.state_vect_mini[1] for traj in traj_list])
    un.y_ci = confidenceInterval([traj.state_vect_mini[1] for traj in traj_list], un.ci)
    un.z = np.std([traj.state_vect_mini[2] for traj in traj_list])
    un.z_ci = confidenceInterval([traj.state_vect_mini[2] for traj in traj_list], un.ci)

    un.state_vect_mini = np.array([un.x, un.y, un.z])
    un.state_vect_mini_ci = np.array([un.x_ci, un.y_ci, un.z_ci])

    # Radiant direction spread (components of the ECI unit vector)
    rad_x = np.std([traj.radiant_eci_mini[0] for traj in traj_list])
    rad_x_ci = confidenceInterval([traj.radiant_eci_mini[0] for traj in traj_list], un.ci)
    rad_y = np.std([traj.radiant_eci_mini[1] for traj in traj_list])
    rad_y_ci = confidenceInterval([traj.radiant_eci_mini[1] for traj in traj_list], un.ci)
    rad_z = np.std([traj.radiant_eci_mini[2] for traj in traj_list])
    rad_z_ci = confidenceInterval([traj.radiant_eci_mini[2] for traj in traj_list], un.ci)

    un.radiant_eci_mini = np.array([rad_x, rad_y, rad_z])
    un.radiant_eci_mini_ci = np.array([rad_x_ci, rad_y_ci, rad_z_ci])

    # Velocity state vector - the stddev is propagated from the speed and radiant component spreads
    # as a sum of relative errors (v_component = v_init*radiant_component)
    un.vx = abs(traj_best.v_init*traj_best.radiant_eci_mini[0]*(un.v_init/traj_best.v_init
        + rad_x/traj_best.radiant_eci_mini[0]))
    un.vx_ci = confidenceInterval([traj.v_init*traj.radiant_eci_mini[0] for traj in traj_list], un.ci)
    un.vy = abs(traj_best.v_init*traj_best.radiant_eci_mini[1]*(un.v_init/traj_best.v_init
        + rad_y/traj_best.radiant_eci_mini[1]))
    un.vy_ci = confidenceInterval([traj.v_init*traj.radiant_eci_mini[1] for traj in traj_list], un.ci)
    un.vz = abs(traj_best.v_init*traj_best.radiant_eci_mini[2]*(un.v_init/traj_best.v_init
        + rad_z/traj_best.radiant_eci_mini[2]))
    un.vz_ci = confidenceInterval([traj.v_init*traj.radiant_eci_mini[2] for traj in traj_list], un.ci)

    # Beginning/ending points
    # N_beg is the ellipsoid normal radius at the begin-point latitude, used below to express the
    # angular lon/lat spreads in meters
    N_beg = EARTH.EQUATORIAL_RADIUS/np.sqrt(1.0 - (EARTH.E**2)*np.sin(traj_best.rbeg_lat)**2)
    un.rbeg_lon = scipy.stats.circstd([traj.rbeg_lon for traj in traj_list])
    un.rbeg_lon_ci = confidenceInterval([traj.rbeg_lon for traj in traj_list], un.ci, angle=True)
    un.rbeg_lon_m = np.sin(un.rbeg_lon)*np.cos(traj_best.rbeg_lat)*N_beg
    un.rbeg_lat = np.std([traj.rbeg_lat for traj in traj_list])
    un.rbeg_lat_ci = confidenceInterval([traj.rbeg_lat for traj in traj_list], un.ci)
    un.rbeg_lat_m = np.sin(un.rbeg_lat)*N_beg
    un.rbeg_ele = np.std([traj.rbeg_ele for traj in traj_list])
    un.rbeg_ele_ci = confidenceInterval([traj.rbeg_ele for traj in traj_list], un.ci)
    un.rbeg_ele_wgs84 = np.std([traj.rbeg_ele_wgs84 for traj in traj_list])
    un.rbeg_ele_wgs84_ci = confidenceInterval([traj.rbeg_ele_wgs84 for traj in traj_list], un.ci)

    N_end = EARTH.EQUATORIAL_RADIUS/np.sqrt(1.0 - (EARTH.E**2)*np.sin(traj_best.rend_lat)**2)
    un.rend_lon = scipy.stats.circstd([traj.rend_lon for traj in traj_list])
    un.rend_lon_ci = confidenceInterval([traj.rend_lon for traj in traj_list], un.ci, angle=True)
    un.rend_lon_m = np.sin(un.rend_lon)*np.cos(traj_best.rend_lat)*N_end
    un.rend_lat = np.std([traj.rend_lat for traj in traj_list])
    un.rend_lat_ci = confidenceInterval([traj.rend_lat for traj in traj_list], un.ci)
    un.rend_lat_m = np.sin(un.rend_lat)*N_end
    un.rend_ele = np.std([traj.rend_ele for traj in traj_list])
    un.rend_ele_ci = confidenceInterval([traj.rend_ele for traj in traj_list], un.ci)
    un.rend_ele_wgs84 = np.std([traj.rend_ele_wgs84 for traj in traj_list])
    un.rend_ele_wgs84_ci = confidenceInterval([traj.rend_ele_wgs84 for traj in traj_list], un.ci)

    # Lowest point
    N_end = EARTH.EQUATORIAL_RADIUS/np.sqrt(1.0 - (EARTH.E**2)*np.sin(traj_best.htmin_lat)**2)
    un.htmin_lon = scipy.stats.circstd([traj.htmin_lon for traj in traj_list])
    un.htmin_lon_ci = confidenceInterval([traj.htmin_lon for traj in traj_list], un.ci, angle=True)
    un.htmin_lon_m = np.sin(un.htmin_lon)*np.cos(traj_best.htmin_lat)*N_end
    un.htmin_lat = np.std([traj.htmin_lat for traj in traj_list])
    un.htmin_lat_ci = confidenceInterval([traj.htmin_lat for traj in traj_list], un.ci)
    un.htmin_lat_m = np.sin(un.htmin_lat)*N_end
    un.htmin_ele = np.std([traj.htmin_ele for traj in traj_list])
    un.htmin_ele_ci = confidenceInterval([traj.htmin_ele for traj in traj_list], un.ci)
    un.htmin_ele_wgs84 = np.std([traj.htmin_ele_wgs84 for traj in traj_list])
    un.htmin_ele_wgs84_ci = confidenceInterval([traj.htmin_ele_wgs84 for traj in traj_list], un.ci)

    # Orbit-related uncertainties are only available when an orbit was computed
    if traj_best.orbit is not None:

        # Apparent ECI
        un.ra = scipy.stats.circstd([traj.orbit.ra for traj in traj_list])
        un.ra_ci = confidenceInterval([traj.orbit.ra for traj in traj_list], un.ci, angle=True)
        un.dec = np.std([traj.orbit.dec for traj in traj_list])
        un.dec_ci = confidenceInterval([traj.orbit.dec for traj in traj_list], un.ci)
        un.v_avg = np.std([traj.orbit.v_avg for traj in traj_list])
        un.v_avg_ci = confidenceInterval([traj.orbit.v_avg for traj in traj_list], un.ci)
        un.v_inf = np.std([traj.orbit.v_inf for traj in traj_list])
        un.v_inf_ci = confidenceInterval([traj.orbit.v_inf for traj in traj_list], un.ci)
        un.azimuth_apparent = scipy.stats.circstd([traj.orbit.azimuth_apparent for traj in traj_list])
        un.azimuth_apparent_ci = confidenceInterval([traj.orbit.azimuth_apparent for traj in traj_list], \
            un.ci, angle=True)
        un.elevation_apparent = np.std([traj.orbit.elevation_apparent for traj in traj_list])
        un.elevation_apparent_ci = confidenceInterval([traj.orbit.elevation_apparent for traj in traj_list], \
            un.ci)

        # Apparent ground-fixed
        un.ra_norot = scipy.stats.circstd([traj.orbit.ra_norot for traj in traj_list])
        un.ra_norot_ci = confidenceInterval([traj.orbit.ra_norot for traj in traj_list], un.ci, angle=True)
        un.dec_norot = np.std([traj.orbit.dec_norot for traj in traj_list])
        un.dec_norot_ci = confidenceInterval([traj.orbit.dec_norot for traj in traj_list], un.ci)
        un.v_avg_norot = np.std([traj.orbit.v_avg_norot for traj in traj_list])
        un.v_avg_norot_ci = confidenceInterval([traj.orbit.v_avg_norot for traj in traj_list], un.ci)
        un.v_init_norot = np.std([traj.orbit.v_init_norot for traj in traj_list])
        un.v_init_norot_ci = confidenceInterval([traj.orbit.v_init_norot for traj in traj_list], un.ci)
        un.azimuth_apparent_norot = scipy.stats.circstd([traj.orbit.azimuth_apparent_norot for traj \
            in traj_list])
        un.azimuth_apparent_norot_ci = confidenceInterval([traj.orbit.azimuth_apparent_norot for traj \
            in traj_list], un.ci, angle=True)
        un.elevation_apparent_norot = np.std([traj.orbit.elevation_apparent_norot for traj in traj_list])
        un.elevation_apparent_norot_ci = confidenceInterval([traj.orbit.elevation_apparent_norot for traj \
            in traj_list], un.ci)

        # Reference point on the meteor trajectory
        un.lon_ref = scipy.stats.circstd([traj.orbit.lon_ref for traj in traj_list])
        un.lon_ref_ci = confidenceInterval([traj.orbit.lon_ref for traj in traj_list], un.ci, angle=True)
        un.lat_ref = np.std([traj.orbit.lat_ref for traj in traj_list])
        un.lat_ref_ci = confidenceInterval([traj.orbit.lat_ref for traj in traj_list], un.ci)
        un.lat_geocentric = np.std([traj.orbit.lat_geocentric for traj in traj_list])
        un.lat_geocentric_ci = confidenceInterval([traj.orbit.lat_geocentric for traj in traj_list], un.ci)
        un.ht_ref = np.std([traj.orbit.ht_ref for traj in traj_list])
        un.ht_ref_ci = confidenceInterval([traj.orbit.ht_ref for traj in traj_list], un.ci)
        un.ht_ref_wgs84 = np.std([traj.orbit.ht_ref_wgs84 for traj in traj_list])
        un.ht_ref_wgs84_ci = confidenceInterval([traj.orbit.ht_ref_wgs84 for traj in traj_list], un.ci)

        # Geocentric
        un.ra_g = scipy.stats.circstd([traj.orbit.ra_g for traj in traj_list])
        un.ra_g_ci = confidenceInterval([traj.orbit.ra_g for traj in traj_list], un.ci, angle=True)
        un.dec_g = np.std([traj.orbit.dec_g for traj in traj_list])
        un.dec_g_ci = confidenceInterval([traj.orbit.dec_g for traj in traj_list], un.ci)
        un.v_g = np.std([traj.orbit.v_g for traj in traj_list])
        un.v_g_ci = confidenceInterval([traj.orbit.v_g for traj in traj_list], un.ci)

        # Meteor position in Sun-centred rectangular coordinates
        meteor_pos_x = np.std([traj.orbit.meteor_pos[0] for traj in traj_list])
        meteor_pos_x_ci = confidenceInterval([traj.orbit.meteor_pos[0] for traj in traj_list], un.ci)
        meteor_pos_y = np.std([traj.orbit.meteor_pos[1] for traj in traj_list])
        meteor_pos_y_ci = confidenceInterval([traj.orbit.meteor_pos[1] for traj in traj_list], un.ci)
        meteor_pos_z = np.std([traj.orbit.meteor_pos[2] for traj in traj_list])
        meteor_pos_z_ci = confidenceInterval([traj.orbit.meteor_pos[2] for traj in traj_list], un.ci)

        un.meteor_pos = np.array([meteor_pos_x, meteor_pos_y, meteor_pos_z])
        un.meteor_pos_ci = np.array([meteor_pos_x_ci, meteor_pos_y_ci, meteor_pos_z_ci])

        # Zenith angles
        un.zc = np.std([traj.orbit.zc for traj in traj_list])
        un.zc_ci = confidenceInterval([traj.orbit.zc for traj in traj_list], un.ci)
        un.zg = np.std([traj.orbit.zg for traj in traj_list])
        un.zg_ci = confidenceInterval([traj.orbit.zg for traj in traj_list], un.ci)

        # Ecliptic geocentric
        un.L_g = scipy.stats.circstd([traj.orbit.L_g for traj in traj_list])
        un.L_g_ci = confidenceInterval([traj.orbit.L_g for traj in traj_list], un.ci, angle=True)
        un.B_g = np.std([traj.orbit.B_g for traj in traj_list])
        un.B_g_ci = confidenceInterval([traj.orbit.B_g for traj in traj_list], un.ci)
        un.v_h = np.std([traj.orbit.v_h for traj in traj_list])
        un.v_h_ci = confidenceInterval([traj.orbit.v_h for traj in traj_list], un.ci)

        # Ecliptic heliocentric
        un.L_h = scipy.stats.circstd([traj.orbit.L_h for traj in traj_list])
        un.L_h_ci = confidenceInterval([traj.orbit.L_h for traj in traj_list], un.ci, angle=True)
        un.B_h = np.std([traj.orbit.B_h for traj in traj_list])
        un.B_h_ci = confidenceInterval([traj.orbit.B_h for traj in traj_list], un.ci)
        un.v_h_x = np.std([traj.orbit.v_h_x for traj in traj_list])
        un.v_h_x_ci = confidenceInterval([traj.orbit.v_h_x for traj in traj_list], un.ci)
        un.v_h_y = np.std([traj.orbit.v_h_y for traj in traj_list])
        un.v_h_y_ci = confidenceInterval([traj.orbit.v_h_y for traj in traj_list], un.ci)
        un.v_h_z = np.std([traj.orbit.v_h_z for traj in traj_list])
        un.v_h_z_ci = confidenceInterval([traj.orbit.v_h_z for traj in traj_list], un.ci)

        # Orbital elements
        un.la_sun = scipy.stats.circstd([traj.orbit.la_sun for traj in traj_list])
        un.la_sun_ci = confidenceInterval([traj.orbit.la_sun for traj in traj_list], un.ci, angle=True)
        un.a = np.std([traj.orbit.a for traj in traj_list])
        un.a_ci = confidenceInterval([traj.orbit.a for traj in traj_list], un.ci)
        un.e = np.std([traj.orbit.e for traj in traj_list])
        un.e_ci = confidenceInterval([traj.orbit.e for traj in traj_list], un.ci)
        un.i = np.std([traj.orbit.i for traj in traj_list])
        un.i_ci = confidenceInterval([traj.orbit.i for traj in traj_list], un.ci)
        un.peri = scipy.stats.circstd([traj.orbit.peri for traj in traj_list])
        un.peri_ci = confidenceInterval([traj.orbit.peri for traj in traj_list], un.ci, angle=True)
        un.node = scipy.stats.circstd([traj.orbit.node for traj in traj_list])
        un.node_ci = confidenceInterval([traj.orbit.node for traj in traj_list], un.ci, angle=True)
        un.pi = scipy.stats.circstd([traj.orbit.pi for traj in traj_list])
        un.pi_ci = confidenceInterval([traj.orbit.pi for traj in traj_list], un.ci, angle=True)
        un.b = np.std([traj.orbit.b for traj in traj_list])
        un.b_ci = confidenceInterval([traj.orbit.b for traj in traj_list], un.ci)
        un.q = np.std([traj.orbit.q for traj in traj_list])
        un.q_ci = confidenceInterval([traj.orbit.q for traj in traj_list], un.ci)
        un.Q = np.std([traj.orbit.Q for traj in traj_list])
        un.Q_ci = confidenceInterval([traj.orbit.Q for traj in traj_list], un.ci)
        un.true_anomaly = scipy.stats.circstd([traj.orbit.true_anomaly for traj in traj_list])
        un.true_anomaly_ci = confidenceInterval([traj.orbit.true_anomaly for traj in traj_list], un.ci, \
            angle=True)
        un.eccentric_anomaly = scipy.stats.circstd([traj.orbit.eccentric_anomaly for traj in traj_list])
        un.eccentric_anomaly_ci = confidenceInterval([traj.orbit.eccentric_anomaly for traj in traj_list], \
            un.ci, angle=True)
        un.mean_anomaly = scipy.stats.circstd([traj.orbit.mean_anomaly for traj in traj_list])
        un.mean_anomaly_ci = confidenceInterval([traj.orbit.mean_anomaly for traj in traj_list], un.ci, \
            angle=True)

        # Last perihelion uncertainty (days) - runs without a valid perihelion datetime are skipped
        last_perihelion_list = [datetime2JD(traj.orbit.last_perihelion) for traj \
            in traj_list if isinstance(traj.orbit.last_perihelion, datetime.datetime)]
        if len(last_perihelion_list):
            un.last_perihelion = np.std(last_perihelion_list)
            un.last_perihelion_ci = confidenceInterval(last_perihelion_list, un.ci)
        else:
            un.last_perihelion = np.nan
            un.last_perihelion_ci = (np.nan, np.nan)

        # Mean motion in the orbit (rad/day)
        un.n = np.std([traj.orbit.n for traj in traj_list])
        un.n_ci = confidenceInterval([traj.orbit.n for traj in traj_list], un.ci)

        # Orbital period
        un.T = np.std([traj.orbit.T for traj in traj_list])
        un.T_ci = confidenceInterval([traj.orbit.T for traj in traj_list], un.ci)

        # Tisserand's parameter
        un.Tj = np.std([traj.orbit.Tj for traj in traj_list])
        un.Tj_ci = confidenceInterval([traj.orbit.Tj for traj in traj_list], un.ci)

    return un
def calcCovMatrices(mc_traj_list):
    """ Calculate the covariance matrix between orbital elements, and initial state vector using all Monte
        Carlo trajectories. The covariance matrix is weighted by the timing residuals.

        The orbital covariance matrix is calculated with angles in degrees and the initial state vector
        matrix in meters and meters per second.

    Arguments:
        mc_traj_list: [list] A list of Trajectory objects from Monte Carlo runs.

    Return:
        (orbit_cov, state_vect_cov): [tuple of ndarrays] Orbital and initial state vector covariance
            matrices. If no run produced a valid orbit, both are 6x6 matrices filled with -1.
    """

    # Filter out those trajectories for which the last perihelion time could not be estimated
    mc_traj_list = [traj for traj in mc_traj_list if traj.orbit.last_perihelion is not None]

    # If there are no good orbits, do not estimate the covariance matrix
    if not mc_traj_list:
        return np.zeros((6, 6)) - 1, np.zeros((6, 6)) - 1

    # Extract timing residuals
    timing_res_list = np.array([traj.timing_res for traj in mc_traj_list])

    # Make sure the timing residual is not 0, to avoid division by zero below
    timing_res_list[timing_res_list == 0] = 1e-10

    # Weight each run by the inverse of its timing residual (the best run gets weight 1)
    weights = np.min(timing_res_list)/timing_res_list

    # Extract orbit elements (angles wrapped to avoid discontinuities, then converted to degrees)
    e_list = np.array([traj.orbit.e for traj in mc_traj_list])
    q_list = np.array([traj.orbit.q for traj in mc_traj_list])
    tp_list = np.array([datetime2JD(traj.orbit.last_perihelion) for traj in mc_traj_list])
    node_list = np.degrees(normalizeAngleWrap(np.array([traj.orbit.node for traj in mc_traj_list])))
    peri_list = np.degrees(normalizeAngleWrap(np.array([traj.orbit.peri for traj in mc_traj_list])))
    i_list = np.degrees(normalizeAngleWrap(np.array([traj.orbit.i for traj in mc_traj_list])))

    # Calculate the orbital covariance (angles in degrees)
    orbit_input = np.c_[e_list, q_list, tp_list, node_list, peri_list, i_list].T
    orbit_cov = np.cov(orbit_input, aweights=weights)

    # Extract initial state vectors (position and velocity components)
    state_vect_list = np.array([traj.state_vect_mini for traj in mc_traj_list])
    initial_vel_vect_list = np.array([traj.v_init*traj.radiant_eci_mini for traj in mc_traj_list])

    # Calculate initial state vector covariance
    state_vect_input = np.hstack([state_vect_list, initial_vel_vect_list]).T
    state_vect_cov = np.cov(state_vect_input, aweights=weights)

    return orbit_cov, state_vect_cov
def trajNoiseGenerator(traj, noise_sigma):
    """ Given a base trajectory object and the observation uncertainty, this generator will generate
        new trajectory objects with noise-added observations.

    Arguments:
        traj: [Trajectory] Trajectory instance.
        noise_sigma: [float] Standard deviations of noise to add to the data.

    Yields:
        [counter, traj_mc, traj.observations]:
            - counter: [int] Number of trajectories generated since the generator init.
            - traj_mc: [Trajectory] Trajectory object with added noise.
            - traj.observations: [list] A list of original noise-free ObservedPoints.
    """

    run_index = 0

    # Produce noise-added trajectory copies indefinitely
    while True:

        # Work on a deep copy so the original trajectory stays untouched
        traj_mc = copy.deepcopy(traj)

        # Regenerated measurements are expressed as azimuth/altitude pairs
        traj_mc.meastype = 2

        # The observations are refilled from scratch below
        traj_mc.observations = []

        # Resample every station's measurements using a Gaussian kernel
        for obs in traj.observations:

            noisy_azims = []
            noisy_elevs = []

            # Perturb each measured ECI line of sight and convert it back to alt/az coordinates
            for jd, los in zip(obs.JD_data, obs.meas_eci_los):

                # Unit vector pointing from the station to the meteor observation point in ECI coordinates
                los = vectNorm(los)

                ### Add noise to simulated coordinates (taken over from Gural solver source)

                # Construct two directions orthogonal to the line of sight
                z_axis = np.array([0.0, 0.0, 1.0])
                u_axis = vectNorm(np.cross(los, z_axis))
                v_axis = vectNorm(np.cross(u_axis, los))

                # Noise level scaled by the estimated angular fit residuals
                sigma = noise_sigma*np.abs(obs.ang_res_std)

                # Fall back to 1 degree if the residual estimate is unusable
                if (sigma < 0) or np.isnan(sigma):
                    sigma = np.radians(1)

                # Draw the two orthogonal offsets (draw order kept fixed for RNG reproducibility)
                u_offset = np.random.normal(0, sigma)
                v_offset = np.random.normal(0, sigma)

                # Perturb the line of sight and normalize it back to a unit vector
                noisy_los = vectNorm(los + u_offset*u_axis + v_offset*v_axis)

                ###

                # Convert the perturbed direction to equatorial, then to horizontal coordinates
                ra, dec = eci2RaDec(noisy_los)
                azim, elev = raDec2AltAz(ra, dec, jd, obs.lat, obs.lon)

                noisy_azims.append(azim)
                noisy_elevs.append(elev)

            # Fill in the new trajectory object - the time is assumed to be absolute
            traj_mc.infillTrajectory(noisy_azims, noisy_elevs, obs.time_data, obs.lat, obs.lon, \
                obs.ele, station_id=obs.station_id, excluded_time=obs.excluded_time, \
                ignore_list=obs.ignore_list, magnitudes=obs.magnitudes, fov_beg=obs.fov_beg, \
                fov_end=obs.fov_end, obs_id=obs.obs_id, comment=obs.comment)

        # Suppress plots, saving of results and extra optimizations for MC runs
        traj_mc.verbose = False
        traj_mc.estimate_timing_vel = True
        traj_mc.filter_picks = False
        traj_mc.show_plots = False
        traj_mc.save_results = False

        # Hand out the noise-added trajectory together with the pristine observations
        yield [run_index, traj_mc, traj.observations]

        run_index += 1
def checkMCTrajectories(mc_results, timing_res=np.inf, geometric_uncert=False):
    """ Filter out MC computed trajectories and only return successful ones.

    A run is kept if its LoS angle minimization converged, its orbit could be computed, and (unless
    geometric_uncert is set) its timing residuals are not worse than those of the original solution.

    Arguments:
        mc_results: [list] A list of Trajectory objects computed with added noise.

    Keyword arguments:
        timing_res: [float] Timing residual from the original LoS trajectory fit.
        geometric_uncert: [bool] If True, all MC runs will be taken to estimate the uncertainty, not just
            the ones with the better cost function value than the pure geometric solution. Use this when
            the lag is not reliable.

    Return:
        [list] A filtered list of trajectories.
    """

    if not geometric_uncert:

        # Take only those solutions which have the timing residuals <= than the initial solution
        mc_results = [mc_traj for mc_traj in mc_results if mc_traj.timing_res <= timing_res]

    # Reject those solutions for which LoS angle minimization failed
    # (truthiness check instead of the unidiomatic "== True" comparison, PEP 8 E712)
    mc_results = [mc_traj for mc_traj in mc_results if mc_traj.los_mini_status]

    # Reject those solutions for which the orbit could not be calculated
    mc_results = [mc_traj for mc_traj in mc_results if (mc_traj.orbit.ra_g is not None) \
        and (mc_traj.orbit.dec_g is not None)]

    print("{:d} successful MC runs done...".format(len(mc_results)))

    return mc_results
def _MCTrajSolve(params):
    """ Internal function. Does a Monte Carlo run of the given trajectory object. Used as a function for
        parallelization.

    Arguments:
        params: [list]
            - i: [int] Number of MC run to be printed out.
            - traj: [Trajectory object] Trajectory object on which the run will be performed.
            - observations: [list] A list of observations with no noise.

    Return:
        traj: [Trajectory object] Trajectory object with the MC solution.
    """

    run_num, traj, noise_free_obs = params

    # Report which run is being processed (1-based for readability)
    print('Run No.', run_num + 1)

    # Solve the noise-added trajectory, handing over the original observations for reference
    traj.run(_mc_run=True, _orig_obs=noise_free_obs)

    return traj
def monteCarloTrajectory(traj, mc_runs=None, mc_pick_multiplier=1, noise_sigma=1, geometric_uncert=False, \
    plot_results=True, mc_cores=None, max_runs=None):
    """ Estimates uncertanty in the trajectory solution by doing Monte Carlo runs. The MC runs are done
        in parallel on all available computer cores.

        The uncertanty is taken as the standard deviation of angular measurements. Each point is sampled
        mc_pick_multiplier times using a symetric 2D Gaussian kernel.

    Arguments:
        traj: [Trajectory object] initial trajectory on which Monte Carlo runs will be performed

    Keyword arguments:
        mc_runs: [int] A fixed number of Monte Carlo simulations. None by default. If it is given, it will
            override mc_pick_multiplier.
        mc_pick_multiplier: [int] Number of MC samples that will be taken for every point. 1 by default.
        noise_sigma: [float] Number of standard deviations to use for adding Gaussian noise to original
            measurements.
        geometric_uncert: [bool] If True, all MC runs will be taken to estimate the uncertainty, not just
            the ones with the better cost function value than the pure geometric solution. Use this when
            the lag is not reliable.
        plot_results: [bool] Plot the trajectory and orbit spread. True by default.
        mc_cores: [int] Number of CPU cores to use for Monte Carlo parallel procesing. None by default,
            which means that all available cores will be used.
        max_runs: [int] Maximum number of runs. None by default, which will limit the runs to 10x req_num.

    Return:
        (traj_best, uncertainties):
            - traj_best: [Trajectory object] The run with the lowest timing residuals (the input
                trajectory if not enough successful runs were done).
            - uncertainties: [MCUncertainties object] Standard deviation of every parameter, or None if
                not enough successful runs were done.
    """

    ### DO MONTE CARLO RUNS ###
    ##########################################################################################################

    # If a fixed number of Monte Carlo simulations is not given, derive it from the number of picks
    if mc_runs is None:

        # Calculate the total number of Monte Carlo runs, so every point is sampled mc_pick_multiplier times.
        mc_runs = sum([len(obs.time_data) for obs in traj.observations])
        mc_runs = mc_runs*mc_pick_multiplier

    print("Doing", mc_runs, "successful Monte Carlo runs...")

    # Init the trajectory noise generator
    traj_generator = trajNoiseGenerator(traj, noise_sigma)

    # Run the MC solutions
    # NOTE: "kwagrs" is the (misspelled) keyword name parallelComputeGenerator expects
    results_check_kwagrs = {"timing_res": traj.timing_res, "geometric_uncert": geometric_uncert}
    mc_results = parallelComputeGenerator(traj_generator, _MCTrajSolve, checkMCTrajectories, mc_runs, \
        results_check_kwagrs=results_check_kwagrs, n_proc=mc_cores, max_runs=max_runs)

    # If there are no MC runs which were successful, recompute using geometric uncertainties
    if len(mc_results) < 2:

        print("No successful MC runs, computing geometric uncertanties...")

        # Run the MC solutions, this time accepting all runs regardless of the cost function value
        geometric_uncert = True
        results_check_kwagrs["geometric_uncert"] = geometric_uncert
        mc_results = parallelComputeGenerator(traj_generator, _MCTrajSolve, checkMCTrajectories, mc_runs, \
            results_check_kwagrs=results_check_kwagrs, n_proc=mc_cores, max_runs=max_runs)

    # Add the original trajectory in the Monte Carlo results, if it is the one which has the best length match
    if traj.orbit.ra_g is not None:
        mc_results.append(traj)

    ##########################################################################################################

    # Break the function if there are no trajectories to process
    if len(mc_results) < 2:
        print('!!! Not enough good Monte Carlo runs for uncertaintly estimation!')
        return traj, None

    # Choose the solution with the lowest timing residuals as the best solution
    timing_res_trajs = [traj_tmp.timing_res for traj_tmp in mc_results]
    best_traj_ind = timing_res_trajs.index(min(timing_res_trajs))

    # Choose the best trajectory
    traj_best = mc_results[best_traj_ind]

    # Assign geometric uncertainty flag, if it was changed
    traj_best.geometric_uncert = geometric_uncert

    print('Computing uncertainties...')

    # Calculate the standard deviation of every trajectory parameter
    uncertainties = calcMCUncertainties(mc_results, traj_best)

    print('Computing covariance matrices...')

    # Calculate orbital and initial state vector covariance matrices (angles in degrees)
    traj_best.orbit_cov, traj_best.state_vect_cov = calcCovMatrices(mc_results)

    ### PLOT RADIANT SPREAD (Vg color and length stddev) ###
    ##########################################################################################################

    if (traj.orbit is not None) and plot_results:

        ra_g_list = np.array([traj_temp.orbit.ra_g for traj_temp in mc_results])
        dec_g_list = np.array([traj_temp.orbit.dec_g for traj_temp in mc_results])
        v_g_list = np.array([traj_temp.orbit.v_g for traj_temp in mc_results])/1000
        timing_res_list = np.array([traj_temp.timing_res for traj_temp in mc_results])

        # Color code Vg and length standard deviation
        for plt_flag in ['vg', 'time_res']:

            # Init a celestial plot
            m = CelestialPlot(ra_g_list, dec_g_list, projection='stere', bgcolor='w')

            if plt_flag == 'vg':

                # Plot all MC radiants (geocentric velocities)
                m.scatter(ra_g_list, dec_g_list, c=v_g_list, s=2)

                m.colorbar(label='$V_g$ (km/s)')

                if traj.orbit.ra_g is not None:

                    # Plot original radiant
                    m.scatter(traj.orbit.ra_g, traj.orbit.dec_g, s=20, facecolors='none', edgecolors='r')

                if traj_best.orbit.ra_g is not None:

                    # Plot MC best radiant
                    m.scatter(traj_best.orbit.ra_g, traj_best.orbit.dec_g, s=20, facecolors='none', \
                        edgecolors='g')

            elif plt_flag == 'time_res':

                timing_res_list_ms = 1000*timing_res_list

                v_min = np.min(timing_res_list_ms)
                v_max = np.max(timing_res_list_ms)

                # Determine the limits of the colorbar if there are more points
                if len(timing_res_list) > 4:
                    v_max = np.median(timing_res_list_ms) + 2*np.std(timing_res_list_ms)

                # Plot all MC radiants (length fit offsets)
                m.scatter(ra_g_list, dec_g_list, c=timing_res_list_ms, s=2, vmin=v_min, vmax=v_max)

                m.colorbar(label='Time residuals (ms)')

                # Plot original radiant
                m.scatter(traj.orbit.ra_g, traj.orbit.dec_g, s=20, facecolors='none', edgecolors='r')

                # Plot MC best radiant
                m.scatter(traj_best.orbit.ra_g, traj_best.orbit.dec_g, s=20, facecolors='none', \
                    edgecolors='g')

            plt.title('Monte Carlo - geocentric radiant')

            if traj.save_results:
                savePlot(plt, traj.file_name + '_monte_carlo_eq_' + plt_flag + '.' + traj.plot_file_type, \
                    output_dir=traj.output_dir)

            if traj.show_plots:
                plt.show()

            else:
                plt.clf()
                plt.close()

    ##########################################################################################################

    ### PLOT ORBITAL ELEMENTS SPREAD ###
    ##########################################################################################################

    if (traj.orbit is not None) and plot_results:

        a_list = np.array([traj_temp.orbit.a for traj_temp in mc_results])
        incl_list = np.array([traj_temp.orbit.i for traj_temp in mc_results])
        e_list = np.array([traj_temp.orbit.e for traj_temp in mc_results])
        peri_list = np.array([traj_temp.orbit.peri for traj_temp in mc_results])
        q_list = np.array([traj_temp.orbit.q for traj_temp in mc_results])

        fig = plt.figure()
        ax1 = fig.add_subplot(2, 2, 1)
        ax2 = fig.add_subplot(2, 2, 2, sharey=ax1)
        ax3 = fig.add_subplot(2, 2, 3)
        ax4 = fig.add_subplot(2, 2, 4, sharey=ax3)

        # Compute the number of bins
        nbins = int(np.ceil(np.sqrt(len(a_list))))
        if nbins < 10:
            nbins = 10

        # Semimajor axis vs. inclination
        ax1.hist2d(a_list, np.degrees(incl_list), bins=nbins)
        ax1.set_xlabel('a (AU)')
        ax1.set_ylabel('Inclination (deg)')
        plt.setp(ax1.get_xticklabels(), rotation=30, horizontalalignment='right')
        ax1.ticklabel_format(useOffset=False)

        # Plot the first solution and the MC solution
        if traj.orbit.a is not None:
            ax1.scatter(traj.orbit.a, np.degrees(traj.orbit.i), c='r', linewidth=1, edgecolors='w')

        if traj_best.orbit.a is not None:
            ax1.scatter(traj_best.orbit.a, np.degrees(traj_best.orbit.i), c='g', linewidth=1, edgecolors='w')

        # Plot argument of perihelion vs. inclination
        ax2.hist2d(np.degrees(peri_list), np.degrees(incl_list), bins=nbins)
        ax2.set_xlabel('peri (deg)')
        plt.setp(ax2.get_xticklabels(), rotation=30, horizontalalignment='right')
        ax2.ticklabel_format(useOffset=False)

        # Plot the first solution and the MC solution
        if traj.orbit.peri is not None:
            ax2.scatter(np.degrees(traj.orbit.peri), np.degrees(traj.orbit.i), c='r', linewidth=1, \
                edgecolors='w')

        if traj_best.orbit.peri is not None:
            ax2.scatter(np.degrees(traj_best.orbit.peri), np.degrees(traj_best.orbit.i), c='g', linewidth=1, \
                edgecolors='w')

        # NOTE: tick_params requires booleans; the old string 'off' is truthy and left the ticks on
        ax2.tick_params(
            axis='y',          # changes apply to the y-axis
            which='both',      # both major and minor ticks are affected
            left=False,        # ticks along the left edge are off
            labelleft=False)   # labels along the left edge are off

        # Plot eccentricity vs. perihelion distance
        ax3.hist2d(e_list, q_list, bins=nbins)
        ax3.set_xlabel('Eccentricity')
        ax3.set_ylabel('q (AU)')
        plt.setp(ax3.get_xticklabels(), rotation=30, horizontalalignment='right')
        ax3.ticklabel_format(useOffset=False)

        # Plot the first solution and the MC solution
        if traj.orbit.e is not None:
            ax3.scatter(traj.orbit.e, traj.orbit.q, c='r', linewidth=1, edgecolors='w')

        if traj_best.orbit.e is not None:
            ax3.scatter(traj_best.orbit.e, traj_best.orbit.q, c='g', linewidth=1, edgecolors='w')

        # Plot argument of perihelion vs. perihelion distance
        ax4.hist2d(np.degrees(peri_list), q_list, bins=nbins)
        ax4.set_xlabel('peri (deg)')
        plt.setp(ax4.get_xticklabels(), rotation=30, horizontalalignment='right')
        ax4.ticklabel_format(useOffset=False)

        # Plot the first solution and the MC solution
        if traj.orbit.peri is not None:
            ax4.scatter(np.degrees(traj.orbit.peri), traj.orbit.q, c='r', linewidth=1, edgecolors='w')

        if traj_best.orbit.peri is not None:
            ax4.scatter(np.degrees(traj_best.orbit.peri), traj_best.orbit.q, c='g', linewidth=1, \
                edgecolors='w')

        # NOTE: tick_params requires booleans; the old string 'off' is truthy and left the ticks on
        ax4.tick_params(
            axis='y',          # changes apply to the y-axis
            which='both',      # both major and minor ticks are affected
            left=False,        # ticks along the left edge are off
            labelleft=False)   # labels along the left edge are off

        plt.tight_layout()
        plt.subplots_adjust(wspace=0)

        if traj.save_results:
            savePlot(plt, traj.file_name + '_monte_carlo_orbit_elems.' + traj.plot_file_type,
                output_dir=traj.output_dir)

        if traj.show_plots:
            plt.show()

        else:
            plt.clf()
            plt.close()

    ##########################################################################################################

    return traj_best, uncertainties
def copyUncertainties(traj_source, traj_target, copy_mc_traj_instances=False):
    """ Copy uncertainties from one trajectory to the other.

    Arguments:
        traj_source: [Trajectory object] Trajectory object with uncertainties.
        traj_target: [Trajectory object] Trajectory object to which uncertainties will be copied.

    Keyword arguments:
        copy_mc_traj_instances: [bool] Copy all trajectory instances generated during the MC procedure.
            This will make the trajectory pickle file very large. False by default.

    Return:
        traj_target: [Trajectory object] Target trajectory object with copied uncertainties.
    """

    # Deep-copy the covariance matrices so the target owns independent data
    traj_target.orbit_cov = copy.deepcopy(traj_source.orbit_cov)
    traj_target.state_vect_cov = copy.deepcopy(traj_source.state_vect_cov)

    # Deep-copy the uncertainties container itself
    traj_target.uncertainties = copy.deepcopy(traj_source.uncertainties)

    # Unless explicitly requested, drop the per-run trajectory instances to keep pickle files small
    if not copy_mc_traj_instances and (traj_target.uncertainties is not None):

        del traj_target.uncertainties.mc_traj_list
        traj_target.uncertainties.mc_traj_list = []

    return traj_target
@njit
def applyGravityDrop(eci_coord, t, r0, gravity_factor, vz):
    """ Correct ECI coordinates of the meteor for the drop caused by gravity.

    Given the ECI position of the meteor and the duration of flight, the drop from a straight-line
    trajectory is computed and subtracted along the radial (Earth-centre) direction. Because the
    gravitational acceleration changes with height, the model assumes a constant vertical velocity
    component to derive the modified drop equation.

    Arguments:
        eci_coord: [ndarray] (x, y, z) ECI coordinates of the meteor at the given time t (meters).
        t: [float] Time of meteor since the beginning of the trajectory.
        r0: [float] Distance from the centre of the Earth of the beginning of the meteor.
        gravity_factor: [float] Factor by which the gravity drop will be multiplied.
        vz: [float] Vertical component of the meteor's velocity.
    """

    # Mass of the Earth (kg)
    m_earth = 5.9722e24

    # Preserve the sign of the time so the drop is applied in the correct direction before/after t = 0
    t_sign = np.sign(t)

    # Guard against division by zero for a degenerate radius
    if r0 == 0:
        r0 = 1e-10

    if abs(vz) < 100:

        # The constant-vz drop model is numerically unstable for small vertical velocities, so fall
        #   back to the classical constant-acceleration drop
        g_acc = G*m_earth/r0**2
        drop_dist = t_sign*(1.0/2)*g_acc*t**2

    else:

        # Denominator of the constant vertical velocity drop model; guard against zero
        denom = r0 + vz*t
        if denom == 0:
            denom = 1e-10

        # Drop derived under the assumption of a constant vertical velocity component
        drop_dist = t_sign*(G*m_earth/vz**2)*(r0/denom + np.log(denom/r0) - 1)

    # Pull the position towards the centre of the Earth along the radial unit vector
    return eci_coord - gravity_factor*drop_dist*vectNorm(eci_coord)
def generateTrajectoryID(traj):
    """ Construct a unique trajectory identifier from the solution parameters.

    Arguments:
        traj: [Trajectory object]

    Return:
        traj_id: [str] Trajectory ID in the YYYYMMDDHHMMSS_#hash format.
    """

    # Format the reference time as a compact timestamp
    timestamp = jd2Date(traj.jdt_ref, dt_obj=True).strftime("%Y%m%d%H%M%S")

    # Serialize the solution parameters which feed the hash
    hash_parts = [
        "{:.10f}".format(traj.jdt_ref),
        str(traj.jdt_ref),
        str(traj.rbeg_lat),
        str(traj.rbeg_lon),
        str(traj.rbeg_ele),
        str(traj.rend_lat),
        str(traj.rend_lon),
        str(traj.rend_ele),
    ]
    hashable_string = "".join(hash_parts)

    # MD5-hash the serialized parameters and keep the first 5 characters of the urlsafe base64 digest
    digest = hashlib.md5(hashable_string.encode()).digest()
    hash_str = base64.urlsafe_b64encode(digest).decode()[:5]

    # Map out characters which could cause trouble downstream (single C-level pass)
    hash_str = hash_str.translate(str.maketrans(";,_-", "0123"))

    return timestamp + "_" + hash_str
def addTrajectoryID(traj):
    """ Assign a trajectory ID to the given trajectory if one is not already present.

    Note that a missing ID may be stored either as None or as the string "None" (the constructor
    stringifies the traj_id argument).
    """

    # Only generate a new ID when the existing one is missing
    if traj.traj_id in (None, "None"):
        traj.traj_id = generateTrajectoryID(traj)

    return traj
class Trajectory(object):
""" Meteor trajectory solver designed at the University of Western Ontario.
The solver makes a first estimate using the Ceplecha (1987) plane intersection approach, then refines the
solution my miniming the angles between the observed lines of sight and the radiant line. The best
solution is found by adding noise to original measurements and doing Monte Carlo runs to find the
trajectory whose deceleratioins and velocity profiles match the best, as seen from individual stations.
The initial velocity is estimated from time vs. length by iteratively fitting a line to it and choosing
the solution with the lowest standard deviation, which should correspond to the part of the trajectory
before the meteor stared to decelerate.
"""
    def __init__(self, jdt_ref, output_dir='.', max_toffset=None, meastype=4, verbose=True, v_init_part=None,\
        v_init_ht=None, estimate_timing_vel=True, monte_carlo=True, mc_runs=None, mc_pick_multiplier=1, \
        mc_noise_std=1.0, geometric_uncert=False, filter_picks=True, calc_orbit=True, show_plots=True, \
        show_jacchia=False, save_results=True, gravity_correction=True, gravity_factor=1.0, \
        plot_all_spatial_residuals=False, plot_file_type='png', traj_id=None, reject_n_sigma_outliers=3,
        mc_cores=None, fixed_times=None, mc_runs_max=None):
        """ Init the Ceplecha trajectory solver.

        Arguments:
            jdt_ref: [float] Reference Julian date for the measurements. All provided times should be given
                relative to this number. This is user selectable and can be the time of the first camera, or
                the first measurement, or some average time for the meteor, but should be close to the time of
                the meteor. This same reference date/time will be used on all camera measurements for the
                purposes of computing local sidereal time and making geocentric coordinate transformations,
                thus it is good that this time corresponds to the beginning of the meteor.

        Keyword arguments:
            output_dir: [str] Path to the output directory where the Trajectory report and 'pickled' object
                will be stored.
            max_toffset: [float] Maximum allowed time offset between cameras in seconds (default 1 second).
            meastype: [float] Flag indicating the type of angle measurements the user is providing for meas1
                and meas2 below. The following are all in radians:
                    1 = Right Ascension for meas1, declination for meas2, NOTE: epoch of date, NOT J2000!
                    2 = Azimuth +east of due north for meas1, Elevation angle above the horizon for meas2
                    3 = Azimuth +west of due south for meas1, Zenith angle for meas2
                    4 = Azimuth +north of due east for meas1, Zenith angle for meas2
            verbose: [bool] Print out the results and status messages, True by default.
            v_init_part: [float] Fixed part from the beginning of the meteor on which the automated initial
                velocity estimation using the sliding fit will start. Default is 0.25 (25%), but for noisier
                data this might be bumped up to 0.5.
            v_init_ht: [float] If given, the initial velocity will be estimated as the average velocity
                above the given height in kilometers using data from all stations. None by default, in which
                case the initial velocity will be estimated using the automated sliding fit.
            estimate_timing_vel: [bool/str] Try to estimate the difference in timing and velocity. True by
                default. A string with the list of fixed time offsets can also be given, e.g.
                "CA001A":0.42,"CA0005":-0.3.
            monte_carlo: [bool] Runs Monte Carlo estimation of uncertainties. True by default.
            mc_runs: [int] Number of Monte Carlo runs. The default value is the number of observed points.
            mc_pick_multiplier: [int] Number of MC samples that will be taken for every point. 1 by default.
            mc_noise_std: [float] Number of standard deviations of measurement noise to add during Monte
                Carlo estimation.
            geometric_uncert: [bool] If True, all MC runs will be taken to estimate the uncertainty, not just
                the ones with the better cost function value than the pure geometric solution. Use this when
                the lag is not reliable. False by default.
            filter_picks: [bool] If True (default), picks which deviate more than 3 sigma in angular residuals
                will be removed, and the trajectory will be recalculated.
            calc_orbit: [bool] If True, the orbit is calculated as well. True by default.
            show_plots: [bool] Show plots of residuals, velocity, lag, meteor position. True by default.
            show_jacchia: [bool] Show the Jacchia fit on the plot with meteor dynamics. False by default.
            save_results: [bool] Save results of trajectory estimation to disk. True by default.
            gravity_correction: [bool] Apply the gravity drop when estimating trajectories. True by default.
            gravity_factor: [float] Gravity correction factor. 1.0 by default (full correction).
                Can be between 0 - 1. Lower values used for lift compensation.
            plot_all_spatial_residuals: [bool] Plot all spatial residuals on one plot (one vs. time, and
                the other vs. length). False by default.
            plot_file_type: [str] File extension of the plot image. 'png' by default, can be 'pdf', 'eps', ...
            traj_id: [str] Trajectory solution identifier. None by default.
            reject_n_sigma_outliers: [float] Reject angular outliers that are n sigma outside the fit.
                This value is 3 (sigma) by default.
            mc_cores: [int] Number of CPU cores to use for Monte Carlo parallel processing. None by default,
                which means that all cores will be used.
            fixed_times: [dict] Dictionary of fixed times for each station. None by default, meaning that
                all stations will be estimated. Only used if estimate_timing_vel is True.
            mc_runs_max: [int] Maximum number of Monte Carlo runs. None by default, which will limit the runs
                to 10x req_num.
        """

        # All time data must be given relative to this Julian date
        self.jdt_ref = jdt_ref

        # Measurement type (see the docstring above for the meaning of each value)
        self.meastype = meastype

        # Directory where the trajectory estimation results will be saved
        self.output_dir = output_dir

        # Maximum time offset between cameras (default 1 second)
        if max_toffset is None:
            max_toffset = 1.0
        self.max_toffset = max_toffset

        # If verbose True, results and status messages will be printed out, otherwise they will be supressed
        self.verbose = verbose

        # Fixed part from the beginning of the meteor on which the initial velocity estimation using the
        #   sliding fit will start (default 25%)
        if v_init_part is None:
            v_init_part = 0.25
        self.v_init_part = v_init_part

        # (Optional) Height in kilometers above which points will be taken for estimating the initial
        #   velocity (linear fit)
        self.v_init_ht = v_init_ht

        # Estimating the difference in timing between stations, and the initial velocity if this flag is True.
        #   If a string was given instead of a bool, it is parsed as a list of fixed per-station offsets and
        #   timing estimation is disabled.
        self.fixed_time_offsets = {}
        if isinstance(estimate_timing_vel, str):

            # If a list of fixed timing offsets was given, parse it into a dictionary
            for entry in estimate_timing_vel.split(','):
                station, offset = entry.split(":")
                self.fixed_time_offsets[station] = float(offset)

            print("Fixed timing given:", self.fixed_time_offsets)

            # Fixed offsets given, so do not estimate the timing
            self.estimate_timing_vel = False

        elif isinstance(estimate_timing_vel, bool):
            self.estimate_timing_vel = estimate_timing_vel

        else:
            # Any other type falls back to the default behaviour of estimating the timing
            self.estimate_timing_vel = True

        # Extract the fixed times from the fixed time offsets (string form parsed the same way as above)
        self.fixed_times = fixed_times
        if isinstance(estimate_timing_vel, bool) and isinstance(self.fixed_times, str):
            if estimate_timing_vel:
                self.fixed_time_offsets = {}
                for entry in self.fixed_times.split(','):
                    station, offset = entry.split(":")
                    self.fixed_time_offsets[station] = float(offset)

        # Running Monte Carlo simulations to estimate uncertainties
        self.monte_carlo = monte_carlo

        # Number of Monte Carlo runs
        self.mc_runs = mc_runs

        # Maximum number of Monte Carlo runs, in case the MC runs have to be repeated many times
        self.mc_runs_max = mc_runs_max

        # Number of MC samples that will be taken for every point
        self.mc_pick_multiplier = mc_pick_multiplier

        # Standard deviations of measurement noise to add during Monte Carlo runs
        self.mc_noise_std = mc_noise_std

        # If True, pure geometric uncertainties will be computed and culling of solutions based on cost
        #   function value will not be done
        self.geometric_uncert = geometric_uncert

        # Filter bad picks (ones that deviate more than 3 sigma in angular residuals) if this flag is True
        self.filter_picks = filter_picks

        # Calculate orbit if True
        self.calc_orbit = calc_orbit

        # If True, plots will be shown on screen when the trajectory estimation is done
        self.show_plots = show_plots

        # Show Jacchia fit on dynamics plots
        self.show_jacchia = show_jacchia

        # Save results to disk if true
        self.save_results = save_results

        # Apply the correction for gravity when estimating the trajectory
        self.gravity_correction = gravity_correction

        # Gravity correction factor, clamped to the 0 - 1 range
        self.gravity_factor = gravity_factor
        if self.gravity_factor < 0:
            self.gravity_factor = 0
        elif self.gravity_factor > 1:
            self.gravity_factor = 1

        # Plot all spatial residuals on one plot
        self.plot_all_spatial_residuals = plot_all_spatial_residuals

        # Image file type for the plot
        self.plot_file_type = plot_file_type

        # Trajectory solution identifier (note: str(None) == "None" is handled by addTrajectoryID)
        self.traj_id = str(traj_id)

        # n sigma outlier rejection
        self.reject_n_sigma_outliers = reject_n_sigma_outliers

        # Number of CPU cores to be used for MC
        self.mc_cores = mc_cores

        ######################################################################################################

        # Construct a file name for this event
        self.generateFileName()

        # Counts from how many observations are given from the beginning (start from 1)
        # NOTE: This should not the used as the number of observations, use len(traj.observations) instead!
        self.meas_count = 1

        # List of observations
        self.observations = []

        # Minimization status - if True if LoS angle minimization is successfull, False otherwise
        self.los_mini_status = False

        # Index of the station with the reference time
        self.t_ref_station = 0

        # Final estimate of timing offsets between stations
        self.time_diffs_final = None

        # List of plane intersections
        self.intersection_list = None

        # Coordinates of the first point
        self.rbeg_lat = None
        self.rbeg_lon = None
        self.rbeg_ele = None
        self.rbeg_ele_wgs84 = None
        self.rbeg_jd = None

        # Coordinates of the end point
        self.rend_lat = None
        self.rend_lon = None
        self.rend_ele = None
        self.rend_ele_wgs84 = None
        self.rend_jd = None

        # Coordinates of the lowest point (used for grazers)
        self.htmin_lat = None
        self.htmin_lon = None
        self.htmin_ele = None
        self.htmin_ele_wgs84 = None
        self.htmin_jd = None

        # Intersecting planes state vector
        self.state_vect = None

        # Angles (radians) between the trajectory and the station, looking from the state vector determined
        #   by intersecting planes
        self.incident_angles = []

        # Initial state vector (minimization)
        self.state_vect_mini = None

        # Radiant in ECI and equatorial coordinates (minimization)
        self.radiant_eci_mini = None
        self.radiant_eq_mini = None

        # Calculated initial velocity
        self.v_init = None

        # V0z (vertical component of the initial velocity)
        self.v0z = None

        # Calculated average velocity
        self.v_avg = None

        # Status of timing minimization
        self.timing_minimization_successful = False

        # Fit to the best portion of time vs. length
        self.velocity_fit = None

        # Jacchia fit parameters for all observations combined
        self.jacchia_fit = None

        # Cost function value of the time vs. state vector distance fit
        self.timing_res = None

        # Standard deviation of all time differences between individual stations
        self.timing_stddev = -1.0

        # Average position of the meteor
        self.state_vect_avg = None

        # Average JD of the meteor
        self.jd_avg = None

        # Orbit object which contains orbital parameters
        self.orbit = None

        # Uncertainties calculated using Monte Carlo
        self.uncertainties = None
        # NOTE(review): misspelled alias of the attribute above, presumably kept for backward compatibility
        #   with old pickle files - confirm before removing
        self.uncertanties = self.uncertainties

        # Orbital covariance matrix (angles in degrees)
        self.orbit_cov = None

        # Initial state vector covariance matrix
        self.state_vect_cov = None

        # Flag to indicate whether only phase 1 solving has been run
        #   (default False to handle existing trajectory data)
        self.phase_1_only = False
def generateFileName(self):
""" Generate a file name for saving results using the reference julian date. """
self.file_name = jd2Date(self.jdt_ref, dt_obj=True).strftime('%Y%m%d_%H%M%S')
def infillTrajectory(self, meas1, meas2, time_data, lat, lon, ele, station_id=None, excluded_time=None,
ignore_list=None, magnitudes=None, fov_beg=None, fov_end=None, obs_id=None, comment='', ignore_station=False):
""" Initialize a set of measurements for a given station.
Arguments:
meas1: [list or ndarray] First measurement array (azimuth or R.A., depending on meastype, see
meastype documentation for more information). Measurements should be given in radians.
meas2: [list or ndarray] Second measurement array (altitude, zenith angle or declination,
depending on meastype, see meastype documentation for more information), in radians.
time_data: [list or ndarray] Time in seconds from the reference Julian date.
lat: [float] WGS84 latitude +N of station in radians.
lon: [float] WGS84 longitude +E of station in radians.
ele: [float] EGS96 geoidal elevation of station in meters (not the height above the WGS84
ellipsoid!).
Keyword arguments:
station_id: [str] Identification of the station. None by default.
excluded_time: [list] A range of minimum and maximum observation time which should be excluded
from the optimization because the measurements are missing in that portion of the time.
ignore_list: [list or ndarray] A list of 0s and 1s which should be of the equal length as
the input data. If a particular data point is to be ignored, number 1 should be put,
otherwise (if the point should be used) 0 should be used. E.g. the this should could look
like this: [0, 0, 0, 1, 1, 0, 0], which would mean that the fourth and the fifth points
will be ignored in trajectory estimation.
magnitudes: [list] A list of apparent magnitudes of the meteor. None by default.
fov_beg: [bool] True if the meteor began inside the FOV, False otherwise. None by default.
fov_end: [bool] True if the meteor ended inside the FOV, False otherwise. None by default.
obs_id: [int] Unique ID of the observation. This is to differentiate different observations from
the same station.
comment: [str] A comment about the observations. May be used to store RMS FF file number on which
the meteor was observed.
Return:
None
"""
# If station ID was not given, assign it a name
if station_id is None:
station_id = self.meas_count
# If the station id already exists, add a suffix _2, _3, etc.
if str(station_id) in [str(x.station_id) for x in self.observations]:
# Find if there are already stations with suffixes
suffixes = []
for obs in self.observations:
if obs.station_id.startswith(str(station_id) + '_'):
suffixes.append(int(obs.station_id.split('_')[-1]))
# If there are no suffixes, add _2
if len(suffixes) == 0:
station_id = str(station_id) + '_2'
# If there are suffixes, add the next one
else:
station_id = str(station_id) + '_' + str(np.max(suffixes) + 1)
# If obs_id was not given, assign it
if obs_id is None:
obs_id = self.meas_count
# Convert measuremet lists to numpy arrays
meas1 = np.array(meas1)
meas2 = np.array(meas2)
time_data = np.array(time_data)
# Add a fixed offset to time data if given
if str(station_id) in self.fixed_time_offsets:
time_data += self.fixed_time_offsets[str(station_id)]
# Skip the observation if all points were ignored
if ignore_list is not None:
if np.all(ignore_list):
print('All points from station {:s} are ignored, not using this station in the solution!'.format(station_id))
# Init a new structure which will contain the observed data from the given site
obs = ObservedPoints(self.jdt_ref, meas1, meas2, time_data, lat, lon, ele, station_id=station_id,
meastype=self.meastype, excluded_time=excluded_time, ignore_list=ignore_list, ignore_station=ignore_station,
magnitudes=magnitudes, fov_beg=fov_beg, fov_end=fov_end, obs_id=obs_id, comment=comment)
# Add observations to the total observations list
self.observations.append(obs)
self.meas_count += 1
def infillWithObs(self, obs, meastype=None):
""" Infill the trajectory with already initialized ObservedPoints object.
Arguments:
obs: [ObservedPoints] Instance of ObservedPoints.
Keyword arguments:
meastype: [int] Measurement type. If not given, it will be read from the trajectory object.
"""
if meastype is None:
meas1 = obs.meas1
meas2 = obs.meas2
# If inputs were RA and Dec
elif meastype == 1:
meas1 = obs.ra_data
meas2 = obs.dec_data
# If inputs were azimuth +east of due north, and elevation angle
elif meastype == 2:
meas1 = obs.azim_data
meas2 = obs.elev_data
# If inputs were azimuth +west of due south, and zenith angle
elif meastype == 3:
meas1 = (obs.azim_data + np.pi)%(2*np.pi)
meas2 = np.pi/2.0 - obs.elev_data
# If input were azimuth +north of due east, and zenith angle
elif meastype == 4:
meas1 = (np.pi/2.0 - obs.azim_data)%(2*np.pi)
meas2 = np.pi/2.0 - obs.elev_data
### PRESERVE COMPATBILITY WITH OLD obs OBJECTS ###
# Check if the observation had any excluded points
if hasattr(obs, 'excluded_time'):
excluded_time = obs.excluded_time
else:
excluded_time = None
# Check if it has the ignore list argument
if hasattr(obs, 'ignore_list'):
ignore_list = obs.ignore_list
# If the ignore list differens in length from time data, reinit
if len(ignore_list) != len(obs.time_data):
ignore_list = np.zeros(len(obs.time_data), dtype=np.uint8)
else:
ignore_list = np.zeros(len(obs.time_data), dtype=np.uint8)
# Check for apparent magnitudes
if hasattr(obs, 'magnitudes'):
magnitudes = obs.magnitudes
else:
magnitudes = None
# Check if the observation object has FOV beg/end flags
if not hasattr(obs, 'fov_beg'):
obs.fov_beg = None
if not hasattr(obs, 'fov_end'):
obs.fov_end = None
# Check if the observation object has obs_id argument
if not hasattr(obs, 'obs_id'):
obs.obs_id = None
# Check if the observation object as the comment entry
if not hasattr(obs, 'comment'):
obs.comment = ''
### ###
self.infillTrajectory(meas1, meas2, obs.time_data, obs.lat, obs.lon, obs.ele, \
station_id=obs.station_id, excluded_time=excluded_time, ignore_list=ignore_list, \
magnitudes=magnitudes, fov_beg=obs.fov_beg, fov_end=obs.fov_end, obs_id=obs.obs_id, \
comment=obs.comment, ignore_station=obs.ignore_station)
def calcAllResiduals(self, state_vect, radiant_eci, observations):
""" Calculate horizontal and vertical residuals for all observed points.
The residuals are calculated from the closest point on the line of sight to the point of the
radiant line.
Arguments:
state_vect: [ndarray] (x, y, z) ECI coordinates of the initial state vector (meters).
radiant_eci: [ndarray] (x, y, z) components of the unit radiant direction vector.
observations: [list] A list of ObservationPoints objects which hold measurements from individual
stations.
"""
# Go though observations from all stations
for obs in observations:
# Init empty lists for residuals
obs.h_residuals = []
obs.v_residuals = []
# Go through all individual position measurement from each site
for t, jd, stat, meas in zip(obs.time_data, obs.JD_data, obs.stat_eci_los, obs.meas_eci_los):
# Calculate horizontal and vertical residuals
hres, vres = calcSpatialResidual(self.jdt_ref, jd, state_vect, radiant_eci, stat, meas, \
gravity=self.gravity_correction, gravity_factor=self.gravity_factor, v0z=self.v0z)
# Add residuals to the residual list
obs.h_residuals.append(hres)
obs.v_residuals.append(vres)
# Convert residual lists to numpy arrays
obs.h_residuals = np.array(obs.h_residuals)
obs.v_residuals = np.array(obs.v_residuals)
# Calculate RMSD of both residuals
obs.h_res_rms = RMSD(obs.h_residuals[obs.ignore_list == 0])
obs.v_res_rms = RMSD(obs.v_residuals[obs.ignore_list == 0])
# Calculate the angular residuals from the radiant line, with the gravity drop taken care of
obs.ang_res = angleBetweenSphericalCoords(obs.elev_data, obs.azim_data, obs.model_elev, \
obs.model_azim)
# Calculate the standard deviaton of angular residuals in radians, taking the ignored points into
# account
if not obs.ignore_station:
obs.ang_res_std = RMSD(obs.ang_res[obs.ignore_list == 0])
else:
# Compute RMSD for all points if the station is ignored
obs.ang_res_std = RMSD(obs.ang_res)
    def calcVelocity(self, state_vect, radiant_eci, observations, weights, calc_res=False):
        """ Calculates point to point velocity for the given solution, as well as the average velocity
            including all previous points up to the given point.

        Also computes, for every observation, the length along the trail (from the first point) and the
        distance from the state vector, which later methods (lag, timing residuals) rely on.

        Arguments:
            state_vect: [ndarray] (x, y, z) ECI coordinates of the initial state vector (meters).
            radiant_eci: [ndarray] (x, y, z) components of the unit radiant direction vector.
            observations: [list] A list of ObservationPoints objects which hold measurements from individual
                stations.
            weights: [list] A list of statistical weights for every station.

        Keyword arguments:
            calc_res: [bool] If True, the cost of lag residuals will be calculated. The timing offsets first
                need to be calculated for this to work.
        """

        # Go through observations from all stations
        for obs in observations:

            # List of distances from the first trajectory point on the radiant line
            first_pt_distances = []

            # List of distances from the state vector
            state_vect_dist = []

            # Go through all individual position measurement from each site
            for i, (stat, meas) in enumerate(zip(obs.stat_eci_los, obs.meas_eci_los)):

                # Calculate closest points of approach (observed line of sight to radiant line)
                obs_cpa, rad_cpa, d = findClosestPoints(stat, meas, state_vect, radiant_eci)

                # Take the position of the first point as the reference point
                if i == 0:
                    ref_point = np.copy(rad_cpa)

                # Calculate the distance from the first observed point to the projected point on the radiant line
                dist = vectMag(ref_point - rad_cpa)
                first_pt_distances.append(dist)

                # Distance from the state vector to the projected point on the radiant line
                state_vect_dist.append(vectMag(state_vect - rad_cpa))

            # Convert the distances (length along the trail) into a numpy array
            obs.length = np.array(first_pt_distances)
            obs.state_vect_dist = np.array(state_vect_dist)

            ### Calculate average velocity including all points up to the given point ###

            velocities_prev_point = []
            for i, (t, l) in enumerate(zip(obs.time_data, obs.length)):

                # For the first 4 points compute the velocity using the first 4 points
                if i < 4:
                    time_part = obs.time_data[:4]
                    len_part = obs.length[:4]

                # Otherwise include all points up to the current point
                else:
                    time_part = obs.time_data[: i+1]
                    len_part = obs.length[: i+1]

                # If there are NaNs or infs, drop them
                filter_mask = np.logical_not(np.isnan(time_part) | np.isinf(time_part) | np.isnan(len_part) \
                    | np.isinf(len_part))
                time_part = time_part[filter_mask]
                len_part = len_part[filter_mask]

                if len(time_part) > 1:

                    # Fit a line through time vs. length data; the slope is the average velocity
                    try:
                        popt, _ = scipy.optimize.curve_fit(lineFunc, time_part, len_part)

                    # Check for this fit error, which happens extrememly rarely:
                    # RuntimeError: Optimal parameters not found: gtol=0.000000 is too small, func(x) is
                    #   orthogonal to the columns of the Jacobian to machine precision.
                    except RuntimeError:
                        print("A velocity fit failed with a RuntimeError, skipping this iteration.")
                        popt = [np.nan]

                    velocities_prev_point.append(popt[0])

                else:
                    # If there are no good points to estimate the velocity on, use NaN
                    velocities_prev_point.append(np.nan)

            obs.velocities_prev_point = np.array(velocities_prev_point)

            ### ###

            ### Length vs. time
            # plt.plot(obs.state_vect_dist, obs.time_data, marker='x', label=str(obs.station_id), zorder=3)
            ##########

            ### Calculate point to point velocities ###

            # Shift the radiant distances one element down (for difference calculation)
            dists_shifted = np.r_[0, obs.length][:-1]

            # Calculate distance differences from point to point (first is always 0)
            dists_diffs = obs.length - dists_shifted

            # Shift the time one element down (for difference calculation)
            time_shifted = np.r_[0, obs.time_data][:-1]

            # Calculate the time differences from point to point
            time_diffs = obs.time_data - time_shifted

            # Replace zeros in time by machine precision value to avoid division by zero errors
            time_diffs[time_diffs == 0] = np.finfo(np.float64).eps

            # Calculate velocity for every point
            obs.velocities = dists_diffs/time_diffs

            ### ###

        # plt.ylabel('Time (s)')
        # plt.xlabel('Distance from state vector (m)')
        # plt.gca().invert_yaxis()
        # plt.legend()
        # plt.grid()
        # plt.savefig('mc_time_offsets.' + self.plot_file_type, dpi=300)
        # plt.show()

        if calc_res:

            # Because the timing offsets have already been applied, the timing offsets are 0
            zero_timing_res = np.zeros(len(self.observations))

            # Calculate the timing offset between the meteor time vs. length
            #   (only the first call stores timing_res; the stddev is always recomputed)
            if self.timing_res is None:
                self.timing_res = timingResiduals(zero_timing_res, self.observations, self.stations_time_dict,
                    weights=weights)

            self.timing_stddev = timingResiduals(zero_timing_res, self.observations, self.stations_time_dict,
                weights=weights, ret_stddev=True)
def calcAvgVelocityAboveHt(self, observations, bottom_ht, weights):
""" Calculate the average velocity of all points above a given height.
Arguments:
observations: [list] A list of ObservationPoints objects which hold measurements from individual
stations.
bottom_ht: [float] Height above which points will be used to compute the average velocity (m).
weights: [list] A list of statistical weights for every station.
Return:
(v_ht_avg, intercept):
v_ht_avg: [float] Average velocity above the given height (m/s).
intercept: [float] Fit intercept (m).
"""
# Maker sure weight values are OK
weights = checkWeights(observations, weights)
# Construct arrays of times vs. distance from state vector
all_times = []
all_state_vect_dists = []
all_inv_weights = []
for obs, w in zip(observations, weights):
# Skip ignored stations
if obs.ignore_station:
continue
# Skip stations with weight 0
if w <= 0:
continue
for t, sv_dist, ht, ignore in zip(obs.time_data, obs.state_vect_dist, obs.model_ht, \
obs.ignore_list):
# Skip ignored points
if ignore:
continue
# Skip heights below the given height
if ht < bottom_ht:
continue
all_times.append(t)
all_state_vect_dists.append(sv_dist)
all_inv_weights.append(1.0/w)
# If there are less than 4 points, don't estimate the initial velocity this way!
if len(all_times) < 4:
print('!!! Error, there are less than 4 points for velocity estimation above the given height of {:.2f} km!'.format(bottom_ht/1000))
print('Using automated velocity estimation with the sliding fit...')
return None, None
# Fit a line through the time vs. state vector distance data
line_params, _ = scipy.optimize.curve_fit(lineFunc, all_times, all_state_vect_dists, \
sigma=all_inv_weights)
return line_params
def calcLag(self, observations, velocity_fit=None):
""" Calculate lag by fitting a line to the first part of the points and subtracting the line from the
length along the trail.
Arguments:
observations: [list] A list of ObservationPoints objects which hold measurements from individual
stations.
Keyword arguments:
velocity_fit: [tuple of float] Initial velocity and fit intercept (m/s and m). None by defualt.
"""
# Go through observations from all stations
for obs in observations:
if velocity_fit is None:
# Fit a line to the first part of the points
init_part_size = int(self.v_init_part*len(obs.time_data))
# If the size is smaller than 4 points, take all point
if init_part_size < 4:
init_part_size = len(obs.time_data)
# Cut the length and time to the first quarter
quart_length = obs.length[:init_part_size]
quart_time = obs.time_data[:init_part_size]
# Fit a line to the data, estimate the velocity
try:
obs.lag_line, _ = scipy.optimize.curve_fit(lineFunc, quart_time, quart_length)
# Handle this error:
# RuntimeError: Optimal parameters not found: gtol=0.000000 is too small, func(x) is orthogonal to the columns of
# the Jacobian to machine precision.
except RuntimeError:
obs.lag_line = [0, 0]
# Calculate lag
obs.lag = obs.length - lineFunc(obs.time_data, *obs.lag_line)
else:
obs.lag_line = list(velocity_fit)
# Calculate lag
obs.lag = obs.state_vect_dist - lineFunc(obs.time_data, *obs.lag_line)
# Initial velocity is the slope of the fitted line
obs.v_init = obs.lag_line[0]
def fitJacchiaLag(self, observations):
""" Fit an exponential model proposed by Jacchia (1955) to the lag. """
# Go through observations from all stations and do a per station fit
for obs in observations:
# Initial parameters
p0 = np.zeros(2)
try:
obs.jacchia_fit, _ = scipy.optimize.curve_fit(jacchiaLagFunc, obs.time_data, obs.lag, p0=p0)
# If the maximum number of iterations have been reached, skip Jacchia fitting
except RuntimeError:
obs.jacchia_fit = p0
# Force the parameters to be positive
obs.jacchia_fit = np.abs(obs.jacchia_fit)
if self.verbose:
print('Jacchia fit params for station:', obs.station_id, ':', obs.jacchia_fit)
# Get the time and lag points from all sites
time_all = np.hstack([obs.time_data[obs.ignore_list == 0] for obs in self.observations \
if not obs.ignore_station])
lag_all = np.hstack([obs.lag[obs.ignore_list == 0] for obs in self.observations \
if not obs.ignore_station])
time_lag = np.c_[time_all, lag_all]
# Sort by time
time_lag = time_lag[time_lag[:, 0].argsort()]
# Unpack all data points sorted by time
time_all, lag_all = time_lag.T
# Do a Jacchia function fit on the collective lag
p0 = np.zeros(2)
try:
jacchia_fit, _ = scipy.optimize.curve_fit(jacchiaLagFunc, time_all, lag_all, p0=p0)
# If the maximum number of iterations have been reached, skip Jacchia fitting
except RuntimeError:
jacchia_fit = p0
return jacchia_fit
def estimateTimingAndVelocity(self, observations, weights, estimate_timing_vel=True):
    """ Estimates time offsets between the stations by matching time vs. distance from state vector.

    The initial velocity is calculated by iteratively fitting a line from the beginning to 20% of the
    total trajectory, and up to the 80% of the total trajectory. The fit with the lowest standard
    deviation is chosen to represent the initial velocity.

    Arguments:
        observations: [list] A list of ObservationPoints objects which hold measurements from individual
            stations.
        weights: [list] A list of statistical weights for every station.

    Keyword arguments:
        estimate_timing_vel: [bool] If True (default), the time differences and the velocity will be
            estimated, otherwise the velocity will be estimated as the median velocity.

    Return:
        (fit_success, velocity_fit, v_init_mini, vel_stddev, time_diffs, observations): [tuple]
            fit_success: [bool] True if timing minimization was successful, False otherwise.
            velocity_fit: [tuple] (slope, intercept) tuple of a line fit on the time vs. length data.
            v_init_mini: [float] Estimated initial velocity in m/s.
            vel_stddev: [float] Standard deviation of the initial velocity estimate in m/s.
            time_diffs: [ndarray] Estimated time offsets from individual stations.
            observations: [list] A list of ObservationPoints objects which hold measurements from
                individual stations. These objects are modified during timing estimations.
    """

    # Take the initial velocity as the median velocity between all sites
    v_init_list = np.array([obs.v_init for obs in observations if obs.v_init is not None])
    v_init = np.median(v_init_list)

    vel_stddev = 0

    # Timing differences which will be calculated
    time_diffs = np.zeros(len(observations))

    # Early exit: without timing estimation, return the median velocity and zero offsets
    if not estimate_timing_vel:
        return True, np.zeros(2), v_init, 0, time_diffs, observations

    # Run timing offset estimation if it needs to be done
    if estimate_timing_vel:

        # Make a dictionary of station IDs and the time offset estimation status
        # - if the time is fixed, a number is given
        # - if the time is to be estimated, True is set
        self.stations_time_dict = collections.OrderedDict()
        station_list = [str(obs.station_id) for obs in observations]
        for obs in observations:
            if str(obs.station_id) in self.fixed_time_offsets:
                self.stations_time_dict[str(obs.station_id)] = self.fixed_time_offsets[str(obs.station_id)]
            else:
                self.stations_time_dict[str(obs.station_id)] = True

        # If no fixed times are given, set the station with the longest track as the reference station
        # (time difference = 0)
        if len(self.fixed_time_offsets) == 0:
            obs_points = [obs.kmeas for obs in observations]
            ref_index = obs_points.index(max(obs_points))
            self.stations_time_dict[station_list[ref_index]] = 0

        # Generate an initial guess for stations which have no fixed time
        p0 = np.zeros(len([val for val in self.stations_time_dict.values() if val is True]))

        if self.verbose:
            print('Initial function evaluation:', timingResiduals(p0, observations,
                self.stations_time_dict, weights=weights))

        # Set bounds for timing to +/- given maximum time offset
        bounds = []
        for i in range(len(p0)):
            bounds.append([-self.max_toffset, self.max_toffset])

        ### Try different methods of optimization until it is successful ##

        # If there are more than 5 stations, use the advanced L-BFGS-B method by default
        if len(self.observations) >= 5:
            methods = [None]
            maxiter_list = [15000]
        else:
            # If there are less than 5, try faster methods first
            methods = ['SLSQP', 'TNC', None]
            maxiter_list = [1000, None, 15000]

        # Try different methods to minimize timing residuals
        for opt_method, maxiter in zip(methods, maxiter_list):

            # Run the minimization of residuals between all stations
            timing_mini = scipy.optimize.minimize(timingResiduals, p0, args=(observations, \
                self.stations_time_dict, weights), bounds=bounds, method=opt_method,
                options={'maxiter': maxiter}, tol=1e-12)

            # Stop trying methods if this one was successful
            if timing_mini.success:

                # Set the final value of the timing residual
                self.timing_res = timing_mini.fun

                if self.verbose:
                    print('Successful timing optimization with', opt_method)
                    print("Final function evaluation:", timing_mini.fun)

                break

            else:
                print('Unsuccessful timing optimization with', opt_method)

        ### ###

        if not timing_mini.success:
            print('Timing difference and initial velocity minimization failed with the message:')
            print(timing_mini.message)
            print('Try increasing the range of time offsets!')
            v_init_mini = v_init
            velocity_fit = np.zeros(2)
            vel_stddev = 0
            # NOTE(review): this overwrites the v_init_mini = v_init assignment just above, so a
            #   failed minimization reports an initial velocity of 0 - confirm this is intended.
            v_init_mini = 0

    # Check if the velocity should be estimated
    estimate_velocity = False
    timing_minimization_successful = False
    if not estimate_timing_vel:
        # NOTE(review): unreachable - the early return near the top of the function already
        #   handled the estimate_timing_vel == False case.
        estimate_velocity = True
        timing_minimization_successful = True
    else:
        if timing_mini.success:
            estimate_velocity = True
            timing_minimization_successful = True

    # If the minimization was successful, apply the time corrections
    if estimate_velocity:

        stat_count = 0
        for i, obs in enumerate(observations):

            # Check the station timing dictionary to see if the station is fixed
            stat_status = self.stations_time_dict[str(obs.station_id)]

            # If the station has a fixed time offset, read it
            if not isinstance(stat_status, bool):
                t_diff = stat_status

            # Otherwise read the estimated offset
            else:
                t_diff = timing_mini.x[stat_count]
                stat_count += 1

            if self.verbose:
                print('STATION ' + str(obs.station_id) + ' TIME OFFSET = ' + str(t_diff) + ' s')

            # Skip NaN and inf time offsets
            if np.isnan(t_diff) or np.isinf(t_diff):
                continue

            # Apply the time shift to original time data
            obs.time_data = obs.time_data + t_diff

            # Apply the time shift to the excluded time
            if obs.excluded_time is not None:
                obs.excluded_time = [ex_time + t_diff for ex_time in obs.excluded_time]

            # Add the final time difference of the site to the list
            time_diffs[i] = t_diff

        # Add in time and distance points, excluding the ignored points
        times = []
        state_vect_dist = []
        weight_list = []
        for obs, wt in zip(observations, weights):

            # Skip ignored stations
            if obs.ignore_station:
                continue

            times.append(obs.time_data[obs.ignore_list == 0])
            state_vect_dist.append(obs.state_vect_dist[obs.ignore_list == 0])
            weight_list.append(np.zeros_like(obs.time_data[obs.ignore_list == 0]) + wt)

        times = np.concatenate(times).ravel()
        state_vect_dist = np.concatenate(state_vect_dist).ravel()
        weight_list = np.concatenate(weight_list).ravel()

        # Sort points by time
        time_sort_ind = times.argsort()
        times = times[time_sort_ind]
        state_vect_dist = state_vect_dist[time_sort_ind]
        weight_list = weight_list[time_sort_ind]

        stddev_list = []

        # Calculate the velocity on different initial portions of the trajectory

        # Find the best fit by starting from the first few beginning points
        for part_beg in range(4):

            # Find the best fit on different portions of the trajectory
            for part in np.arange(self.v_init_part, 0.8, 0.05):

                # Get the index of the end of the first portion of points
                part_end = int(part*len(times))

                # Make sure there are at least 4 points per every station
                if (part_end - part_beg) < 4*numStationsNotIgnored(observations):
                    part_end = part_beg + 4*numStationsNotIgnored(observations)

                # Make sure the end index is not larger than the meteor
                if part_end >= len(times):
                    part_end = len(times) - 1

                # Select only the first part of all points
                times_part = times[part_beg:part_end]
                state_vect_dist_part = state_vect_dist[part_beg:part_end]
                weights_list_path = weight_list[part_beg:part_end]

                # Fit a line to time vs. state_vect_dist
                fit_params = scipy.optimize.least_squares(lineFuncLS, [v_init, 1], args=(times_part, \
                    state_vect_dist_part, weights_list_path), loss='soft_l1')
                velocity_fit = fit_params.x

                ### Compute the standard deviation of the velocity fit
                jac = fit_params.jac
                jac_dot = jac.T.dot(jac)

                # Make sure the Jacobian matrix is not singular
                if np.linalg.det(jac_dot) == 0:
                    velocity_stddev = 0
                else:
                    vel_cov = np.linalg.inv(jac_dot)
                    velocity_stddev = np.sqrt(np.diagonal(vel_cov))[0]

                ###

                # Calculate the lag and fit a line to it
                lag_temp = state_vect_dist - lineFunc(times, *velocity_fit)
                lag_fit = scipy.optimize.least_squares(lineFuncLS, np.ones(2), args=(times, lag_temp, \
                    weight_list), loss='soft_l1')
                lag_fit = lag_fit.x

                # Add the point to the considered list only if the lag has a negative trend, or a trend
                # that is not *too* positive, about 100 m per second is the limit
                if lag_fit[0] <= 100:

                    # Calculate the standard deviation of the line fit and add it to the list of solutions
                    line_stddev = RMSD(state_vect_dist_part - lineFunc(times_part, *velocity_fit), \
                        weights=weights_list_path)
                    stddev_list.append([line_stddev, velocity_fit, velocity_stddev])

        # stddev_arr = np.array([std[0] for std in stddev_list])
        # plt.plot(range(len(stddev_arr)), stddev_arr, label='line')
        # plt.legend()
        # plt.show()

        # If no lags were negative (meaning all fits were bad), use the initially estimated initial
        # velocity
        if not stddev_list:
            v_init_mini = v_init

            # Redo the lag fit, but with fixed velocity
            vel_intercept, vel_cov = scipy.optimize.curve_fit(lambda x, intercept: lineFunc(x, v_init_mini, \
                intercept), times, state_vect_dist, p0=[0])

            # Compute the fit standard deviation
            vel_stddev = np.sqrt(np.diagonal(vel_cov))[0]

            # Construct a velocity fit vector
            velocity_fit = [v_init_mini, vel_intercept[0]]

        else:

            # Take the velocity fit with the minimum line standard deviation
            stddev_min_ind = np.argmin([std[0] for std in stddev_list])
            velocity_fit = stddev_list[stddev_min_ind][1]
            vel_stddev = stddev_list[stddev_min_ind][2]

            # Make sure the velocity is positive
            v_init_mini = np.abs(velocity_fit[0])

    # Calculate the lag for every site
    for obs in observations:
        obs.lag = obs.state_vect_dist - lineFunc(obs.time_data, *velocity_fit)

    if self.verbose:
        print('ESTIMATED Vinit: {:.2f} +/- {:.2f} m/s'.format(v_init_mini, vel_stddev))

    return timing_minimization_successful, velocity_fit, v_init_mini, vel_stddev, time_diffs, observations
def calcLLA(self, state_vect, radiant_eci, observations):
    """ Calculate latitude, longitude and altitude of every point on the observer's line of sight,
        which is closest to the radiant line.

    Fills per-observation arrays (meas_lat/lon/ht/range and model_lat/lon/ht/range), the
    per-observation begin/end/lowest-point coordinates, and the trajectory-level
    rbeg_*/rend_*/htmin_* attributes (with their WGS84 heights).

    Arguments:
        state_vect: [ndarray] (x, y, z) ECI coordinates of the initial state vector (meters).
        radiant_eci: [ndarray] (x, y, z) components of the unit radiant direction vector.
        observations: [list] A list of ObservationPoints objects which hold measurements from individual
            stations.
    """

    ### Compute parameters for gravity drop ###

    # Determine the first time
    t0 = min([obs.time_data[0] for obs in observations])

    # Determine the largest distance from the centre of the Earth and use it as the beginning point
    eci_list = []
    for obs in observations:

        # Calculate closest points of approach (observed line of sight to radiant line)
        obs_cpa, rad_cpa, _ = findClosestPoints(obs.stat_eci_los[0], obs.meas_eci_los[0], state_vect, \
            radiant_eci)

        eci_list.append(rad_cpa)

    # Find the largest distance from the centre of the Earth
    max_dist_indx = np.argmax([vectMag(r) for r in eci_list])

    # Get ECI coordinates of the largest distance and the distance itself
    eci0 = eci_list[max_dist_indx]
    r0 = vectMag(eci0)

    ### Compute the apparent zenith angle ###

    # Radiant should be calculated from radiant_eci, not state_vect_mini, which is always a near
    # vertical ##

    # Compute the apparent radiant
    ra_a, dec_a = eci2RaDec(radiant_eci)

    # Compute alt/az of the apparent radiant
    lat_0, lon_0, _ = cartesian2Geo(self.jdt_ref, *eci0)
    _, alt_a = raDec2AltAz(ra_a, dec_a, self.jdt_ref, lat_0, lon_0)

    # Compute the apparent zenith angle
    zc = np.pi/2 - alt_a

    ####

    # Compute the vertical component of the velocity if the orbit was already computed
    self.v0z = -self.v_init*np.cos(zc)

    ###########################################

    # Go through all observations from all stations
    for obs in observations:

        # Init LLA arrays
        obs.meas_lat = np.zeros_like(obs.time_data)
        obs.meas_lon = np.zeros_like(obs.time_data)
        obs.meas_ht = np.zeros_like(obs.time_data)
        obs.meas_range = np.zeros_like(obs.time_data)

        obs.model_lat = np.zeros_like(obs.time_data)
        obs.model_lon = np.zeros_like(obs.time_data)
        obs.model_ht = np.zeros_like(obs.time_data)
        obs.model_range = np.zeros_like(obs.time_data)

        # Go through all individual position measurement from each site
        for i, (t, stat, meas) in enumerate(zip(obs.time_data, obs.stat_eci_los, obs.meas_eci_los)):

            # Calculate closest points of approach (observed line of sight to radiant line)
            obs_cpa, rad_cpa, d = findClosestPoints(stat, meas, state_vect, radiant_eci)

            ### Take the gravity drop into account ###
            if self.gravity_correction:

                # Calculate the time in seconds from the beginning of the meteor
                t_rel = t - t0

                # Apply the gravity drop
                rad_cpa_grav = applyGravityDrop(rad_cpa, t_rel, r0, self.gravity_factor, self.v0z)

                # Re-do the vector points of closest approach. This is really important!
                obs_cpa, rad_cpa, d = findClosestPoints(stat, meas, rad_cpa_grav, radiant_eci)

            # Calculate the range to the observed CPA
            r_meas = vectMag(obs_cpa - stat)

            # Calculate the coordinates of the observed CPA
            lat_meas, lon_meas, ele_meas = cartesian2Geo(obs.JD_data[i], *obs_cpa)

            obs.meas_lat[i] = lat_meas
            obs.meas_lon[i] = lon_meas
            obs.meas_ht[i] = ele_meas
            obs.meas_range[i] = r_meas

            # Calculate the range to the radiant CPA
            r_model = vectMag(rad_cpa - stat)

            # Calculate the coordinates of the observed CPA
            lat_model, lon_model, ele_model = cartesian2Geo(obs.JD_data[i], *rad_cpa)

            obs.model_lat[i] = lat_model
            obs.model_lon[i] = lon_model
            obs.model_ht[i] = ele_model
            obs.model_range[i] = r_model

        # If the whole station is not ignored
        if not obs.ignore_station:

            # Set the coordinates of the first point on the trajectory, taking the ignored points into account
            obs.rbeg_lat = obs.model_lat[obs.ignore_list == 0][0]
            obs.rbeg_lon = obs.model_lon[obs.ignore_list == 0][0]
            obs.rbeg_ele = obs.model_ht[obs.ignore_list == 0][0]
            obs.rbeg_jd = obs.JD_data[obs.ignore_list == 0][0]

            # Set the coordinates of the last point on the trajectory, taking the ignored points into account
            obs.rend_lat = obs.model_lat[obs.ignore_list == 0][-1]
            obs.rend_lon = obs.model_lon[obs.ignore_list == 0][-1]
            obs.rend_ele = obs.model_ht[obs.ignore_list == 0][-1]
            obs.rend_jd = obs.JD_data[obs.ignore_list == 0][-1]

            # Determine index the lowest point on the trajectory
            htmin_index = np.argmin(obs.model_ht[obs.ignore_list == 0])

            # Set the coordinates of the lowest point on the trajectory, taking the ignored points into account
            obs.htmin_lat = obs.model_lat[obs.ignore_list == 0][htmin_index]
            obs.htmin_lon = obs.model_lon[obs.ignore_list == 0][htmin_index]
            obs.htmin_ele = obs.model_ht[obs.ignore_list == 0][htmin_index]
            obs.htmin_jd = obs.JD_data[obs.ignore_list == 0][htmin_index]

        # If the station is completely ignored, compute the coordinates including all points
        else:

            # Set the coordinates of the first point on the trajectory, taking the ignored points into account
            obs.rbeg_lat = obs.model_lat[0]
            obs.rbeg_lon = obs.model_lon[0]
            obs.rbeg_ele = obs.model_ht[0]
            obs.rbeg_jd = obs.JD_data[0]

            # Set the coordinates of the last point on the trajectory, taking the ignored points into account
            obs.rend_lat = obs.model_lat[-1]
            obs.rend_lon = obs.model_lon[-1]
            obs.rend_ele = obs.model_ht[-1]
            obs.rend_jd = obs.JD_data[-1]

            # Determine index the lowest point on the trajectory
            htmin_index = np.argmin(obs.model_ht)

            # Set the coordinates of the lowest point on the trajectory, taking the ignored points into account
            obs.htmin_lat = obs.model_lat[htmin_index]
            obs.htmin_lon = obs.model_lon[htmin_index]
            obs.htmin_ele = obs.model_ht[htmin_index]
            obs.htmin_jd = obs.JD_data[htmin_index]

    # Make a list of observations without any ignored stations in them
    nonignored_observations = [obs for obs in self.observations if not obs.ignore_station]

    # Find the beginning height with the lowest length
    min_svd_list = [np.min(obs.state_vect_dist) for obs in nonignored_observations]
    first_begin = min_svd_list.index(min(min_svd_list))

    # Set the coordinates of the height point as the first point
    self.rbeg_lat = nonignored_observations[first_begin].rbeg_lat
    self.rbeg_lon = nonignored_observations[first_begin].rbeg_lon
    self.rbeg_ele = nonignored_observations[first_begin].rbeg_ele
    self.rbeg_jd = nonignored_observations[first_begin].rbeg_jd

    # Compute the begin height in WGS84
    self.rbeg_ele_wgs84 = wmpl.Utils.GeoidHeightEGM96.mslToWGS84Height(self.rbeg_lat, self.rbeg_lon, self.rbeg_ele)

    # Find the ending height with the largest length
    max_svd_list = [np.max(obs.state_vect_dist) for obs in nonignored_observations]
    last_end = max_svd_list.index(max(max_svd_list))

    # Set coordinates of the lowest point as the last point
    self.rend_lat = nonignored_observations[last_end].rend_lat
    self.rend_lon = nonignored_observations[last_end].rend_lon
    self.rend_ele = nonignored_observations[last_end].rend_ele
    self.rend_jd = nonignored_observations[last_end].rend_jd

    # Compute the end height in WGS84
    self.rend_ele_wgs84 = wmpl.Utils.GeoidHeightEGM96.mslToWGS84Height(self.rend_lat, self.rend_lon, self.rend_ele)

    # Find the lowest point on the trajectory
    min_ht_list = [obs.htmin_ele for obs in nonignored_observations]
    self.htmin_lat = nonignored_observations[np.argmin(min_ht_list)].htmin_lat
    self.htmin_lon = nonignored_observations[np.argmin(min_ht_list)].htmin_lon
    self.htmin_ele = nonignored_observations[np.argmin(min_ht_list)].htmin_ele
    self.htmin_jd = nonignored_observations[np.argmin(min_ht_list)].htmin_jd

    # Compute the lowest height in WGS84
    self.htmin_ele_wgs84 = wmpl.Utils.GeoidHeightEGM96.mslToWGS84Height(self.htmin_lat, self.htmin_lon, self.htmin_ele)
def calcECIEqAltAz(self, state_vect, radiant_eci, observations):
    """ Calculate ECI coordinates of both CPAs (observed and radiant), equatorial and alt-az coordinates
        of CPA positions on the radiant line.

    Fills per-observation model_eci, model_ra/dec, model_azim/elev, and the model_fit1/model_fit2
    arrays matching the format of the input meas1/meas2 data (selected by self.meastype).

    Arguments:
        state_vect: [ndarray] (x, y, z) ECI coordinates of the initial state vector (meters).
        radiant_eci: [ndarray] (x, y, z) components of the unit radiant direction vector.
        observations: [list] A list of ObservationPoints objects which hold measurements from individual
            stations.
    """

    ### Compute parameters for gravity drop ###

    # Determine the first time of observation
    t0 = min([obs.time_data[0] for obs in observations])

    # Determine the largest distance from the centre of the Earth and use it as the beginning point
    eci_list = []
    for obs in observations:

        # Calculate closest points of approach (observed line of sight to radiant line)
        _, rad_cpa, _ = findClosestPoints(obs.stat_eci_los[0], obs.meas_eci_los[0], state_vect, \
            radiant_eci)

        eci_list.append(rad_cpa)

    # Find the largest distance from the centre of the Earth
    max_dist_indx = np.argmax([vectMag(r) for r in eci_list])

    # Get ECI coordinates of the largest distance and the distance itself
    eci0 = eci_list[max_dist_indx]
    r0 = vectMag(eci0)

    ### Compute the apparent zenith angle ###

    # Compute the apparent radiant
    ra_a, dec_a = eci2RaDec(radiant_eci)

    # Compute alt/az of the apparent radiant
    lat_0, lon_0, _ = cartesian2Geo(self.jdt_ref, *eci0)
    _, alt_a = raDec2AltAz(ra_a, dec_a, self.jdt_ref, lat_0, lon_0)

    # Compute the apparent zenith angle
    zc = np.pi/2 - alt_a

    # Compute the vertical component of the velocity if the orbit was already computed
    self.v0z = -self.v_init*np.cos(zc)

    ####

    # Go through observations from all stations
    for obs in observations:

        # Init array for modelled ECI positons
        obs.model_eci = []

        # Init array for modelled RA, Dec positions
        obs.model_ra = np.zeros_like(obs.time_data)
        obs.model_dec = np.zeros_like(obs.time_data)

        # Init arrays for modelled alt, az
        obs.model_azim = np.zeros_like(obs.time_data)
        obs.model_elev = np.zeros_like(obs.time_data)

        # Go through all individual position measurement from each site
        for i, (t, jd, stat, meas) in enumerate(zip(obs.time_data, obs.JD_data, obs.stat_eci_los, \
            obs.meas_eci_los)):

            # Calculate closest points of approach (observed line of sight to radiant line)
            _, rad_cpa, _ = findClosestPoints(stat, meas, state_vect, radiant_eci)

            ### Take the gravity drop into account ###
            if self.gravity_correction:

                # Calculate the time in seconds from the beginning of the meteor
                t_rel = t - t0

                # Apply the gravity drop
                rad_cpa_grav = applyGravityDrop(rad_cpa, t_rel, r0, self.gravity_factor, self.v0z)

                # Re-do find closest points after gravity drop. This is really important!
                # The algorithm doesn't care which trajectory point is used to construct the line for the fit,
                # but it MUST be gravity corrected.
                _, rad_cpa, _ = findClosestPoints(stat, meas, rad_cpa_grav, radiant_eci)

            # Set the ECI position of the CPA on the radiant line, as seen by this observer
            obs.model_eci.append(rad_cpa)

            # Calculate the right ascension and declination of the modelled point from the observer's
            # point of view
            stat_rad_eci = rad_cpa - stat
            model_ra, model_dec = eci2RaDec(stat_rad_eci)

            obs.model_ra[i] = model_ra
            obs.model_dec[i] = model_dec

            # Calculate the azimuth and elevation of the modelled point from the observer's point of view
            model_azim, model_elev = raDec2AltAz(model_ra, model_dec, jd, obs.lat, obs.lon)

            obs.model_azim[i] = model_azim
            obs.model_elev[i] = model_elev

        obs.model_eci = np.array(obs.model_eci)

        ### Assign model_fit1, model_fit2, so they are in the same format as the input meas1, meas2 data
        ######################################################################################################

        # If inputs were RA and Dec
        if self.meastype == 1:
            obs.model_fit1 = obs.model_ra
            obs.model_fit2 = obs.model_dec

        # If inputs were azimuth +east of due north, and elevation angle
        elif self.meastype == 2:
            obs.model_fit1 = obs.model_azim
            obs.model_fit2 = obs.model_elev

        # If inputs were azimuth +west of due south, and zenith angle
        elif self.meastype == 3:
            obs.model_fit1 = (obs.model_azim + np.pi)%(2*np.pi)
            obs.model_fit2 = np.pi/2.0 - obs.model_elev

        # If input were azimuth +north of due east, and zenith angle
        elif self.meastype == 4:
            obs.model_fit1 = (np.pi/2.0 - obs.model_azim)%(2*np.pi)
            obs.model_fit2 = np.pi/2.0 - obs.model_elev

        ######################################################################################################
def calcAverages(self, observations):
    """ Calculate the average velocity, the average ECI position of the trajectory and the average
        Julian date of the trajectory.

    Arguments:
        observations: [list] A list of ObservationPoints objects which hold measurements from individual
            stations.

    Return:
        (v_avg, eci_avg, jd_avg): [tuple]
            v_avg: [float] Average velocity of the meteor in m/s.
            eci_avg: [ndarray] (x, y, z) ECI coordinates of the average point on the trajectory (meters).
            jd_avg: [float] Julian date of the average time of the trajectory.
    """

    # Work only with stations that are not ignored
    used_observations = [obs for obs in observations if not obs.ignore_station]

    velocity_total = 0
    eci_total = np.zeros(3)
    earliest_jd = np.inf
    latest_jd = -np.inf
    total_points = 0

    for obs in used_observations:

        # Mask selecting the points that are not individually ignored
        good = obs.ignore_list == 0

        times_used = obs.time_data[good]
        eci_used = obs.model_eci[good]
        jd_used = obs.JD_data[good]

        # Per-station average velocity: track length over duration (ignored points excluded)
        duration = times_used[-1] - times_used[0]
        track_length = vectMag(eci_used[-1] - eci_used[0])
        velocity_total += track_length/duration

        # Accumulate ECI positions and the time span
        eci_total += np.sum(eci_used, axis=0)
        earliest_jd = min(earliest_jd, np.min(jd_used))
        latest_jd = max(latest_jd, np.max(jd_used))

        # Count the used points
        total_points += len(times_used)

    # Average velocity across all stations
    v_avg = velocity_total/len(used_observations)

    # Average ECI across all stations
    eci_avg = eci_total/total_points

    # Average Julian date
    jd_avg = (earliest_jd + latest_jd)/2

    return v_avg, eci_avg, jd_avg
def calcAbsMagnitudes(self):
    """ Compute absolute magnitudes (apparent magnitude at 100 km) after trajectory estimation.

    For every observation that has apparent magnitudes, stores an array of absolute magnitudes
    in obs.absolute_magnitudes (None entries are preserved); otherwise sets it to None.
    """

    # Go through observations from all stations
    for obs in self.observations:

        # No apparent magnitudes given for this station
        if obs.magnitudes is None:
            obs.absolute_magnitudes = None
            continue

        abs_mags = []
        for i, apparent_mag in enumerate(obs.magnitudes):

            if apparent_mag is None:
                abs_mags.append(None)
            else:
                # Distance-modulus correction of the apparent magnitude to a range of 100 km
                abs_mags.append(apparent_mag + 5*np.log10(100000/obs.model_range[i]))

        obs.absolute_magnitudes = np.array(abs_mags)
def dumpMeasurements(self, dir_path, file_name):
    """ Writes the initialized measurements in a MATLAB format text file.

    Stations are written sorted by descending begin height; each gets its geographic
    coordinates and a (time, elevation, azimuth) measurement matrix (meastype 2 format),
    followed by the reference Julian date.

    Arguments:
        dir_path: [str] Path to the directory where the file will be written.
        file_name: [str] Name of the output file.
    """

    with open(os.path.join(dir_path, file_name), 'w') as f:

        for i, obs in enumerate(sorted(self.observations, key=lambda x:x.rbeg_ele, reverse=True)):

            # Write site coordinates
            f.write('m->longitude[' + str(i) + '] = ' + str(obs.lon) + ';\n')
            f.write('m->latitude[' + str(i) + '] = ' + str(obs.lat) + ';\n')
            f.write('m->heightkm[' + str(i) + '] = ' + str(obs.ele/1000) + ';\n\n')

            # Construct a measurement matrix (time, elevation, azimuth) - meastype 2
            meas_matr = np.c_[obs.time_data, np.degrees(obs.elev_data), np.degrees(obs.azim_data)]

            # Each station's matrix is named with a consecutive letter: a, b, c, ...
            f.write('double ' + chr(97 + i) + '[' + str(len(meas_matr)) + '][3] = {\n')

            for j, row in enumerate(meas_matr):
                suffix = ','
                if j == len(meas_matr) - 1:
                    suffix = '};\n'
                f.write(', '.join(row.astype(str)) + suffix + '\n')

        # Write the reference Julian date as a calendar date (ms folded into seconds)
        yyyy, MM, DD, hh, mm, ss, ms = jd2Date(self.jdt_ref)
        ss = ss + ms/1000
        date_formatted = ', '.join(map(str, [yyyy, MM, DD, hh, mm, ss]))
        f.write('m->jdt_ref = JulianDate( ' + date_formatted + ');\n')

    print('Measurements dumped into ', os.path.join(dir_path, file_name))
def toJson(self):
    """ Convert the Trajectory object to a JSON string.

    Works on a deep copy of self, so attribute deletions and the conversions done by the
    internal helper never modify the live trajectory object.

    Return:
        out_str: [str] JSON representation of the trajectory (indent=4, insertion order kept).
    """

    # Get a list of builtin types (Python 2 first, then Python 3)
    try :
        import __builtin__
        builtin_types = [t for t in __builtin__.__dict__.itervalues() if isinstance(t, type)]
    except:
        # Python 3.x
        import builtins
        builtin_types = [getattr(builtins, d) for d in dir(builtins) if isinstance(getattr(builtins, d), type)]

    def _convertDict(d):
        """ Convert the given object dictionary to JSON-compatible format.

        Deep-copies its input, then recursively converts dicts, lists, datetimes, numpy
        arrays/scalars, and non-builtin objects (stored as {ClassName: converted __dict__}).
        None values and empty lists are skipped/kept as-is.
        """

        d = copy.deepcopy(d)

        d_new = {}
        for key in d:

            # Set the old value to the new dictionary
            d_new[key] = d[key]

            # Recursively convert all dictionaries
            if isinstance(d[key], dict):
                d_new[key] = _convertDict(d[key])

            # Recursively convert items in lists
            if isinstance(d[key], list):

                # Skip empty lists
                if len(d[key]) == 0:
                    continue

                # Remove the old list
                del d_new[key]

                # Convert the list to a dictionary (index -> item) so it can reuse the dict path
                d_tmp = {i: item for (i, item) in enumerate(d[key])}

                # Run the convert procedure
                d_tmp = _convertDict(d_tmp)

                # Unpack the dictionary to a list
                index_list = []
                value_list = []
                for k in d_tmp:
                    index_list.append(k)
                    value_list.append(d_tmp[k])

                # Sort value list by index
                value_list = [x for _, x in sorted(zip(index_list, value_list))]

                d_new[key] = value_list

            # Skip None types
            elif d[key] is None:
                continue

            # Convert datetime objects to strings
            elif isinstance(d[key], datetime.datetime):
                d_new[key] = str(d[key])

            # Convert numpy arrays to lists
            elif isinstance(d[key], np.ndarray):
                d_new[key] = d[key].tolist()

            # Convert numpy types to float
            elif type(d[key]).__module__ == np.__name__:
                d_new[key] = float(d[key])

            # Recursively convert all non-builtin types
            elif type(d[key]) not in builtin_types:

                # Get the name of the class
                class_name = type(d[key]).__name__

                key_name = class_name

                # Handle class-specific things
                if class_name == "ObservedPoints":
                    key_name += "." + d[key].station_id

                elif class_name == "PlaneIntersection":
                    key_name += "." + d[key].obs1.station_id + "_" + d[key].obs2.station_id
                    # NOTE: mutates the (copied) nested object - the two observation references
                    # are dropped to avoid duplicating them in the output
                    del d[key].obs1
                    del d[key].obs2

                # Remove a list of trajectories in the uncertainties object
                elif class_name == "MCUncertainties":
                    d[key].mc_traj_list = None

                # Assign the converted dictionary to the given attribute name
                del d_new[key]
                d_new[key] = {key_name: _convertDict(d[key].__dict__)}

        return d_new

    traj = copy.deepcopy(self)

    # Remove noise-added observations
    if hasattr(traj, "obs_noisy"):
        del traj.obs_noisy

    # Delete duplicate misspelt attribute
    if hasattr(traj, "uncertanties"):
        del traj.uncertanties

    # Convert the trajectory object's attributes to JSON-compatible format
    traj_dict = _convertDict(traj.__dict__)

    # Convert the trajectory object to JSON
    out_str = json.dumps(traj_dict, indent=4, sort_keys=False)

    return out_str
def saveReport(self, dir_path, file_name, uncertainties=None, verbose=True, save_results=True):
""" Save the trajectory estimation report to file.
Arguments:
dir_path: [str] Path to the directory where the report will be saved.
file_name: [str] Name of the report time.
Keyword arguments:
uncertainties: [MCUncertainties object] Object contaning uncertainties of every parameter.
verbose: [bool] Print the report to the screen. True by default.
save_results: [bool] If True, the results will be saved to a file.
"""
def _uncer(str_format, std_name, multi=1.0, deg=False):
""" Internal function. Returns the formatted uncertanty, if the uncertanty is given. If not,
it returns nothing.
Arguments:
str_format: [str] String format for the unceertanty.
std_name: [str] Name of the uncertanty attribute, e.g. if it is 'x', then the uncertanty is
stored in uncertainties.x.
Keyword arguments:
multi: [float] Uncertanty multiplier. 1.0 by default. This is used to scale the uncertanty to
different units (e.g. from m/s to km/s).
deg: [bool] Converet radians to degrees if True. False by default.
"""
if deg:
multi *= np.degrees(1.0)
if uncertainties is not None:
# Construct symmetrical 1 sigma uncertainty
ret_str = " +/- " + str_format.format(getattr(uncertainties, std_name)*multi)
# Add confidence interval if available
if hasattr(uncertainties, std_name + "_ci"):
ci_l, ci_u = np.array(getattr(uncertainties, std_name + "_ci"))*multi
ret_str += ", [{:s}, {:s}]".format(str_format.format(ci_l), str_format.format(ci_u))
return ret_str
else:
return ''
# Format longitude in the -180 to 180 deg range
_formatLongitude = lambda x: (x + np.pi)%(2*np.pi) - np.pi
out_str = ''
out_str += 'Input measurement type: '
# Write out measurement type
if self.meastype == 1:
out_str += 'Right Ascension for meas1, Declination for meas2, epoch of date\n'
elif self.meastype == 2:
out_str += 'Azimuth +east of due north for meas1, Elevation angle above the horizon for meas2\n'
elif self.meastype == 3:
out_str += 'Azimuth +west of due south for meas1, Zenith angle for meas2\n'
elif self.meastype == 4:
out_str += 'Azimuth +north of due east for meas1, Zenith angle for meas2\n'
out_str += "\n"
# Write the uncertainty type
if self.geometric_uncert:
uncert_label = "Purely geometric uncertainties"
else:
uncert_label = "Uncertainties computed using MC runs with lower cost function value than the purely geometric solution"
if self.state_vect_cov is not None:
out_str += 'Uncertainties type:\n'
out_str += ' {:s}'.format(uncert_label)
out_str += '\n\n'
out_str += "Reference JD: {:20.12f}\n".format(self.jdt_ref)
out_str += "Time: " + str(jd2Date(self.orbit.jd_ref, dt_obj=True)) + " UTC\n"
out_str += "\n\n"
out_str += 'Plane intersections\n'
out_str += '-------------------\n'
# Write out all intersecting planes pairs
for n, plane_intersection in enumerate(self.intersection_list):
n = n + 1
out_str += 'Intersection ' + str(n) + ' - Stations: ' + str(plane_intersection.obs1.station_id) +\
' and ' + str(plane_intersection.obs2.station_id) + '\n'
out_str += ' Convergence angle = {:.5f} deg\n'.format(np.degrees(plane_intersection.conv_angle))
ra, dec = plane_intersection.radiant_eq
out_str += ' R.A. = {:>9.5f} Dec = {:>+9.5f} deg\n'.format(np.degrees(ra), np.degrees(dec))
out_str += '\nBest intersection: Stations ' + str(self.best_conv_inter.obs1.station_id) + ' and ' \
+ str(self.best_conv_inter.obs2.station_id) \
+ ' with Qconv = {:.2f} deg\n'.format(np.degrees(self.best_conv_inter.conv_angle))
out_str += '\n\n'
out_str += 'Least squares solution\n'
out_str += '----------------------\n'
# Calculate the state vector components
x, y, z = self.state_vect_mini
vx, vy, vz = self.v_init*self.radiant_eci_mini
# Write out the state vector
out_str += "State vector (ECI, epoch of date):\n"
out_str += " X = {:s} m\n".format(valueFormat("{:11.2f}", x, '{:7.2f}', uncertainties, 'x'))
out_str += " Y = {:s} m\n".format(valueFormat("{:11.2f}", y, '{:7.2f}', uncertainties, 'y'))
out_str += " Z = {:s} m\n".format(valueFormat("{:11.2f}", z, '{:7.2f}', uncertainties, 'z'))
out_str += " Vx = {:s} m/s\n".format(valueFormat("{:11.2f}", vx, '{:7.2f}', uncertainties, 'vx'))
out_str += " Vy = {:s} m/s\n".format(valueFormat("{:11.2f}", vy, '{:7.2f}', uncertainties, 'vy'))
out_str += " Vz = {:s} m/s\n".format(valueFormat("{:11.2f}", vz, '{:7.2f}', uncertainties, 'vz'))
out_str += "\n"
# Write out the state vector covariance matrix
if self.state_vect_cov is not None:
out_str += "State vector covariance matrix (X, Y, Z, Vx, Vy, Vz):\n"
for line in self.state_vect_cov:
line_list = []
for entry in line:
line_list.append("{:+.6e}".format(entry))
out_str += ", ".join(line_list) + "\n"
out_str += "\n"
out_str += "Timing offsets (from input data):\n"
for stat_id, t_diff in zip([obs.station_id for obs in self.observations], self.time_diffs_final):
out_str += "{:>14s}: {:.6f} s\n".format(str(stat_id), t_diff)
out_str += "\n"
if self.orbit is not None:
out_str += "Reference point on the trajectory:\n"
out_str += " Time: " + str(jd2Date(self.orbit.jd_ref, dt_obj=True)) + " UTC\n"
out_str += " Lat = {:s} deg\n".format(valueFormat("{:>11.6f}", self.orbit.lat_ref, \
'{:6.4f}', uncertainties, 'lat_ref', deg=True))
out_str += " Lon = {:s} deg\n".format(valueFormat("{:>11.6f}", self.orbit.lon_ref, \
'{:6.4f}', uncertainties, 'lon_ref', deg=True, callable_val=_formatLongitude, \
callable_ci=_formatLongitude))
out_str += " Ht MSL = {:s} m\n".format(valueFormat("{:>11.2f}", self.orbit.ht_ref, \
'{:6.2f}', uncertainties, 'ht_ref', deg=False))
try:
out_str += " Ht WGS84 = {:s} m\n".format(valueFormat("{:>11.2f}", self.orbit.ht_ref_wgs84, \
'{:6.2f}', uncertainties, 'ht_ref_wgs84', deg=False))
except:
pass
out_str += " Lat geo = {:s} deg\n".format(valueFormat("{:>11.6f}", self.orbit.lat_geocentric, \
'{:6.4f}', uncertainties, 'lat_geocentric', deg=True))
out_str += "\n"
# Write out orbital parameters
out_str += self.orbit.__repr__(uncertainties=uncertainties, v_init_ht=self.v_init_ht)
out_str += "\n"
# Write out the orbital covariance matrix
if self.state_vect_cov is not None:
out_str += "Orbit covariance matrix:\n"
out_str += " e , q (AU) , Tp (JD) , node (deg) , peri (deg) , i (deg)\n"
elements_list = ["e ", "q ", "Tp ", "node", "peri", "i "]
for elem_name, line in zip(elements_list, self.orbit_cov):
line_list = [elem_name]
for entry in line:
line_list.append("{:+.6e}".format(entry))
out_str += ", ".join(line_list) + "\n"
out_str += "\n"
out_str += "Jacchia fit on lag = -|a1|*exp(|a2|*t):\n"
jacchia_fit = self.jacchia_fit
if jacchia_fit is None:
jacchia_fit = [0, 0]
out_str += " a1 = {:.6f}\n".format(jacchia_fit[0])
out_str += " a2 = {:.6f}\n".format(jacchia_fit[1])
out_str += "\n"
if self.estimate_timing_vel is True:
out_str += "Mean time residuals from time vs. length:\n"
out_str += " Station with reference time: {:s}\n".format(
str(self.observations[self.t_ref_station].station_id))
out_str += " Avg. res. = {:.3e} s\n".format(self.timing_res)
out_str += " Stddev = {:.2e} s\n".format(self.timing_stddev)
out_str += "\n"
out_str += "\n"
out_str += "Begin point on the trajectory:\n"
out_str += " Lat (+N) = {:s} deg\n".format(valueFormat("{:>11.6f}", self.rbeg_lat, "{:6.4f}", \
uncertainties, 'rbeg_lat', deg=True))
if uncertainties is not None:
if hasattr(uncertainties, "rbeg_lat_m"):
out_str += " +/- {:6.2f} m\n".format(uncertainties.rbeg_lat_m)
out_str += " Lon (+E) = {:s} deg\n".format(valueFormat("{:>11.6f}", self.rbeg_lon, "{:6.4f}", \
uncertainties, 'rbeg_lon', deg=True, callable_val=_formatLongitude, callable_ci=_formatLongitude))
if uncertainties is not None:
if hasattr(uncertainties, "rbeg_lon_m"):
out_str += " +/- {:6.2f} m\n".format(uncertainties.rbeg_lon_m)
out_str += " Ht MSL = {:s} m\n".format(valueFormat("{:>11.2f}", self.rbeg_ele, "{:6.2f}", \
uncertainties, 'rbeg_ele'))
try:
out_str += " Ht WGS84 = {:s} m\n".format(valueFormat("{:>11.2f}", self.rbeg_ele_wgs84, "{:6.2f}", \
uncertainties, 'rbeg_ele_wgs84'))
except:
pass
out_str += "\n"
out_str += "End point on the trajectory:\n"
out_str += " Lat (+N) = {:s} deg\n".format(valueFormat("{:>11.6f}", self.rend_lat, "{:6.4f}", \
uncertainties, 'rend_lat', deg=True))
if uncertainties is not None:
if hasattr(uncertainties, "rend_lat_m"):
out_str += " +/- {:6.2f} m\n".format(uncertainties.rend_lat_m)
out_str += " Lon (+E) = {:s} deg\n".format(valueFormat("{:>11.6f}", self.rend_lon, "{:6.4f}", \
uncertainties, 'rend_lon', deg=True, callable_val=_formatLongitude, callable_ci=_formatLongitude))
if uncertainties is not None:
if hasattr(uncertainties, "rend_lon_m"):
out_str += " +/- {:6.2f} m\n".format(uncertainties.rend_lon_m)
out_str += " Ht MSL = {:s} m\n".format(valueFormat("{:>11.2f}", self.rend_ele, "{:6.2f}", \
uncertainties, 'rend_ele'))
try:
out_str += " Ht WGS84 = {:s} m\n".format(valueFormat("{:>11.2f}", self.rend_ele_wgs84, "{:6.2f}", \
uncertainties, 'rend_ele_wgs84'))
except:
pass
out_str += "\n"
out_str += "Lowest point on the trajectory:\n"
out_str += " Lat (+N) = {:s} deg\n".format(valueFormat("{:>11.6f}", self.htmin_lat, "{:6.4f}", \
uncertainties, 'htmin_lat', deg=True))
if uncertainties is not None:
if hasattr(uncertainties, "htmin_lat_m"):
out_str += " +/- {:6.2f} m\n".format(uncertainties.htmin_lat_m)
out_str += " Lon (+E) = {:s} deg\n".format(valueFormat("{:>11.6f}", self.htmin_lon, "{:6.4f}", \
uncertainties, 'htmin_lon', deg=True, callable_val=_formatLongitude, callable_ci=_formatLongitude))
if uncertainties is not None:
if hasattr(uncertainties, "htmin_lon_m"):
out_str += " +/- {:6.2f} m\n".format(uncertainties.htmin_lon_m)
out_str += " Ht MSL = {:s} m\n".format(valueFormat("{:>11.2f}", self.htmin_ele, "{:6.2f}", \
uncertainties, 'htmin_ele'))
try:
out_str += " Ht WGS84 = {:s} m\n".format(valueFormat("{:>11.2f}", self.htmin_ele_wgs84, "{:6.2f}", \
uncertainties, 'htmin_ele_wgs84'))
except:
pass
out_str += "\n"
### Write information about stations ###
######################################################################################################
out_str += "Stations\n"
out_str += "--------\n"
out_str += " ID, Ignored, Lon +E (deg), Lat +N (deg), Ht (m), Jacchia a1, Jacchia a2, Beg Ht (m), End Ht (m), +/- Obs ang (deg), +/- V (m), +/- H (m), Persp. angle (deg), Weight, FOV Beg, FOV End, Comment\n"
for obs in self.observations:
station_info = []
station_info.append("{:>14s}".format(str(obs.station_id)))
station_info.append("{:>7s}".format(str(obs.ignore_station)))
station_info.append("{:>12.6f}".format(np.degrees(obs.lon)))
station_info.append("{:>12.6f}".format(np.degrees(obs.lat)))
station_info.append("{:>7.2f}".format(obs.ele))
jacchia_fit = obs.jacchia_fit
if jacchia_fit is None:
jacchia_fit = [0, 0]
station_info.append("{:>10.6f}".format(jacchia_fit[0]))
station_info.append("{:>10.6f}".format(jacchia_fit[1]))
station_info.append("{:>11.2f}".format(obs.rbeg_ele))
station_info.append("{:>11.2f}".format(obs.rend_ele))
station_info.append("{:>17.6f}".format(np.degrees(obs.ang_res_std)))
station_info.append("{:>9.2f}".format(obs.v_res_rms))
station_info.append("{:>9.2f}".format(obs.h_res_rms))
station_info.append("{:>18.2f}".format(np.degrees(obs.incident_angle)))
if obs.weight is not None:
station_info.append("{:>6.4f}".format(obs.weight))
else:
station_info.append("{:>6s}".format('None'))
station_info.append("{:>7s}".format(str(obs.fov_beg)))
station_info.append("{:>7s}".format(str(obs.fov_end)))
station_info.append("{:s}".format(str(obs.comment)))
out_str += ", ".join(station_info) + "\n"
######################################################################################################
out_str += "\n"
### Write information about individual points ###
######################################################################################################
out_str += "Points\n"
out_str += "------\n"
out_str += " No, "
out_str += " Station ID, "
out_str += " Ignore, "
out_str += " Time (s), "
out_str += " JD, "
out_str += " meas1, "
out_str += " meas2, "
out_str += "Azim +E of due N (deg), "
out_str += "Alt (deg), "
out_str += "Azim line (deg), "
out_str += "Alt line (deg), "
out_str += "RA obs (deg), "
out_str += "Dec obs (deg), "
out_str += "RA line (deg), "
out_str += "Dec line (deg), "
out_str += " X (m), "
out_str += " Y (m), "
out_str += " Z (m), "
out_str += "Latitude (deg), "
out_str += "Longitude (deg), "
out_str += "Height (m), "
out_str += " Range (m), "
out_str += "Length (m), "
out_str += "State vect dist (m), "
out_str += " Lag (m), "
out_str += "Vel (m/s), "
out_str += "Vel prev avg (m/s), "
out_str += "H res (m), "
out_str += "V res (m), "
out_str += "Ang res (asec), "
out_str += "AppMag, "
out_str += "AbsMag"
out_str += "\n"
# Go through observation from all stations
for obs in self.observations:
# Go through all observed points
for i in range(obs.kmeas):
point_info = []
point_info.append("{:3d}".format(i))
point_info.append("{:>14s}".format(str(obs.station_id)))
point_info.append("{:>7d}".format(obs.ignore_list[i]))
point_info.append("{:9.6f}".format(obs.time_data[i]))
point_info.append("{:20.12f}".format(obs.JD_data[i]))
point_info.append("{:9.5f}".format(np.degrees(obs.meas1[i])))
point_info.append("{:9.5f}".format(np.degrees(obs.meas2[i])))
point_info.append("{:22.5f}".format(np.degrees(obs.azim_data[i])))
point_info.append("{:9.5f}".format(np.degrees(obs.elev_data[i])))
point_info.append("{:15.5f}".format(np.degrees(obs.model_azim[i])))
point_info.append("{:14.5f}".format(np.degrees(obs.model_elev[i])))
point_info.append("{:12.5f}".format(np.degrees(obs.ra_data[i])))
point_info.append("{:+13.5f}".format(np.degrees(obs.dec_data[i])))
point_info.append("{:13.5f}".format(np.degrees(obs.model_ra[i])))
point_info.append("{:+14.5f}".format(np.degrees(obs.model_dec[i])))
point_info.append("{:11.2f}".format(obs.model_eci[i][0]))
point_info.append("{:11.2f}".format(obs.model_eci[i][1]))
point_info.append("{:11.2f}".format(obs.model_eci[i][2]))
point_info.append("{:14.6f}".format(np.degrees(obs.model_lat[i])))
point_info.append("{:+15.6f}".format(np.degrees(obs.model_lon[i])))
point_info.append("{:10.2f}".format(obs.model_ht[i]))
point_info.append("{:10.2f}".format(obs.model_range[i]))
point_info.append("{:10.2f}".format(obs.length[i]))
point_info.append("{:19.2f}".format(obs.state_vect_dist[i]))
point_info.append("{:9.2f}".format(obs.lag[i]))
point_info.append("{:9.2f}".format(obs.velocities[i]))
point_info.append("{:18.2f}".format(obs.velocities_prev_point[i]))
point_info.append("{:9.2f}".format(obs.h_residuals[i]))
point_info.append("{:9.2f}".format(obs.v_residuals[i]))
point_info.append("{:14.2f}".format(3600*np.degrees(obs.ang_res[i])))
if obs.magnitudes is not None:
# Write the magnitude
if obs.magnitudes[i] is not None:
point_info.append("{:+6.2f}".format(obs.magnitudes[i]))
else:
point_info.append("{:>6s}".format('None'))
# Write the magnitude
if obs.absolute_magnitudes[i] is not None:
point_info.append("{:+6.2f}".format(obs.absolute_magnitudes[i]))
else:
point_info.append("{:>6s}".format('None'))
else:
point_info.append("{:>6s}".format('None'))
point_info.append("{:>6s}".format('None'))
out_str += ", ".join(point_info) + "\n"
######################################################################################################
out_str += "\n"
out_str += "Notes\n"
out_str += "-----\n"
out_str += "- Points that have not been taken into consideration when computing the trajectory have '1' in the 'Ignore' column.\n"
out_str += "- The time already has time offsets applied to it.\n"
out_str += "- 'meas1' and 'meas2' are given input points.\n"
out_str += "- X, Y, Z are ECI (Earth-Centered Inertial) positions of projected lines of sight on the radiant line.\n"
out_str += "- Zc is the observed zenith distance of the entry angle, while the Zg is the entry zenith distance corrected for Earth's gravity.\n"
out_str += "- Latitude (deg) and Longitude (deg) are in WGS84 coordinates, while Height (m) is in the EGM96 datum. There values are coordinates of each point on the radiant line.\n"
out_str += "- Jacchia (1955) deceleration equation fit was done on the lag.\n"
out_str += "- Right ascension and declination in the table are given in the epoch of date for the corresponding JD, per every point.\n"
out_str += "- 'RA and Dec obs' are the right ascension and declination calculated from the observed values, while the 'RA and Dec line' are coordinates of the lines of sight projected on the fitted radiant line. The coordinates are in the epoch of date, and NOT J2000!. 'Azim and alt line' are thus corresponding azimuthal coordinates.\n"
out_str += "- 'Vel prev avg' is the average velocity including all previous points up to the given point. For the first 4 points this velocity is computed as the average velocity of those 4 points. \n"
if uncertainties is not None:
out_str += "- The number after +/- is the 1 sigma uncertainty, and the numbers in square brackets are the 95% confidence intervals. \n"
# Add the wmpl version and the date of the version and the date of the report
out_str += "\n\n"
out_str += "Report generated by the Western Meteor Physics Library (WMPL) on {:s} UTC\n".format(str(datetime.datetime.now(datetime.timezone.utc)))
if HAS_GITPYTHON:
# in the case where WMPL wasn't called from the WMPL home directory, git.Repo() will fail
# And in fact, might read version info from a totally different git repo.
try:
repo = git.Repo(search_parent_directories=True)
out_str += "WMPL version commit: {:s}\n".format(repo.head.object.hexsha)
out_str += "WMPL version date: {:s}\n".format(datetime.datetime.fromtimestamp(repo.head.commit.committed_date).strftime('%Y-%m-%d %H:%M:%S'))
except Exception:
pass
if verbose:
print(out_str)
# Save the report to a file
if save_results:
mkdirP(dir_path)
with open(os.path.join(dir_path, file_name), 'w') as f:
f.write(out_str)
return out_str
def savePlots(self, output_dir, file_name, show_plots=True, ret_figs=False):
""" Show plots of the estimated trajectory.
Arguments:
output_dir: [str] Path to the output directory.
file_name: [str] File name which will be used for saving plots.
Keyword_arguments:
show_plots: [bools] Show the plots on the screen. True by default.
ret_figs: [bool] If True, it will return a dictionary of figure handles for every plot. It will
override the show_plots and set them to False, and it will not save any plots.
Return:
fig_pickle_dict: [dict] Dictionary of pickled figure handles for every plot. To unpickle the
figure objects, run:
fig = pickle.loads(fig_pickle_dict[key])
where key is the dictionary key, e.g. "lags_all".
"""
if output_dir is None:
output_dir = '.'
if file_name is None:
file_name = 'blank'
# Dictionary which will hold figure handles for every plot
fig_pickle_dict = {}
# Override the status of saving commands if the figures should be returned
save_results_prev_status = self.save_results
if ret_figs:
self.save_results = False
show_plots = False
# Get the first reference time
t0 = min([obs.time_data[0] for obs in self.observations])
# Plot spatial residuals per observing station
for obs in sorted(self.observations, key=lambda x: x.rbeg_ele, reverse=True):
### PLOT SPATIAL RESIDUALS PER STATION ###
##################################################################################################
# Plot vertical residuals
plt.scatter(obs.time_data, obs.v_residuals, c='r', \
label='Vertical, RMSD = {:.2f} m'.format(obs.v_res_rms), zorder=3, s=4, marker='o')
# Plot horizontal residuals
plt.scatter(obs.time_data, obs.h_residuals, c='b', \
label='Horizontal, RMSD = {:.2f} m'.format(obs.h_res_rms), zorder=3, s=20, marker='+')
# Mark ignored points
if np.any(obs.ignore_list):
ignored_times = obs.time_data[obs.ignore_list > 0]
ignored_v_res = obs.v_residuals[obs.ignore_list > 0]
ignored_h_res = obs.h_residuals[obs.ignore_list > 0]
plt.scatter(ignored_times, ignored_v_res, facecolors='none', edgecolors='k', marker='o', \
zorder=3, s=20, label='Ignored points')
plt.scatter(ignored_times, ignored_h_res, facecolors='none', edgecolors='k', marker='o',
zorder=3, s=20)
plt.title('Residuals, station ' + str(obs.station_id))
plt.xlabel('Time (s)')
plt.ylabel('Residuals (m)')
plt.grid()
plt.legend(prop={'size': LEGEND_TEXT_SIZE})
# Set the residual limits to +/-10m if they are smaller than that
if (np.max(np.abs(obs.v_residuals)) < 10) and (np.max(np.abs(obs.h_residuals)) < 10):
plt.ylim([-10, 10])
# Pickle the figure
if ret_figs:
fig_pickle_dict["spatial_residuals_{:s}".format(str(obs.station_id))] \
= pickle.dumps(plt.gcf(), protocol=2)
if self.save_results:
savePlot(plt, file_name + '_' + str(obs.station_id) + '_spatial_residuals.' \
+ self.plot_file_type, output_dir)
if show_plots:
plt.show()
else:
plt.clf()
plt.close()
##################################################################################################
# marker type, size multiplier
markers = [
['x', 2 ],
['+', 8 ],
['o', 1 ],
['s', 1 ],
['d', 1 ],
['v', 1 ],
['*', 1.5 ],
]
if self.plot_all_spatial_residuals:
### PLOT ALL SPATIAL RESIDUALS VS. TIME ###
##################################################################################################
for obs in sorted(self.observations, key=lambda x: x.rbeg_ele, reverse=True):
# Plot vertical residuals
vres_plot = plt.scatter(obs.time_data, obs.v_residuals, marker='o', s=4, \
label='{:s}, vertical, RMSD = {:.2f} m'.format(str(obs.station_id), obs.v_res_rms), \
zorder=3)
# Plot horizontal residuals
plt.scatter(obs.time_data, obs.h_residuals, c=vres_plot.get_facecolor(), marker='+', \
label='{:s}, horizontal, RMSD = {:.2f} m'.format(str(obs.station_id), obs.h_res_rms), \
zorder=3)
# Mark ignored points
if np.any(obs.ignore_list):
ignored_times = obs.time_data[obs.ignore_list > 0]
ignored_v_res = obs.v_residuals[obs.ignore_list > 0]
ignored_h_res = obs.h_residuals[obs.ignore_list > 0]
plt.scatter(ignored_times, ignored_v_res, facecolors='none', edgecolors='k', marker='o', \
zorder=3, s=20)
plt.scatter(ignored_times, ignored_h_res, facecolors='none', edgecolors='k', marker='o',
zorder=3, s=20)
plt.title('All spatial residuals')
plt.xlabel('Time (s)')
plt.ylabel('Residuals (m)')
plt.grid()
plt.legend(prop={'size': LEGEND_TEXT_SIZE})
# Set the residual limits to +/-10m if they are smaller than that
if np.max(np.abs(plt.gca().get_ylim())) < 10:
plt.ylim([-10, 10])
# Pickle the figure
if ret_figs:
fig_pickle_dict["all_spatial_residuals"] = pickle.dumps(plt.gcf(), protocol=2)
if self.save_results:
savePlot(plt, file_name + '_all_spatial_residuals.' + self.plot_file_type, output_dir)
if show_plots:
plt.show()
else:
plt.clf()
plt.close()
##################################################################################################
### PLOT ALL SPATIAL RESIDUALS VS LENGTH ###
##################################################################################################
for obs in sorted(self.observations, key=lambda x: x.rbeg_ele, reverse=True):
# Plot vertical residuals
vres_plot = plt.scatter(obs.state_vect_dist/1000, obs.v_residuals, marker='o', s=4, \
label='{:s}, vertical, RMSD = {:.2f} m'.format(str(obs.station_id), obs.v_res_rms), \
zorder=3)
# Plot horizontal residuals
plt.scatter(obs.state_vect_dist/1000, obs.h_residuals, c=vres_plot.get_facecolor(),
marker='+', label='{:s}, horizontal, RMSD = {:.2f} m'.format(str(obs.station_id), \
obs.h_res_rms), zorder=3)
# Mark ignored points
if np.any(obs.ignore_list):
ignored_length = obs.state_vect_dist[obs.ignore_list > 0]
ignored_v_res = obs.v_residuals[obs.ignore_list > 0]
ignored_h_res = obs.h_residuals[obs.ignore_list > 0]
plt.scatter(ignored_length/1000, ignored_v_res, facecolors='none', edgecolors='k', \
marker='o', zorder=3, s=20)
plt.scatter(ignored_length/1000, ignored_h_res, facecolors='none', edgecolors='k', \
marker='o', zorder=3, s=20)
plt.title('All spatial residuals')
plt.xlabel('Length (km)')
plt.ylabel('Residuals (m)')
plt.grid()
plt.legend(prop={'size': LEGEND_TEXT_SIZE})
# Set the residual limits to +/-10m if they are smaller than that
if np.max(np.abs(plt.gca().get_ylim())) < 10:
plt.ylim([-10, 10])
# Pickle the figure
if ret_figs:
fig_pickle_dict["all_spatial_residuals_length"] = pickle.dumps(plt.gcf(), protocol=2)
if self.save_results:
savePlot(plt, file_name + '_all_spatial_residuals_length.' + self.plot_file_type, output_dir)
if show_plots:
plt.show()
else:
plt.clf()
plt.close()
##################################################################################################
### PLOT TOTAL SPATIAL RESIDUALS VS LENGTH ###
##################################################################################################
for i, obs in enumerate(sorted(self.observations, key=lambda x:x.rbeg_ele, reverse=True)):
marker, size_multiplier = markers[i%len(markers)]
# Compute total residuals, take the signs from vertical residuals
tot_res = np.sign(obs.v_residuals)*np.hypot(obs.v_residuals, obs.h_residuals)
# Plot total residuals
plt.scatter(obs.state_vect_dist/1000, tot_res, marker=marker, s=10*size_multiplier, \
label='{:s}'.format(str(obs.station_id)), zorder=3)
# Mark ignored points
if np.any(obs.ignore_list):
ignored_length = obs.state_vect_dist[obs.ignore_list > 0]
ignored_tot_res = tot_res[obs.ignore_list > 0]
plt.scatter(ignored_length/1000, ignored_tot_res, facecolors='none', edgecolors='k', \
marker='o', zorder=3, s=20)
plt.title('Total spatial residuals')
plt.xlabel('Length (km)')
plt.ylabel('Residuals (m), vertical sign')
plt.grid()
plt.legend(prop={'size': LEGEND_TEXT_SIZE})
# Set the residual limits to +/-10m if they are smaller than that
if np.max(np.abs(plt.gca().get_ylim())) < 10:
plt.ylim([-10, 10])
# Pickle the figure
if ret_figs:
fig_pickle_dict["total_spatial_residuals_length"] = pickle.dumps(plt.gcf(), protocol=2)
if self.save_results:
savePlot(plt, file_name + '_total_spatial_residuals_length.' + self.plot_file_type, \
output_dir)
if show_plots:
plt.show()
else:
plt.clf()
plt.close()
##################################################################################################
### PLOT ALL TOTAL SPATIAL RESIDUALS VS HEIGHT ###
##################################################################################################
for i, obs in enumerate(sorted(self.observations, key=lambda x:x.rbeg_ele, reverse=True)):
marker, size_multiplier = markers[i%len(markers)]
# Calculate root mean square of the total residuals
total_res_rms = np.sqrt(obs.v_res_rms**2 + obs.h_res_rms**2)
# Compute total residuals, take the signs from vertical residuals
tot_res = np.sign(obs.v_residuals)*np.hypot(obs.v_residuals, obs.h_residuals)
# Plot total residuals
plt.scatter(tot_res, obs.meas_ht/1000, marker=marker, \
s=10*size_multiplier, label='{:s}, RMSD = {:.2f} m'.format(str(obs.station_id), \
total_res_rms), zorder=3)
# Mark ignored points
if np.any(obs.ignore_list):
ignored_ht = obs.model_ht[obs.ignore_list > 0]
ignored_tot_res = np.sign(obs.v_residuals[obs.ignore_list > 0])\
*np.hypot(obs.v_residuals[obs.ignore_list > 0], obs.h_residuals[obs.ignore_list > 0])
plt.scatter(ignored_tot_res, ignored_ht/1000, facecolors='none', edgecolors='k', \
marker='o', zorder=3, s=20)
plt.title('All spatial residuals')
plt.xlabel('Total deviation (m)')
plt.ylabel('Height (km)')
plt.grid()
plt.legend(prop={'size': LEGEND_TEXT_SIZE})
# Set the residual limits to +/-10m if they are smaller than that
if np.max(np.abs(plt.gca().get_xlim())) < 10:
plt.gca().set_xlim([-10, 10])
# Pickle the figure
if ret_figs:
fig_pickle_dict["all_spatial_total_residuals_height"] = pickle.dumps(plt.gcf(), protocol=2)
if self.save_results:
savePlot(plt, file_name + '_all_spatial_total_residuals_height.' + self.plot_file_type, \
output_dir)
if show_plots:
plt.show()
else:
plt.clf()
plt.close()
##################################################################################################
# # Plot lag per observing station
# for obs in sorted(self.observations, key=lambda x: x.rbeg_ele, reverse=True):
# ### PLOT LAG ###
# ##################################################################################################
# fig, ax1 = plt.subplots()
# # Extract lag points that were not ignored
# used_times = obs.time_data[obs.ignore_list == 0]
# used_lag = obs.lag[obs.ignore_list == 0]
# if not obs.ignore_station:
# # Plot the lag
# ax1.plot(used_lag, used_times, color='r', marker='x', label='Lag', zorder=3)
# # Plot the Jacchia fit
# ax1.plot(jacchiaLagFunc(obs.time_data, *obs.jacchia_fit), obs.time_data, color='b',
# label='Jacchia fit', zorder=3)
# # Plot ignored lag points
# if np.any(obs.ignore_list):
# ignored_times = obs.time_data[obs.ignore_list > 0]
# ignored_lag = obs.lag[obs.ignore_list > 0]
# ax1.scatter(ignored_lag, ignored_times, c='k', marker='+', zorder=4, \
# label='Lag, ignored points')
# ax1.legend(prop={'size': LEGEND_TEXT_SIZE})
# plt.title('Lag, station ' + str(obs.station_id))
# ax1.set_xlabel('Lag (m)')
# ax1.set_ylabel('Time (s)')
# ax1.set_ylim(min(obs.time_data), max(obs.time_data))
# ax1.grid()
# ax1.invert_yaxis()
# # Set the height axis
# ax2 = ax1.twinx()
# ax2.set_ylim(min(obs.meas_ht)/1000, max(obs.meas_ht)/1000)
# ax2.set_ylabel('Height (km)')
# plt.tight_layout()
# if self.save_results:
# savePlot(plt, file_name + '_' + str(obs.station_id) + '_lag.' + self.plot_file_type, output_dir)
# if show_plots:
# plt.show()
# else:
# plt.clf()
# plt.close()
# ##################################################################################################
# Generate a list of colors to use for markers
colors = cm.viridis(np.linspace(0, 0.8, len(self.observations)))
# Only use one type of markers if there are not a lot of stations
plot_markers = ['x']
# Keep colors non-transparent if there are not a lot of stations
alpha = 1.0
# If there are more than 5 stations, interleave the colors with another colormap and change up
# markers
if len(self.observations) > 5:
colors_alt = cm.inferno(np.linspace(0, 0.8, len(self.observations)))
for i in range(len(self.observations)):
if i%2 == 1:
colors[i] = colors_alt[i]
plot_markers.append("+")
# Add transparency for more stations
alpha = 0.75
# Sort observations by first height to preserve color linearity
obs_ht_sorted = sorted(self.observations, key=lambda x: x.model_ht[0])
### PLOT ALL LAGS ###
######################################################################################################
# Plot lags from each station on a single plot
for i, obs in enumerate(obs_ht_sorted):
# Extract lag points that were not ignored
used_times = obs.time_data[obs.ignore_list == 0]
used_lag = obs.lag[obs.ignore_list == 0]
# Choose the marker
marker = plot_markers[i%len(plot_markers)]
# Plot the lag
plt_handle = plt.plot(used_lag, used_times, marker=marker, label=str(obs.station_id),
zorder=3, markersize=3, color=colors[i], alpha=alpha)
# Plot ignored lag points
if np.any(obs.ignore_list):
ignored_times = obs.time_data[obs.ignore_list > 0]
ignored_lag = obs.lag[obs.ignore_list > 0]
plt.scatter(ignored_lag, ignored_times, facecolors='k', edgecolors=plt_handle[0].get_color(),
marker='o', s=8, zorder=4, label='{:s} ignored points'.format(str(obs.station_id)))
# Plot the Jacchia fit on all observations
if self.show_jacchia:
time_all = np.sort(np.hstack([obs.time_data for obs in self.observations]))
time_jacchia = np.linspace(np.min(time_all), np.max(time_all), 1000)
plt.plot(jacchiaLagFunc(time_jacchia, *self.jacchia_fit), time_jacchia, label='Jacchia fit',
zorder=3, color='k', alpha=0.5, linestyle="dashed")
plt.title('Lags, all stations')
plt.xlabel('Lag (m)')
plt.ylabel('Time (s)')
plt.legend(prop={'size': LEGEND_TEXT_SIZE})
plt.grid()
plt.gca().invert_yaxis()
# Pickle the figure
if ret_figs:
fig_pickle_dict["lags_all"] = pickle.dumps(plt.gcf(), protocol=2)
if self.save_results:
savePlot(plt, file_name + '_lags_all.' + self.plot_file_type, output_dir)
if show_plots:
plt.show()
else:
plt.clf()
plt.close()
######################################################################################################
### PLOT VELOCITY ###
######################################################################################################
# Possible markers for velocity
vel_markers = ['x', '+', '.', '2']
fig, ax1 = plt.subplots()
vel_max = -np.inf
vel_min = np.inf
ht_max = -np.inf
ht_min = np.inf
t_max = -np.inf
t_min = np.inf
first_ignored_plot = True
# Plot velocities from each observed site
for i, obs in enumerate(obs_ht_sorted):
# Mark ignored velocities
if np.any(obs.ignore_list):
# Extract data that is not ignored
ignored_times = obs.time_data[1:][obs.ignore_list[1:] > 0]
ignored_velocities = obs.velocities[1:][obs.ignore_list[1:] > 0]
# Set the label only for the first occurence
if first_ignored_plot:
ax1.scatter(ignored_velocities/1000, ignored_times, facecolors='none', edgecolors='k', \
zorder=4, s=30, label='Ignored points')
first_ignored_plot = False
else:
ax1.scatter(ignored_velocities/1000, ignored_times, facecolors='none', edgecolors='k', \
zorder=4, s=30)
# Plot all point to point velocities
ax1.scatter(obs.velocities[1:]/1000, obs.time_data[1:], marker=vel_markers[i%len(vel_markers)],
c=colors[i].reshape(1,-1), alpha=alpha, label='{:s}'.format(str(obs.station_id)), zorder=3)
# Determine the max/min velocity and height, as this is needed for plotting both height/time axes
vel_max = max(np.max(obs.velocities[1:]/1000), vel_max)
vel_min = min(np.min(obs.velocities[1:]/1000), vel_min)
ht_max = max(np.max(obs.meas_ht), ht_max)
ht_min = min(np.min(obs.meas_ht), ht_min)
t_max = max(np.max(obs.time_data), t_max)
t_min = min(np.min(obs.time_data), t_min)
# Plot the velocity calculated from the Jacchia model
if self.show_jacchia:
t_vel = np.linspace(t_min, t_max, 1000)
ax1.plot(jacchiaVelocityFunc(t_vel, self.jacchia_fit[0], self.jacchia_fit[1], self.v_init)/1000, \
t_vel, label='Jacchia fit', alpha=0.5, color='k')
plt.title('Velocity')
ax1.set_xlabel('Velocity (km/s)')
ax1.set_ylabel('Time (s)')
ax1.legend(prop={'size': LEGEND_TEXT_SIZE})
ax1.grid()
# Set absolute limits for velocities
vel_min = max(vel_min, -20)
vel_max = min(vel_max, 100)
# Set velocity limits to +/- 3 km/s
ax1.set_xlim([vel_min - 3, vel_max + 3])
# Set time axis limits
ax1.set_ylim([t_min, t_max])
ax1.invert_yaxis()
# Set the height axis
ax2 = ax1.twinx()
ax2.set_ylim(ht_min/1000, ht_max/1000)
ax2.set_ylabel('Height (km)')
plt.tight_layout()
# Pickle the figure
if ret_figs:
fig_pickle_dict["velocities"] = pickle.dumps(plt.gcf(), protocol=2)
if self.save_results:
savePlot(plt, file_name + '_velocities.' + self.plot_file_type, output_dir)
if show_plots:
plt.show()
else:
plt.clf()
plt.close()
######################################################################################################
### PLOT DISTANCE FROM RADIANT STATE VECTOR POSITION ###
######################################################################################################
fig, ax1 = plt.subplots()
for i, obs in enumerate(obs_ht_sorted):
# Extract points that were not ignored
used_times = obs.time_data[obs.ignore_list == 0]
used_dists = obs.state_vect_dist[obs.ignore_list == 0]
# Choose the marker
marker = plot_markers[i%len(plot_markers)]
plt_handle = ax1.plot(used_dists/1000, used_times, marker=marker, label=str(obs.station_id), \
zorder=3, markersize=3, color=colors[i], alpha=alpha)
# Plot ignored points
if np.any(obs.ignore_list):
ignored_times = obs.time_data[obs.ignore_list > 0]
ignored_dists = obs.state_vect_dist[obs.ignore_list > 0]
ax1.scatter(ignored_dists/1000, ignored_times, facecolors='k',
edgecolors=plt_handle[0].get_color(), marker='o', s=8, zorder=5, \
label='{:s} ignored points'.format(str(obs.station_id)))
# Add the fitted velocity line
if self.velocity_fit is not None:
# Get time data range
t_min = min([np.min(obs.time_data) for obs in self.observations])
t_max = max([np.max(obs.time_data) for obs in self.observations])
t_range = np.linspace(t_min, t_max, 100)
ax1.plot(lineFunc(t_range, *self.velocity_fit)/1000, t_range, label='Velocity fit', \
linestyle='--', alpha=0.5, zorder=3)
title = "Distances from state vector"
if self.estimate_timing_vel:
if self.timing_res is None:
title += ", Time residuals not calculated"
else:
title += ", Time residuals = {:.3e} s".format(self.timing_res)
plt.title(title)
ax1.set_ylabel('Time (s)')
ax1.set_xlabel('Distance from state vector (km)')
ax1.legend(prop={'size': LEGEND_TEXT_SIZE})
ax1.grid()
# Set time axis limits
ax1.set_ylim([t_min, t_max])
ax1.invert_yaxis()
# Set the height axis
ax2 = ax1.twinx()
ax2.set_ylim(ht_min/1000, ht_max/1000)
ax2.set_ylabel('Height (km)')
# Pickle the figure
if ret_figs:
fig_pickle_dict["lengths"] = pickle.dumps(plt.gcf(), protocol=2)
if self.save_results:
savePlot(plt, file_name + '_lengths.' + self.plot_file_type, output_dir)
if show_plots:
plt.show()
else:
plt.clf()
plt.close()
######################################################################################################
### Plot lat/lon of the meteor ###
# Calculate mean latitude and longitude of all meteor points
met_lon_mean = meanAngle([x for x in obs.meas_lon for obs in self.observations])
met_lat_mean = meanAngle([x for x in obs.meas_lat for obs in self.observations])
# Put coordinates of all sites and the meteor in the one list
lat_list = [obs.lat for obs in self.observations]
lat_list.append(met_lat_mean)
lon_list = [obs.lon for obs in self.observations]
lon_list.append(met_lon_mean)
# Put edge points of the meteor in the list
lat_list.append(self.rbeg_lat)
lon_list.append(self.rbeg_lon)
lat_list.append(self.rend_lat)
lon_list.append(self.rend_lon)
lat_list.append(self.orbit.lat_ref)
lon_list.append(self.orbit.lon_ref)
# Init the map
m = GroundMap(lat_list, lon_list, border_size=50, color_scheme='light')
# Create a list of unique stations names, such that if there are multiple stations with identical
# coordinates but only differ in the suffix, they will be plotted as one station
station_name_mapping = {}
for i, obs in enumerate(self.observations):
# If the station is already in the mapping, skip it
if obs.station_id in station_name_mapping:
continue
# Check if there are duplicate coordinates
for obs2 in self.observations[i+1:]:
if (obs.lat == obs2.lat) and (obs.lon == obs2.lon):
# Only take the common part of the two station names
common_name = os.path.commonprefix([obs.station_id, obs2.station_id])
# Strip "_" from the end of the common name
common_name = common_name.rstrip("_")
# Get the difference between the two station names
diff1 = obs.station_id[len(common_name):]
diff2 = obs2.station_id[len(common_name):]
# If the difference is e.g. _1, _2, _3, etc., it is a suffix
# Strip out _
# This will catch duplicates such as USL00N and USL00N_2
if (str(diff1).startswith("_") or str(diff2).startswith("_")) \
and ((diff1.strip("_").isdigit()) or (diff2.strip("_").isdigit())):
station_name_mapping[obs.station_id] = common_name
station_name_mapping[obs2.station_id] = common_name
# If the station is not in the mapping, add it
if obs.station_id not in station_name_mapping:
station_name_mapping[obs.station_id] = obs.station_id
# Plot locations of all stations and measured positions of the meteor
plotted_codes = []
for i, obs in enumerate(sorted(self.observations, key=lambda x:np.min(x.state_vect_dist), reverse=False)):
# Plot measured points
m.plot(obs.meas_lat[obs.ignore_list == 0], obs.meas_lon[obs.ignore_list == 0], c='r')
# Plot ignored points
if np.any(obs.ignore_list != 0):
m.scatter(obs.meas_lat[obs.ignore_list != 0], obs.meas_lon[obs.ignore_list != 0], c='k', \
marker='x', s=5, alpha=0.5)
station_name = station_name_mapping[obs.station_id]
# If the station ID is already plotted, skip it
if station_name in plotted_codes:
continue
# Extract marker type and size multiplier
marker, sm = markers[i%len(markers)]
# Plot stations
m.scatter(obs.lat, obs.lon, s=sm*10, label=str(station_name), marker=marker)
# Add the station to the list of plotted stations
plotted_codes.append(station_name)
# Plot a point marking the final point of the meteor
m.scatter(self.htmin_lat, self.htmin_lon, c='k', marker='+', s=50, alpha=0.75, label='Lowest height')
# If there are more than 10 observations, make the legend font smaller
legend_font_size = LEGEND_TEXT_SIZE
if len(self.observations) >= 10:
legend_font_size = 5
plt.legend(loc='upper left', prop={'size': legend_font_size})
# Pickle the figure
if ret_figs:
fig_pickle_dict["ground_track"] = pickle.dumps(plt.gcf(), protocol=2)
if self.save_results:
savePlot(plt, file_name + '_ground_track.' + self.plot_file_type, output_dir)
if show_plots:
plt.show()
else:
plt.clf()
plt.close()
######################################################################################################
# # Plot angular residuals for every station separately
# for obs in sorted(self.observations, key=lambda x: x.rbeg_ele, reverse=True):
# # Calculate residuals in arcseconds
# res = np.degrees(obs.ang_res)*3600
# # Mark ignored points
# if np.any(obs.ignore_list):
# ignored_times = obs.time_data[obs.ignore_list > 0]
# ignored_residuals = res[obs.ignore_list > 0]
# plt.scatter(ignored_times, ignored_residuals, facecolors='none', edgecolors='k', s=20, \
# zorder=4, label='Ignored points')
# # Calculate the RMSD of the residuals in arcsec
# res_rms = np.degrees(obs.ang_res_std)*3600
# # Plot residuals
# plt.scatter(obs.time_data, res, label='Angle, RMSD = {:.2f}"'.format(res_rms), s=2, zorder=3)
# plt.title('Observed vs. Radiant LoS Residuals, station ' + str(obs.station_id))
# plt.ylabel('Angle (arcsec)')
# plt.xlabel('Time (s)')
# # The lower limit is always at 0
# plt.ylim(ymin=0)
# plt.grid()
# plt.legend(prop={'size': LEGEND_TEXT_SIZE})
# if self.save_results:
# savePlot(plt, file_name + '_' + str(obs.station_id) + '_angular_residuals.' \
# + self.plot_file_type, output_dir)
# if show_plots:
# plt.show()
# else:
# plt.clf()
# plt.close()
# Plot angular residuals from all stations
first_ignored_plot = True
for i, obs in enumerate(sorted(self.observations, key=lambda x:x.rbeg_ele, reverse=True)):
# Extract marker type and size multiplier
marker, sm = markers[i%len(markers)]
# Calculate residuals in arcseconds
res = np.degrees(obs.ang_res)*3600
# Mark ignored points
if np.any(obs.ignore_list):
ignored_times = obs.time_data[obs.ignore_list > 0]
ignored_residuals = res[obs.ignore_list > 0]
# Plot the label only for the first occurence
if first_ignored_plot:
plt.scatter(ignored_times, ignored_residuals, facecolors='none', edgecolors='k', s=20, \
zorder=4, label='Ignored points')
first_ignored_plot = False
else:
plt.scatter(ignored_times, ignored_residuals, facecolors='none', edgecolors='k', s=20, \
zorder=4)
# Calculate the RMS of the residuals in arcsec
res_rms = np.degrees(obs.ang_res_std)*3600
# Plot residuals
plt.scatter(obs.time_data, res, s=10*sm, zorder=3, label=str(obs.station_id) + \
', RMSD = {:.2f}"'.format(res_rms), marker=marker)
plt.title('Observed vs. Radiant LoS Residuals, all stations')
plt.ylabel('Angle (arcsec)')
plt.xlabel('Time (s)')
# The lower limit is always at 0
plt.ylim(ymin=0)
plt.grid()
plt.legend(prop={'size': LEGEND_TEXT_SIZE})
# Pickle the figure
if ret_figs:
fig_pickle_dict["all_angular_residuals"] = pickle.dumps(plt.gcf(), protocol=2)
if self.save_results:
savePlot(plt, file_name + '_all_angular_residuals.' + self.plot_file_type, output_dir)
if show_plots:
plt.show()
else:
plt.clf()
plt.close()
######################################################################################################
### PLOT ABSOLUTE MAGNITUDES VS TIME, IF ANY ###
first_ignored_plot = True
if np.any([obs.absolute_magnitudes is not None for obs in self.observations]):
# Go through all observations
for obs in sorted(self.observations, key=lambda x: x.rbeg_ele, reverse=True):
# Check if the absolute magnitude was given
if obs.absolute_magnitudes is not None:
# Filter out None absolute magnitudes
filter_mask = np.array([abs_mag is not None for abs_mag in obs.absolute_magnitudes])
# Extract data that is not ignored
used_times = obs.time_data[filter_mask & (obs.ignore_list == 0)]
used_magnitudes = obs.absolute_magnitudes[filter_mask & (obs.ignore_list == 0)]
# Filter out magnitudes fainter than mag 8
mag_mask = np.array([abs_mag < 8 for abs_mag in used_magnitudes])
# Avoid crash if no magnitudes exceed the threshold
if np.any(mag_mask):
used_times = used_times[mag_mask]
used_magnitudes = used_magnitudes[mag_mask]
else:
continue
plt_handle = plt.plot(used_times, used_magnitudes, marker='x', \
label=str(obs.station_id), zorder=3)
# Mark ignored absolute magnitudes
if np.any(obs.ignore_list):
# Extract data that is ignored
ignored_times = obs.time_data[filter_mask & (obs.ignore_list > 0)]
ignored_magnitudes = obs.absolute_magnitudes[filter_mask & (obs.ignore_list > 0)]
plt.scatter(ignored_times, ignored_magnitudes, facecolors='k', \
edgecolors=plt_handle[0].get_color(), marker='o', s=8, zorder=4)
plt.xlabel('Time (s)')
plt.ylabel('Absolute magnitude')
plt.gca().invert_yaxis()
plt.legend(prop={'size': LEGEND_TEXT_SIZE})
plt.grid()
# Pickle the figure
if ret_figs:
fig_pickle_dict["abs_mag"] = pickle.dumps(plt.gcf(), protocol=2)
if self.save_results:
savePlot(plt, file_name + '_abs_mag.' + self.plot_file_type, output_dir)
if show_plots:
plt.show()
else:
plt.clf()
plt.close()
######################################################################################################
### PLOT ABSOLUTE MAGNITUDES VS HEIGHT, IF ANY ###
first_ignored_plot = True
if np.any([obs.absolute_magnitudes is not None for obs in self.observations]):
# Go through all observations
for obs in sorted(self.observations, key=lambda x: x.rbeg_ele, reverse=True):
# Check if the absolute magnitude was given
if obs.absolute_magnitudes is not None:
# Filter out None absolute magnitudes
filter_mask = np.array([abs_mag is not None for abs_mag in obs.absolute_magnitudes])
# Extract data that is not ignored
used_heights = obs.model_ht[filter_mask & (obs.ignore_list == 0)]
used_magnitudes = obs.absolute_magnitudes[filter_mask & (obs.ignore_list == 0)]
# Filter out magnitudes fainter than mag 8
mag_mask = np.array([abs_mag < 8 for abs_mag in used_magnitudes])
# Avoid crash if no magnitudes exceed the threshold
if np.any(mag_mask):
used_heights = used_heights[mag_mask]
used_magnitudes = used_magnitudes[mag_mask]
else:
continue
plt_handle = plt.plot(used_magnitudes, used_heights/1000, marker='x', \
label=str(obs.station_id), zorder=3)
# Mark ignored absolute magnitudes
if np.any(obs.ignore_list):
# Extract data that is ignored
ignored_heights = obs.model_ht[filter_mask & (obs.ignore_list > 0)]
ignored_magnitudes = obs.absolute_magnitudes[filter_mask & (obs.ignore_list > 0)]
plt.scatter(ignored_magnitudes, ignored_heights/1000, facecolors='k', \
edgecolors=plt_handle[0].get_color(), marker='o', s=8, zorder=4)
plt.xlabel('Absolute magnitude')
plt.ylabel('Height (km)')
plt.gca().invert_xaxis()
plt.legend(prop={'size': LEGEND_TEXT_SIZE})
plt.grid()
# Pickle the figure
if ret_figs:
fig_pickle_dict["abs_mag_ht"] = pickle.dumps(plt.gcf(), protocol=2)
if self.save_results:
savePlot(plt, file_name + '_abs_mag_ht.' + self.plot_file_type, output_dir)
if show_plots:
plt.show()
else:
plt.clf()
plt.close()
######################################################################################################
# Plot the orbit in 3D
if self.calc_orbit:
# Check if the orbit was properly calculated
if self.orbit.ra_g is not None:
# Construct a list of orbital elements of the meteor
orbit_params = np.array([
[self.orbit.a, self.orbit.e, np.degrees(self.orbit.i), np.degrees(self.orbit.peri), \
np.degrees(self.orbit.node)]
])
if (output_dir is None) or (file_name is None):
plot_path = None
save_results = False
else:
plot_path = os.path.join(output_dir, file_name)
save_results = self.save_results
# Run orbit plotting procedure
plotOrbits(orbit_params, jd2Date(self.jdt_ref, dt_obj=True), save_plots=save_results, \
plot_path=plot_path, linewidth=1, color_scheme='light', \
plot_file_type=self.plot_file_type)
plt.tight_layout()
# Pickle the figure
if ret_figs:
fig_pickle_dict["orbit"] = pickle.dumps(plt.gcf(), protocol=2)
if show_plots:
plt.show()
else:
plt.clf()
plt.close()
# Restore the status of save results scripts and return a dictionary of pickled figure objects
if ret_figs:
self.save_results = save_results_prev_status
return fig_pickle_dict
def showLoS(self):
    """ Show an interactive 3D plot of the stations and the lines of sight solution.

    Plots every station's ECI position, a subsampled set (every other point) of its measured
    lines of sight drawn out to their closest-approach point on the fitted radiant line, and
    the radiant direction vector itself anchored at the first trajectory point.

    All ECI coordinates are converted from meters to kilometers for display. The method blocks
    on plt.show() until the figure window is closed; it returns nothing.
    """

    # Compute ECI values if they have not been computed
    if self.observations[0].model_eci is None:
        self.calcECIEqAltAz(self.state_vect_mini, self.radiant_eci_mini, self.observations)

    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')

    # Calculate the position of the state vector (aka. first point on the trajectory), in km
    traj_point = self.observations[0].model_eci[0]/1000

    # Calculate the length to the last point on the trajectory (km), used to scale the radiant
    # direction arrow so it spans the whole observed track
    meteor_len = np.sqrt(np.sum((self.observations[0].model_eci[0]/1000 \
        - self.observations[0].model_eci[-1]/1000)**2))

    # Calculate the plot limits from all station positions plus the first trajectory point
    x_list = [x_stat for obs in self.observations for x_stat in obs.stat_eci_los[:, 0]/1000]
    x_list.append(traj_point[0])
    y_list = [y_stat for obs in self.observations for y_stat in obs.stat_eci_los[:, 1]/1000]
    y_list.append(traj_point[1])
    z_list = [z_stat for obs in self.observations for z_stat in obs.stat_eci_los[:, 2]/1000]
    z_list.append(traj_point[2])

    x_min, x_max = min(x_list), max(x_list)
    y_min, y_max = min(y_list), max(y_list)
    z_min, z_max = min(z_list), max(z_list)

    # Normalize the plot limits so they are rectangular (equal extent on all three axes, so the
    # 3D geometry is not visually distorted)
    delta_x = x_max - x_min
    delta_y = y_max - y_min
    delta_z = z_max - z_min
    delta_max = max([delta_x, delta_y, delta_z])

    # Expand each axis symmetrically about its center up to the largest extent
    x_diff = delta_max - delta_x
    x_min -= x_diff/2
    x_max += x_diff/2

    y_diff = delta_max - delta_y
    y_min -= y_diff/2
    y_max += y_diff/2

    z_diff = delta_max - delta_z
    z_min -= z_diff/2
    z_max += z_diff/2

    # Plot stations and observations
    for obs in self.observations:

        # Station positions
        ax.scatter(obs.stat_eci_los[:, 0]/1000, obs.stat_eci_los[:, 1]/1000, obs.stat_eci_los[:, 2]/1000,\
            s=20)

        # Plot lines of sight
        for i, (stat_eci_los, meas_eci_los) in enumerate(zip(obs.stat_eci_los, obs.meas_eci_los)):

            # Take every other line of sight only, to keep the plot readable
            if i%2 == 1:
                continue

            # Calculate the point on the trajectory closest to this line of sight
            traj_pt, _, _ = findClosestPoints(stat_eci_los, meas_eci_los, self.state_vect_mini,
                self.radiant_eci_mini)

            # Length of the sight line from the station to its closest trajectory point (km)
            vect_len = np.sqrt(np.sum((stat_eci_los - traj_pt)**2))/1000

            # Lines of sight (arrow_length_ratio=0 draws plain lines without arrowheads)
            ax.quiver(stat_eci_los[0]/1000, stat_eci_los[1]/1000, stat_eci_los[2]/1000,
                meas_eci_los[0]/1000, meas_eci_los[1]/1000, meas_eci_los[2]/1000,
                length=vect_len, normalize=True, arrow_length_ratio=0, color='blue', alpha=0.5)

    # Plot the radiant state vector (negated so the arrow points along the direction of flight)
    rad_x, rad_y, rad_z = -self.radiant_eci_mini/1000
    rst_x, rst_y, rst_z = traj_point
    ax.quiver(rst_x, rst_y, rst_z, rad_x, rad_y, rad_z, length=meteor_len, normalize=True, color='red',
        arrow_length_ratio=0.1)

    ax.set_xlim([x_min, x_max])
    ax.set_ylim([y_min, y_max])
    ax.set_zlim([z_min, z_max])

    ax.set_xlabel('X (km)')
    ax.set_ylabel('Y (km)')
    ax.set_zlabel('Z (km)')

    # Change the size of ticks (make them smaller)
    ax.tick_params(axis='both', which='major', labelsize=8)

    plt.show()
def calcStationIncidentAngles(self, state_vect, radiant_eci, observations):
    """ Calculate angles between the radiant vector and the vector pointing from a station to the
        initial state vector.

    Arguments:
        state_vect: [ndarray] (x, y, z) ECI coordinates of the initial state vector (meters).
        radiant_eci: [ndarray] (x, y, z) components of the unit radiant direction vector.
        observations: [list] A list of ObservationPoints objects which hold measurements from
            individual stations.

    Return:
        angles: [list] A list of angles (radians), one per station, in the same order as
            the given observations.
    """

    angles = []

    for obs in observations:

        # Calculate the unit vector pointing from the station to the state vector
        w = vectNorm(state_vect - obs.stat_eci)

        # Clip the dot product to the [-1, 1] arccos domain - floating point round-off on two
        # unit vectors can produce values marginally outside it, which would yield NaN
        cos_angle = np.clip(np.dot(radiant_eci, w), -1.0, 1.0)

        # Calculate the angle between the pointing vector and the radiant vector
        q_r = np.arccos(cos_angle)

        angles.append(q_r)

    return angles
def run(self, _rerun_timing=False, _rerun_bad_picks=False, _mc_run=False, _orig_obs=None,
_prev_toffsets=None):
""" Estimate the trajectory from the given input points.
Keyword arguments (internal flags, DO NOT SPECIFY MANUALLY!):
_rerun_timing: [bool] Internal flag. Is it True when everything is recalculated upon estimating
the difference in timings, so it breaks the second trajectory run after updating the values
of R.A., Dec, velocity, etc.
_rerun_bad_picks: [bool] Internal flag. Is is True when a second pass of trajectory estimation is
run with bad picks removed, thus improving the solution.
_mc_run: [bool] Internal flag. True if the solver is calculating the Carlo Run.
_orig_obs: [list] Used for Monte Carlo. A list of original observations, with no added noise.
Used for calculating all other parameters after the trajectory with noise has been estimated.
_prev_toffsets: [ndarray] Internal variable. Used for keeping the initially estimated timing
offsets from the first run of the solver. None by default.
Return:
traj_best: [Trajectory object] The best trajectory from all Monte Carlo runs. If no Monte Carlo
runs were preformed, the pure LoS trajectory will be returned.
"""
# Make sure there are at least 2 stations
if numStationsNotIgnored(self.observations) < 2:
print('At least 2 sets of measurements from 2 stations are needed to estimate the trajectory!')
return None
### Recompute the reference JD and all times so that the first time starts at 0 ###
# Determine the first relative time from reference JD
t0 = min([obs.time_data[0] for obs in self.observations if (not obs.ignore_station) \
or (not np.all(obs.ignore_list))])
# If the first time is not 0, normalize times so that the earliest time is 0
if t0 != 0.0:
# Offset all times by t0
for obs in self.observations:
obs.time_data -= t0
# Recompute the reference JD to corresponds with t0
self.jdt_ref = self.jdt_ref + t0/86400.0
###################################################################################
# Determine which station has the reference time (the first time entry is 0 for that station, but
# do not take the station which has excluded points)
for i, obs in enumerate(self.observations):
# Do not take the station with excluded points as the reference one
if obs.excluded_indx_range:
continue
if obs.time_data[0] == 0.0:
self.t_ref_station = i
break
### INTERSECTING PLANES SOLUTION ###
######################################################################################################
self.intersection_list = []
# Calculate all plane intersections in between all station pairs, only use non-ignored stations
nonignored_observations = [obs for obs in self.observations if not obs.ignore_station]
for i, obs1 in enumerate(nonignored_observations):
for j, obs2 in enumerate(nonignored_observations[i + 1:]):
# Perform plane intersection
plane_intersection = PlaneIntersection(obs1, obs2)
if self.verbose:
print('Convergence angle between stations', obs1.station_id, 'and', obs2.station_id)
print(' Q =', np.degrees(plane_intersection.conv_angle), 'deg')
self.intersection_list.append(plane_intersection)
radiant_sum = np.zeros(shape=3)
weights_sum = 1e-10
# Sum all radiants ECI positions and weights
for plane_intersection in self.intersection_list:
# Add the calculated radiant to the radiant sum
radiant_sum += plane_intersection.weight*plane_intersection.radiant_eci
weights_sum += plane_intersection.weight
# Calculate the average radiant
avg_radiant = radiant_sum/weights_sum
# Normalize the radiant vector to a unit vector
self.avg_radiant = vectNorm(avg_radiant)
# Calculate the radiant position in RA and Dec
self.radiant_eq = eci2RaDec(self.avg_radiant)
if self.verbose:
print('Multi-Track Weighted IP radiant:', np.degrees(self.radiant_eq))
# Choose the intersection with the largest convergence angle as the best solution
# The reason why the average trajectory determined from plane intersections is not taken as the 'seed'
# for the LoS method is that the state vector cannot be calculated for the average radiant
self.best_conv_inter = max(self.intersection_list, key=attrgetter('conv_angle'))
if self.verbose:
print('Best Convergence Angle IP radiant:', np.degrees(self.best_conv_inter.radiant_eq))
# Set the 3D position of the radiant line as the state vector, at the beginning point
self.state_vect = moveStateVector(self.best_conv_inter.cpa_eci, self.best_conv_inter.radiant_eci,
self.observations)
# Calculate incident angles between the trajectory and the station
self.incident_angles = self.calcStationIncidentAngles(self.state_vect, \
self.best_conv_inter.radiant_eci, self.observations)
# Join each observation the calculated incident angle
for obs, inc_angl in zip(self.observations, self.incident_angles):
obs.incident_angle = inc_angl
# If there are more than 2 stations, use weights for fitting
if numStationsNotIgnored(self.observations) > 2:
# Calculate minimization weights for LoS minimization as squared sines of incident angles
weights = [np.sin(w)**2 for w in self.incident_angles]
else:
# Use unity weights if there are only two stations
weights = [1.0]*len(self.observations)
# Set weights to 0 for stations that are not used
weights = [w if (self.observations[i].ignore_station == False) else 0 for i, w in enumerate(weights)]
# Set weights to stations
for w, obs in zip(weights, self.observations):
obs.weight = w
# Print weights
if self.verbose:
print('LoS statistical weights:')
for obs in self.observations:
print("{:>12s}, {:.3f}".format(obs.station_id, obs.weight))
######################################################################################################
if self.verbose:
print('Intersecting planes solution:', self.state_vect)
print('Minimizing angle deviations...')
### LEAST SQUARES SOLUTION ###
######################################################################################################
# Calculate the initial sum and angles deviating from the radiant line
angle_sum = angleSumMeasurements2Line(self.observations, self.state_vect, \
self.best_conv_inter.radiant_eci, weights=weights, \
gravity=(_rerun_timing and self.gravity_correction), gravity_factor=self.gravity_factor,
v0z=self.v0z
)
if self.verbose:
print('Initial angle sum:', angle_sum)
# Set the initial guess for the state vector and the radiant from the intersecting plane solution
p0 = np.r_[self.state_vect, self.best_conv_inter.radiant_eci]
# Perform the minimization of angle deviations. The gravity will only be compansated for after the
# initial estimate of timing differences
minimize_solution = scipy.optimize.minimize(minimizeAngleCost, p0, args=(self.observations, weights,
(_rerun_timing and self.gravity_correction), self.gravity_factor, self.v0z), method="Nelder-Mead")
# NOTE
# Other minimization methods were tried as well, but all produce higher fit residuals than Nelder-Mead.
# Tried:
# - Powell, CS, BFGS - larger residuals
# - Least Squares - large residuals
# - Basinhopping with NM seed solution - long time to execute with no improvement
# If the minimization diverged, bound the solution to +/-10% of state vector
if np.max(np.abs(minimize_solution.x[:3] - self.state_vect)/self.state_vect) > 0.1:
print('WARNING! Unbounded state vector optimization failed!')
print('Trying bounded minimization to +/-10% of state vector position.')
# Limit the minimization to 10% of original estimation in the state vector
bounds = []
for val in self.state_vect:
bounds.append(sorted([0.9*val, 1.1*val]))
# Bound the radiant vector to +/- 25% of original vales, per each ECI coordinate
for val in self.best_conv_inter.radiant_eci:
bounds.append(sorted([0.75*val, 1.25*val]))
print('BOUNDS:', bounds)
print('p0:', p0)
minimize_solution = scipy.optimize.minimize(minimizeAngleCost, p0, args=(self.observations, \
weights, (_rerun_timing and self.gravity_correction), self.gravity_factor, self.v0z),
bounds=bounds, method='SLSQP')
if self.verbose:
print('Minimization info:')
print(' Message:', minimize_solution.message)
print(' Iterations:', minimize_solution.nit)
print(' Success:', minimize_solution.success)
print(' Final function value:', minimize_solution.fun)
# Set the minimization status
self.los_mini_status = minimize_solution.success
# If the minimization succeded
if minimize_solution.success:
# Unpack the solution
self.state_vect_mini, self.radiant_eci_mini = np.hsplit(minimize_solution.x, 2)
# Set the state vector to the position of the highest point projected on the radiant line
self.state_vect_mini = moveStateVector(self.state_vect_mini, self.radiant_eci_mini,
self.observations)
# Normalize radiant direction
self.radiant_eci_mini = vectNorm(self.radiant_eci_mini)
# Convert the minimized radiant solution to RA and Dec
self.radiant_eq_mini = eci2RaDec(self.radiant_eci_mini)
if self.verbose:
print('Position and radiant LMS solution:')
print(' State vector:', self.state_vect_mini)
print(' Ra', np.degrees(self.radiant_eq_mini[0]), 'Dec:', np.degrees(self.radiant_eq_mini[1]))
else:
print('Angle minimization failed altogether!')
# If the solution did not succeed, set the values to intersecting plates solution
self.radiant_eci_mini = self.best_conv_inter.radiant_eci
# Normalize radiant direction
self.radiant_eci_mini = vectNorm(self.radiant_eci_mini)
# Convert the minimized radiant solution to RA and Dec
self.radiant_eq_mini = eci2RaDec(self.radiant_eci_mini)
# Calculate the state vector
self.state_vect_mini = moveStateVector(self.state_vect, self.radiant_eci_mini,
self.observations)
######################################################################################################
# If running a Monte Carlo run, switch the observations to the original ones, so the noise does not
# influence anything except the radiant position
if (_mc_run or _rerun_timing) and (_orig_obs is not None):
# Store the noisy observations for later
self.obs_noisy = list(self.observations)
# Replace the noisy observations with original observations
self.observations = _orig_obs
# If this is the run of recalculating the parameters after updating the timing, preserve the
# timing as well
if _rerun_timing:
for obs, obs_noise in zip(self.observations, self.obs_noisy):
obs.time_data = np.copy(obs_noise.time_data)
# Calculate velocity at each point
self.calcVelocity(self.state_vect_mini, self.radiant_eci_mini, self.observations, weights)
if self.verbose and self.estimate_timing_vel:
print('Estimating initial velocity and timing differences...')
# # Show the pre-time corrected time vs. length
# if not _rerun_timing:
# ### PLOT DISTANCE FROM RADIANT STATE VECTOR POSITION ###
# ######################################################################################################
# for obs in self.observations:
# # Extract points that were not ignored
# used_times = obs.time_data[obs.ignore_list == 0]
# used_dists = obs.state_vect_dist[obs.ignore_list == 0]
# plt_handle = plt.plot(used_dists/1000, used_times, marker='x', label=str(obs.station_id), \
# zorder=3)
# # Plot ignored points
# if np.any(obs.ignore_list):
# ignored_times = obs.time_data[obs.ignore_list > 0]
# ignored_dists = obs.state_vect_dist[obs.ignore_list > 0]
# plt.scatter(ignored_dists/1000, ignored_times, facecolors='k', \
# edgecolors=plt_handle[0].get_color(), marker='o', s=8, zorder=4, \
# label='{:s} ignored points'.format(str(obs.station_id)))
# plt.title("Distances from state vector, before time correction")
# plt.ylabel('Time (s)')
# plt.xlabel('Distance from state vector (km)')
# plt.legend()
# plt.grid()
# plt.gca().invert_yaxis()
# plt.tight_layout()
# plt.show()
# Calculate the lag ONLY if it was not calculated during timing estimation
if self.observations[0].lag is None:
# Calculate lag
self.calcLag(self.observations)
# Estimate the timing difference between stations and the initial velocity and update the time
(
self.timing_minimization_successful,
self.velocity_fit,
self.v_init,
self.v_init_stddev,
self.time_diffs,
self.observations
) = self.estimateTimingAndVelocity(
self.observations,
weights,
estimate_timing_vel=self.estimate_timing_vel
)
# If estimating the timing failed, skip any further steps
if not self.timing_minimization_successful:
print('unable to minimise timing')
return None
# Calculate velocity at each point with updated timings
self.calcVelocity(self.state_vect_mini, self.radiant_eci_mini, self.observations, weights,
calc_res=_rerun_timing)
### RERUN THE TRAJECTORY ESTIMATION WITH UPDATED TIMINGS ###
######################################################################################################
# Runs only in the first pass of trajectory estimation and estimates timing offsets between stations
if not _rerun_timing:
# Assign the initial timing differences
if not _rerun_bad_picks:
self.time_diffs_final = self.time_diffs
else:
# Assign the timing differences after bad pick removal
self.time_diffs_final += self.time_diffs
# After the timing has been estimated, everything needs to be recalculated from scratch
if self.estimate_timing_vel:
# If doing a Monte Carlo run, switch back to noisy observations
if _mc_run and (_orig_obs is not None):
# Keep the updated timings
for obs, obs_noise in zip(self.observations, self.obs_noisy):
obs_noise.time_data = np.copy(obs.time_data)
# Switch back to noisy observations, but with updated timing
self.observations = self.obs_noisy
# Make a copy of observations
temp_observations = copy.deepcopy(self.observations)
# Reset the observation points
self.observations = []
if self.verbose:
print()
print("---------------------------------------------------------------------------------")
print("Updating the solution after the timing estimation...")
print("---------------------------------------------------------------------------------")
# Reinitialize the observations with proper timing
for obs in temp_observations:
self.infillWithObs(obs)
# Reset fixed times to 0, as the timing offsets have already been applied
for station in self.fixed_time_offsets:
self.fixed_time_offsets[station] = 0.0
# Re-run the trajectory estimation with updated timings. This will update all calculated
# values up to this point
self.run(_rerun_timing=True, _prev_toffsets=self.time_diffs, _orig_obs=_orig_obs)
else:
# In the second pass with updated timings, calculate the final timing offsets
self.time_diffs_final += self.time_diffs
return None
######################################################################################################
# If running a Monte Carlo runs, switch the observations to the original ones, so noise does not
# infuence anything except the radiant position
if _mc_run and (_orig_obs is not None):
# Store the noisy observations for later
self.obs_noisy = list(self.observations)
# Replace the noisy observations with original observations
self.observations = _orig_obs
# Keep the updated timings
for obs, obs_noise in zip(self.observations, self.obs_noisy):
obs.time_data = np.copy(obs_noise.time_data)
# If the stations have no time overlap at all, skip further computations
if len([obs for obs in self.observations if not obs.ignore_station]) < 2:
return None
# Do a Jacchia exponential fit to the lag, per every station
self.jacchia_fit = self.fitJacchiaLag(self.observations)
# Calculate latitude, longitude and altitude of each point closest to the radiant line, in WGS84
self.calcLLA(self.state_vect_mini, self.radiant_eci_mini, self.observations)
# Compute the initial velocity as the average velocity of all points above the given height
# (optional)
if self.v_init_ht is not None:
v_ht_avg, intercept_ht_avg = self.calcAvgVelocityAboveHt(self.observations, 1000*self.v_init_ht, \
weights)
# Assign this average velocity as the initial velocity if the fit was successful
if v_ht_avg is not None:
self.v_init = v_ht_avg
self.velocity_fit = [self.v_init, intercept_ht_avg]
# Recalculate the lag
self.calcLag(self.observations, velocity_fit=self.velocity_fit)
# Refit jacchia lag fit
self.jacchia_fit = self.fitJacchiaLag(self.observations)
# Calculate ECI positions of the CPA on the radiant line, RA and Dec of the points on the radiant
# line as seen by the observers, the corresponding azimuth and elevation, and set arrays model_fit1
# and model_fit2 to be of the same type as the input parameters meas1 and meas2
self.calcECIEqAltAz(self.state_vect_mini, self.radiant_eci_mini, self.observations)
# Calculate horizontal, vertical and angular residuals from the lines of sight to the radiant line
self.calcAllResiduals(self.state_vect_mini, self.radiant_eci_mini, self.observations)
# Calculate absolute magnitudes
self.calcAbsMagnitudes()
### REMOVE BAD PICKS AND RECALCULATE ###
######################################################################################################
if self.filter_picks:
if (not _rerun_bad_picks):
picks_rejected = 0
# Remove all picks which deviate more than N sigma in angular residuals
for obs in self.observations:
# Find the indicies of picks which are within N sigma
good_picks = obs.ang_res < (np.mean(obs.ang_res) \
+ self.reject_n_sigma_outliers*obs.ang_res_std)
# If the number of good picks is below 4, do not remove any picks
if np.count_nonzero(good_picks) < 4:
continue
# Check if any picks were removed
if np.count_nonzero(good_picks) < len(obs.ang_res):
picks_rejected += len(obs.ang_res) - np.count_nonzero(good_picks)
# Ignore bad picks
obs.ignore_list[~good_picks] = 1
# Run only if some picks were rejected
if picks_rejected:
# Make a copy of observations
temp_observations = copy.deepcopy(self.observations)
# Reset the observation points
self.observations = []
if self.verbose:
print()
print("---------------------------------------------------------------------------------")
print("Updating the solution after rejecting", picks_rejected, "bad picks...")
print("---------------------------------------------------------------------------------")
# Reinitialize the observations without the bad picks
for obs in temp_observations:
self.infillWithObs(obs)
# Re-run the trajectory estimation with updated timings. This will update all calculated
# values up to this point
self.run(_rerun_bad_picks=True, _prev_toffsets=self.time_diffs_final)
else:
if self.verbose:
print("All picks are within 3 sigma...")
else:
# In the second pass, return None
return None
######################################################################################################
# If the time fit failed, stop further computations
if not self.timing_minimization_successful:
return None
### CALCULATE ORBIT ###
######################################################################################################
if self.calc_orbit:
# Calculate average velocity and average ECI position of the trajectory
self.v_avg, self.state_vect_avg, self.jd_avg = self.calcAverages(self.observations)
# Calculate the orbit of the meteor
# If the LoS estimation failed, then the plane intersection solution will be used for the orbit,
# which needs to have fixed stations and the average velocity should be the reference velocity
self.orbit = calcOrbit(self.radiant_eci_mini, self.v_init, self.v_avg, self.state_vect_mini, \
self.rbeg_jd, stations_fixed=(not minimize_solution.success), \
reference_init=minimize_solution.success, v_init_stddev_direct=self.v_init_stddev)
if self.verbose:
print(self.orbit.__repr__(v_init_ht=self.v_init_ht))
######################################################################################################
# Break if doing a Monte Carlo run
if _mc_run:
return None
if self.monte_carlo:
# Do a Monte Carlo estimate of the uncertainties in all calculated parameters
traj_best, uncertainties = monteCarloTrajectory(self, mc_runs=self.mc_runs, \
mc_pick_multiplier=self.mc_pick_multiplier, noise_sigma=self.mc_noise_std, \
geometric_uncert=self.geometric_uncert, plot_results=self.save_results, \
mc_cores=self.mc_cores, max_runs=self.mc_runs_max)
### Save uncertainties to the trajectory object ###
if uncertainties is not None:
traj_uncer = copy.deepcopy(uncertainties)
# Remove the list of all MC trajectires (it is unecessarily big)
traj_uncer.mc_traj_list = []
# Set the uncertainties to the best trajectory (maintain compatibility with older version
# before the typo fix)
traj_best.uncertainties = traj_uncer
traj_best.uncertanties = traj_uncer
######
# Copy uncertainties to the geometrical trajectory
self = copyUncertainties(traj_best, self)
else:
uncertainties = None
#### SAVE RESULTS ###
######################################################################################################
# Compute the trajectory ID
if (self.traj_id is None) or (self.traj_id == "None"):
self.traj_id = generateTrajectoryID(self)
if self.monte_carlo:
traj_best = addTrajectoryID(traj_best)
if self.save_results or self.show_plots:
# Save Monte Carlo results
if self.monte_carlo:
traj_best.save_results = self.save_results
traj_best.show_plots = self.show_plots
# Monte Carlo output directory
mc_output_dir = os.path.join(self.output_dir, 'Monte Carlo')
mc_file_name = self.file_name + "_mc"
if self.save_results:
if self.verbose:
print('Saving Monte Carlo results...')
# Save the picked trajectory structure with Monte Carlo points
savePickle(traj_best, mc_output_dir, mc_file_name + '_trajectory.pickle')
# Save the uncertainties
savePickle(uncertainties, mc_output_dir, mc_file_name + '_uncertainties.pickle')
# Save trajectory report
traj_best.saveReport(mc_output_dir, mc_file_name + '_report.txt', \
uncertainties=uncertainties, verbose=self.verbose)
# Save and show plots
traj_best.savePlots(mc_output_dir, mc_file_name, show_plots=self.show_plots)
## Save original picks results
if self.save_results:
if self.verbose:
print('Saving results with original picks...')
# Save the picked trajectory structure with original points
savePickle(self, self.output_dir, self.file_name + '_trajectory.pickle')
# Save trajectory report with original points
self.saveReport(self.output_dir, self.file_name + '_report.txt', \
uncertainties=uncertainties, verbose = not self.monte_carlo)
# Save and show plots
self.savePlots(self.output_dir, self.file_name, \
show_plots=(self.show_plots and not self.monte_carlo))
######################################################################################################
## SHOW PLANE INTERSECTIONS AND LoS PLOTS ###
#####################################################################################################
# # Show the plane intersection
# if self.show_plots:
# self.best_conv_inter.show()
# # Show lines of sight solution in 3D
# if self.show_plots:
# self.showLoS()
#####################################################################################################
# Return the best trajectory
if self.monte_carlo:
return traj_best
else:
return self
if __name__ == "__main__":

    ### TEST CASE ###
    ##########################################################################################################

    import time
    from wmpl.Utils.TrajConversions import equatorialCoordPrecession_vect, J2000_JD

    ## TEST EVENT
    ###############

    # Reference julian date (epoch of the first observation)
    jdt_ref = 2458601.365760937799

    # Inputs are RA/Dec
    meastype = 1

    # Measurements
    # Two-station meteor observation: times are seconds relative to jdt_ref,
    # RA/Dec are apparent equatorial coordinates in degrees.
    station_id1 = "RU0001"
    time1 = np.array([0.401190, 0.441190, 0.481190, 0.521190, 0.561190, 0.601190, 0.641190, 0.681190,
                      0.721190, 0.761190, 0.801190, 0.841190, 0.881190, 0.921190, 0.961190, 1.001190,
                      1.041190, 1.081190, 1.121190, 1.161190, 1.201190, 1.241190, 1.281190, 1.321190,
                      1.361190, 1.401190, 1.441190, 1.561190, 1.601190, 1.641190, 1.721190, 1.761190,
                      1.841190])
    ra1 = np.array([350.35970, 350.71676, 351.29184, 351.58998, 352.04673, 352.50644, 352.91289, 353.37336,
                    353.80532, 354.23339, 354.69277, 355.07317, 355.49321, 355.93473, 356.32148, 356.74755,
                    357.13866, 357.51363, 357.89944, 358.34052, 358.72626, 359.11597, 359.53391, 359.88343,
                    0.35106, 0.71760, 1.05526, 2.17105, 2.58634, 2.86315, 3.58752, 3.90806,
                    4.48084])
    dec1 = np.array([+74.03591, +73.94472, +73.80889, +73.73877, +73.59830, +73.46001, +73.35001, +73.22812,
                     +73.10211, +72.98779, +72.84568, +72.72924, +72.59691, +72.46677, +72.33622, +72.18147,
                     +72.04381, +71.91015, +71.77648, +71.63370, +71.47512, +71.32664, +71.16185, +71.03236,
                     +70.84506, +70.67285, +70.54194, +70.01219, +69.80856, +69.69043, +69.38316, +69.23522,
                     +68.93025])

    station_id2 = "RU0002"
    time2 = np.array([0.000000, 0.040000, 0.080000, 0.120000, 0.160000, 0.200000, 0.240000, 0.280000,
                      0.320000, 0.360000, 0.400000, 0.440000, 0.480000, 0.520000, 0.560000, 0.600000,
                      0.640000, 0.680000, 0.720000, 0.760000, 0.800000, 0.840000, 0.880000, 0.920000,
                      0.960000, 1.000000, 1.040000, 1.080000, 1.120000, 1.160000, 1.200000, 1.240000,
                      1.280000, 1.320000, 1.360000, 1.400000, 1.440000, 1.480000, 1.520000, 1.560000,
                      1.600000, 1.640000, 1.680000, 1.720000, 1.760000, 1.800000, 1.840000, 1.880000,
                      1.920000, 1.960000, 2.000000, 2.040000, 2.080000, 2.120000, 2.160000, 2.200000,
                      2.240000, 2.280000, 2.320000, 2.360000, 2.400000, 2.440000, 2.480000, 2.520000,])
    ra2 = np.array([ 81.27325, 81.20801, 81.06648, 81.03509, 80.93281, 80.87338, 80.74776, 80.68456,
                     80.60038, 80.52306, 80.45021, 80.35990, 80.32309, 80.21477, 80.14311, 80.06967,
                     79.98169, 79.92234, 79.84210, 79.77507, 79.72752, 79.62422, 79.52738, 79.48236,
                     79.39613, 79.30580, 79.23434, 79.20863, 79.12019, 79.03670, 78.94849, 78.89223,
                     78.84252, 78.76605, 78.69339, 78.64799, 78.53858, 78.53906, 78.47469, 78.39496,
                     78.33473, 78.25761, 78.23964, 78.17867, 78.16914, 78.07010, 78.04741, 77.95169,
                     77.89130, 77.85995, 77.78812, 77.76807, 77.72458, 77.66024, 77.61543, 77.54208,
                     77.50465, 77.45944, 77.43200, 77.38361, 77.36004, 77.28842, 77.27131, 77.23300])
    dec2 = np.array([+66.78618, +66.66040, +66.43476, +66.21971, +66.01550, +65.86401, +65.63294, +65.43265,
                     +65.25161, +65.01655, +64.83118, +64.62955, +64.45051, +64.23361, +64.00504, +63.81778,
                     +63.61334, +63.40714, +63.19009, +62.98101, +62.76420, +62.52019, +62.30266, +62.05585,
                     +61.84240, +61.60207, +61.40390, +61.22904, +60.93950, +60.74076, +60.53772, +60.25602,
                     +60.05801, +59.83635, +59.59978, +59.37846, +59.10216, +58.88266, +58.74728, +58.45432,
                     +58.18503, +57.97737, +57.72030, +57.55891, +57.31933, +56.98481, +56.85845, +56.58652,
                     +56.36153, +56.15409, +55.88252, +55.66986, +55.46593, +55.20145, +54.91643, +54.69826,
                     +54.49443, +54.25651, +54.06386, +53.86395, +53.70069, +53.47312, +53.33715, +53.20272])

    # Convert measurement to radians
    ra1 = np.radians(ra1)
    dec1 = np.radians(dec1)
    ra2 = np.radians(ra2)
    dec2 = np.radians(dec2)

    ###

    ### SITES INFO
    # Station geodetic coordinates: longitude/latitude in radians, elevation in meters

    lon1 = np.radians(37.315140)
    lat1 = np.radians(44.890740)
    ele1 = 26.00

    lon2 = np.radians(38.583580)
    lat2 = np.radians(44.791620)
    ele2 = 240.00

    ###

    # Init new trajectory solving
    traj_solve = Trajectory(jdt_ref, meastype=meastype, save_results=False, monte_carlo=False, show_plots=False)

    # Set input points for the first site
    traj_solve.infillTrajectory(ra1, dec1, time1, lat1, lon1, ele1, station_id=station_id1)

    # Set input points for the second site
    traj_solve.infillTrajectory(ra2, dec2, time2, lat2, lon2, ele2, station_id=station_id2)

    # Solve the trajectory
    traj_solve.run()

    ###############

    # TEST
    # Verify that plots can be returned as pickled figure objects instead of being shown/saved
    fig_pickle_dict = traj_solve.savePlots(None, None, show_plots=False, ret_figs=True)

    for key in fig_pickle_dict:
        print(key)
        fig = pickle.loads(fig_pickle_dict[key])
|
wmpgREPO_NAMEWesternMeteorPyLibPATH_START.@WesternMeteorPyLib_extracted@WesternMeteorPyLib-master@wmpl@Trajectory@Trajectory.py@.PATH_END.py
|
{
"filename": "sharedport.py",
"repo_name": "crossbario/crossbar",
"repo_path": "crossbar_extracted/crossbar-master/crossbar/common/twisted/sharedport.py",
"type": "Python"
}
|
#####################################################################################
#
# Copyright (c) typedef int GmbH
# SPDX-License-Identifier: EUPL-1.2
#
#####################################################################################
import sys
import socket
import platform
from twisted.internet import fdesc, tcp, ssl
from twisted.python.runtime import platformType
# Flag indicating support for creating shared sockets with in-kernel
# load-balancing (!). Note that while FreeBSD had SO_REUSEPORT for ages,
# it does NOT (currently) implement load-balancing. Linux >= 3.9 and
# DragonFly BSD does.
_HAS_SHARED_LOADBALANCED_SOCKET = False

if sys.platform.startswith('linux'):
    try:
        # get Linux kernel version, like: (3, 19)
        _LINUX_KERNEL_VERSION = [int(x) for x in platform.uname()[2].split('.')[:2]]

        # SO_REUSEPORT only supported for Linux kernels >= 3.9
        if (_LINUX_KERNEL_VERSION[0] == 3 and _LINUX_KERNEL_VERSION[1] >= 9) or _LINUX_KERNEL_VERSION[0] >= 4:
            _HAS_SHARED_LOADBALANCED_SOCKET = True

            # monkey patch missing constant if needed
            if not hasattr(socket, 'SO_REUSEPORT'):
                socket.SO_REUSEPORT = 15
    except (ValueError, IndexError):
        # Kernel release string could not be parsed into (major, minor) integers;
        # conservatively assume no load-balanced shared socket support. A bare
        # "except:" here would also have swallowed SystemExit/KeyboardInterrupt.
        pass
elif sys.platform == 'win32':
    # http://stackoverflow.com/questions/14388706/socket-options-so-reuseaddr-and-so-reuseport-how-do-they-differ-do-they-mean-t/14388707#14388707
    _HAS_SHARED_LOADBALANCED_SOCKET = True

# FIXME: DragonFly BSD claims support: http://lists.dragonflybsd.org/pipermail/commits/2013-May/130083.html
__all__ = ('create_stream_socket', 'CustomTCPPort', 'CustomTCPTLSPort')
def create_stream_socket(addressFamily, shared=False):
    """
    Create a new socket for use with Twisted's IReactor.adoptStreamPort.

    :param addressFamily: The socket address family.
    :type addressFamily: One of socket.AF_INET, socket.AF_INET6, socket.AF_UNIX
    :param shared: If `True`, request to create a shared, load-balanced socket.
       When this feature is not available, throw an exception.
    :type shared: bool

    :returns obj -- A socket.
    """
    sock = socket.socket(addressFamily, socket.SOCK_STREAM)
    sock.setblocking(0)
    fdesc._setCloseOnExec(sock.fileno())

    # On POSIX (other than cygwin), allow quick rebinding to a recently used address.
    if platformType == "posix" and sys.platform != "cygwin":
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)

    if not shared:
        return sock

    # Shared (load-balanced) sockets are only meaningful for TCP over IPv4/IPv6.
    if addressFamily not in [socket.AF_INET, socket.AF_INET6]:
        raise Exception("shared sockets are only supported for IPv4 and IPv6")

    if not _HAS_SHARED_LOADBALANCED_SOCKET:
        raise Exception("shared sockets unsupported on this system")

    if sys.platform.startswith('linux'):
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
    elif sys.platform == 'win32':
        # http://stackoverflow.com/questions/14388706/socket-options-so-reuseaddr-and-so-reuseport-how-do-they-differ-do-they-mean-t/14388707#14388707
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        # sock.setsockopt(socket.SOL_SOCKET, socket.SO_EXCLUSIVEADDRUSE, 1)
    else:
        raise Exception("logic error")

    return sock
class CustomTCPPort(tcp.Port):
    """
    A custom TCP port which allows to set socket options for sharing TCP ports between multiple processes.
    """

    def __init__(self, port, factory, backlog=50, interface='', reactor=None, shared=False, user_timeout=None):
        # Refuse up front if port sharing was requested on a platform without support.
        if shared and not _HAS_SHARED_LOADBALANCED_SOCKET:
            raise Exception("shared sockets unsupported on this system")

        self._shared = shared
        self._user_timeout = user_timeout
        tcp.Port.__init__(self, port, factory, backlog, interface, reactor)

    def createInternetSocket(self):
        sock = tcp.Port.createInternetSocket(self)

        if self._shared:
            if not _HAS_SHARED_LOADBALANCED_SOCKET:
                raise Exception("shared sockets unsupported on this system")

            if sys.platform.startswith('linux'):
                # Kernel-level load balancing across processes bound to the same port.
                sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
            elif sys.platform == 'win32':
                # http://stackoverflow.com/questions/14388706/socket-options-so-reuseaddr-and-so-reuseport-how-do-they-differ-do-they-mean-t/14388707#14388707
                sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
                # sock.setsockopt(socket.SOL_SOCKET, socket.SO_EXCLUSIVEADDRUSE, 1)
            else:
                raise Exception("logic error")

        if self._user_timeout is not None:
            # Drop the connection if transmitted data stays unacknowledged this long (ms).
            sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_USER_TIMEOUT, self._user_timeout)

        return sock
class CustomTCPTLSPort(CustomTCPPort, ssl.Port):
    """
    A custom TLS port which allows to set socket options for sharing (the underlying) TCP ports between multiple processes.
    """

    def __init__(self,
                 port,
                 factory,
                 ctxFactory,
                 backlog=50,
                 interface='',
                 reactor=None,
                 shared=False,
                 user_timeout=None):
        # Same precondition as CustomTCPPort: sharing requires kernel support.
        if shared and not _HAS_SHARED_LOADBALANCED_SOCKET:
            raise Exception("shared sockets unsupported on this system")

        self._shared = shared
        self._user_timeout = user_timeout
        # Initialize via ssl.Port so the TLS context factory is wired in;
        # createInternetSocket is inherited from CustomTCPPort.
        ssl.Port.__init__(self, port, factory, ctxFactory, backlog, interface, reactor)
|
crossbarioREPO_NAMEcrossbarPATH_START.@crossbar_extracted@crossbar-master@crossbar@common@twisted@sharedport.py@.PATH_END.py
|
{
"filename": "mcsamples.py",
"repo_name": "cmbant/CosmoMC",
"repo_path": "CosmoMC_extracted/CosmoMC-master/python/getdist/mcsamples.py",
"type": "Python"
}
|
import os
import glob
import logging
import copy
import pickle
import math
import time
from typing import Mapping, Any, Optional, Union, Iterable
import numpy as np
from scipy.stats import norm
import getdist
from getdist import chains, types, covmat, ParamInfo, IniFile, ParamNames, cobaya_interface
from getdist.densities import Density1D, Density2D
from getdist.chains import Chains, chainFiles, last_modified, WeightedSampleError, ParamError
from getdist.convolve import convolve1D, convolve2D
from getdist.cobaya_interface import MCSamplesFromCobaya
import getdist.kde_bandwidth as kde
from getdist.parampriors import ParamBounds
pickle_version = 22
class MCSamplesError(WeightedSampleError):
    """
    An Exception that is raised when there is an error inside the MCSamples class.

    Base class for the more specific :class:`SettingError` and :class:`BandwidthError`.
    """
class SettingError(MCSamplesError):
    """
    An Exception that indicates bad settings (e.g. invalid or inconsistent analysis options).
    """
class BandwidthError(MCSamplesError):
    """
    An Exception that indicates KDE bandwidth failure.
    """
def loadMCSamples(file_root: str, ini: Union[None, str, IniFile] = None,
                  jobItem=None, no_cache=False, settings: Optional[Mapping[str, Any]] = None) -> 'MCSamples':
    """
    Loads a set of samples from a file or files.

    Sample files are plain text (*file_root.txt*) or a set of files (*file_root_1.txt*, *file_root_2.txt*, etc.).

    Auxiliary files **file_root.paramnames** gives the parameter names
    and (optionally) **file_root.ranges** gives hard prior parameter ranges.

    For a description of the various analysis settings and default values see
    `analysis_defaults.ini <https://getdist.readthedocs.org/en/latest/analysis_settings.html>`_.

    :param file_root: The root name of the files to read (no extension)
    :param ini: The name of a .ini file with analysis settings to use
    :param jobItem: an optional grid jobItem instance for a CosmoMC grid output
    :param no_cache: Indicates whether or not we should cache loaded samples in a pickle
    :param settings: dictionary of analysis settings to override defaults
    :return: The :class:`MCSamples` instance
    """
    files = chainFiles(file_root)
    if not files:  # try new Cobaya format
        files = chainFiles(file_root, separator='.')
    path, name = os.path.split(file_root)
    cache_dir = getdist.make_cache_dir()
    if cache_dir:
        import hashlib
        # Hash the absolute path so equal-named roots in different folders get distinct cache files
        cache_name = name + '_' + hashlib.md5(os.path.abspath(path).encode('utf-8')).hexdigest()[:10]
        path = cache_dir
    else:
        cache_name = name
    if path:
        # makedirs with exist_ok avoids a race (and a crash) if the directory is created
        # concurrently between an exists() check and mkdir(); an empty path means the
        # current working directory, which needs no creation.
        os.makedirs(path, exist_ok=True)
    cachefile = os.path.join(path, cache_name) + '.py_mcsamples'
    samples = MCSamples(file_root, jobItem=jobItem, ini=ini, settings=settings)
    if os.path.isfile(file_root + '.paramnames'):
        allfiles = files + [file_root + '.ranges', file_root + '.paramnames', file_root + '.properties.ini']
    else:  # Cobaya
        folder = os.path.dirname(file_root)
        prefix = os.path.basename(file_root)
        allfiles = files + [
            os.path.join(folder, f) for f in os.listdir(folder) if (
                    f.startswith(prefix) and
                    any(f.lower().endswith(end) for end in ['updated.yaml', 'full.yaml']))]
    # Use the cache only if it is newer than every input file and analysis options match
    if not no_cache and os.path.exists(cachefile) and last_modified(allfiles) < os.path.getmtime(cachefile):
        try:
            with open(cachefile, 'rb') as inp:
                cache = pickle.load(inp)
            if cache.version == pickle_version and samples.ignore_rows == cache.ignore_rows \
                    and samples.min_weight_ratio == cache.min_weight_ratio:
                changed = len(samples.contours) != len(cache.contours) or \
                          np.any(np.array(samples.contours) != np.array(cache.contours))
                cache.updateSettings(ini=ini, settings=settings, doUpdate=changed)
                return cache
        except Exception:
            # A stale or corrupt cache must never be fatal: log it and fall through
            # to re-reading the chains from the original files.
            logging.debug('Failed to load cache %s; re-reading chains', cachefile, exc_info=True)
    if not files:
        raise IOError('No chains found: ' + file_root)
    samples.readChains(files)
    if no_cache:
        if os.path.exists(cachefile):
            os.remove(cachefile)
    else:
        samples.savePickle(cachefile)
    return samples
class Kernel1D:
    """Normalized 1D Gaussian smoothing kernel sampled at integer offsets."""

    def __init__(self, winw, h):
        """
        :param winw: half-width of the window; the kernel covers offsets -winw..winw
        :param h: Gaussian smoothing scale (standard deviation, in bin units)
        """
        self.winw = winw
        self.h = h
        # Integer offsets at which the kernel is evaluated
        self.x = np.arange(-winw, winw + 1)
        weights = np.exp(-0.5 * (self.x / h) ** 2)
        # Normalize so the discrete kernel sums to one
        self.Win = weights / weights.sum()
# =============================================================================
class MCSamples(Chains):
"""
The main high-level class for a collection of parameter samples.
Derives from :class:`.chains.Chains`, adding high-level functions including
Kernel Density estimates, parameter ranges and custom settings.
"""
    def __init__(self, root: Optional[str] = None, jobItem=None, ini=None,
                 settings: Optional[Mapping[str, Any]] = None, ranges=None,
                 samples: Union[np.ndarray, Iterable[np.ndarray], None] = None,
                 weights: Union[np.ndarray, Iterable[np.ndarray], None] = None,
                 loglikes: Union[np.ndarray, Iterable[np.ndarray], None] = None, **kwargs):
        """
        For a description of the various analysis settings and default values see
        `analysis_defaults.ini <https://getdist.readthedocs.org/en/latest/analysis_settings.html>`_.

        :param root: A root file name when loading from file
        :param jobItem: optional jobItem for parameter grid item. Should have jobItem.chainRoot and jobItem.batchPath
        :param ini: a .ini file to use for custom analysis settings
        :param settings: a dictionary of custom analysis settings
        :param ranges: a dictionary giving any additional hard prior bounds for parameters,
               eg. {'x':[0, 1], 'y':[None,2]}
        :param samples: if not loading from file, array of parameter values for each sample, passed
               to :meth:`setSamples`, or list of arrays if more than one chain
        :param weights: array of weights for samples, or list of arrays if more than one chain
        :param loglikes: array of -log(Likelihood) for samples, or list of arrays if more than one chain
        :param kwargs: keyword arguments passed to inherited classes, e.g. to manually make a samples object from
               sample arrays in memory:

               - **paramNamesFile**: optional name of .paramnames file with parameter names
               - **names**: list of names for the parameters, or list of arrays if more than one chain
               - **labels**: list of latex labels for the parameters
               - **renames**: dictionary of parameter aliases
               - **ignore_rows**:

                     - if int >=1: The number of rows to skip at the file in the beginning of the file
                     - if float <1: The fraction of rows to skip at the beginning of the file

               - **label**: a latex label for the samples
               - **name_tag**: a name tag for this instance
               - **sampler**: string describing the type of samples; if "nested" or "uncorrelated"
                 the effective number of samples is calculated using uncorrelated approximation. If not specified
                 will be read from the root.properties.ini file if it exists and otherwise default to "mcmc".
        """
        Chains.__init__(self, root, jobItem=jobItem, **kwargs)

        self.version = pickle_version

        self.markers = {}

        self.ini = ini

        if self.jobItem:
            self.batch_path = self.jobItem.batchPath
        else:
            self.batch_path = ''

        self._readRanges()
        if ranges:
            self.setRanges(ranges)

        # Other variables
        # Default analysis settings; may be overridden later by updateSettings(ini/settings) below.
        self.range_ND_contour: int = 1
        self.range_confidence: float = 0.001
        self.num_bins: int = 128
        self.fine_bins: int = 1024
        self.num_bins_2D: int = 40
        self.fine_bins_2D: int = 256
        self.smooth_scale_1D: float = -1.
        self.smooth_scale_2D: float = -1.
        self.num_bins_ND: int = 12
        self.boundary_correction_order: int = 1
        self.mult_bias_correction_order: int = 1
        self.max_corr_2D: float = 0.95
        self.use_effective_samples_2D = False
        self.contours = np.array([0.68, 0.95])
        self.max_scatter_points: int = 2000
        self.credible_interval_threshold: float = 0.05

        self.shade_likes_is_mean_loglikes = False

        self.likeStats = None
        self.max_mult: float = 0
        self.mean_mult: float = 0
        self.plot_data_dir = ""
        if root:
            self.rootname = os.path.basename(root)
        else:
            self.rootname = ""

        self.rootdirname = ""
        self.indep_thin = 0
        # ignore_rows passed via kwargs also overrides any value in settings
        if 'ignore_rows' in kwargs:
            if settings is None:
                settings = {}
            settings['ignore_rows'] = kwargs['ignore_rows']
        self.ignore_rows = float(kwargs.get('ignore_rows', 0))
        # Do not remove burn-in for nested sampler samples
        if self.sampler == "nested" and not np.isclose(self.ignore_rows, 0):
            raise ValueError("Should not remove burn-in from Nested Sampler samples.")

        self.subplot_size_inch = 4.0
        self.subplot_size_inch2 = self.subplot_size_inch
        self.subplot_size_inch3 = 6.0
        self.plot_output = getdist.default_plot_output
        self.out_dir = ""

        self.no_warning_params = []
        self.no_warning_chi2_params = True
        self.max_split_tests = 4
        self.force_twotail = False

        self.corr_length_thin = 0
        self.corr_length_steps = 15
        self.converge_test_limit = 0.95

        self.done_1Dbins = False
        self.density1D = dict()

        self.updateSettings(ini=ini, settings=settings)

        if root and os.path.exists(root + '.properties.ini'):
            # any settings in properties.ini override settings for this specific chain
            self.properties = IniFile(root + '.properties.ini')
            self._setBurnOptions(self.properties)
            if self.properties.bool('burn_removed', False):
                # Burn-in was already removed when the chain was written; do not remove again
                self.ignore_frac = 0.
                self.ignore_lines = 0
            self.label = self.label or self.properties.params.get('label', None)
            if 'sampler' not in kwargs:
                self.setSampler(self.properties.string('sampler', self.sampler))
        else:
            self.properties = IniFile()
            if root and self.paramNames and self.paramNames.info_dict:
                # Cobaya chains carry metadata in the paramnames info_dict instead of properties.ini
                if cobaya_interface.get_burn_removed(self.paramNames.info_dict):
                    self.properties.params['burn_removed'] = True
                    self.ignore_frac = 0.
                    self.ignore_lines = 0
                if not self.label:
                    self.label = cobaya_interface.get_sample_label(self.paramNames.info_dict)
                    if self.label:
                        self.properties.params['label'] = self.label
                if 'sampler' not in kwargs:
                    self.setSampler(cobaya_interface.get_sampler_type(self.paramNames.info_dict))
                    self.properties.params['sampler'] = self.sampler
            if self.ignore_frac or self.ignore_rows:
                self.properties.params['burn_removed'] = True

        if samples is not None:
            self.readChains(samples, weights, loglikes)
    def copy(self, label=None, settings=None):
        """
        Create a copy of this sample object

        :param label: optional label for the new copy
        :param settings: optional modified settings for the new copy
        :return: copied :class:`MCSamples` instance
        """
        new = copy.deepcopy(self)
        if label:
            new.label = label
        if settings is not None:
            # Force recomputation of derived statistics with the new settings
            new.needs_update = True
            new.updateSettings(settings)
        return new
def setRanges(self, ranges):
"""
Sets the ranges parameters, e.g. hard priors on positivity etc.
If a min or max value is None, then it is assumed to be unbounded.
:param ranges: A list or a tuple of [min,max] values for each parameter,
or a dictionary giving [min,max] values for specific parameter names
"""
if isinstance(ranges, (list, tuple)):
for i, minmax in enumerate(ranges):
self.ranges.setRange(self.parName(i), minmax)
elif isinstance(ranges, Mapping):
for key, value in ranges.items():
self.ranges.setRange(key, value)
elif isinstance(ranges, ParamBounds):
self.ranges = copy.deepcopy(ranges)
else:
raise ValueError('MCSamples ranges parameter must be list or dict')
self.needs_update = True
    def parName(self, i, starDerived=False):
        """
        Gets the name of i'th parameter

        :param i: The index of the parameter
        :param starDerived: add a star at the end of the name if the parameter is derived
        :return: The name of the parameter (string)
        """
        # Thin delegate to the ParamNames container
        return self.paramNames.name(i, starDerived)
def parLabel(self, i):
"""
Gets the latex label of the parameter
:param i: The index or name of a parameter.
:return: The parameter's label.
"""
if isinstance(i, str):
return self.paramNames.parWithName(i).label
else:
return self.paramNames.names[i].label
def _setBurnOptions(self, ini):
"""
Sets the ignore_rows value from configuration.
:param ini: The :class:`.inifile.IniFile` to be used
"""
ini.setAttr('ignore_rows', self)
self.ignore_lines = int(self.ignore_rows)
if not self.ignore_lines:
self.ignore_frac = self.ignore_rows
else:
self.ignore_frac = 0
ini.setAttr('min_weight_ratio', self)
    def initParameters(self, ini):
        """
        Initializes settings.
        Gets parameters from :class:`~.inifile.IniFile`.

        :param ini: The :class:`~.inifile.IniFile` to be used
        """
        self._setBurnOptions(ini)

        # Each setAttr call copies the ini value (if present) onto the same-named attribute of self
        ini.setAttr('range_ND_contour', self)
        ini.setAttr('range_confidence', self)
        ini.setAttr('num_bins', self)
        ini.setAttr('fine_bins', self)

        ini.setAttr('num_bins_2D', self)
        ini.setAttr('fine_bins_2D', self)

        ini.setAttr('smooth_scale_1D', self)
        ini.setAttr('smooth_scale_2D', self)

        ini.setAttr('boundary_correction_order', self, 1)
        ini.setAttr('mult_bias_correction_order', self, 1)

        ini.setAttr('num_bins_ND', self)
        ini.setAttr('max_scatter_points', self)
        ini.setAttr('credible_interval_threshold', self)

        ini.setAttr('subplot_size_inch', self)
        ini.setAttr('subplot_size_inch2', self)
        ini.setAttr('subplot_size_inch3', self)
        ini.setAttr('plot_output', self)

        ini.setAttr('force_twotail', self)
        if self.force_twotail:
            logging.warning('Computing two tail limits')
        ini.setAttr('max_corr_2D', self)

        # 'contours' takes precedence; otherwise fall back to num_contours + contour1, contour2, ...
        if ini.hasKey('contours'):
            ini.setAttr('contours', self)
        elif ini.hasKey('num_contours'):
            num_contours = ini.int('num_contours', 2)
            self.contours = np.array([ini.float('contour' + str(i + 1)) for i in range(num_contours)])
        # how small the end bin must be relative to max to use two tail
        self.max_frac_twotail = []
        for i, contour in enumerate(self.contours):
            # Default threshold from the Gaussian tail density at the contour's z-value
            max_frac = np.exp(-1.0 * math.pow(norm.ppf((1 - contour) / 2), 2) / 2)
            if ini:
                max_frac = ini.float('max_frac_twotail' + str(i + 1), max_frac)
            self.max_frac_twotail.append(max_frac)

        # Default converge test limit is the outermost contour
        ini.setAttr('converge_test_limit', self, self.contours[-1])

        ini.setAttr('corr_length_thin', self)
        ini.setAttr('corr_length_steps', self)
        ini.setAttr('no_warning_params', self, [])
        ini.setAttr('no_warning_chi2_params', self, True)

        self.batch_path = ini.string('batch_path', self.batch_path, allowEmpty=False)
def _initLimits(self, ini=None):
    """
    Read hard parameter limits and plot markers from the ini file (if given),
    updating self.ranges and the per-parameter limit flags on each name.

    :param ini: optional :class:`~.inifile.IniFile` with limits[name]/marker[name] keys
    """
    bin_limits = ""
    if ini:
        # 'all_limits' applies the same limit string to every parameter
        bin_limits = ini.string('all_limits', '')
    self.markers = {}
    for par in self.paramNames.names:
        if bin_limits:
            line = bin_limits
        else:
            line = ''
        # a per-parameter limits[name] entry overrides all_limits
        if ini and 'limits[%s]' % par.name in ini.params:
            line = ini.string('limits[%s]' % par.name)
        if line:
            limits = line.split()
            if len(limits) == 2:
                self.ranges.setRange(par.name, limits)
        # cache resolved bounds and whether each side is actually limited
        par.limmin = self.ranges.getLower(par.name)
        par.limmax = self.ranges.getUpper(par.name)
        par.has_limits_bot = par.limmin is not None
        par.has_limits_top = par.limmax is not None
        if ini and 'marker[%s]' % par.name in ini.params:
            line = ini.string('marker[%s]' % par.name)
            if line:
                self.markers[par.name] = float(line)
def updateSettings(self, settings: Optional[Mapping[str, Any]] = None,
                   ini: Union[None, str, IniFile] = None, doUpdate=True):
    """
    Updates settings from a .ini file or dictionary

    :param settings: A dict containing settings to set, taking preference over any values in ini
    :param ini: The name of .ini file to get settings from, or an :class:`~.inifile.IniFile` instance; by default
                uses current settings
    :param doUpdate: True if should update internal computed values, False otherwise (e.g. if want to make
                     other changes first)
    """
    assert (settings is None or isinstance(settings, Mapping))
    if not ini:
        ini = self.ini
    elif isinstance(ini, str):
        ini = IniFile(ini)
    else:
        # deep copy so applying 'settings' does not mutate the caller's IniFile
        ini = copy.deepcopy(ini)
    if not ini:
        # no current settings either: fall back to the package defaults file
        ini = IniFile(getdist.default_getdist_settings)
    if settings:
        ini.params.update(settings)
    self.ini = ini
    if ini:
        self.initParameters(ini)
    if doUpdate and self.samples is not None:
        self.updateBaseStatistics()
def readChains(self, files_or_samples, weights=None, loglikes=None):
    """
    Loads samples from a list of files or array(s), removing burn in,
    deleting fixed parameters, and combining into one self.samples array

    :param files_or_samples: The list of file names to read, samples or list of samples
    :param weights: array of weights if setting from arrays
    :param loglikes: array of -2 log(likelihood) if setting from arrays
    :return: self.
    """
    self.loadChains(self.root, files_or_samples, weights=weights, loglikes=loglikes)
    # Only strip burn in for raw chains: importance-sampled jobs, or jobs
    # where burn in was already removed upstream, are left untouched.
    if self.ignore_frac and (not self.jobItem or not hasattr(self.jobItem, "isImportanceJob")
                             or (not self.jobItem.isImportanceJob and not self.jobItem.isBurnRemoved())):
        self.removeBurnFraction(self.ignore_frac)
        chains.print_load_line('Removed %s as burn in' % self.ignore_frac)
    elif not int(self.ignore_rows):
        chains.print_load_line('Removed no burn in')
    self.deleteFixedParams()
    # Make a single array for chains
    if self.chains is not None:
        self.makeSingle()
    self.updateBaseStatistics()
    return self
def updateBaseStatistics(self):
    """
    Updates basic computed statistics (means, covariance etc.), e.g. after a change in samples or weights

    :return: self
    """
    super().updateBaseStatistics()
    # Warn about unusually high-weight samples: threshold is a multiple of
    # the mean weight, scaled by the (capped) number of rows.
    mult_max = (self.mean_mult * self.numrows) / min(self.numrows // 2, 500)
    outliers = np.sum(self.weights > mult_max)
    if outliers != 0:
        logging.warning('outlier fraction %s ', float(outliers) / self.numrows)
    self.indep_thin = 0
    self._setCov()
    # Invalidate cached 1D densities; they are recomputed lazily on demand.
    self.done_1Dbins = False
    self.density1D = dict()
    self._initLimits(self.ini)
    for par in self.paramNames.names:
        # cached KDE effective sample size, recomputed lazily per parameter
        par.N_eff_kde = None
    # Get ND confidence region
    self._setLikeStats()
    return self
def makeSingleSamples(self, filename="", single_thin=None, random_state=None):
    """
    Produce unit-weight samples by keeping each row with probability
    proportional to its weight (Poisson-style thinning). If you only need
    the selected row indices, use
    :meth:`~.chains.WeightedSamples.random_single_samples_indices` instead.

    :param filename: optional output file; if empty the selected samples are returned
    :param single_thin: extra thinning factor; defaults to whatever yields at most
                        self.max_scatter_points samples
    :param random_state: random seed or Generator
    :return: numpy array of selected weight-1 samples when no filename is given
    """
    if single_thin is None:
        single_thin = max(1, self.norm / self.max_mult / self.max_scatter_points)
    rng = np.random.default_rng(random_state)
    # one uniform draw per row; a row survives if its draw falls below its
    # normalized acceptance probability
    draws = rng.random(self.numrows)
    if not filename:
        keep = draws <= self.weights / (self.max_mult * single_thin)
        return self.samples[keep]
    with open(filename, 'w', encoding='utf-8') as out:
        for row, draw in enumerate(draws):
            if draw > self.weights[row] / self.max_mult / single_thin:
                continue
            out.write("%16.7E" % 1.0)
            out.write("%16.7E" % (self.loglikes[row]))
            for col in range(self.n):
                out.write("%16.7E" % (self.samples[row][col]))
            out.write("\n")
def writeThinData(self, fname, thin_ix, cool=1):
    """
    Writes samples at thin_ix to file

    :param fname: The filename to write to.
    :param thin_ix: Indices of the samples to write
    :param cool: if not 1, cools the samples by this factor
    """
    nparams = self.samples.shape[1]
    if cool != 1:
        logging.info('Cooled thinned output with temp: %s', cool)
        # only needed for the cooled importance weight; computing it lazily
        # also avoids touching loglikes when cool == 1
        MaxL = np.max(self.loglikes)
    with open(fname, 'w', encoding='utf-8') as f:
        for thin in thin_ix:
            if cool != 1:
                newL = self.loglikes[thin] * cool
                # importance weight of the cooled sample relative to best fit
                f.write("%16.7E" % (
                    np.exp(-(newL - self.loglikes[thin]) - MaxL * (1 - cool))))
                f.write("%16.7E" % newL)
            else:
                f.write("%f" % 1.)
                f.write("%f" % (self.loglikes[thin]))
            # fix: write the sample row actually selected by thin_ix
            # (previously used a sequential counter i, so the first
            # len(thin_ix) rows were written regardless of the indices)
            for j in range(nparams):
                f.write("%16.7E" % (self.samples[thin][j]))
            # fix: one sample per line (newline was missing entirely,
            # producing a single unreadable line for the whole file)
            f.write("\n")
    print('Wrote ', len(thin_ix), ' thinned samples')
def getCovMat(self):
    """
    Covariance of the non-derived parameters only, wrapped as a
    :class:`~.covmat.CovMat` (useful e.g. for seeding subsequent MCMC runs).

    :return: A :class:`~.covmat.CovMat` object holding the covariance
    """
    n_core = self.paramNames.numNonDerived()
    names = self.paramNames.list()[:n_core]
    block = self.fullcov[:n_core, :n_core]
    return covmat.CovMat(matrix=block, paramNames=names)
def writeCovMatrix(self, filename=None):
    """
    Writes the covariance matrix of non-derived parameters to a file.

    :param filename: The filename to write to; default is file_root.covmat
    """
    filename = filename or self.rootdirname + ".covmat"
    self.getCovMat().saveToFile(filename)
def writeCorrelationMatrix(self, filename=None):
    """
    Save the parameter correlation matrix to a text file.

    :param filename: destination file; defaults to file_root.corr
    """
    if not filename:
        filename = self.rootdirname + ".corr"
    np.savetxt(filename, self.getCorrelationMatrix(), fmt="%15.7E")
def getFractionIndices(self, weights, n):
    """
    Calculates the indices of weights that split the weights into sets of equal 1/n fraction of the total weight

    :param weights: array of weights
    :param n: number of groups to split in to
    :return: array of indices of the boundary rows in the weights array
    """
    cumsum = np.cumsum(weights)
    # fix: derive the total weight and array length from the passed-in
    # weights rather than self.norm / self.weights, so the method works for
    # any weight array (identical result for the usual weights=self.weights
    # call, where cumsum[-1] equals self.norm).
    total = cumsum[-1]
    targets = np.linspace(0, 1, n, endpoint=False) * total
    return np.append(np.searchsorted(cumsum, targets), len(weights))
def PCA(self, params, param_map=None, normparam=None, writeDataToFile=False, filename=None,
        conditional_params=(), n_best_only=None):
    """
    Perform principle component analysis (PCA). In other words,
    get eigenvectors and eigenvalues for normalized variables
    with optional (log modulus) mapping to find power law fits.

    :param params: List of names of the parameters to use
    :param param_map: A transformation to apply to parameter values; A list or string containing
                      either N (no transformation) or L (for log transform) for each parameter.
                      By default uses log if no parameter values cross zero
    :param normparam: optional name of parameter to normalize result (i.e. this parameter will have unit power)
    :param writeDataToFile: True if should write the output to file.
    :param filename: The filename to write, by default root_name.PCA.
    :param conditional_params: optional list of parameters to treat as fixed,
           i.e. for PCA conditional on fixed values of these parameters
    :param n_best_only: return just the short summary constraint for the tightest n_best_only constraints
    :return: a string description of the output of the PCA
    """
    logging.info('Doing PCA for %s parameters', len(params))
    if len(conditional_params):
        logging.info('conditional %u fixed parameters', len(conditional_params))
    PCAtext = 'PCA for parameters:\n'
    # keep only parameters that actually exist in this sample set
    params = [name for name in params if self.paramNames.parWithName(name)]
    nparams = len(params)
    indices = [self.index[param] for param in params]
    conditional_params = [self.index[param] for param in conditional_params]
    indices += conditional_params
    # normparam becomes a column index into params, or -1 for "auto"
    if normparam:
        if normparam in params:
            normparam = params.index(normparam)
        else:
            normparam = -1
    else:
        normparam = -1
    n = len(indices)
    PCdata = self.samples[:, indices].copy()
    PClabs = []
    PCmean = np.zeros(n)
    sd = np.zeros(n)
    newmean = np.zeros(n)
    newsd = np.zeros(n)
    if param_map is None:
        # default mapping: log-transform ('L') parameters whose range stays
        # well away from zero, otherwise leave linear ('N')
        param_map = ''
        for par in self.paramNames.parsWithNames(params):
            self._initParamRanges(par.name)
            if par.param_max < 0 or par.param_min < (par.param_max - par.param_min) / 10:
                param_map += 'N'
            else:
                param_map += 'L'
    doexp = False
    for i, parix in enumerate(indices):
        if i < nparams:
            label = self.parLabel(parix)
            if param_map[i] == 'L':
                doexp = True
                PCdata[:, i] = np.log(PCdata[:, i])
                PClabs.append("ln(" + label + ")")
            elif param_map[i] == 'M':
                # 'M' maps strictly negative parameters via ln(-x)
                doexp = True
                PCdata[:, i] = np.log(-1.0 * PCdata[:, i])
                PClabs.append("ln(-" + label + ")")
            else:
                PClabs.append(label)
            PCAtext += "%10s :%s\n" % (str(parix + 1), str(PClabs[i]))
        # standardize: subtract weighted mean and divide by weighted std dev
        PCmean[i] = np.dot(self.weights, PCdata[:, i]) / self.norm
        PCdata[:, i] -= PCmean[i]
        sd[i] = np.sqrt(np.dot(self.weights, PCdata[:, i] ** 2) / self.norm)
        if sd[i] != 0:
            PCdata[:, i] /= sd[i]
    PCAtext += "\n"
    PCAtext += 'Correlation matrix for reduced parameters\n'
    correlationMatrix = np.ones((n, n))
    for i in range(n):
        for j in range(i):
            correlationMatrix[j][i] = np.dot(self.weights, PCdata[:, i] * PCdata[:, j]) / self.norm
            correlationMatrix[i][j] = correlationMatrix[j][i]
    for i in range(nparams):
        PCAtext += '%12s :' % params[i]
        for j in range(n):
            PCAtext += '%8.4f' % correlationMatrix[j][i]
        PCAtext += '\n'
    if len(conditional_params):
        # condition on the fixed parameters: invert the full correlation
        # matrix, restrict to the free block, and invert back
        u = np.linalg.inv(correlationMatrix)
        u = u[np.ix_(list(range(len(params))), list(range(len(params))))]
        u = np.linalg.inv(u)
        n = nparams
        PCdata = PCdata[:, :nparams]
    else:
        u = correlationMatrix
    evals, evects = np.linalg.eig(u)
    isorted = evals.argsort()
    u = np.transpose(evects[:, isorted])  # redefining u
    PCAtext += '\n'
    PCAtext += 'e-values of correlation matrix\n'
    for i in range(n):
        isort = isorted[i]
        PCAtext += 'PC%2i: %8.4f\n' % (i + 1, evals[isort])
    PCAtext += '\n'
    PCAtext += 'e-vectors\n'
    for j in range(n):
        PCAtext += '%3i:' % (indices[j] + 1)
        for i in range(n):
            isort = isorted[i]
            PCAtext += '%8.4f' % (evects[j][isort])
        PCAtext += '\n'
    if normparam != -1:
        # Set so parameter normparam has exponent 1
        for i in range(n):
            u[i, :] = u[i, :] / u[i, normparam] * sd[normparam]
    else:
        # Normalize so main component has exponent 1
        for i in range(n):
            maxi = np.abs(u[i, :]).argmax()
            u[i, :] = u[i, :] / u[i, maxi] * sd[maxi]
    # project the standardized samples onto the (rescaled) eigenvectors
    nrows = PCdata.shape[0]
    for i in range(nrows):
        PCdata[i, :] = np.dot(u, PCdata[i, :])
        if doexp:
            PCdata[i, :] = np.exp(PCdata[i, :])
    PCAtext += '\n'
    PCAtext += 'Principle components\n'
    PCAmodeTexts = []
    for i in range(n):
        isort = isorted[i]
        summary = 'PC%i (e-value: %f)\n' % (i + 1, evals[isort])
        for j in range(n):
            label = self.parLabel(indices[j])
            if param_map[j] in ['L', 'M']:
                # log-mapped parameter: report as a power law (x/div)^expo
                expo = "%f" % (1.0 / sd[j] * u[i][j])
                if param_map[j] == "M":
                    div = "%f" % (-np.exp(PCmean[j]))
                else:
                    div = "%f" % (np.exp(PCmean[j]))
                summary += '[%f] (%s/%s)^{%s}\n' % (u[i][j], label, div, expo)
            else:
                expo = "%f" % (sd[j] / u[i][j])
                if doexp:
                    summary += '[%f] exp((%s-%f)/%s)\n' % (u[i][j], label, PCmean[j], expo)
                else:
                    summary += '[%f] (%s-%f)/%s)\n' % (u[i][j], label, PCmean[j], expo)
        newmean[i] = self.mean(PCdata[:, i])
        newsd[i] = np.sqrt(self.mean((PCdata[:, i] - newmean[i]) ** 2))
        summary += ' = %f +- %f\n' % (newmean[i], newsd[i])
        summary += '\n'
        PCAmodeTexts += [summary]
        PCAtext += summary
    # Find out how correlated these components are with other parameters
    PCAtext += 'Correlations of principle components\n'
    comps = ["%8i" % i for i in range(1, n + 1)]
    PCAtext += '%s\n' % ("".join(comps))
    for i in range(n):
        PCdata[:, i] = (PCdata[:, i] - newmean[i]) / newsd[i]
    for j in range(n):
        PCAtext += 'PC%2i' % (j + 1)
        for i in range(n):
            PCAtext += '%8.3f' % (self.mean(PCdata[:, i] * PCdata[:, j]))
        PCAtext += '\n'
    # correlation of each component with every sampled parameter
    for j in range(self.n):
        PCAtext += '%4i' % (j + 1)
        for i in range(n):
            PCAtext += '%8.3f' % (
                np.sum(self.weights * PCdata[:, i]
                       * (self.samples[:, j] - self.means[j]) / self.sddev[j]) / self.norm)
        PCAtext += ' (%s)\n' % (self.parLabel(j))
    if writeDataToFile:
        with open(filename or self.rootdirname + ".PCA", "w", encoding='utf-8') as f:
            f.write(PCAtext)
    if n_best_only:
        if n_best_only == 1:
            return PCAmodeTexts[0]
        return PCAmodeTexts[:n_best_only]
    else:
        return PCAtext
def getNumSampleSummaryText(self):
    """
    Build a short human-readable summary of the sample counts: raw rows,
    parameter count, and several measures of the effective number of
    independent samples.

    :return: The summary text as a string.
    """
    parts = ['using %s rows, %s parameters; mean weight %s, tot weight %s\n' % (
        self.numrows, self.paramNames.numParams(), self.mean_mult, self.norm)]
    if self.indep_thin != 0:
        parts.append('Approx indep samples (N/corr length): %s\n' % (round(self.norm / self.indep_thin)))
    parts.append('Equiv number of single samples (sum w)/max(w): %s\n' % (round(self.norm / self.max_mult)))
    parts.append('Effective number of weighted samples (sum w)^2/sum(w^2): %s\n' % (
        int(self.norm ** 2 / np.dot(self.weights, self.weights))))
    return ''.join(parts)
# noinspection PyUnboundLocalVariable
def getConvergeTests(self, test_confidence=0.95, writeDataToFile=False,
                     what=('MeanVar', 'GelmanRubin', 'SplitTest', 'RafteryLewis', 'CorrLengths'),
                     filename=None, feedback=False):
    """
    Do convergence tests.

    :param test_confidence: confidence limit to test for convergence (two-tail, only applies to some tests)
    :param writeDataToFile: True if should write output to a file
    :param what: The tests to run. Should be a list of any of the following:

        - 'MeanVar': Gelman-Rubin sqrt(var(chain mean)/mean(chain var)) test in individual parameters (multiple chains only)
        - 'GelmanRubin': Gelman-Rubin test for the worst orthogonalized parameter (multiple chains only)
        - 'SplitTest': Crude test for variation in confidence limits when samples are split up into subsets
        - 'RafteryLewis': `Raftery-Lewis test <http://www.stat.washington.edu/tech.reports/raftery-lewis2.ps>`_ (integer weight samples only)
        - 'CorrLengths': Sample correlation lengths
    :param filename: The filename to write to, default is file_root.converge
    :param feedback: If set to True, Prints the output as well as returning it.
    :return: text giving the output of the tests
    """
    lines = ''
    nparam = self.n
    chainlist = self.getSeparateChains()
    num_chains_used = len(chainlist)
    if num_chains_used > 1 and feedback:
        print('Number of chains used = ', num_chains_used)
    for chain in chainlist:
        # precompute per-chain differences from the mean (used by several tests)
        chain.setDiffs()
    parForm = self.paramNames.parFormat()
    parNames = [parForm % self.parName(j) for j in range(nparam)]
    # two-tail quantiles corresponding to test_confidence (upper, lower)
    limits = np.array([1 - (1 - test_confidence) / 2, (1 - test_confidence) / 2])
    if 'CorrLengths' in what:
        lines += "Parameter autocorrelation lengths " \
                 "(effective number of samples N_eff = tot weight/weight length)\n"
        lines += "\n"
        lines += parForm % "" + '%15s %15s %15s\n' % ('Weight Length', 'Sample length', 'N_eff')
        maxoff = np.min([chain.weights.size // 10 for chain in chainlist])
        maxN = 0
        for j in range(nparam):
            # weight-normalized autocorrelation summed over chains
            corr = np.zeros(maxoff + 1)
            for chain in chainlist:
                corr += chain.getAutocorrelation(j, maxoff, normalized=False) * chain.norm
            corr /= self.norm * self.vars[j]
            # first lag where correlation drops below 5% of the variance
            ix = np.argmin(corr > 0.05 * corr[0])
            N = corr[0] + 2 * np.sum(corr[1:ix])
            maxN = max(N, maxN)
            form = '%15.2E'
            if self.mean_mult > 1:
                form = '%15.2f'
            lines += parNames[j] + form % N + ' %15.2f %15i\n' % (N / self.mean_mult, self.norm / N)
        # worst (largest) correlation length sets the independent-sample thinning
        self.indep_thin = maxN
        lines += "\n"
    if num_chains_used > 1 and 'MeanVar' in what:
        lines += "\n"
        lines += "mean convergence stats using remaining chains\n"
        lines += "param sqrt(var(chain mean)/mean(chain var))\n"
        lines += "\n"
        between_chain_var = np.zeros(nparam)
        in_chain_var = np.zeros(nparam)
        for chain in chainlist:
            between_chain_var += (chain.means - self.means) ** 2
        between_chain_var /= (num_chains_used - 1)
        for j in range(nparam):
            # Get stats for individual chains - the variance of the means over the mean of the variances
            for chain in chainlist:
                in_chain_var[j] += np.dot(chain.weights, chain.diffs[j] ** 2)
            in_chain_var[j] /= self.norm
            lines += parNames[j] + "%10.4f %s\n" % (
                math.sqrt(between_chain_var[j] / in_chain_var[j]), self.parLabel(j))
        lines += "\n"
    nparamMC = self.paramNames.numNonDerived()
    if num_chains_used > 1 and nparamMC > 0 and 'GelmanRubin' in what:
        D = self.getGelmanRubinEigenvalues(chainlist=chainlist)
        if D is not None:
            # R-1 statistic is the worst eigenvalue
            self.GelmanRubin = np.max(D)
            lines += "var(mean)/mean(var) for eigenvalues of covariance of y of orthonormalized parameters\n"
            for jj, Di in enumerate(D):
                lines += "%3i%13.5f\n" % (jj + 1, Di)
            # noinspection PyStringFormat
            GRSummary = " var(mean)/mean(var), remaining chains, worst e-value: R-1 = %13.5F" % self.GelmanRubin
        else:
            self.GelmanRubin = None
            GRSummary = 'Gelman-Rubin covariance not invertible (parameter not moved?)'
            logging.warning(GRSummary)
        if feedback:
            print(GRSummary)
        lines += "\n"
    if 'SplitTest' in what:
        # Do tests for robustness under using splits of the samples
        # Return the rms ([change in upper/lower quantile]/[standard deviation])
        # when data split into 2, 3,.. sets
        lines += "Split tests: rms_n([delta(upper/lower quantile)]/sd) n={2,3,4}, limit=%.0f%%:\n" % (
            100 * self.converge_test_limit)
        lines += "i.e. mean sample splitting change in the quantiles in units of the st. dev.\n"
        lines += "\n"
        frac_indices = []
        for i in range(self.max_split_tests - 1):
            frac_indices.append(self.getFractionIndices(self.weights, i + 2))
        for j in range(nparam):
            split_tests = np.zeros((self.max_split_tests - 1, 2))
            confids = self.confidence(self.samples[:, j], limits)
            for ix, frac in enumerate(frac_indices):
                split_n = 2 + ix
                # accumulate squared quantile shifts over the split subsets
                for f1, f2 in zip(frac[:-1], frac[1:]):
                    split_tests[ix, :] += (self.confidence(self.samples[:, j], limits, start=f1,
                                                           end=f2) - confids) ** 2
                split_tests[ix, :] = np.sqrt(split_tests[ix, :] / split_n) / self.sddev[j]
            for endb, typestr in enumerate(['upper', 'lower']):
                lines += parNames[j]
                for ix in range(self.max_split_tests - 1):
                    lines += "%9.4f" % (split_tests[ix, endb])
                lines += " %s\n" % typestr
        lines += "\n"

    class LoopException(Exception):
        # used to break out of the nested Raftery-Lewis loops for one chain
        pass

    # the remaining tests require (effectively) integer weights
    if np.all(np.abs(self.weights - self.weights.astype(int)) < 1e-4 / self.max_mult):
        if 'RafteryLewis' in what:
            # Raftery and Lewis method
            # See http://www.stat.washington.edu/tech.reports/raftery-lewis2.ps
            # Raw non-importance sampled chains only
            thin_fac = np.empty(num_chains_used, dtype=int)
            epsilon = 0.001
            nburn = np.zeros(num_chains_used, dtype=int)
            markov_thin = np.zeros(num_chains_used, dtype=int)
            hardest = -1
            hardestend = 0
            for ix, chain in enumerate(chainlist):
                thin_fac[ix] = int(round(np.max(chain.weights)))
                try:
                    for j in range(nparamMC):
                        # Get binary chain depending on whether above or below confidence value
                        confids = self.confidence(chain.samples[:, j], limits, weights=chain.weights)
                        for endb in [0, 1]:
                            u = confids[endb]
                            # increase the thinning factor until a 1st-order
                            # Markov description is adequate (BIC test)
                            while True:
                                thin_ix = self.thin_indices(thin_fac[ix], chain.weights)
                                thin_rows = len(thin_ix)
                                if thin_rows < 2:
                                    break
                                binchain = np.ones(thin_rows, dtype=int)
                                binchain[chain.samples[thin_ix, j] >= u] = 0
                                indexes = binchain[:-2] * 4 + binchain[1:-1] * 2 + binchain[2:]
                                # Estimate transitions probabilities for 2nd order process
                                tran = np.bincount(indexes, minlength=8).reshape((2, 2, 2))
                                # tran[:, :, :] = 0
                                # for i in range(2, thin_rows):
                                #     tran[binchain[i - 2]][binchain[i - 1]][binchain[i]] += 1
                                # Test whether 2nd order is better than Markov using BIC statistic
                                g2 = 0
                                for i1 in [0, 1]:
                                    for i2 in [0, 1]:
                                        for i3 in [0, 1]:
                                            if tran[i1][i2][i3] != 0:
                                                fitted = float(
                                                    (tran[i1][i2][0] + tran[i1][i2][1]) *
                                                    (tran[0][i2][i3] + tran[1][i2][i3])) \
                                                    / float(tran[0][i2][0] + tran[0][i2][1] +
                                                            tran[1][i2][0] + tran[1][i2][1])
                                                focus = float(tran[i1][i2][i3])
                                                g2 += math.log(focus / fitted) * focus
                                g2 *= 2
                                if g2 - math.log(float(thin_rows - 2)) * 2 < 0:
                                    break
                                thin_fac[ix] += 1
                            # Get Markov transition probabilities for binary processes
                            if np.sum(tran[:, 0, 1]) == 0 or np.sum(tran[:, 1, 0]) == 0:
                                thin_fac[ix] = 0
                                raise LoopException()
                            alpha = np.sum(tran[:, 0, 1]) / float(np.sum(tran[:, 0, 0]) + np.sum(tran[:, 0, 1]))
                            beta = np.sum(tran[:, 1, 0]) / float(np.sum(tran[:, 1, 0]) + np.sum(tran[:, 1, 1]))
                            probsum = alpha + beta
                            # burn-in steps needed to reach the stationary
                            # distribution to accuracy epsilon
                            tmp1 = math.log(probsum * epsilon / max(alpha, beta)) / math.log(abs(1.0 - probsum))
                            if int(tmp1 + 1) * thin_fac[ix] > nburn[ix]:
                                nburn[ix] = int(tmp1 + 1) * thin_fac[ix]
                                hardest = j
                                hardestend = endb
                            markov_thin[ix] = thin_fac[ix]
                    # Get thin factor to have independent samples rather than Markov
                    hardest = max(hardest, 0)
                    u = self.confidence(self.samples[:, hardest], (1 - test_confidence) / 2, hardestend == 0)
                    while True:
                        thin_ix = self.thin_indices(thin_fac[ix], chain.weights)
                        thin_rows = len(thin_ix)
                        if thin_rows < 2:
                            break
                        binchain = np.ones(thin_rows, dtype=int)
                        binchain[chain.samples[thin_ix, hardest] >= u] = 0
                        indexes = binchain[:-1] * 2 + binchain[1:]
                        # Estimate transitions probabilities for 2nd order process
                        tran2 = np.bincount(indexes, minlength=4).reshape(2, 2)
                        # tran2[:, :] = 0
                        # for i in range(1, thin_rows):
                        #     tran2[binchain[i - 1]][binchain[i]] += 1
                        # Test whether independence is better than Markov using BIC statistic
                        g2 = 0
                        for i1 in [0, 1]:
                            for i2 in [0, 1]:
                                if tran2[i1][i2] != 0:
                                    fitted = float(
                                        (tran2[i1][0] + tran2[i1][1]) *
                                        (tran2[0][i2] + tran2[1][i2])) / float(thin_rows - 1)
                                    focus = float(tran2[i1][i2])
                                    if fitted <= 0 or focus <= 0:
                                        print('Raftery and Lewis estimator had problems')
                                        return
                                    g2 += np.log(focus / fitted) * focus
                        g2 *= 2
                        if g2 - np.log(float(thin_rows - 1)) < 0:
                            break
                        thin_fac[ix] += 1
                except LoopException:
                    pass
                # NOTE(review): bare except deliberately marks this chain as
                # failed on any numerical error; it also hides real bugs —
                # consider narrowing (e.g. ZeroDivisionError).
                except:
                    thin_fac[ix] = 0
                if thin_fac[ix] and thin_rows < 2:
                    thin_fac[ix] = 0
            lines += "Raftery&Lewis statistics\n"
            lines += "\n"
            lines += "chain markov_thin indep_thin nburn\n"
            for ix in range(num_chains_used):
                if thin_fac[ix] == 0:
                    lines += "%4i Failed/not enough samples\n" % ix
                else:
                    lines += "%4i%12i%12i%12i\n" % (
                        ix, markov_thin[ix], thin_fac[ix], nburn[ix])
            self.RL_indep_thin = np.max(thin_fac)
            if feedback:
                if not np.all(thin_fac != 0):
                    print('RL: Not enough samples to estimate convergence stats')
                else:
                    print('RL: Thin for Markov: ', np.max(markov_thin))
                    print('RL: Thin for indep samples: ', str(self.RL_indep_thin))
                    print('RL: Estimated burn in steps: ', np.max(nburn), ' (',
                          int(round(np.max(nburn) / self.mean_mult)), ' rows)')
            lines += "\n"
        if 'CorrSteps' in what:
            # Get correlation lengths.
            # We ignore the fact that there are jumps between chains, so slight underestimate
            lines += "Parameter auto-correlations as function of step separation\n"
            lines += "\n"
            if self.corr_length_thin != 0:
                autocorr_thin = self.corr_length_thin
            else:
                # heuristic thinning based on the estimated correlation length
                if self.indep_thin == 0:
                    autocorr_thin = 20
                elif self.indep_thin <= 30:
                    autocorr_thin = 5
                else:
                    autocorr_thin = int(5 * (self.indep_thin / 30))
            thin_ix = self.thin_indices(autocorr_thin)
            thin_rows = len(thin_ix)
            maxoff = int(min(self.corr_length_steps, thin_rows // (2 * num_chains_used)))
            if maxoff > 0:
                corrs = np.zeros([maxoff, nparam])
                for chain in chainlist:
                    thin_ix = chain.thin_indices(autocorr_thin)
                    thin_rows = len(thin_ix)
                    maxoff = min(maxoff, thin_rows // autocorr_thin)
                    for j in range(nparam):
                        diff = chain.diffs[j][thin_ix]
                        for off in range(1, maxoff + 1):
                            corrs[off - 1][j] += np.dot(diff[off:], diff[:-off]) / (thin_rows - off) / \
                                                 self.vars[j]
                corrs /= len(chainlist)
                lines += parForm % ""
                for i in range(maxoff):
                    lines += "%8i" % ((i + 1) * autocorr_thin)
                lines += "\n"
                for j in range(nparam):
                    label = self.parLabel(j)
                    lines += parNames[j]
                    for i in range(maxoff):
                        lines += "%8.3f" % corrs[i][j]
                    lines += " %s\n" % label
    if writeDataToFile:
        with open(filename or (self.rootdirname + '.converge'), 'w', encoding='utf-8') as f:
            f.write(lines)
    return lines
def _get1DNeff(self, par, param):
N_eff = getattr(par, 'N_eff_kde', None)
if N_eff is None:
par.N_eff_kde = self.getEffectiveSamplesGaussianKDE(param, scale=par.sigma_range)
N_eff = par.N_eff_kde
return N_eff
def getAutoBandwidth1D(self, bins, par, param, mult_bias_correction_order=None, kernel_order=1, N_eff=None):
    """
    Get optimized kernel density bandwidth (in units of the range of the bins)
    Based on optimal Improved Sheather-Jones bandwidth for basic Parzen kernel, then scaled if higher-order method
    being used. For details see the notes at `arXiv:1910.13970 <https://arxiv.org/abs/1910.13970>`_.

    :param bins: numpy array of binned weights for the samples
    :param par: A :class:`~.paramnames.ParamInfo` instance for the parameter to analyse
    :param param: index of the parameter to use
    :param mult_bias_correction_order: order of multiplicative bias correction (0 is basic Parzen kernel);
        by default taken from instance settings.
    :param kernel_order: order of the kernel
        (0 is Parzen, 1 does linear boundary correction, 2 is a higher-order kernel)
    :param N_eff: effective number of samples. If not specified estimated using weights, autocorrelations,
        and fiducial bandwidth
    :return: kernel density bandwidth (in units the range of the bins)
    """
    if N_eff is None:
        N_eff = self._get1DNeff(par, param)
    # Improved Sheather-Jones estimate on the binned weights
    h = kde.gaussian_kde_bandwidth_binned(bins, Neff=N_eff)
    bin_range = max(par.param_max, par.range_max) - min(par.param_min, par.range_min)
    if h is None or h < 0.01 * N_eff ** (-1. / 5) * (par.range_max - par.range_min) / bin_range:
        # ISJ failed or returned an implausibly small width:
        # fall back to the Gaussian rule-of-thumb bandwidth
        hnew = 1.06 * par.sigma_range * N_eff ** (-1. / 5) / bin_range
        # suppress the warning for parameters the user opted out of,
        # and (by default) for chi2_*/minuslog* derived parameters
        if par.name not in self.no_warning_params \
                and (not self.no_warning_chi2_params or 'chi2_' not in par.name and 'minuslog' not in par.name):
            msg = 'auto bandwidth for %s very small or failed (h=%s,N_eff=%s). Using fallback (h=%s)' % (
                par.name, h, N_eff, hnew)
            if getattr(self, 'raise_on_bandwidth_errors', False):
                raise BandwidthError(msg)
            else:
                logging.warning(msg)
        h = hnew
    par.kde_h = h
    m = self.mult_bias_correction_order if mult_bias_correction_order is None else mult_bias_correction_order
    if kernel_order > 1:
        m = max(m, 1)
    if m:
        # higher order method
        # e.g. http://biomet.oxfordjournals.org/content/82/2/327.full.pdf+html
        # some prefactors given in http://eprints.whiterose.ac.uk/42950/6/taylorcc2%5D.pdf
        # Here we just take unit prefactor relative to Gaussian
        # and rescale the optimal h for standard KDE to accounts for higher order scaling
        # Should be about 1.3 x larger for Gaussian, but smaller in some other cases
        return h * N_eff ** (1. / 5 - 1. / (4 * m + 5))
    else:
        return h
def getAutoBandwidth2D(self, bins, parx, pary, paramx, paramy, corr, rangex, rangey, base_fine_bins_2D,
                       mult_bias_correction_order=None, min_corr=0.2, N_eff=None, use_2D_Neff=False):
    """
    Get optimized kernel density bandwidth matrix in parameter units, using Improved Sheather Jones method in
    sheared parameters. The shearing is determined using the covariance, so you know the distribution is
    multi-modal, potentially giving 'fake' correlation, turn off shearing by setting min_corr=1.
    For details see the notes `arXiv:1910.13970 <https://arxiv.org/abs/1910.13970>`_.

    :param bins: 2D numpy array of binned weights
    :param parx: A :class:`~.paramnames.ParamInfo` instance for the x parameter
    :param pary: A :class:`~.paramnames.ParamInfo` instance for the y parameter
    :param paramx: index of the x parameter
    :param paramy: index of the y parameter
    :param corr: correlation of the samples
    :param rangex: scale in the x parameter
    :param rangey: scale in the y parameter
    :param base_fine_bins_2D: number of bins to use for re-binning in rotated parameter space
    :param mult_bias_correction_order: multiplicative bias correction order (0 is Parzen kernel); by default taken
        from instance settings
    :param min_corr: minimum correlation value at which to bother de-correlating the parameters
    :param N_eff: effective number of samples. If not specified, uses rough estimate that accounts for
        weights and strongly-correlated nearby samples (see notes)
    :param use_2D_Neff: if N_eff not specified, whether to use 2D estimate of effective number, or approximate from
        the 1D results (default from use_effective_samples_2D setting)
    :return: kernel density bandwidth matrix in parameter units
    """
    if N_eff is None:
        # NOTE(review): with the default use_2D_Neff=False this condition is
        # False, so self.use_effective_samples_2D is only consulted when the
        # caller explicitly passes use_2D_Neff=None — confirm that is intended.
        if (use_2D_Neff if use_2D_Neff is not None else self.use_effective_samples_2D) and abs(corr) < 0.999:
            # For multi-modal could overestimate width, and hence underestimate number of samples
            N_eff = self.getEffectiveSamplesGaussianKDE_2d(paramx, paramy)
        else:
            N_eff = min(self._get1DNeff(parx, paramx), self._get1DNeff(pary, paramy))
    logging.debug('%s %s AutoBandwidth2D: N_eff=%s, corr=%s', parx.name, pary.name, N_eff, corr)
    has_limits = parx.has_limits or pary.has_limits
    do_correlated = not parx.has_limits or not pary.has_limits

    def fallback_widths(ex):
        # rule-of-thumb widths (and clipped correlation) used when the
        # 2D ISJ optimizer fails; optionally re-raised as BandwidthError
        msg = '2D kernel density bandwidth optimizer failed for %s, %s. Using fallback width: %s' % (
            parx.name, pary.name, ex)
        if getattr(self, 'raise_on_bandwidth_errors', False):
            raise BandwidthError(msg)
        logging.warning(msg)
        _hx = parx.sigma_range / N_eff ** (1. / 6)
        _hy = pary.sigma_range / N_eff ** (1. / 6)
        return _hx, _hy, max(min(corr, self.max_corr_2D), -self.max_corr_2D)

    if min_corr < abs(corr) <= self.max_corr_2D and do_correlated:
        # 'shear' the data so fairly uncorrelated, making sure shear keeps any bounds on one parameter unchanged
        # the binning step will rescale to make roughly isotropic as assumed
        # by the 2D kernel optimizer psi_{ab} derivatives
        i, j = paramx, paramy
        imax, imin = None, None
        if parx.has_limits_bot:
            imin = parx.range_min
        if parx.has_limits_top:
            imax = parx.range_max
        if pary.has_limits:
            # put the limited parameter first so its bounds are preserved
            i, j = j, i
            if pary.has_limits_bot:
                imin = pary.range_min
            if pary.has_limits_top:
                imax = pary.range_max
        cov = self.getCov(pars=[i, j])
        S = np.linalg.cholesky(cov)
        ichol = np.linalg.inv(S)
        S *= ichol[0, 0]
        r = ichol[1, :] / ichol[0, 0]
        # first parameter unchanged; second sheared to decorrelate
        p1 = self.samples[:, i]
        p2 = r[0] * self.samples[:, i] + r[1] * self.samples[:, j]
        bin1, r1 = kde.bin_samples(p1, nbins=base_fine_bins_2D, range_min=imin, range_max=imax)
        bin2, r2 = kde.bin_samples(p2, nbins=base_fine_bins_2D)
        rotbins, _ = self._make2Dhist(bin1, bin2, base_fine_bins_2D, base_fine_bins_2D)
        try:
            opt = kde.KernelOptimizer2D(rotbins, N_eff, 0, do_correlation=not has_limits)
            hx, hy, c = opt.get_h()
            hx *= r1
            hy *= r2
            # rotate the optimized kernel covariance back to parameter space
            kernelC = S.dot(np.array([[hx ** 2, hx * hy * c], [hx * hy * c, hy ** 2]])).dot(S.T)
            hx, hy, c = np.sqrt(kernelC[0, 0]), np.sqrt(kernelC[1, 1]), kernelC[0, 1] / np.sqrt(
                kernelC[0, 0] * kernelC[1, 1])
            if pary.has_limits:
                # undo the earlier axis swap
                hx, hy = hy, hx
            # print 'derotated pars', hx, hy, c
        except ValueError as e:
            hx, hy, c = fallback_widths(e)
    elif abs(corr) > self.max_corr_2D or not do_correlated and corr > 0.8:
        # too correlated (or both parameters limited): use clipped correlation
        # with rule-of-thumb widths
        c = max(min(corr, self.max_corr_2D), -self.max_corr_2D)
        hx = parx.sigma_range / N_eff ** (1. / 6)
        hy = pary.sigma_range / N_eff ** (1. / 6)
    else:
        try:
            opt = kde.KernelOptimizer2D(bins, N_eff, corr, do_correlation=not has_limits,
                                        fallback_t=(min(pary.sigma_range / rangey,
                                                        parx.sigma_range / rangex) / N_eff ** (1. / 6)) ** 2)
            hx, hy, c = opt.get_h()
            hx *= rangex
            hy *= rangey
        except ValueError as e:
            hx, hy, c = fallback_widths(e)
    if mult_bias_correction_order is None:
        mult_bias_correction_order = self.mult_bias_correction_order
    logging.debug('hx/sig, hy/sig, corr =%s, %s, %s', hx / parx.err, hy / pary.err, c)
    if mult_bias_correction_order:
        # widen the bandwidth to account for the reduced bias of the
        # multiplicative boundary correction (see arXiv:1910.13970)
        scale = 1.1 * N_eff ** (1. / 6 - 1. / (2 + 4 * (1 + mult_bias_correction_order)))
        hx *= scale
        hy *= scale
        logging.debug('hx/sig, hy/sig, corr, scale =%s, %s, %s, %s', hx / parx.err, hy / pary.err, c, scale)
    return hx, hy, c
def _initParamRanges(self, j, paramConfid=None):
if isinstance(j, str):
j = self.index[j]
paramVec = self.samples[:, j]
return self._initParam(self.paramNames.names[j], paramVec, self.means[j], self.sddev[j], paramConfid)
def _initParam(self, par, paramVec, mean=None, sddev=None, paramConfid=None):
    """Set basic statistics and plot/smoothing ranges on a parameter info object.

    Sets par.err, par.mean, par.param_min/max, par.range_min/max,
    par.sigma_range (robust scale estimate used for kernel bandwidths), and
    decides whether prior limits are close enough to matter
    (par.has_limits_bot/top, par.has_limits).

    :param par: parameter info object to update; returned for convenience
    :param paramVec: array of sample values for this parameter
    :param mean: optional precomputed mean (computed from paramVec if None)
    :param sddev: optional precomputed standard deviation
    :param paramConfid: optional cached confidence-data instance
    :return: the updated par object
    """
    if mean is None:
        mean = paramVec.mean()
    if sddev is None:
        sddev = paramVec.std()
    par.err = sddev
    par.mean = mean
    par.param_min = np.min(paramVec)
    par.param_max = np.max(paramVec)
    paramConfid = paramConfid or self.initParamConfidenceData(paramVec)
    # sigma_range is estimate related to shape of structure in the distribution = std dev for Gaussian
    # search for peaks using quantiles,
    # e.g. like simplified version of Janssen 95 (http://dx.doi.org/10.1080/10485259508832654)
    confid_points = np.linspace(0.1, 0.9, 9)
    # First two entries are the outer range_confidence bounds; the rest are the 10%..90% quantiles.
    confids = self.confidence(paramConfid,
                              np.array([self.range_confidence, 1 - self.range_confidence] + list(confid_points)))
    par.range_min, par.range_max = confids[0:2]
    # Shift so confids spans [param_min, 10%..90% quantiles, param_max]
    confids[1:-1] = confids[2:]
    confids[0] = par.param_min
    confids[-1] = par.param_max
    # Width spanned by each run of four adjacent quantile intervals (40% of probability);
    # 1.049 is approximately the width of the central 40% interval for a unit Gaussian
    diffs = confids[4:] - confids[:-4]
    scale = np.min(diffs) / 1.049
    if np.all(diffs > par.err * 1.049) and np.all(diffs < scale * 1.5):
        # very flat, can use bigger
        par.sigma_range = scale
    else:
        par.sigma_range = min(par.err, scale)
    if self.range_ND_contour >= 0 and self.likeStats:
        # Optionally expand ranges toward the N-D likelihood contour limits
        if self.range_ND_contour >= par.ND_limit_bot.size:
            raise SettingError("range_ND_contour should be -1 (off), or 0, 1 for first or second contour level")
        par.range_min = min(max(par.range_min - par.err, par.ND_limit_bot[self.range_ND_contour]), par.range_min)
        par.range_max = max(max(par.range_max + par.err, par.ND_limit_top[self.range_ND_contour]), par.range_max)
    smooth_1D = par.sigma_range * 0.4
    if par.has_limits_bot:
        if par.range_min - par.limmin > 2 * smooth_1D and par.param_min - par.limmin > smooth_1D:
            # long way from limit
            par.has_limits_bot = False
        else:
            par.range_min = par.limmin
    if par.has_limits_top:
        if par.limmax - par.range_max > 2 * smooth_1D and par.limmax - par.param_max > smooth_1D:
            par.has_limits_top = False
        else:
            par.range_max = par.limmax
    # Pad unbounded sides so smoothing does not run off the plotted range
    if not par.has_limits_bot:
        par.range_min -= smooth_1D * 2
    if not par.has_limits_top:
        par.range_max += smooth_1D * 2
    par.has_limits = par.has_limits_top or par.has_limits_bot
    return par
def _binSamples(self, paramVec, par, num_fine_bins, borderfrac=0.1):
# High resolution density (sampled many times per smoothing scale). First and last bins are half width
border = (par.range_max - par.range_min) * borderfrac
binmin = min(par.param_min, par.range_min)
if not par.has_limits_bot:
binmin -= border
binmax = max(par.param_max, par.range_max)
if not par.has_limits_top:
binmax += border
fine_width = (binmax - binmin) / (num_fine_bins - 1)
ix = ((paramVec - binmin) / fine_width + 0.5).astype(int)
return ix, fine_width, binmin, binmax
def get1DDensity(self, name, **kwargs):
    """
    Returns a :class:`~.densities.Density1D` instance for parameter with given name. Result is cached.

    :param name: name of the parameter
    :param kwargs: arguments for :func:`~MCSamples.get1DDensityGridData`
    :return: A :class:`~.densities.Density1D` instance for parameter with given name
    """
    if self.needs_update:
        self.updateBaseStatistics()
    # Only consult the cache for default settings; any override forces a recompute.
    cached = self.density1D.get(name) if not kwargs else None
    if cached is not None:
        return cached
    return self.get1DDensityGridData(name, **kwargs)
# noinspection PyUnboundLocalVariable
def get1DDensityGridData(self, j, paramConfid=None, meanlikes=False, **kwargs):
    """
    Low-level function to get a :class:`~.densities.Density1D` instance for the marginalized 1D density
    of a parameter. Result is not cached.

    :param j: a name or index of the parameter
    :param paramConfid: optional cached :class:`~.chains.ParamConfidenceData` instance
    :param meanlikes: include mean likelihoods
    :param kwargs: optional settings to override instance settings of the same name (see `analysis_settings`):

           - **smooth_scale_1D**
           - **boundary_correction_order**
           - **mult_bias_correction_order**
           - **fine_bins**
           - **num_bins**
    :return: A :class:`~.densities.Density1D` instance
    """
    if self.needs_update:
        self.updateBaseStatistics()
    j = self._parAndNumber(j)[0]
    if j is None:
        return None
    par = self._initParamRanges(j, paramConfid)
    # Per-call overrides of the instance analysis settings
    num_bins = kwargs.get('num_bins', self.num_bins)
    smooth_scale_1D = kwargs.get('smooth_scale_1D', self.smooth_scale_1D)
    boundary_correction_order = kwargs.get('boundary_correction_order', self.boundary_correction_order)
    mult_bias_correction_order = kwargs.get('mult_bias_correction_order', self.mult_bias_correction_order)
    fine_bins = kwargs.get('fine_bins', self.fine_bins)
    paramrange = par.range_max - par.range_min
    if paramrange <= 0:
        raise MCSamplesError('Parameter range is <= 0: ' + par.name)
    width = paramrange / (num_bins - 1)
    # Raw weighted histogram on the fine grid (many bins per smoothing width)
    bin_indices, fine_width, binmin, binmax = self._binSamples(self.samples[:, j], par, fine_bins)
    bins = np.bincount(bin_indices, weights=self.weights, minlength=fine_bins)
    if meanlikes:
        if self.shade_likes_is_mean_loglikes:
            w = self.weights * self.loglikes
        else:
            w = self.weights * np.exp((self.mean_loglike - self.loglikes))
        finebinlikes = np.bincount(bin_indices, weights=w, minlength=fine_bins)
    # Choose smoothing width, expressed in fine-bin units
    if smooth_scale_1D <= 0:
        # Set automatically.
        bandwidth = self.getAutoBandwidth1D(bins, par, j, mult_bias_correction_order,
                                            boundary_correction_order) * (binmax - binmin)
        # for low sample numbers with big tails (e.g. from nested), prevent making too wide
        bandwidth = min(bandwidth, paramrange / 4)
        smooth_1D = bandwidth * abs(smooth_scale_1D) / fine_width
    elif smooth_scale_1D < 1.0:
        # fraction of the standard deviation
        smooth_1D = smooth_scale_1D * par.err / fine_width
    else:
        # multiple of the coarse bin width
        smooth_1D = smooth_scale_1D * width / fine_width
    if smooth_1D < 2:
        logging.warning('fine_bins not large enough to well sample smoothing scale - ' + par.name)
    smooth_1D = min(max(1., smooth_1D), fine_bins // 2)
    logging.debug("%s 1D sigma_range, std: %s, %s; smooth_1D_bins: %s ", par.name, par.sigma_range, par.err,
                  smooth_1D)
    # Gaussian smoothing kernel truncated at ~2.5 sigma
    winw = min(int(round(2.5 * smooth_1D)), fine_bins // 2 - 2)
    kernel = Kernel1D(winw, smooth_1D)
    cache = {}
    conv = convolve1D(bins, kernel.Win, 'same', cache=cache)
    fine_x = np.linspace(binmin, binmax, fine_bins)
    density1D = Density1D(fine_x, P=conv, view_ranges=[par.range_min, par.range_max])
    if meanlikes:
        # keep pre-correction smoothed histogram for the mean-likelihood re-weighting below
        rawbins = conv.copy()
    if par.has_limits and boundary_correction_order >= 0:
        # correct for cuts allowing for normalization over window
        prior_mask = np.ones(fine_bins + 2 * winw)
        if par.has_limits_bot:
            prior_mask[winw] = 0.5
            prior_mask[: winw] = 0
        if par.has_limits_top:
            prior_mask[-(winw + 1)] = 0.5
            prior_mask[-winw:] = 0
        a0 = convolve1D(prior_mask, kernel.Win, 'valid', cache=cache)
        ix = np.nonzero(a0 * density1D.P)
        a0 = a0[ix]
        normed = density1D.P[ix] / a0
        if boundary_correction_order == 0:
            density1D.P[ix] = normed
        elif boundary_correction_order <= 2:
            # linear boundary kernel, e.g. Jones 1993, Jones and Foster 1996
            # www3.stat.sinica.edu.tw/statistica/oldpdf/A6n414.pdf after Eq 1b, expressed for general prior mask
            # cf arXiv:1411.5528
            xWin = kernel.Win * kernel.x
            a1 = convolve1D(prior_mask, xWin, 'valid', cache=cache)[ix]
            a2 = convolve1D(prior_mask, xWin * kernel.x, 'valid', cache=cache, cache_args=[1])[ix]
            xP = convolve1D(bins, xWin, 'same', cache=cache)[ix]
            if boundary_correction_order == 1:
                corrected = (density1D.P[ix] * a2 - xP * a1) / (a0 * a2 - a1 ** 2)
            else:
                # quadratic correction
                a3 = convolve1D(prior_mask, xWin * kernel.x ** 2, 'valid', cache=cache, cache_args=[1])[ix]
                a4 = convolve1D(prior_mask, xWin * kernel.x ** 3, 'valid', cache=cache, cache_args=[1])[ix]
                x2P = convolve1D(bins, xWin * kernel.x, 'same', cache=cache, cache_args=[1])[ix]
                denom = a4 * a2 * a0 - a4 * a1 ** 2 - a2 ** 3 - a3 ** 2 * a0 + 2 * a1 * a2 * a3
                A = a4 * a2 - a3 ** 2
                B = a2 * a3 - a4 * a1
                C = a3 * a1 - a2 ** 2
                corrected = (density1D.P[ix] * A + xP * B + x2P * C) / denom
            # Exponentiated form keeps the density positive; clip the exponent so
            # near-empty bins cannot blow up the correction
            density1D.P[ix] = normed * np.exp(np.minimum(corrected / normed, 4) - 1)
        else:
            raise SettingError('Unknown boundary_correction_order (expected 0, 1, 2)')
    elif boundary_correction_order == 2:
        # higher order kernel
        # eg. see http://www.jstor.org/stable/2965571
        xWin2 = kernel.Win * kernel.x ** 2
        x2P = convolve1D(bins, xWin2, 'same', cache=cache)
        a2 = np.sum(xWin2)
        a4 = np.dot(xWin2, kernel.x ** 2)
        corrected = (density1D.P * a4 - a2 * x2P) / (a4 - a2 ** 2)
        ix = density1D.P > 0
        density1D.P[ix] *= np.exp(np.minimum(corrected[ix] / density1D.P[ix], 2) - 1)
    if mult_bias_correction_order:
        # Multiplicative bias correction: iteratively flatten by the current
        # estimate, re-smooth, and multiply back
        prior_mask = np.ones(fine_bins)
        if par.has_limits_bot:
            prior_mask[0] *= 0.5
        if par.has_limits_top:
            prior_mask[-1] *= 0.5
        a0 = convolve1D(prior_mask, kernel.Win, 'same', cache=cache, cache_args=[2])
        for _ in range(mult_bias_correction_order):
            # estimate using flattened samples to remove second order biases
            # mostly good performance, see http://www.jstor.org/stable/2965571 method 3,1 for first order
            prob1 = density1D.P.copy()
            prob1[prob1 == 0] = 1
            fine = bins / prob1
            conv = convolve1D(fine, kernel.Win, 'same', cache=cache, cache_args=[2])
            density1D.setP(density1D.P * conv)
            density1D.P /= a0
    density1D.normalize('max', in_place=True)
    if not kwargs:
        # cache only default-settings results (consistent with get1DDensity lookup)
        self.density1D[par.name] = density1D
    if meanlikes:
        ix = density1D.P > 0
        finebinlikes[ix] /= density1D.P[ix]
        binlikes = convolve1D(finebinlikes, kernel.Win, 'same', cache=cache, cache_args=[2])
        binlikes[ix] *= density1D.P[ix] / rawbins[ix]
        if self.shade_likes_is_mean_loglikes:
            # Convert mean log-likelihoods to relative likelihoods for shading
            maxbin = np.min(binlikes)
            binlikes = np.where((binlikes - maxbin) < 30, np.exp(-(binlikes - maxbin)), 0)
            binlikes[rawbins == 0] = 0
        binlikes /= np.max(binlikes)
        density1D.likes = binlikes
    else:
        density1D.likes = None
    return density1D
def _setEdgeMask2D(self, parx, pary, prior_mask, winw, alledge=False):
if parx.has_limits_bot:
prior_mask[:, winw] /= 2
prior_mask[:, :winw] = 0
if parx.has_limits_top:
prior_mask[:, -(winw + 1)] /= 2
prior_mask[:, -winw:] = 0
if pary.has_limits_bot:
prior_mask[winw, :] /= 2
prior_mask[:winw:] = 0
if pary.has_limits_top:
prior_mask[-(winw + 1), :] /= 2
prior_mask[-winw:, :] = 0
if alledge:
prior_mask[:, :winw] = 0
prior_mask[:, -winw:] = 0
prior_mask[:winw:] = 0
prior_mask[-winw:, :] = 0
def _getScaleForParam(self, par):
# Also ensures that the 1D limits are initialized
density = self.get1DDensity(par)
mn, mx, lim_bot, lim_top = density.getLimits(0.5, accuracy_factor=1)
if lim_bot or lim_top:
scale = (mx - mn) / 0.675
else:
scale = (mx - mn) / (2 * 0.675)
return scale
def _make2Dhist(self, ixs, iys, xsize, ysize):
flatix = ixs + iys * xsize
# note arrays are indexed y,x
return np.bincount(flatix, weights=self.weights,
minlength=xsize * ysize).reshape((ysize, xsize)), flatix
def get2DDensity(self, x, y, normalized=False, **kwargs):
    """
    Returns a :class:`~.densities.Density2D` instance with marginalized 2D density.

    :param x: index or name of x parameter
    :param y: index or name of y parameter
    :param normalized: if False, is normalized so the maximum is 1, if True, density is normalized
    :param kwargs: keyword arguments for the :func:`get2DDensityGridData` function
    :return: :class:`~.densities.Density2D` instance
    """
    if self.needs_update:
        self.updateBaseStatistics()
    result = self.get2DDensityGridData(x, y, get_density=True, **kwargs)
    if not normalized:
        return result
    result.normalize(in_place=True)
    return result
# noinspection PyUnboundLocalVariable
def get2DDensityGridData(self, j, j2, num_plot_contours=None, get_density=False, meanlikes=False, **kwargs):
    """
    Low-level function to get 2D plot marginalized density and optional additional plot data.

    :param j: name or index of the x parameter
    :param j2: name or index of the y parameter.
    :param num_plot_contours: number of contours to calculate and return in density.contours
    :param get_density: only get the 2D marginalized density, don't calculate confidence level members
    :param meanlikes: calculate mean likelihoods as well as marginalized density
                      (returned as array in density.likes)
    :param kwargs: optional settings to override instance settings of the same name (see `analysis_settings`):

           - **fine_bins_2D**
           - **boundary_correction_order**
           - **mult_bias_correction_order**
           - **smooth_scale_2D**
    :return: a :class:`~.densities.Density2D` instance
    """
    if self.needs_update:
        self.updateBaseStatistics()
    start = time.time()
    j, parx = self._parAndNumber(j)
    j2, pary = self._parAndNumber(j2)
    if j is None or j2 is None:
        return None
    self._initParamRanges(j)
    self._initParamRanges(j2)
    # Per-call overrides of the instance analysis settings
    base_fine_bins_2D = kwargs.get('fine_bins_2D', self.fine_bins_2D)
    boundary_correction_order = kwargs.get('boundary_correction_order', self.boundary_correction_order)
    mult_bias_correction_order = kwargs.get('mult_bias_correction_order', self.mult_bias_correction_order)
    smooth_scale_2D = float(kwargs.get('smooth_scale_2D', self.smooth_scale_2D))
    has_prior = parx.has_limits or pary.has_limits
    corr = self.getCorrelationMatrix()[j2][j]
    if corr == 1:
        logging.warning('Parameters are 100%% correlated: %s, %s', parx.name, pary.name)
    logging.debug('Doing 2D: %s - %s', parx.name, pary.name)
    logging.debug('sample x_err, y_err, correlation: %s, %s, %s', parx.err, pary.err, corr)
    # keep things simple unless obvious degeneracy
    if abs(self.max_corr_2D) > 1:
        raise SettingError('max_corr_2D cannot be >=1')
    if abs(corr) < 0.1:
        corr = 0.
    # for tight degeneracies increase bin density
    angle_scale = max(0.2, np.sqrt(1 - min(self.max_corr_2D, abs(corr)) ** 2))
    nbin2D = int(round(self.num_bins_2D / angle_scale))
    fine_bins_2D = base_fine_bins_2D
    if corr:
        scaled = 192 * int(3 / angle_scale) // 3
        if base_fine_bins_2D < scaled and int(1 / angle_scale) > 1:
            fine_bins_2D = scaled
    # Fine-grid bin indices for each sample in x and y, then the weighted 2D histogram
    ixs, finewidthx, xbinmin, xbinmax = self._binSamples(self.samples[:, j], parx, fine_bins_2D)
    iys, finewidthy, ybinmin, ybinmax = self._binSamples(self.samples[:, j2], pary, fine_bins_2D)
    xsize = fine_bins_2D
    ysize = fine_bins_2D
    histbins, flatix = self._make2Dhist(ixs, iys, xsize, ysize)
    if meanlikes:
        likeweights = self.weights * np.exp(self.mean_loglike - self.loglikes)
        finebinlikes = np.bincount(flatix, weights=likeweights,
                                   minlength=xsize * ysize).reshape((ysize, xsize))
    # smooth_x and smooth_y should be in rotated bin units
    if smooth_scale_2D < 0:
        # Automatic bandwidth (may also update the effective kernel correlation)
        rx, ry, corr = self.getAutoBandwidth2D(histbins, parx, pary, j, j2, corr, xbinmax - xbinmin,
                                               ybinmax - ybinmin,
                                               base_fine_bins_2D,
                                               mult_bias_correction_order=mult_bias_correction_order)
        rx = rx * abs(smooth_scale_2D) / finewidthx
        ry = ry * abs(smooth_scale_2D) / finewidthy
    elif smooth_scale_2D < 1.0:
        # fraction of each parameter's standard deviation
        rx = smooth_scale_2D * parx.err / finewidthx
        ry = smooth_scale_2D * pary.err / finewidthy
    else:
        # multiple of the coarse bin width
        rx = smooth_scale_2D * fine_bins_2D / nbin2D
        ry = smooth_scale_2D * fine_bins_2D / nbin2D
    smooth_scale = float(max(rx, ry))
    logging.debug('corr, rx, ry: %s, %s, %s', corr, rx, ry)
    if smooth_scale < 2:
        logging.warning('fine_bins_2D not large enough for optimal density: %s, %s', parx.name, pary.name)
    winw = int(round(2.5 * smooth_scale))
    # Correlated 2D Gaussian smoothing kernel on a (2*winw+1)^2 stencil
    Cinv = np.linalg.inv(np.array([[ry ** 2, rx * ry * corr], [rx * ry * corr, rx ** 2]]))
    ix1, ix2 = np.mgrid[-winw:winw + 1, -winw:winw + 1]
    Win = np.exp(-(ix1 ** 2 * Cinv[0, 0] + ix2 ** 2 * Cinv[1, 1] + 2 * Cinv[1, 0] * ix1 * ix2) / 2)
    Win /= np.sum(Win)
    logging.debug('time 2D binning and bandwidth: %s ; bins: %s', time.time() - start, fine_bins_2D)
    start = time.time()
    cache = {}
    convolvesize = xsize + 2 * winw + Win.shape[0]
    bins2D = convolve2D(histbins, Win, 'same', largest_size=convolvesize, cache=cache)
    if meanlikes:
        bin2Dlikes = convolve2D(finebinlikes, Win, 'same', largest_size=convolvesize, cache=cache, cache_args=[2])
        if mult_bias_correction_order:
            # flatten/re-smooth pass for the likelihood weights as well
            ix = bin2Dlikes > 0
            finebinlikes[ix] /= bin2Dlikes[ix]
            likes2 = convolve2D(finebinlikes, Win, 'same', largest_size=convolvesize, cache=cache, cache_args=[2])
            likes2[ix] *= bin2Dlikes[ix]
            bin2Dlikes = likes2
        del finebinlikes
        # only keep mean likes where the density is non-negligible
        mx = 1e-4 * np.max(bins2D)
        bin2Dlikes[bins2D > mx] /= bins2D[bins2D > mx]
        bin2Dlikes[bins2D <= mx] = 0
    else:
        bin2Dlikes = None
    if has_prior and boundary_correction_order >= 0:
        # Correct for edge effects
        prior_mask = np.ones((ysize + 2 * winw, xsize + 2 * winw))
        self._setEdgeMask2D(parx, pary, prior_mask, winw)
        a00 = convolve2D(prior_mask, Win, 'valid', largest_size=convolvesize, cache=cache)
        ix = a00 * bins2D > np.max(bins2D) * 1e-8
        a00 = a00[ix]
        normed = bins2D[ix] / a00
        if boundary_correction_order == 1:
            # linear boundary correction
            indexes = np.arange(-winw, winw + 1)
            y = np.empty(Win.shape)
            for i in range(Win.shape[0]):
                y[:, i] = indexes
            winx = Win * indexes
            winy = Win * y
            a10 = convolve2D(prior_mask, winx, 'valid', largest_size=convolvesize, cache=cache)[ix]
            a01 = convolve2D(prior_mask, winy, 'valid', largest_size=convolvesize, cache=cache)[ix]
            a20 = convolve2D(prior_mask, winx * indexes, 'valid', largest_size=convolvesize, cache=cache,
                             cache_args=[1])[ix]
            a02 = convolve2D(prior_mask, winy * y, 'valid', largest_size=convolvesize, cache=cache,
                             cache_args=[1])[ix]
            a11 = convolve2D(prior_mask, winy * indexes, 'valid', largest_size=convolvesize, cache=cache,
                             cache_args=[1])[ix]
            xP = convolve2D(histbins, winx, 'same', largest_size=convolvesize, cache=cache)[ix]
            yP = convolve2D(histbins, winy, 'same', largest_size=convolvesize, cache=cache)[ix]
            denom = (a20 * a01 ** 2 + a10 ** 2 * a02 - a00 * a02 * a20 + a11 ** 2 * a00 - 2 * a01 * a10 * a11)
            A = a11 ** 2 - a02 * a20
            Ax = a10 * a02 - a01 * a11
            Ay = a01 * a20 - a10 * a11
            corrected = (bins2D[ix] * A + xP * Ax + yP * Ay) / denom
            # Exponentiated form keeps the density positive; clip the exponent
            bins2D[ix] = normed * np.exp(np.minimum(corrected / normed, 4) - 1)
        elif boundary_correction_order == 0:
            # simple boundary correction by normalization
            bins2D[ix] = normed
        else:
            raise SettingError('unknown boundary_correction_order (expected 0 or 1)')
    if mult_bias_correction_order:
        # Multiplicative bias correction: flatten by current estimate, re-smooth, multiply back
        prior_mask = np.ones((ysize + 2 * winw, xsize + 2 * winw))
        self._setEdgeMask2D(parx, pary, prior_mask, winw, alledge=True)
        a00 = convolve2D(prior_mask, Win, 'valid', largest_size=convolvesize, cache=cache, cache_args=[2])
        for _ in range(mult_bias_correction_order):
            box = histbins.copy()
            ix2 = bins2D > np.max(bins2D) * 1e-8
            box[ix2] /= bins2D[ix2]
            bins2D *= convolve2D(box, Win, 'same', largest_size=convolvesize, cache=cache, cache_args=[2])
            bins2D /= a00
    x = np.linspace(xbinmin, xbinmax, xsize)
    y = np.linspace(ybinmin, ybinmax, ysize)
    density = Density2D(x, y, bins2D,
                        view_ranges=[(parx.range_min, parx.range_max), (pary.range_min, pary.range_max)])
    density.normalize('max', in_place=True)
    if get_density:
        return density
    ncontours = len(self.contours)
    if num_plot_contours:
        ncontours = min(num_plot_contours, ncontours)
    contours = self.contours[:ncontours]
    logging.debug('time 2D convolutions: %s', time.time() - start)
    # Get contour containing contours(:) of the probability
    density.contours = density.getContourLevels(contours)
    if meanlikes:
        bin2Dlikes /= np.max(bin2Dlikes)
        density.likes = bin2Dlikes
    else:
        density.likes = None
    return density
# This ND code was contributed but not updated, and currently seems not to work; welcome pull request to restore
# def _setRawEdgeMaskND(self, parv, prior_mask):
# ndim = len(parv)
# vrap = parv[::-1]
# mskShape = prior_mask.shape
#
# if len(mskShape) != ndim:
#         raise ValueError("parv and prior_mask are of different sizes!")
#
# # create a slice object iterating over everything
# mskSlices = [slice(None) for _ in range(ndim)]
#
# for i in range(ndim):
# if vrap[i].has_limits_bot:
# mskSlices[i] = 0
# prior_mask[mskSlices] /= 2
# mskSlices[i] = slice(None)
#
# if vrap[i].has_limits_top:
# mskSlices[i] = mskShape[i] - 1
# prior_mask[mskSlices] /= 2
# mskSlices[i] = slice(None)
#
# def _flattenValues(self, ixs, xsizes):
# ndim = len(ixs)
#
# q = ixs[0]
# for i in range(1, ndim):
# q = q + np.prod(xsizes[0:i]) * ixs[i]
# return q
#
# def _unflattenValues(self, q, xsizes):
# ndim = len(xsizes)
#
# ixs = list([np.array(q) for _ in range(ndim)])
#
# if ndim == 1:
# ixs[0] = q
# return ixs
#
# ixs[ndim - 1] = q / np.prod(xsizes[0:ndim - 1])
#
# acc = 0
# for k in range(ndim - 2, -1, -1):
# acc = acc + ixs[k + 1] * np.prod(xsizes[0:k + 1])
# if k > 0:
# ixs[k] = (q - acc) / np.prod(xsizes[0:k])
# else:
# ixs[k] = q - acc
#
# return ixs
#
# def _makeNDhist(self, ixs, xsizes):
#
# if len(ixs) != len(xsizes):
# raise ValueError('index and size arrays are of unequal length')
#
# flatixv = self._flattenValues(ixs, xsizes)
#
# # to be removed debugging only
# if np.count_nonzero(np.asarray(ixs) - self._unflattenValues(flatixv, xsizes)) != 0:
# raise ValueError('ARG!!! flatten/unflatten screwed')
#
# # note arrays are indexed y,x
# return np.bincount(flatixv, weights=self.weights,
# minlength=np.prod(xsizes)).reshape(xsizes[::-1], order='C'), flatixv
#
# def getRawNDDensity(self, xs, normalized=False, **kwargs):
# """
# Returns a :class:`~.densities.DensityND` instance with marginalized ND density.
#
# :param xs: indices or names of x_i parameters
# :param normalized: if False, is normalized so the maximum is 1, if True, density is normalized
# :param kwargs: keyword arguments for the :meth:`~.mcsamples.MCSamples.getRawNDDensityGridData` function
# :return: :class:`~.densities.DensityND` instance
# """
# if self.needs_update:
# self.updateBaseStatistics()
# density = self.getRawNDDensityGridData(xs, get_density=True, **kwargs)
# if normalized:
# density.normalize(in_place=True)
# return density
#
# # noinspection PyUnresolvedReferences
# def getRawNDDensityGridData(self, js, num_plot_contours=None, get_density=False,
# meanlikes=False, maxlikes=False, **kwargs):
# """
#     Low-level function to get unsmoothed ND plot marginalized
# density and optional additional plot data (no KDE).
#
# :param js: vector of names or indices of the x_i parameters
# :param num_plot_contours: number of contours to calculate and return in density.contours
# :param get_density: only get the ND marginalized density, no additional plot data, no contours.
# :param meanlikes: calculate mean likelihoods as well as marginalized density
# (returned as array in density.likes)
# :param maxlikes: calculate the profile likelihoods in addition to the others
# (returned as array in density.maxlikes)
# :param kwargs: optional settings to override instance settings of the same name (see `analysis_settings`):
#
# :return: a :class:`~.densities.DensityND` instance
# """
#
# if self.needs_update:
# self.updateBaseStatistics()
#
# ndim = len(js)
#
# jv, parv = zip(*[self._parAndNumber(j) for j in js])
#
# if None in jv:
# return None
#
# [self._initParamRanges(j) for j in jv]
#
# boundary_correction_order = kwargs.get('boundary_correction_order', self.boundary_correction_order)
# has_prior = any(parv[i].has_limits for i in range(ndim))
#
# nbinsND = kwargs.get('num_bins_ND', self.num_bins_ND)
# ixv, widthv, xminv, xmaxv = zip(*[self._binSamples(self.samples[:, jv[i]],
# parv[i], nbinsND) for i in range(ndim)])
#
# # could also be non-equals over the dimensions
# xsizev = nbinsND * np.ones(ndim, dtype=np.int)
#
# binsND, flatixv = self._makeNDhist(ixv, xsizev)
#
# if has_prior and boundary_correction_order >= 0:
# # Correct for edge effects
# prior_mask = np.ones(xsizev[::-1])
# self._setRawEdgeMaskND(parv, prior_mask)
# binsND /= prior_mask
#
# if meanlikes:
# likeweights = self.weights * np.exp(self.mean_loglike - self.loglikes)
# binNDlikes = np.bincount(flatixv, weights=likeweights,
# minlength=np.prod(xsizev)).reshape(xsizev[::-1], order='C')
# else:
# binNDlikes = None
#
# if maxlikes:
# binNDmaxlikes = np.zeros(binsND.shape)
# ndindex = zip(*[ixv[i] for i in range(ndim)[::-1]])
# bestfit = np.max(-self.loglikes)
#
# for irec in range(len(self.loglikes)):
# binNDmaxlikes[ndindex[irec]] = max(binNDmaxlikes[ndindex[irec]],
# np.exp(-bestfit - self.loglikes[irec]))
# else:
# binNDmaxlikes = None
#
# xv = [np.linspace(xminv[i], xmaxv[i], xsizev[i]) for i in range(ndim)]
# views = [(parv[i].range_min, parv[i].range_max) for i in range(ndim)]
#
# density = DensityND(xv, binsND, view_ranges=views)
#
# # density.normalize('integral', in_place=True)
# density.normalize('max', in_place=True)
# if get_density:
# return density
#
# ncontours = len(self.contours)
# if num_plot_contours:
# ncontours = min(num_plot_contours, ncontours)
# contours = self.contours[:ncontours]
#
# # Get contour containing contours(:) of the probability
# density.contours = density.getContourLevels(contours)
#
# if meanlikes:
# binNDlikes /= np.max(binNDlikes)
# density.likes = binNDlikes
# else:
# density.likes = None
#
# if maxlikes:
# density.maxlikes = binNDmaxlikes
# density.maxcontours = getOtherContourLevels(binNDmaxlikes, contours, half_edge=False)
# else:
# density.maxlikes = None
#
# return density
def _setLikeStats(self):
    """
    Get and store LikeStats (see :func:`MCSamples.getLikeStats`)
    """
    if self.loglikes is None:
        self.likeStats = None
        return None
    stats = types.LikeStats()
    best_ix = np.argmin(self.loglikes)
    best_loglike = self.loglikes[best_ix]
    stats.logLike_sample = best_loglike
    # Only average inverse likelihoods when the spread is modest (avoid exp overflow)
    if np.max(self.loglikes) - best_loglike < 30:
        stats.logMeanInvLike = np.log(self.mean(np.exp(self.loglikes - best_loglike))) + best_loglike
    else:
        stats.logMeanInvLike = None
    stats.meanLogLike = self.mean_loglike
    stats.logMeanLike = -np.log(self.mean(np.exp(-(self.loglikes - best_loglike)))) + best_loglike
    # assuming maxlike is well determined
    stats.complexity = 2 * (self.mean_loglike - best_loglike)
    stats.names = self.paramNames.names
    # get N-dimensional confidence region from the weight within the best-likelihood samples
    order = self.loglikes.argsort()
    weight_cumsum = np.cumsum(self.weights[order])
    stats.ND_cont1, stats.ND_cont2 = np.searchsorted(weight_cumsum, self.norm * self.contours[0:2])
    for j, par in enumerate(self.paramNames.names):
        within1 = self.samples[order[:stats.ND_cont1], j]
        within2 = self.samples[order[:stats.ND_cont2], j]
        par.ND_limit_bot = np.array([np.min(within1), np.min(within2)])
        par.ND_limit_top = np.array([np.max(within1), np.max(within2)])
        par.bestfit_sample = self.samples[best_ix][j]
    self.likeStats = stats
    return stats
def _readRanges(self):
    """Load hard prior bounds from root.ranges or a Cobaya params file, else empty bounds."""
    if self.root:
        candidate = self.root + '.ranges'
        if os.path.isfile(candidate):
            self.ranges = ParamBounds(candidate)
            return
        candidate = cobaya_interface.cobaya_params_file(self.root)
        if candidate:
            self.ranges = ParamBounds(candidate)
            return
    self.ranges = ParamBounds()
def getBounds(self):
    """
    Returns the bounds in the form of a :class:`~.parampriors.ParamBounds` instance, for example
    for determining plot ranges

    Bounds are not the same as self.ranges, as if samples are not near the range boundary, the bound is set to None

    :return: a :class:`~.parampriors.ParamBounds` instance
    """
    bounds = ParamBounds()
    bounds.names = self.paramNames.list()
    for par in self.paramNames.names:
        # record a bound only on sides with an active prior limit
        for flagged, store, attr in ((par.has_limits_bot, bounds.lower, 'limmin'),
                                     (par.has_limits_top, bounds.upper, 'limmax')):
            if flagged:
                store[par.name] = getattr(par, attr)
    return bounds
def getUpper(self, name):
    """
    Return the upper limit of the parameter with the given name.

    :param name: parameter name
    :return: The upper limit if name exists, None otherwise.
    """
    par = self.paramNames.parWithName(name)
    return getattr(par, 'limmax', None) if par else None
def getLower(self, name):
    """
    Return the lower limit of the parameter with the given name.

    :param name: parameter name
    :return: The lower limit if name exists, None otherwise.
    """
    par = self.paramNames.parWithName(name)
    return getattr(par, 'limmin', None) if par else None
def getBestFit(self, max_posterior=True):
    """
    Returns a :class:`~.types.BestFit` object with best-fit point stored in .minimum or .bestfit file

    :param max_posterior: whether to get maximum posterior (from .minimum file)
                          or maximum likelihood (from .bestfit file)
    :return:
    """
    suffix = '.minimum' if max_posterior else '.bestfit'
    path = self.root + suffix
    if not os.path.exists(path):
        raise MCSamplesError('Best fit can only be included if loaded from file and file_root%s exists '
                             '(cannot be calculated from samples)' % suffix)
    return types.BestFit(path, max_posterior=max_posterior)
def getMargeStats(self, include_bestfit=False):
    """
    Returns a :class:`~.types.MargeStats` object with marginalized 1D parameter constraints

    :param include_bestfit: if True, set best fit values by loading from root_name.minimum file (assuming it exists)
    :return: A :class:`~.types.MargeStats` instance
    """
    # Ensure 1D densities and marginalized limits are computed first
    self._setDensitiesandMarge1D()
    marge = types.MargeStats()
    marge.hasBestFit = False
    marge.limits = self.contours
    marge.names = self.paramNames.names
    if include_bestfit:
        marge.addBestFit(self.getBestFit())
    return marge
def getLikeStats(self):
    """
    Get best fit sample and n-D confidence limits, and various likelihood based statistics

    :return: a :class:`~.types.LikeStats` instance storing N-D limits for parameter i in
             result.names[i].ND_limit_top, result.names[i].ND_limit_bot, and best-fit sample value
             in result.names[i].bestfit_sample
    """
    cached = self.likeStats
    if cached:
        return cached
    return self._setLikeStats()
def getTable(self, columns=1, include_bestfit=False, **kwargs):
    """
    Creates and returns a :class:`~.types.ResultTable` instance. See also :func:`~MCSamples.getInlineLatex`.

    :param columns: number of columns in the table
    :param include_bestfit: True if should include the bestfit parameter values (assuming set)
    :param kwargs: arguments for :class:`~.types.ResultTable` constructor.
    :return: A :class:`~.types.ResultTable` instance
    """
    marge_stats = self.getMargeStats(include_bestfit)
    return types.ResultTable(columns, [marge_stats], **kwargs)
def getLatex(self, params=None, limit=1, err_sig_figs=None):
    """
    Get tex snippet for constraints on a list of parameters

    :param params: list of parameter names, or a single parameter name
    :param limit: which limit to get, 1 is the first (default 68%), 2 is the second
                  (limits array specified by self.contours)
    :param err_sig_figs: significant figures in the error
    :return: labels, texs: a list of parameter labels, and a list of tex snippets,
             or for a single parameter, the latex snippet.
    """
    # Single-name shortcut returns the inline snippet directly.
    if isinstance(params, str):
        return self.getInlineLatex(params, limit, err_sig_figs)
    marge = self.getMargeStats()
    if params is None:
        params = marge.list()
    formatter = types.NoLineTableFormatter()
    if err_sig_figs:
        formatter.numberFormatter.err_sf = err_sig_figs
    labels = []
    texs = []
    for par in params:
        tex = marge.texValues(formatter, par, limit=limit)
        if tex is None:
            # unknown parameter: keep placeholder entries so lists stay aligned
            texs.append(None)
            labels.append(None)
            continue
        texs.append(tex[0])
        info = par if isinstance(par, ParamInfo) else marge.parWithName(par)
        labels.append(info.getLabel())
    return labels, texs
def getInlineLatex(self, param, limit=1, err_sig_figs=None):
    r"""
    Get snippet like: A=x\\pm y. Will adjust appropriately for one and two tail limits.

    :param param: The name of the parameter
    :param limit: which limit to get, 1 is the first (default 68%), 2 is the second
                  (limits array specified by self.contours)
    :param err_sig_figs: significant figures in the error
    :return: The tex snippet.
    """
    labels, texs = self.getLatex([param], limit, err_sig_figs)
    snippet = texs[0]
    if snippet is None:
        raise ValueError('parameter %s not found' % param)
    # One-tail limits already start with an inequality sign, so skip the '='.
    separator = ' ' if snippet[0] in ('<', '>') else ' = '
    return labels[0] + separator + snippet
def _setDensitiesandMarge1D(self, max_frac_twotail=None, meanlikes=False):
"""
Get all the 1D densities; result is cached.
:param max_frac_twotail: optional override for self.max_frac_twotail
:param meanlikes: include mean likelihoods
"""
if self.done_1Dbins:
return
for j in range(self.n):
paramConfid = self.initParamConfidenceData(self.samples[:, j])
self.get1DDensityGridData(j, paramConfid=paramConfid, meanlikes=meanlikes)
self._setMargeLimits(self.paramNames.names[j], paramConfid, max_frac_twotail)
self.done_1Dbins = True
# noinspection PyUnboundLocalVariable
def _setMargeLimits(self, par, paramConfid, max_frac_twotail=None, density1D=None):
    """
    Populate par.limits with a :class:`~.types.ParamLimit` per confidence
    contour; each limit is one- or two-tail depending on whether the
    marginalized posterior goes to zero at the hard prior limits or not.

    :param par: The :class:`~.paramnames.ParamInfo` to set limits for
    :param paramConfid: :class:`~.chains.ParamConfidenceData` instance
    :param max_frac_twotail: optional override for self.max_frac_twotail
    :param density1D: any existing density 1D instance to use
    """
    if max_frac_twotail is None:
        max_frac_twotail = self.max_frac_twotail
    par.limits = []
    density1D = density1D or self.get1DDensity(par.name)
    interpGrid = None
    for ix1, contour in enumerate(self.contours):
        # The limit collapses onto a prior boundary when the parameter has a
        # hard bound there and the density at that edge is still significant.
        marge_limits_bot = par.has_limits_bot and not self.force_twotail and density1D.P[0] > max_frac_twotail[ix1]
        marge_limits_top = par.has_limits_top and not self.force_twotail and density1D.P[-1] > max_frac_twotail[ix1]
        if not marge_limits_bot or not marge_limits_top:
            # give limit
            if not interpGrid:
                # Lazily built once; shared by all contour levels.
                interpGrid = density1D.initLimitGrids()
            # getLimits may itself decide a tail hits the prior boundary, so
            # the marge_limits_* flags are re-assigned here.
            tail_limit_bot, tail_limit_top, marge_limits_bot, marge_limits_top = density1D.getLimits(contour,
                                                                                                    interpGrid)
            limfrac = 1 - contour
            if marge_limits_bot:
                # fix to end of prior range
                tail_limit_bot = par.range_min
            elif marge_limits_top:
                # 1 tail limit: put the whole limfrac in the bottom tail
                tail_limit_bot = self.confidence(paramConfid, limfrac, upper=False)
            else:
                # 2 tail limit: equal-tail candidate, half limfrac per side
                tail_confid_bot = self.confidence(paramConfid, limfrac / 2, upper=False)

            if marge_limits_top:
                tail_limit_top = par.range_max
            elif marge_limits_bot:
                tail_limit_top = self.confidence(paramConfid, limfrac, upper=True)
            else:
                tail_confid_top = self.confidence(paramConfid, limfrac / 2, upper=True)

            if not marge_limits_bot and not marge_limits_top:
                # Two tail, check if limits are at very different density;
                # if the densities are similar, prefer the equal-tail interval
                # over the density-based one from getLimits.
                if (math.fabs(density1D.Prob(tail_confid_top) -
                              density1D.Prob(tail_confid_bot)) < self.credible_interval_threshold):
                    tail_limit_top = tail_confid_top
                    tail_limit_bot = tail_confid_bot

            lim = [tail_limit_bot, tail_limit_top]
        else:
            # no limit: density significant at both hard bounds
            lim = [par.range_min, par.range_max]

        # Tag encodes the limit type: 'two' = two-tail, '<'/'>' = one-tail,
        # 'none' = no constraint beyond the prior range.
        if marge_limits_bot and marge_limits_top:
            tag = 'none'
        elif marge_limits_bot:
            tag = '>'
        elif marge_limits_top:
            tag = '<'
        else:
            tag = 'two'
        par.limits.append(types.ParamLimit(lim, tag))
def getCorrelatedVariable2DPlots(self, num_plots=12, nparam=None):
    """
    Gets a list of most correlated variable pair names.

    :param num_plots: The number of plots
    :param nparam: maximum number of pairs to get
    :return: list of [x,y] pair names
    """
    nparam = nparam or self.paramNames.numNonDerived()
    corr = self.correlationMatrix
    pairs = []
    upper = 1e5
    best_i, best_j = 0, 0
    for _ in range(num_plots):
        # Greedy search: pick the strongest |correlation| strictly below the
        # previous winner, so each round yields the next-most-correlated pair.
        best = -1e5
        for i in range(nparam):
            for j in range(i + 1, nparam):
                c = abs(corr[i][j])
                if best < c < upper:
                    best = c
                    best_i, best_j = i, j
        if best == -1e5:
            # No remaining pair below the current threshold.
            break
        upper = best
        pairs.append([self.parName(best_i), self.parName(best_j)])
    return pairs
def addDerived(self, paramVec, name, label='', comment='', range=None):
    """
    Adds a new derived parameter

    :param paramVec: The vector of parameter values to add. For example a combination of
                     parameter arrays from MCSamples.getParams()
    :param name: The name for the new parameter
    :param label: optional latex label for the parameter
    :param comment: optional comment describing the parameter
    :param range: if specified, a tuple of min, max values for the new parameter hard prior bounds
                  (either can be None for one-side bound)
    :return: The added parameter's :class:`~.paramnames.ParamInfo` object
    """
    # NOTE: "range" shadows the builtin, but renaming it would break keyword callers.
    if range is not None:
        # Record the hard prior bounds before delegating to the base class.
        self.ranges.setRange(name, range)
    return super().addDerived(paramVec, name, label=label, comment=comment)
def getParamBestFitDict(self, best_sample=False, want_derived=True, want_fixed=True, max_posterior=True):
    """
    Gets an ordered dictionary of parameter values for the best fit point,
    assuming calculated results from minimization runs in .minimum (max posterior) .bestfit (max likelihood)
    files exists.

    Can also get the best-fit (max posterior) sample, which typically has a likelihood that differs significantly
    from the true best fit in high dimensions.

    :param best_sample: load from global minimum files (False, default) or using maximum posterior sample (True)
    :param want_derived: include derived parameters
    :param want_fixed: also include values of any fixed parameters
    :param max_posterior: whether to get maximum posterior (from .minimum file) or maximum likelihood
                          (from .bestfit file)
    :return: ordered dictionary of parameter values
    """
    if best_sample:
        if not max_posterior:
            raise ValueError('best_fit_sample is only maximum posterior')
        # loglikes stores -log(like); argmin therefore picks the best sample.
        return self.getParamSampleDict(np.argmin(self.loglikes))
    else:
        res = self.getBestFit(max_posterior=max_posterior).getParamDict(include_derived=want_derived)
        if want_fixed:
            # Fixed parameters are not in the best-fit file; merge them in.
            res.update(self.ranges.fixedValueDict())
        return res
def getParamSampleDict(self, ix, want_derived=True, want_fixed=True):
    """
    Gets a dictionary of parameter values for sample number ix

    :param ix: index of the sample to return (zero based)
    :param want_derived: include derived parameters
    :param want_fixed: also include values of any fixed parameters
    :return: ordered dictionary of parameter values
    """
    res = super().getParamSampleDict(ix, want_derived=want_derived)
    if want_fixed:
        # Fixed parameters are not part of the sample arrays; merge them in.
        res.update(self.ranges.fixedValueDict())
    return res
def getCombinedSamplesWithSamples(self, samps2, sample_weights=(1, 1)):
    """
    Make a new :class:`MCSamples` instance by appending samples from samps2 for parameters which are in common.
    By default they are weighted so that the probability mass of each set of samples is the same,
    independent of the actual sample sizes. The sample_weights parameter can be adjusted to change the
    relative weighting.

    :param samps2: :class:`MCSamples` instance to merge
    :param sample_weights: relative weights for combining the samples. Set to None to just directly append samples.
    :return: a new :class:`MCSamples` instance with the combined samples
    """
    # Keep only parameters present in both sample sets (intersection by name).
    params = ParamNames()
    params.names = [ParamInfo(name=p.name, label=p.label, derived=p.isDerived) for p in samps2.paramNames.names if
                    p.name in self.paramNames.list()]
    if self.loglikes is not None and samps2.loglikes is not None:
        loglikes = np.concatenate([self.loglikes, samps2.loglikes])
    else:
        loglikes = None
    if sample_weights is None:
        # Direct append: no rescaling of either weight set.
        fac = 1
        sample_weights = (1, 1)
    else:
        # Rescale samps2 so both sets carry the same total probability mass
        # (before applying the user-supplied relative sample_weights).
        fac = np.sum(self.weights) / np.sum(samps2.weights)
    weights = np.concatenate([self.weights * sample_weights[0], samps2.weights * sample_weights[1] * fac])

    p1 = self.getParams()
    p2 = samps2.getParams()
    # Build the combined samples column-by-column in the common-parameter order.
    samples = np.array([np.concatenate([getattr(p1, name), getattr(p2, name)]) for name in params.list()]).T
    samps = MCSamples(samples=samples, weights=weights, loglikes=loglikes, paramNamesFile=params, ignore_rows=0,
                      ranges=self.ranges, settings=copy.deepcopy(self.ini.params))
    return samps
def saveTextMetadata(self, root, properties=None):
    """
    Saves metadata about the samples to text files with given file root

    :param root: root file name
    :param properties: optional dictionary of values to save in root.properties.ini
    """
    super().saveTextMetadata(root)
    self.ranges.saveToFile(root + '.ranges')
    ini_name = root + '.properties.ini'
    # NOTE: by Python precedence this parses as
    # properties or (self.properties and self.properties.params) or self.label
    if properties or self.properties and self.properties.params or self.label:
        if os.path.exists(ini_name):
            # Merge into any existing properties file rather than overwriting it.
            ini = IniFile(ini_name)
        else:
            ini = IniFile()
        if self.properties:
            ini.params.update(self.properties.params)
        if self.label:
            ini.params.update({'label': self.label})
        # Explicit properties argument takes precedence over stored values.
        ini.params.update(properties or {})
        ini.saveFile(ini_name)
    elif os.path.exists(ini_name):
        # Nothing to save: remove a stale properties file from a previous run.
        os.remove(ini_name)
def saveChainsAsText(self, root, make_dirs=False, properties=None):
    """
    Save the separate chains as numbered text files with the given root,
    followed by the associated metadata files.

    :param root: root file name for the output
    :param make_dirs: if True, create output directories as needed
    :param properties: optional dictionary of values for root.properties.ini
    """
    chain_list = self.getSeparateChains() if self.chains is None else self.chains
    for index, chain in enumerate(chain_list):
        chain.saveAsText(root, index, make_dirs)
    self.saveTextMetadata(root, properties)
# Write functions for console script
def _writeScriptPlots1D(self, filename, plotparams=None, ext=None):
    """
    Write a script that generates a 1D plot. Only intended for use by getdist script.

    :param filename: The filename to write to.
    :param plotparams: The list of parameters to plot (default: all)
    :param ext: The extension for the filename, Default if None
    """
    # Script body: define markers, then the plots_1d call.
    lines = ['markers = ' + (str(self.markers) if self.markers else 'None') + '\n']
    if plotparams:
        quoted = ",".join('\'' + par + '\'' for par in plotparams)
        lines.append('g.plots_1d(roots,[' + quoted + '], markers=markers)')
    else:
        lines.append('g.plots_1d(roots, markers=markers)')
    self._WritePlotFile(filename, self.subplot_size_inch, ''.join(lines), '', ext)
def _writeScriptPlots2D(self, filename, plot_2D_param=None, cust2DPlots=(), ext=None):
    """
    Write script that generates a 2 dimensional plot. Only intended for use by getdist script.

    :param filename: The filename to write to.
    :param plot_2D_param: parameter to plot other variables against
    :param cust2DPlots: list of parts of parameter names to plot
    :param ext: The extension for the filename, Default if None
    :return: A dictionary indexed by pairs of parameters where 2D densities have been calculated
    """
    done2D = {}
    text = 'pairs=[]\n'
    plot_num = 0
    if len(cust2DPlots):
        # Flatten the custom pairs into "par1__par2" keys for membership tests.
        cuts = [par1 + '__' + par2 for par1, par2 in cust2DPlots]
    for j, par1 in enumerate(self.paramNames.list()):
        if plot_2D_param or cust2DPlots:
            if par1 == plot_2D_param:
                continue
            # With an explicit x-parameter or custom list, scan all j2.
            j2min = 0
        else:
            # Default: upper triangle only, avoiding duplicate (a,b)/(b,a) pairs.
            j2min = j + 1
        for j2 in range(j2min, self.n):
            par2 = self.parName(j2)
            if plot_2D_param and par2 != plot_2D_param:
                continue
            # noinspection PyUnboundLocalVariable
            if len(cust2DPlots) and (par1 + '__' + par2) not in cuts:
                continue
            if (par1, par2) not in done2D:
                plot_num += 1
                done2D[(par1, par2)] = True
                text += "pairs.append(['%s','%s'])\n" % (par1, par2)
    text += 'g.plots_2d(roots,param_pairs=pairs,filled=True)'
    self._WritePlotFile(filename, self.subplot_size_inch2, text, '_2D', ext)
    return done2D
def _writeScriptPlotsTri(self, filename, triangle_params, ext=None):
    """
    Write a script that generates a triangle plot. Only intended for use by getdist script.

    :param filename: The filename to write to.
    :param triangle_params: list of parameter names to plot
    :param ext: The extension for the filename, Default if None
    """
    script = 'g.triangle_plot(roots, %s)' % triangle_params
    self._WritePlotFile(filename, self.subplot_size_inch, script, '_tri', ext)
def _writeScriptPlots3D(self, filename, plot_3D, ext=None):
    """
    Writes a script that generates a 3D (coloured-scatter) plot. Only intended for use by getdist script.

    :param filename: The filename to write to
    :param plot_3D: list of [x,y,z] parameters for the 3 Dimensional plots
    :param ext: The extension for the filename, Default if None
    """
    # Script body: build the list of [x, y, z] sets, then the plots_3d call.
    lines = ['sets=[]\n']
    for triplet in plot_3D:
        lines.append("sets.append(['%s','%s','%s'])\n" % tuple(triplet))
    lines.append('g.plots_3d(roots,sets)')
    self._WritePlotFile(filename, self.subplot_size_inch3, ''.join(lines), '_3D', ext)
def _WritePlotFile(self, filename, subplot_size, text, tag, ext=None):
    """
    Write plot file.
    Used by the _writeScriptPlots* helpers above.

    :param filename: The filename to write to
    :param subplot_size: The size of the subplot.
    :param text: The text to write after the headers.
    :param tag: Tag used for the filename the created file will export to.
    :param ext: The extension for the filename, Default if None
    """
    with open(filename, 'w', encoding='utf-8') as f:
        # Standard preamble: import plots, set up the plotter and chain roots.
        f.write("import getdist.plots as plots, os\n")
        f.write("g=plots.GetDistPlotter(chain_dir=r'%s')\n" % (self.batch_path or os.path.dirname(self.root)))
        f.write("g.settings.set_with_subplot_size(%s)\n" % subplot_size)
        f.write("roots = ['%s']\n" % self.rootname)
        # Caller-supplied plotting commands.
        f.write(text + '\n')
        ext = ext or self.plot_output
        fname = self.rootname + tag + '.' + ext
        # Final line exports the figure next to the other outputs.
        f.write("g.export(os.path.join(r'%s',r'%s'))\n" % (self.out_dir, fname))
# Useful functions
def getRootFileName(rootdir):
    """
    Gets the root name of chains in given directory (assuming only one set of chain files).

    :param rootdir: The directory to check
    :return: The root file name, or "" if no chain files are found.
    """
    # Chain files follow the "<root>_<index>.txt" naming convention.
    matches = sorted(glob.glob(os.path.join(rootdir, '*_*.txt')))
    if not matches:
        return ""
    first = matches[0]
    # Strip the trailing "_<index>.txt" part to recover the root.
    return first[:first.rindex('_')]
def _dummy_usage():
    # Reference otherwise-unused module-level names so tooling does not flag
    # them as unused imports; presumably they are kept for re-export /
    # backwards compatibility -- TODO confirm against the module's imports.
    assert MCSamplesFromCobaya and ParamError
|
cmbantREPO_NAMECosmoMCPATH_START.@CosmoMC_extracted@CosmoMC-master@python@getdist@mcsamples.py@.PATH_END.py
|
{
"filename": "ragged_autograph.py",
"repo_name": "tensorflow/tensorflow",
"repo_path": "tensorflow_extracted/tensorflow-master/tensorflow/python/ops/ragged/ragged_autograph.py",
"type": "Python"
}
|
# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Autograph-specific overrides for ragged_tensor."""
from tensorflow.python.autograph.operators import control_flow
from tensorflow.python.ops import cond as tf_cond
from tensorflow.python.ops.ragged import ragged_tensor
def _tf_ragged_for_stmt(
    iter_, extra_test, body, get_state, set_state, symbol_names, opts
):
  """Overload of for_stmt that iterates over TF ragged tensors.

  Lowers the for-loop to a tf.while_loop over an integer iteration index,
  threading the loop variables through get_state/set_state.
  """
  init_vars = get_state()
  control_flow.verify_loop_init_vars(init_vars, symbol_names)

  # TODO(mdan): Move this into len()? Requires eager support.
  # Trip count: use the static outer dimension when known, otherwise fall
  # back to a dynamic value.
  # NOTE(review): the dynamic branch takes row_lengths()[0]; confirm this is
  # the intended iteration count for the outermost ragged dimension.
  if iter_.shape and iter_.shape[0] is not None:
    n = iter_.shape[0]
  else:
    n = iter_.row_lengths()[0]

  iterate_index = 0

  def aug_get_state():
    # Prepend the internal iteration index to the user loop state.
    return (iterate_index,) + get_state()

  def aug_set_state(aug_loop_vars):
    nonlocal iterate_index
    # TODO(b/171479293): Drop the lint override.
    iterate_index, *loop_vars = aug_loop_vars  # pylint:disable=unused-variable
    # The iteration index is not "output" by the for loop. If the iteration
    # index is used outside the loop, it will appear
    # in the loop vars separately.
    set_state(loop_vars)

  def aug_body():
    nonlocal iterate_index
    body(iter_[iterate_index])
    iterate_index += 1

  def aug_test():
    main_test = iterate_index < n
    if extra_test is not None:
      # Only evaluate the user's extra_test when the index is in range,
      # since the loop body indexes iter_ with it.
      return tf_cond.cond(main_test, extra_test, lambda: False)
    return main_test

  control_flow._add_max_iterations_hint(opts, n)  # pylint: disable=protected-access

  control_flow._tf_while_stmt(  # pylint: disable=protected-access
      aug_test,
      aug_body,
      aug_get_state,
      aug_set_state,
      ('<internal iterate>',) + symbol_names,
      opts,
  )


# Dispatch autograph-converted for-loops over RaggedTensor to the handler above.
control_flow.for_loop_registry.register(
    ragged_tensor.RaggedTensor, _tf_ragged_for_stmt
)
|
tensorflowREPO_NAMEtensorflowPATH_START.@tensorflow_extracted@tensorflow-master@tensorflow@python@ops@ragged@ragged_autograph.py@.PATH_END.py
|
{
"filename": "test_script_BANE.py",
"repo_name": "PaulHancock/Aegean",
"repo_path": "Aegean_extracted/Aegean-main/tests/integration/test_script_BANE.py",
"type": "Python"
}
|
#! /usr/bin/env python
"""Integration tests for the BANE command-line entry point."""
from AegeanTools.CLI import BANE

# Test fixture images (two map projections of the same field).
image_SIN = 'tests/test_files/1904-66_SIN.fits'
image_AIT = 'tests/test_files/1904-66_AIT.fits'
# Name of a file that does not exist ("dlme" = delete me).
# NOTE(review): this shadows the stdlib `tempfile` module name at module scope.
tempfile = 'dlme'


def test_help():
    # No arguments: should print usage/help without crashing.
    BANE.main()


def test_cite():
    # --cite should print the citation information and exit cleanly.
    BANE.main(['--cite'])


def test_invalid_file():
    # A nonexistent input file should be handled without raising.
    BANE.main([tempfile])


def test_noclobber():
    # --noclobber: should not overwrite existing output files.
    BANE.main(['--noclobber', image_SIN])


def test_run_BANE():
    # Full run on a valid image.
    BANE.main([image_SIN])


if __name__ == "__main__":
    # introspect and run all the functions starting with 'test'
    for f in dir():
        if f.startswith('test'):
            print(f)
            globals()[f]()
|
PaulHancockREPO_NAMEAegeanPATH_START.@Aegean_extracted@Aegean-main@tests@integration@test_script_BANE.py@.PATH_END.py
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.