metadata
dict | text
stringlengths 0
40.6M
| id
stringlengths 14
255
|
|---|---|---|
{
"filename": "_textsrc.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/pie/_textsrc.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class TextsrcValidator(_plotly_utils.basevalidators.SrcValidator):
    """Validator for the ``textsrc`` attribute of ``pie`` traces."""

    def __init__(self, plotly_name="textsrc", parent_name="pie", **kwargs):
        """Initialize the validator, defaulting ``edit_type`` to ``"none"``."""
        edit_type = kwargs.pop("edit_type", "none")
        super().__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@pie@_textsrc.py@.PATH_END.py
|
{
"filename": "test_settings.py",
"repo_name": "pyro-ppl/pyro",
"repo_path": "pyro_extracted/pyro-master/tests/test_settings.py",
"type": "Python"
}
|
# Copyright Contributors to the Pyro project.
# SPDX-License-Identifier: Apache-2.0
import pytest
from pyro import settings
_TEST_SETTING: float = 0.1
pytestmark = pytest.mark.stage("unit")
def test_settings():
    """Check the registry dict and the default validation flags."""
    current = settings.get()
    assert isinstance(current, dict)
    assert all(isinstance(alias, str) for alias in current)
    # All validation switches default to enabled.
    for alias in ("validate_distributions_pyro",
                  "validate_distributions_torch",
                  "validate_poutine",
                  "validate_infer"):
        assert settings.get(alias) is True
def test_register():
    """Exercise registration, validation, set/get, and both context forms."""
    # The setting must not exist before registration.
    with pytest.raises(KeyError):
        settings.get("test_setting")

    @settings.register("test_setting", "tests.test_settings", "_TEST_SETTING")
    def _check_positive_float(value):
        assert isinstance(value, float)
        assert 0 < value

    # Simple get and set, including rejection by the validator.
    assert settings.get("test_setting") == 0.1
    settings.set(test_setting=0.2)
    assert settings.get("test_setting") == 0.2
    with pytest.raises(AssertionError):
        settings.set(test_setting=-0.1)

    # Context manager restores the previous value on exit.
    with settings.context(test_setting=0.3):
        assert settings.get("test_setting") == 0.3
    assert settings.get("test_setting") == 0.2

    # Decorator form of the context manager.
    @settings.context(test_setting=0.4)
    def decorated():
        assert settings.get("test_setting") == 0.4

    decorated()
    assert settings.get("test_setting") == 0.2
|
pyro-pplREPO_NAMEpyroPATH_START.@pyro_extracted@pyro-master@tests@test_settings.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "mackelab/sbi",
"repo_path": "sbi_extracted/sbi-main/sbi/diagnostics/__init__.py",
"type": "Python"
}
|
from sbi.diagnostics.sbc import check_sbc, get_nltp, run_sbc
from sbi.diagnostics.tarp import check_tarp, run_tarp
|
mackelabREPO_NAMEsbiPATH_START.@sbi_extracted@sbi-main@sbi@diagnostics@__init__.py@.PATH_END.py
|
{
"filename": "_yhoverformat.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/violin/_yhoverformat.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class YhoverformatValidator(_plotly_utils.basevalidators.StringValidator):
    """Validator for the ``yhoverformat`` attribute of ``violin`` traces."""

    def __init__(self, plotly_name="yhoverformat", parent_name="violin", **kwargs):
        """Initialize the validator, defaulting ``edit_type`` to ``"none"``."""
        edit_type = kwargs.pop("edit_type", "none")
        super().__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@violin@_yhoverformat.py@.PATH_END.py
|
{
"filename": "_orbits.py",
"repo_name": "pysat/pysat",
"repo_path": "pysat_extracted/pysat-main/pysat/_orbits.py",
"type": "Python"
}
|
#!/usr/bin/env python
# Full license can be found in License.md
# Full author list can be found in .zenodo.json file
# DOI:10.5281/zenodo.1199703
#
# DISTRIBUTION STATEMENT A: Approved for public release. Distribution is
# unlimited.
# ----------------------------------------------------------------------------
import copy
import datetime as dt
import functools
import numpy as np
import pandas as pds
import weakref
import xarray as xr
import pysat
class Orbits(object):
"""Determine orbits on the fly and provide orbital data in `.data`.
Parameters
----------
inst : pysat.Instrument
Instrument object for which the orbits will be determined
index : str or NoneType
Name of the data series to use for determining orbit breaks
(default=None)
kind : str
Kind of orbit, which specifies how orbital breaks are determined.
Expects one of: 'local time', 'longitude', 'polar', or 'orbit'
- local time: negative gradients in lt or breaks in inst.data.index
- longitude: negative gradients or breaks in inst.data.index
- polar: zero crossings in latitude or breaks in inst.data.index
- orbit: uses unique values of orbit number
(default='local time')
period : np.timedelta64 or NoneType
length of time for orbital period, used to gauge when a break
in the datetime index `inst.index` is large enough to
consider it a new orbit (default=None)
Attributes
----------
inst
kind
orbit_period : pds.Timedelta
Pandas Timedelta that specifies the orbit period. Used instead of
dt.timedelta to enable np.timedelta64 input. (default=97 min)
num : int
Number of orbits in loaded data
orbit_index : int
Index of currently loaded orbit, zero indexed
Raises
------
ValueError
If `kind` is unsupported
Note
----
Determines the locations of orbit breaks in the loaded data in `inst.data`
and provides iteration tools and convenient orbit selection via
`inst.orbit[orbit num]`
This class should not be called directly by the user, it uses the interface
provided by `inst.orbits` where `inst = pysat.Instrument()`
Examples
--------
::
# Use orbit_info Instrument keyword to pass all Orbit kwargs
orbit_info = {'index': 'longitude', 'kind': 'longitude'}
vefi = pysat.Instrument(platform='cnofs', name='vefi', tag='dc_b',
clean_level=None, orbit_info=orbit_info)
# Load data
vefi.load(date=start)
# Set the instrument bounds
start = dt.datetime(2009, 1, 1)
stop = dt.datetime(2009, 1, 10)
vefi.bounds(start, stop)
# Iterate over orbits
for loop_vefi in vefi.orbits:
print('Next available orbit ', loop_vefi['dB_mer'])
# Load fifth orbit of first day
vefi.load(date=start)
vefi.orbits[5]
# Equivalent but less convenient load
vefi.orbits.load(5)
# Manually iterate forwards to the orbit
vefi.orbits.next()
# Manually iterate backwards to the previous orbit
vefi.orbits.prev()
"""
# -----------------------------------------------------------------------
# Define the magic methods
def __init__(self, inst, index=None, kind='local time', period=None):
"""Initialize `pysat.Instrument.orbits` object."""
# Set the class attributes
self.inst = weakref.proxy(inst)
self.kind = kind.lower()
if period is None:
period = pds.Timedelta(np.timedelta64(97, 'm'))
self.orbit_period = pds.Timedelta(period)
orbit_breaks = None
if self.kind in ['local time', 'lt']:
orbit_breaks = 24.0
elif self.kind in ['longitude', 'long', 'lon']:
orbit_breaks = 360.0
if orbit_breaks is None:
if self.kind == 'polar':
self._det_breaks = self._polar_breaks
elif self.kind == 'orbit':
self._det_breaks = self._orbit_number_breaks
else:
raise ValueError('Unknown kind of orbit requested.')
else:
self._det_breaks = functools.partial(
self._equa_breaks, orbit_index_period=orbit_breaks)
self._orbit_breaks = []
self.num = 0
self._current = 0
self.orbit_index = index
def __repr__(self):
"""Print the basic Orbits properties."""
out_str = "".join(["pysat.Orbits(inst=", self.inst.__repr__(),
", index=", self.orbit_index.__repr__(),
", kind=", self.kind.__repr__(), ", period=",
self.orbit_period.__repr__(), ")"])
return out_str
def __str__(self):
"""Descriptively print the basic Obits properties."""
output_str = 'Orbit Settings\n'
output_str += '--------------\n'
output_str += 'Orbit Kind: {:s}\n'.format(self.kind.__repr__())
output_str += 'Orbit Index: {:s}\n'.format(self.orbit_index.__repr__())
output_str += 'Orbit Period: {:s}\n'.format(
self.orbit_period.__repr__())
output_str += 'Number of Orbits: {:d}\n'.format(self.num)
output_str += 'Loaded Orbit Number: {:s}\n'.format(
self._current.__repr__())
return output_str
    def __eq__(self, other):
        """Perform equality check.

        Parameters
        ----------
        other : any
            Other object to compare for equality

        Returns
        -------
        bool
            True if objects are identical, False if they are not

        """
        # Check if other is the same class (Orbits). Exit early if not.
        if not isinstance(other, self.__class__):
            return False
        # If the type is the same then check everything that is attached to
        # the Orbits object. Includes attributes, methods, variables, etc.
        checks = []
        key_check = []
        for key in self.__dict__.keys():
            if key in other.__dict__.keys():
                # Note: 'inst' is excluded below and handled by no elif
                # branch, so the linked Instrument (which refers back to
                # this object) is deliberately skipped in the comparison.
                if key not in ['_full_day_data', 'inst', '_det_breaks']:
                    # Standard equality comparison
                    test = np.all(self.__dict__[key] == other.__dict__[key])
                    checks.append(test)
                    key_check.append(key)
                elif key in ['_full_day_data']:
                    # Compare data
                    if isinstance(self.__dict__[key], pds.DataFrame):
                        try:
                            # Comparisons can error simply for having
                            # different DataFrames.
                            check = np.all(self.__dict__[key]
                                           == other.__dict__[key])
                        except ValueError:
                            # If there is an error they aren't the same
                            return False
                        checks.append(check)
                        key_check.append(key)
                    else:
                        # xarray comparison
                        test = xr.Dataset.equals(self.__dict__[key],
                                                 other.__dict__[key])
                        checks.append(test)
                        key_check.append(key)
                elif key == '_det_breaks':
                    # Equality of partial functions does not work well.
                    # Using a string comparison instead. This can also break
                    # if one of the objects is missing some attributes.
                    try:
                        check = str(self._det_breaks) == str(other._det_breaks)
                    except AttributeError:
                        # One object is missing a required attribute
                        return False
                    checks.append(check)
                    key_check.append(key)
            else:
                # `other` lacks an attribute that `self` has; not equal
                checks.append(False)
                key_check.append(key)
                return False
        # Confirm that Orbits object `other` doesn't have extra terms
        for key in other.__dict__.keys():
            if key not in self.__dict__.keys():
                return False
        test_data = np.all(checks)
        return test_data
def __getitem__(self, orbit_key):
"""Enable convenience notation for loading orbit into parent object.
Parameters
----------
orbit_key : int or None
Orbit number to get, zero indexed
Examples
--------
::
inst.load(date=date)
inst.orbits[4]
print('Orbit data ', inst.data)
Note
----
A day of data must already be loaded.
"""
if orbit_key < 0:
# Loading for reverse indices
self.load(orbit_key)
else:
# Loading for forward indices
self.load(orbit_key + 1)
    def __iter__(self):
        """Support iteration by orbit.

        Examples
        --------
        ::

            for loop_inst in inst.orbits:
                print('next available orbit ', loop_inst.data)

        Note
        ----
        For each iteration the next available orbit is loaded into
        `inst.data`

        Limits of iteration set by setting `inst.bounds`

        """
        # Load up the first increment of data
        while self.inst.empty:
            self.inst.next()
        # Make a copy of the Instrument object
        local_inst = self.inst.copy()
        while True:
            try:
                self.next()
                # Ensure that garbage collection doesn't delete `self.inst`
                # by yielding a copy, without spending time on copying data.
                # Each data attribute is detached, the lightweight state is
                # synced to `local_inst`, then the data is restored and shared
                # by reference with the yielded copy.
                data = self.inst.data
                self.inst.data = self.inst._null_data
                curr_data = self.inst._curr_data
                self.inst._curr_data = self.inst._null_data
                prev_data = self.inst._prev_data
                self.inst._prev_data = self.inst._null_data
                next_data = self.inst._next_data
                self.inst._next_data = self.inst._null_data
                # Account for data on orbit object itself
                full_day_data = self._full_day_data
                self._full_day_data = self.inst._null_data
                local_inst.date = self.inst.date
                # Restore data
                self.inst.data = data
                local_inst.data = data
                self.inst._curr_data = curr_data
                local_inst._curr_data = curr_data
                self.inst._prev_data = prev_data
                local_inst._prev_data = prev_data
                self.inst._next_data = next_data
                local_inst._next_data = next_data
                self._full_day_data = full_day_data
                local_inst.orbits._full_day_data = full_day_data
                # Keep the yielded copy's orbit bookkeeping in sync
                local_inst.orbits.num = self.num
                local_inst.orbits._current = self._current
                local_inst.orbits._orbit_breaks = self._orbit_breaks
                yield local_inst
            except StopIteration:
                # `self.next()` ran past the Instrument bounds; end iteration
                return
# -----------------------------------------------------------------------
# Define the hidden methods
def _report_current_orbit(self):
"""Report the current orbit to log at the info level."""
# Index appears as zero-indexed, though it is one-indexed
pysat.logger.info('Loaded Orbit: {:d}'.format(self._current - 1))
return
def _reset(self):
"""Create null arrays for storing orbit info."""
self._orbit_breaks = []
self.num = 0
self._current = 0
return
    def _calc_orbits(self):
        """Prepare data structure for breaking data into orbits.

        Raises
        ------
        ValueError
            If the Instrument bounds are set to load overlapping data sets

        """
        # Check there isn't an overlapping data set from iteration bounds
        estr = ' '.join(('Orbit iteration is not currently supported',
                         'when the pysat.Instrument bounds are',
                         'configured for loading overlapping',
                         'data. Please set the Instrument bounds width',
                         'to be less than or equal to the bounds step ',
                         'increment. See `pysat.Instrument.bounds` for more.'))
        if self.inst._iter_type == 'file':
            if self.inst._iter_step < self.inst._iter_width:
                raise ValueError(estr)
        else:
            # Iterating by date. We need to check step (frequency string)
            # against width (timedelta)
            step = pds.tseries.frequencies.to_offset(self.inst._iter_step)
            step = dt.timedelta(seconds=pds.Timedelta(step).total_seconds())
            # Any anchor date works; only the relative offsets are compared
            root = dt.datetime(2001, 1, 1)
            if root + step < root + self.inst._iter_width:
                raise ValueError(estr)
        # If the breaks between orbit have not been defined, define them here.
        # Also store the data so that grabbing different orbits does not
        # require reloads of whole dataset.
        if len(self._orbit_breaks) == 0:
            # Determine orbit breaks
            self._det_breaks()
            # Store a copy of data
            self._full_day_data = self.inst.data.copy()
            # Set current orbit counter to zero (default)
            self._current = 0
        return
    def _equa_breaks(self, orbit_index_period=24.0):
        """Determine where breaks in an equatorial satellite orbit occur.

        Looks for negative gradients in local time (or longitude) as well as
        breaks in UT.

        Parameters
        ----------
        orbit_index_period : float
            The change in value of supplied index parameter for a single orbit
            (default=24.0)

        Raises
        ------
        ValueError
            If the `orbit_index` attribute is not set to an appropriate value,
            or if the requested index not in loaded data

        """
        if self.orbit_index is None:
            raise ValueError(' '.join(('Orbit properties must be defined at ',
                                       'pysat.Instrument object instantiation.',
                                       'See Instrument docs.')))
        else:
            try:
                self.inst[self.orbit_index]
            except KeyError as err:
                raise ValueError(''.join((str(err), '\n',
                                          'Provided orbit index does not ',
                                          'exist in loaded data')))
        # Get the difference in orbit index around the orbit
        lt_diff = self.inst[self.orbit_index]
        if not self.inst.pandas_format:
            # Convert xarray data so pandas-style `.diff`/`.iloc` work below
            lt_diff = lt_diff.to_pandas()
        lt_diff = lt_diff.diff()
        # Get the typical (median) difference
        typical_lt_diff = np.nanmedian(lt_diff)
        pysat.logger.info(''.join(('typical lt diff ', str(typical_lt_diff))))
        # Get the Universal Time difference between data values. Assumes that
        # the time index is in UT.
        ut_vals = pds.Series(self.inst.index)
        ut_diff = ut_vals.diff()
        # Get the locations where the orbit index derivative is less than 0,
        # then do some sanity checks on these locations
        ind, = np.where((lt_diff < -0.2 * typical_lt_diff))
        if len(ind) > 0:
            # Append total data length so each candidate break has a
            # right-hand neighbour for the spacing check below
            ind = np.hstack((ind, np.array([len(self.inst[self.orbit_index])])))
            # Look at distance between breaks
            dist = ind[1:] - ind[0:-1]
            # Only keep orbit breaks with a distance greater than 1. This check
            # is done to ensure robustness.
            if len(ind) > 1:
                if min(dist) == 1:
                    pysat.logger.info(' '.join(('There are orbit breaks right',
                                                'next to each other')))
                ind = ind[:-1][dist > 1]
            # Check for large positive gradients around the break that would
            # suggest not a true orbit break, but rather bad `orbit_index`
            # values
            new_ind = []
            for idx in ind:
                # Examine a window of +/- 5 samples around the candidate
                sub_idx = slice((idx - 5), (idx + 6))
                tidx, = np.where(lt_diff[sub_idx]
                                 > 10 * typical_lt_diff)
                if len(tidx) != 0:
                    # There are large changes, this suggests a false alarm.
                    # Iterate over samples and check.
                    for sub_tidx in tidx:
                        # Look at time change vs local time change
                        false_alarm = (
                            ut_diff[sub_idx].iloc[sub_tidx] * orbit_index_period
                            < lt_diff[sub_idx].iloc[sub_tidx]
                            * self.orbit_period)
                        if false_alarm:
                            # The change in UT is small compared to the change
                            # in the orbit index this is flagged as a false
                            # alarm, or dropped from consideration
                            pysat.logger.info(' '.join(('Dropping found break',
                                                        'as false positive.')))
                            pass
                        else:
                            # The change in UT is significant, keep orbit break
                            new_ind.append(idx)
                            break
                else:
                    # There are no large positive gradients, current orbit
                    # break passes the first test
                    new_ind.append(idx)
            # Replace all breaks with those that are 'good'
            ind = np.array(new_ind)
        # Now, assemble some orbit breaks that are not triggered by changes in
        # the orbit index
        #
        # Check if there is a UT break that is larger than orbital period, AKA
        # a time gap
        ut_change_vs_period = (ut_diff > self.orbit_period)
        # Characterize ut change using orbital period
        norm_ut = ut_diff / self.orbit_period
        # Now, look for breaks because the length of time between samples is
        # too large, thus there is no break in slt/mlt/etc, lt_diff is small
        # but UT change is big
        norm_ut_vs_norm_lt = norm_ut.gt(np.abs(lt_diff.values
                                               / orbit_index_period))
        # Indices when one or other flag is true
        ut_ind, = np.where(ut_change_vs_period
                           | (norm_ut_vs_norm_lt & (norm_ut > 0.95)))
        # Combine these UT determined orbit breaks with the orbit index orbit
        # breaks
        if len(ut_ind) > 0:
            ind = np.hstack((ind, ut_ind))
            ind = np.sort(ind)
            ind = np.unique(ind)
            pysat.logger.info('Time Gap at locations: {:}'.format(ut_ind))
        # Now that most problems in orbits should have been caught, look at
        # the time difference between orbits (not individual orbits)
        orbit_ut_diff = ut_vals[ind].diff()
        if not self.inst.pandas_format:
            orbit_lt_diff = self.inst[
                self.orbit_index].to_pandas().iloc[ind].diff()
        else:
            orbit_lt_diff = self.inst[self.orbit_index].iloc[ind].diff()
        # Look for time gaps between partial orbits. The full orbital time
        # period is not required between end of one orbit and beginning of next
        # if first orbit is partial. Also provides another general test of the
        # orbital breaks determined.
        idx, = np.where((orbit_ut_diff / self.orbit_period
                         - orbit_lt_diff.values / orbit_index_period) > 0.97)
        # Pull out breaks that pass the test, need to make sure the first one
        # is always included it gets dropped via the nature of diff
        if len(idx) > 0:
            if idx[0] != 0:
                idx = np.hstack((0, idx))
        else:
            idx = np.array([0])
        # Only keep the good indices
        if len(ind) > 0:
            ind = ind[idx]
            # Create an orbit break index, ensure first element is always 0
            if ind[0] != 0:
                ind = np.hstack((np.array([0]), ind))
        else:
            ind = np.array([0])
        # Set the index of orbit breaks and the number of orbits
        self._orbit_breaks = ind
        self.num = len(ind)
        return
def _polar_breaks(self):
"""Determine where breaks in a polar orbiting satellite orbit occur.
Raises
------
ValueError
If the `orbit_index` attribute is not set to an appropriate value,
or if the requested index not in the loaded data
Note
----
Looks for sign changes in latitude (magnetic or geographic) as well as
breaks in UT.
"""
if self.orbit_index is None:
raise ValueError(' '.join(('Orbit properties must be defined at',
'pysat.Instrument object instantiation.',
'See Instrument docs.')))
else:
try:
self.inst[self.orbit_index]
except KeyError as err:
raise ValueError(''.join((str(err), '\n',
'Provided orbit index does not ',
'exist in loaded data')))
# Determine where orbit index goes from positive to negative
pos = (self.inst[self.orbit_index] >= 0)
npos = np.logical_not(pos)
change = ((pos.values[:-1] & npos.values[1:])
| (npos.values[:-1] & pos.values[1:]))
ind, = np.where(change)
ind += 1
ut_diff = pds.Series(self.inst.index).diff()
ut_ind, = np.where(ut_diff / self.orbit_period > 0.95)
if len(ut_ind) > 0:
ind = np.unique(np.sort(np.hstack((ind, ut_ind))))
# Create an orbit break index, ensure first element is always 0
if len(ind) > 0:
if ind[0] != 0:
ind = np.hstack((np.array([0]), ind))
else:
ind = np.array([0])
# Set the index of orbit breaks and the number of orbits
self._orbit_breaks = ind
self.num = len(ind)
return
def _orbit_number_breaks(self):
"""Find orbital breaks in a dataset with orbit numbers occur.
Raises
------
ValueError
If the `orbit_index` attribute is not set to an appropriate value,
or if the requested orbit not in the loaded data
Note
----
Looks for changes in unique values.
"""
if self.orbit_index is None:
raise ValueError(' '.join(('Orbit properties must be defined at ',
'pysat.Instrument object instantiation.',
'See Instrument docs.')))
else:
try:
self.inst[self.orbit_index]
except KeyError as err:
raise ValueError(''.join((str(err), '\n',
'Provided orbit index does not ',
'exist in loaded data')))
# Determine where the orbit index changes from one value to the next
uniq_vals = np.unique(self.inst[self.orbit_index].values)
orbit_index = []
for val in uniq_vals:
idx, = np.where(val == self.inst[self.orbit_index].values)
orbit_index.append(idx[0])
# Create orbit break index, ensure first element is always 0
if len(orbit_index) > 0:
if orbit_index[0] != 0:
ind = np.hstack((np.array([0]), orbit_index))
else:
ind = orbit_index
else:
ind = np.array([0])
# Set the index of orbit breaks and the number of orbits
self._orbit_breaks = ind
self.num = len(ind)
return
    def _get_basic_orbit(self, orbit_num):
        """Load a particular orbit into .data for loaded day.

        Parameters
        ----------
        orbit_num : int
            Orbit number. Note that the first orbit is `orbit_num=1`,
            rather than a zero index. Negative indexes are allowed
            with `-1` as the last orbit.

        Raises
        ------
        ValueError
            If `orbit_num` set to 0, or if requested orbit past loaded orbits

        Note
        ----
        A day of data must be loaded before this routine functions properly.
        If the last orbit of the day is requested, it will NOT automatically be
        padded with data from the next day.

        """
        # Ensure data exists
        if not self.inst.empty:
            # Ensure proper orbit metadata present
            self._calc_orbits()
            # Set up data access for both pandas and xarray
            self.inst.data = self._full_day_data
            # Pull out the requested orbit. In every branch `_current` is
            # stored one-indexed (1 is the first orbit of the day).
            if orbit_num == -1:
                # Load last orbit data into data
                orb_break = self._orbit_breaks[self.num + orbit_num]
                self.inst.data = self.inst[orb_break:]
                self._current = self.num + orbit_num + 1
            elif orbit_num < 0 and orbit_num >= -self.num:
                # Load backwards index orbit data into data
                self.inst.data = self.inst[
                    self._orbit_breaks[self.num + orbit_num]:
                    self._orbit_breaks[self.num + orbit_num + 1]]
                self._current = self.num + orbit_num + 1
            elif orbit_num < self.num and orbit_num != 0:
                # Load forward indexed orbit data into data
                self.inst.data = self.inst[self._orbit_breaks[orbit_num - 1]:
                                           self._orbit_breaks[orbit_num]]
                self._current = orbit_num
            elif orbit_num == self.num:
                # Last orbit: slice from the final break through end of day
                self.inst.data = self.inst[self._orbit_breaks[orbit_num - 1]:]
                self._current = orbit_num
            elif orbit_num == 0:
                raise ValueError(' '.join(('Orbits internally indexed by',
                                           '1, 0 not allowed.')))
            else:
                # Gone too far
                self.inst.data = []
                raise ValueError(' '.join(('Requested an orbit past total',
                                           'orbits for day.')))
        return
# -----------------------------------------------------------------------
# Define the public methods and properties
def copy(self):
"""Provide a deep copy of object.
Returns
-------
Orbits class instance
Copy of self
"""
# pysat.Instrument has a link to orbits, so copying the referenced
# self.inst would lead to infinite recursion.
inst = self.inst
self.inst = None
# Copy everything else
orbits_copy = copy.deepcopy(self)
# Both this object and the copy refer back to the same pysat.Instrument
orbits_copy.inst = inst
self.inst = inst
return orbits_copy
@property
def current(self):
"""Retrieve current orbit number.
Returns
-------
int or NoneType
None if no orbit data. Otherwise, returns orbit number, beginning
with zero. The first and last orbit of a day is somewhat ambiguous.
The first orbit for day n is generally also the last orbit
on day n - 1. When iterating forward, the orbit will be labeled
as first (0). When iterating backward, orbit labeled as the last.
"""
if self._current > 0:
return self._current - 1
else:
return None
    def load(self, orbit_num):
        """Load a particular orbit into .data for loaded day.

        Parameters
        ----------
        orbit_num : int
            orbit number, 1 indexed (1-length or -1 to -length) with sign
            denoting forward or backward indexing

        Raises
        ------
        ValueError
            If index requested lies beyond the number of orbits

        Note
        ----
        A day of data must be loaded before this routine functions properly.
        If the last orbit of the day is requested, it will automatically be
        padded with data from the next day. The orbit counter will be
        reset to 1.

        """
        # Ensure data exists
        if not self.inst.empty:
            # Set up orbit metadata
            self._calc_orbits()
            # Pull out the requested orbit
            if orbit_num < 0:
                # Negative indexing consistent with numpy, -1 last,
                # -2 second to last, etc
                orbit_num = self.num + 1 + orbit_num
            if orbit_num == self.num:
                # We get here if user asks for last orbit. This call is first
                # to trap the case where there is only one orbit (self.num=1),
                # which needs to be treated differently than a orbit=1 call.
                if self.num != 1:
                    # More than one orbit, go back one (simple call) and
                    # then forward doing full logic for breaks across day
                    self._get_basic_orbit(self.num - 1)
                    self.next()
                else:
                    # At most one complete orbit in the file, check if we are
                    # close to beginning or end of day
                    date = self.inst.date
                    delta_start = self.inst.index[-1] - date
                    delta_end = (date + dt.timedelta(days=1)
                                 - self.inst.index[0])
                    if delta_start <= self.orbit_period * 1.05:
                        # We are near the beginning. Load the previous file,
                        # then go forward one orbit.
                        self.inst.prev()
                        self.next()
                        if self.inst.index[-1] < date + delta_start:
                            # We could go back a day, iterate over orbit, as
                            # above, and the data we have is the wrong day.
                            # In this case, move forward again. This happens
                            # when previous day doesn't have data near end of
                            # the day.
                            self.next()
                    elif delta_end <= self.orbit_period * 1.05:
                        # Near end; load next file, then go back one orbit
                        self.inst.next()
                        self.prev()
                        if self.inst.index[0] > (date - delta_end
                                                 + dt.timedelta(days=1)):
                            # We could go forward a day, iterate over orbit
                            # as above, and the data we have is the wrong day.
                            # In this case, move back again. This happens when
                            # next day doesn't have data near beginning of the
                            # day.
                            self.prev()
                    else:
                        # Not near beginning or end, just get the last orbit
                        # available (only one)
                        self._get_basic_orbit(-1)
            elif orbit_num == 1:
                # User asked for first orbit
                try:
                    # Orbit could start file previous; check for this condition
                    # and store the real date user wants
                    true_date = self.inst.date
                    # Go back a day
                    self.inst.prev()
                    # If and else added because of Instruments that have large
                    # gaps (e.g., C/NOFS). In this case, prev can return
                    # empty data.
                    if not self.inst.empty:
                        # Get last orbit if there is data. This will deal with
                        # orbits across file cleanly.
                        self.load(-1)
                    else:
                        # No data, no previous data to account for. Move back
                        # to original data, do simple load of first orbit.
                        self.inst.next()
                        self._get_basic_orbit(1)
                    # Check that this orbit should end on the current day
                    delta = true_date - self.inst.index[0]
                    if delta >= self.orbit_period:
                        # The orbit loaded isn't close enough to date to be the
                        # first orbit of the day, move forward
                        self.next()
                except StopIteration:
                    # Check if the first orbit is also the last orbit
                    self._get_basic_orbit(1)
                self._report_current_orbit()
            elif orbit_num < self.num:
                # Load basic orbit data into data
                self._get_basic_orbit(orbit_num)
                self._report_current_orbit()
            else:
                # Gone too far
                self.inst.data = self.inst._null_data
                raise ValueError(' '.join(('Requested an orbit past total',
                                           'orbits for day.')))
        else:
            pysat.logger.info(' '.join(('No data loaded in instrument object',
                                        'to determine orbits.')))
        return
    def next(self):
        """Load the next orbit into associated `Instrument.data` object.

        Raises
        ------
        RuntimeError
            Placed in code that a user should never be able to reach

        Note
        ----
        Forms complete orbits across day boundaries. If no data loaded
        then the first orbit from the first date of data is returned.

        """
        # Check if data exists
        if not self.inst.empty:
            # Set up orbit metadata
            self._calc_orbits()
            # If current orbit near the last, must be careful
            if self._current == (self.num - 1):
                # Load last orbit data
                self._get_basic_orbit(-1)
                # End of orbit may occur on the next day
                load_next = True
                if self.inst._iter_type == 'date':
                    delta = (self.inst.date - self.inst.index[-1]
                             + pds.Timedelta('1 day'))
                    if delta >= self.orbit_period:
                        # Don't need to load the next day because this orbit
                        # ends more than a orbital period from the next date
                        load_next = False
                if load_next:
                    # The end of the user's desired orbit occurs tomorrow, need
                    # to form a complete orbit save this current orbit, load
                    # the next day, combine data, select the correct orbit
                    temp_orbit_data = self.inst.copy()
                    try:
                        # Loading next day/file clears orbit breaks info
                        self.inst.next()
                        if not self.inst.empty:
                            # Combine this next day's data with previous last
                            # orbit, grab the first one
                            final_val = self.inst.index[0] - dt.timedelta(
                                microseconds=1)
                            self.inst.concat_data(temp_orbit_data[:final_val],
                                                  prepend=True)
                            self._get_basic_orbit(1)
                        else:
                            # No data, go back a day and grab the last orbit.
                            # This is as complete as this orbit can be.
                            self.inst.prev()
                            self._get_basic_orbit(-1)
                    except StopIteration:
                        # Reached the end of the data bounds; keep what we have
                        pass
                    del temp_orbit_data
                self._report_current_orbit()
            elif self._current == (self.num):
                # At the last orbit, need to be careful about getting the next
                # orbit save this current orbit and load the next day
                temp_orbit_data = self.inst.copy()
                # Load next day, which clears orbit breaks info
                self.inst.next()
                # Combine this next day orbit with previous last orbit to
                # ensure things are correct
                if not self.inst.empty:
                    pad_next = True
                    # Check if data padding is really needed, only works when
                    # loading by date
                    if self.inst._iter_type == 'date':
                        delta = self.inst.date - temp_orbit_data.index[-1]
                        if delta >= self.orbit_period:
                            # The end of the previous orbit is more than an
                            # orbit away from today we don't have to worry
                            # about it
                            pad_next = False
                    if pad_next:
                        # The orbit went across day break, stick old orbit onto
                        # new data and grab second orbit (first is old)
                        self.inst.concat_data(
                            temp_orbit_data[:self.inst.index[0]
                                            - dt.timedelta(microseconds=1)],
                            prepend=True)
                        # Select second orbit of combined data
                        self._get_basic_orbit(2)
                    else:
                        # Padding from the previous orbit wasn't needed, can
                        # just grab the first orbit of loaded data
                        self._get_basic_orbit(1)
                        if self.inst._iter_type == 'date':
                            delta = (self.inst.date + dt.timedelta(days=1)
                                     - self.inst.index[0])
                            if delta < self.orbit_period:
                                # This orbit's end occurs on the next day.
                                # Though we grabbed the first orbit, missing
                                # data means the first available orbit in the
                                # data is actually the last for the day.
                                # Resetting to the second-to-last orbit and
                                # then calling next() will get the last orbit,
                                # accounting for tomorrow's data as well.
                                self._current = self.num - 1
                                self.next()
                else:
                    # There is no data for the next day, continue loading data
                    # until there is some. The `next` method raises
                    # StopIteration when it reaches the end, leaving this
                    # function.
                    while self.inst.empty:
                        self.inst.next()
                    self._get_basic_orbit(1)
                del temp_orbit_data
                self._report_current_orbit()
            elif self._current == 0:
                # No current orbit set, grab the first one using the load
                # command to specify the first orbit, which automatically
                # loads prev day if needed to form a complete orbit
                self.load(1)
            elif self._current < (self.num - 1):
                # Since we aren't close to the last orbit, just pull the next
                # orbit
                self._get_basic_orbit(self._current + 1)
                self._report_current_orbit()
            else:
                raise RuntimeError(' '.join(('This is a serious bug. Talk to ',
                                             'someone about this fundamental ',
                                             'failure or open an issue at',
                                             'www.github.com/pysat/pysat')))
        else:
            # There is no data
            while self.inst.empty:
                # Keep going until data is found or next raises StopIteration
                # at the end of the data set, and no more data is available
                self.inst.next()
            # We've found data, grab the next orbit
            self.next()
        return
    def prev(self):
        """Load the previous orbit into associated `Instrument.data` object.

        Raises
        ------
        RuntimeError
            Placed in code that a user should never be able to reach

        Note
        ----
        Forms complete orbits across day boundaries. If no data loaded
        then the last orbit of data from the last day is loaded.

        """
        # First, check if data exists
        if not self.inst.empty:
            # Set up orbit metadata
            self._calc_orbits()

            if (self._current > 2) and (self._current <= self.num):
                # If not close to the first orbit, just pull the previous orbit.
                # Load orbit and put it into `self.inst.data`.
                self._get_basic_orbit(self._current - 1)
                self._report_current_orbit()
            elif self._current == 2:
                # If current orbit near the first, must be careful.
                # First, load prev orbit data.
                self._get_basic_orbit(self._current - 1)

                load_prev = True
                if self.inst._iter_type == 'date':
                    # Time span from the start of the loaded day to the end of
                    # its data; if it exceeds an orbit period the first orbit
                    # cannot have started on the previous day.
                    delta = self.inst.index[-1] - self.inst.date
                    if delta >= self.orbit_period:
                        # Don't need to load the prev day because this orbit
                        # ends more than a orbital period from start of today's
                        # date
                        load_prev = False

                if load_prev:
                    # Need to save this current orbit and load the prev day
                    temp_orbit_data = self.inst[self.inst.date:]
                    # Load previous day, which clears orbit breaks info
                    try:
                        self.inst.prev()
                        # Combine this next day orbit with previous last orbit
                        if not self.inst.empty:
                            self.inst.concat_data(temp_orbit_data,
                                                  prepend=False)
                            # Select first orbit of combined data
                            self._get_basic_orbit(-1)
                        else:
                            # Previous day held no data; step back to the
                            # original day and use its first orbit.
                            self.inst.next()
                            self._get_basic_orbit(1)
                    except StopIteration:
                        # If loading the first orbit, of first day of data,
                        # you'll end up here as the attempt to make a full
                        # orbit will move the date backwards, and StopIteration
                        # is made. Everything is already ok, just move along.
                        pass

                    del temp_orbit_data

                self._report_current_orbit()
            elif self._current == 0:
                # No current orbit set; load the last orbit directly, which
                # handles any required day-boundary padding itself.
                self.load(-1)
                return
            elif self._current < 2:
                # First, load prev orbit data
                self._get_basic_orbit(1)
                # Need to save this current orbit and load the prev day
                temp_orbit_data = self.inst[self.inst.date:]
                # Load previous day, which clears orbit breaks info
                self.inst.prev()
                # Combine this next day orbit with previous last orbit
                if not self.inst.empty:
                    load_prev = True
                    if self.inst._iter_type == 'date':
                        # Gap between the end of the previous day's data and
                        # the start of the original day.
                        delta = (self.inst.date - self.inst.index[-1]
                                 + pds.Timedelta('1 day'))
                        if delta >= self.orbit_period:
                            # Don't need to load the prev day because this
                            # orbit ends more than a orbital period from start
                            # of today's date
                            load_prev = False

                    if load_prev:
                        self.inst.concat_data(temp_orbit_data, prepend=False)
                        # Select second to last orbit of combined data
                        self._get_basic_orbit(-2)
                    else:
                        # Padding from the previous day wasn't needed; just
                        # take the last orbit of the loaded day.
                        self._get_basic_orbit(-1)

                        if self.inst._iter_type == 'date':
                            delta = (self.inst.date - self.inst.index[-1]
                                     + pds.Timedelta('1 day'))
                            if delta < self.orbit_period:
                                # The last orbit is incomplete; jump to the
                                # end and let `prev` stitch it together.
                                self._current = self.num
                                self.prev()
                else:
                    # Previous day empty; keep stepping back until data is
                    # found (StopIteration ends the search at the start of
                    # the data set), then take its last orbit.
                    while self.inst.empty:
                        self.inst.prev()
                    self._get_basic_orbit(-1)

                del temp_orbit_data
                self._report_current_orbit()
            else:
                raise RuntimeError(' '.join(('You ended up where nobody should',
                                             'ever be. Talk to someone about',
                                             'this fundamental failure or open',
                                             'an issue at',
                                             'www.github.com/pysat/pysat')))
        else:
            # No data found
            while self.inst.empty:
                # Cycle to more data or raise StopIteration at end of data set
                self.inst.prev()
            self.prev()

        return
|
pysatREPO_NAMEpysatPATH_START.@pysat_extracted@pysat-main@pysat@_orbits.py@.PATH_END.py
|
{
"filename": "sklearn_gpu_training.py",
"repo_name": "dmlc/xgboost",
"repo_path": "xgboost_extracted/xgboost-master/demo/dask/sklearn_gpu_training.py",
"type": "Python"
}
|
"""
Use scikit-learn regressor interface with GPU histogram tree method
===================================================================
"""
import dask
from dask import array as da
from dask.distributed import Client
# It's recommended to use dask_cuda for GPU assignment
from dask_cuda import LocalCUDACluster
from xgboost import dask as dxgb
def main(client: Client) -> dxgb.Booster:
    """Train a GPU-backed DaskXGBRegressor on synthetic data and return its booster."""
    # Synthetic regression problem: uniform-noise features, target is the row sum.
    generator = da.random.default_rng(1)
    n_rows, n_cols = 2**18, 100
    X = generator.uniform(size=(n_rows, n_cols), chunks=(128**2, -1))
    y = X.sum(axis=1)

    model = dxgb.DaskXGBRegressor(verbosity=1)
    # Histogram tree method running on CUDA devices.
    model.set_params(tree_method="hist", device="cuda")
    # Attaching the client explicitly is optional.
    model.client = client

    model.fit(X, y, eval_set=[(X, y)])
    prediction = model.predict(X)

    booster = model.get_booster()
    history = model.evals_result()
    print("Evaluation history:", history)

    # Predictions from the Dask interface are always dask arrays.
    assert isinstance(prediction, da.Array)
    return booster  # returning the trained model
if __name__ == "__main__":
    # With dask cuda, one can scale up XGBoost to arbitrary GPU clusters.
    # `LocalCUDACluster` used here is only for demonstration purpose.
    with LocalCUDACluster() as cluster:
        # Create client from cluster, set the backend to GPU array (cupy),
        # so that dask allocates new arrays on the GPU.
        with Client(cluster) as client, dask.config.set({"array.backend": "cupy"}):
            main(client)
|
dmlcREPO_NAMExgboostPATH_START.@xgboost_extracted@xgboost-master@demo@dask@sklearn_gpu_training.py@.PATH_END.py
|
{
"filename": "models.py",
"repo_name": "pyro-ppl/pyro",
"repo_path": "pyro_extracted/pyro-master/pyro/contrib/mue/models.py",
"type": "Python"
}
|
# Copyright Contributors to the Pyro project.
# SPDX-License-Identifier: Apache-2.0
"""
Example MuE observation models.
"""
import datetime
import numpy as np
import torch
import torch.nn as nn
from torch.nn.functional import softplus
from torch.optim import Adam
from torch.utils.data import DataLoader
import pyro
import pyro.distributions as dist
from pyro import poutine
from pyro.contrib.mue.missingdatahmm import MissingDataDiscreteHMM
from pyro.contrib.mue.statearrangers import Profile
from pyro.infer import SVI, JitTrace_ELBO, Trace_ELBO
from pyro.optim import MultiStepLR
class ProfileHMM(nn.Module):
    """
    Profile HMM.

    This model consists of a constant distribution (a delta function) over the
    regressor sequence, plus a MuE observation distribution. The priors
    are all Normal distributions, and are pushed through a softmax function
    onto the simplex.

    :param int latent_seq_length: Length of the latent regressor sequence M.
        Must be greater than or equal to 1.
    :param int alphabet_length: Length of the sequence alphabet (e.g. 20 for
        amino acids).
    :param float prior_scale: Standard deviation of the prior distribution.
    :param float indel_prior_bias: Mean of the prior distribution over the
        log probability of an indel not occurring. Higher values lead to lower
        probability of indels.
    :param bool cuda: Transfer data onto the GPU during training.
    :param bool pin_memory: Pin memory for faster GPU transfer.
    """

    def __init__(
        self,
        latent_seq_length,
        alphabet_length,
        prior_scale=1.0,
        indel_prior_bias=10.0,
        cuda=False,
        pin_memory=False,
    ):
        super().__init__()
        assert isinstance(cuda, bool)
        self.is_cuda = cuda
        assert isinstance(pin_memory, bool)
        self.pin_memory = pin_memory

        assert isinstance(latent_seq_length, int) and latent_seq_length > 0
        self.latent_seq_length = latent_seq_length
        assert isinstance(alphabet_length, int) and alphabet_length > 0
        self.alphabet_length = alphabet_length

        # Parameter tensor shapes: precursor (ancestral) sequence, insertion
        # sequence (one extra slot), and per-position indel parameters.
        self.precursor_seq_shape = (latent_seq_length, alphabet_length)
        self.insert_seq_shape = (latent_seq_length + 1, alphabet_length)
        self.indel_shape = (latent_seq_length, 3, 2)

        assert isinstance(prior_scale, float)
        self.prior_scale = prior_scale
        assert isinstance(indel_prior_bias, float)
        self.indel_prior = torch.tensor([indel_prior_bias, 0.0])

        # Initialize state arranger.
        self.statearrange = Profile(latent_seq_length)

    def model(self, seq_data, local_scale):
        """Generative model: Normal priors log-normalized onto the simplex,
        MuE (missing-data discrete HMM) likelihood over sequences."""
        # Latent sequence.
        precursor_seq = pyro.sample(
            "precursor_seq",
            dist.Normal(
                torch.zeros(self.precursor_seq_shape),
                self.prior_scale * torch.ones(self.precursor_seq_shape),
            ).to_event(2),
        )
        # log-softmax over the alphabet dimension.
        precursor_seq_logits = precursor_seq - precursor_seq.logsumexp(-1, True)
        insert_seq = pyro.sample(
            "insert_seq",
            dist.Normal(
                torch.zeros(self.insert_seq_shape),
                self.prior_scale * torch.ones(self.insert_seq_shape),
            ).to_event(2),
        )
        insert_seq_logits = insert_seq - insert_seq.logsumexp(-1, True)

        # Indel probabilities.
        insert = pyro.sample(
            "insert",
            dist.Normal(
                self.indel_prior * torch.ones(self.indel_shape),
                self.prior_scale * torch.ones(self.indel_shape),
            ).to_event(3),
        )
        insert_logits = insert - insert.logsumexp(-1, True)
        delete = pyro.sample(
            "delete",
            dist.Normal(
                self.indel_prior * torch.ones(self.indel_shape),
                self.prior_scale * torch.ones(self.indel_shape),
            ).to_event(3),
        )
        delete_logits = delete - delete.logsumexp(-1, True)

        # Construct HMM parameters.
        initial_logits, transition_logits, observation_logits = self.statearrange(
            precursor_seq_logits, insert_seq_logits, insert_logits, delete_logits
        )
        with pyro.plate("batch", seq_data.shape[0]):
            # local_scale corrects the likelihood for mini-batch subsampling.
            with poutine.scale(scale=local_scale):
                # Observations.
                pyro.sample(
                    "obs_seq",
                    MissingDataDiscreteHMM(
                        initial_logits, transition_logits, observation_logits
                    ),
                    obs=seq_data,
                )

    def guide(self, seq_data, local_scale):
        """Mean-field Normal variational posterior over all global latents;
        scale parameters are kept positive via softplus."""
        # Sequence.
        precursor_seq_q_mn = pyro.param(
            "precursor_seq_q_mn", torch.zeros(self.precursor_seq_shape)
        )
        precursor_seq_q_sd = pyro.param(
            "precursor_seq_q_sd", torch.zeros(self.precursor_seq_shape)
        )
        pyro.sample(
            "precursor_seq",
            dist.Normal(precursor_seq_q_mn, softplus(precursor_seq_q_sd)).to_event(2),
        )
        insert_seq_q_mn = pyro.param(
            "insert_seq_q_mn", torch.zeros(self.insert_seq_shape)
        )
        insert_seq_q_sd = pyro.param(
            "insert_seq_q_sd", torch.zeros(self.insert_seq_shape)
        )
        pyro.sample(
            "insert_seq",
            dist.Normal(insert_seq_q_mn, softplus(insert_seq_q_sd)).to_event(2),
        )

        # Indels.
        insert_q_mn = pyro.param(
            "insert_q_mn", torch.ones(self.indel_shape) * self.indel_prior
        )
        insert_q_sd = pyro.param("insert_q_sd", torch.zeros(self.indel_shape))
        pyro.sample(
            "insert",
            dist.Normal(insert_q_mn, softplus(insert_q_sd)).to_event(3),
        )
        delete_q_mn = pyro.param(
            "delete_q_mn", torch.ones(self.indel_shape) * self.indel_prior
        )
        delete_q_sd = pyro.param("delete_q_sd", torch.zeros(self.indel_shape))
        pyro.sample(
            "delete",
            dist.Normal(delete_q_mn, softplus(delete_q_sd)).to_event(3),
        )

    def fit_svi(
        self,
        dataset,
        epochs=2,
        batch_size=1,
        scheduler=None,
        jit=False,
    ):
        """
        Infer approximate posterior with stochastic variational inference.

        This runs :class:`~pyro.infer.svi.SVI`. It is an approximate inference
        method useful for quickly iterating on probabilistic models.

        :param ~torch.utils.data.Dataset dataset: The training dataset.
        :param int epochs: Number of epochs of training.
        :param int batch_size: Minibatch size (number of sequences).
        :param pyro.optim.MultiStepLR scheduler: Optimization scheduler.
            (Default: Adam optimizer, 0.01 constant learning rate.)
        :param bool jit: Whether to use a jit compiled ELBO.
        """
        # Setup.
        if batch_size is not None:
            self.batch_size = batch_size
        if scheduler is None:
            # No milestones, so this is effectively a constant learning rate.
            scheduler = MultiStepLR(
                {
                    "optimizer": Adam,
                    "optim_args": {"lr": 0.01},
                    "milestones": [],
                    "gamma": 0.5,
                }
            )
        if self.is_cuda:
            device = torch.device("cuda")
        else:
            device = torch.device("cpu")
        # Initialize guide (registers all pyro.param sites before training).
        self.guide(None, None)
        dataload = DataLoader(
            dataset,
            batch_size=batch_size,
            shuffle=True,
            pin_memory=self.pin_memory,
            generator=torch.Generator(device=device),
        )
        # Setup stochastic variational inference.
        if jit:
            elbo = JitTrace_ELBO(ignore_jit_warnings=True)
        else:
            elbo = Trace_ELBO()
        svi = SVI(self.model, self.guide, scheduler, loss=elbo)

        # Run inference.
        losses = []
        t0 = datetime.datetime.now()
        for epoch in range(epochs):
            for seq_data, L_data in dataload:
                if self.is_cuda:
                    seq_data = seq_data.cuda()
                # Rescale the minibatch ELBO to the full-dataset ELBO.
                loss = svi.step(
                    seq_data, torch.tensor(len(dataset) / seq_data.shape[0])
                )
                losses.append(loss)
                scheduler.step()
            print(epoch, loss, " ", datetime.datetime.now() - t0)
        return losses

    def evaluate(self, dataset_train, dataset_test=None, jit=False):
        """
        Evaluate performance (log probability and per residue perplexity) on
        train and test datasets.

        :param ~torch.utils.data.Dataset dataset_train: The training dataset.
        :param ~torch.utils.data.Dataset dataset_test: The testing dataset.
        :param bool jit: Whether to use a jit compiled ELBO.
        """
        dataload_train = DataLoader(dataset_train, batch_size=1, shuffle=False)
        if dataset_test is not None:
            dataload_test = DataLoader(dataset_test, batch_size=1, shuffle=False)
        # Initialize guide.
        self.guide(None, None)
        if jit:
            elbo = JitTrace_ELBO(ignore_jit_warnings=True)
        else:
            elbo = Trace_ELBO()
        # Scheduler is only needed to construct the SVI object; no steps taken.
        scheduler = MultiStepLR({"optimizer": Adam, "optim_args": {"lr": 0.01}})
        # Setup stochastic variational inference.
        svi = SVI(self.model, self.guide, scheduler, loss=elbo)

        # Compute elbo and perplexity.
        train_lp, train_perplex = self._evaluate_local_elbo(
            svi, dataload_train, len(dataset_train)
        )
        if dataset_test is not None:
            test_lp, test_perplex = self._evaluate_local_elbo(
                svi, dataload_test, len(dataset_test)
            )
            return train_lp, test_lp, train_perplex, test_perplex
        else:
            return train_lp, None, train_perplex, None

    def _local_variables(self, name, site):
        """Return per datapoint random variables in model.

        Signature matches the site filter accepted by ``Trace.log_prob_sum``.
        """
        return name in ["obs_L", "obs_seq"]

    def _evaluate_local_elbo(self, svi, dataload, data_size):
        """Evaluate elbo and average per residue perplexity."""
        lp, perplex = 0.0, 0.0
        with torch.no_grad():
            for seq_data, L_data in dataload:
                if self.is_cuda:
                    seq_data, L_data = seq_data.cuda(), L_data.cuda()
                conditioned_model = poutine.condition(
                    self.model, data={"obs_seq": seq_data}
                )
                args = (seq_data, torch.tensor(1.0))
                # Replay the model against a guide sample and accumulate only
                # the per-datapoint (local) log probability terms.
                guide_tr = poutine.trace(self.guide).get_trace(*args)
                model_tr = poutine.trace(
                    poutine.replay(conditioned_model, trace=guide_tr)
                ).get_trace(*args)
                local_elbo = (
                    (
                        model_tr.log_prob_sum(self._local_variables)
                        - guide_tr.log_prob_sum(self._local_variables)
                    )
                    .cpu()
                    .numpy()
                )
                lp += local_elbo
                # Per-residue normalization uses the sequence length L_data.
                perplex += -local_elbo / L_data[0].cpu().numpy()
        perplex = np.exp(perplex / data_size)
        return lp, perplex
class Encoder(nn.Module):
    """Amortization network: flattens a one-hot sequence and maps it to the
    location and (softplus-positive) scale of a Normal posterior over z."""

    def __init__(self, data_length, alphabet_length, z_dim):
        super().__init__()
        # Flattened input dimension: sequence length x alphabet size.
        self.input_size = data_length * alphabet_length
        self.f1_mn = nn.Linear(self.input_size, z_dim)  # posterior mean head
        self.f1_sd = nn.Linear(self.input_size, z_dim)  # pre-softplus scale head

    def forward(self, data):
        """Return ``(z_loc, z_scale)`` for a batch of sequences."""
        flat = data.reshape(-1, self.input_size)
        loc = self.f1_mn(flat)
        scale = softplus(self.f1_sd(flat))
        return loc, scale
class FactorMuE(nn.Module):
    """
    FactorMuE

    This model consists of probabilistic PCA plus a MuE output distribution.

    The priors are all Normal distributions, and where relevant pushed through
    a softmax onto the simplex.

    :param int data_length: Length of the input sequence matrix, including
        zero padding at the end.
    :param int alphabet_length: Length of the sequence alphabet (e.g. 20 for
        amino acids).
    :param int z_dim: Number of dimensions of the z space.
    :param int batch_size: Minibatch size.
    :param int latent_seq_length: Length of the latent regressor sequence (M).
        Must be greater than or equal to 1. (Default: 1.1 x data_length.)
    :param bool indel_factor_dependence: Indel probabilities depend on the
        latent variable z.
    :param float indel_prior_scale: Standard deviation of the prior
        distribution on indel parameters.
    :param float indel_prior_bias: Mean of the prior distribution over the
        log probability of an indel not occurring. Higher values lead to lower
        probability of indels.
    :param float inverse_temp_prior: Mean of the prior distribution over the
        inverse temperature parameter.
    :param float weights_prior_scale: Standard deviation of the prior
        distribution over the factors.
    :param float offset_prior_scale: Standard deviation of the prior
        distribution over the offset (constant) in the pPCA model.
    :param str z_prior_distribution: Prior distribution over the latent
        variable z. Either 'Normal' (pPCA model) or 'Laplace' (an ICA model).
    :param bool ARD_prior: Use automatic relevance determination prior on
        factors.
    :param bool substitution_matrix: Use a learnable substitution matrix
        rather than the identity matrix.
    :param float substitution_prior_scale: Standard deviation of the prior
        distribution over substitution matrix parameters (when
        substitution_matrix is True).
    :param int latent_alphabet_length: Length of the alphabet in the latent
        regressor sequence.
    :param bool cuda: Transfer data onto the GPU during training.
    :param bool pin_memory: Pin memory for faster GPU transfer.
    :param float epsilon: A small value for numerical stability.
    """

    def __init__(
        self,
        data_length,
        alphabet_length,
        z_dim,
        batch_size=10,
        latent_seq_length=None,
        indel_factor_dependence=False,
        indel_prior_scale=1.0,
        indel_prior_bias=10.0,
        inverse_temp_prior=100.0,
        weights_prior_scale=1.0,
        offset_prior_scale=1.0,
        z_prior_distribution="Normal",
        ARD_prior=False,
        substitution_matrix=True,
        substitution_prior_scale=10.0,
        latent_alphabet_length=None,
        cuda=False,
        pin_memory=False,
        epsilon=1e-32,
    ):
        super().__init__()
        assert isinstance(cuda, bool)
        self.is_cuda = cuda
        assert isinstance(pin_memory, bool)
        self.pin_memory = pin_memory

        # Constants.
        assert isinstance(data_length, int) and data_length > 0
        self.data_length = data_length
        if latent_seq_length is None:
            # Default: slightly longer than the data, leaving room for inserts.
            latent_seq_length = int(data_length * 1.1)
        else:
            assert isinstance(latent_seq_length, int) and latent_seq_length > 0
        self.latent_seq_length = latent_seq_length
        assert isinstance(alphabet_length, int) and alphabet_length > 0
        self.alphabet_length = alphabet_length
        assert isinstance(z_dim, int) and z_dim > 0
        self.z_dim = z_dim

        # Parameter shapes.
        if (not substitution_matrix) or (latent_alphabet_length is None):
            latent_alphabet_length = alphabet_length
        self.latent_alphabet_length = latent_alphabet_length
        self.indel_shape = (latent_seq_length, 3, 2)
        # Flattened size of the decoder output: precursor + insert sequences,
        # plus (optionally) factor-dependent insert/delete parameters. Note
        # `indel_factor_dependence` is a bool used as a 0/1 multiplier here.
        self.total_factor_size = (
            (2 * latent_seq_length + 1) * latent_alphabet_length
            + 2 * indel_factor_dependence * latent_seq_length * 3 * 2
        )

        # Architecture.
        self.indel_factor_dependence = indel_factor_dependence
        self.ARD_prior = ARD_prior
        self.substitution_matrix = substitution_matrix

        # Priors.
        assert isinstance(indel_prior_scale, float)
        self.indel_prior_scale = torch.tensor(indel_prior_scale)
        assert isinstance(indel_prior_bias, float)
        self.indel_prior = torch.tensor([indel_prior_bias, 0.0])
        assert isinstance(inverse_temp_prior, float)
        self.inverse_temp_prior = torch.tensor(inverse_temp_prior)
        assert isinstance(weights_prior_scale, float)
        self.weights_prior_scale = torch.tensor(weights_prior_scale)
        assert isinstance(offset_prior_scale, float)
        self.offset_prior_scale = torch.tensor(offset_prior_scale)
        assert isinstance(epsilon, float)
        self.epsilon = torch.tensor(epsilon)
        assert isinstance(substitution_prior_scale, float)
        self.substitution_prior_scale = torch.tensor(substitution_prior_scale)
        self.z_prior_distribution = z_prior_distribution

        # Batch control.
        assert isinstance(batch_size, int)
        self.batch_size = batch_size

        # Initialize layers.
        self.encoder = Encoder(data_length, alphabet_length, z_dim)
        self.statearrange = Profile(latent_seq_length)

    def decoder(self, z, W, B, inverse_temp):
        """Deterministically map latent z (with factors W and offset B) to MuE
        sequence logits and, optionally, indel logits."""
        # Project.
        v = torch.mm(z, W) + B
        out = dict()

        if self.indel_factor_dependence:
            # Extract insertion and deletion parameters.
            ind0 = (2 * self.latent_seq_length + 1) * self.latent_alphabet_length
            ind1 = ind0 + self.latent_seq_length * 3 * 2
            ind2 = ind1 + self.latent_seq_length * 3 * 2
            insert_v, delete_v = v[:, ind0:ind1], v[:, ind1:ind2]
            insert_v = (
                insert_v.reshape([-1, self.latent_seq_length, 3, 2]) + self.indel_prior
            )
            out["insert_logits"] = insert_v - insert_v.logsumexp(-1, True)
            delete_v = (
                delete_v.reshape([-1, self.latent_seq_length, 3, 2]) + self.indel_prior
            )
            out["delete_logits"] = delete_v - delete_v.logsumexp(-1, True)

        # Extract precursor and insertion sequences.
        ind0 = self.latent_seq_length * self.latent_alphabet_length
        ind1 = ind0 + (self.latent_seq_length + 1) * self.latent_alphabet_length
        precursor_seq_v, insert_seq_v = v[:, :ind0], v[:, ind0:ind1]
        # Sharpen with the (softplus-positive) inverse temperature, then
        # log-normalize over the latent alphabet.
        precursor_seq_v = (precursor_seq_v * softplus(inverse_temp)).reshape(
            [-1, self.latent_seq_length, self.latent_alphabet_length]
        )
        out["precursor_seq_logits"] = precursor_seq_v - precursor_seq_v.logsumexp(
            -1, True
        )
        insert_seq_v = (insert_seq_v * softplus(inverse_temp)).reshape(
            [-1, self.latent_seq_length + 1, self.latent_alphabet_length]
        )
        out["insert_seq_logits"] = insert_seq_v - insert_seq_v.logsumexp(-1, True)

        return out

    def model(self, seq_data, local_scale, local_prior_scale):
        """Generative model: (optionally ARD) pPCA/ICA over z, decoded into a
        MuE (missing-data discrete HMM) observation distribution."""
        # ARD prior.
        if self.ARD_prior:
            # Relevance factors
            alpha = pyro.sample(
                "alpha",
                dist.Gamma(torch.ones(self.z_dim), torch.ones(self.z_dim)).to_event(1),
            )
        else:
            alpha = torch.ones(self.z_dim)

        # Factor and offset.
        W = pyro.sample(
            "W",
            dist.Normal(
                torch.zeros([self.z_dim, self.total_factor_size]),
                torch.ones([self.z_dim, self.total_factor_size])
                * self.weights_prior_scale
                / (alpha[:, None] + self.epsilon),
            ).to_event(2),
        )
        B = pyro.sample(
            "B",
            dist.Normal(
                torch.zeros(self.total_factor_size),
                torch.ones(self.total_factor_size) * self.offset_prior_scale,
            ).to_event(1),
        )

        # Indel probabilities.
        if not self.indel_factor_dependence:
            insert = pyro.sample(
                "insert",
                dist.Normal(
                    self.indel_prior * torch.ones(self.indel_shape),
                    self.indel_prior_scale * torch.ones(self.indel_shape),
                ).to_event(3),
            )
            insert_logits = insert - insert.logsumexp(-1, True)
            delete = pyro.sample(
                "delete",
                dist.Normal(
                    self.indel_prior * torch.ones(self.indel_shape),
                    self.indel_prior_scale * torch.ones(self.indel_shape),
                ).to_event(3),
            )
            delete_logits = delete - delete.logsumexp(-1, True)

        # Inverse temperature.
        inverse_temp = pyro.sample(
            "inverse_temp", dist.Normal(self.inverse_temp_prior, torch.tensor(1.0))
        )

        # Substitution matrix.
        if self.substitution_matrix:
            substitute = pyro.sample(
                "substitute",
                dist.Normal(
                    torch.zeros([self.latent_alphabet_length, self.alphabet_length]),
                    self.substitution_prior_scale
                    * torch.ones([self.latent_alphabet_length, self.alphabet_length]),
                ).to_event(2),
            )

        with pyro.plate("batch", seq_data.shape[0]):
            # local_scale corrects for mini-batch subsampling; the extra
            # local_prior_scale factor implements KL (beta) annealing on the
            # per-datapoint prior term.
            with poutine.scale(scale=local_scale):
                with poutine.scale(scale=local_prior_scale):
                    # Sample latent variable from prior.
                    if self.z_prior_distribution == "Normal":
                        z = pyro.sample(
                            "latent",
                            dist.Normal(
                                torch.zeros(self.z_dim), torch.ones(self.z_dim)
                            ).to_event(1),
                        )
                    elif self.z_prior_distribution == "Laplace":
                        z = pyro.sample(
                            "latent",
                            dist.Laplace(
                                torch.zeros(self.z_dim), torch.ones(self.z_dim)
                            ).to_event(1),
                        )

                # Decode latent sequence.
                decoded = self.decoder(z, W, B, inverse_temp)
                if self.indel_factor_dependence:
                    insert_logits = decoded["insert_logits"]
                    delete_logits = decoded["delete_logits"]

                # Construct HMM parameters.
                if self.substitution_matrix:
                    (
                        initial_logits,
                        transition_logits,
                        observation_logits,
                    ) = self.statearrange(
                        decoded["precursor_seq_logits"],
                        decoded["insert_seq_logits"],
                        insert_logits,
                        delete_logits,
                        substitute,
                    )
                else:
                    (
                        initial_logits,
                        transition_logits,
                        observation_logits,
                    ) = self.statearrange(
                        decoded["precursor_seq_logits"],
                        decoded["insert_seq_logits"],
                        insert_logits,
                        delete_logits,
                    )
                # Draw samples.
                pyro.sample(
                    "obs_seq",
                    MissingDataDiscreteHMM(
                        initial_logits, transition_logits, observation_logits
                    ),
                    obs=seq_data,
                )

    def guide(self, seq_data, local_scale, local_prior_scale):
        """Variational guide: amortized (encoder) posterior over z plus
        mean-field Normal/Gamma posteriors over the global latents."""
        # Register encoder with pyro.
        pyro.module("encoder", self.encoder)

        # ARD weightings.
        if self.ARD_prior:
            alpha_conc = pyro.param("alpha_conc", torch.randn(self.z_dim))
            alpha_rate = pyro.param("alpha_rate", torch.randn(self.z_dim))
            pyro.sample(
                "alpha",
                dist.Gamma(softplus(alpha_conc), softplus(alpha_rate)).to_event(1),
            )
        # Factors.
        W_q_mn = pyro.param("W_q_mn", torch.randn([self.z_dim, self.total_factor_size]))
        W_q_sd = pyro.param("W_q_sd", torch.ones([self.z_dim, self.total_factor_size]))
        pyro.sample("W", dist.Normal(W_q_mn, softplus(W_q_sd)).to_event(2))
        B_q_mn = pyro.param("B_q_mn", torch.randn(self.total_factor_size))
        B_q_sd = pyro.param("B_q_sd", torch.ones(self.total_factor_size))
        pyro.sample("B", dist.Normal(B_q_mn, softplus(B_q_sd)).to_event(1))

        # Indel probabilities.
        if not self.indel_factor_dependence:
            insert_q_mn = pyro.param(
                "insert_q_mn", torch.ones(self.indel_shape) * self.indel_prior
            )
            insert_q_sd = pyro.param("insert_q_sd", torch.zeros(self.indel_shape))
            pyro.sample(
                "insert", dist.Normal(insert_q_mn, softplus(insert_q_sd)).to_event(3)
            )
            delete_q_mn = pyro.param(
                "delete_q_mn", torch.ones(self.indel_shape) * self.indel_prior
            )
            delete_q_sd = pyro.param("delete_q_sd", torch.zeros(self.indel_shape))
            pyro.sample(
                "delete", dist.Normal(delete_q_mn, softplus(delete_q_sd)).to_event(3)
            )

        # Inverse temperature.
        inverse_temp_q_mn = pyro.param("inverse_temp_q_mn", torch.tensor(0.0))
        inverse_temp_q_sd = pyro.param("inverse_temp_q_sd", torch.tensor(0.0))
        pyro.sample(
            "inverse_temp", dist.Normal(inverse_temp_q_mn, softplus(inverse_temp_q_sd))
        )

        # Substitution matrix.
        if self.substitution_matrix:
            substitute_q_mn = pyro.param(
                "substitute_q_mn",
                torch.zeros([self.latent_alphabet_length, self.alphabet_length]),
            )
            substitute_q_sd = pyro.param(
                "substitute_q_sd",
                torch.zeros([self.latent_alphabet_length, self.alphabet_length]),
            )
            pyro.sample(
                "substitute",
                dist.Normal(substitute_q_mn, softplus(substitute_q_sd)).to_event(2),
            )

        # Per datapoint local latent variables.
        with pyro.plate("batch", seq_data.shape[0]):
            # Encode sequences.
            z_loc, z_scale = self.encoder(seq_data)
            # Scale log likelihood to account for mini-batching.
            with poutine.scale(scale=local_scale * local_prior_scale):
                # Sample.
                if self.z_prior_distribution == "Normal":
                    pyro.sample("latent", dist.Normal(z_loc, z_scale).to_event(1))
                elif self.z_prior_distribution == "Laplace":
                    pyro.sample("latent", dist.Laplace(z_loc, z_scale).to_event(1))

    def fit_svi(
        self,
        dataset,
        epochs=2,
        anneal_length=1.0,
        batch_size=None,
        scheduler=None,
        jit=False,
    ):
        """
        Infer approximate posterior with stochastic variational inference.

        This runs :class:`~pyro.infer.svi.SVI`. It is an approximate inference
        method useful for quickly iterating on probabilistic models.

        :param ~torch.utils.data.Dataset dataset: The training dataset.
        :param int epochs: Number of epochs of training.
        :param float anneal_length: Number of epochs over which to linearly
            anneal the prior KL divergence weight from 0 to 1, for improved
            training.
        :param int batch_size: Minibatch size (number of sequences).
        :param pyro.optim.MultiStepLR scheduler: Optimization scheduler.
            (Default: Adam optimizer, 0.01 constant learning rate.)
        :param bool jit: Whether to use a jit compiled ELBO.
        """
        # Setup.
        if batch_size is not None:
            self.batch_size = batch_size
        if scheduler is None:
            # No milestones, so this is effectively a constant learning rate.
            scheduler = MultiStepLR(
                {
                    "optimizer": Adam,
                    "optim_args": {"lr": 0.01},
                    "milestones": [],
                    "gamma": 0.5,
                }
            )
        if self.is_cuda:
            device = torch.device("cuda")
        else:
            device = torch.device("cpu")
        dataload = DataLoader(
            dataset,
            batch_size=batch_size,
            shuffle=True,
            pin_memory=self.pin_memory,
            generator=torch.Generator(device=device),
        )
        # Initialize guide. Run it once on a real batch so every pyro.param
        # site is registered before the optimizer is constructed.
        for seq_data, L_data in dataload:
            if self.is_cuda:
                seq_data = seq_data.cuda()
            self.guide(seq_data, torch.tensor(1.0), torch.tensor(1.0))
            break
        # Setup stochastic variational inference.
        if jit:
            elbo = JitTrace_ELBO(ignore_jit_warnings=True)
        else:
            elbo = Trace_ELBO()
        svi = SVI(self.model, self.guide, scheduler, loss=elbo)

        # Run inference.
        losses = []
        step_i = 1
        t0 = datetime.datetime.now()
        for epoch in range(epochs):
            for seq_data, L_data in dataload:
                if self.is_cuda:
                    seq_data = seq_data.cuda()
                # Rescale to the full-dataset ELBO and apply beta annealing.
                loss = svi.step(
                    seq_data,
                    torch.tensor(len(dataset) / seq_data.shape[0]),
                    self._beta_anneal(step_i, batch_size, len(dataset), anneal_length),
                )
                losses.append(loss)
                scheduler.step()
                step_i += 1
            print(epoch, loss, " ", datetime.datetime.now() - t0)

        return losses

    def _beta_anneal(self, step, batch_size, data_size, anneal_length):
        """Annealing schedule for prior KL term (beta annealing)."""
        if np.allclose(anneal_length, 0.0):
            return torch.tensor(1.0)
        # Linear ramp from 0 to 1 over `anneal_length` epochs, capped at 1.
        anneal_frac = step * batch_size / (anneal_length * data_size)
        return torch.tensor(min([anneal_frac, 1.0]))

    def evaluate(self, dataset_train, dataset_test=None, jit=False):
        """
        Evaluate performance (log probability and per residue perplexity) on
        train and test datasets.

        :param ~torch.utils.data.Dataset dataset_train: The training dataset.
        :param ~torch.utils.data.Dataset dataset_test: The testing dataset
            (optional).
        :param bool jit: Whether to use a jit compiled ELBO.
        """
        dataload_train = DataLoader(dataset_train, batch_size=1, shuffle=False)
        if dataset_test is not None:
            dataload_test = DataLoader(dataset_test, batch_size=1, shuffle=False)
        # Initialize guide (one pass over a real batch registers all params).
        for seq_data, L_data in dataload_train:
            if self.is_cuda:
                seq_data = seq_data.cuda()
            self.guide(seq_data, torch.tensor(1.0), torch.tensor(1.0))
            break
        if jit:
            elbo = JitTrace_ELBO(ignore_jit_warnings=True)
        else:
            elbo = Trace_ELBO()
        # Scheduler is only needed to construct the SVI object; no steps taken.
        scheduler = MultiStepLR({"optimizer": Adam, "optim_args": {"lr": 0.01}})
        # Setup stochastic variational inference.
        svi = SVI(self.model, self.guide, scheduler, loss=elbo)

        # Compute elbo and perplexity.
        train_lp, train_perplex = self._evaluate_local_elbo(
            svi, dataload_train, len(dataset_train)
        )
        if dataset_test is not None:
            test_lp, test_perplex = self._evaluate_local_elbo(
                svi, dataload_test, len(dataset_test)
            )
            return train_lp, test_lp, train_perplex, test_perplex
        else:
            return train_lp, None, train_perplex, None

    def _local_variables(self, name, site):
        """Return per datapoint random variables in model.

        Signature matches the site filter accepted by ``Trace.log_prob_sum``.
        """
        return name in ["latent", "obs_L", "obs_seq"]

    def _evaluate_local_elbo(self, svi, dataload, data_size):
        """Evaluate elbo and average per residue perplexity."""
        lp, perplex = 0.0, 0.0
        with torch.no_grad():
            for seq_data, L_data in dataload:
                if self.is_cuda:
                    seq_data, L_data = seq_data.cuda(), L_data.cuda()
                conditioned_model = poutine.condition(
                    self.model, data={"obs_seq": seq_data}
                )
                args = (seq_data, torch.tensor(1.0), torch.tensor(1.0))
                # Replay the model against a guide sample and accumulate only
                # the per-datapoint (local) log probability terms.
                guide_tr = poutine.trace(self.guide).get_trace(*args)
                model_tr = poutine.trace(
                    poutine.replay(conditioned_model, trace=guide_tr)
                ).get_trace(*args)
                local_elbo = (
                    (
                        model_tr.log_prob_sum(self._local_variables)
                        - guide_tr.log_prob_sum(self._local_variables)
                    )
                    .cpu()
                    .numpy()
                )
                lp += local_elbo
                # Per-residue normalization uses the sequence length L_data.
                perplex += -local_elbo / L_data[0].cpu().numpy()
        perplex = np.exp(perplex / data_size)
        return lp, perplex

    def embed(self, dataset, batch_size=None):
        """
        Get the latent space embedding (mean posterior value of z).

        :param ~torch.utils.data.Dataset dataset: The dataset to embed.
        :param int batch_size: Minibatch size (number of sequences). (Defaults
            to batch_size of the model object.)
        """
        if batch_size is None:
            batch_size = self.batch_size
        dataload = DataLoader(dataset, batch_size=batch_size, shuffle=False)
        with torch.no_grad():
            z_locs, z_scales = [], []
            for seq_data, L_data in dataload:
                if self.is_cuda:
                    seq_data = seq_data.cuda()
                z_loc, z_scale = self.encoder(seq_data)
                z_locs.append(z_loc.cpu())
                z_scales.append(z_scale.cpu())

        return torch.cat(z_locs), torch.cat(z_scales)

    def _reconstruct_regressor_seq(self, data, ind, param):
        """Reconstruct the latent regressor sequence given data.

        `param` is expected to be the Pyro param-store getter (e.g.
        ``pyro.param``), used to fetch posterior mean parameters.
        """
        with torch.no_grad():
            # Encode seq.
            z_loc = self.encoder(data[ind][0])[0]
            # Reconstruct
            decoded = self.decoder(
                z_loc, param("W_q_mn"), param("B_q_mn"), param("inverse_temp_q_mn")
            )
            return torch.exp(decoded["precursor_seq_logits"])
|
pyro-pplREPO_NAMEpyroPATH_START.@pyro_extracted@pyro-master@pyro@contrib@mue@models.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "mirochaj/ares",
"repo_path": "ares_extracted/ares-main/ares/data/__init__.py",
"type": "Python"
}
|
# Absolute path of this package directory (first entry of the package
# ``__path__``).
_ARES = __path__[0]
# Repository root: the package path truncated at the last occurrence of
# ``'ares/'``. NOTE(review): if ``'ares/'`` is absent, ``rfind`` returns -1
# and this silently drops the final character instead — presumably the
# installed path always contains ``ares/``; verify against the install layout.
ARES = _ARES[0:_ARES.rfind('ares/')]
|
mirochajREPO_NAMEaresPATH_START.@ares_extracted@ares-main@ares@data@__init__.py@.PATH_END.py
|
{
"filename": "_sizesrc.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/scattergl/marker/_sizesrc.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class SizesrcValidator(_plotly_utils.basevalidators.SrcValidator):
    """Validator for the ``sizesrc`` attribute of ``scattergl.marker``."""

    def __init__(self, plotly_name="sizesrc", parent_name="scattergl.marker", **kwargs):
        # Default to the "none" edit type unless the caller overrides it.
        edit_type = kwargs.pop("edit_type", "none")
        super(SizesrcValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@scattergl@marker@_sizesrc.py@.PATH_END.py
|
{
"filename": "process_tianlai_baselines.py",
"repo_name": "philbull/RadioFisher",
"repo_path": "RadioFisher_extracted/RadioFisher-master/process_tianlai_baselines.py",
"type": "Python"
}
|
#!/usr/bin/python
"""
Output the Tianlai baseline density distribution.
"""
import numpy as np
import pylab as P
import scipy.integrate
# Observing frequency and corresponding wavelength.
nu = 1000. # MHz
l = 3e8 / (nu * 1e6) # Lambda [m]
# Full Tianlai array configuration.
root = "TIANLAI"
Ddish = 15.
Dmin = 15.
Ndish = 256 * 8
array_config = "array_config/TIANLAI_baselines.npy"
outfile = "array_config/nx_TIANLAI_%d.dat" % nu
# NOTE: the pathfinder settings below overwrite the full-array settings above,
# so as written the script processes the *pathfinder* configuration. Comment
# out one of the two blocks to switch.
root = "TIANLAIpathfinder"
Ddish = 15.
Dmin = 15.
Ndish = 32 * 3
array_config = "array_config/TIANLAIpath_baselines.npy"
outfile = "array_config/nx_TIANLAIpath_%d.dat" % nu
def fov(nu, D):
    """Field of view in rad^2 at frequency `nu` [MHz] for dish diameter `D` [m].

    Uses the diffraction-limited beam width 1.22 * lambda / D.
    """
    wavelength = 3e8 / (nu * 1e6)
    return 180. * 1.22 * (wavelength / D) * (np.pi / 180.)**2.
def ubin_width(nu, D):
    """Width du of a u-bin (u = d / lambda) at frequency `nu` [MHz].

    Chosen as one tenth of the inverse beam scale, 1 / (10 * sqrt(FOV)).
    """
    du = (1./10.) / np.sqrt(fov(nu, D))  # 1/10 of inverse beam scale
    return du
# Load baseline lengths [m] for the selected array configuration.
dat = np.load(array_config).T
# Remove D < Ddish baselines
dat = dat[np.where(dat > Ddish)] # Cut sub-FOV baselines
dat /= l # Rescale into u = d / lambda
# Calculate bin edges
du = ubin_width(nu, Ddish)
imax = int(np.max(dat) / du) + 1
edges = np.linspace(0., imax * du, imax+1)
# Calculate histogram (no. baselines in each ring of width du)
bins, edges = np.histogram(dat, edges)
u = np.array([0.5*(edges[i+1] + edges[i]) for i in range(edges.size-1)]) # Centroids
#idxs = np.where(u < Dmin/l)
#for i in range(bins.size):
#    print "%2d [%3.1f -- %3.1f]: %d" % (i, edges[i], edges[i+1], bins[i])
# Convert to a density, n(u)
nn = bins / (2. * np.pi * u * du)
# Integrate n(u) to find normalisation (should give unity if no baseline cuts applied)
norm = scipy.integrate.simps(2.*np.pi*nn*u, u)
# NOTE: Python-2 print statements below; this script targets Python 2.
print "n(u) renorm. factor:", 0.5 * Ndish * (Ndish - 1) / norm, "(not applied)"
#n *= 0.5 * Ndish * (Ndish - 1) / norm
# Convert to freq.-independent expression, n(x) = n(u) * nu^2,
# where nu is in MHz.
n_x = nn * nu**2.
x = u / nu
# Save the frequency-independent baseline density n(x) to disk.
np.savetxt(outfile, np.column_stack((x, n_x)))
print "Saved to %s." % outfile
# Quick-look plot of n(u) with the minimum-baseline cut marked.
P.plot(u, nn)
P.axvline(Dmin/l, color='r')
P.show()
|
philbullREPO_NAMERadioFisherPATH_START.@RadioFisher_extracted@RadioFisher-master@process_tianlai_baselines.py@.PATH_END.py
|
{
"filename": "_connector.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/graph_objs/waterfall/_connector.py",
"type": "Python"
}
|
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Connector(_BaseTraceHierarchyType):
    """Auto-generated plotly hierarchy node for ``waterfall.connector``:
    styling of the lines connecting consecutive waterfall bars
    (``line``, ``mode`` and ``visible`` properties)."""

    # class properties
    # --------------------
    _parent_path_str = "waterfall"
    _path_str = "waterfall.connector"
    _valid_props = {"line", "mode", "visible"}

    # line
    # ----
    @property
    def line(self):
        """
        The 'line' property is an instance of Line
        that may be specified as:
          - An instance of :class:`plotly.graph_objs.waterfall.connector.Line`
          - A dict of string/value properties that will be passed
            to the Line constructor

            Supported dict properties:

                color
                    Sets the line color.
                dash
                    Sets the dash style of lines. Set to a dash
                    type string ("solid", "dot", "dash",
                    "longdash", "dashdot", or "longdashdot") or a
                    dash length list in px (eg "5px,10px,2px,2px").
                width
                    Sets the line width (in px).

        Returns
        -------
        plotly.graph_objs.waterfall.connector.Line
        """
        return self["line"]

    @line.setter
    def line(self, val):
        self["line"] = val

    # mode
    # ----
    @property
    def mode(self):
        """
        Sets the shape of connector lines.

        The 'mode' property is an enumeration that may be specified as:
          - One of the following enumeration values:
                ['spanning', 'between']

        Returns
        -------
        Any
        """
        return self["mode"]

    @mode.setter
    def mode(self, val):
        self["mode"] = val

    # visible
    # -------
    @property
    def visible(self):
        """
        Determines if connector lines are drawn.

        The 'visible' property must be specified as a bool
        (either True, or False)

        Returns
        -------
        bool
        """
        return self["visible"]

    @visible.setter
    def visible(self, val):
        self["visible"] = val

    # Self properties description
    # ---------------------------
    @property
    def _prop_descriptions(self):
        return """\
        line
            :class:`plotly.graph_objects.waterfall.connector.Line`
            instance or dict with compatible properties
        mode
            Sets the shape of connector lines.
        visible
            Determines if connector lines are drawn.
        """

    def __init__(self, arg=None, line=None, mode=None, visible=None, **kwargs):
        """
        Construct a new Connector object

        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of
            :class:`plotly.graph_objs.waterfall.Connector`
        line
            :class:`plotly.graph_objects.waterfall.connector.Line`
            instance or dict with compatible properties
        mode
            Sets the shape of connector lines.
        visible
            Determines if connector lines are drawn.

        Returns
        -------
        Connector
        """
        super(Connector, self).__init__("connector")
        # Internal fast path: when constructed by a parent node, adopt the
        # parent and skip validation/population entirely.
        if "_parent" in kwargs:
            self._parent = kwargs["_parent"]
            return

        # Validate arg
        # ------------
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            # Shallow-copy so the caller's dict is not mutated by the pops below.
            arg = _copy.copy(arg)
        else:
            raise ValueError(
                """\
The first argument to the plotly.graph_objs.waterfall.Connector
constructor must be a dict or
an instance of :class:`plotly.graph_objs.waterfall.Connector`"""
            )

        # Handle skip_invalid
        # -------------------
        self._skip_invalid = kwargs.pop("skip_invalid", False)
        self._validate = kwargs.pop("_validate", True)

        # Populate data dict with properties
        # ----------------------------------
        # Explicit keyword arguments take precedence over values in `arg`.
        _v = arg.pop("line", None)
        _v = line if line is not None else _v
        if _v is not None:
            self["line"] = _v
        _v = arg.pop("mode", None)
        _v = mode if mode is not None else _v
        if _v is not None:
            self["mode"] = _v
        _v = arg.pop("visible", None)
        _v = visible if visible is not None else _v
        if _v is not None:
            self["visible"] = _v

        # Process unknown kwargs
        # ----------------------
        self._process_kwargs(**dict(arg, **kwargs))

        # Reset skip_invalid
        # ------------------
        self._skip_invalid = False
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@graph_objs@waterfall@_connector.py@.PATH_END.py
|
{
"filename": "split_dota.md",
"repo_name": "ultralytics/ultralytics",
"repo_path": "ultralytics_extracted/ultralytics-main/docs/en/reference/data/split_dota.md",
"type": "Markdown"
}
|
---
description: Learn how to utilize the ultralytics.data.split_dota module to process and split DOTA datasets efficiently. Explore detailed functions and examples.
keywords: Ultralytics, DOTA dataset, data splitting, YOLO, Python, bbox_iof, load_yolo_dota, get_windows, crop_and_save
---
# Reference for `ultralytics/data/split_dota.py`
!!! note
This file is available at [https://github.com/ultralytics/ultralytics/blob/main/ultralytics/data/split_dota.py](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/data/split_dota.py). If you spot a problem please help fix it by [contributing](https://docs.ultralytics.com/help/contributing/) a [Pull Request](https://github.com/ultralytics/ultralytics/edit/main/ultralytics/data/split_dota.py) 🛠️. Thank you 🙏!
<br>
## ::: ultralytics.data.split_dota.bbox_iof
<br><br><hr><br>
## ::: ultralytics.data.split_dota.load_yolo_dota
<br><br><hr><br>
## ::: ultralytics.data.split_dota.get_windows
<br><br><hr><br>
## ::: ultralytics.data.split_dota.get_window_obj
<br><br><hr><br>
## ::: ultralytics.data.split_dota.crop_and_save
<br><br><hr><br>
## ::: ultralytics.data.split_dota.split_images_and_labels
<br><br><hr><br>
## ::: ultralytics.data.split_dota.split_trainval
<br><br><hr><br>
## ::: ultralytics.data.split_dota.split_test
<br><br>
|
ultralyticsREPO_NAMEultralyticsPATH_START.@ultralytics_extracted@ultralytics-main@docs@en@reference@data@split_dota.md@.PATH_END.py
|
{
"filename": "2024-06-07_13:43-269c15476fb1.py",
"repo_name": "simonsobs/nextline-rdb",
"repo_path": "nextline-rdb_extracted/nextline-rdb-main/src/nextline_rdb/alembic/versions/2024-06-07_13:43-269c15476fb1.py",
"type": "Python"
}
|
"""Delete "run_no" from "trace"
Revision ID: 269c15476fb1
Revises: 8d24d9c2e9ba
Create Date: 2024-06-07 13:43:16.746754
"""
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = '269c15476fb1'
down_revision = '8d24d9c2e9ba'
branch_labels = None
depends_on = None
def upgrade():
    """Drop the redundant ``run_no`` column from the ``trace`` table."""
    # Disable the foreign key constraints during the migration.
    # https://alembic.sqlalchemy.org/en/latest/batch.html#dealing-with-referencing-foreign-keys
    op.execute('PRAGMA foreign_keys=OFF;')
    # SQLite cannot drop columns in place; batch mode rebuilds the table.
    with op.batch_alter_table('trace', schema=None) as batch_op:
        batch_op.drop_column('run_no')
    # Re-enable the foreign key constraints
    op.execute('PRAGMA foreign_keys=ON;')
def downgrade():
    """Re-add ``trace.run_no``, backfilled from the parent ``run`` row."""
    # Disable FK checks while batch mode rebuilds the table (see upgrade()).
    op.execute('PRAGMA foreign_keys=OFF;')
    # Add the column as nullable first so existing rows are valid.
    with op.batch_alter_table('trace', schema=None) as batch_op:
        batch_op.add_column(sa.Column('run_no', sa.Integer(), nullable=True))
    # Backfill run_no from the related run row.
    op.execute(
        '''
        UPDATE trace
        SET run_no = (SELECT run_no FROM run WHERE run.id = trace.run_id)
        '''
    )
    # Now that every row has a value, tighten the column to NOT NULL.
    with op.batch_alter_table('trace', schema=None) as batch_op:
        batch_op.alter_column('run_no', nullable=False)
    # ### end Alembic commands ###
    op.execute('PRAGMA foreign_keys=ON;')
|
simonsobsREPO_NAMEnextline-rdbPATH_START.@nextline-rdb_extracted@nextline-rdb-main@src@nextline_rdb@alembic@versions@2024-06-07_13:43-269c15476fb1.py@.PATH_END.py
|
{
"filename": "telescopes_controller.py",
"repo_name": "plazar/TOASTER",
"repo_path": "TOASTER_extracted/TOASTER-master/webtoaster/app/controllers/telescopes_controller.py",
"type": "Python"
}
|
from django.http import Http404
from django.http import HttpResponse
from django.http import HttpResponseRedirect
from django.shortcuts import *
from django.template import Context, RequestContext
from django.template import loader
from app.models import *
from httplib import HTTPResponse
from lib.toaster import Telescopes
from django.core.context_processors import csrf
from django.conf import settings
from django.contrib.auth.models import User
from django.contrib.auth import authenticate, login, logout
from django.core.exceptions import ObjectDoesNotExist
import oauth2
from django.contrib.auth.decorators import login_required
from django import forms
from django.core.paginator import Paginator
class TelescopeForm(forms.Form):
    """Django form used to create/validate a telescope entry."""
    # Required identification fields.
    name = forms.CharField(required=True, max_length=64)
    # Required ITRF (Earth-centred, Earth-fixed) coordinates in metres
    # — presumably metres; TODO confirm against the Toaster backend.
    itrf_x = forms.FloatField(required=True)
    itrf_y = forms.FloatField(required=True)
    itrf_z = forms.FloatField(required=True)
    abbrev = forms.CharField(required=True, max_length=16)
    # Two-character telescope code.
    code = forms.CharField(required=True, max_length=2)
    # Optional geodetic position and datum.
    latitude = forms.FloatField(required=False)
    longitude = forms.FloatField(required=False)
    datum = forms.CharField(required=False, max_length=64)
def index(request):
    """List telescopes, paginated.

    GET parameters:
        page: 1-based page number (default 1).
        sort_by: column name to sort by (default 'id').
        order: 'asc' or 'desc' (default 'desc').

    Returns the rendered 'telescopes/index.html' page.
    """
    page = int(request.GET.get('page') or 1)
    sort_by = request.GET.get('sort_by') or 'id'
    # BUG FIX: the original wrote `order == 'desc'` (a no-op comparison)
    # instead of an assignment, leaving `order` as None when absent.
    order = request.GET.get('order') or 'desc'
    per_page = 2
    telescopes = Telescope.objects.all()
    telescopes_paged = Paginator(telescopes, per_page)
    telescopes_current_page = telescopes_paged.page(page)
    t = loader.get_template('telescopes/index.html')
    c = RequestContext(request, {
        'telescopes': telescopes_current_page,
        'page_range': telescopes_paged.page_range,
        'sort_by': sort_by,
        'order': order
    })
    return HttpResponse(t.render(c))
def new(request):
    """Create a new telescope.

    GET renders an empty form. POST validates the form and, on success,
    adds the telescope via the Toaster backend and redirects to its
    detail page; on failure a 'flash' message is stored in the session
    and the form is re-rendered.
    """
    if request.method == 'GET':
        form = TelescopeForm()
        aliases = list()
    elif request.method == 'POST':
        aliases = request.POST.getlist('aliases[]')
        form = TelescopeForm(request.POST)
        if form.is_valid():
            try:
                response = Telescopes.add(name=form.cleaned_data['name'],
                                          itrf_x=form.cleaned_data['itrf_x'],
                                          itrf_y=form.cleaned_data['itrf_y'],
                                          itrf_z=form.cleaned_data['itrf_z'],
                                          abbrev=form.cleaned_data['abbrev'],
                                          code=form.cleaned_data['code'],
                                          aliases=aliases)
                request.session['flash'] = { 'type': 'success', 'message': 'Telescope was succesfully added with iD: %i' % response }
                return redirect("/webtoaster/telescopes/%i/" % response)
            # BUG FIX: `except Exception, e` is Python-2-only syntax (a
            # SyntaxError on Python 3); the `as` form works on 2.6+ and 3.
            except Exception as e:
                request.session['flash'] = { 'type': 'error', 'message': 'Toaster produced an error: %s' % str(e)}
        else:
            request.session['flash'] = { 'type': 'error', 'message': 'Please verify your form' }
    t = loader.get_template('telescopes/new.html')
    c = RequestContext(request, {
        'form': form,
        'aliases': aliases,
    })
    return HttpResponse(t.render(c))
def show(request, telescope_id):
    """Display a single telescope.

    BUG FIX: the original body was a broken copy-paste from the pulsar
    controller — it referenced undefined names (`pulsar_id`, `pulsar`)
    and the pulsar template. Use the `telescope_id` argument and the
    telescope templates (template path presumed to mirror
    'telescopes/index.html' — confirm the template exists).
    """
    telescope_id = int(telescope_id)
    telescope = Telescopes.show(telescopes_ids=[telescope_id])[0]
    t = loader.get_template('telescopes/show.html')
    c = RequestContext(request, {
        'telescope': telescope,
    })
    return HttpResponse(t.render(c))
def get_columns(pulsars):
    """Return the column names of the first entry in a dict of dicts.

    BUG FIX: the original used `pulsars.keys()[0]`, which is Python-2-only
    (dict views are not indexable on Python 3); `next(iter(...))` picks the
    same arbitrary first key on both versions.
    """
    first_key = next(iter(pulsars))
    return list(pulsars[first_key].keys())
|
plazarREPO_NAMETOASTERPATH_START.@TOASTER_extracted@TOASTER-master@webtoaster@app@controllers@telescopes_controller.py@.PATH_END.py
|
{
"filename": "filtercore.py",
"repo_name": "lsst-uk/lasair-lsst",
"repo_path": "lasair-lsst_extracted/lasair-lsst-main/pipeline/filter/filtercore.py",
"type": "Python"
}
|
"""
The core filter module. Usually run as a service using filter_runner, but can also be run from the command line.
Usage:
    filtercore.py [--maxalert=MAX]
[--maxbatch=MAX]
[--group_id=GID]
[--topic_in=TIN]
[--local_db=NAME]
[--send_email=BOOL]
[--send_kafka=BOOL]
[--transfer=BOOL]
[--stats=BOOL]
Options:
--maxalert=MAX Number of alerts to process per batch, default is defined in settings.KAFKA_MAXALERTS
--maxbatch=MAX Maximum number of batches to process, default is unlimited
--group_id=GID Group ID for kafka, default is defined in settings.KAFKA_GROUPID
--topic_in=TIN Kafka topic to use [default: ztf_sherlock]
--local_db=NAME Name of local database to use [default: ztf]
--send_email=BOOL Send email [default: True]
--send_kafka=BOOL Send kafka [default: True]
--transfer=BOOL Transfer results to main [default: True]
--stats=BOOL Write stats [default: True]
"""
import os
import sys
import time
import signal
import json
import tempfile
import math
from typing import Union
import requests
import urllib
import urllib.parse
import numbers
import confluent_kafka
from datetime import datetime
from docopt import docopt
sys.path.append('../../common')
import settings
sys.path.append('../../common/src')
import date_nid
import db_connect
import manage_status
import lasairLogging
import logging
import filters
import watchlists
import watchmaps
import mmagw
sys.path.append('../../common/schema/lasair_schema')
from features.FeatureGroup import FeatureGroup
def now():
    """Current UTC wall-clock time as an 'HH:MM:SS' string."""
    timestamp = datetime.utcnow()
    return timestamp.strftime("%H:%M:%S")
class Filter:
    """Filter orchestrates the filter pipeline stage.
    """

    def __init__(self,
                 topic_in: str = 'ztf_sherlock',
                 group_id: str = settings.KAFKA_GROUPID,
                 maxalert: (Union[int, str]) = settings.KAFKA_MAXALERTS,
                 local_db: str = None,
                 send_email: bool = True,
                 send_kafka: bool = True,
                 transfer: bool = True,
                 stats: bool = True,
                 log=None):
        """Configure the filter stage (no connections are opened here —
        see setup()).

        Args:
            topic_in: Kafka topic to consume alerts from.
            group_id: Kafka consumer group id.
            maxalert: maximum number of alerts to consume per batch.
            local_db: name of the local database (defaults to 'ztf').
            send_email, send_kafka, transfer, stats: feature switches.
            log: logger to use; defaults to the shared "filter" logger.
        """
        self.topic_in = topic_in
        self.group_id = group_id
        self.maxalert = int(maxalert)
        self.local_db = local_db or 'ztf'
        self.send_email = send_email
        self.send_kafka = send_kafka
        self.transfer = transfer
        self.stats = stats
        # Lazily initialised in setup().
        self.consumer = None
        self.database = None
        self.log = log or lasairLogging.getLogger("filter")
        self.log.info('Topic_in=%s, group_id=%s, maxalert=%d' % (self.topic_in, self.group_id, self.maxalert))
        # catch SIGTERM so that we can finish processing cleanly
        self.prv_sigterm_handler = signal.signal(signal.SIGTERM, self._sigterm_handler)
        self.sigterm_raised = False

    def setup(self):
        """Set up connections to Kafka, database, etc. if not already done. It is safe to call this multiple
        times. We do this separately from __init__ mostly to facilitate testing."""
        # set up the Kafka consumer now
        if not self.consumer:
            self.consumer = self.make_kafka_consumer()
        # set up the link to the local database
        if not self.database or not self.database.is_connected():
            try:
                self.database = db_connect.local(self.local_db)
            except Exception as e:
                self.log.error('ERROR in Filter: cannot connect to local database' + str(e))

    def _sigterm_handler(self, signum, frame):
        """Handle SIGTERM by raising a flag that can be checked during the poll/process loop.
        """
        self.sigterm_raised = True
        self.log.debug("caught SIGTERM")
        # if we have already set a non-default handler then call that too
        # NOTE(review): `X is not signal.SIG_DFL and not None` parses as
        # `(X is not SIG_DFL) and (not None)`, and `not None` is always True,
        # so the None check is a no-op; if the previous handler is None this
        # would raise TypeError when called. Intended was probably
        # `... and self.prv_sigterm_handler is not None` — confirm and fix.
        if self.prv_sigterm_handler is not signal.SIG_DFL and not None:
            self.prv_sigterm_handler(signum, frame)

    def execute_query(self, query: str):
        """ execute_query: run a query and close it, and complain to the log if failure.

        Re-raises the exception after logging the failing query.
        """
        try:
            cursor = self.database.cursor(buffered=True)
            cursor.execute(query)
            cursor.close()
            self.database.commit()
        except Exception as e:
            self.log.error('ERROR filter/execute_query: %s' % str(e))
            self.log.info(query)
            raise

    def truncate_local_database(self):
        """ Truncate all the tables in the local database.
        """
        # NOTE(review): 'mma_area_hits' is transferred in transfer_to_main()
        # but is not truncated here — confirm this is intentional.
        self.execute_query('TRUNCATE TABLE objects')
        self.execute_query('TRUNCATE TABLE sherlock_classifications')
        self.execute_query('TRUNCATE TABLE watchlist_hits')
        self.execute_query('TRUNCATE TABLE area_hits')

    def make_kafka_consumer(self):
        """ Make a kafka consumer.

        Returns the subscribed consumer, or None if connection failed
        (the failure is logged, not raised).
        """
        conf = {
            'bootstrap.servers': '%s' % settings.KAFKA_SERVER,
            'enable.auto.commit': False,  # require explicit commit!
            'group.id': self.group_id,
            'max.poll.interval.ms': 20 * 60 * 1000,  # 20 minute timeout in case queries take time
            'default.topic.config': {
                'auto.offset.reset': 'earliest'
            }
        }
        self.log.info(str(conf))
        self.log.info('Topic in = %s' % self.topic_in)
        try:
            consumer = confluent_kafka.Consumer(conf)
            consumer.subscribe([self.topic_in])
            return consumer
        except Exception as e:
            self.log.error('ERROR cannot connect to kafka' + str(e))

    @staticmethod
    def create_insert_sherlock(ann: dict):
        """create_insert_sherlock.
        Makes the insert (REPLACE INTO sherlock_classifications) query for
        the sherlock classification.

        Args:
            ann: sherlock annotation dict; keys not in the known attribute
                list are ignored, missing ones become NULL.
        """
        # all the sherlock attrs that we want for the database
        attrs = [
            "classification",
            "diaObjectId",
            "association_type",
            "catalogue_table_name",
            "catalogue_object_id",
            "catalogue_object_type",
            "raDeg",
            "decDeg",
            "separationArcsec",
            "northSeparationArcsec",
            "eastSeparationArcsec",
            "physical_separation_kpc",
            "direct_distance",
            "distance",
            "z",
            "photoZ",
            "photoZErr",
            "Mag",
            "MagFilter",
            "MagErr",
            "classificationReliability",
            "major_axis_arcsec",
            "annotator",
            "additional_output",
            "description",
            "summary",
        ]
        sets = {}
        for key in attrs:
            sets[key] = None
        for key, value in ann.items():
            if key in attrs and value:
                sets[key] = value
        if 'description' in attrs and 'description' not in ann:
            sets['description'] = 'no description'
        # Build the query
        query_list = []
        query = 'REPLACE INTO sherlock_classifications SET '
        for key, value in sets.items():
            if value is None:
                query_list.append(key + '=NULL')
            else:
                # Single quotes are stripped from values rather than escaped.
                query_list.append(key + '=' + "'" + str(value).replace("'", '') + "'")
        query += ',\n'.join(query_list)
        return query

    @staticmethod
    def create_insert_query(alert: dict):
        """create_insert_query.
        Creates an insert sql statement for building the object and
        a query for inserting it. Returns None when the feature
        computation rejects the alert.

        Args:
            alert: alert dict as consumed from Kafka.
        """
        lasair_features = FeatureGroup.run_all(alert)
        if not lasair_features:
            return None
        # Make the query
        query_list = []
        query = 'REPLACE INTO objects SET '
        for key, value in lasair_features.items():
            if not value:
                query_list.append(key + '=NULL')
            elif isinstance(value, numbers.Number) and math.isnan(value):
                query_list.append(key + '=NULL')
            elif isinstance(value, str):
                query_list.append(key + '="' + str(value) + '"')
            else:
                query_list.append(key + '=' + str(value))
        query += ',\n'.join(query_list)
        return query

    def handle_alert(self, alert: dict):
        """alert_filter: handle a single alert.

        Returns 1 if the alert was ingested into the local database,
        0 if it was rejected (no detections, or no features).
        """
        # Filter to apply to each alert.
        diaObjectId = alert['diaObject']['diaObjectId']
        # really not interested in alerts that have no detections!
        if len(alert['diaSourcesList']) == 0:
            return 0
        # build the insert query for this object.
        # if not wanted, returns 0
        query = Filter.create_insert_query(alert)
        if not query:
            return 0
        self.execute_query(query)
        # now ingest the sherlock_classifications
        if 'annotations' in alert:
            annotations = alert['annotations']
            if 'sherlock' in annotations:
                for ann in annotations['sherlock']:
                    if "transient_object_id" in ann:
                        ann.pop('transient_object_id')
                    ann['diaObjectId'] = diaObjectId
                    query = Filter.create_insert_sherlock(ann)
                    self.execute_query(query)
        return 1

    def consume_alerts(self):
        """Consume a batch of alerts from Kafka.

        Polls until maxalert alerts are read, the topic is exhausted,
        SIGTERM arrives, or more than 100 poll errors occur.
        Returns the number of alerts ingested into the local database.
        """
        nalert_in = nalert_out = 0
        startt = time.time()
        errors = 0
        while nalert_in < self.maxalert:
            if self.sigterm_raised:
                # clean shutdown - stop the consumer
                self.log.info("Caught SIGTERM, aborting.")
                break
            # Here we get the next alert by kafka
            msg = self.consumer.poll(timeout=5)
            if msg is None:
                print('message is null')
                break
            if msg.error():
                self.log.error("ERROR polling Kafka: " + str(msg.error()))
                errors += 1
                if errors > 100:
                    break
                continue
            if msg.value() is None:
                print('message value is null')
                continue
            # Apply filter to each alert
            alert = json.loads(msg.value())
            nalert_in += 1
            # print(json.dumps(alert, indent=2)) ##############
            d = self.handle_alert(alert)
            nalert_out += d
            if nalert_in % 1000 == 0:
                self.log.info('nalert_in %d nalert_out %d time %.1f' % \
                        (nalert_in, nalert_out, time.time() - startt))
                sys.stdout.flush()
                # refresh the database every 1000 alerts
                # make sure everything is committed
                self.database.commit()
        self.log.info('finished %d in, %d out' % (nalert_in, nalert_out))
        if self.stats:
            ms = manage_status.manage_status()
            nid = date_nid.nid_now()
            ms.add({
                'today_filter': nalert_in,
                'today_filter_out': nalert_out,
            }, nid)
        return nalert_out

    def transfer_to_main(self):
        """ Transfer the local database to the main database.

        Dumps each local table to CSV, bulk-loads them into the main
        database, and — only if every table transferred — commits the
        Kafka offsets for this batch. Returns True on success.
        """
        cmd = 'sudo --non-interactive rm /data/mysql/*.txt'
        os.system(cmd)
        tablelist = [
            'objects',
            'sherlock_classifications',
            'watchlist_hits',
            'area_hits',
            'mma_area_hits',
        ]
        # Make a CSV file for each local table
        for table in tablelist:
            query = """
            SELECT * FROM %s INTO OUTFILE '/data/mysql/%s.txt'
            FIELDS TERMINATED BY ',' ENCLOSED BY '\"' LINES TERMINATED BY '\\n';
            """ % (table, table)
            try:
                self.execute_query(query)
            except:
                self.log.error('ERROR in filter/transfer_to_main: cannot build CSV from local database')
                return False
        # Transmit the CSV files to the main database and ingest them
        try:
            main_database = db_connect.remote(allow_infile=True)
        except Exception as e:
            self.log.error('ERROR filter/transfer_to_main: %s' % str(e))
            return False
        commit = True
        for table in tablelist:
            sql = "LOAD DATA LOCAL INFILE '/data/mysql/%s.txt' " % table
            sql += "REPLACE INTO TABLE %s FIELDS TERMINATED BY ',' " % table
            sql += "ENCLOSED BY '\"' LINES TERMINATED BY '\n'"
            try:
                cursor = main_database.cursor(buffered=True)
                cursor.execute(sql)
                cursor.close()
                main_database.commit()
                self.log.info('%s ingested to main db' % table)
            except Exception as e:
                self.log.error('ERROR in filter/transfer_to_main: cannot push %s local to main database: %s' % (table, str(e)))
                commit = False
                break
        main_database.close()
        if commit:
            # Offsets are committed only after a fully successful transfer,
            # so a failed batch will be re-consumed.
            self.consumer.commit()
            self.log.info('Kafka committed for this batch')
        return commit

    def write_stats(self, timers: dict, nalerts: int):
        """ Write the statistics to lasair status and to prometheus.

        Args:
            timers: dict of manage_status.timer objects for this batch.
            nalerts: number of alerts processed (0 -> NaN lag stats).
        """
        if not self.stats:
            return
        ms = manage_status.manage_status()
        nid = date_nid.nid_now()
        d = Filter.batch_statistics()
        ms.set({
            'today_lsst': Filter.grafana_today(),
            'today_database': d['count'],
            'total_count': d['total_count'],
            'min_delay': d['since'],  # hours since most recent alert
            'nid': nid},
            nid)
        for name, td in timers.items():
            td.add2ms(ms, nid)
        if nalerts > 0:
            # delays come back in minutes; exported here scaled by 60
            min_str = "{:d}".format(int(d['min_delay'] * 60))
            avg_str = "{:d}".format(int(d['avg_delay'] * 60))
            max_str = "{:d}".format(int(d['max_delay'] * 60))
        else:
            min_str = "NaN"
            avg_str = "NaN"
            max_str = "NaN"
        # t = int(1000*time.time())
        s = '#HELP lasair_alert_batch_lag Lasair alert batch lag stats\n'
        s += '#TYPE gauge\n'
        s += 'lasair_alert_batch_lag{type="min"} %s\n' % min_str
        s += 'lasair_alert_batch_lag{type="avg"} %s\n' % avg_str
        s += 'lasair_alert_batch_lag{type="max"} %s\n' % max_str
        try:
            filename = '/var/lib/prometheus/node-exporter/lasair.prom'
            f = open(filename, 'w')
            f.write(s)
            f.close()
        except:
            self.log.error("ERROR in filter/write_stats: Cannot open promethus export file %s" % filename)

    @staticmethod
    def batch_statistics():
        """How many objects updated since last midnight.

        Returns a dict of counts and delay statistics (see keys below);
        -1 values indicate the corresponding query failed.
        """
        tainow = (time.time() / 86400 + 40587)
        midnight = math.floor(tainow - 0.5) + 0.5
        msl_main = db_connect.readonly()
        cursor = msl_main.cursor(buffered=True, dictionary=True)
        # objects modified since last midnight
        query = 'SELECT count(*) AS count FROM objects WHERE maxTai > %.1f' % midnight
        try:
            cursor.execute(query)
            for row in cursor:
                count = row['count']
                break
        except:
            count = -1
        # total number of objects
        query = 'SELECT count(*) AS total_count, mjdnow()-max(maxTai) AS since FROM objects'
        try:
            cursor.execute(query)
            for row in cursor:
                total_count = row['total_count']
                since = 24 * float(row['since'])
                break
        except:
            total_count = -1
            since = -1
        # statistics for most recent batch
        min_delay = -1
        avg_delay = -1
        max_delay = -1
        msl_local = db_connect.local()
        cursor = msl_local.cursor(buffered=True, dictionary=True)
        query = 'SELECT '
        query += 'tainow()-max(maxTai) AS min_delay, '
        query += 'tainow()-avg(maxTai) AS avg_delay, '
        query += 'tainow()-min(maxTai) AS max_delay '
        query += 'FROM objects'
        try:
            cursor.execute(query)
            for row in cursor:
                min_delay = 24 * 60 * float(row['min_delay'])  # minutes
                avg_delay = 24 * 60 * float(row['avg_delay'])  # minutes
                max_delay = 24 * 60 * float(row['max_delay'])  # minutes
                break
        except:
            pass
        return {
            'total_count': total_count,  # number of objects in database
            'count': count,  # number of objects updated since midnight
            'since': since,  # time since last object, hours
            'min_delay': min_delay,  # for grafana min delay in this batch, minutes
            'avg_delay': avg_delay,  # for grafana avg delay in this batch, minutes
            'max_delay': max_delay,  # for grafana max delay in this batch, minutes
        }

    @staticmethod
    def grafana_today():
        """How many objects reported today from LSST.

        Currently a stub returning 0 — the grafana query below is the
        old ZTF implementation, kept for reference.
        """
        g = datetime.utcnow()
        date = '%4d%02d%02d' % (g.year, g.month, g.day)
        # do not have this for LSST yet
        # url = 'https://monitor.alerts.ztf.uw.edu/api/datasources/proxy/7/api/v1/query?query='
        # urltail = 'sum(kafka_log_log_value{ name="LogEndOffset" , night = "%s", program = "MSIP" }) ' \
        #           '- sum(kafka_log_log_value{ name="LogStartOffset", night = "%s", program="MSIP" })' % (
        #     date, date)
        # try:
        #     urlquote = url + urllib.parse.quote(urltail)
        #     resultjson = requests.get(urlquote,
        #                               auth=(settings.GRAFANA_USERNAME, settings.GRAFANA_PASSWORD))
        #     result = json.loads(resultjson.text)
        #     alertsstr = result['data']['result'][0]['value'][1]
        #     today_candidates_ztf = int(alertsstr) // 4
        # except Exception as e:
        #     log = lasairLogging.getLogger("filter")
        #     log.info('Cannot parse grafana: %s' % str(e))
        #     today_candidates_ztf = -1
        today_candidates_ztf = 0
        return today_candidates_ztf

    def run_batch(self):
        """Top level method that processes an alert batch.
        Does the following:
        - Consume alerts from Kafka
        - Run watchlists
        - Run watchmaps
        - Run user filters
        - Run annotation queries
        - Build CSV file
        - Transfer to main database

        Returns the number of alerts processed (0 also on failed transfer).
        """
        self.setup()
        # set up the timers
        timers = {}
        for name in ['ffeatures', 'fwatchlist', 'fwatchmap', \
                     'fmmagw', 'ffilters', 'ftransfer', 'ftotal']:
            timers[name] = manage_status.timer(name)
        # local tables are rebuilt from scratch every batch
        self.truncate_local_database()
        timers['ftotal'].on()
        self.log.info('FILTER batch start %s' % now())
        self.log.info("Topic is %s" % self.topic_in)
        # consume the alerts from Kafka
        timers['ffeatures'].on()
        nalerts = self.consume_alerts()
        timers['ffeatures'].off()
        if nalerts > 0:
            # run the watchlists
            self.log.info('WATCHLIST start %s' % now())
            timers['fwatchlist'].on()
            nhits = watchlists.watchlists(self)
            timers['fwatchlist'].off()
            if nhits is not None:
                self.log.info('WATCHLISTS got %d' % nhits)
            else:
                self.log.error("ERROR in filter/watchlists")
            # run the watchmaps
            self.log.info('WATCHMAP start %s' % now())
            timers['fwatchmap'].on()
            nhits = watchmaps.watchmaps(self)
            timers['fwatchmap'].off()
            if nhits is not None:
                self.log.info('WATCHMAPS got %d' % nhits)
            else:
                self.log.error("ERROR in filter/watchmaps")
            # run the MMA/GW events
            self.log.info('MMA/GW start %s' % now())
            timers['fmmagw'].on()
            nhits = mmagw.mmagw(self)
            timers['fmmagw'].off()
            if nhits is not None:
                self.log.info('MMA/GW got %d' % nhits)
            else:
                self.log.error("ERROR in filter/mmagw")
            # run the user filters
            self.log.info('Filters start %s' % now())
            timers['ffilters'].on()
            ntotal = filters.filters(self)
            timers['ffilters'].off()
            if ntotal is not None:
                self.log.info('FILTERS got %d' % ntotal)
            else:
                self.log.error("ERROR in filter/filters")
            # run the annotation queries
            self.log.info('ANNOTATION FILTERS start %s' % now())
            ntotal = filters.fast_anotation_filters(self)
            if ntotal is not None:
                self.log.info('ANNOTATION FILTERS got %d' % ntotal)
            else:
                self.log.error("ERROR in filter/fast_annotation_filters")
            # build CSV file with local database and transfer to main
            if self.transfer:
                timers['ftransfer'].on()
                commit = self.transfer_to_main()
                timers['ftransfer'].off()
                self.log.info('Batch ended')
                if not commit:
                    # failed transfer: offsets not committed, back off 10 min
                    self.log.info('Transfer to main failed, no commit')
                    time.sleep(600)
                    return 0
        # Write stats for the batch
        timers['ftotal'].off()
        self.write_stats(timers, nalerts)
        self.log.info('%d alerts processed\n' % nalerts)
        return nalerts
if __name__ == "__main__":
    #lasairLogging.basicConfig(stream=sys.stdout)
    logging.basicConfig(level=logging.DEBUG)
    log = logging.getLogger()
    # Parse command-line options; defaults come from the module docstring
    # (docopt [default: ...] values arrive as strings).
    args = docopt(__doc__)
    topic_in = args.get('--topic_in') or 'ztf_sherlock'
    group_id = args.get('--group_id') or settings.KAFKA_GROUPID
    maxalert = int(args.get('--maxalert') or settings.KAFKA_MAXALERTS)
    # maxbatch -1 means "run forever" (n_batch never equals -1).
    maxbatch = int(args.get('--maxbatch') or -1)
    local_db = args.get('--local_db')
    # Boolean flags: any value other than True/true/Yes/yes is treated as False.
    send_email = args.get('--send_email') in ['True', 'true', 'Yes', 'yes']
    send_kafka = args.get('--send_kafka') in ['True', 'true', 'Yes', 'yes']
    transfer = args.get('--transfer') in ['True', 'true', 'Yes', 'yes']
    stats = args.get('--stats') in ['True', 'true', 'Yes', 'yes']
    fltr = Filter(topic_in=topic_in, group_id=group_id, maxalert=maxalert, local_db=local_db,
                  send_email=send_email, send_kafka=send_kafka, transfer=transfer, stats=stats)
    n_batch = 0
    # Process batches until SIGTERM is raised or maxbatch is reached.
    while not fltr.sigterm_raised:
        n_alerts = fltr.run_batch()
        n_batch += 1
        if n_batch == maxbatch:
            log.info(f"Exiting after {n_batch} batches")
            sys.exit(0)
        if n_alerts == 0:  # process got no alerts, so sleep a few minutes
            log.info('Waiting for more alerts ....')
            time.sleep(settings.WAIT_TIME)
|
lsst-ukREPO_NAMElasair-lsstPATH_START.@lasair-lsst_extracted@lasair-lsst-main@pipeline@filter@filtercore.py@.PATH_END.py
|
{
"filename": "cm.py",
"repo_name": "mwaskom/seaborn",
"repo_path": "seaborn_extracted/seaborn-master/seaborn/cm.py",
"type": "Python"
}
|
from matplotlib import colors
from seaborn._compat import register_colormap
# 256-entry RGB lookup table for the "rocket" sequential colormap.
# Each row is an (r, g, b) triple of floats in [0, 1], sweeping from
# near-black through red toward light tones. Machine-generated data;
# do not edit individual values by hand.
_rocket_lut = [
    [ 0.01060815, 0.01808215, 0.10018654],
    [ 0.01428972, 0.02048237, 0.10374486],
    [ 0.01831941, 0.0229766 , 0.10738511],
    [ 0.02275049, 0.02554464, 0.11108639],
    [ 0.02759119, 0.02818316, 0.11483751],
    [ 0.03285175, 0.03088792, 0.11863035],
    [ 0.03853466, 0.03365771, 0.12245873],
    [ 0.04447016, 0.03648425, 0.12631831],
    [ 0.05032105, 0.03936808, 0.13020508],
    [ 0.05611171, 0.04224835, 0.13411624],
    [ 0.0618531 , 0.04504866, 0.13804929],
    [ 0.06755457, 0.04778179, 0.14200206],
    [ 0.0732236 , 0.05045047, 0.14597263],
    [ 0.0788708 , 0.05305461, 0.14995981],
    [ 0.08450105, 0.05559631, 0.15396203],
    [ 0.09011319, 0.05808059, 0.15797687],
    [ 0.09572396, 0.06050127, 0.16200507],
    [ 0.10132312, 0.06286782, 0.16604287],
    [ 0.10692823, 0.06517224, 0.17009175],
    [ 0.1125315 , 0.06742194, 0.17414848],
    [ 0.11813947, 0.06961499, 0.17821272],
    [ 0.12375803, 0.07174938, 0.18228425],
    [ 0.12938228, 0.07383015, 0.18636053],
    [ 0.13501631, 0.07585609, 0.19044109],
    [ 0.14066867, 0.0778224 , 0.19452676],
    [ 0.14633406, 0.07973393, 0.1986151 ],
    [ 0.15201338, 0.08159108, 0.20270523],
    [ 0.15770877, 0.08339312, 0.20679668],
    [ 0.16342174, 0.0851396 , 0.21088893],
    [ 0.16915387, 0.08682996, 0.21498104],
    [ 0.17489524, 0.08848235, 0.2190294 ],
    [ 0.18065495, 0.09009031, 0.22303512],
    [ 0.18643324, 0.09165431, 0.22699705],
    [ 0.19223028, 0.09317479, 0.23091409],
    [ 0.19804623, 0.09465217, 0.23478512],
    [ 0.20388117, 0.09608689, 0.23860907],
    [ 0.20973515, 0.09747934, 0.24238489],
    [ 0.21560818, 0.09882993, 0.24611154],
    [ 0.22150014, 0.10013944, 0.2497868 ],
    [ 0.22741085, 0.10140876, 0.25340813],
    [ 0.23334047, 0.10263737, 0.25697736],
    [ 0.23928891, 0.10382562, 0.2604936 ],
    [ 0.24525608, 0.10497384, 0.26395596],
    [ 0.25124182, 0.10608236, 0.26736359],
    [ 0.25724602, 0.10715148, 0.27071569],
    [ 0.26326851, 0.1081815 , 0.27401148],
    [ 0.26930915, 0.1091727 , 0.2772502 ],
    [ 0.27536766, 0.11012568, 0.28043021],
    [ 0.28144375, 0.11104133, 0.2835489 ],
    [ 0.2875374 , 0.11191896, 0.28660853],
    [ 0.29364846, 0.11275876, 0.2896085 ],
    [ 0.29977678, 0.11356089, 0.29254823],
    [ 0.30592213, 0.11432553, 0.29542718],
    [ 0.31208435, 0.11505284, 0.29824485],
    [ 0.31826327, 0.1157429 , 0.30100076],
    [ 0.32445869, 0.11639585, 0.30369448],
    [ 0.33067031, 0.11701189, 0.30632563],
    [ 0.33689808, 0.11759095, 0.3088938 ],
    [ 0.34314168, 0.11813362, 0.31139721],
    [ 0.34940101, 0.11863987, 0.3138355 ],
    [ 0.355676  , 0.11910909, 0.31620996],
    [ 0.36196644, 0.1195413 , 0.31852037],
    [ 0.36827206, 0.11993653, 0.32076656],
    [ 0.37459292, 0.12029443, 0.32294825],
    [ 0.38092887, 0.12061482, 0.32506528],
    [ 0.38727975, 0.12089756, 0.3271175 ],
    [ 0.39364518, 0.12114272, 0.32910494],
    [ 0.40002537, 0.12134964, 0.33102734],
    [ 0.40642019, 0.12151801, 0.33288464],
    [ 0.41282936, 0.12164769, 0.33467689],
    [ 0.41925278, 0.12173833, 0.33640407],
    [ 0.42569057, 0.12178916, 0.33806605],
    [ 0.43214263, 0.12179973, 0.33966284],
    [ 0.43860848, 0.12177004, 0.34119475],
    [ 0.44508855, 0.12169883, 0.34266151],
    [ 0.45158266, 0.12158557, 0.34406324],
    [ 0.45809049, 0.12142996, 0.34540024],
    [ 0.46461238, 0.12123063, 0.34667231],
    [ 0.47114798, 0.12098721, 0.34787978],
    [ 0.47769736, 0.12069864, 0.34902273],
    [ 0.48426077, 0.12036349, 0.35010104],
    [ 0.49083761, 0.11998161, 0.35111537],
    [ 0.49742847, 0.11955087, 0.35206533],
    [ 0.50403286, 0.11907081, 0.35295152],
    [ 0.51065109, 0.11853959, 0.35377385],
    [ 0.51728314, 0.1179558 , 0.35453252],
    [ 0.52392883, 0.11731817, 0.35522789],
    [ 0.53058853, 0.11662445, 0.35585982],
    [ 0.53726173, 0.11587369, 0.35642903],
    [ 0.54394898, 0.11506307, 0.35693521],
    [ 0.5506426 , 0.11420757, 0.35737863],
    [ 0.55734473, 0.11330456, 0.35775059],
    [ 0.56405586, 0.11235265, 0.35804813],
    [ 0.57077365, 0.11135597, 0.35827146],
    [ 0.5774991 , 0.11031233, 0.35841679],
    [ 0.58422945, 0.10922707, 0.35848469],
    [ 0.59096382, 0.10810205, 0.35847347],
    [ 0.59770215, 0.10693774, 0.35838029],
    [ 0.60444226, 0.10573912, 0.35820487],
    [ 0.61118304, 0.10450943, 0.35794557],
    [ 0.61792306, 0.10325288, 0.35760108],
    [ 0.62466162, 0.10197244, 0.35716891],
    [ 0.63139686, 0.10067417, 0.35664819],
    [ 0.63812122, 0.09938212, 0.35603757],
    [ 0.64483795, 0.0980891 , 0.35533555],
    [ 0.65154562, 0.09680192, 0.35454107],
    [ 0.65824241, 0.09552918, 0.3536529 ],
    [ 0.66492652, 0.09428017, 0.3526697 ],
    [ 0.67159578, 0.09306598, 0.35159077],
    [ 0.67824099, 0.09192342, 0.3504148 ],
    [ 0.684863  , 0.09085633, 0.34914061],
    [ 0.69146268, 0.0898675 , 0.34776864],
    [ 0.69803757, 0.08897226, 0.3462986 ],
    [ 0.70457834, 0.0882129 , 0.34473046],
    [ 0.71108138, 0.08761223, 0.3430635 ],
    [ 0.7175507 , 0.08716212, 0.34129974],
    [ 0.72398193, 0.08688725, 0.33943958],
    [ 0.73035829, 0.0868623 , 0.33748452],
    [ 0.73669146, 0.08704683, 0.33543669],
    [ 0.74297501, 0.08747196, 0.33329799],
    [ 0.74919318, 0.08820542, 0.33107204],
    [ 0.75535825, 0.08919792, 0.32876184],
    [ 0.76145589, 0.09050716, 0.32637117],
    [ 0.76748424, 0.09213602, 0.32390525],
    [ 0.77344838, 0.09405684, 0.32136808],
    [ 0.77932641, 0.09634794, 0.31876642],
    [ 0.78513609, 0.09892473, 0.31610488],
    [ 0.79085854, 0.10184672, 0.313391  ],
    [ 0.7965014 , 0.10506637, 0.31063031],
    [ 0.80205987, 0.10858333, 0.30783   ],
    [ 0.80752799, 0.11239964, 0.30499738],
    [ 0.81291606, 0.11645784, 0.30213802],
    [ 0.81820481, 0.12080606, 0.29926105],
    [ 0.82341472, 0.12535343, 0.2963705 ],
    [ 0.82852822, 0.13014118, 0.29347474],
    [ 0.83355779, 0.13511035, 0.29057852],
    [ 0.83850183, 0.14025098, 0.2876878 ],
    [ 0.84335441, 0.14556683, 0.28480819],
    [ 0.84813096, 0.15099892, 0.281943  ],
    [ 0.85281737, 0.15657772, 0.27909826],
    [ 0.85742602, 0.1622583 , 0.27627462],
    [ 0.86196552, 0.16801239, 0.27346473],
    [ 0.86641628, 0.17387796, 0.27070818],
    [ 0.87079129, 0.17982114, 0.26797378],
    [ 0.87507281, 0.18587368, 0.26529697],
    [ 0.87925878, 0.19203259, 0.26268136],
    [ 0.8833417 , 0.19830556, 0.26014181],
    [ 0.88731387, 0.20469941, 0.25769539],
    [ 0.89116859, 0.21121788, 0.2553592 ],
    [ 0.89490337, 0.21785614, 0.25314362],
    [ 0.8985026 , 0.22463251, 0.25108745],
    [ 0.90197527, 0.23152063, 0.24918223],
    [ 0.90530097, 0.23854541, 0.24748098],
    [ 0.90848638, 0.24568473, 0.24598324],
    [ 0.911533  , 0.25292623, 0.24470258],
    [ 0.9144225 , 0.26028902, 0.24369359],
    [ 0.91717106, 0.26773821, 0.24294137],
    [ 0.91978131, 0.27526191, 0.24245973],
    [ 0.92223947, 0.28287251, 0.24229568],
    [ 0.92456587, 0.29053388, 0.24242622],
    [ 0.92676657, 0.29823282, 0.24285536],
    [ 0.92882964, 0.30598085, 0.24362274],
    [ 0.93078135, 0.31373977, 0.24468803],
    [ 0.93262051, 0.3215093 , 0.24606461],
    [ 0.93435067, 0.32928362, 0.24775328],
    [ 0.93599076, 0.33703942, 0.24972157],
    [ 0.93752831, 0.34479177, 0.25199928],
    [ 0.93899289, 0.35250734, 0.25452808],
    [ 0.94036561, 0.36020899, 0.25734661],
    [ 0.94167588, 0.36786594, 0.2603949 ],
    [ 0.94291042, 0.37549479, 0.26369821],
    [ 0.94408513, 0.3830811 , 0.26722004],
    [ 0.94520419, 0.39062329, 0.27094924],
    [ 0.94625977, 0.39813168, 0.27489742],
    [ 0.94727016, 0.4055909 , 0.27902322],
    [ 0.94823505, 0.41300424, 0.28332283],
    [ 0.94914549, 0.42038251, 0.28780969],
    [ 0.95001704, 0.42771398, 0.29244728],
    [ 0.95085121, 0.43500005, 0.29722817],
    [ 0.95165009, 0.44224144, 0.30214494],
    [ 0.9524044 , 0.44944853, 0.3072105 ],
    [ 0.95312556, 0.45661389, 0.31239776],
    [ 0.95381595, 0.46373781, 0.31769923],
    [ 0.95447591, 0.47082238, 0.32310953],
    [ 0.95510255, 0.47787236, 0.32862553],
    [ 0.95569679, 0.48489115, 0.33421404],
    [ 0.95626788, 0.49187351, 0.33985601],
    [ 0.95681685, 0.49882008, 0.34555431],
    [ 0.9573439 , 0.50573243, 0.35130912],
    [ 0.95784842, 0.51261283, 0.35711942],
    [ 0.95833051, 0.51946267, 0.36298589],
    [ 0.95879054, 0.52628305, 0.36890904],
    [ 0.95922872, 0.53307513, 0.3748895 ],
    [ 0.95964538, 0.53983991, 0.38092784],
    [ 0.96004345, 0.54657593, 0.3870292 ],
    [ 0.96042097, 0.55328624, 0.39319057],
    [ 0.96077819, 0.55997184, 0.39941173],
    [ 0.9611152 , 0.5666337 , 0.40569343],
    [ 0.96143273, 0.57327231, 0.41203603],
    [ 0.96173392, 0.57988594, 0.41844491],
    [ 0.96201757, 0.58647675, 0.42491751],
    [ 0.96228344, 0.59304598, 0.43145271],
    [ 0.96253168, 0.5995944 , 0.43805131],
    [ 0.96276513, 0.60612062, 0.44471698],
    [ 0.96298491, 0.6126247 , 0.45145074],
    [ 0.96318967, 0.61910879, 0.45824902],
    [ 0.96337949, 0.6255736 , 0.46511271],
    [ 0.96355923, 0.63201624, 0.47204746],
    [ 0.96372785, 0.63843852, 0.47905028],
    [ 0.96388426, 0.64484214, 0.4861196 ],
    [ 0.96403203, 0.65122535, 0.4932578 ],
    [ 0.96417332, 0.65758729, 0.50046894],
    [ 0.9643063 , 0.66393045, 0.5077467 ],
    [ 0.96443322, 0.67025402, 0.51509334],
    [ 0.96455845, 0.67655564, 0.52251447],
    [ 0.96467922, 0.68283846, 0.53000231],
    [ 0.96479861, 0.68910113, 0.53756026],
    [ 0.96492035, 0.69534192, 0.5451917 ],
    [ 0.96504223, 0.7015636 , 0.5528892 ],
    [ 0.96516917, 0.70776351, 0.5606593 ],
    [ 0.96530224, 0.71394212, 0.56849894],
    [ 0.96544032, 0.72010124, 0.57640375],
    [ 0.96559206, 0.72623592, 0.58438387],
    [ 0.96575293, 0.73235058, 0.59242739],
    [ 0.96592829, 0.73844258, 0.60053991],
    [ 0.96612013, 0.74451182, 0.60871954],
    [ 0.96632832, 0.75055966, 0.61696136],
    [ 0.96656022, 0.75658231, 0.62527295],
    [ 0.96681185, 0.76258381, 0.63364277],
    [ 0.96709183, 0.76855969, 0.64207921],
    [ 0.96739773, 0.77451297, 0.65057302],
    [ 0.96773482, 0.78044149, 0.65912731],
    [ 0.96810471, 0.78634563, 0.66773889],
    [ 0.96850919, 0.79222565, 0.6764046 ],
    [ 0.96893132, 0.79809112, 0.68512266],
    [ 0.96935926, 0.80395415, 0.69383201],
    [ 0.9698028 , 0.80981139, 0.70252255],
    [ 0.97025511, 0.81566605, 0.71120296],
    [ 0.97071849, 0.82151775, 0.71987163],
    [ 0.97120159, 0.82736371, 0.72851999],
    [ 0.97169389, 0.83320847, 0.73716071],
    [ 0.97220061, 0.83905052, 0.74578903],
    [ 0.97272597, 0.84488881, 0.75440141],
    [ 0.97327085, 0.85072354, 0.76299805],
    [ 0.97383206, 0.85655639, 0.77158353],
    [ 0.97441222, 0.86238689, 0.78015619],
    [ 0.97501782, 0.86821321, 0.78871034],
    [ 0.97564391, 0.87403763, 0.79725261],
    [ 0.97628674, 0.87986189, 0.8057883 ],
    [ 0.97696114, 0.88568129, 0.81430324],
    [ 0.97765722, 0.89149971, 0.82280948],
    [ 0.97837585, 0.89731727, 0.83130786],
    [ 0.97912374, 0.90313207, 0.83979337],
    [ 0.979891  , 0.90894778, 0.84827858],
    [ 0.98067764, 0.91476465, 0.85676611],
    [ 0.98137749, 0.92061729, 0.86536915]
]
# 256-entry RGB lookup table for the "mako" sequential colormap.
# Each row is an (r, g, b) triple of floats in [0, 1], sweeping from
# near-black through blue-green toward light tones. Machine-generated
# data; do not edit individual values by hand.
_mako_lut = [
    [ 0.04503935, 0.01482344, 0.02092227],
    [ 0.04933018, 0.01709292, 0.02535719],
    [ 0.05356262, 0.01950702, 0.03018802],
    [ 0.05774337, 0.02205989, 0.03545515],
    [ 0.06188095, 0.02474764, 0.04115287],
    [ 0.06598247, 0.0275665 , 0.04691409],
    [ 0.07005374, 0.03051278, 0.05264306],
    [ 0.07409947, 0.03358324, 0.05834631],
    [ 0.07812339, 0.03677446, 0.06403249],
    [ 0.08212852, 0.0400833 , 0.06970862],
    [ 0.08611731, 0.04339148, 0.07538208],
    [ 0.09009161, 0.04664706, 0.08105568],
    [ 0.09405308, 0.04985685, 0.08673591],
    [ 0.09800301, 0.05302279, 0.09242646],
    [ 0.10194255, 0.05614641, 0.09813162],
    [ 0.10587261, 0.05922941, 0.103854  ],
    [ 0.1097942 , 0.06227277, 0.10959847],
    [ 0.11370826, 0.06527747, 0.11536893],
    [ 0.11761516, 0.06824548, 0.12116393],
    [ 0.12151575, 0.07117741, 0.12698763],
    [ 0.12541095, 0.07407363, 0.1328442 ],
    [ 0.12930083, 0.07693611, 0.13873064],
    [ 0.13317849, 0.07976988, 0.14465095],
    [ 0.13701138, 0.08259683, 0.15060265],
    [ 0.14079223, 0.08542126, 0.15659379],
    [ 0.14452486, 0.08824175, 0.16262484],
    [ 0.14820351, 0.09106304, 0.16869476],
    [ 0.15183185, 0.09388372, 0.17480366],
    [ 0.15540398, 0.09670855, 0.18094993],
    [ 0.15892417, 0.09953561, 0.18713384],
    [ 0.16238588, 0.10236998, 0.19335329],
    [ 0.16579435, 0.10520905, 0.19960847],
    [ 0.16914226, 0.10805832, 0.20589698],
    [ 0.17243586, 0.11091443, 0.21221911],
    [ 0.17566717, 0.11378321, 0.21857219],
    [ 0.17884322, 0.11666074, 0.2249565 ],
    [ 0.18195582, 0.11955283, 0.23136943],
    [ 0.18501213, 0.12245547, 0.23781116],
    [ 0.18800459, 0.12537395, 0.24427914],
    [ 0.19093944, 0.1283047 , 0.25077369],
    [ 0.19381092, 0.13125179, 0.25729255],
    [ 0.19662307, 0.13421303, 0.26383543],
    [ 0.19937337, 0.13719028, 0.27040111],
    [ 0.20206187, 0.14018372, 0.27698891],
    [ 0.20469116, 0.14319196, 0.28359861],
    [ 0.20725547, 0.14621882, 0.29022775],
    [ 0.20976258, 0.14925954, 0.29687795],
    [ 0.21220409, 0.15231929, 0.30354703],
    [ 0.21458611, 0.15539445, 0.31023563],
    [ 0.21690827, 0.15848519, 0.31694355],
    [ 0.21916481, 0.16159489, 0.32366939],
    [ 0.2213631 , 0.16471913, 0.33041431],
    [ 0.22349947, 0.1678599 , 0.33717781],
    [ 0.2255714 , 0.1710185 , 0.34395925],
    [ 0.22758415, 0.17419169, 0.35075983],
    [ 0.22953569, 0.17738041, 0.35757941],
    [ 0.23142077, 0.18058733, 0.3644173 ],
    [ 0.2332454 , 0.18380872, 0.37127514],
    [ 0.2350092 , 0.18704459, 0.3781528 ],
    [ 0.23670785, 0.190297  , 0.38504973],
    [ 0.23834119, 0.19356547, 0.39196711],
    [ 0.23991189, 0.19684817, 0.39890581],
    [ 0.24141903, 0.20014508, 0.4058667 ],
    [ 0.24286214, 0.20345642, 0.4128484 ],
    [ 0.24423453, 0.20678459, 0.41985299],
    [ 0.24554109, 0.21012669, 0.42688124],
    [ 0.2467815 , 0.21348266, 0.43393244],
    [ 0.24795393, 0.21685249, 0.4410088 ],
    [ 0.24905614, 0.22023618, 0.448113  ],
    [ 0.25007383, 0.22365053, 0.45519562],
    [ 0.25098926, 0.22710664, 0.46223892],
    [ 0.25179696, 0.23060342, 0.46925447],
    [ 0.25249346, 0.23414353, 0.47623196],
    [ 0.25307401, 0.23772973, 0.48316271],
    [ 0.25353152, 0.24136961, 0.49001976],
    [ 0.25386167, 0.24506548, 0.49679407],
    [ 0.25406082, 0.2488164 , 0.50348932],
    [ 0.25412435, 0.25262843, 0.51007843],
    [ 0.25404842, 0.25650743, 0.51653282],
    [ 0.25383134, 0.26044852, 0.52286845],
    [ 0.2534705 , 0.26446165, 0.52903422],
    [ 0.25296722, 0.2685428 , 0.53503572],
    [ 0.2523226 , 0.27269346, 0.54085315],
    [ 0.25153974, 0.27691629, 0.54645752],
    [ 0.25062402, 0.28120467, 0.55185939],
    [ 0.24958205, 0.28556371, 0.55701246],
    [ 0.24842386, 0.28998148, 0.56194601],
    [ 0.24715928, 0.29446327, 0.56660884],
    [ 0.24580099, 0.29899398, 0.57104399],
    [ 0.24436202, 0.30357852, 0.57519929],
    [ 0.24285591, 0.30819938, 0.57913247],
    [ 0.24129828, 0.31286235, 0.58278615],
    [ 0.23970131, 0.3175495 , 0.5862272 ],
    [ 0.23807973, 0.32226344, 0.58941872],
    [ 0.23644557, 0.32699241, 0.59240198],
    [ 0.2348113 , 0.33173196, 0.59518282],
    [ 0.23318874, 0.33648036, 0.59775543],
    [ 0.2315855 , 0.34122763, 0.60016456],
    [ 0.23001121, 0.34597357, 0.60240251],
    [ 0.2284748 , 0.35071512, 0.6044784 ],
    [ 0.22698081, 0.35544612, 0.60642528],
    [ 0.22553305, 0.36016515, 0.60825252],
    [ 0.22413977, 0.36487341, 0.60994938],
    [ 0.22280246, 0.36956728, 0.61154118],
    [ 0.22152555, 0.37424409, 0.61304472],
    [ 0.22030752, 0.37890437, 0.61446646],
    [ 0.2191538 , 0.38354668, 0.61581561],
    [ 0.21806257, 0.38817169, 0.61709794],
    [ 0.21703799, 0.39277882, 0.61831922],
    [ 0.21607792, 0.39736958, 0.61948028],
    [ 0.21518463, 0.40194196, 0.62059763],
    [ 0.21435467, 0.40649717, 0.62167507],
    [ 0.21358663, 0.41103579, 0.62271724],
    [ 0.21288172, 0.41555771, 0.62373011],
    [ 0.21223835, 0.42006355, 0.62471794],
    [ 0.21165312, 0.42455441, 0.62568371],
    [ 0.21112526, 0.42903064, 0.6266318 ],
    [ 0.21065161, 0.43349321, 0.62756504],
    [ 0.21023306, 0.43794288, 0.62848279],
    [ 0.20985996, 0.44238227, 0.62938329],
    [ 0.20951045, 0.44680966, 0.63030696],
    [ 0.20916709, 0.45122981, 0.63124483],
    [ 0.20882976, 0.45564335, 0.63219599],
    [ 0.20849798, 0.46005094, 0.63315928],
    [ 0.20817199, 0.46445309, 0.63413391],
    [ 0.20785149, 0.46885041, 0.63511876],
    [ 0.20753716, 0.47324327, 0.63611321],
    [ 0.20722876, 0.47763224, 0.63711608],
    [ 0.20692679, 0.48201774, 0.63812656],
    [ 0.20663156, 0.48640018, 0.63914367],
    [ 0.20634336, 0.49078002, 0.64016638],
    [ 0.20606303, 0.49515755, 0.6411939 ],
    [ 0.20578999, 0.49953341, 0.64222457],
    [ 0.20552612, 0.50390766, 0.64325811],
    [ 0.20527189, 0.50828072, 0.64429331],
    [ 0.20502868, 0.51265277, 0.64532947],
    [ 0.20479718, 0.51702417, 0.64636539],
    [ 0.20457804, 0.52139527, 0.64739979],
    [ 0.20437304, 0.52576622, 0.64843198],
    [ 0.20418396, 0.53013715, 0.64946117],
    [ 0.20401238, 0.53450825, 0.65048638],
    [ 0.20385896, 0.53887991, 0.65150606],
    [ 0.20372653, 0.54325208, 0.65251978],
    [ 0.20361709, 0.5476249 , 0.6535266 ],
    [ 0.20353258, 0.55199854, 0.65452542],
    [ 0.20347472, 0.55637318, 0.655515  ],
    [ 0.20344718, 0.56074869, 0.65649508],
    [ 0.20345161, 0.56512531, 0.65746419],
    [ 0.20349089, 0.56950304, 0.65842151],
    [ 0.20356842, 0.57388184, 0.65936642],
    [ 0.20368663, 0.57826181, 0.66029768],
    [ 0.20384884, 0.58264293, 0.6612145 ],
    [ 0.20405904, 0.58702506, 0.66211645],
    [ 0.20431921, 0.59140842, 0.66300179],
    [ 0.20463464, 0.59579264, 0.66387079],
    [ 0.20500731, 0.60017798, 0.66472159],
    [ 0.20544449, 0.60456387, 0.66555409],
    [ 0.20596097, 0.60894927, 0.66636568],
    [ 0.20654832, 0.61333521, 0.66715744],
    [ 0.20721003, 0.61772167, 0.66792838],
    [ 0.20795035, 0.62210845, 0.66867802],
    [ 0.20877302, 0.62649546, 0.66940555],
    [ 0.20968223, 0.63088252, 0.6701105 ],
    [ 0.21068163, 0.63526951, 0.67079211],
    [ 0.21177544, 0.63965621, 0.67145005],
    [ 0.21298582, 0.64404072, 0.67208182],
    [ 0.21430361, 0.64842404, 0.67268861],
    [ 0.21572716, 0.65280655, 0.67326978],
    [ 0.21726052, 0.65718791, 0.6738255 ],
    [ 0.21890636, 0.66156803, 0.67435491],
    [ 0.220668  , 0.66594665, 0.67485792],
    [ 0.22255447, 0.67032297, 0.67533374],
    [ 0.22458372, 0.67469531, 0.67578061],
    [ 0.22673713, 0.67906542, 0.67620044],
    [ 0.22901625, 0.6834332 , 0.67659251],
    [ 0.23142316, 0.68779836, 0.67695703],
    [ 0.23395924, 0.69216072, 0.67729378],
    [ 0.23663857, 0.69651881, 0.67760151],
    [ 0.23946645, 0.70087194, 0.67788018],
    [ 0.24242624, 0.70522162, 0.67813088],
    [ 0.24549008, 0.70957083, 0.67835215],
    [ 0.24863372, 0.71392166, 0.67854868],
    [ 0.25187832, 0.71827158, 0.67872193],
    [ 0.25524083, 0.72261873, 0.67887024],
    [ 0.25870947, 0.72696469, 0.67898912],
    [ 0.26229238, 0.73130855, 0.67907645],
    [ 0.26604085, 0.73564353, 0.67914062],
    [ 0.26993099, 0.73997282, 0.67917264],
    [ 0.27397488, 0.74429484, 0.67917096],
    [ 0.27822463, 0.74860229, 0.67914468],
    [ 0.28264201, 0.75290034, 0.67907959],
    [ 0.2873016 , 0.75717817, 0.67899164],
    [ 0.29215894, 0.76144162, 0.67886578],
    [ 0.29729823, 0.76567816, 0.67871894],
    [ 0.30268199, 0.76989232, 0.67853896],
    [ 0.30835665, 0.77407636, 0.67833512],
    [ 0.31435139, 0.77822478, 0.67811118],
    [ 0.3206671 , 0.78233575, 0.67786729],
    [ 0.32733158, 0.78640315, 0.67761027],
    [ 0.33437168, 0.79042043, 0.67734882],
    [ 0.34182112, 0.79437948, 0.67709394],
    [ 0.34968889, 0.79827511, 0.67685638],
    [ 0.35799244, 0.80210037, 0.67664969],
    [ 0.36675371, 0.80584651, 0.67649539],
    [ 0.3759816 , 0.80950627, 0.67641393],
    [ 0.38566792, 0.81307432, 0.67642947],
    [ 0.39579804, 0.81654592, 0.67656899],
    [ 0.40634556, 0.81991799, 0.67686215],
    [ 0.41730243, 0.82318339, 0.67735255],
    [ 0.4285828 , 0.82635051, 0.6780564 ],
    [ 0.44012728, 0.82942353, 0.67900049],
    [ 0.45189421, 0.83240398, 0.68021733],
    [ 0.46378379, 0.83530763, 0.6817062 ],
    [ 0.47573199, 0.83814472, 0.68347352],
    [ 0.48769865, 0.84092197, 0.68552698],
    [ 0.49962354, 0.84365379, 0.68783929],
    [ 0.5114027 , 0.8463718 , 0.69029789],
    [ 0.52301693, 0.84908401, 0.69288545],
    [ 0.53447549, 0.85179048, 0.69561066],
    [ 0.54578602, 0.8544913 , 0.69848331],
    [ 0.55695565, 0.85718723, 0.70150427],
    [ 0.56798832, 0.85987893, 0.70468261],
    [ 0.57888639, 0.86256715, 0.70802931],
    [ 0.5896541 , 0.8652532 , 0.71154204],
    [ 0.60028928, 0.86793835, 0.71523675],
    [ 0.61079441, 0.87062438, 0.71910895],
    [ 0.62116633, 0.87331311, 0.72317003],
    [ 0.63140509, 0.87600675, 0.72741689],
    [ 0.64150735, 0.87870746, 0.73185717],
    [ 0.65147219, 0.8814179 , 0.73648495],
    [ 0.66129632, 0.8841403 , 0.74130658],
    [ 0.67097934, 0.88687758, 0.74631123],
    [ 0.68051833, 0.88963189, 0.75150483],
    [ 0.68991419, 0.89240612, 0.75687187],
    [ 0.69916533, 0.89520211, 0.76241714],
    [ 0.70827373, 0.89802257, 0.76812286],
    [ 0.71723995, 0.90086891, 0.77399039],
    [ 0.72606665, 0.90374337, 0.7800041 ],
    [ 0.73475675, 0.90664718, 0.78615802],
    [ 0.74331358, 0.90958151, 0.79244474],
    [ 0.75174143, 0.91254787, 0.79884925],
    [ 0.76004473, 0.91554656, 0.80536823],
    [ 0.76827704, 0.91856549, 0.81196513],
    [ 0.77647029, 0.921603  , 0.81855729],
    [ 0.78462009, 0.92466151, 0.82514119],
    [ 0.79273542, 0.92773848, 0.83172131],
    [ 0.8008109 , 0.93083672, 0.83829355],
    [ 0.80885107, 0.93395528, 0.84485982],
    [ 0.81685878, 0.9370938 , 0.85142101],
    [ 0.82483206, 0.94025378, 0.8579751 ],
    [ 0.83277661, 0.94343371, 0.86452477],
    [ 0.84069127, 0.94663473, 0.87106853],
    [ 0.84857662, 0.9498573 , 0.8776059 ],
    [ 0.8564431 , 0.95309792, 0.88414253],
    [ 0.86429066, 0.95635719, 0.89067759],
    [ 0.87218969, 0.95960708, 0.89725384]
]
# 256-entry RGB lookup table for the "vlag" diverging colormap.
# Each row is an (r, g, b) triple of floats in [0, 1]; values sweep
# blue -> near-white -> red across the table. Machine-generated data;
# do not edit individual values by hand.
_vlag_lut = [
    [ 0.13850039, 0.41331206, 0.74052025],
    [ 0.15077609, 0.41762684, 0.73970427],
    [ 0.16235219, 0.4219191 , 0.7389667 ],
    [ 0.1733322 , 0.42619024, 0.73832537],
    [ 0.18382538, 0.43044226, 0.73776764],
    [ 0.19394034, 0.4346772 , 0.73725867],
    [ 0.20367115, 0.43889576, 0.73685314],
    [ 0.21313625, 0.44310003, 0.73648045],
    [ 0.22231173, 0.44729079, 0.73619681],
    [ 0.23125148, 0.45146945, 0.73597803],
    [ 0.23998101, 0.45563715, 0.7358223 ],
    [ 0.24853358, 0.45979489, 0.73571524],
    [ 0.25691416, 0.4639437 , 0.73566943],
    [ 0.26513894, 0.46808455, 0.73568319],
    [ 0.27322194, 0.47221835, 0.73575497],
    [ 0.28117543, 0.47634598, 0.73588332],
    [ 0.28901021, 0.48046826, 0.73606686],
    [ 0.2967358 , 0.48458597, 0.73630433],
    [ 0.30436071, 0.48869986, 0.73659451],
    [ 0.3118955 , 0.49281055, 0.73693255],
    [ 0.31935389, 0.49691847, 0.73730851],
    [ 0.32672701, 0.5010247 , 0.73774013],
    [ 0.33402607, 0.50512971, 0.73821941],
    [ 0.34125337, 0.50923419, 0.73874905],
    [ 0.34840921, 0.51333892, 0.73933402],
    [ 0.35551826, 0.51744353, 0.73994642],
    [ 0.3625676 , 0.52154929, 0.74060763],
    [ 0.36956356, 0.52565656, 0.74131327],
    [ 0.37649902, 0.52976642, 0.74207698],
    [ 0.38340273, 0.53387791, 0.74286286],
    [ 0.39025859, 0.53799253, 0.7436962 ],
    [ 0.39706821, 0.54211081, 0.744578  ],
    [ 0.40384046, 0.54623277, 0.74549872],
    [ 0.41058241, 0.55035849, 0.74645094],
    [ 0.41728385, 0.55448919, 0.74745174],
    [ 0.42395178, 0.55862494, 0.74849357],
    [ 0.4305964 , 0.56276546, 0.74956387],
    [ 0.4372044 , 0.56691228, 0.75068412],
    [ 0.4437909 , 0.57106468, 0.75183427],
    [ 0.45035117, 0.5752235 , 0.75302312],
    [ 0.45687824, 0.57938983, 0.75426297],
    [ 0.46339713, 0.58356191, 0.75551816],
    [ 0.46988778, 0.58774195, 0.75682037],
    [ 0.47635605, 0.59192986, 0.75816245],
    [ 0.48281101, 0.5961252 , 0.75953212],
    [ 0.4892374 , 0.60032986, 0.76095418],
    [ 0.49566225, 0.60454154, 0.76238852],
    [ 0.50206137, 0.60876307, 0.76387371],
    [ 0.50845128, 0.61299312, 0.76538551],
    [ 0.5148258 , 0.61723272, 0.76693475],
    [ 0.52118385, 0.62148236, 0.76852436],
    [ 0.52753571, 0.62574126, 0.77013939],
    [ 0.53386831, 0.63001125, 0.77180152],
    [ 0.54020159, 0.63429038, 0.7734803 ],
    [ 0.54651272, 0.63858165, 0.77521306],
    [ 0.55282975, 0.64288207, 0.77695608],
    [ 0.55912585, 0.64719519, 0.77875327],
    [ 0.56542599, 0.65151828, 0.78056551],
    [ 0.57170924, 0.65585426, 0.78242747],
    [ 0.57799572, 0.6602009 , 0.78430751],
    [ 0.58426817, 0.66456073, 0.78623458],
    [ 0.590544  , 0.66893178, 0.78818117],
    [ 0.59680758, 0.67331643, 0.79017369],
    [ 0.60307553, 0.67771273, 0.79218572],
    [ 0.60934065, 0.68212194, 0.79422987],
    [ 0.61559495, 0.68654548, 0.7963202 ],
    [ 0.62185554, 0.69098125, 0.79842918],
    [ 0.62810662, 0.69543176, 0.80058381],
    [ 0.63436425, 0.69989499, 0.80275812],
    [ 0.64061445, 0.70437326, 0.80497621],
    [ 0.6468706 , 0.70886488, 0.80721641],
    [ 0.65312213, 0.7133717 , 0.80949719],
    [ 0.65937818, 0.71789261, 0.81180392],
    [ 0.66563334, 0.72242871, 0.81414642],
    [ 0.67189155, 0.72697967, 0.81651872],
    [ 0.67815314, 0.73154569, 0.81892097],
    [ 0.68441395, 0.73612771, 0.82136094],
    [ 0.69068321, 0.74072452, 0.82382353],
    [ 0.69694776, 0.7453385 , 0.82633199],
    [ 0.70322431, 0.74996721, 0.8288583 ],
    [ 0.70949595, 0.75461368, 0.83143221],
    [ 0.7157774 , 0.75927574, 0.83402904],
    [ 0.72206299, 0.76395461, 0.83665922],
    [ 0.72835227, 0.76865061, 0.8393242 ],
    [ 0.73465238, 0.7733628 , 0.84201224],
    [ 0.74094862, 0.77809393, 0.84474951],
    [ 0.74725683, 0.78284158, 0.84750915],
    [ 0.75357103, 0.78760701, 0.85030217],
    [ 0.75988961, 0.79239077, 0.85313207],
    [ 0.76621987, 0.79719185, 0.85598668],
    [ 0.77255045, 0.8020125 , 0.85888658],
    [ 0.77889241, 0.80685102, 0.86181298],
    [ 0.78524572, 0.81170768, 0.86476656],
    [ 0.79159841, 0.81658489, 0.86776906],
    [ 0.79796459, 0.82148036, 0.8707962 ],
    [ 0.80434168, 0.82639479, 0.87385315],
    [ 0.8107221 , 0.83132983, 0.87695392],
    [ 0.81711301, 0.8362844 , 0.88008641],
    [ 0.82351479, 0.84125863, 0.88325045],
    [ 0.82992772, 0.84625263, 0.88644594],
    [ 0.83634359, 0.85126806, 0.8896878 ],
    [ 0.84277295, 0.85630293, 0.89295721],
    [ 0.84921192, 0.86135782, 0.89626076],
    [ 0.85566206, 0.866432  , 0.89959467],
    [ 0.86211514, 0.87152627, 0.90297183],
    [ 0.86857483, 0.87663856, 0.90638248],
    [ 0.87504231, 0.88176648, 0.90981938],
    [ 0.88151194, 0.88690782, 0.91328493],
    [ 0.88797938, 0.89205857, 0.91677544],
    [ 0.89443865, 0.89721298, 0.9202854 ],
    [ 0.90088204, 0.90236294, 0.92380601],
    [ 0.90729768, 0.90749778, 0.92732797],
    [ 0.91367037, 0.91260329, 0.93083814],
    [ 0.91998105, 0.91766106, 0.93431861],
    [ 0.92620596, 0.92264789, 0.93774647],
    [ 0.93231683, 0.9275351 , 0.94109192],
    [ 0.93827772, 0.9322888 , 0.94432312],
    [ 0.94404755, 0.93686925, 0.94740137],
    [ 0.94958284, 0.94123072, 0.95027696],
    [ 0.95482682, 0.9453245 , 0.95291103],
    [ 0.9597248 , 0.94909728, 0.95525103],
    [ 0.96422552, 0.95249273, 0.95723271],
    [ 0.96826161, 0.95545812, 0.95882188],
    [ 0.97178458, 0.95793984, 0.95995705],
    [ 0.97474105, 0.95989142, 0.96059997],
    [ 0.97708604, 0.96127366, 0.96071853],
    [ 0.97877855, 0.96205832, 0.96030095],
    [ 0.97978484, 0.96222949, 0.95935496],
    [ 0.9805997 , 0.96155216, 0.95813083],
    [ 0.98152619, 0.95993719, 0.95639322],
    [ 0.9819726 , 0.95766608, 0.95399269],
    [ 0.98191855, 0.9547873 , 0.95098107],
    [ 0.98138514, 0.95134771, 0.94740644],
    [ 0.98040845, 0.94739906, 0.94332125],
    [ 0.97902107, 0.94300131, 0.93878672],
    [ 0.97729348, 0.93820409, 0.93385135],
    [ 0.9752533 , 0.933073  , 0.92858252],
    [ 0.97297834, 0.92765261, 0.92302309],
    [ 0.97049104, 0.92200317, 0.91723505],
    [ 0.96784372, 0.91616744, 0.91126063],
    [ 0.96507281, 0.91018664, 0.90514124],
    [ 0.96222034, 0.90409203, 0.89890756],
    [ 0.9593079 , 0.89791478, 0.89259122],
    [ 0.95635626, 0.89167908, 0.88621654],
    [ 0.95338303, 0.88540373, 0.87980238],
    [ 0.95040174, 0.87910333, 0.87336339],
    [ 0.94742246, 0.87278899, 0.86691076],
    [ 0.94445249, 0.86646893, 0.86045277],
    [ 0.94150476, 0.86014606, 0.85399191],
    [ 0.93857394, 0.85382798, 0.84753642],
    [ 0.93566206, 0.84751766, 0.84108935],
    [ 0.93277194, 0.8412164 , 0.83465197],
    [ 0.92990106, 0.83492672, 0.82822708],
    [ 0.92704736, 0.82865028, 0.82181656],
    [ 0.92422703, 0.82238092, 0.81541333],
    [ 0.92142581, 0.81612448, 0.80902415],
    [ 0.91864501, 0.80988032, 0.80264838],
    [ 0.91587578, 0.80365187, 0.79629001],
    [ 0.9131367 , 0.79743115, 0.78994   ],
    [ 0.91041602, 0.79122265, 0.78360361],
    [ 0.90771071, 0.78502727, 0.77728196],
    [ 0.90501581, 0.77884674, 0.7709771 ],
    [ 0.90235365, 0.77267117, 0.76467793],
    [ 0.8997019 , 0.76650962, 0.75839484],
    [ 0.89705346, 0.76036481, 0.752131  ],
    [ 0.89444021, 0.75422253, 0.74587047],
    [ 0.89183355, 0.74809474, 0.73962689],
    [ 0.88923216, 0.74198168, 0.73340061],
    [ 0.88665892, 0.73587283, 0.72717995],
    [ 0.88408839, 0.72977904, 0.72097718],
    [ 0.88153537, 0.72369332, 0.71478461],
    [ 0.87899389, 0.7176179 , 0.70860487],
    [ 0.87645157, 0.71155805, 0.7024439 ],
    [ 0.8739399 , 0.70549893, 0.6962854 ],
    [ 0.87142626, 0.6994551 , 0.69014561],
    [ 0.8689268 , 0.69341868, 0.68401597],
    [ 0.86643562, 0.687392  , 0.67789917],
    [ 0.86394434, 0.68137863, 0.67179927],
    [ 0.86147586, 0.67536728, 0.665704  ],
    [ 0.85899928, 0.66937226, 0.6596292 ],
    [ 0.85654668, 0.66337773, 0.6535577 ],
    [ 0.85408818, 0.65739772, 0.64750494],
    [ 0.85164413, 0.65142189, 0.64145983],
    [ 0.84920091, 0.6454565 , 0.63542932],
    [ 0.84676427, 0.63949827, 0.62941   ],
    [ 0.84433231, 0.63354773, 0.62340261],
    [ 0.84190106, 0.62760645, 0.61740899],
    [ 0.83947935, 0.62166951, 0.61142404],
    [ 0.8370538 , 0.61574332, 0.60545478],
    [ 0.83463975, 0.60981951, 0.59949247],
    [ 0.83221877, 0.60390724, 0.593547  ],
    [ 0.82980985, 0.59799607, 0.58760751],
    [ 0.82740268, 0.59209095, 0.58167944],
    [ 0.82498638, 0.5861973 , 0.57576866],
    [ 0.82258181, 0.5803034 , 0.56986307],
    [ 0.82016611, 0.57442123, 0.56397539],
    [ 0.81776305, 0.56853725, 0.55809173],
    [ 0.81534551, 0.56266602, 0.55222741],
    [ 0.81294293, 0.55679056, 0.5463651 ],
    [ 0.81052113, 0.55092973, 0.54052443],
    [ 0.80811509, 0.54506305, 0.53468464],
    [ 0.80568952, 0.53921036, 0.52886622],
    [ 0.80327506, 0.53335335, 0.52305077],
    [ 0.80084727, 0.52750583, 0.51725256],
    [ 0.79842217, 0.5216578 , 0.51146173],
    [ 0.79599382, 0.51581223, 0.50568155],
    [ 0.79355781, 0.50997127, 0.49991444],
    [ 0.79112596, 0.50412707, 0.49415289],
    [ 0.78867442, 0.49829386, 0.48841129],
    [ 0.7862306 , 0.49245398, 0.48267247],
    [ 0.7837687 , 0.48662309, 0.47695216],
    [ 0.78130809, 0.4807883 , 0.47123805],
    [ 0.77884467, 0.47495151, 0.46553236],
    [ 0.77636283, 0.46912235, 0.45984473],
    [ 0.77388383, 0.46328617, 0.45416141],
    [ 0.77138912, 0.45745466, 0.44849398],
    [ 0.76888874, 0.45162042, 0.44283573],
    [ 0.76638802, 0.44577901, 0.43718292],
    [ 0.76386116, 0.43994762, 0.43155211],
    [ 0.76133542, 0.43410655, 0.42592523],
    [ 0.75880631, 0.42825801, 0.42030488],
    [ 0.75624913, 0.42241905, 0.41470727],
    [ 0.7536919 , 0.41656866, 0.40911347],
    [ 0.75112748, 0.41071104, 0.40352792],
    [ 0.74854331, 0.40485474, 0.3979589 ],
    [ 0.74594723, 0.39899309, 0.39240088],
    [ 0.74334332, 0.39312199, 0.38685075],
    [ 0.74073277, 0.38723941, 0.3813074 ],
    [ 0.73809409, 0.38136133, 0.37578553],
    [ 0.73544692, 0.37547129, 0.37027123],
    [ 0.73278943, 0.36956954, 0.36476549],
    [ 0.73011829, 0.36365761, 0.35927038],
    [ 0.72743485, 0.35773314, 0.35378465],
    [ 0.72472722, 0.35180504, 0.34831662],
    [ 0.72200473, 0.34586421, 0.34285937],
    [ 0.71927052, 0.33990649, 0.33741033],
    [ 0.71652049, 0.33393396, 0.33197219],
    [ 0.71375362, 0.32794602, 0.32654545],
    [ 0.71096951, 0.32194148, 0.32113016],
    [ 0.70816772, 0.31591904, 0.31572637],
    [ 0.70534784, 0.30987734, 0.31033414],
    [ 0.70250944, 0.30381489, 0.30495353],
    [ 0.69965211, 0.2977301 , 0.2995846 ],
    [ 0.6967754 , 0.29162126, 0.29422741],
    [ 0.69388446, 0.28548074, 0.28887769],
    [ 0.69097561, 0.2793096 , 0.28353795],
    [ 0.68803513, 0.27311993, 0.27821876],
    [ 0.6850794 , 0.26689144, 0.27290694],
    [ 0.682108  , 0.26062114, 0.26760246],
    [ 0.67911013, 0.2543177 , 0.26231367],
    [ 0.67609393, 0.24796818, 0.25703372],
    [ 0.67305921, 0.24156846, 0.25176238],
    [ 0.67000176, 0.23511902, 0.24650278],
    [ 0.66693423, 0.22859879, 0.24124404],
    [ 0.6638441 , 0.22201742, 0.2359961 ],
    [ 0.66080672, 0.21526712, 0.23069468]
]
_icefire_lut = [
[ 0.73936227, 0.90443867, 0.85757238],
[ 0.72888063, 0.89639109, 0.85488394],
[ 0.71834255, 0.88842162, 0.8521605 ],
[ 0.70773866, 0.88052939, 0.849422 ],
[ 0.69706215, 0.87271313, 0.84668315],
[ 0.68629021, 0.86497329, 0.84398721],
[ 0.67543654, 0.85730617, 0.84130969],
[ 0.66448539, 0.84971123, 0.83868005],
[ 0.65342679, 0.84218728, 0.83611512],
[ 0.64231804, 0.83471867, 0.83358584],
[ 0.63117745, 0.827294 , 0.83113431],
[ 0.62000484, 0.81991069, 0.82876741],
[ 0.60879435, 0.81256797, 0.82648905],
[ 0.59754118, 0.80526458, 0.82430414],
[ 0.58624247, 0.79799884, 0.82221573],
[ 0.57489525, 0.7907688 , 0.82022901],
[ 0.56349779, 0.78357215, 0.81834861],
[ 0.55204294, 0.77640827, 0.81657563],
[ 0.54052516, 0.76927562, 0.81491462],
[ 0.52894085, 0.76217215, 0.81336913],
[ 0.51728854, 0.75509528, 0.81194156],
[ 0.50555676, 0.74804469, 0.81063503],
[ 0.49373871, 0.7410187 , 0.80945242],
[ 0.48183174, 0.73401449, 0.80839675],
[ 0.46982587, 0.72703075, 0.80747097],
[ 0.45770893, 0.72006648, 0.80667756],
[ 0.44547249, 0.71311941, 0.80601991],
[ 0.43318643, 0.70617126, 0.80549278],
[ 0.42110294, 0.69916972, 0.80506683],
[ 0.40925101, 0.69211059, 0.80473246],
[ 0.3976693 , 0.68498786, 0.80448272],
[ 0.38632002, 0.67781125, 0.80431024],
[ 0.37523981, 0.67057537, 0.80420832],
[ 0.36442578, 0.66328229, 0.80417474],
[ 0.35385939, 0.65593699, 0.80420591],
[ 0.34358916, 0.64853177, 0.8043 ],
[ 0.33355526, 0.64107876, 0.80445484],
[ 0.32383062, 0.63356578, 0.80467091],
[ 0.31434372, 0.62600624, 0.8049475 ],
[ 0.30516161, 0.618389 , 0.80528692],
[ 0.29623491, 0.61072284, 0.80569021],
[ 0.28759072, 0.60300319, 0.80616055],
[ 0.27923924, 0.59522877, 0.80669803],
[ 0.27114651, 0.5874047 , 0.80730545],
[ 0.26337153, 0.57952055, 0.80799113],
[ 0.25588696, 0.57157984, 0.80875922],
[ 0.248686 , 0.56358255, 0.80961366],
[ 0.24180668, 0.55552289, 0.81055123],
[ 0.23526251, 0.54739477, 0.8115939 ],
[ 0.22921445, 0.53918506, 0.81267292],
[ 0.22397687, 0.53086094, 0.8137141 ],
[ 0.21977058, 0.52241482, 0.81457651],
[ 0.21658989, 0.51384321, 0.81528511],
[ 0.21452772, 0.50514155, 0.81577278],
[ 0.21372783, 0.49630865, 0.81589566],
[ 0.21409503, 0.48734861, 0.81566163],
[ 0.2157176 , 0.47827123, 0.81487615],
[ 0.21842857, 0.46909168, 0.81351614],
[ 0.22211705, 0.45983212, 0.81146983],
[ 0.22665681, 0.45052233, 0.80860217],
[ 0.23176013, 0.44119137, 0.80494325],
[ 0.23727775, 0.43187704, 0.80038017],
[ 0.24298285, 0.42261123, 0.79493267],
[ 0.24865068, 0.41341842, 0.78869164],
[ 0.25423116, 0.40433127, 0.78155831],
[ 0.25950239, 0.39535521, 0.77376848],
[ 0.2644736 , 0.38651212, 0.76524809],
[ 0.26901584, 0.37779582, 0.75621942],
[ 0.27318141, 0.36922056, 0.746605 ],
[ 0.27690355, 0.3607736 , 0.73659374],
[ 0.28023585, 0.35244234, 0.72622103],
[ 0.28306009, 0.34438449, 0.71500731],
[ 0.28535896, 0.33660243, 0.70303975],
[ 0.28708711, 0.32912157, 0.69034504],
[ 0.28816354, 0.32200604, 0.67684067],
[ 0.28862749, 0.31519824, 0.66278813],
[ 0.28847904, 0.30869064, 0.6482815 ],
[ 0.28770912, 0.30250126, 0.63331265],
[ 0.28640325, 0.29655509, 0.61811374],
[ 0.28458943, 0.29082155, 0.60280913],
[ 0.28233561, 0.28527482, 0.58742866],
[ 0.27967038, 0.2798938 , 0.57204225],
[ 0.27665361, 0.27465357, 0.55667809],
[ 0.27332564, 0.2695165 , 0.54145387],
[ 0.26973851, 0.26447054, 0.52634916],
[ 0.2659204 , 0.25949691, 0.511417 ],
[ 0.26190145, 0.25458123, 0.49668768],
[ 0.2577151 , 0.24971691, 0.48214874],
[ 0.25337618, 0.24490494, 0.46778758],
[ 0.24890842, 0.24013332, 0.45363816],
[ 0.24433654, 0.23539226, 0.4397245 ],
[ 0.23967922, 0.23067729, 0.4260591 ],
[ 0.23495608, 0.22598894, 0.41262952],
[ 0.23018113, 0.22132414, 0.39945577],
[ 0.22534609, 0.21670847, 0.38645794],
[ 0.22048761, 0.21211723, 0.37372555],
[ 0.2156198 , 0.20755389, 0.36125301],
[ 0.21074637, 0.20302717, 0.34903192],
[ 0.20586893, 0.19855368, 0.33701661],
[ 0.20101757, 0.19411573, 0.32529173],
[ 0.19619947, 0.18972425, 0.31383846],
[ 0.19140726, 0.18540157, 0.30260777],
[ 0.1866769 , 0.1811332 , 0.29166583],
[ 0.18201285, 0.17694992, 0.28088776],
[ 0.17745228, 0.17282141, 0.27044211],
[ 0.17300684, 0.16876921, 0.26024893],
[ 0.16868273, 0.16479861, 0.25034479],
[ 0.16448691, 0.16091728, 0.24075373],
[ 0.16043195, 0.15714351, 0.23141745],
[ 0.15652427, 0.15348248, 0.22238175],
[ 0.15277065, 0.14994111, 0.21368395],
[ 0.14918274, 0.14653431, 0.20529486],
[ 0.14577095, 0.14327403, 0.19720829],
[ 0.14254381, 0.14016944, 0.18944326],
[ 0.13951035, 0.13723063, 0.18201072],
[ 0.13667798, 0.13446606, 0.17493774],
[ 0.13405762, 0.13188822, 0.16820842],
[ 0.13165767, 0.12950667, 0.16183275],
[ 0.12948748, 0.12733187, 0.15580631],
[ 0.12755435, 0.1253723 , 0.15014098],
[ 0.12586516, 0.12363617, 0.1448459 ],
[ 0.12442647, 0.12213143, 0.13992571],
[ 0.12324241, 0.12086419, 0.13539995],
[ 0.12232067, 0.11984278, 0.13124644],
[ 0.12166209, 0.11907077, 0.12749671],
[ 0.12126982, 0.11855309, 0.12415079],
[ 0.12114244, 0.11829179, 0.1212385 ],
[ 0.12127766, 0.11828837, 0.11878534],
[ 0.12284806, 0.1179729 , 0.11772022],
[ 0.12619498, 0.11721796, 0.11770203],
[ 0.129968 , 0.11663788, 0.11792377],
[ 0.13410011, 0.11625146, 0.11839138],
[ 0.13855459, 0.11606618, 0.11910584],
[ 0.14333775, 0.11607038, 0.1200606 ],
[ 0.148417 , 0.11626929, 0.12125453],
[ 0.15377389, 0.11666192, 0.12268364],
[ 0.15941427, 0.11723486, 0.12433911],
[ 0.16533376, 0.11797856, 0.12621303],
[ 0.17152547, 0.11888403, 0.12829735],
[ 0.17797765, 0.11994436, 0.13058435],
[ 0.18468769, 0.12114722, 0.13306426],
[ 0.19165663, 0.12247737, 0.13572616],
[ 0.19884415, 0.12394381, 0.1385669 ],
[ 0.20627181, 0.12551883, 0.14157124],
[ 0.21394877, 0.12718055, 0.14472604],
[ 0.22184572, 0.12893119, 0.14802579],
[ 0.22994394, 0.13076731, 0.15146314],
[ 0.23823937, 0.13267611, 0.15502793],
[ 0.24676041, 0.13462172, 0.15870321],
[ 0.25546457, 0.13661751, 0.16248722],
[ 0.26433628, 0.13865956, 0.16637301],
[ 0.27341345, 0.14070412, 0.17034221],
[ 0.28264773, 0.14277192, 0.1743957 ],
[ 0.29202272, 0.14486161, 0.17852793],
[ 0.30159648, 0.14691224, 0.1827169 ],
[ 0.31129002, 0.14897583, 0.18695213],
[ 0.32111555, 0.15103351, 0.19119629],
[ 0.33107961, 0.1530674 , 0.19543758],
[ 0.34119892, 0.15504762, 0.1996803 ],
[ 0.35142388, 0.15701131, 0.20389086],
[ 0.36178937, 0.1589124 , 0.20807639],
[ 0.37229381, 0.16073993, 0.21223189],
[ 0.38288348, 0.16254006, 0.2163249 ],
[ 0.39359592, 0.16426336, 0.22036577],
[ 0.40444332, 0.16588767, 0.22434027],
[ 0.41537995, 0.16745325, 0.2282297 ],
[ 0.42640867, 0.16894939, 0.23202755],
[ 0.43754706, 0.17034847, 0.23572899],
[ 0.44878564, 0.1716535 , 0.23932344],
[ 0.4601126 , 0.17287365, 0.24278607],
[ 0.47151732, 0.17401641, 0.24610337],
[ 0.48300689, 0.17506676, 0.2492737 ],
[ 0.49458302, 0.17601892, 0.25227688],
[ 0.50623876, 0.17687777, 0.255096 ],
[ 0.5179623 , 0.17765528, 0.2577162 ],
[ 0.52975234, 0.17835232, 0.2601134 ],
[ 0.54159776, 0.17898292, 0.26226847],
[ 0.55348804, 0.17956232, 0.26416003],
[ 0.56541729, 0.18010175, 0.26575971],
[ 0.57736669, 0.180631 , 0.26704888],
[ 0.58932081, 0.18117827, 0.26800409],
[ 0.60127582, 0.18175888, 0.26858488],
[ 0.61319563, 0.1824336 , 0.2687872 ],
[ 0.62506376, 0.18324015, 0.26858301],
[ 0.63681202, 0.18430173, 0.26795276],
[ 0.64842603, 0.18565472, 0.26689463],
[ 0.65988195, 0.18734638, 0.26543435],
[ 0.67111966, 0.18948885, 0.26357955],
[ 0.68209194, 0.19216636, 0.26137175],
[ 0.69281185, 0.19535326, 0.25887063],
[ 0.70335022, 0.19891271, 0.25617971],
[ 0.71375229, 0.20276438, 0.25331365],
[ 0.72401436, 0.20691287, 0.25027366],
[ 0.73407638, 0.21145051, 0.24710661],
[ 0.74396983, 0.21631913, 0.24380715],
[ 0.75361506, 0.22163653, 0.24043996],
[ 0.7630579 , 0.22731637, 0.23700095],
[ 0.77222228, 0.23346231, 0.23356628],
[ 0.78115441, 0.23998404, 0.23013825],
[ 0.78979746, 0.24694858, 0.22678822],
[ 0.79819286, 0.25427223, 0.22352658],
[ 0.80630444, 0.26198807, 0.22040877],
[ 0.81417437, 0.27001406, 0.21744645],
[ 0.82177364, 0.27837336, 0.21468316],
[ 0.82915955, 0.28696963, 0.21210766],
[ 0.83628628, 0.2958499 , 0.20977813],
[ 0.84322168, 0.30491136, 0.20766435],
[ 0.84995458, 0.31415945, 0.2057863 ],
[ 0.85648867, 0.32358058, 0.20415327],
[ 0.86286243, 0.33312058, 0.20274969],
[ 0.86908321, 0.34276705, 0.20157271],
[ 0.87512876, 0.3525416 , 0.20064949],
[ 0.88100349, 0.36243385, 0.19999078],
[ 0.8866469 , 0.37249496, 0.1997976 ],
[ 0.89203964, 0.38273475, 0.20013431],
[ 0.89713496, 0.39318156, 0.20121514],
[ 0.90195099, 0.40380687, 0.20301555],
[ 0.90648379, 0.41460191, 0.20558847],
[ 0.9106967 , 0.42557857, 0.20918529],
[ 0.91463791, 0.43668557, 0.21367954],
[ 0.91830723, 0.44790913, 0.21916352],
[ 0.92171507, 0.45922856, 0.22568002],
[ 0.92491786, 0.4705936 , 0.23308207],
[ 0.92790792, 0.48200153, 0.24145932],
[ 0.93073701, 0.49341219, 0.25065486],
[ 0.93343918, 0.5048017 , 0.26056148],
[ 0.93602064, 0.51616486, 0.27118485],
[ 0.93850535, 0.52748892, 0.28242464],
[ 0.94092933, 0.53875462, 0.29416042],
[ 0.94330011, 0.5499628 , 0.30634189],
[ 0.94563159, 0.56110987, 0.31891624],
[ 0.94792955, 0.57219822, 0.33184256],
[ 0.95020929, 0.5832232 , 0.34508419],
[ 0.95247324, 0.59419035, 0.35859866],
[ 0.95471709, 0.60510869, 0.37236035],
[ 0.95698411, 0.61595766, 0.38629631],
[ 0.95923863, 0.62676473, 0.40043317],
[ 0.9615041 , 0.6375203 , 0.41474106],
[ 0.96371553, 0.64826619, 0.42928335],
[ 0.96591497, 0.65899621, 0.44380444],
[ 0.96809871, 0.66971662, 0.45830232],
[ 0.9702495 , 0.6804394 , 0.47280492],
[ 0.9723881 , 0.69115622, 0.48729272],
[ 0.97450723, 0.70187358, 0.50178034],
[ 0.9766108 , 0.712592 , 0.51626837],
[ 0.97871716, 0.72330511, 0.53074053],
[ 0.98082222, 0.73401769, 0.54520694],
[ 0.9829001 , 0.74474445, 0.5597019 ],
[ 0.98497466, 0.75547635, 0.57420239],
[ 0.98705581, 0.76621129, 0.58870185],
[ 0.98913325, 0.77695637, 0.60321626],
[ 0.99119918, 0.78771716, 0.61775821],
[ 0.9932672 , 0.79848979, 0.63231691],
[ 0.99535958, 0.80926704, 0.64687278],
[ 0.99740544, 0.82008078, 0.66150571],
[ 0.9992197 , 0.83100723, 0.6764127 ]
]
_flare_lut = [
[0.92907237, 0.68878959, 0.50411509],
[0.92891402, 0.68494686, 0.50173994],
[0.92864754, 0.68116207, 0.4993754],
[0.92836112, 0.67738527, 0.49701572],
[0.9280599, 0.67361354, 0.49466044],
[0.92775569, 0.66983999, 0.49230866],
[0.9274375, 0.66607098, 0.48996097],
[0.927111, 0.66230315, 0.48761688],
[0.92677996, 0.6585342, 0.485276],
[0.92644317, 0.65476476, 0.48293832],
[0.92609759, 0.65099658, 0.48060392],
[0.925747, 0.64722729, 0.47827244],
[0.92539502, 0.64345456, 0.47594352],
[0.92503106, 0.6396848, 0.47361782],
[0.92466877, 0.6359095, 0.47129427],
[0.92429828, 0.63213463, 0.46897349],
[0.92392172, 0.62835879, 0.46665526],
[0.92354597, 0.62457749, 0.46433898],
[0.9231622, 0.6207962, 0.46202524],
[0.92277222, 0.61701365, 0.45971384],
[0.92237978, 0.61322733, 0.45740444],
[0.92198615, 0.60943622, 0.45509686],
[0.92158735, 0.60564276, 0.45279137],
[0.92118373, 0.60184659, 0.45048789],
[0.92077582, 0.59804722, 0.44818634],
[0.92036413, 0.59424414, 0.44588663],
[0.91994924, 0.5904368, 0.44358868],
[0.91952943, 0.58662619, 0.4412926],
[0.91910675, 0.58281075, 0.43899817],
[0.91868096, 0.57899046, 0.4367054],
[0.91825103, 0.57516584, 0.43441436],
[0.91781857, 0.57133556, 0.43212486],
[0.9173814, 0.56750099, 0.4298371],
[0.91694139, 0.56366058, 0.42755089],
[0.91649756, 0.55981483, 0.42526631],
[0.91604942, 0.55596387, 0.42298339],
[0.9155979, 0.55210684, 0.42070204],
[0.9151409, 0.54824485, 0.4184247],
[0.91466138, 0.54438817, 0.41617858],
[0.91416896, 0.54052962, 0.41396347],
[0.91366559, 0.53666778, 0.41177769],
[0.91315173, 0.53280208, 0.40962196],
[0.91262605, 0.52893336, 0.40749715],
[0.91208866, 0.52506133, 0.40540404],
[0.91153952, 0.52118582, 0.40334346],
[0.91097732, 0.51730767, 0.4013163],
[0.910403, 0.51342591, 0.39932342],
[0.90981494, 0.50954168, 0.39736571],
[0.90921368, 0.5056543, 0.39544411],
[0.90859797, 0.50176463, 0.39355952],
[0.90796841, 0.49787195, 0.39171297],
[0.90732341, 0.4939774, 0.38990532],
[0.90666382, 0.49008006, 0.38813773],
[0.90598815, 0.486181, 0.38641107],
[0.90529624, 0.48228017, 0.38472641],
[0.90458808, 0.47837738, 0.38308489],
[0.90386248, 0.47447348, 0.38148746],
[0.90311921, 0.4705685, 0.37993524],
[0.90235809, 0.46666239, 0.37842943],
[0.90157824, 0.46275577, 0.37697105],
[0.90077904, 0.45884905, 0.37556121],
[0.89995995, 0.45494253, 0.37420106],
[0.89912041, 0.4510366, 0.37289175],
[0.8982602, 0.44713126, 0.37163458],
[0.89737819, 0.44322747, 0.37043052],
[0.89647387, 0.43932557, 0.36928078],
[0.89554477, 0.43542759, 0.36818855],
[0.89458871, 0.4315354, 0.36715654],
[0.89360794, 0.42764714, 0.36618273],
[0.89260152, 0.42376366, 0.36526813],
[0.8915687, 0.41988565, 0.36441384],
[0.89050882, 0.41601371, 0.36362102],
[0.8894159, 0.41215334, 0.36289639],
[0.888292, 0.40830288, 0.36223756],
[0.88713784, 0.40446193, 0.36164328],
[0.88595253, 0.40063149, 0.36111438],
[0.88473115, 0.39681635, 0.3606566],
[0.88347246, 0.39301805, 0.36027074],
[0.88217931, 0.38923439, 0.35995244],
[0.880851, 0.38546632, 0.35970244],
[0.87947728, 0.38172422, 0.35953127],
[0.87806542, 0.37800172, 0.35942941],
[0.87661509, 0.37429964, 0.35939659],
[0.87511668, 0.37062819, 0.35944178],
[0.87357554, 0.36698279, 0.35955811],
[0.87199254, 0.3633634, 0.35974223],
[0.87035691, 0.35978174, 0.36000516],
[0.86867647, 0.35623087, 0.36033559],
[0.86694949, 0.35271349, 0.36073358],
[0.86516775, 0.34923921, 0.36120624],
[0.86333996, 0.34580008, 0.36174113],
[0.86145909, 0.3424046, 0.36234402],
[0.85952586, 0.33905327, 0.36301129],
[0.85754536, 0.33574168, 0.36373567],
[0.855514, 0.33247568, 0.36451271],
[0.85344392, 0.32924217, 0.36533344],
[0.8513284, 0.32604977, 0.36620106],
[0.84916723, 0.32289973, 0.36711424],
[0.84696243, 0.31979068, 0.36806976],
[0.84470627, 0.31673295, 0.36907066],
[0.84240761, 0.31371695, 0.37010969],
[0.84005337, 0.31075974, 0.37119284],
[0.83765537, 0.30784814, 0.3723105],
[0.83520234, 0.30499724, 0.37346726],
[0.83270291, 0.30219766, 0.37465552],
[0.83014895, 0.29946081, 0.37587769],
[0.82754694, 0.29677989, 0.37712733],
[0.82489111, 0.29416352, 0.37840532],
[0.82218644, 0.29160665, 0.37970606],
[0.81942908, 0.28911553, 0.38102921],
[0.81662276, 0.28668665, 0.38236999],
[0.81376555, 0.28432371, 0.383727],
[0.81085964, 0.28202508, 0.38509649],
[0.8079055, 0.27979128, 0.38647583],
[0.80490309, 0.27762348, 0.3878626],
[0.80185613, 0.2755178, 0.38925253],
[0.79876118, 0.27347974, 0.39064559],
[0.79562644, 0.27149928, 0.39203532],
[0.79244362, 0.2695883, 0.39342447],
[0.78922456, 0.26773176, 0.3948046],
[0.78596161, 0.26594053, 0.39617873],
[0.7826624, 0.26420493, 0.39754146],
[0.77932717, 0.26252522, 0.39889102],
[0.77595363, 0.2609049, 0.4002279],
[0.77254999, 0.25933319, 0.40154704],
[0.76911107, 0.25781758, 0.40284959],
[0.76564158, 0.25635173, 0.40413341],
[0.76214598, 0.25492998, 0.40539471],
[0.75861834, 0.25356035, 0.40663694],
[0.75506533, 0.25223402, 0.40785559],
[0.75148963, 0.2509473, 0.40904966],
[0.74788835, 0.24970413, 0.41022028],
[0.74426345, 0.24850191, 0.41136599],
[0.74061927, 0.24733457, 0.41248516],
[0.73695678, 0.24620072, 0.41357737],
[0.73327278, 0.24510469, 0.41464364],
[0.72957096, 0.24404127, 0.4156828],
[0.72585394, 0.24300672, 0.41669383],
[0.7221226, 0.24199971, 0.41767651],
[0.71837612, 0.24102046, 0.41863486],
[0.71463236, 0.24004289, 0.41956983],
[0.7108932, 0.23906316, 0.42048681],
[0.70715842, 0.23808142, 0.42138647],
[0.70342811, 0.2370976, 0.42226844],
[0.69970218, 0.23611179, 0.42313282],
[0.69598055, 0.2351247, 0.42397678],
[0.69226314, 0.23413578, 0.42480327],
[0.68854988, 0.23314511, 0.42561234],
[0.68484064, 0.23215279, 0.42640419],
[0.68113541, 0.23115942, 0.42717615],
[0.67743412, 0.23016472, 0.42792989],
[0.67373662, 0.22916861, 0.42866642],
[0.67004287, 0.22817117, 0.42938576],
[0.66635279, 0.22717328, 0.43008427],
[0.66266621, 0.22617435, 0.43076552],
[0.65898313, 0.22517434, 0.43142956],
[0.65530349, 0.22417381, 0.43207427],
[0.65162696, 0.22317307, 0.4327001],
[0.64795375, 0.22217149, 0.43330852],
[0.64428351, 0.22116972, 0.43389854],
[0.64061624, 0.22016818, 0.43446845],
[0.63695183, 0.21916625, 0.43502123],
[0.63329016, 0.21816454, 0.43555493],
[0.62963102, 0.2171635, 0.43606881],
[0.62597451, 0.21616235, 0.43656529],
[0.62232019, 0.21516239, 0.43704153],
[0.61866821, 0.21416307, 0.43749868],
[0.61501835, 0.21316435, 0.43793808],
[0.61137029, 0.21216761, 0.4383556],
[0.60772426, 0.2111715, 0.43875552],
[0.60407977, 0.21017746, 0.43913439],
[0.60043678, 0.20918503, 0.43949412],
[0.59679524, 0.20819447, 0.43983393],
[0.59315487, 0.20720639, 0.44015254],
[0.58951566, 0.20622027, 0.44045213],
[0.58587715, 0.20523751, 0.44072926],
[0.5822395, 0.20425693, 0.44098758],
[0.57860222, 0.20328034, 0.44122241],
[0.57496549, 0.20230637, 0.44143805],
[0.57132875, 0.20133689, 0.4416298],
[0.56769215, 0.20037071, 0.44180142],
[0.5640552, 0.19940936, 0.44194923],
[0.56041794, 0.19845221, 0.44207535],
[0.55678004, 0.1975, 0.44217824],
[0.55314129, 0.19655316, 0.44225723],
[0.54950166, 0.19561118, 0.44231412],
[0.54585987, 0.19467771, 0.44234111],
[0.54221157, 0.19375869, 0.44233698],
[0.5385549, 0.19285696, 0.44229959],
[0.5348913, 0.19197036, 0.44222958],
[0.53122177, 0.1910974, 0.44212735],
[0.52754464, 0.19024042, 0.44199159],
[0.52386353, 0.18939409, 0.44182449],
[0.52017476, 0.18856368, 0.44162345],
[0.51648277, 0.18774266, 0.44139128],
[0.51278481, 0.18693492, 0.44112605],
[0.50908361, 0.18613639, 0.4408295],
[0.50537784, 0.18534893, 0.44050064],
[0.50166912, 0.18457008, 0.44014054],
[0.49795686, 0.18380056, 0.43974881],
[0.49424218, 0.18303865, 0.43932623],
[0.49052472, 0.18228477, 0.43887255],
[0.48680565, 0.1815371, 0.43838867],
[0.48308419, 0.18079663, 0.43787408],
[0.47936222, 0.18006056, 0.43733022],
[0.47563799, 0.17933127, 0.43675585],
[0.47191466, 0.17860416, 0.43615337],
[0.46818879, 0.17788392, 0.43552047],
[0.46446454, 0.17716458, 0.43486036],
[0.46073893, 0.17645017, 0.43417097],
[0.45701462, 0.17573691, 0.43345429],
[0.45329097, 0.17502549, 0.43271025],
[0.44956744, 0.17431649, 0.4319386],
[0.44584668, 0.17360625, 0.43114133],
[0.44212538, 0.17289906, 0.43031642],
[0.43840678, 0.17219041, 0.42946642],
[0.43469046, 0.17148074, 0.42859124],
[0.4309749, 0.17077192, 0.42769008],
[0.42726297, 0.17006003, 0.42676519],
[0.42355299, 0.16934709, 0.42581586],
[0.41984535, 0.16863258, 0.42484219],
[0.41614149, 0.16791429, 0.42384614],
[0.41244029, 0.16719372, 0.42282661],
[0.40874177, 0.16647061, 0.42178429],
[0.40504765, 0.16574261, 0.42072062],
[0.401357, 0.16501079, 0.41963528],
[0.397669, 0.16427607, 0.418528],
[0.39398585, 0.16353554, 0.41740053],
[0.39030735, 0.16278924, 0.41625344],
[0.3866314, 0.16203977, 0.41508517],
[0.38295904, 0.16128519, 0.41389849],
[0.37928736, 0.16052483, 0.41270599],
[0.37562649, 0.15974704, 0.41151182],
[0.37197803, 0.15895049, 0.41031532],
[0.36833779, 0.15813871, 0.40911916],
[0.36470944, 0.15730861, 0.40792149],
[0.36109117, 0.15646169, 0.40672362],
[0.35748213, 0.15559861, 0.40552633],
[0.353885, 0.15471714, 0.40432831],
[0.35029682, 0.15381967, 0.4031316],
[0.34671861, 0.1529053, 0.40193587],
[0.34315191, 0.15197275, 0.40074049],
[0.33959331, 0.15102466, 0.3995478],
[0.33604378, 0.15006017, 0.39835754],
[0.33250529, 0.14907766, 0.39716879],
[0.32897621, 0.14807831, 0.39598285],
[0.3254559, 0.14706248, 0.39480044],
[0.32194567, 0.14602909, 0.39362106],
[0.31844477, 0.14497857, 0.39244549],
[0.31494974, 0.14391333, 0.39127626],
[0.31146605, 0.14282918, 0.39011024],
[0.30798857, 0.1417297, 0.38895105],
[0.30451661, 0.14061515, 0.38779953],
[0.30105136, 0.13948445, 0.38665531],
[0.2975886, 0.1383403, 0.38552159],
[0.29408557, 0.13721193, 0.38442775]
]
_crest_lut = [
[0.6468274, 0.80289262, 0.56592265],
[0.64233318, 0.80081141, 0.56639461],
[0.63791969, 0.7987162, 0.56674976],
[0.6335316, 0.79661833, 0.56706128],
[0.62915226, 0.7945212, 0.56735066],
[0.62477862, 0.79242543, 0.56762143],
[0.62042003, 0.79032918, 0.56786129],
[0.61606327, 0.78823508, 0.56808666],
[0.61171322, 0.78614216, 0.56829092],
[0.60736933, 0.78405055, 0.56847436],
[0.60302658, 0.78196121, 0.56864272],
[0.59868708, 0.77987374, 0.56879289],
[0.59435366, 0.77778758, 0.56892099],
[0.59001953, 0.77570403, 0.56903477],
[0.58568753, 0.77362254, 0.56913028],
[0.58135593, 0.77154342, 0.56920908],
[0.57702623, 0.76946638, 0.56926895],
[0.57269165, 0.76739266, 0.5693172],
[0.56835934, 0.76532092, 0.56934507],
[0.56402533, 0.76325185, 0.56935664],
[0.55968429, 0.76118643, 0.56935732],
[0.55534159, 0.75912361, 0.56934052],
[0.55099572, 0.75706366, 0.56930743],
[0.54664626, 0.75500662, 0.56925799],
[0.54228969, 0.75295306, 0.56919546],
[0.53792417, 0.75090328, 0.56912118],
[0.53355172, 0.74885687, 0.5690324],
[0.52917169, 0.74681387, 0.56892926],
[0.52478243, 0.74477453, 0.56881287],
[0.52038338, 0.74273888, 0.56868323],
[0.5159739, 0.74070697, 0.56854039],
[0.51155269, 0.73867895, 0.56838507],
[0.50711872, 0.73665492, 0.56821764],
[0.50267118, 0.73463494, 0.56803826],
[0.49822926, 0.73261388, 0.56785146],
[0.49381422, 0.73058524, 0.56767484],
[0.48942421, 0.72854938, 0.56751036],
[0.48505993, 0.72650623, 0.56735752],
[0.48072207, 0.72445575, 0.56721583],
[0.4764113, 0.72239788, 0.56708475],
[0.47212827, 0.72033258, 0.56696376],
[0.46787361, 0.71825983, 0.56685231],
[0.46364792, 0.71617961, 0.56674986],
[0.45945271, 0.71409167, 0.56665625],
[0.45528878, 0.71199595, 0.56657103],
[0.45115557, 0.70989276, 0.5664931],
[0.44705356, 0.70778212, 0.56642189],
[0.44298321, 0.70566406, 0.56635683],
[0.43894492, 0.70353863, 0.56629734],
[0.43493911, 0.70140588, 0.56624286],
[0.43096612, 0.69926587, 0.5661928],
[0.42702625, 0.69711868, 0.56614659],
[0.42311977, 0.69496438, 0.56610368],
[0.41924689, 0.69280308, 0.56606355],
[0.41540778, 0.69063486, 0.56602564],
[0.41160259, 0.68845984, 0.56598944],
[0.40783143, 0.68627814, 0.56595436],
[0.40409434, 0.68408988, 0.56591994],
[0.40039134, 0.68189518, 0.56588564],
[0.39672238, 0.6796942, 0.56585103],
[0.39308781, 0.67748696, 0.56581581],
[0.38949137, 0.67527276, 0.56578084],
[0.38592889, 0.67305266, 0.56574422],
[0.38240013, 0.67082685, 0.56570561],
[0.37890483, 0.66859548, 0.56566462],
[0.37544276, 0.66635871, 0.56562081],
[0.37201365, 0.66411673, 0.56557372],
[0.36861709, 0.6618697, 0.5655231],
[0.36525264, 0.65961782, 0.56546873],
[0.36191986, 0.65736125, 0.56541032],
[0.35861935, 0.65509998, 0.56534768],
[0.35535621, 0.65283302, 0.56528211],
[0.35212361, 0.65056188, 0.56521171],
[0.34892097, 0.64828676, 0.56513633],
[0.34574785, 0.64600783, 0.56505539],
[0.34260357, 0.64372528, 0.5649689],
[0.33948744, 0.64143931, 0.56487679],
[0.33639887, 0.6391501, 0.56477869],
[0.33334501, 0.63685626, 0.56467661],
[0.33031952, 0.63455911, 0.564569],
[0.3273199, 0.63225924, 0.56445488],
[0.32434526, 0.62995682, 0.56433457],
[0.32139487, 0.62765201, 0.56420795],
[0.31846807, 0.62534504, 0.56407446],
[0.3155731, 0.62303426, 0.56393695],
[0.31270304, 0.62072111, 0.56379321],
[0.30985436, 0.61840624, 0.56364307],
[0.30702635, 0.61608984, 0.56348606],
[0.30421803, 0.61377205, 0.56332267],
[0.30143611, 0.61145167, 0.56315419],
[0.29867863, 0.60912907, 0.56298054],
[0.29593872, 0.60680554, 0.56280022],
[0.29321538, 0.60448121, 0.56261376],
[0.2905079, 0.60215628, 0.56242036],
[0.28782827, 0.5998285, 0.56222366],
[0.28516521, 0.59749996, 0.56202093],
[0.28251558, 0.59517119, 0.56181204],
[0.27987847, 0.59284232, 0.56159709],
[0.27726216, 0.59051189, 0.56137785],
[0.27466434, 0.58818027, 0.56115433],
[0.2720767, 0.58584893, 0.56092486],
[0.26949829, 0.58351797, 0.56068983],
[0.26693801, 0.58118582, 0.56045121],
[0.26439366, 0.57885288, 0.56020858],
[0.26185616, 0.57652063, 0.55996077],
[0.25932459, 0.57418919, 0.55970795],
[0.25681303, 0.57185614, 0.55945297],
[0.25431024, 0.56952337, 0.55919385],
[0.25180492, 0.56719255, 0.5589305],
[0.24929311, 0.56486397, 0.5586654],
[0.24678356, 0.56253666, 0.55839491],
[0.24426587, 0.56021153, 0.55812473],
[0.24174022, 0.55788852, 0.55785448],
[0.23921167, 0.55556705, 0.55758211],
[0.23668315, 0.55324675, 0.55730676],
[0.23414742, 0.55092825, 0.55703167],
[0.23160473, 0.54861143, 0.5567573],
[0.22905996, 0.54629572, 0.55648168],
[0.22651648, 0.54398082, 0.5562029],
[0.22396709, 0.54166721, 0.55592542],
[0.22141221, 0.53935481, 0.55564885],
[0.21885269, 0.53704347, 0.55537294],
[0.21629986, 0.53473208, 0.55509319],
[0.21374297, 0.53242154, 0.5548144],
[0.21118255, 0.53011166, 0.55453708],
[0.2086192, 0.52780237, 0.55426067],
[0.20605624, 0.52549322, 0.55398479],
[0.20350004, 0.5231837, 0.55370601],
[0.20094292, 0.52087429, 0.55342884],
[0.19838567, 0.51856489, 0.55315283],
[0.19582911, 0.51625531, 0.55287818],
[0.19327413, 0.51394542, 0.55260469],
[0.19072933, 0.51163448, 0.5523289],
[0.18819045, 0.50932268, 0.55205372],
[0.18565609, 0.50701014, 0.55177937],
[0.18312739, 0.50469666, 0.55150597],
[0.18060561, 0.50238204, 0.55123374],
[0.178092, 0.50006616, 0.55096224],
[0.17558808, 0.49774882, 0.55069118],
[0.17310341, 0.49542924, 0.5504176],
[0.17063111, 0.49310789, 0.55014445],
[0.1681728, 0.49078458, 0.54987159],
[0.1657302, 0.48845913, 0.54959882],
[0.16330517, 0.48613135, 0.54932605],
[0.16089963, 0.48380104, 0.54905306],
[0.15851561, 0.48146803, 0.54877953],
[0.15615526, 0.47913212, 0.54850526],
[0.15382083, 0.47679313, 0.54822991],
[0.15151471, 0.47445087, 0.54795318],
[0.14924112, 0.47210502, 0.54767411],
[0.1470032, 0.46975537, 0.54739226],
[0.14480101, 0.46740187, 0.54710832],
[0.14263736, 0.46504434, 0.54682188],
[0.14051521, 0.46268258, 0.54653253],
[0.13843761, 0.46031639, 0.54623985],
[0.13640774, 0.45794558, 0.5459434],
[0.13442887, 0.45556994, 0.54564272],
[0.1325044, 0.45318928, 0.54533736],
[0.13063777, 0.4508034, 0.54502674],
[0.12883252, 0.44841211, 0.5447104],
[0.12709242, 0.44601517, 0.54438795],
[0.1254209, 0.44361244, 0.54405855],
[0.12382162, 0.44120373, 0.54372156],
[0.12229818, 0.43878887, 0.54337634],
[0.12085453, 0.4363676, 0.54302253],
[0.11949938, 0.43393955, 0.54265715],
[0.11823166, 0.43150478, 0.54228104],
[0.11705496, 0.42906306, 0.54189388],
[0.115972, 0.42661431, 0.54149449],
[0.11498598, 0.42415835, 0.54108222],
[0.11409965, 0.42169502, 0.54065622],
[0.11331533, 0.41922424, 0.5402155],
[0.11263542, 0.41674582, 0.53975931],
[0.1120615, 0.4142597, 0.53928656],
[0.11159738, 0.41176567, 0.53879549],
[0.11125248, 0.40926325, 0.53828203],
[0.11101698, 0.40675289, 0.53774864],
[0.11089152, 0.40423445, 0.53719455],
[0.11085121, 0.4017095, 0.53662425],
[0.11087217, 0.39917938, 0.53604354],
[0.11095515, 0.39664394, 0.53545166],
[0.11110676, 0.39410282, 0.53484509],
[0.11131735, 0.39155635, 0.53422678],
[0.11158595, 0.38900446, 0.53359634],
[0.11191139, 0.38644711, 0.5329534],
[0.11229224, 0.38388426, 0.53229748],
[0.11273683, 0.38131546, 0.53162393],
[0.11323438, 0.37874109, 0.53093619],
[0.11378271, 0.37616112, 0.53023413],
[0.11437992, 0.37357557, 0.52951727],
[0.11502681, 0.37098429, 0.52878396],
[0.11572661, 0.36838709, 0.52803124],
[0.11646936, 0.36578429, 0.52726234],
[0.11725299, 0.3631759, 0.52647685],
[0.1180755, 0.36056193, 0.52567436],
[0.1189438, 0.35794203, 0.5248497],
[0.11984752, 0.35531657, 0.52400649],
[0.1207833, 0.35268564, 0.52314492],
[0.12174895, 0.35004927, 0.52226461],
[0.12274959, 0.34740723, 0.52136104],
[0.12377809, 0.34475975, 0.52043639],
[0.12482961, 0.34210702, 0.51949179],
[0.125902, 0.33944908, 0.51852688],
[0.12699998, 0.33678574, 0.51753708],
[0.12811691, 0.33411727, 0.51652464],
[0.12924811, 0.33144384, 0.51549084],
[0.13039157, 0.32876552, 0.51443538],
[0.13155228, 0.32608217, 0.51335321],
[0.13272282, 0.32339407, 0.51224759],
[0.13389954, 0.32070138, 0.51111946],
[0.13508064, 0.31800419, 0.50996862],
[0.13627149, 0.31530238, 0.50878942],
[0.13746376, 0.31259627, 0.50758645],
[0.13865499, 0.30988598, 0.50636017],
[0.13984364, 0.30717161, 0.50511042],
[0.14103515, 0.30445309, 0.50383119],
[0.14222093, 0.30173071, 0.50252813],
[0.14339946, 0.2990046, 0.50120127],
[0.14456941, 0.29627483, 0.49985054],
[0.14573579, 0.29354139, 0.49847009],
[0.14689091, 0.29080452, 0.49706566],
[0.1480336, 0.28806432, 0.49563732],
[0.1491628, 0.28532086, 0.49418508],
[0.15028228, 0.28257418, 0.49270402],
[0.15138673, 0.27982444, 0.49119848],
[0.15247457, 0.27707172, 0.48966925],
[0.15354487, 0.2743161, 0.48811641],
[0.15459955, 0.27155765, 0.4865371],
[0.15563716, 0.26879642, 0.4849321],
[0.1566572, 0.26603191, 0.48330429],
[0.15765823, 0.26326032, 0.48167456],
[0.15862147, 0.26048295, 0.48005785],
[0.15954301, 0.25770084, 0.47845341],
[0.16043267, 0.25491144, 0.4768626],
[0.16129262, 0.25211406, 0.4752857],
[0.1621119, 0.24931169, 0.47372076],
[0.16290577, 0.24649998, 0.47217025],
[0.16366819, 0.24368054, 0.47063302],
[0.1644021, 0.24085237, 0.46910949],
[0.16510882, 0.2380149, 0.46759982],
[0.16579015, 0.23516739, 0.46610429],
[0.1664433, 0.2323105, 0.46462219],
[0.16707586, 0.22944155, 0.46315508],
[0.16768475, 0.22656122, 0.46170223],
[0.16826815, 0.22366984, 0.46026308],
[0.16883174, 0.22076514, 0.45883891],
[0.16937589, 0.21784655, 0.45742976],
[0.16990129, 0.21491339, 0.45603578],
[0.1704074, 0.21196535, 0.45465677],
[0.17089473, 0.20900176, 0.4532928],
[0.17136819, 0.20602012, 0.45194524],
[0.17182683, 0.20302012, 0.45061386],
[0.17227059, 0.20000106, 0.44929865],
[0.17270583, 0.19695949, 0.44800165],
[0.17313804, 0.19389201, 0.44672488],
[0.17363177, 0.19076859, 0.44549087]
]
# Collect the 256-entry RGB lookup tables defined above into a single mapping
# from palette name to its list of [r, g, b] float triples.
_lut_dict = dict(
    rocket=_rocket_lut,
    mako=_mako_lut,
    icefire=_icefire_lut,
    vlag=_vlag_lut,
    flare=_flare_lut,
    crest=_crest_lut,
)
# For each palette, build a ListedColormap plus its reversed "_r" variant,
# expose both as attributes of this module, and register both by name via
# ``register_colormap``.
for _name, _lut in _lut_dict.items():
    _cmap = colors.ListedColormap(_lut, _name)
    # At module scope locals() is the module namespace, so this makes the
    # colormap importable as e.g. ``cm.rocket``.
    locals()[_name] = _cmap
    # Reversed variant: the same colors traversed back-to-front.
    _cmap_r = colors.ListedColormap(_lut[::-1], _name + "_r")
    locals()[_name + "_r"] = _cmap_r
    register_colormap(_name, _cmap)
    register_colormap(_name + "_r", _cmap_r)
# Drop the imported helpers so they do not leak into the module's public
# namespace alongside the colormaps.
del colors, register_colormap
|
mwaskomREPO_NAMEseabornPATH_START.@seaborn_extracted@seaborn-master@seaborn@cm.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "dsavransky/EXOSIMS",
"repo_path": "EXOSIMS_extracted/EXOSIMS-master/EXOSIMS/util/KeplerSTM_C/__init__.py",
"type": "Python"
}
|
dsavranskyREPO_NAMEEXOSIMSPATH_START.@EXOSIMS_extracted@EXOSIMS-master@EXOSIMS@util@KeplerSTM_C@__init__.py@.PATH_END.py
|
|
{
"filename": "history.py",
"repo_name": "langchain-ai/langchain",
"repo_path": "langchain_extracted/langchain-master/libs/core/langchain_core/runnables/history.py",
"type": "Python"
}
|
from __future__ import annotations
import inspect
from collections.abc import Sequence
from types import GenericAlias
from typing import (
TYPE_CHECKING,
Any,
Callable,
Optional,
Union,
)
from pydantic import BaseModel
from typing_extensions import override
from langchain_core.chat_history import BaseChatMessageHistory
from langchain_core.load.load import load
from langchain_core.runnables.base import Runnable, RunnableBindingBase, RunnableLambda
from langchain_core.runnables.passthrough import RunnablePassthrough
from langchain_core.runnables.utils import (
ConfigurableFieldSpec,
Output,
get_unique_config_specs,
)
from langchain_core.utils.pydantic import create_model_v2
if TYPE_CHECKING:
from langchain_core.language_models.base import LanguageModelLike
from langchain_core.messages.base import BaseMessage
from langchain_core.runnables.config import RunnableConfig
from langchain_core.tracers.schemas import Run
# Type alias: the message payload accepted/produced by the wrapped Runnable —
# either a bare sequence of chat messages or a dict carrying them under a key.
MessagesOrDictWithMessages = Union[Sequence["BaseMessage"], dict[str, Any]]
# Type alias: factory that looks up or creates the chat history for a session.
# Per the class docstring below, by default it receives a single string
# ``session_id`` and returns the matching ``BaseChatMessageHistory``.
GetSessionHistoryCallable = Callable[..., BaseChatMessageHistory]
class RunnableWithMessageHistory(RunnableBindingBase):
"""Runnable that manages chat message history for another Runnable.
A chat message history is a sequence of messages that represent a conversation.
RunnableWithMessageHistory wraps another Runnable and manages the chat message
history for it; it is responsible for reading and updating the chat message
history.
The formats supported for the inputs and outputs of the wrapped Runnable
are described below.
RunnableWithMessageHistory must always be called with a config that contains
the appropriate parameters for the chat message history factory.
By default, the Runnable is expected to take a single configuration parameter
called `session_id` which is a string. This parameter is used to create a new
or look up an existing chat message history that matches the given session_id.
In this case, the invocation would look like this:
`with_history.invoke(..., config={"configurable": {"session_id": "bar"}})`
; e.g., ``{"configurable": {"session_id": "<SESSION_ID>"}}``.
The configuration can be customized by passing in a list of
``ConfigurableFieldSpec`` objects to the ``history_factory_config`` parameter (see
example below).
In the examples, we will use a chat message history with an in-memory
implementation to make it easy to experiment and see the results.
For production use cases, you will want to use a persistent implementation
of chat message history, such as ``RedisChatMessageHistory``.
Parameters:
get_session_history: Function that returns a new BaseChatMessageHistory.
This function should either take a single positional argument
`session_id` of type string and return a corresponding
chat message history instance.
input_messages_key: Must be specified if the base runnable accepts a dict
as input. The key in the input dict that contains the messages.
output_messages_key: Must be specified if the base Runnable returns a dict
as output. The key in the output dict that contains the messages.
history_messages_key: Must be specified if the base runnable accepts a dict
as input and expects a separate key for historical messages.
history_factory_config: Configure fields that should be passed to the
chat history factory. See ``ConfigurableFieldSpec`` for more details.
Example: Chat message history with an in-memory implementation for testing.
.. code-block:: python
from operator import itemgetter
from typing import List
from langchain_openai.chat_models import ChatOpenAI
from langchain_core.chat_history import BaseChatMessageHistory
from langchain_core.documents import Document
from langchain_core.messages import BaseMessage, AIMessage
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from pydantic import BaseModel, Field
from langchain_core.runnables import (
RunnableLambda,
ConfigurableFieldSpec,
RunnablePassthrough,
)
from langchain_core.runnables.history import RunnableWithMessageHistory
class InMemoryHistory(BaseChatMessageHistory, BaseModel):
\"\"\"In memory implementation of chat message history.\"\"\"
messages: List[BaseMessage] = Field(default_factory=list)
def add_messages(self, messages: List[BaseMessage]) -> None:
\"\"\"Add a list of messages to the store\"\"\"
self.messages.extend(messages)
def clear(self) -> None:
self.messages = []
# Here we use a global variable to store the chat message history.
# This will make it easier to inspect it to see the underlying results.
store = {}
def get_by_session_id(session_id: str) -> BaseChatMessageHistory:
if session_id not in store:
store[session_id] = InMemoryHistory()
return store[session_id]
history = get_by_session_id("1")
history.add_message(AIMessage(content="hello"))
print(store) # noqa: T201
Example where the wrapped Runnable takes a dictionary input:
.. code-block:: python
from typing import Optional
from langchain_community.chat_models import ChatAnthropic
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_core.runnables.history import RunnableWithMessageHistory
prompt = ChatPromptTemplate.from_messages([
("system", "You're an assistant who's good at {ability}"),
MessagesPlaceholder(variable_name="history"),
("human", "{question}"),
])
chain = prompt | ChatAnthropic(model="claude-2")
chain_with_history = RunnableWithMessageHistory(
chain,
# Uses the get_by_session_id function defined in the example
# above.
get_by_session_id,
input_messages_key="question",
history_messages_key="history",
)
print(chain_with_history.invoke( # noqa: T201
{"ability": "math", "question": "What does cosine mean?"},
config={"configurable": {"session_id": "foo"}}
))
# Uses the store defined in the example above.
print(store) # noqa: T201
print(chain_with_history.invoke( # noqa: T201
{"ability": "math", "question": "What's its inverse"},
config={"configurable": {"session_id": "foo"}}
))
print(store) # noqa: T201
Example where the session factory takes two keys (user_id and conversation_id):
.. code-block:: python
store = {}
def get_session_history(
user_id: str, conversation_id: str
) -> BaseChatMessageHistory:
if (user_id, conversation_id) not in store:
store[(user_id, conversation_id)] = InMemoryHistory()
return store[(user_id, conversation_id)]
prompt = ChatPromptTemplate.from_messages([
("system", "You're an assistant who's good at {ability}"),
MessagesPlaceholder(variable_name="history"),
("human", "{question}"),
])
chain = prompt | ChatAnthropic(model="claude-2")
with_message_history = RunnableWithMessageHistory(
chain,
get_session_history=get_session_history,
input_messages_key="question",
history_messages_key="history",
history_factory_config=[
ConfigurableFieldSpec(
id="user_id",
annotation=str,
name="User ID",
description="Unique identifier for the user.",
default="",
is_shared=True,
),
ConfigurableFieldSpec(
id="conversation_id",
annotation=str,
name="Conversation ID",
description="Unique identifier for the conversation.",
default="",
is_shared=True,
),
],
)
with_message_history.invoke(
{"ability": "math", "question": "What does cosine mean?"},
config={"configurable": {"user_id": "123", "conversation_id": "1"}}
)
"""
get_session_history: GetSessionHistoryCallable
input_messages_key: Optional[str] = None
output_messages_key: Optional[str] = None
history_messages_key: Optional[str] = None
history_factory_config: Sequence[ConfigurableFieldSpec]
@classmethod
def get_lc_namespace(cls) -> list[str]:
"""Get the namespace of the langchain object."""
return ["langchain", "schema", "runnable"]
def __init__(
self,
runnable: Union[
Runnable[
Union[MessagesOrDictWithMessages],
Union[str, BaseMessage, MessagesOrDictWithMessages],
],
LanguageModelLike,
],
get_session_history: GetSessionHistoryCallable,
*,
input_messages_key: Optional[str] = None,
output_messages_key: Optional[str] = None,
history_messages_key: Optional[str] = None,
history_factory_config: Optional[Sequence[ConfigurableFieldSpec]] = None,
**kwargs: Any,
) -> None:
"""Initialize RunnableWithMessageHistory.
Args:
runnable: The base Runnable to be wrapped. Must take as input one of:
1. A sequence of BaseMessages
2. A dict with one key for all messages
3. A dict with one key for the current input string/message(s) and
a separate key for historical messages. If the input key points
to a string, it will be treated as a HumanMessage in history.
Must return as output one of:
1. A string which can be treated as an AIMessage
2. A BaseMessage or sequence of BaseMessages
3. A dict with a key for a BaseMessage or sequence of BaseMessages
get_session_history: Function that returns a new BaseChatMessageHistory.
This function should either take a single positional argument
`session_id` of type string and return a corresponding
chat message history instance.
.. code-block:: python
def get_session_history(
session_id: str,
*,
user_id: Optional[str]=None
) -> BaseChatMessageHistory:
...
Or it should take keyword arguments that match the keys of
`session_history_config_specs` and return a corresponding
chat message history instance.
.. code-block:: python
def get_session_history(
*,
user_id: str,
thread_id: str,
) -> BaseChatMessageHistory:
...
input_messages_key: Must be specified if the base runnable accepts a dict
as input. Default is None.
output_messages_key: Must be specified if the base runnable returns a dict
as output. Default is None.
history_messages_key: Must be specified if the base runnable accepts a dict
as input and expects a separate key for historical messages.
history_factory_config: Configure fields that should be passed to the
chat history factory. See ``ConfigurableFieldSpec`` for more details.
Specifying these allows you to pass multiple config keys
into the get_session_history factory.
**kwargs: Arbitrary additional kwargs to pass to parent class
``RunnableBindingBase`` init.
"""
history_chain: Runnable = RunnableLambda(
self._enter_history, self._aenter_history
).with_config(run_name="load_history")
messages_key = history_messages_key or input_messages_key
if messages_key:
history_chain = RunnablePassthrough.assign(
**{messages_key: history_chain}
).with_config(run_name="insert_history")
runnable_sync: Runnable = runnable.with_listeners(on_end=self._exit_history)
runnable_async: Runnable = runnable.with_alisteners(on_end=self._aexit_history)
def _call_runnable_sync(_input: Any) -> Runnable:
return runnable_sync
async def _call_runnable_async(_input: Any) -> Runnable:
return runnable_async
bound: Runnable = (
history_chain
| RunnableLambda(
_call_runnable_sync,
_call_runnable_async,
).with_config(run_name="check_sync_or_async")
).with_config(run_name="RunnableWithMessageHistory")
if history_factory_config:
_config_specs = history_factory_config
else:
# If not provided, then we'll use the default session_id field
_config_specs = [
ConfigurableFieldSpec(
id="session_id",
annotation=str,
name="Session ID",
description="Unique identifier for a session.",
default="",
is_shared=True,
),
]
super().__init__(
get_session_history=get_session_history,
input_messages_key=input_messages_key,
output_messages_key=output_messages_key,
bound=bound,
history_messages_key=history_messages_key,
history_factory_config=_config_specs,
**kwargs,
)
self._history_chain = history_chain
@property
def config_specs(self) -> list[ConfigurableFieldSpec]:
"""Get the configuration specs for the RunnableWithMessageHistory."""
return get_unique_config_specs(
super().config_specs + list(self.history_factory_config)
)
def get_input_schema(
self, config: Optional[RunnableConfig] = None
) -> type[BaseModel]:
from langchain_core.messages import BaseMessage
fields: dict = {}
if self.input_messages_key and self.history_messages_key:
fields[self.input_messages_key] = (
Union[str, BaseMessage, Sequence[BaseMessage]],
...,
)
elif self.input_messages_key:
fields[self.input_messages_key] = (Sequence[BaseMessage], ...)
else:
return create_model_v2(
"RunnableWithChatHistoryInput",
module_name=self.__class__.__module__,
root=(Sequence[BaseMessage], ...),
)
return create_model_v2( # type: ignore[call-overload]
"RunnableWithChatHistoryInput",
field_definitions=fields,
module_name=self.__class__.__module__,
)
@property
@override
def OutputType(self) -> type[Output]:
output_type = self._history_chain.OutputType
return output_type
def get_output_schema(
self, config: Optional[RunnableConfig] = None
) -> type[BaseModel]:
"""Get a pydantic model that can be used to validate output to the Runnable.
Runnables that leverage the configurable_fields and configurable_alternatives
methods will have a dynamic output schema that depends on which
configuration the Runnable is invoked with.
This method allows to get an output schema for a specific configuration.
Args:
config: A config to use when generating the schema.
Returns:
A pydantic model that can be used to validate output.
"""
root_type = self.OutputType
if (
inspect.isclass(root_type)
and not isinstance(root_type, GenericAlias)
and issubclass(root_type, BaseModel)
):
return root_type
return create_model_v2(
"RunnableWithChatHistoryOutput",
root=root_type,
module_name=self.__class__.__module__,
)
def _is_not_async(self, *args: Sequence[Any], **kwargs: dict[str, Any]) -> bool:
return False
async def _is_async(self, *args: Sequence[Any], **kwargs: dict[str, Any]) -> bool:
return True
def _get_input_messages(
self, input_val: Union[str, BaseMessage, Sequence[BaseMessage], dict]
) -> list[BaseMessage]:
from langchain_core.messages import BaseMessage
# If dictionary, try to pluck the single key representing messages
if isinstance(input_val, dict):
if self.input_messages_key:
key = self.input_messages_key
elif len(input_val) == 1:
key = list(input_val.keys())[0]
else:
key = "input"
input_val = input_val[key]
# If value is a string, convert to a human message
if isinstance(input_val, str):
from langchain_core.messages import HumanMessage
return [HumanMessage(content=input_val)]
# If value is a single message, convert to a list
elif isinstance(input_val, BaseMessage):
return [input_val]
# If value is a list or tuple...
elif isinstance(input_val, (list, tuple)):
# Handle empty case
if len(input_val) == 0:
return list(input_val)
# If is a list of list, then return the first value
# This occurs for chat models - since we batch inputs
if isinstance(input_val[0], list):
if len(input_val) != 1:
msg = f"Expected a single list of messages. Got {input_val}."
raise ValueError(msg)
return input_val[0]
return list(input_val)
else:
msg = (
f"Expected str, BaseMessage, List[BaseMessage], or Tuple[BaseMessage]. "
f"Got {input_val}."
)
raise ValueError(msg)
def _get_output_messages(
self, output_val: Union[str, BaseMessage, Sequence[BaseMessage], dict]
) -> list[BaseMessage]:
from langchain_core.messages import BaseMessage
# If dictionary, try to pluck the single key representing messages
if isinstance(output_val, dict):
if self.output_messages_key:
key = self.output_messages_key
elif len(output_val) == 1:
key = list(output_val.keys())[0]
else:
key = "output"
# If you are wrapping a chat model directly
# The output is actually this weird generations object
if key not in output_val and "generations" in output_val:
output_val = output_val["generations"][0][0]["message"]
else:
output_val = output_val[key]
if isinstance(output_val, str):
from langchain_core.messages import AIMessage
return [AIMessage(content=output_val)]
# If value is a single message, convert to a list
elif isinstance(output_val, BaseMessage):
return [output_val]
elif isinstance(output_val, (list, tuple)):
return list(output_val)
else:
msg = (
f"Expected str, BaseMessage, List[BaseMessage], or Tuple[BaseMessage]. "
f"Got {output_val}."
)
raise ValueError(msg)
def _enter_history(self, input: Any, config: RunnableConfig) -> list[BaseMessage]:
hist: BaseChatMessageHistory = config["configurable"]["message_history"]
messages = hist.messages.copy()
if not self.history_messages_key:
# return all messages
input_val = (
input if not self.input_messages_key else input[self.input_messages_key]
)
messages += self._get_input_messages(input_val)
return messages
async def _aenter_history(
self, input: dict[str, Any], config: RunnableConfig
) -> list[BaseMessage]:
hist: BaseChatMessageHistory = config["configurable"]["message_history"]
messages = (await hist.aget_messages()).copy()
if not self.history_messages_key:
# return all messages
input_val = (
input if not self.input_messages_key else input[self.input_messages_key]
)
messages += self._get_input_messages(input_val)
return messages
def _exit_history(self, run: Run, config: RunnableConfig) -> None:
hist: BaseChatMessageHistory = config["configurable"]["message_history"]
# Get the input messages
inputs = load(run.inputs)
input_messages = self._get_input_messages(inputs)
# If historic messages were prepended to the input messages, remove them to
# avoid adding duplicate messages to history.
if not self.history_messages_key:
historic_messages = config["configurable"]["message_history"].messages
input_messages = input_messages[len(historic_messages) :]
# Get the output messages
output_val = load(run.outputs)
output_messages = self._get_output_messages(output_val)
hist.add_messages(input_messages + output_messages)
async def _aexit_history(self, run: Run, config: RunnableConfig) -> None:
hist: BaseChatMessageHistory = config["configurable"]["message_history"]
# Get the input messages
inputs = load(run.inputs)
input_messages = self._get_input_messages(inputs)
# If historic messages were prepended to the input messages, remove them to
# avoid adding duplicate messages to history.
if not self.history_messages_key:
historic_messages = await hist.aget_messages()
input_messages = input_messages[len(historic_messages) :]
# Get the output messages
output_val = load(run.outputs)
output_messages = self._get_output_messages(output_val)
await hist.aadd_messages(input_messages + output_messages)
def _merge_configs(self, *configs: Optional[RunnableConfig]) -> RunnableConfig:
config = super()._merge_configs(*configs)
expected_keys = [field_spec.id for field_spec in self.history_factory_config]
configurable = config.get("configurable", {})
missing_keys = set(expected_keys) - set(configurable.keys())
parameter_names = _get_parameter_names(self.get_session_history)
if missing_keys and parameter_names:
example_input = {self.input_messages_key: "foo"}
example_configurable = {
missing_key: "[your-value-here]" for missing_key in missing_keys
}
example_config = {"configurable": example_configurable}
msg = (
f"Missing keys {sorted(missing_keys)} in config['configurable'] "
f"Expected keys are {sorted(expected_keys)}."
f"When using via .invoke() or .stream(), pass in a config; "
f"e.g., chain.invoke({example_input}, {example_config})"
)
raise ValueError(msg)
if len(expected_keys) == 1:
if parameter_names:
# If arity = 1, then invoke function by positional arguments
message_history = self.get_session_history(
configurable[expected_keys[0]]
)
else:
if not config:
config["configurable"] = {}
message_history = self.get_session_history()
else:
# otherwise verify that names of keys patch and invoke by named arguments
if set(expected_keys) != set(parameter_names):
msg = (
f"Expected keys {sorted(expected_keys)} do not match parameter "
f"names {sorted(parameter_names)} of get_session_history."
)
raise ValueError(msg)
message_history = self.get_session_history(
**{key: configurable[key] for key in expected_keys}
)
config["configurable"]["message_history"] = message_history
return config
def _get_parameter_names(callable_: GetSessionHistoryCallable) -> list[str]:
"""Get the parameter names of the callable."""
sig = inspect.signature(callable_)
return list(sig.parameters.keys())
|
langchain-aiREPO_NAMElangchainPATH_START.@langchain_extracted@langchain-master@libs@core@langchain_core@runnables@history.py@.PATH_END.py
|
{
"filename": "_traceref.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/scatter3d/error_z/_traceref.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class TracerefValidator(_plotly_utils.basevalidators.IntegerValidator):
def __init__(
self, plotly_name="traceref", parent_name="scatter3d.error_z", **kwargs
):
super(TracerefValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
min=kwargs.pop("min", 0),
**kwargs,
)
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@scatter3d@error_z@_traceref.py@.PATH_END.py
|
{
"filename": "test_plummer.py",
"repo_name": "amusecode/amuse",
"repo_path": "amuse_extracted/amuse-main/src/amuse/test/suite/ext_tests/test_plummer.py",
"type": "Python"
}
|
import numpy
from amuse.test import amusetest
from amuse.units import nbody_system
from amuse.units import units
from amuse.ic.plummer import new_plummer_model, MakePlummerModel
class TestData(amusetest.TestCase):
pass
class TestPlummer(TestData):
def test1(self):
numpy.random.seed(0)
# print numpy.random.get_state()
m = MakePlummerModel(2)
m1, p, v = m.new_model()
self.assertEqual(m1[0, 0], 0.5)
self.assertEqual(m1[1, 0], 0.5)
self.assertAlmostEqual(p[0, 0], -0.729636617171, 5)
self.assertAlmostEqual(p[1, 0], -0.713272921751, 5)
self.assertAlmostEqual(p[0, 1], 0.379570256435, 5)
self.assertAlmostEqual(p[1, 1], -0.930290757081, 5)
def test2(self):
convert_nbody = nbody_system.nbody_to_si(6 | units.kg, 7 | units.m)
stars = new_plummer_model(2, convert_nbody)
self.assertEqual(stars[0].mass.value_in(units.kg), 3.0)
self.assertEqual(stars[1].mass.value_in(units.kg), 3.0)
def test3(self):
stars = new_plummer_model(2, None)
self.assertEqual(stars[0].mass.value_in(nbody_system.mass), 0.5)
self.assertEqual(stars[1].mass.value_in(nbody_system.mass), 0.5)
def test4(self):
stars = new_plummer_model(2, do_scale=True)
self.assertAlmostEqual(stars.kinetic_energy(), 0.25 | nbody_system.energy)
self.assertAlmostEqual(stars.potential_energy(G=nbody_system.G), -0.50 | nbody_system.energy)
self.assertAlmostEqual(stars.center_of_mass(), [0, 0, 0] | nbody_system.length)
self.assertAlmostEqual(stars.center_of_mass_velocity(), [0, 0, 0] | nbody_system.speed)
self.assertAlmostEqual(stars.mass.sum(), 1.00 | nbody_system.mass)
self.assertAlmostEqual(stars.virial_radius(), 1.00 | nbody_system.length)
|
amusecodeREPO_NAMEamusePATH_START.@amuse_extracted@amuse-main@src@amuse@test@suite@ext_tests@test_plummer.py@.PATH_END.py
|
{
"filename": "byte2human.py",
"repo_name": "jan-rybizki/Chempy",
"repo_path": "Chempy_extracted/Chempy-master/Chempy/input/yields/West17/byte2human.py",
"type": "Python"
}
|
#! /bin/env python3
"""
Module for human-readable byte strings.
"""
import sys
_units = ('','k','M','G','T','P','E','Z','Y')
def byte2human(size,
strip = True,
SI = False,
short = False,
length = 3):
"""
Return byte string in human-readible format.
"""
assert 2 < length < 6, 'Length out of Range'
su = 'B'
if SI:
prefix = ''
div = 1000
else:
prefix = 'i'
div = 1024
div_lim = 1000 * (1 - 0.5 * 10**(-length) - 2.e-15)
if short:
prefix = ''
su = ''
asize = abs(size)
osize = round(asize)
assert abs((osize + 1.e-16)/(asize + 1.e-16) - 1) < 1.e-14
xsize = osize
i = 0
while xsize > div_lim:
xsize /= div
i += 1
if length == 3:
rnd_lim = 10 * (1 - 0.5 * 10**(-length + 1) - 2.e-15)
if (xsize >= rnd_lim) or (i == 0):
sv = '{:3d}'.format(int(round(xsize)))
else:
sv = '{:3.1f}'.format(xsize)
elif length == 4:
rnd_lim1 = 100 * (1 - 0.5 * 10**(-length + 1) - 2.e-15)
rnd_lim2 = 10 * (1 - 0.5 * 10**(-length + 1) - 2.e-15)
if (xsize >= rnd_lim1) or (i == 0):
sv = '{:4d}'.format(int(round(xsize)))
elif (xsize >= rnd_lim2):
sv = '{:4.1f}'.format(xsize)
else:
sv = '{:4.2f}'.format(xsize)
elif length == 5:
rnd_lim1 = 1000 * (1 - 0.5 * 10**(-length + 1) - 2.e-15)
rnd_lim2 = 100 * (1 - 0.5 * 10**(-length + 1) - 2.e-15)
rnd_lim3 = 10 * (1 - 0.5 * 10**(-length + 1) - 2.e-15)
if i > 0 and 999 < round(xsize*div) < div:
xsize *= div
i -= 1
sv = '{:4d}'.format(int(round(xsize)))
sv = sv[0] + ',' + sv[1:4]
elif (xsize >= rnd_lim1) or (i == 0):
sv = '{:5d}'.format(int(round(xsize)))
elif (xsize >= rnd_lim2):
sv = '{:5.1f}'.format(xsize)
elif (xsize >= rnd_lim3):
sv = '{:5.2f}'.format(xsize)
else:
sv = '{:5.3f}'.format(xsize)
else:
raise Exception('Length out of Range')
if i >= len(_units):
sv = '*'*length
unit = _units[i]
if i >= 1:
unit += prefix
unit = unit + su
if round(size) < 0:
sv = '-' + sv.strip()
sv = ' '*(length - len(sv)) + sv
s = sv + ' ' + unit
if strip:
s = s.strip()
return s
if __name__ == '__main__':
argv = sys.argv
if len(argv) == 2:
try:
print(byte2human(round(float(argv[1]))))
except:
print('***')
|
jan-rybizkiREPO_NAMEChempyPATH_START.@Chempy_extracted@Chempy-master@Chempy@input@yields@West17@byte2human.py@.PATH_END.py
|
{
"filename": "README.md",
"repo_name": "bencebecsy/FurgeHullam",
"repo_path": "FurgeHullam_extracted/FurgeHullam-main/README.md",
"type": "Markdown"
}
|
# FürgeHullám
Fast likelihood for sinusoidal signals in pulsar timing array data utilizing:
1) Pre-calculated interpolated inner products
2) Analytic pulsar phase marginalization
3) Numeric pulsar distance marginalization
Name from Hungarian words "fürge" (meaning quick) and "hullám" (meaning wave).
For more details on the methods see: [arXiv:2406.16331](https://arxiv.org/abs/2406.16331)
For a tutorial on how to install and run the code see: [Quick-start Guide](https://github.com/bencebecsy/FurgeHullam/blob/main/docs/how_to_run_FurgeHullam.md)
Citation:
```
@ARTICLE{2024arXiv240616331B,
author = {{B{\'e}csy}, Bence},
title = "{Efficient Bayesian inference and model selection for continuous waves in pulsar timing array data}",
journal = {arXiv e-prints},
keywords = {General Relativity and Quantum Cosmology, Astrophysics - High Energy Astrophysical Phenomena},
year = 2024,
month = jun,
eid = {arXiv:2406.16331},
pages = {arXiv:2406.16331},
doi = {10.48550/arXiv.2406.16331},
archivePrefix = {arXiv},
eprint = {2406.16331},
primaryClass = {gr-qc},
adsurl = {https://ui.adsabs.harvard.edu/abs/2024arXiv240616331B},
adsnote = {Provided by the SAO/NASA Astrophysics Data System}
}
```
|
bencebecsyREPO_NAMEFurgeHullamPATH_START.@FurgeHullam_extracted@FurgeHullam-main@README.md@.PATH_END.py
|
{
"filename": "test_cored_steep_ellipsoid.py",
"repo_name": "sibirrer/lenstronomy",
"repo_path": "lenstronomy_extracted/lenstronomy-main/test/test_LensModel/test_Profiles/test_cored_steep_ellipsoid.py",
"type": "Python"
}
|
__author__ = "sibirrer"
import numpy as np
import pytest
import numpy.testing as npt
from lenstronomy.Util import param_util
class TestCSP(object):
"""Tests the cored steep ellipsoid (CSE)"""
def setup_method(self):
from lenstronomy.LensModel.Profiles.cored_steep_ellipsoid import CSE
self.CSP = CSE(axis="product_avg")
def test_function(self):
kwargs = {"a": 2, "s": 1, "e1": 0.0, "e2": 0.0, "center_x": 0, "center_y": 0}
x = np.array([1.0, 2])
y = np.array([2, 0])
f_ = self.CSP.function(x, y, **kwargs)
npt.assert_almost_equal(f_, [1.09016, 0.96242], decimal=5)
def test_derivatives(self):
kwargs = {"a": 2, "s": 1, "e1": 0.0, "e2": 0.0, "center_x": 0, "center_y": 0}
x = np.array([1.0, 2])
y = np.array([2, 0])
f_x, f_y = self.CSP.derivatives(x, y, **kwargs)
npt.assert_almost_equal(f_x, [0.2367, 0.55279], decimal=5)
npt.assert_almost_equal(f_y, [0.4734, 0.0], decimal=5)
def test_hessian(self):
kwargs = {"a": 2, "s": 1, "e1": 0.0, "e2": 0.0, "center_x": 0, "center_y": 0}
x = np.array([1.0, 2])
y = np.array([2, 0])
f_xx, f_xy, f_yx, f_yy = self.CSP.hessian(x, y, **kwargs)
npt.assert_almost_equal(f_xy, f_yx, decimal=5)
npt.assert_almost_equal(f_xx, [0.16924, -0.09751], decimal=5)
npt.assert_almost_equal(f_xy, [-0.13493, -0.0], decimal=5)
npt.assert_almost_equal(f_yy, [-0.03315, 0.27639], decimal=5)
def test_ellipticity(self):
"""Test the definition of the ellipticity normalization (along major axis or
product averaged axes)"""
x, y = np.linspace(start=0.001, stop=10, num=100), np.zeros(100)
kwargs_round = {
"a": 2,
"s": 1,
"e1": 0.0,
"e2": 0.0,
"center_x": 0,
"center_y": 0,
}
phi_q, q = param_util.ellipticity2phi_q(0.3, 0)
kwargs = {"a": 2, "s": 1, "e1": 0.3, "e2": 0.0, "center_x": 0, "center_y": 0}
f_xx, f_xy, f_yx, f_yy = self.CSP.hessian(x, y, **kwargs_round)
kappa_round = 1.0 / 2 * (f_xx + f_yy)
f_xx, f_xy, f_yx, f_yy = self.CSP.hessian(x, y, **kwargs)
kappa_major = 1.0 / 2 * (f_xx + f_yy)
f_xx, f_xy, f_yx, f_yy = self.CSP.hessian(y, x, **kwargs)
kappa_minor = 1.0 / 2 * (f_xx + f_yy)
# import matplotlib.pyplot as plt
# plt.plot(x, kappa_major/kappa_round, ',-', label='major/round', alpha=0.5)
# plt.plot(x, kappa_minor/kappa_round, '--', label='minor/round', alpha=0.5)
#
# plt.plot(x, np.sqrt(kappa_minor*kappa_major)/kappa_round,label='prod/kappa_round')
# plt.legend()
# plt.show()
npt.assert_almost_equal(
kappa_round, np.sqrt(kappa_minor * kappa_major), decimal=1
)
if __name__ == "__main__":
pytest.main()
|
sibirrerREPO_NAMElenstronomyPATH_START.@lenstronomy_extracted@lenstronomy-main@test@test_LensModel@test_Profiles@test_cored_steep_ellipsoid.py@.PATH_END.py
|
{
"filename": "README.md",
"repo_name": "cajohare/AxionLimits",
"repo_path": "AxionLimits_extracted/AxionLimits-master/limit_data/AxionMass/README.md",
"type": "Markdown"
}
|
# Axion mass predictions
These come from matching the cosmological abundance in axions to the observed dark matter abundance. The list is presumably incomplete, but these are the ones I have been able to source so far. Contact me if any are missing.
|
cajohareREPO_NAMEAxionLimitsPATH_START.@AxionLimits_extracted@AxionLimits-master@limit_data@AxionMass@README.md@.PATH_END.py
|
{
"filename": "_zerolinecolor.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/layout/xaxis/_zerolinecolor.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class ZerolinecolorValidator(_plotly_utils.basevalidators.ColorValidator):
def __init__(
self, plotly_name="zerolinecolor", parent_name="layout.xaxis", **kwargs
):
super(ZerolinecolorValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "ticks"),
role=kwargs.pop("role", "style"),
**kwargs
)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@layout@xaxis@_zerolinecolor.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/tests/test_optional/test_matplotlylib/__init__.py",
"type": "Python"
}
|
import warnings
def setup_package():
warnings.filterwarnings("ignore")
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@tests@test_optional@test_matplotlylib@__init__.py@.PATH_END.py
|
{
"filename": "_valuesrc.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/sankey/link/_valuesrc.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class ValuesrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(self, plotly_name="valuesrc", parent_name="sankey.link", **kwargs):
super(ValuesrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
role=kwargs.pop("role", "info"),
**kwargs
)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@sankey@link@_valuesrc.py@.PATH_END.py
|
{
"filename": "ex3.py",
"repo_name": "astrodee/threadcount",
"repo_path": "threadcount_extracted/threadcount-master/docs/source/examples/ex3.py",
"type": "Python"
}
|
# examples/ex3.py
import threadcount as tc
from threadcount.procedures import ( # noqa: F401
set_rcParams,
analyze_outflow_extent,
)
set_rcParams.set_params()
# Create the settings dict for measuring the outflow extent.
analyze_settings = {
"one_gauss_input_file": "ex3_5007_simple_model.txt",
"line": tc.lines.L_OIII5007,
# https://mpdaf.readthedocs.io/en/latest/api/mpdaf.obj.Image.html#mpdaf.obj.Image.mask_region
# each entry in the list is a dictionary, where they keys are the
# parameters for the function mask_region() in mpdaf, and the values
# are the corresponding values. For now, both "unit_center" and "unit_radius"
# MUST be included and MUST have the value None. (i.e. it only works in
# pixels).
"mask_region_arguments": [],
"maximum_sigma_A": 50, # maximum allowed sigma in Angstroms.
"velocity_mask_limit": 60,
# manual_galaxy_region format
# [min_row, max_row, min_column, max_column] --> array[min_row:max_row+1,min_column:max_column+1]
# 'None' means it will continue to image edge.
"manual_galaxy_region": [30, 39, None, None],
"verbose": False,
"vertical_average_region": 1,
"contour_levels": [0.5, 0.9],
# Select from contour_levels list which contour to choose to define outflow region.
"outflow_contour_level": 0.5,
"output_base_name": "ex3output",
"galaxy_center_pixel": [35, 62], # row,col
"velocity_vmax": 140, # sets the image display maximum value.
"arcsec_per_pixel": "header",
"units": "header",
}
analyze_outflow_extent.run(analyze_settings)
print("Finished with script.")
set_rcParams.reset_params()
|
astrodeeREPO_NAMEthreadcountPATH_START.@threadcount_extracted@threadcount-master@docs@source@examples@ex3.py@.PATH_END.py
|
{
"filename": "utils.py",
"repo_name": "waynebhayes/SpArcFiRe",
"repo_path": "SpArcFiRe_extracted/SpArcFiRe-master/scripts/SpArcFiRe-pyvenv/lib/python2.7/site-packages/astropy/time/utils.py",
"type": "Python"
}
|
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Time utilities.
In particular, routines to do basic arithmetic on numbers represented by two
doubles, using the procedure of Shewchuk, 1997, Discrete & Computational
Geometry 18(3):305-363 -- http://www.cs.berkeley.edu/~jrs/papers/robustr.pdf
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy as np
def day_frac(val1, val2, factor=1., divisor=1.):
"""
Return the sum of ``val1`` and ``val2`` as two float64s, an integer part
and the fractional remainder. If ``factor`` is not 1.0 then multiply the
sum by ``factor``. If ``divisor`` is not 1.0 then divide the sum by
``divisor``.
The arithmetic is all done with exact floating point operations so no
precision is lost to rounding error. This routine assumes the sum is less
than about 1e16, otherwise the ``frac`` part will be greater than 1.0.
Returns
-------
day, frac : float64
Integer and fractional part of val1 + val2.
"""
# Add val1 and val2 exactly, returning the result as two float64s.
# The first is the approximate sum (with some floating point error)
# and the second is the error of the float64 sum.
sum12, err12 = two_sum(val1, val2)
if np.any(factor != 1.):
sum12, carry = two_product(sum12, factor)
carry += err12 * factor
sum12, err12 = two_sum(sum12, carry)
if np.any(divisor != 1.):
q1 = sum12 / divisor
p1, p2 = two_product(q1, divisor)
d1, d2 = two_sum(sum12, -p1)
d2 += err12
d2 -= p2
q2 = (d1 + d2) / divisor # 3-part float fine here; nothing can be lost
sum12, err12 = two_sum(q1, q2)
# get integer fraction
day = np.round(sum12)
extra, frac = two_sum(sum12, -day)
frac += extra + err12
return day, frac
def two_sum(a, b):
"""
Add ``a`` and ``b`` exactly, returning the result as two float64s.
The first is the approximate sum (with some floating point error)
and the second is the error of the float64 sum.
Using the procedure of Shewchuk, 1997,
Discrete & Computational Geometry 18(3):305-363
http://www.cs.berkeley.edu/~jrs/papers/robustr.pdf
Returns
-------
sum, err : float64
Approximate sum of a + b and the exact floating point error
"""
x = a + b
eb = x - a
eb = b - eb
ea = x - b
ea = a - ea
return x, ea + eb
def two_product(a, b):
    """
    Multiple ``a`` and ``b`` exactly, returning the result as two float64s.
    The first is the approximate product (with some floating point error)
    and the second is the error of the float64 product.

    Uses the procedure of Shewchuk, 1997,
    Discrete & Computational Geometry 18(3):305-363
    http://www.cs.berkeley.edu/~jrs/papers/robustr.pdf

    Returns
    -------
    prod, err : float64
        Approximate product a * b and the exact floating point error
    """
    prod = a * b
    # Veltkamp splitting of each operand into high/low halves whose product
    # is exact; 134217729 == 2**27 + 1 for IEEE double precision.
    c = 134217729. * a
    big = c - a
    a_hi = c - big
    a_lo = a - a_hi
    c = 134217729. * b
    big = c - b
    b_hi = c - big
    b_lo = b - b_hi
    # Dekker's error recovery: subtract the four exact partial products from
    # the rounded product, in this exact order.
    err = prod - a_hi * b_hi
    err -= a_lo * b_hi
    err -= a_hi * b_lo
    err = a_lo * b_lo - err
    return prod, err
def split(a):
    """
    Split float64 in two aligned parts.

    Uses the procedure of Shewchuk, 1997,
    Discrete & Computational Geometry 18(3):305-363
    http://www.cs.berkeley.edu/~jrs/papers/robustr.pdf
    """
    # Veltkamp splitting constant: 2**27 + 1 for IEEE double precision.
    scaled = 134217729. * a
    overshoot = scaled - a
    high = scaled - overshoot
    low = a - high
    return high, low
|
waynebhayesREPO_NAMESpArcFiRePATH_START.@SpArcFiRe_extracted@SpArcFiRe-master@scripts@SpArcFiRe-pyvenv@lib@python2.7@site-packages@astropy@time@utils.py@.PATH_END.py
|
{
"filename": "bhm_spiders_agn.py",
"repo_name": "sdss/target_selection",
"repo_path": "target_selection_extracted/target_selection-main/python/target_selection/cartons/bhm_spiders_agn.py",
"type": "Python"
}
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# @Author: Tom Dwelly
# @Date: 2020-03-03
# @Filename: bhm_spiders_agn.py
# @License: BSD 3-clause (http://www.opensource.org/licenses/BSD-3-Clause)
# derived from guide.py
# ### flake8: noqa
# isort: skip_file
import peewee
from peewee import JOIN
from peewee import fn
from target_selection.cartons.base import BaseCarton
# general catalogdb imports
from sdssdb.peewee.sdss5db.catalogdb import (
Catalog,
EROSITASupersetv1AGN,
)
# imports of existing spectro catalogue
from sdssdb.peewee.sdss5db.catalogdb import (
CatalogFromSDSS_DR19p_Speclite,
SDSS_DR19p_Speclite,
)
# additional imports required by bhm_spiders_agn_lsdr10
from sdssdb.peewee.sdss5db.catalogdb import (
CatalogToLegacy_Survey_DR10,
Legacy_Survey_DR10,
)
# additional imports required by bhm_spiders_agn_gaiadr3
from sdssdb.peewee.sdss5db.catalogdb import (
CatalogToGaia_DR3,
Gaia_DR3,
)
# # additional imports required by bhm_spiders_agn_ps1dr2
# from sdssdb.peewee.sdss5db.catalogdb import (
# Panstarrs1,
# CatalogToPanstarrs1,
# )
from target_selection.mag_flux import AB2nMgy
# from target_selection.mag_flux import AB2Jy
# DEBUG STUFF TO USE TEMP TABLE
# CatalogToSDSS_DR19p_Speclite._meta.table_name = 'temp_catalog_to_sdss_dr19p_speclite'
# CatalogToSDSS_DR19p_Speclite._meta._schema = 'sandbox'
# used by cartons that need to compute Galactic latitude:
north_gal_pole_ra = 192.85948 # deg, J2000
north_gal_pole_dec = +27.12825 # deg, J2000
# maskbits to determine if X-ray detection is potentially problematic
# https://wiki.mpe.mpg.de/eRosita/EroFollowup/SDSSV_data_model#Notes_on_ero_flags_column_carrying_eRASS_quality_flagging_information # noqa
ero_flags_mask = (
0
+ 2**0 # FLAG_SP_SNR - eRASS source is located in/near a supernova remnant # noqa
+ 2**1 # FLAG_SP_BPS - eRASS source is located in/near a bright (X-ray?) point source # noqa
+ 2**2 # FLAG_SP_SCL - eRASS source is located in/near a star cluster # noqa
+ 2**3 # FLAG_SP_LGA - eRASS source is located in/near a local galaxy # noqa
+ 2**4 # FLAG_SP_GC - eRASS source is located in/near a globular cluster # noqa
+ 2
** 5 # FLAG_SP_GC_CONS - eRASS source is located in/near a globular cluster (conservative criteria) # noqa
# + 2**6 # FLAG_NO_RADEC_ERR - eRASS source is missing an estimate of uncertainty on its sky position # noqa
# + 2**7 # FLAG_NO_EXT_ERR - eRASS source is missing an estimate of uncertainty on its X-ray extent # noqa
# + 2**8 # FLAG_NO_CTS_ERR - eRASS source is missing an estimate of uncertainty on the number of detected X-ray counts # noqa
# + 2**9 # FLAG_HARD_SRC - eRASS source is significantly detected in the 2.3-5keV band (in either eRASS:1 3B or eRASS:3 3B, with DET_LIKE_3 > 8, 30arcsec radius ) # noqa
)
# ############################################
# ############################################
# ############################################
# ############################################
# This provides the following BHM SPIDERS AGN cartons in v1.0:
# * bhm_spiders_agn_lsdr10
# * bhm_spiders_agn_gaiadr3
# * bhm_spiders_agn_sep
# * bhm_spiders_agn_tda
# * bhm_spiders_agn_hard
# * bhm_spiders_agn_lsdr10_d3
# * bhm_spiders_agn_gaiadr3_d3
# * bhm_spiders_agn_sep_d3
# * bhm_spiders_agn_tda_d3
# * bhm_spiders_agn_hard_d3
# ############################################
# ############################################
# ############################################
# ############################################
# some reference points for AB->nMgy conversions
# 30.0 AB = 1e-3 nMgy
# 22.5 AB = 1.0 nMgy
# 22.0 AB = 1.58489 nMgy
# 21.5 AB = 2.51189 nMgy
# 21.0 AB = 3.98107 nMgy
# 20.0 AB = 10.0 nMgy
# 18.5 AB = 39.8107 nMgy
# 16.5 AB = 251.189 nMgy
# 14.0 AB = 2511.89 nMgy
# 13.5 AB = 3981.07 nMgy
# some reference points for AB->Jy conversions (for ps1_dr2 fluxes)
# 30.0 AB = 3.631e-9 Jy
# 22.5 AB = 3.631e-6 Jy
# 22.0 AB = 5.754e-6 Jy
# 21.5 AB = 9.120e-6 Jy
# 21.0 AB = 1.445e-5 Jy
# 20.5 AB = 2.291e-5 Jy
# 18.5 AB = 1.445e-4 Jy
# 16.5 AB = 9.120e-4 Jy
# 14.0 AB = 9.120e-3 Jy
# 13.5 AB = 1.445e-2 Jy
# Notes on catalogdb.panstarrs1.flags aka objInfoFlag from ObjectThin
# https://outerspace.stsci.edu/display/PANSTARRS/PS1+ObjectThin+table+fields
# https://outerspace.stsci.edu/display/PANSTARRS/PS1+Object+Flags
# select objects that have the GOOD_STACK flag set:
# Flag name value decimal Notes
# GOOD_STACK 0x08000000 134217728 good-quality object in the stack (> 1 good stack measurement)
# Use these two flags to decide whether to use aper mags or not
# Flag name value decimal Notes
# EXT 0x00800000 8388608 extended in our data (eg, PS)
# EXT_ALT 0x01000000 16777216 extended in external data (eg, 2MASS)
"""
# noqa
# Notes on how many targets to expect:
sdss5db=> SELECT ero_version,xmatch_method,xmatch_version,opt_cat,ero_flux_type,count(*)
FROM erosita_superset_v1_agn GROUP BY ero_version,xmatch_method,xmatch_version,opt_cat,ero_flux_type;
ero_version | xmatch_method | xmatch_version | opt_cat | ero_flux_type | count
---------------------------------+-----------------+--------------------------+---------+---------------+---------
eRASS_s1_3B_221031_poscorr | XPS/NWAY | JBJWMS_24Nov22 | lsdr10 | 2.3-5keV | 3433
eRASS_s3_1B_220829_poscorr_v006 | XPS/NWAY_EROTDA | JWMS_06Oct22_erotda | lsdr10 | 0.2-2.3keV | 9703
eRASS_s3_1B_221007_poscorr_v007 | XPS/NWAY | JWMS_06Feb23_nomask | lsdr10 | 0.2-2.3keV | 1974450
eRASS_s3_1B_221007_poscorr_v007 | XPS/NWAY | JWMS_21Oct22 | lsdr10 | 0.2-2.3keV | 1895479
eRASS_s3_1B_221007_poscorr_v007 | XPS/NWAY | JWMS_21Oct22 | lsdr9 | 0.2-2.3keV | 47172
eRASS_s3_1B_221007_poscorr_v007 | XPS/NWAY | JWMS_21Oct22_cw2020_gedr | gedr3 | 0.2-2.3keV | 1298743
eRASS_s3_1B_221007_poscorr_v007 | XPS/NWAY | JWMS_24Oct22 | gedr3 | 0.2-2.3keV | 2465166
eRASS_s3_1B_221007_poscorr_v007 | XPS/NWAY | JWMS_24Oct22_nomask | lsdr10 | 0.2-2.3keV | 1937267
eRASS_s5_V29C | XPS/NWAY | JWTL_Oct22 | gedr3 | 0.2-2.3keV | 2007
eRASS_s5_V29C | XPS/NWAY | JWTL_Oct22 | lsdr10 | 0.2-2.3keV | 5207
(10 rows)
"""
# Notes on avoiding saturated legacysurvey sources
# https://www.legacysurvey.org/dr8/bitmasks/
# Bit Name Description
# 0 NPRIMARY touches a pixel that is outside the BRICK_PRIMARY region of a brick
# 1 BRIGHT touches a pixel within the locus of a radius-magnitude relation for
# Tycho-2 stars or one for Gaia DR2 stars to G < 13
# 2 SATUR_G touches a pixel that was saturated in at least one g-band image
# 3 SATUR_R touches a pixel that was saturated in at least one r-band image
# 4 SATUR_Z touches a pixel that was saturated in at least one z-band image
# 5 ALLMASK_G touches a pixel that has any of the ALLMASK_G bits set
# 6 ALLMASK_R touches a pixel that has any of the ALLMASK_R bits set
# 7 ALLMASK_Z touches a pixel that has any of the ALLMASK_Z bits set
# 8 WISEM1 touches a pixel in a WISEMASK_W1 bright star mask
# 9 WISEM2 touches a pixel in a WISEMASK_W2 bright star mask
# 10 BAILOUT touches a pixel in a blob where we "bailed out" of source fitting
# 11 MEDIUM touches a pixel within the locus of a radius-magnitude relation
# for Gaia DR2 stars to G < 16
# 12 GALAXY touches a pixel in an SGA large galaxy
# 13 CLUSTER touches a pixel in a globular cluster
#
# so, mask to avoid saturated targets is 2**2 + 2**3 + 2**4 = 4+8+16 = 28
#
# END PREAMBLE
# ##################################################################################
class BhmSpidersAgnLsdr10Carton(BaseCarton):
    """SPIDERS AGN candidates: eROSITA superset-v1 AGN sources with Legacy
    Survey DR10 optical counterparts.

    Builds a single peewee query joining Catalog ->
    CatalogToLegacy_Survey_DR10 -> Legacy_Survey_DR10 ->
    EROSITASupersetv1AGN (on ls_id), with a LEFT OUTER join to 'good' SDSS
    DR19p spectra that is used only to adjust priority.  Target value,
    priority, cadence, 'core' status and transformed SDSS-like magnitudes
    are all computed per-row inside the query.
    """

    name = "bhm_spiders_agn_lsdr10"
    category = "science"
    mapper = "BHM"
    program = "bhm_spiders"
    tile = False
    instrument = "BOSS"
    can_offset = True
    # Subclasses set this True to keep only targets in the faintest cadence.
    only_faintest_cadence = False

    def build_query(self, version_id, query_region=None):
        """Return the carton selection query.

        Parameters
        ----------
        version_id : int
            catalogdb cross-match version to select on.
        query_region : tuple or None
            Optional (ra, dec, radius) in degrees; if given, restrict the
            selection to a q3c radial cone.
        """
        c = Catalog.alias()
        x = EROSITASupersetv1AGN.alias()
        ls = Legacy_Survey_DR10.alias()
        c2ls = CatalogToLegacy_Survey_DR10.alias()
        instrument = peewee.Value(self.instrument)
        # Convert AB magnitude limits into nanomaggie flux limits.  The
        # min/max names deliberately swap: a brighter magnitude limit
        # corresponds to a larger flux.
        fiberflux_r_max = AB2nMgy(self.parameters["fibermag_r_min"])
        fiberflux_r_min = AB2nMgy(self.parameters["fibermag_r_max"])
        fiberflux_i_max = AB2nMgy(self.parameters["fibermag_i_min"])
        fiberflux_i_min = AB2nMgy(self.parameters["fibermag_i_max"])
        fiberflux_z_max = AB2nMgy(self.parameters["fibermag_z_min"])
        fiberflux_z_min = AB2nMgy(self.parameters["fibermag_z_max"])
        fiberflux_r_max_for_core = AB2nMgy(self.parameters["fibermag_r_min_for_core"])
        fiberflux_r_min_for_core = AB2nMgy(self.parameters["fibermag_r_max_for_core"])
        fiberflux_i_max_for_core = AB2nMgy(self.parameters["fibermag_i_min_for_core"])
        fiberflux_i_min_for_core = AB2nMgy(self.parameters["fibermag_i_max_for_core"])
        fiberflux_z_max_for_core = AB2nMgy(self.parameters["fibermag_z_min_for_core"])
        fiberflux_z_min_for_core = AB2nMgy(self.parameters["fibermag_z_max_for_core"])
        fiberflux_r_min_for_cadence1 = AB2nMgy(self.parameters["fibermag_r_for_cadence1"])
        fiberflux_r_min_for_cadence2 = AB2nMgy(self.parameters["fibermag_r_for_cadence2"])
        fiberflux_i_min_for_cadence1 = AB2nMgy(self.parameters["fibermag_i_for_cadence1"])
        fiberflux_i_min_for_cadence2 = AB2nMgy(self.parameters["fibermag_i_for_cadence2"])
        gaia_g_max_for_cadence1 = self.parameters["gaia_g_max_for_cadence1"]
        gaia_rp_max_for_cadence1 = self.parameters["gaia_rp_max_for_cadence1"]

        # #########################################################################
        # prepare the spectroscopy catalogue
        spec_sn_thresh = self.parameters["spec_sn_thresh"]
        spec_z_err_thresh = self.parameters["spec_z_err_thresh"]

        # SDSS DR19p
        # first downslect only 'good' spectra
        c2s19 = CatalogFromSDSS_DR19p_Speclite.alias()
        ss19 = SDSS_DR19p_Speclite.alias()
        s19 = (
            ss19.select(
                ss19.pk.alias("s19_pk"),
            )
            .where(
                ss19.sn_median_all >= spec_sn_thresh,
                ss19.zwarning == 0,
                ss19.z_err <= spec_z_err_thresh,
                ss19.z_err > 0.0,
                ss19.specprimary > 0,
            )
            .alias("s19")
        )

        #########################################################################
        # compute the abs(Galactic latitude):
        gal_lat = peewee.fn.abs(
            90.0 - peewee.fn.q3c_dist(north_gal_pole_ra, north_gal_pole_dec, c.ra, c.dec)
        )

        # logic is written this backwards way so that a failure to meet any core
        # criterion results in non-core status
        is_core = peewee.Case(
            None,
            (
                (gal_lat < self.parameters["min_gal_lat_for_core"], False),
                (c.dec < self.parameters["min_dec_for_core"], False),
                (x.ero_flux < self.parameters["min_ero_flux_for_core"], False),
                (x.ero_det_like < self.parameters["min_det_like_for_core"], False),
                (
                    # core targets must fall in at least one fibermag window
                    ~(
                        (
                            ls.fiberflux_r.between(
                                fiberflux_r_min_for_core, fiberflux_r_max_for_core
                            )
                        )
                        | (
                            ls.fiberflux_i.between(
                                fiberflux_i_min_for_core, fiberflux_i_max_for_core
                            )
                        )
                        | (
                            ls.fiberflux_z.between(
                                fiberflux_z_min_for_core, fiberflux_z_max_for_core
                            )
                        )
                    ),
                    False,
                ),
                (ls.maskbits.bin_and(2**13) > 0, False),  # demote globular clusters and MCs
                (x.ero_flags.bin_and(ero_flags_mask) > 0, False),  # demote problematic X-ray data
            ),
            True,
        )

        # value = peewee.Value(self.parameters.get('value', 1.0)).cast('float')
        # non-core targets carry zero value
        value = peewee.Case(None, ((is_core, self.parameters.get("value", 1.0)),), 0.0).cast(
            "float"
        )

        # priority is determined from individual target properties
        # start with a priority floor value (per carton)
        # then increment if any conditions are met:
        # add +dpriority_match_flags if target is a secondary cross-match (match_flag > 1)
        # add +dpriority_det_like if target has a low value of ero_det_like
        # add +dpriority_has_spec if target has existing good SDSS spectroscopy
        priority_1 = peewee.Case(None, ((x.xmatch_flags > 1, 1),), 0)
        priority_2 = peewee.Case(
            None, ((x.ero_det_like < self.parameters["det_like_for_priority"], 1),), 0
        )
        priority_3 = peewee.Case(None, ((s19.c.s19_pk.is_null(False), 1),), 0)
        priority_4 = peewee.Case(None, ((is_core, 0),), 1)
        # bit 2**9 is FLAG_HARD_SRC; unset bit -> not a hard X-ray source
        priority_5 = peewee.Case(None, ((x.ero_flags.bin_and(2**9) == 0, 1),), 0)
        # priority = fn.max(
        priority = (
            self.parameters["priority_floor"]
            + priority_1 * self.parameters["dpriority_match_flags"]
            + priority_2 * self.parameters["dpriority_det_like"]
            + priority_3 * self.parameters["dpriority_has_spec"]
            + priority_4 * self.parameters["dpriority_non_core"]
            + priority_5 * self.parameters["dpriority_not_hard"]
        )

        # choose cadence based on fiber magnitude in r-band
        cadence1 = self.parameters["cadence1"]
        cadence2 = self.parameters["cadence2"]
        cadence3 = self.parameters["cadence3"]
        # cadence4 = 'unknown_cadence'
        cadence = peewee.Case(
            None,
            (
                (
                    (
                        (ls.fiberflux_r > fiberflux_r_min_for_cadence1)
                        | (ls.fiberflux_i > fiberflux_i_min_for_cadence1)
                        | (ls.gaia_phot_g_mean_mag.between(0.1, gaia_g_max_for_cadence1))
                        | (ls.gaia_phot_rp_mean_mag.between(0.1, gaia_rp_max_for_cadence1))
                    ),
                    cadence1,
                ),
                (
                    (ls.fiberflux_r > fiberflux_r_min_for_cadence2)
                    | (ls.fiberflux_i > fiberflux_i_min_for_cadence2),
                    cadence2,
                ),
                # ((ls.fiberflux_r < fiberflux_r_min_for_cadence2) &
                #  (ls.fiberflux_i < fiberflux_i_min_for_cadence2),
                #  cadence3),
            ),
            cadence3,
        )
        # cadence4)

        # compute transformed SDSS mags for pointlike and extended sources separately
        # transform the legacysurvey grz into sdss psfmag griz

        # extract coeffs from fit logs via:
        # awk 'BEGIN {print("coeffs = {")} /POLYFIT/{ if($3~/sdss_psfmag/){pe="p"} else if ($3~/sdss_fiber2mag/){pe="e"} else{pe="error"}; printf("\"%s%d_%s\": %s,\n", substr($3,length($3)), $8, pe, $10)} END {print("}")}' bhm_spiders_agn_lsdr8_*/lsdr8_*mag_to_sdss_*mag_?_results.log # noqa
        coeffs = {
            "g2_e": -0.113816,
            "g1_e": 0.317176,
            "g0_e": 0.094145,
            "i2_e": -0.415858,
            "i1_e": 0.168922,
            "i0_e": -0.010771,
            "r2_e": 0.029398,
            "r1_e": -0.019938,
            "r0_e": 0.354042,
            "z2_e": -0.111262,
            "z1_e": 0.237656,
            "z0_e": 0.148923,
            "g2_p": 0.187193,
            "g1_p": -0.184362,
            "g0_p": 0.049492,
            "i2_p": -0.098979,
            "i1_p": -0.405518,
            "i0_p": 0.009688,
            "r2_p": -0.001935,
            "r1_p": 0.098201,
            "r0_p": 0.050321,
            "z2_p": -0.034163,
            "z1_p": 0.109878,
            "z0_p": -0.030167,
        }

        nMgy_min = 1e-3  # equiv to AB=30

        # pointlike - start from ls8 (psf)fluxes
        g0_p = 22.5 - 2.5 * peewee.fn.log(peewee.fn.greatest(nMgy_min, ls.flux_g))
        r0_p = 22.5 - 2.5 * peewee.fn.log(peewee.fn.greatest(nMgy_min, ls.flux_r))
        i0_p = 22.5 - 2.5 * peewee.fn.log(peewee.fn.greatest(nMgy_min, ls.flux_i))
        z0_p = 22.5 - 2.5 * peewee.fn.log(peewee.fn.greatest(nMgy_min, ls.flux_z))
        g_r_p = -2.5 * peewee.fn.log(
            peewee.fn.greatest(nMgy_min, ls.flux_g) / peewee.fn.greatest(nMgy_min, ls.flux_r)
        )
        r_z_p = -2.5 * peewee.fn.log(
            peewee.fn.greatest(nMgy_min, ls.flux_r) / peewee.fn.greatest(nMgy_min, ls.flux_z)
        )

        # extended - start from ls8 fiberfluxes
        g0_e = 22.5 - 2.5 * peewee.fn.log(peewee.fn.greatest(nMgy_min, ls.fiberflux_g))
        r0_e = 22.5 - 2.5 * peewee.fn.log(peewee.fn.greatest(nMgy_min, ls.fiberflux_r))
        i0_e = 22.5 - 2.5 * peewee.fn.log(peewee.fn.greatest(nMgy_min, ls.fiberflux_i))
        z0_e = 22.5 - 2.5 * peewee.fn.log(peewee.fn.greatest(nMgy_min, ls.fiberflux_z))
        g_r_e = -2.5 * peewee.fn.log(
            peewee.fn.greatest(nMgy_min, ls.fiberflux_g)
            / peewee.fn.greatest(nMgy_min, ls.fiberflux_r)
        )
        r_z_e = -2.5 * peewee.fn.log(
            peewee.fn.greatest(nMgy_min, ls.fiberflux_r)
            / peewee.fn.greatest(nMgy_min, ls.fiberflux_z)
        )

        # NOTE(review): the transformed i-band mags below are anchored on the
        # r-band base magnitude (r0_p/r0_e) plus an r-z colour term, not on
        # i0_p/i0_e.  This looks deliberate (the coeffs were fitted on lsdr8,
        # which had no i band) but is worth confirming against the fit logs.
        g_p = g0_p + coeffs["g0_p"] + coeffs["g1_p"] * g_r_p + coeffs["g2_p"] * g_r_p * g_r_p
        r_p = r0_p + coeffs["r0_p"] + coeffs["r1_p"] * g_r_p + coeffs["r2_p"] * g_r_p * g_r_p
        i_p = r0_p + coeffs["i0_p"] + coeffs["i1_p"] * r_z_p + coeffs["i2_p"] * r_z_p * r_z_p
        z_p = z0_p + coeffs["z0_p"] + coeffs["z1_p"] * r_z_p + coeffs["z2_p"] * r_z_p * r_z_p
        g_e = g0_e + coeffs["g0_e"] + coeffs["g1_e"] * g_r_e + coeffs["g2_e"] * g_r_e * g_r_e
        r_e = r0_e + coeffs["r0_e"] + coeffs["r1_e"] * g_r_e + coeffs["r2_e"] * g_r_e * g_r_e
        i_e = r0_e + coeffs["i0_e"] + coeffs["i1_e"] * r_z_e + coeffs["i2_e"] * r_z_e * r_z_e
        z_e = z0_e + coeffs["z0_e"] + coeffs["z1_e"] * r_z_e + coeffs["z2_e"] * r_z_e * r_z_e

        # validity checks - set limits semi-manually
        g_r_p_min = -0.25
        g_r_p_max = 1.75
        r_z_p_min = -0.5
        r_z_p_max = 2.5
        g_r_e_min = 0.0
        g_r_e_max = 1.75
        r_z_e_min = 0.2
        r_z_e_max = 1.6
        valid_p = (
            g0_p.between(0.1, 29.9)
            & r0_p.between(0.1, 29.9)
            & z0_p.between(0.1, 29.9)
            & g_r_p.between(g_r_p_min, g_r_p_max)
            & r_z_p.between(r_z_p_min, r_z_p_max)
        )
        valid_e = (
            g0_e.between(0.1, 29.9)
            & r0_e.between(0.1, 29.9)
            & z0_e.between(0.1, 29.9)
            & g_r_e.between(g_r_e_min, g_r_e_max)
            & r_z_e.between(r_z_e_min, r_z_e_max)
        )

        # We want to switch between psfmags and fibermags depending on
        # ls.type parameter (PSF or extended)
        # For 'PSF' targets, we use psfmags, but for extended sources use fiber2mags
        opt_prov = peewee.Case(
            None,
            (
                ((ls.type == "PSF") & valid_p, "sdss_psfmag_from_lsdr10"),
                ((ls.type != "PSF") & valid_e, "sdss_fiber2mag_from_lsdr10"),
            ),
            "undefined",
        )
        magnitude_g = peewee.Case(
            None,
            (
                ((ls.type == "PSF") & valid_p, g_p.cast("float")),
                ((ls.type != "PSF") & valid_e, g_e.cast("float")),
            ),
            "NaN",
        )
        magnitude_r = peewee.Case(
            None,
            (
                ((ls.type == "PSF") & valid_p, r_p.cast("float")),
                ((ls.type != "PSF") & valid_e, r_e.cast("float")),
            ),
            "NaN",
        )
        magnitude_i = peewee.Case(
            None,
            (
                ((ls.type == "PSF") & valid_p, i_p.cast("float")),
                ((ls.type != "PSF") & valid_e, i_e.cast("float")),
            ),
            "NaN",
        )
        magnitude_z = peewee.Case(
            None,
            (
                ((ls.type == "PSF") & valid_p, z_p.cast("float")),
                ((ls.type != "PSF") & valid_e, z_e.cast("float")),
            ),
            "NaN",
        )
        magnitude_gaia_g = peewee.Case(
            None, ((ls.gaia_phot_g_mean_mag.between(0.1, 29.9), ls.gaia_phot_g_mean_mag),), "NaN"
        )
        magnitude_gaia_bp = peewee.Case(
            None, ((ls.gaia_phot_bp_mean_mag.between(0.1, 29.9), ls.gaia_phot_bp_mean_mag),), "NaN"
        )
        magnitude_gaia_rp = peewee.Case(
            None, ((ls.gaia_phot_rp_mean_mag.between(0.1, 29.9), ls.gaia_phot_rp_mean_mag),), "NaN"
        )

        query = (
            c.select(
                c.catalogid.alias("catalogid"),
                priority.alias("priority"),
                value.alias("value"),
                cadence.alias("cadence"),
                instrument.alias("instrument"),
                opt_prov.alias("optical_prov"),
                magnitude_g.alias("g"),
                magnitude_r.alias("r"),
                magnitude_i.alias("i"),
                magnitude_z.alias("z"),
                magnitude_gaia_g.alias("gaia_g"),
                magnitude_gaia_bp.alias("bp"),
                magnitude_gaia_rp.alias("rp"),
                ls.ls_id.alias("ls_id"),  # extra
                ls.gaia_dr3_source_id.alias("gaia_dr3_source_id"),  # extra
                ls.gaia_dr2_source_id.alias("gaia_dr2_source_id"),  # extra
                x.ero_detuid.alias("ero_detuid"),  # extra
                x.ero_flux.alias("ero_flux"),  # extra
                x.ero_det_like.alias("ero_det_like"),  # extra
                x.ero_flags.alias("ero_flags"),  # extra
                x.xmatch_flags.alias("xmatch_flags"),  # extra
                x.xmatch_metric.alias("xmatch_metric"),  # extra
                s19.c.s19_pk.alias("sdss_dr19p_speclite_pk"),  # extra
                c.ra.alias("ra"),  # extra
                c.dec.alias("dec"),  # extra
                g0_p.alias("ls10_mag_g"),  # extra
                r0_p.alias("ls10_mag_r"),  # extra
                i0_p.alias("ls10_mag_i"),  # extra
                z0_p.alias("ls10_mag_z"),  # extra
                g0_e.alias("ls10_fibermag_g"),  # extra
                r0_e.alias("ls10_fibermag_r"),  # extra
                i0_e.alias("ls10_fibermag_i"),  # extra
                z0_e.alias("ls10_fibermag_z"),  # extra
                ls.nobs_g.alias("ls10_nobs_g"),
                ls.nobs_r.alias("ls10_nobs_r"),
                ls.nobs_i.alias("ls10_nobs_i"),
                ls.nobs_z.alias("ls10_nobs_z"),
                ls.type.alias("ls10_type"),  # extra
                ls.shape_r.alias("ls10_shape_r"),  # extra
                is_core.alias("is_core"),  # extra
                ls.ref_cat.alias("ls_ref_cat"),  # extra
                ls.ref_id.alias("ls_ref_id"),  # extra
                ls.maskbits.alias("ls_maskbits"),  # extra
                ls.fitbits.alias("ls_fitbits"),  # extra
                gal_lat.alias("abs_gal_lat"),  # extra
            )
            .join(c2ls)
            .join(ls)
            .join(x, on=(ls.ls_id == x.ls_id))
            # start joining the spectroscopy
            .switch(c)
            .join(c2s19, JOIN.LEFT_OUTER)
            .join(s19, JOIN.LEFT_OUTER, on=(s19.c.s19_pk == c2s19.target_id))
            # finished joining the spectroscopy
            # admin criteria
            .where(
                c.version_id == version_id,
                c2ls.version_id == version_id,
                fn.coalesce(c2s19.version_id, version_id) == version_id,
                c2ls.best >> True,
                # fn.coalesce(c2s19.best, True) >> True,
            )
            # science criteria
            .where(
                (x.ero_version == self.parameters["ero_version"]),
                (x.xmatch_method == self.parameters["xmatch_method"]),
                (
                    (x.xmatch_version == self.parameters["xmatch_version1"])
                    | (x.xmatch_version == self.parameters["xmatch_version2"])
                ),
                (
                    (x.opt_cat == self.parameters["opt_cat1"])
                    | (x.opt_cat == self.parameters["opt_cat2"])
                ),
                (x.xmatch_metric >= self.parameters["p_any_min"]),
                (
                    (ls.fiberflux_r.between(fiberflux_r_min, fiberflux_r_max))
                    | (ls.fiberflux_i.between(fiberflux_i_min, fiberflux_i_max))
                    | (ls.fiberflux_z.between(fiberflux_z_min, fiberflux_z_max))
                ),
                (x.ero_det_like > self.parameters["det_like_min"]),
                # (ls.maskbits.bin_and(2**2 + 2**3 + 2**4) == 0),  # avoid saturated sources
                # avoid bright stars and globular clusters:
                # (ls.maskbits.bin_and(2**1 + 2**13) == 0),
                # always avoid very bright stars, see https://www.legacysurvey.org/dr10/bitmasks/
                (ls.maskbits.bin_and(2**1) == 0),
                # (ls.nobs_r > 0),  # always require r-band coverage
                # ((ls.nobs_g > 0) | (ls.nobs_z > 0)),  # plus at least one other optical band
                # gaia safety checks to avoid bad ls photometry
                ~(ls.gaia_phot_g_mean_mag.between(0.1, self.parameters["gaia_g_mag_limit"])),
                ~(ls.gaia_phot_rp_mean_mag.between(0.1, self.parameters["gaia_rp_mag_limit"])),
            )
            # .group_by(ls)  # avoid duplicates - we trust the legacy survey entries
            .distinct([ls.ls_id])  # avoid duplicates - we trust the legacy survey entries
        )

        if self.only_faintest_cadence:
            query = query.where(cadence == cadence3)

        if query_region:
            query = query.where(
                peewee.fn.q3c_radial_query(
                    c.ra, c.dec, query_region[0], query_region[1], query_region[2]
                )
            )

        return query
#
# END BhmSpidersAgnLsdr10Carton
# ##################################################################################
class BhmSpidersAgnLsdr10D3Carton(BhmSpidersAgnLsdr10Carton):
    """Variant of the lsdr10 carton that keeps only faintest-cadence targets."""

    name = "bhm_spiders_agn_lsdr10_d3"
    only_faintest_cadence = True
# we can get away with just inheriting the selection code from
# the lsdr10 hemisphere match and adjusting the parameters only
# ##################################################################################
class BhmSpidersAgnHardCarton(BhmSpidersAgnLsdr10Carton):
    """Hard X-ray selection; reuses the lsdr10 query with different parameters."""

    name = "bhm_spiders_agn_hard"
class BhmSpidersAgnHardD3Carton(BhmSpidersAgnLsdr10Carton):
    """Faintest-cadence-only variant of the hard X-ray selection."""

    name = "bhm_spiders_agn_hard_d3"
    only_faintest_cadence = True
# # Testing of the North part of lsdr10 (i.e. dr9)
# # we can get away with just inheriting the selection code from
# # the lsdr10 hemisphere match and adjusting the parameters only
# # ##################################################################################
# class BhmSpidersAgnLsdr10NorthCarton(BhmSpidersAgnLsdr10Carton):
# name = 'bhm_spiders_agn_lsdr10_north'
class BhmSpidersAgnGaiadr3Carton(BaseCarton):
    """SPIDERS AGN candidates: eROSITA superset-v1 AGN sources with Gaia DR3
    optical counterparts.

    Builds a peewee query joining Catalog -> CatalogToGaia_DR3 -> Gaia_DR3 ->
    EROSITASupersetv1AGN (on gaia_dr3_source_id), with a LEFT OUTER join to
    'good' SDSS DR19p spectra used only to adjust priority.  SDSS-like
    psfmags are predicted from Gaia G and BP-RP via piecewise polynomial
    colour transforms.
    """

    name = "bhm_spiders_agn_gaiadr3"
    category = "science"
    mapper = "BHM"
    program = "bhm_spiders"
    tile = False
    instrument = "BOSS"
    can_offset = True
    # Subclasses set this True to keep only targets in the faintest cadence.
    only_faintest_cadence = False

    def build_query(self, version_id, query_region=None):
        """Return the carton selection query.

        Parameters
        ----------
        version_id : int
            catalogdb cross-match version to select on.
        query_region : tuple or None
            Optional (ra, dec, radius) in degrees; if given, restrict the
            selection to a q3c radial cone.
        """
        c = Catalog.alias()
        x = EROSITASupersetv1AGN.alias()
        g3 = Gaia_DR3.alias()
        c2g3 = CatalogToGaia_DR3.alias()
        instrument = peewee.Value(self.instrument)
        gaia_g_max_for_cadence1 = self.parameters["gaia_g_max_for_cadence1"]
        gaia_rp_max_for_cadence1 = self.parameters["gaia_rp_max_for_cadence1"]
        gaia_g_max_for_cadence2 = self.parameters["gaia_g_max_for_cadence2"]
        gaia_rp_max_for_cadence2 = self.parameters["gaia_rp_max_for_cadence2"]

        # these control matching to spectroscopy
        spec_sn_thresh = self.parameters["spec_sn_thresh"]
        spec_z_err_thresh = self.parameters["spec_z_err_thresh"]

        # #########################################################################
        # prepare the spectroscopy catalogues

        # SDSS DR19p
        # downslect only 'good' spectra
        c2s19 = CatalogFromSDSS_DR19p_Speclite.alias()
        ss19 = SDSS_DR19p_Speclite.alias()
        s19 = (
            ss19.select(
                ss19.pk.alias("s19_pk"),
            )
            .where(
                ss19.sn_median_all >= spec_sn_thresh,
                ss19.zwarning == 0,
                ss19.z_err <= spec_z_err_thresh,
                ss19.z_err > 0.0,
                ss19.specprimary > 0,
            )
            .alias("s19")
        )

        # #########################################################################
        # compute the abs(Galactic latitude):
        gal_lat = peewee.fn.abs(
            90.0 - peewee.fn.q3c_dist(north_gal_pole_ra, north_gal_pole_dec, c.ra, c.dec)
        )

        # failure to meet any criterion below results in non-core status
        is_core = peewee.Case(
            None,
            (
                (gal_lat < self.parameters["min_gal_lat_for_core"], False),
                (c.dec < self.parameters["min_dec_for_core"], False),
                (x.ero_flux < self.parameters["min_ero_flux_for_core"], False),
                (x.ero_det_like < self.parameters["min_det_like_for_core"], False),
                (g3.phot_g_mean_mag < self.parameters["min_gaia_g_for_core"], False),
                (g3.phot_g_mean_mag > self.parameters["max_gaia_g_for_core"], False),
            ),
            True,
        )

        # value = peewee.Value(self.parameters.get('value', 1.0)).cast('float')
        value = peewee.Case(
            None,
            # ((gal_lat > self.parameters['in_plane_lat_cut'],
            #   self.parameters.get('value', 1.0)),),
            ((is_core, self.parameters.get("value", 1.0)),),
            0.0,
        ).cast("float")

        # priority is determined by target properties
        # start with a priority floor value (per carton)
        # then increment if any conditions are met:
        # add +dpriority_match_flags if target is a secondary cross-match (match_flag > 1)
        # add +dpriority_det_like if target has a low value of ero_det_like
        # add +dpriority_has_spec if target has existing good SDSS spectroscopy
        priority_1 = peewee.Case(None, ((x.xmatch_flags > 1, 1),), 0)
        priority_2 = peewee.Case(
            None, ((x.ero_det_like < self.parameters["det_like_for_priority"], 1),), 0
        )
        priority_3 = peewee.Case(None, ((s19.c.s19_pk.is_null(False), 1),), 0)
        priority_4 = peewee.Case(None, ((is_core, 0),), 1)
        # bit 2**9 is FLAG_HARD_SRC; unset bit -> not a hard X-ray source
        priority_5 = peewee.Case(None, ((x.ero_flags.bin_and(2**9) == 0, 1),), 0)
        priority = (
            self.parameters["priority_floor"]
            + priority_1 * self.parameters["dpriority_match_flags"]
            + priority_2 * self.parameters["dpriority_det_like"]
            + priority_3 * self.parameters["dpriority_has_spec"]
            + priority_4 * self.parameters["dpriority_non_core"]
            + priority_5 * self.parameters["dpriority_not_hard"]
        )

        # choose cadence based on magnitude in Gaia G and RP-bands
        cadence1 = self.parameters["cadence1"]
        cadence2 = self.parameters["cadence2"]
        cadence3 = self.parameters["cadence3"]
        cadence4 = "unknown_cadence"
        cadence = peewee.Case(
            None,
            (
                (
                    (g3.phot_g_mean_mag < gaia_g_max_for_cadence1)
                    | (g3.phot_rp_mean_mag < gaia_rp_max_for_cadence1),
                    cadence1,
                ),
                (
                    (g3.phot_g_mean_mag < gaia_g_max_for_cadence2)
                    | (g3.phot_rp_mean_mag < gaia_rp_max_for_cadence2),
                    cadence2,
                ),
                (
                    (g3.phot_g_mean_mag >= gaia_g_max_for_cadence2)
                    & (g3.phot_rp_mean_mag >= gaia_rp_max_for_cadence2),
                    cadence3,
                ),
            ),
            cadence4,
        )

        # compute transformed SDSS mags
        # transform the Gaia dr3 G,BP,RP into sdss psfmag griz
        # piecewise transformation either side of BP-RP=1.8
        # fit to blue end is cubic, fit to red end is quadratic
        # awk 'BEGIN {print("coeffs = {")} /POLYFIT/{ if(FILENAME~/_red/){pe="red"} else if (FILENAME~/_blue/){pe="blue"} else{pe="error"}; printf("\"%s%d_%s\": %s,\n", substr($3,length($3)), $8, pe, $10)} END {print("}")}' bhm_spiders_agn_gaiadr2_red/gdr2_*mag_to_sdss_*mag_?_results.log bhm_spiders_agn_gaiadr2_blue/gdr2_*mag_to_sdss_*mag_?_results.log # noqa
        coeffs = {
            "g2_red": 0.081178,
            "g1_red": 0.355677,
            "g0_red": 0.510306,
            "i2_red": 0.048864,
            "i1_red": -0.287475,
            "i0_red": -0.336712,
            "r2_red": 0.028080,
            "r1_red": 0.542331,
            "r0_red": -1.055168,
            "z2_red": -0.131385,
            "z1_red": 0.302555,
            "z0_red": -1.381648,
            "g3_blue": 0.639054,
            "g2_blue": -1.739187,
            "g1_blue": 1.420330,
            "g0_blue": -0.194071,
            "i3_blue": 0.780585,
            "i2_blue": -2.549848,
            "i1_blue": 1.489880,
            "i0_blue": -0.241381,
            "r3_blue": 0.575494,
            "r2_blue": -2.077000,
            "r1_blue": 1.573302,
            "r0_blue": -0.295026,
            "z3_blue": 1.064986,
            "z2_blue": -3.162969,
            "z1_blue": 1.493750,
            "z0_blue": -0.199582,
        }

        g_blue = (
            g3.phot_g_mean_mag
            + coeffs["g0_blue"]
            + coeffs["g1_blue"] * g3.bp_rp
            + coeffs["g2_blue"] * g3.bp_rp * g3.bp_rp
            + coeffs["g3_blue"] * g3.bp_rp * g3.bp_rp * g3.bp_rp
        )
        r_blue = (
            g3.phot_g_mean_mag
            + coeffs["r0_blue"]
            + coeffs["r1_blue"] * g3.bp_rp
            + coeffs["r2_blue"] * g3.bp_rp * g3.bp_rp
            + coeffs["r3_blue"] * g3.bp_rp * g3.bp_rp * g3.bp_rp
        )
        i_blue = (
            g3.phot_g_mean_mag
            + coeffs["i0_blue"]
            + coeffs["i1_blue"] * g3.bp_rp
            + coeffs["i2_blue"] * g3.bp_rp * g3.bp_rp
            + coeffs["i3_blue"] * g3.bp_rp * g3.bp_rp * g3.bp_rp
        )
        z_blue = (
            g3.phot_g_mean_mag
            + coeffs["z0_blue"]
            + coeffs["z1_blue"] * g3.bp_rp
            + coeffs["z2_blue"] * g3.bp_rp * g3.bp_rp
            + coeffs["z3_blue"] * g3.bp_rp * g3.bp_rp * g3.bp_rp
        )
        g_red = (
            g3.phot_g_mean_mag
            + coeffs["g0_red"]
            + coeffs["g1_red"] * g3.bp_rp
            + coeffs["g2_red"] * g3.bp_rp * g3.bp_rp
        )
        r_red = (
            g3.phot_g_mean_mag
            + coeffs["r0_red"]
            + coeffs["r1_red"] * g3.bp_rp
            + coeffs["r2_red"] * g3.bp_rp * g3.bp_rp
        )
        i_red = (
            g3.phot_g_mean_mag
            + coeffs["i0_red"]
            + coeffs["i1_red"] * g3.bp_rp
            + coeffs["i2_red"] * g3.bp_rp * g3.bp_rp
        )
        z_red = (
            g3.phot_g_mean_mag
            + coeffs["z0_red"]
            + coeffs["z1_red"] * g3.bp_rp
            + coeffs["z2_red"] * g3.bp_rp * g3.bp_rp
        )

        # validity checks - set limits semi-manually
        bp_rp_min = 0.0
        bp_rp_max = 3.0
        valid = (
            g3.phot_g_mean_mag.between(0.1, 29.9)
            & g3.phot_bp_mean_mag.between(0.1, 29.9)
            & g3.phot_rp_mean_mag.between(0.1, 29.9)
            & g3.bp_rp.between(bp_rp_min, bp_rp_max)
        )
        opt_prov = peewee.Case(None, ((valid, "sdss_psfmag_from_gdr3"),), "undefined")
        # NOTE(review): targets with bp_rp exactly 1.8 match neither the blue
        # nor the red branch below and fall through to 'NaN' — presumably an
        # accepted edge case; confirm if exact-1.8 colours can occur.
        magnitude_g = peewee.Case(
            None,
            (
                (valid & (g3.bp_rp < 1.8), g_blue),
                (valid & (g3.bp_rp > 1.8), g_red),
            ),
            "NaN",
        )
        magnitude_r = peewee.Case(
            None,
            (
                (valid & (g3.bp_rp < 1.8), r_blue),
                (valid & (g3.bp_rp > 1.8), r_red),
            ),
            "NaN",
        )
        magnitude_i = peewee.Case(
            None,
            (
                (valid & (g3.bp_rp < 1.8), i_blue),
                (valid & (g3.bp_rp > 1.8), i_red),
            ),
            "NaN",
        )
        magnitude_z = peewee.Case(
            None,
            (
                (valid & (g3.bp_rp < 1.8), z_blue),
                (valid & (g3.bp_rp > 1.8), z_red),
            ),
            "NaN",
        )

        query = (
            c.select(
                c.catalogid.alias("catalogid"),
                x.ero_detuid.alias("ero_detuid"),  # extra
                x.ero_flux.alias("ero_flux"),  # extra
                x.ero_det_like.alias("ero_det_like"),  # extra
                g3.source_id.alias("gaia_dr3_source_id"),  # extra
                s19.c.s19_pk.alias("sdss_dr19p_speclite_pk"),  # extra
                c.ra.alias("ra"),  # extra
                c.dec.alias("dec"),  # extra
                priority.alias("priority"),
                value.alias("value"),
                cadence.alias("cadence"),
                instrument.alias("instrument"),
                opt_prov.alias("optical_prov"),
                magnitude_g.alias("g"),
                magnitude_r.alias("r"),
                magnitude_i.alias("i"),
                magnitude_z.alias("z"),
                g3.phot_g_mean_mag.alias("gaia_g"),
                g3.phot_bp_mean_mag.alias("bp"),
                g3.phot_rp_mean_mag.alias("rp"),
                is_core.alias("is_core"),  # extra
                gal_lat.alias("abs_gal_lat"),  # extra
                x.xmatch_version.alias("xmatch_version"),  # extra
            )
            .join(c2g3)
            # NOTE(review): this .where() references c2s19 before c2s19 is
            # joined further down the chain — peewee merges WHERE clauses
            # regardless of position, so the SQL is valid, but the ordering
            # reads oddly; verify it matches the lsdr10 carton's intent.
            .where(
                c.version_id == version_id,
                c2g3.version_id == version_id,
                fn.coalesce(c2s19.version_id, version_id) == version_id,
                c2g3.best >> True,
            )
            .join(g3)
            .join(x, on=(g3.source_id == x.gaia_dr3_source_id))
            # start joining the spectroscopy
            .switch(c)
            .join(c2s19, JOIN.LEFT_OUTER)
            .join(s19, JOIN.LEFT_OUTER, on=(s19.c.s19_pk == c2s19.target_id))
            # finished joining the spectroscopy
            .where(
                (x.ero_version == self.parameters["ero_version"]),
                (
                    (x.xmatch_method == self.parameters["xmatch_method1"])
                    | (x.xmatch_method == self.parameters["xmatch_method2"])
                ),
                (
                    (x.xmatch_version == self.parameters["xmatch_version1"])
                    | (x.xmatch_version == self.parameters["xmatch_version2"])
                ),
                (x.opt_cat == self.parameters["opt_cat"]),
                (x.xmatch_metric >= self.parameters["p_any_min"]),
                (g3.phot_g_mean_mag > self.parameters["gaia_g_mag_limit"]),
                (g3.phot_rp_mean_mag > self.parameters["gaia_rp_mag_limit"]),
                (x.ero_det_like > self.parameters["det_like_min"]),
            )
            .distinct([g3.source_id])  # avoid duplicates - we trust the gaia ids
        )

        if self.only_faintest_cadence:
            query = query.where(cadence == cadence3)

        if query_region:
            query = query.where(
                peewee.fn.q3c_radial_query(
                    c.ra, c.dec, query_region[0], query_region[1], query_region[2]
                )
            )

        return query
#
# END BhmSpidersAgnGaiadr3Carton
# For the version that goes via catwise202,
# we can get away with just inheriting the selection code from
# the gaia dr3 hemisphere match and adjusting the parameters only
# ##################################################################################
class BhmSpidersAgnGaiadr3viaCW2020Carton(BhmSpidersAgnGaiadr3Carton):
    # Inherits the full selection logic from BhmSpidersAgnGaiadr3Carton;
    # only the parameters (looked up by carton name) differ.
    name = "bhm_spiders_agn_gaiadr3_viacw2020"
# ##################################################################################
class BhmSpidersAgnGaiadr3BothCarton(BhmSpidersAgnGaiadr3Carton):
    # Inherits the full selection logic from BhmSpidersAgnGaiadr3Carton;
    # only the parameters (looked up by carton name) differ.
    name = "bhm_spiders_agn_gaiadr3_both"
# ##################################################################################
# we can get away with just inheriting the selection code from
# the gaia dr3 hemisphere match and adjusting the parameters only
class BhmSpidersAgnSepCarton(BhmSpidersAgnGaiadr3Carton):
    # Inherits the full selection logic from BhmSpidersAgnGaiadr3Carton;
    # only the parameters (looked up by carton name) differ.
    name = "bhm_spiders_agn_sep"
# ##################################################################################
class BhmSpidersAgnGaiadr3D3CartonCarton(BhmSpidersAgnGaiadr3Carton):
    # Same selection as the parent, but keeps only targets assigned the
    # faintest cadence (cadence3) -- see the only_faintest_cadence filter.
    # NOTE(review): the doubled "CartonCarton" in the class name looks like a
    # typo, but renaming would break external references -- confirm intent.
    name = "bhm_spiders_agn_gaiadr3_d3"
    only_faintest_cadence = True
# ##################################################################################
class BhmSpidersAgnSepD3Carton(BhmSpidersAgnGaiadr3Carton):
    # Same selection as the parent, but keeps only targets assigned the
    # faintest cadence (cadence3) -- see the only_faintest_cadence filter.
    name = "bhm_spiders_agn_sep_d3"
    only_faintest_cadence = True
class BhmSpidersAgnTdaCarton(BaseCarton):
    """SPIDERS AGN time-domain carton.

    Selects eROSITA X-ray sources with LegacySurvey DR10 optical counterparts
    (via the EROSITASupersetv1AGN cross-match table), assigns cadences from
    optical/Gaia brightness, and derives pseudo-SDSS magnitudes from LS
    fluxes via polynomial colour transforms.
    """
    name = "bhm_spiders_agn_tda"
    category = "science"
    mapper = "BHM"
    program = "bhm_spiders"
    tile = False
    instrument = "BOSS"
    can_offset = True
    # Subclasses (e.g. the *_d3 carton below) set this True to keep only
    # targets that were assigned the faintest cadence (cadence3).
    only_faintest_cadence = False
    def build_query(self, version_id, query_region=None):
        """Build the peewee target-selection query.

        Parameters
        ----------
        version_id : int
            Catalogdb cross-match version to select against.
        query_region : tuple or None
            Optional (ra, dec, radius) cone restriction, in degrees.
        """
        # Table aliases.
        c = Catalog.alias()
        x = EROSITASupersetv1AGN.alias()
        ls = Legacy_Survey_DR10.alias()
        c2ls = CatalogToLegacy_Survey_DR10.alias()
        instrument = peewee.Value(self.instrument)
        # Convert AB magnitude limits to nanomaggy flux limits.  The min/max
        # swap is deliberate: a brighter magnitude limit is a *larger* flux.
        fiberflux_r_max = AB2nMgy(self.parameters["fibermag_r_min"])
        fiberflux_r_min = AB2nMgy(self.parameters["fibermag_r_max"])
        fiberflux_i_max = AB2nMgy(self.parameters["fibermag_i_min"])
        fiberflux_i_min = AB2nMgy(self.parameters["fibermag_i_max"])
        fiberflux_z_max = AB2nMgy(self.parameters["fibermag_z_min"])
        fiberflux_z_min = AB2nMgy(self.parameters["fibermag_z_max"])
        fiberflux_r_min_for_cadence1 = AB2nMgy(self.parameters["fibermag_r_for_cadence1"])
        fiberflux_r_min_for_cadence2 = AB2nMgy(self.parameters["fibermag_r_for_cadence2"])
        gaia_g_max_for_cadence1 = self.parameters["gaia_g_max_for_cadence1"]
        gaia_rp_max_for_cadence1 = self.parameters["gaia_rp_max_for_cadence1"]
        # choose cadence based on fiber magnitude in r-band + gaia G,RP
        cadence1 = self.parameters["cadence1"]
        cadence2 = self.parameters["cadence2"]
        cadence3 = self.parameters["cadence3"]
        cadence4 = "unknown_cadence"
        # SQL CASE: brightest tier first; fall through to cadence4 sentinel.
        cadence = peewee.Case(
            None,
            (
                (
                    (
                        (ls.fiberflux_r > fiberflux_r_min_for_cadence1)
                        | (ls.gaia_phot_g_mean_mag.between(0.1, gaia_g_max_for_cadence1))
                        | (ls.gaia_phot_rp_mean_mag.between(0.1, gaia_rp_max_for_cadence1))
                    ),
                    cadence1,
                ),
                (ls.fiberflux_r > fiberflux_r_min_for_cadence2, cadence2),
                (ls.fiberflux_r <= fiberflux_r_min_for_cadence2, cadence3),
            ),
            cadence4,
        )
        value = peewee.Value(self.parameters["value"]).cast("float")
        priority = peewee.Value(self.parameters["priority_floor"]).cast("integer")
        # compute transformed SDSS mags for pointlike and extended sources separately
        # transform the legacysurvey grz into sdss psfmag griz
        # extract coeffs from fit logs via:
        # awk 'BEGIN {print("coeffs = {")} /POLYFIT/{ if($3~/sdss_psfmag/){pe="p"} else if ($3~/sdss_fiber2mag/){pe="e"} else{pe="error"}; printf("\"%s%d_%s\": %s,\n", substr($3,length($3)), $8, pe, $10)} END {print("}")}' bhm_spiders_agn_lsdr8_*/lsdr8_*mag_to_sdss_*mag_?_results.log # noqa
        # Keys are "<band><order>_<p|e>": p = pointlike (psf), e = extended
        # (fiber) transforms; order 0/1/2 are the polynomial coefficients.
        coeffs = {
            "g2_e": -0.113816,
            "g1_e": 0.317176,
            "g0_e": 0.094145,
            "i2_e": -0.415858,
            "i1_e": 0.168922,
            "i0_e": -0.010771,
            "r2_e": 0.029398,
            "r1_e": -0.019938,
            "r0_e": 0.354042,
            "z2_e": -0.111262,
            "z1_e": 0.237656,
            "z0_e": 0.148923,
            "g2_p": 0.187193,
            "g1_p": -0.184362,
            "g0_p": 0.049492,
            "i2_p": -0.098979,
            "i1_p": -0.405518,
            "i0_p": 0.009688,
            "r2_p": -0.001935,
            "r1_p": 0.098201,
            "r0_p": 0.050321,
            "z2_p": -0.034163,
            "z1_p": 0.109878,
            "z0_p": -0.030167,
        }
        nMgy_min = 1e-3 # equiv to AB=30
        # pointlike - start from ls8 (psf)fluxes
        g0_p = 22.5 - 2.5 * peewee.fn.log(peewee.fn.greatest(nMgy_min, ls.flux_g))
        r0_p = 22.5 - 2.5 * peewee.fn.log(peewee.fn.greatest(nMgy_min, ls.flux_r))
        i0_p = 22.5 - 2.5 * peewee.fn.log(peewee.fn.greatest(nMgy_min, ls.flux_i))
        z0_p = 22.5 - 2.5 * peewee.fn.log(peewee.fn.greatest(nMgy_min, ls.flux_z))
        g_r_p = -2.5 * peewee.fn.log(
            peewee.fn.greatest(nMgy_min, ls.flux_g) / peewee.fn.greatest(nMgy_min, ls.flux_r)
        )
        r_z_p = -2.5 * peewee.fn.log(
            peewee.fn.greatest(nMgy_min, ls.flux_r) / peewee.fn.greatest(nMgy_min, ls.flux_z)
        )
        # extended - start from ls8 fiberfluxes
        g0_e = 22.5 - 2.5 * peewee.fn.log(peewee.fn.greatest(nMgy_min, ls.fiberflux_g))
        r0_e = 22.5 - 2.5 * peewee.fn.log(peewee.fn.greatest(nMgy_min, ls.fiberflux_r))
        i0_e = 22.5 - 2.5 * peewee.fn.log(peewee.fn.greatest(nMgy_min, ls.fiberflux_i))
        z0_e = 22.5 - 2.5 * peewee.fn.log(peewee.fn.greatest(nMgy_min, ls.fiberflux_z))
        g_r_e = -2.5 * peewee.fn.log(
            peewee.fn.greatest(nMgy_min, ls.fiberflux_g)
            / peewee.fn.greatest(nMgy_min, ls.fiberflux_r)
        )
        r_z_e = -2.5 * peewee.fn.log(
            peewee.fn.greatest(nMgy_min, ls.fiberflux_r)
            / peewee.fn.greatest(nMgy_min, ls.fiberflux_z)
        )
        # Polynomial colour transforms: sdss_mag = ls_mag + c0 + c1*colour + c2*colour^2
        g_p = g0_p + coeffs["g0_p"] + coeffs["g1_p"] * g_r_p + coeffs["g2_p"] * g_r_p * g_r_p
        r_p = r0_p + coeffs["r0_p"] + coeffs["r1_p"] * g_r_p + coeffs["r2_p"] * g_r_p * g_r_p
        # NOTE(review): i_p/i_e below start from the r-band magnitude (r0_p/r0_e)
        # rather than i0_p/i0_e.  Presumably intentional because the fitted
        # transforms come from lsdr8 (which lacks i-band) -- confirm.
        i_p = r0_p + coeffs["i0_p"] + coeffs["i1_p"] * r_z_p + coeffs["i2_p"] * r_z_p * r_z_p
        z_p = z0_p + coeffs["z0_p"] + coeffs["z1_p"] * r_z_p + coeffs["z2_p"] * r_z_p * r_z_p
        g_e = g0_e + coeffs["g0_e"] + coeffs["g1_e"] * g_r_e + coeffs["g2_e"] * g_r_e * g_r_e
        r_e = r0_e + coeffs["r0_e"] + coeffs["r1_e"] * g_r_e + coeffs["r2_e"] * g_r_e * g_r_e
        i_e = r0_e + coeffs["i0_e"] + coeffs["i1_e"] * r_z_e + coeffs["i2_e"] * r_z_e * r_z_e
        z_e = z0_e + coeffs["z0_e"] + coeffs["z1_e"] * r_z_e + coeffs["z2_e"] * r_z_e * r_z_e
        # validity checks - set limits semi-manually
        g_r_p_min = -0.25
        g_r_p_max = 1.75
        r_z_p_min = -0.5
        r_z_p_max = 2.5
        g_r_e_min = 0.0
        g_r_e_max = 1.75
        r_z_e_min = 0.2
        r_z_e_max = 1.6
        # A transform is trusted only when the input mags and colours lie in
        # the fitted range.
        valid_p = (
            g0_p.between(0.1, 29.9)
            & r0_p.between(0.1, 29.9)
            & z0_p.between(0.1, 29.9)
            & g_r_p.between(g_r_p_min, g_r_p_max)
            & r_z_p.between(r_z_p_min, r_z_p_max)
        )
        valid_e = (
            g0_e.between(0.1, 29.9)
            & r0_e.between(0.1, 29.9)
            & z0_e.between(0.1, 29.9)
            & g_r_e.between(g_r_e_min, g_r_e_max)
            & r_z_e.between(r_z_e_min, r_z_e_max)
        )
        # We want to switch between psfmags and fibermags depending on
        # ls.type parameter (PSF or extended)
        # For 'PSF' targets, we use psfmags, but for extended sources use fiber2mags
        opt_prov = peewee.Case(
            None,
            (
                ((ls.type == "PSF") & valid_p, "sdss_psfmag_from_lsdr10"),
                ((ls.type != "PSF") & valid_e, "sdss_fiber2mag_from_lsdr10"),
            ),
            "undefined",
        )
        magnitude_g = peewee.Case(
            None,
            (
                ((ls.type == "PSF") & valid_p, g_p.cast("float")),
                ((ls.type != "PSF") & valid_e, g_e.cast("float")),
            ),
            "NaN",
        )
        magnitude_r = peewee.Case(
            None,
            (
                ((ls.type == "PSF") & valid_p, r_p.cast("float")),
                ((ls.type != "PSF") & valid_e, r_e.cast("float")),
            ),
            "NaN",
        )
        magnitude_i = peewee.Case(
            None,
            (
                ((ls.type == "PSF") & valid_p, i_p.cast("float")),
                ((ls.type != "PSF") & valid_e, i_e.cast("float")),
            ),
            "NaN",
        )
        magnitude_z = peewee.Case(
            None,
            (
                ((ls.type == "PSF") & valid_p, z_p.cast("float")),
                ((ls.type != "PSF") & valid_e, z_e.cast("float")),
            ),
            "NaN",
        )
        # Gaia magnitudes copied through only when in a sane range.
        magnitude_gaia_g = peewee.Case(
            None, ((ls.gaia_phot_g_mean_mag.between(0.1, 29.9), ls.gaia_phot_g_mean_mag),), "NaN"
        )
        magnitude_gaia_bp = peewee.Case(
            None, ((ls.gaia_phot_bp_mean_mag.between(0.1, 29.9), ls.gaia_phot_bp_mean_mag),), "NaN"
        )
        magnitude_gaia_rp = peewee.Case(
            None, ((ls.gaia_phot_rp_mean_mag.between(0.1, 29.9), ls.gaia_phot_rp_mean_mag),), "NaN"
        )
        query = (
            c.select(
                c.catalogid.alias("catalogid"),
                priority.alias("priority"),
                value.alias("value"),
                cadence.alias("cadence"),
                instrument.alias("instrument"),
                opt_prov.alias("optical_prov"),
                magnitude_g.alias("g"),
                magnitude_r.alias("r"),
                magnitude_i.alias("i"),
                magnitude_z.alias("z"),
                magnitude_gaia_g.alias("gaia_g"),
                magnitude_gaia_bp.alias("bp"),
                magnitude_gaia_rp.alias("rp"),
                ls.ls_id.alias("ls_id"),  # extra
                ls.gaia_dr3_source_id.alias("gaia_dr3_source_id"),  # extra
                ls.gaia_dr2_source_id.alias("gaia_dr2_source_id"),  # extra
                x.ero_detuid.alias("ero_detuid"),  # extra
                x.ero_flux.alias("ero_flux"),  # extra
                x.ero_det_like.alias("ero_det_like"),  # extra
                x.ero_flags.alias("ero_flags"),  # extra
                x.xmatch_flags.alias("xmatch_flags"),  # extra
                x.xmatch_metric.alias("xmatch_metric"),  # extra
                c.ra.alias("ra"),  # extra
                c.dec.alias("dec"),  # extra
                g0_p.alias("ls10_mag_g"),  # extra
                r0_p.alias("ls10_mag_r"),  # extra
                i0_p.alias("ls10_mag_i"),  # extra
                z0_p.alias("ls10_mag_z"),  # extra
                g0_e.alias("ls10_fibermag_g"),  # extra
                r0_e.alias("ls10_fibermag_r"),  # extra
                i0_e.alias("ls10_fibermag_i"),  # extra
                z0_e.alias("ls10_fibermag_z"),  # extra
                ls.nobs_g.alias("ls10_nobs_g"),
                ls.nobs_r.alias("ls10_nobs_r"),
                ls.nobs_i.alias("ls10_nobs_i"),
                ls.nobs_z.alias("ls10_nobs_z"),
                ls.type.alias("ls10_type"),  # extra
                ls.shape_r.alias("ls10_shape_r"),  # extra
                ls.ref_cat.alias("ls_ref_cat"),  # extra
                ls.ref_id.alias("ls_ref_id"),  # extra
                ls.maskbits.alias("ls_maskbits"),  # extra
                ls.fitbits.alias("ls_fitbits"),  # extra
                # gal_lat.alias('abs_gal_lat'), # extra
            )
            .join(c2ls)
            .join(ls)
            .join(x, on=(ls.ls_id == x.ls_id))
            # admin criteria
            .where(
                c.version_id == version_id,
                c2ls.version_id == version_id,
                c2ls.best >> True,
            )
            # science criteria
            .where(
                (x.ero_version == self.parameters["ero_version"]),
                (x.xmatch_method == self.parameters["xmatch_method"]),
                (x.xmatch_version == self.parameters["xmatch_version"]),
                (x.opt_cat == self.parameters["opt_cat"]),
                (
                    (ls.fiberflux_r.between(fiberflux_r_min, fiberflux_r_max))
                    | (ls.fiberflux_i.between(fiberflux_i_min, fiberflux_i_max))
                    | (ls.fiberflux_z.between(fiberflux_z_min, fiberflux_z_max))
                ),
                # gaia safety checks to avoid bad ls photometry
                ~(ls.gaia_phot_g_mean_mag.between(0.1, self.parameters["gaia_g_mag_limit"])),
                ~(ls.gaia_phot_rp_mean_mag.between(0.1, self.parameters["gaia_rp_mag_limit"])),
            )
            # .group_by(ls) # avoid duplicates - we trust the legacy survey entries
            .distinct([ls.ls_id])  # avoid duplicates - we trust the legacy survey entries
        )
        if self.only_faintest_cadence:
            query = query.where(cadence == cadence3)
        if query_region:
            query = query.where(
                peewee.fn.q3c_radial_query(
                    c.ra, c.dec, query_region[0], query_region[1], query_region[2]
                )
            )
        return query
# ##################################################################################
class BhmSpidersAgnTdaD3Carton(BhmSpidersAgnTdaCarton):
    # Same selection as BhmSpidersAgnTdaCarton, but keeps only targets that
    # were assigned the faintest cadence (cadence3).
    name = "bhm_spiders_agn_tda_d3"
    only_faintest_cadence = True
|
sdssREPO_NAMEtarget_selectionPATH_START.@target_selection_extracted@target_selection-main@python@target_selection@cartons@bhm_spiders_agn.py@.PATH_END.py
|
{
"filename": "binom.py",
"repo_name": "jax-ml/jax",
"repo_path": "jax_extracted/jax-main/jax/scipy/stats/binom.py",
"type": "Python"
}
|
# Copyright 2023 The JAX Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from jax._src.scipy.stats.binom import (
logpmf as logpmf,
pmf as pmf,
)
|
jax-mlREPO_NAMEjaxPATH_START.@jax_extracted@jax-main@jax@scipy@stats@binom.py@.PATH_END.py
|
{
"filename": "test_mpi.py",
"repo_name": "adrn/schwimmbad",
"repo_path": "schwimmbad_extracted/schwimmbad-main/tests/test_mpi.py",
"type": "Python"
}
|
# type: ignore
"""
I couldn't figure out how to get py.test and MPI to play nice together,
so this is a script that tests the MPIPool
"""
# Standard library
import random
from schwimmbad._test_helpers import _batch_function, _function, isclose
def _callback(x):
    # No-op callback used purely to exercise the ``callback=`` argument of
    # pool.map below.
    pass
def test_mpi(pool):
    """Exercise ``pool.map`` / ``pool.batched_map`` against serial references."""
    all_tasks = [[random.random() for i in range(1000)]]

    # map, no callback
    for tasks in all_tasks:
        serial = [_function(x) for x in tasks]
        parallel = pool.map(_function, tasks)
        for got, expected in zip(parallel, serial):
            assert isclose(got, expected)
        assert len(parallel) == len(tasks)

    # map, with a callback
    for tasks in all_tasks:
        serial = [_function(x) for x in tasks]
        parallel = pool.map(_function, tasks, callback=_callback)
        for got, expected in zip(parallel, serial):
            assert isclose(got, expected)
        assert len(parallel) == len(tasks)

    # batched map (reuses ``tasks`` left over from the loop above)
    batched = pool.batched_map(_batch_function, tasks)
    for batch_result in batched:
        assert all(isclose(x, 42.01) for x in batch_result)

    print("All tests passed")
if __name__ == "__main__":
    # Script entry point: per the module docstring, this is meant to be run
    # directly (under an MPI launcher) rather than through py.test.
    from schwimmbad.mpi import MPIPool
    with MPIPool() as pool:
        test_mpi(pool)
|
adrnREPO_NAMEschwimmbadPATH_START.@schwimmbad_extracted@schwimmbad-main@tests@test_mpi.py@.PATH_END.py
|
{
"filename": "_zmid.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/heatmap/_zmid.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators


class ZmidValidator(_plotly_utils.basevalidators.NumberValidator):
    """Number validator for the heatmap ``zmid`` attribute."""

    def __init__(self, plotly_name="zmid", parent_name="heatmap", **kwargs):
        # Pull the defaults out of kwargs first so explicit values win.
        edit_type = kwargs.pop("edit_type", "calc")
        implied_edits = kwargs.pop("implied_edits", {})
        super(ZmidValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            implied_edits=implied_edits,
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@heatmap@_zmid.py@.PATH_END.py
|
{
"filename": "logm0_c1_late.py",
"repo_name": "ArgonneCPAC/diffmah",
"repo_path": "diffmah_extracted/diffmah-main/diffmah/diffmahpop_kernels/bimod_logm0_kernels/logm0_c1_late.py",
"type": "Python"
}
|
"""
"""
from collections import OrderedDict, namedtuple
from jax import jit as jjit
from jax import numpy as jnp
from jax import value_and_grad, vmap
from ...bfgs_wrapper import bfgs_adam_fallback
from ...utils import _inverse_sigmoid, _sig_slope, _sigmoid
# Default (bounded) values of the late-forming c1 model parameters.
DEFAULT_LGM0POP_C1_PDICT = OrderedDict(
    lgm0pop_c1_ytp_late=0.027,
    lgm0pop_c1_ylo_late=-0.048,
    lgm0pop_c1_clip_x0_late=8.443,
    lgm0pop_c1_clip_ylo_late=0.145,
    lgm0pop_c1_clip_yhi_late=0.002,
    lgm0pop_c1_t_obs_x0_late=6.377,
)
# Namedtuple container for the parameters (field order follows the dict).
LGM0Pop_C1_Params = namedtuple("LGM0Pop_C1_Params", DEFAULT_LGM0POP_C1_PDICT.keys())
DEFAULT_LGM0POP_C1_PARAMS = LGM0Pop_C1_Params(**DEFAULT_LGM0POP_C1_PDICT)
# (lo, hi) bounds per parameter, consumed by the sigmoid bounding transforms.
LGM0POP_C1_BOUNDS_DICT = OrderedDict(
    lgm0pop_c1_ytp_late=(0.001, 0.1),
    lgm0pop_c1_ylo_late=(-0.05, -0.001),
    lgm0pop_c1_clip_x0_late=(4.0, 11.0),
    lgm0pop_c1_clip_ylo_late=(0.02, 0.15),
    lgm0pop_c1_clip_yhi_late=(0.001, 0.05),
    lgm0pop_c1_t_obs_x0_late=(3.0, 10.0),
)
LGM0POP_C1_BOUNDS = LGM0Pop_C1_Params(**LGM0POP_C1_BOUNDS_DICT)
# "u_"-prefixed field names for the unbounded parametrization.
_C1_UPNAMES = ["u_" + key for key in LGM0Pop_C1_Params._fields]
LGM0Pop_C1_UParams = namedtuple("LGM0Pop_C1_UParams", _C1_UPNAMES)
# Fixed shape constants of the sig-slope / sigmoid kernels below.
XTP = 10.0
GLOBAL_K = 0.25
CLIP_TP_K = 1.0
K_BOUNDING = 0.1
@jjit
def _pred_c1_kern(params, t_obs, t_peak):
    """Predict c1 as a sig-slope in t_obs, floored by a t_peak-dependent clip."""
    # Floor value: a sigmoid in t_peak between the clip_ylo/clip_yhi params.
    clip_floor = _sigmoid(
        t_peak,
        params.lgm0pop_c1_clip_x0_late,
        CLIP_TP_K,
        params.lgm0pop_c1_clip_ylo_late,
        params.lgm0pop_c1_clip_yhi_late,
    )
    # Main trend: sig-slope in t_obs anchored at XTP with zero high-end slope.
    c1 = _sig_slope(
        t_obs,
        XTP,
        params.lgm0pop_c1_ytp_late,
        params.lgm0pop_c1_t_obs_x0_late,
        GLOBAL_K,
        params.lgm0pop_c1_ylo_late,
        0.0,
    )
    return jnp.clip(c1, min=clip_floor)
@jjit
def _mse(x, y):
d = y - x
return jnp.mean(d * d)
@jjit
def _loss_kern_scalar(params, loss_data):
    """MSE loss for a single (t_obs, t_peak, target_c1) data point."""
    t_obs, t_peak, target_c1 = loss_data
    return _mse(target_c1, _pred_c1_kern(params, t_obs, t_peak))
@jjit
def global_loss_kern(params, global_loss_data):
    """Sum of the single-point losses over every entry in global_loss_data."""
    return sum(
        (_loss_kern_scalar(params, single) for single in global_loss_data), 0.0
    )
# Jitted (loss, gradient) evaluator consumed by the fitter below.
global_loss_and_grads_kern = jjit(value_and_grad(global_loss_kern))
def fit_global_c1_model(global_loss_data, p_init=DEFAULT_LGM0POP_C1_PARAMS):
    """Fit the c1 model to ``global_loss_data`` starting from ``p_init``.

    Returns
    -------
    (p_best, loss_best, fit_terminates, code_used)
        Best-fit parameters, final loss, convergence flag, and which
        optimizer (BFGS or Adam fallback) produced the result.
    """
    p_best, loss_best, fit_terminates, code_used = bfgs_adam_fallback(
        global_loss_and_grads_kern, p_init, global_loss_data
    )
    return p_best, loss_best, fit_terminates, code_used
@jjit
def _get_bounded_c1_param(u_param, bound):
    """Squash an unbounded parameter into the open interval (lo, hi)."""
    lo, hi = bound
    midpoint = 0.5 * (lo + hi)
    return _sigmoid(u_param, midpoint, K_BOUNDING, lo, hi)
@jjit
def _get_unbounded_c1_param(param, bound):
    """Inverse of _get_bounded_c1_param: map (lo, hi) back to the real line."""
    lo, hi = bound
    midpoint = 0.5 * (lo + hi)
    return _inverse_sigmoid(param, midpoint, K_BOUNDING, lo, hi)
# Vectorize the scalar bounding transforms elementwise over (param, bound).
_C = (0, 0)
_get_bounded_c1_params_kern = jjit(vmap(_get_bounded_c1_param, in_axes=_C))
_get_unbounded_c1_params_kern = jjit(vmap(_get_unbounded_c1_param, in_axes=_C))
@jjit
def get_bounded_c1_params(u_params):
    """Convert a LGM0Pop_C1_UParams tuple into bounded LGM0Pop_C1_Params."""
    u_vals = jnp.array([getattr(u_params, name) for name in _C1_UPNAMES])
    bounded_vals = _get_bounded_c1_params_kern(
        jnp.array(u_vals), jnp.array(LGM0POP_C1_BOUNDS)
    )
    return LGM0Pop_C1_Params(*bounded_vals)
@jjit
def get_unbounded_c1_params(params):
    """Convert bounded LGM0Pop_C1_Params into unbounded LGM0Pop_C1_UParams."""
    vals = jnp.array(
        [getattr(params, name) for name in LGM0Pop_C1_Params._fields]
    )
    u_vals = _get_unbounded_c1_params_kern(
        jnp.array(vals), jnp.array(LGM0POP_C1_BOUNDS)
    )
    return LGM0Pop_C1_UParams(*u_vals)
# Unbounded counterparts of the default parameters (round-trips the defaults).
DEFAULT_LGM0POP_C1_U_PARAMS = LGM0Pop_C1_UParams(
    *get_unbounded_c1_params(DEFAULT_LGM0POP_C1_PARAMS)
)
|
ArgonneCPACREPO_NAMEdiffmahPATH_START.@diffmah_extracted@diffmah-main@diffmah@diffmahpop_kernels@bimod_logm0_kernels@logm0_c1_late.py@.PATH_END.py
|
{
"filename": "autocall.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/ipython/py3/IPython/core/autocall.py",
"type": "Python"
}
|
# encoding: utf-8
"""
Autocall capabilities for IPython.core.
Authors:
* Brian Granger
* Fernando Perez
* Thomas Kluyver
Notes
-----
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2008-2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
class IPyAutocall(object):
    """Mark an object as always autocallable.

    Instances are called regardless of the shell's ``autocall`` setting;
    subclass this to build macro-like mechanisms.
    """

    # Active IPython instance; filled in via __init__ or set_ip().
    _ip = None
    # Whether the input line may be rewritten before the call.
    rewrite = True

    def __init__(self, ip=None):
        self._ip = ip

    def set_ip(self, ip):
        """Store the current IPython instance before the call is made.

        Override this method if you don't want the instance recorded.
        """
        self._ip = ip
class ExitAutocall(IPyAutocall):
    """Autocallable added to the user namespace so that ``exit``, ``exit()``,
    ``quit`` and ``quit()`` all close the shell.
    """

    # Never rewrite the input line for the exit commands.
    rewrite = False

    def __call__(self):
        self._ip.ask_exit()
class ZMQExitAutocall(ExitAutocall):
    """Exit IPython. Autocallable, so it needn't be explicitly called.

    Parameters
    ----------
    keep_kernel : bool
        When True, leave the kernel running; otherwise tell the kernel to
        exit as well (the default).
    """

    def __call__(self, keep_kernel=False):
        # Record the flag on the shell, then request shutdown.
        self._ip.keepkernel_on_exit = keep_kernel
        self._ip.ask_exit()
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@ipython@py3@IPython@core@autocall.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/scattercarpet/textfont/__init__.py",
"type": "Python"
}
|
import sys

# Auto-generated validator re-exports for scattercarpet.textfont.
if sys.version_info < (3, 7):
    # Python < 3.7 has no module-level __getattr__ (PEP 562): import eagerly.
    from ._sizesrc import SizesrcValidator
    from ._size import SizeValidator
    from ._familysrc import FamilysrcValidator
    from ._family import FamilyValidator
    from ._colorsrc import ColorsrcValidator
    from ._color import ColorValidator
else:
    # Python >= 3.7: defer submodule imports until attribute access.
    from _plotly_utils.importers import relative_import

    __all__, __getattr__, __dir__ = relative_import(
        __name__,
        [],
        [
            "._sizesrc.SizesrcValidator",
            "._size.SizeValidator",
            "._familysrc.FamilysrcValidator",
            "._family.FamilyValidator",
            "._colorsrc.ColorsrcValidator",
            "._color.ColorValidator",
        ],
    )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@scattercarpet@textfont@__init__.py@.PATH_END.py
|
{
"filename": "constants.py",
"repo_name": "gammapy/enrico",
"repo_path": "enrico_extracted/enrico-master/enrico/constants.py",
"type": "Python"
}
|
"""Commonly used constants"""
MEV_TO_ERG = 1.602e-6
ERG_TO_MEV = 1/MEV_TO_ERG
met_ref = 240106987.-23*60.-6
mjd_ref= 54689.
#mjd_ref = 51910.
jd_ref = mjd_ref + 2400000.5
DAY_IN_SECOND = 86400
SpectrumPath = "Spectrum"
EbinPath = "Ebin"
LightcurvePath = "LightCurve"
TSMapPath = "TSMap"
AppLCPath = "AppertureLightCurve"
FoldedLCPath = "FoldedLightCurve"
ScanPath = "Scan"
|
gammapyREPO_NAMEenricoPATH_START.@enrico_extracted@enrico-master@enrico@constants.py@.PATH_END.py
|
{
"filename": "torch_optimizer.py",
"repo_name": "fchollet/keras",
"repo_path": "keras_extracted/keras-master/keras/src/backend/torch/optimizers/torch_optimizer.py",
"type": "Python"
}
|
import torch
from keras.src import optimizers
from keras.src.optimizers.base_optimizer import BaseOptimizer
from keras.src.utils import torch_utils
class TorchOptimizer(BaseOptimizer):
    """Factory base class that swaps in torch-specific optimizer subclasses.

    Instantiating a generic keras optimizer under the torch backend routes
    construction (via ``__new__``) to the matching torch implementation.
    """

    def __new__(cls, *args, **kwargs):
        # Import locally to avoid circular imports.
        from keras.src.backend.torch.optimizers import torch_adadelta
        from keras.src.backend.torch.optimizers import torch_adagrad
        from keras.src.backend.torch.optimizers import torch_adam
        from keras.src.backend.torch.optimizers import torch_adamax
        from keras.src.backend.torch.optimizers import torch_adamw
        from keras.src.backend.torch.optimizers import torch_lion
        from keras.src.backend.torch.optimizers import torch_nadam
        from keras.src.backend.torch.optimizers import torch_rmsprop
        from keras.src.backend.torch.optimizers import torch_sgd
        # Map generic optimizer class -> torch-backed subclass.
        OPTIMIZERS = {
            optimizers.Adadelta: torch_adadelta.Adadelta,
            optimizers.Adagrad: torch_adagrad.Adagrad,
            optimizers.Adam: torch_adam.Adam,
            optimizers.Adamax: torch_adamax.Adamax,
            optimizers.AdamW: torch_adamw.AdamW,
            optimizers.Lion: torch_lion.Lion,
            optimizers.Nadam: torch_nadam.Nadam,
            optimizers.RMSprop: torch_rmsprop.RMSprop,
            optimizers.SGD: torch_sgd.SGD,
        }
        # Exact-class lookup only; unknown classes fall through to the
        # normal construction path.
        if cls in OPTIMIZERS:
            return OPTIMIZERS[cls](*args, **kwargs)
        return super().__new__(cls)

    @torch_utils.no_grad
    def _apply_weight_decay(self, variables):
        # Decoupled weight decay: scale eligible variables in place by
        # (1 - wd * lr) using a single fused torch op.
        if self.weight_decay is None:
            return
        torch._foreach_mul_(
            [v.value for v in variables if self._use_weight_decay(v)],
            1 - self.weight_decay * self._get_current_learning_rate(),
        )
|
fcholletREPO_NAMEkerasPATH_START.@keras_extracted@keras-master@keras@src@backend@torch@optimizers@torch_optimizer.py@.PATH_END.py
|
{
"filename": "write_Phantomtest.py",
"repo_name": "bwvdnbro/CMacIonize",
"repo_path": "CMacIonize_extracted/CMacIonize-master/test/write_Phantomtest.py",
"type": "Python"
}
|
import struct
import numpy as np
def write_block(file, data):
    """Write ``data`` as a Fortran-unformatted-style record.

    The payload is bracketed by a native-endian 4-byte length marker on
    each side, matching what the Phantom snapshot reader expects.
    """
    marker = struct.pack("i", len(data))
    file.write(marker)
    file.write(data)
    file.write(marker)
# Script body: write a minimal fake Phantom snapshot ("Phantomtest.dat") plus
# a plain-text reference file, for testing PhantomSnapshotDensityFunction.
# generate random data arrays
x = np.random.rand(100)
y = np.random.rand(100)
z = np.random.rand(100)
h = np.random.rand(100).astype("f")
file = open("Phantomtest.dat", "wb")
# write header
# most info here is ignored by PhantomSnapshotDensityFunction, except the units
write_block(file, struct.pack("7s", b"ignored"))
write_block(file, struct.pack("2s", b"FT"))
# write ints block
write_block(file, struct.pack("i", 2))
tags = [b"nblocks ", b"ints block "]
tagstr = b""
for tag in tags:
    tagstr += struct.pack("16s", tag)
write_block(file, tagstr)
vals = np.zeros(2, dtype="i")
vals[0] = 1
vals[1] = 42
write_block(file, struct.pack("i" * 2, *vals))
# write int8s block
write_block(file, struct.pack("i", 2))
tags = [b"ignored ", b"int8s block "]
tagstr = b""
for tag in tags:
    tagstr += struct.pack("16s", tag)
write_block(file, tagstr)
vals = np.zeros(2, dtype="b")
vals[0] = 1
vals[1] = 2
write_block(file, struct.pack("b" * 2, *vals))
# write int16s block
write_block(file, struct.pack("i", 2))
tags = [b"ignored ", b"int16s block "]
tagstr = b""
for tag in tags:
    tagstr += struct.pack("16s", tag)
write_block(file, tagstr)
vals = np.zeros(2, dtype="h")
vals[0] = 1
vals[1] = 2
write_block(file, struct.pack("h" * 2, *vals))
# write int32s block
write_block(file, struct.pack("i", 2))
tags = [b"ignored ", b"int32s block "]
tagstr = b""
for tag in tags:
    tagstr += struct.pack("16s", tag)
write_block(file, tagstr)
vals = np.zeros(2, dtype="i")
vals[0] = 1
vals[1] = 2
write_block(file, struct.pack("i" * 2, *vals))
# write int64s block
# NOTE(review): the native "l" format is platform-dependent (4 bytes on
# Windows / 32-bit systems, 8 elsewhere); if the reader expects true int64
# this should probably be "q" -- confirm against the snapshot reader.
write_block(file, struct.pack("i", 2))
tags = [b"npartoftype ", b"int64s block "]
tagstr = b""
for tag in tags:
    tagstr += struct.pack("16s", tag)
write_block(file, tagstr)
vals = np.zeros(2, dtype="l")
vals[0] = 100
vals[1] = 2
write_block(file, struct.pack("l" * 2, *vals))
# write reals block
write_block(file, struct.pack("i", 2))
tags = [b"massoftype ", b"tag not used "]
tagstr = b""
for tag in tags:
    tagstr += struct.pack("16s", tag)
write_block(file, tagstr)
vals = np.zeros(2, dtype="d")
vals[0] = 0.01
vals[1] = 42.0
write_block(file, struct.pack("d" * 2, *vals))
# write real4s block
write_block(file, struct.pack("i", 2))
tags = [b"massoftype ", b"tag not used "]
tagstr = b""
for tag in tags:
    tagstr += struct.pack("16s", tag)
write_block(file, tagstr)
vals = np.zeros(2, dtype="f")
vals[0] = 0.01
vals[1] = 42.0
write_block(file, struct.pack("f" * 2, *vals))
# write real8s block (unit system: mass and distance scale factors)
write_block(file, struct.pack("i", 2))
tags = [b"umass ", b"udist "]
tagstr = b""
for tag in tags:
    tagstr += struct.pack("16s", tag)
write_block(file, tagstr)
vals = np.zeros(2, dtype="d")
vals[0] = 1.0
vals[1] = 1.0
write_block(file, struct.pack("d" * 2, *vals))
write_block(file, struct.pack("i", 2))
# write data block
numbers = [0, 0, 0, 0, 0, 3, 1, 0]
write_block(file, struct.pack("liiiiiiii", 100, *numbers))
numbers = [0, 0, 0, 0, 0, 0, 0, 0]
write_block(file, struct.pack("liiiiiiii", 0, *numbers))
write_block(file, struct.pack("16s", b"x "))
write_block(file, struct.pack("d" * 100, *x))
write_block(file, struct.pack("16s", b"y "))
write_block(file, struct.pack("d" * 100, *y))
write_block(file, struct.pack("16s", b"z "))
write_block(file, struct.pack("d" * 100, *z))
write_block(file, struct.pack("16s", b"h "))
write_block(file, struct.pack("f" * 100, *h))
# we don't write data after this point, since we don't need to read it...
# write reference file
reffile = open("Phantom_data.txt", "w")
for i in range(100):
    reffile.write(
        "{x:.14e}\t{y:.14e}\t{z:.14e}\t{h:.14e}\n".format(
            x=x[i], y=y[i], z=z[i], h=h[i]
        )
    )
bwvdnbroREPO_NAMECMacIonizePATH_START.@CMacIonize_extracted@CMacIonize-master@test@write_Phantomtest.py@.PATH_END.py
|
{
"filename": "igimf_epoch_6.py",
"repo_name": "juzikong/photGalIMF",
"repo_path": "photGalIMF_extracted/photGalIMF-main/simulation_results_from_galaxy_evol/example/igimf_epoch_6.py",
"type": "Python"
}
|
# File to define a custom IMF
# The return value represents the chosen IMF value for the input mass
def custom_imf(mass, time):  # there is no time dependence for IGIMF
    """Piecewise-linear IGIMF (galaxy-wide IMF) for this star-formation epoch.

    Parameters
    ----------
    mass : float
        Stellar mass (in solar masses, presumably -- TODO confirm units
        against the caller in the galaxy-evolution driver).
    time : any
        Unused; kept so the signature matches time-dependent IMF variants.

    Returns
    -------
    float
        The tabulated IMF value at ``mass``; 0 below 0.08 and at or above
        the last breakpoint.

    Notes
    -----
    Each branch evaluates a linear segment ``a * mass + b`` between
    consecutive tabulated breakpoints.  A duplicated, unreachable copy of
    the final ``mass < 198.24771765173531`` branch present in the original
    table was removed; behaviour is unchanged.
    """
    if mass < 0.08:
        return 0
    elif mass < 0.101:
        return -881912095498.0004 * mass + 126349388048.4443
    elif mass < 0.10201:
        return -852999343379.5463 * mass + 123429200084.48044
    elif mass < 0.10510100501:
        return -771825086636.5791 * mass + 115067381227.91583
    elif mass < 0.10828567056280801:
        return -698375642355.5582 * mass + 107272041085.80531
    elif mass < 0.11156683466653165:
        return -631915891670.408 * mass + 100004803063.36438
    elif mass < 0.11494742132376223:
        return -571780672073.184 * mass + 93229890421.70459
    elif mass < 0.11843044313729356:
        return -517368120134.2647 * mass + 86913950148.33691
    elif mass < 0.12201900399479669:
        return -468133647751.17334 * mass + 81025888759.68959
    elif mass < 0.12571630183484303:
        return -423584491638.08563 * mass + 75536719227.383
    elif mass < 0.12952563149674062:
        return -383274781503.5729 * mass + 70419418274.561
    elif mass < 0.13345038765672337:
        return -346801077557.1967 * mass + 65648793339.83267
    elif mass < 0.13749406785310975:
        return -313798332681.9032 * mass + 61201358553.86909
    elif mass < 0.14166027560312686:
        return -283936238859.25653 * mass + 57055219118.036606
    elif mass < 0.14595272361417722:
        return -256915921281.43967 * mass + 53189963515.95951
    elif mass < 0.15037523709241038:
        return -232466947062.03006 * mass + 49586563027.230675
    elif mass < 0.15493175715154747:
        return -210344618608.2964 * mass + 46227278048.721466
    elif mass < 0.1596263443249965:
        return -190327524564.86612 * mass + 43095570762.188644
    elif mass < 0.16446318218438824:
        return -172215323817.93555 * mass + 40176023718.31866
    elif mass < 0.1694465810677574:
        return -155826740380.95596 * mass + 37454263936.35827
    elif mass < 0.17458098192069152:
        return -140997749093.59116 * mass + 34916892145.66835
    elif mass < 0.1798709602538704:
        return -127579933975.75043 * mass + 32551416820.89501
    elif mass < 0.18532123022052294:
        return -115439002806.02321 * mass + 30346192685.970474
    elif mass < 0.190936648817435:
        return -104453443057.75208 * mass + 28290363384.209488
    elif mass < 0.19672222021325209:
        return -94513305740.80682 * mass + 26373808032.28923
    elif mass < 0.20268310020793384:
        return -85519104976.9103 * mass + 24587091394.955666
    elif mass < 0.20882460082733445:
        return -77380822295.08893 * mass + 22921417435.19663
    elif mass < 0.21515219505700353:
        return -70017005681.73714 * mass + 21368586011.210938
    elif mass < 0.2216715217194258:
        return -63353954367.92658 * mass + 19920952506.94763
    elif mass < 0.22838839049904613:
        return -57324981195.24943 * mass + 18571390197.54771
    elif mass < 0.23530878711955774:
        return -51869745177.25965 * mass + 17313255164.34588
    elif mass < 0.24243887867806746:
        return -46933647576.609474 * mass + 16140353586.739586
    elif mass < 0.24978501914089157:
        return -42467285453.54647 * mass + 15046911249.911201
    elif mass < 0.2573537550058797:
        return -38425957216.49946 * mass + 14027545118.26312
    elif mass < 0.26515183113631285:
        return -34769215226.14052 * mass + 13077236834.640993
    elif mass < 0.27318619677157424:
        return -31460460975.12397 * mass + 12191308014.872707
    elif mass < 0.2814640117199497:
        return -28466578791.900406 * mass + 11365397216.00891
    elif mass < 0.28999265273907593:
        return -25757604402.43424 * mass + 10595438464.85373
    elif mass < 0.29877972010972265:
        return -23306425032.748863 * mass + 9877641241.114536
    elif mass < 0.30783304440876735:
        return -21088508050.685318 * mass + 9208471816.603436
    elif mass < 0.31716069348739745:
        return -19081655431.023422 * mass + 8584635858.633564
    elif mass < 0.32677097966075913:
        return -17265781586.4992 * mass + 8003062211.957123
    elif mass < 0.33667246711545984:
        return -15622712341.196539 * mass + 7460887779.420978
    elif mass < 0.34687397954152543:
        return -14136003034.26778 * mass + 6955443426.885202
    elif mass < 0.3573846079956132:
        return -12790773933.533293 * mass + 6484240843.032959
    elif mass < 0.36821371900248834:
        return -11573561311.646477 * mass + 6044960289.365638
    elif mass < 0.3793709629019827:
        return -10472182694.378483 * mass + 5635439180.096561
    elif mass < 0.39086628244887567:
        return -9475614932.292295 * mass + 5253661435.698137
    elif mass < 0.40270992167335906:
        return -8573883875.54401 * mass + 4897747557.71019
    elif mass < 0.41491243500998354:
        return -7757964547.586873 * mass + 4565945375.935192
    elif mass < 0.4274846967032211:
        return -7019690818.683823 * mass + 4256621422.4751673
    elif mass < 0.4404379104980254:
        return -6351673675.18426 * mass + 3968252890.1399918
    elif mass < 0.453783619624026:
        return -5747227266.568564 * mass + 3699420135.6402307
    elif mass < 0.4675337170822536:
        return -5200301990.110294 * mass + 3448799690.66144
    elif mass < 0.48170045624356295:
        return -4705423943.412504 * mass + 3215157746.403937
    elif mass < 0.49629646176819914:
        return -4257640138.8508053 * mass + 2997344079.521927
    elif mass < 0.5113347408562374:
        return -3852468931.589354 * mass + 2794286389.553832
    elif mass < 0.5268286948389223:
        return -3485855165.0324383 * mass + 2604985019.9686236
    elif mass < 0.5427921311212365:
        return -3154129584.782682 * mass + 2428508036.838693
    elif mass < 0.5592392754863411:
        return -2853972114.905384 * mass + 2263986640.914044
    elif mass < 0.5761847847728528:
        return -2582378628.942507 * mass + 2110610890.5078216
    elif mass < 0.593643759936255:
        return -2336630883.1087937 * mass + 1967625714.139305
    elif mass < 0.6116317595060835:
        return -2114269310.7448237 * mass + 1834327193.3037505
    elif mass < 0.6301648134508773:
        return -1913068405.742814 * mass + 1710059097.0706878
    elif mass < 0.6492594374632522:
        return -1731014448.562266 * mass + 1594209651.4456558
    elif mass < 0.6689326476778262:
        return -1566285351.917694 * mass + 1486208527.597762
    elif mass < 0.689201975835112:
        return -1417232424.4142952 * mass + 1385524034.1202335
    elif mass < 0.7100854849048917:
        return -1282363869.6177766 * mass + 1291660499.5043395
    elif mass < 0.7316017851829947:
        return -1160329855.4088144 * mass + 1204155831.9407647
    elif mass < 0.7537700508758246:
        return -1049909004.1847667 * mass + 1122579244.432484
    elif mass < 0.7766100371874131:
        return -949996168.7014714 * mass + 1046529134.0237803
    elif mass < 0.8001420979242287:
        return -859591371.2048419 * mass + 975631104.701447
    elif mass < 0.8243872036334308:
        return -777789795.1523108 * mass + 909536124.2368587
    elif mass < 0.8493669602907274:
        return -703772729.355241 * mass + 847918805.8942916
    elif mass < 0.8751036285544969:
        return -636799373.9069062 * mass + 790475806.5464351
    elif mass < 0.9016201436033267:
        return -576199425.885282 * mass + 736924333.3106526
    elif mass < 0.9289401355746512:
        return -521366370.6256449 * mass + 687000751.3549962
    elif mass < 0.9570879506226987:
        return -471751411.4175549 * mass + 640459286.0192342
    elif mass < 0.9860886726145172:
        return -426857976.8721611 * mass + 597070812.8618844
    elif mass < 1.0159681454834097:
        return -378224034.20224994 * mass + 548611192.837717
    elif mass < 1.0467529962597026:
        return -342742154.28123385 * mass + 512208975.96922934
    elif mass < 1.078470658799368:
        return -310588893.6146222 * mass + 478222169.8875282
    elif mass < 1.1111493982316476:
        return -281451988.41692126 * mass + 446490503.8791636
    elif mass < 1.1448183361474649:
        return -255048468.93246916 * mass + 416864341.73713684
    elif mass < 1.1795074765510691:
        return -231121911.30959457 * mass + 389203976.12528235
    elif mass < 1.215247732598043:
        return -209439947.281311 * mass + 363378969.76383543
    elif mass < 1.2520709541434965:
        return -189792007.4675967 * mass + 339267540.3298471
    elif mass < 1.2900099561249987:
        return -171987276.38238186 * mass + 316755986.1713393
    elif mass < 1.3290985478055424:
        return -155852839.28501594 * mass + 295738150.12725586
    elif mass < 1.3693715629025982:
        return -141232002.87908453 * mass + 276114918.9249596
    elif mass < 1.4108648906301098:
        return -127982773.5493596 * mass + 257793755.7942201
    elif mass < 1.4536155076810928:
        return -115976478.35815808 * mass + 240688264.09393674
    elif mass < 1.4976615111793377:
        return -105096515.40857038 * mass + 224717779.89378324
    elif mass < 1.5430421526295828:
        return -95237221.43822642 * mass + 209806991.5892322
    elif mass < 1.589797872896412:
        return -86302845.64635111 * mass + 195885584.7567351
    elif mass < 1.6379703382430462:
        return -78206619.78770545 * mass + 182887910.57358485
    elif mass < 1.6876024774621488:
        return -70869915.5029363 * mass + 170752676.23962244
    elif mass < 1.7387385201317294:
        return -64221480.701078944 * mass + 159422655.94017956
    elif mass < 1.791424036030241:
        return -58196747.57857353 * mass + 148844420.98790362
    elif mass < 1.8457059757459933:
        return -52737205.554136746 * mass + 138968087.87036958
    elif mass < 1.9016327125170727:
        return -47789833.029834844 * mass + 129747083.01574302
    elif mass < 1.9592540853390523:
        return -43306582.45961368 * mass + 121137923.16692841
    elif mass < 2.018621443378911:
        return -39243913.72449903 * mass + 113100010.32866889
    elif mass < 2.0797876917347353:
        return -35562371.28275334 * mass + 105595440.32068571
    elif mass < 2.14280733858199:
        return -32226200.988276925 * mass + 98588824.03384253
    elif mass < 2.2077365437483625:
        return -29203002.856017157 * mass + 92047120.54667199
    elif mass < 2.2746331687604817:
        return -26463416.40203488 * mass + 85939481.3150872
    elif mass < 2.3435568284070927:
        return -23980835.502444685 * mass + 80237104.70076022
    elif mass < 2.4145689438646563:
        return -21731150.001893587 * mass + 74913100.15190408
    elif mass < 2.4877327974326993:
        return -19692511.562272504 * mass + 69942361.39625351
    elif mass < 2.5631135889277075:
        return -17845121.47753356 * mass + 65301448.04800619
    elif mass < 2.640778493785806:
        return -16171038.394003037 * mass + 60968475.07059142
    elif mass < 2.7207967229260097:
        return -14654004.068820138 * mass + 56923009.57401629
    elif mass < 2.8032395844273905:
        return -13279285.474247394 * mass + 53145974.45994402
    elif mass < 2.888180547075125:
        return -12033531.714499747 * mass + 49619558.46035055
    elif mass < 2.9756953058320486:
        return -10904644.36544424 * mass + 46327132.145368725
    elif mass < 3.0658618492940652:
        return -9881659.977970002 * mass + 43253169.50430538
    elif mass < 3.1587605291895247:
        return -8954643.603935985 * mass + 40383174.730044276
    elif mass < 3.2544741319844963:
        return -8114592.310631201 * mass + 37703613.86152058
    elif mass < 3.353087952657759:
        return -7353347.746728442 * mass + 35201850.96198223
    elif mass < 3.4546898707112415:
        return -6663516.910575382 * mass + 32866088.532013968
    elif mass < 3.559370428483663:
        return -6038400.351360663 * mass + 30685311.876376472
    elif mass < 3.667222911837147:
        return -5471927.105856929 * mass + 28649237.16229057
    elif mass < 3.7783434332887245:
        return -4958595.738863783 * mass + 26748262.92422453
    elif mass < 3.892831017660806:
        return -4493420.914756326 * mass + 24973424.786514346
    elif mass < 4.010787690326946:
        return -4071884.9812498903 * mass + 23316353.19027959
    elif mass < 4.132318568131543:
        return -3689894.095182401 * mass + 21769233.92531387
    elif mass < 4.257531953064498:
        return -3343738.464214127 * mass + 20324771.28080991
    elif mass < 4.386539428774305:
        return -3030056.3183271675 * mass + 18976153.641166396
    elif mass < 4.519455960005596:
        return -2745801.2612217274 * mass + 17717021.364621893
    elif mass < 4.656399995049725:
        return -2488212.684538307 * mass + 16541436.793256415
    elif mass < 4.797493571299727:
        return -2254788.957574303 * mass + 15443856.252927545
    elif mass < 4.94286242400368:
        return -2043263.1321236799 * mass + 14419103.91111523
    elif mass < 5.092636098313417:
        return -1851580.9264861692 * mass + 13462347.369371472
    elif mass < 5.246948064728412:
        return -1677880.7748385717 * mass + 12569074.875304293
    elif mass < 5.405935838037748:
        return -1520475.7482114013 * mass + 11735074.04662867
    elif mass < 5.569741099866129:
        return -1377837.171488895 * mass + 10956412.006936198
    elif mass < 5.738509824933173:
        return -1248579.7773293327 * mass + 10229416.839531496
    elif mass < 5.912392411138472:
        return -1131448.2528230778 * mass + 9550660.27187066
    elif mass < 6.091543813588379:
        return -1025305.0482320755 * mass + 8916941.508941254
    elif mass < 6.27612368268392:
        return -929119.3294146868 * mass + 8325272.139359356
    elif mass < 6.466296506392926:
        return -841956.966641636 * mass + 7772862.042986926
    elif mass < 6.662231756833138:
        return -762971.4625816531 * mass + 7257106.233641686
    elif mass < 6.8641040412969385:
        return -691395.7313472136 * mass + 6775572.574825793
    elif mass < 7.072093257852278:
        return -626534.6487633276 * mass + 6325990.310560303
    elif mass < 7.286384755658459:
        return -567758.3015101728 * mass + 5906239.357245501
    elif mass < 7.507169500139667:
        return -514495.8695739057 * mass + 5514340.306029102
    elif mass < 7.734644243163398:
        return -466230.0825977056 * mass + 5148445.088564688
    elif mass < 7.969011698375493:
        return -422492.19629126147 * mass + 4806828.262119563
    elif mass < 8.210480721847969:
        return -382857.44011296297 * mass + 4487878.872949734
    elif mass < 8.459266498200684:
        return -346940.8920130764 * mass + 4190092.859566341
    elif mass < 8.715590732362662:
        return -314393.74017471925 * mass + 3912065.9600711716
    elif mass < 8.979681847143983:
        return -284899.89544761827 * mass + 3652487.0901141223
    elif mass < 9.251775186794292:
        return -258172.9215758934 * mass + 3410132.1602485017
    elif mass < 9.532113226729347:
        return -233953.25340609462 * mass + 3183858.3035202585
    elif mass < 9.820945789612473:
        return -212005.67606085283 * mass + 2972598.4860824393
    elif mass < 10.118530267983521:
        return -192117.04059527343 * mass + 2775356.475408798
    elif mass < 10.42513185363369:
        return -174094.1939521161 * mass + 2591202.1423872104
    elif mass < 10.741023773930646:
        return -157762.10311133554 * mass + 2419267.0751323723
    elif mass < 11.06648753530452:
        return -142962.15521672467 * mass + 2258740.483838777
    elif mass < 11.401813174111782:
        return -129550.61717061569 * mass + 2108865.3773591933
    elif mass < 11.747299515100543:
        return -117397.23973695925 * mass + 1968934.9934820938
    elif mass < 12.103254437707607:
        return -106383.99259577072 * mass + 1838289.4660695463
    elif mass < 12.469995150424586:
        return -96403.91806463679 * mass + 1716312.7133444645
    elif mass < 12.847848473477601:
        return -87360.09235456264 * mass + 1602429.5326492898
    elif mass < 13.237151130072446:
        return -79164.6842722866 * mass + 1496102.8879772783
    elif mass < 13.638250046464773:
        return -71738.10222743743 * mass + 1396831.37748537
    elif mass < 14.051502661122703:
        return -65008.221260554754 * mass + 1304146.869047045
    elif mass < 14.477277243257383:
        return -58909.68258489232 * mass + 1217612.2926927358
    elif mass < 14.915953221005326:
        return -53383.25883957604 * mass + 1136819.579530759
    elif mass < 15.367921519555008:
        return -48375.27888946114 * mass + 1061387.7374272011
    elif mass < 15.833584909519045:
        return -43837.10658551415 * mass + 990961.054370338
    elif mass < 16.31335836586238:
        return -39724.66842374193 * mass + 925207.4210497297
    elif mass < 16.807669437706377:
        return -35998.02551516624 * mass + 863816.764735992
    elif mass < 17.316958629338316:
        return -32620.98571012829 * mass + 806499.5870788775
    elif mass < 17.841679792765895:
        return -29560.752109917725 * mass + 752985.5989275265
    elif mass < 18.382300532166493:
        return -26787.604552143403 * mass + 703022.4457347527
    elif mass < 18.93930262059167:
        return -24274.61097653632 * mass + 656374.5175350609
    elif mass < 19.51318242929822:
        return -21997.365868049095 * mass + 612821.837884678
    elif mass < 20.104451370088384:
        return -19933.75323709776 * mass + 572159.0265245157
    elif mass < 20.71363635105343:
        return -18063.73183503036 * mass + 534194.3308734964
    elif mass < 21.3412802461267:
        return -16369.140518938324 * mass + 498748.721785933
    elif mass < 21.987942378864584:
        return -14833.521875538247 * mass + 465655.0493083184
    elif mass < 22.654199020886562:
        return -13441.962391212743 * mass + 434757.25445444183
    elif mass < 23.340643905418442:
        return -12180.947616004374 * mass + 405909.6332822564
    elif mass < 24.047888756396528:
        return -11038.230914917563 * mass + 378976.1498013189
    elif mass < 24.7765638336041:
        return -10002.71453190961 * mass + 353829.79447141325
    elif mass < 25.52731849432614:
        return -9064.341811481938 * mass + 330351.9852669377
    elif mass < 26.300821772022715:
        return -8213.999531154454 * mass + 308432.0084826039
    elif mass < 27.097762972536774:
        return -7443.429396312369 * mass + 287966.49664371257
    elif mass < 27.91885228836761:
        return -6745.1478378761585 * mass + 268858.94105872384
    elif mass < 28.764821431557436:
        return -6112.373333903076 * mass + 251019.2367157852
    elif mass < 29.63642428575506:
        return -5538.96054927282 * mass + 234363.25737670768
    elif mass < 30.53443757803772:
        return -5019.34065385514 * mass + 218812.45886509324
    elif mass < 31.459661571089846:
        return -4548.467239534732 * mass + 204293.50867755222
    elif mass < 32.41292077635544:
        return -4121.767310858169 * mass + 190737.94017148804
    elif mass < 33.395064688799785:
        return -3735.096873336325 * mass + 178081.8296986817
    elif mass < 34.40696854393511:
        return -3384.7006880894783 * mass + 166265.495162195
    elif mass < 35.44953409778489:
        return -3067.175801981311 * mass + 155233.21457503116
    elif mass < 36.52369043048187:
        return -2779.4384990565954 * mass + 144932.96329337286
    elif mass < 37.63039477421591:
        return -2518.6943523251293 * mass + 135316.16868532004
    elif mass < 38.77063336626942:
        return -2282.4110850403245 * mass + 126337.48107813275
    elif mass < 39.94542232790075:
        return -2098.830304819598 * mass + 119149.66772500594
    elif mass < 41.15580856985847:
        return -1886.2412854129125 * mass + 110574.58168060276
    elif mass < 42.402870725333756:
        return -1721.2251525174197 * mass + 103716.39177930114
    elif mass < 43.68772011118209:
        return -1558.5915916816336 * mass + 96757.97174053216
    elif mass < 45.01150171827101:
        return -1400.4085968484894 * mass + 89779.67112006168
    elif mass < 46.37539523183634:
        return -1267.9757671119032 * mass + 83750.65664387631
    elif mass < 47.78061608275621:
        return -1157.3487629808835 * mass + 78569.80448206994
    elif mass < 49.22841653067981:
        return -1047.9670308564878 * mass + 73296.4201529144
    elif mass < 50.720086779975944:
        return -948.9152231708367 * mass + 68376.38739686998
    elif mass < 52.256956129496:
        return -852.2676650968601 * mass + 63426.434415172145
    elif mass < 53.84039415717585:
        return -771.6413266114362 * mass + 59164.75518994806
    elif mass < 55.47181194053243:
        return -698.6369762907558 * mass + 55188.962723190736
    elif mass < 57.1526633141425:
        return -632.5330787055476 * mass + 51479.82283481699
    elif mass < 58.88444616522433:
        return -572.6793366195942 * mass + 48019.56213315388
    elif mass < 60.668703768476796:
        return -518.4840159751182 * mass + 44791.43434745292
    elif mass < 62.50702616136541:
        return -469.4137314874244 * mass + 41779.9596309389
    elif mass < 64.40105156108095:
        return -428.70207155685165 * mass + 39207.686490530454
    elif mass < 66.35246782443328:
        return -388.1554827750738 * mass + 36573.149005620304
    elif mass < 68.36301395198143:
        return -351.44119004559644 * mass + 34115.354301338746
    elif mass < 70.43448163774043:
        return -318.1430781657978 * mass + 31816.660841848618
    elif mass < 72.5687168658456:
        return -288.0456767502926 * mass + 29677.91564178936
    elif mass < 74.76762155559759:
        return -258.3598336364044 * mass + 27500.73324765288
    elif mass < 77.03315525635374:
        return -236.07609477009555 * mass + 25816.499419184136
    elif mass < 79.36733689377651:
        return -213.69788027666138 * mass + 24075.721432927
    elif mass < 81.77224656899482:
        return -191.6048703181 * mass + 22303.55636399969
    elif mass < 84.25002741228194:
        return -175.1308349925522 * mass + 20941.720795221983
    elif mass < 86.8028874929015:
        return -158.52447133121225 * mass + 19528.907705399888
    elif mass < 89.4331017868239:
        return -143.49062072130545 * mass + 18211.122915502798
    elif mass < 92.14301420406645:
        return -129.85589276250371 * mass + 16978.53657696608
    elif mass < 94.93503967746386:
        return -117.53733946730361 * mass + 15832.331460204688
    elif mass < 97.81166631473069:
        return -106.38621211242004 * mass + 14763.31224564384
    elif mass < 100.77545761573333:
        return -96.27220727177901 * mass + 13763.303578911187
    elif mass < 103.82905475694768:
        return -87.11809610786906 * mass + 12830.763433328146
    elif mass < 106.97517894513796:
        return -78.83277492961786 * mass + 11961.139319274958
    elif mass < 110.21663384235457:
        return -71.33396883599468 * mass + 11150.205667431943
    elif mass < 113.55630806441175:
        return -65.25968997331644 * mass + 10474.128501135436
    elif mass < 116.99717775507149:
        return -58.39214107599081 * mass + 9686.645525210588
    elif mass < 120.54230923822792:
        return -52.82258508484542 * mass + 9027.137875397693
    elif mass < 124.19486175045546:
        return -48.33882451770344 * mass + 8480.626537567978
    elif mass < 127.95809025635602:
        return -43.725775306179074 * mass + 7902.089208774394
    elif mass < 131.83534834921383:
        return -39.551609523690175 * mass + 7362.731228954252
    elif mass < 135.83009123954335:
        return -35.774718468081005 * mass + 6859.918666609693
    elif mass < 139.94587883419274:
        return -32.754930560282254 * mass + 6445.70371016537
    elif mass < 144.1863789087476:
        return -29.621013927287606 * mass + 6003.374484117547
    elif mass < 148.55537037606157:
        return -27.133515506848582 * mass + 5642.235489786828
    elif mass < 153.05674665382662:
        return -24.211271151611026 * mass + 5204.37643183128
    elif mass < 157.69451913418422:
        return -21.878122000081255 * mass + 4842.8025954939685
    elif mass < 162.47282075846914:
        return -20.0457000220736 * mass + 4550.067523571768
    elif mass < 167.39590970027152:
        return -18.37401625079183 * mass + 4275.345142828897
    elif mass < 172.46817316009944:
        return -16.851098356037333 * mass + 4017.9099444185154
    elif mass < 177.69413127502364:
        return -15.701958729294592 * mass + 3817.7722681209907
    elif mass < 183.07844114678812:
        return -14.662876310591198 * mass + 3631.325700812953
    elif mass < 188.62590099197692:
        return -14.655764398061297 * mass + 3630.4831542698353
    elif mass < 194.3414544179348:
        return -16.798722448739376 * mass + 4041.2264183852003
    elif mass < 198.24771765173531:
        # Zero-valued tail segment of the tabulated fit.
        return 0 * mass + 0
    else:
        return 0
|
juzikongREPO_NAMEphotGalIMFPATH_START.@photGalIMF_extracted@photGalIMF-main@simulation_results_from_galaxy_evol@example@igimf_epoch_6.py@.PATH_END.py
|
{
"filename": "_searching_functions.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/numpy/py3/numpy/array_api/_searching_functions.py",
"type": "Python"
}
|
from __future__ import annotations
from ._array_object import Array
from ._dtypes import _result_type, _real_numeric_dtypes
from typing import Optional, Tuple
import numpy as np
def argmax(x: Array, /, *, axis: Optional[int] = None, keepdims: bool = False) -> Array:
    """
    Array API compatible wrapper for :py:func:`np.argmax <numpy.argmax>`.

    See its docstring for more information.
    """
    if x.dtype not in _real_numeric_dtypes:
        raise TypeError("Only real numeric dtypes are allowed in argmax")
    # np.argmax can return a 0-d result for axis=None; np.asarray guarantees
    # an ndarray before boxing it back into an Array.
    indices = np.argmax(x._array, axis=axis, keepdims=keepdims)
    return Array._new(np.asarray(indices))
def argmin(x: Array, /, *, axis: Optional[int] = None, keepdims: bool = False) -> Array:
    """
    Array API compatible wrapper for :py:func:`np.argmin <numpy.argmin>`.

    See its docstring for more information.
    """
    if x.dtype not in _real_numeric_dtypes:
        raise TypeError("Only real numeric dtypes are allowed in argmin")
    # np.argmin can return a 0-d result for axis=None; np.asarray guarantees
    # an ndarray before boxing it back into an Array.
    indices = np.argmin(x._array, axis=axis, keepdims=keepdims)
    return Array._new(np.asarray(indices))
def nonzero(x: Array, /) -> Tuple[Array, ...]:
"""
Array API compatible wrapper for :py:func:`np.nonzero <numpy.nonzero>`.
See its docstring for more information.
"""
return tuple(Array._new(i) for i in np.nonzero(x._array))
def where(condition: Array, x1: Array, x2: Array, /) -> Array:
    """
    Array API compatible wrapper for :py:func:`np.where <numpy.where>`.

    See its docstring for more information.
    """
    # The result is unused: this call only raises on disallowed type
    # combinations.
    _result_type(x1.dtype, x2.dtype)
    x1, x2 = Array._normalize_two_args(x1, x2)
    selected = np.where(condition._array, x1._array, x2._array)
    return Array._new(selected)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@numpy@py3@numpy@array_api@_searching_functions.py@.PATH_END.py
|
{
"filename": "interpolate.py",
"repo_name": "waynebhayes/SpArcFiRe",
"repo_path": "SpArcFiRe_extracted/SpArcFiRe-master/scripts/SpArcFiRe-pyvenv/lib/python2.7/site-packages/scipy/interpolate/interpolate.py",
"type": "Python"
}
|
""" Classes for interpolating values.
"""
from __future__ import division, print_function, absolute_import
__all__ = ['interp1d', 'interp2d', 'spline', 'spleval', 'splmake', 'spltopp',
'lagrange', 'PPoly', 'BPoly', 'NdPPoly',
'RegularGridInterpolator', 'interpn']
import itertools
import warnings
import functools
import operator
import numpy as np
from numpy import (array, transpose, searchsorted, atleast_1d, atleast_2d,
dot, ravel, poly1d, asarray, intp)
import scipy.linalg
import scipy.special as spec
from scipy.special import comb
from scipy._lib.six import xrange, integer_types, string_types
from . import fitpack
from . import dfitpack
from . import _fitpack
from .polyint import _Interpolator1D
from . import _ppoly
from .fitpack2 import RectBivariateSpline
from .interpnd import _ndim_coords_from_arrays
from ._bsplines import make_interp_spline, BSpline
def prod(x):
    """Product of a list of numbers; ~40x faster vs np.prod for Python tuples"""
    result = 1
    for factor in x:
        result = result * factor
    return result
def lagrange(x, w):
    r"""
    Return a Lagrange interpolating polynomial.

    Given two 1-D arrays `x` and `w,` returns the Lagrange interpolating
    polynomial through the points ``(x, w)``.

    Warning: This implementation is numerically unstable. Do not expect to
    be able to use more than about 20 points even if they are chosen optimally.

    Parameters
    ----------
    x : array_like
        `x` represents the x-coordinates of a set of datapoints.
    w : array_like
        `w` represents the y-coordinates of a set of datapoints, i.e. f(`x`).

    Returns
    -------
    lagrange : `numpy.poly1d` instance
        The Lagrange interpolating polynomial.

    Examples
    --------
    Interpolate :math:`f(x) = x^3` by 3 points.

    >>> from scipy.interpolate import lagrange
    >>> x = np.array([0, 1, 2])
    >>> y = x**3
    >>> poly = lagrange(x, y)

    >>> from numpy.polynomial.polynomial import Polynomial
    >>> Polynomial(poly).coef
    array([ 3., -2.,  0.])

    """
    n = len(x)
    result = poly1d(0.0)
    # Sum of basis polynomials: each term is w[j] times the product of
    # (X - x[k]) / (x[j] - x[k]) over all k != j.
    for j in xrange(n):
        term = poly1d(w[j])
        for k in xrange(n):
            if k != j:
                term *= poly1d([1.0, -x[k]]) / (x[j] - x[k])
        result += term
    return result
# !! Need to find argument for keeping initialize. If it isn't
# !! found, get rid of it!
class interp2d(object):
    """
    interp2d(x, y, z, kind='linear', copy=True, bounds_error=False,
             fill_value=nan)

    Interpolate over a 2-D grid.

    `x`, `y` and `z` are arrays of values used to approximate some function
    f: ``z = f(x, y)``. This class returns a function whose call method uses
    spline interpolation to find the value of new points.

    If `x` and `y` represent a regular grid, consider using
    RectBivariateSpline.

    Note that calling `interp2d` with NaNs present in input values results in
    undefined behaviour.

    Methods
    -------
    __call__

    Parameters
    ----------
    x, y : array_like
        Arrays defining the data point coordinates.

        If the points lie on a regular grid, `x` can specify the column
        coordinates and `y` the row coordinates, for example::

            >>> x = [0,1,2]; y = [0,3]; z = [[1,2,3], [4,5,6]]

        Otherwise, `x` and `y` must specify the full coordinates for each
        point, for example::

            >>> x = [0,1,2,0,1,2]; y = [0,0,0,3,3,3]; z = [1,2,3,4,5,6]

        If `x` and `y` are multi-dimensional, they are flattened before use.
    z : array_like
        The values of the function to interpolate at the data points. If
        `z` is a multi-dimensional array, it is flattened before use. The
        length of a flattened `z` array is either
        len(`x`)*len(`y`) if `x` and `y` specify the column and row coordinates
        or ``len(z) == len(x) == len(y)`` if `x` and `y` specify coordinates
        for each point.
    kind : {'linear', 'cubic', 'quintic'}, optional
        The kind of spline interpolation to use. Default is 'linear'.
    copy : bool, optional
        If True, the class makes internal copies of x, y and z.
        If False, references may be used. The default is to copy.
    bounds_error : bool, optional
        If True, when interpolated values are requested outside of the
        domain of the input data (x,y), a ValueError is raised.
        If False, then `fill_value` is used.
    fill_value : number, optional
        If provided, the value to use for points outside of the
        interpolation domain. If omitted (None), values outside
        the domain are extrapolated.

    See Also
    --------
    RectBivariateSpline :
        Much faster 2D interpolation if your input data is on a grid
    bisplrep, bisplev :
        Spline interpolation based on FITPACK
    BivariateSpline : a more recent wrapper of the FITPACK routines
    interp1d : one dimension version of this function

    Notes
    -----
    The minimum number of data points required along the interpolation
    axis is ``(k+1)**2``, with k=1 for linear, k=3 for cubic and k=5 for
    quintic interpolation.

    The interpolator is constructed by `bisplrep`, with a smoothing factor
    of 0. If more control over smoothing is needed, `bisplrep` should be
    used directly.

    Examples
    --------
    Construct a 2-D grid and interpolate on it:

    >>> from scipy import interpolate
    >>> x = np.arange(-5.01, 5.01, 0.25)
    >>> y = np.arange(-5.01, 5.01, 0.25)
    >>> xx, yy = np.meshgrid(x, y)
    >>> z = np.sin(xx**2+yy**2)
    >>> f = interpolate.interp2d(x, y, z, kind='cubic')

    Now use the obtained interpolation function and plot the result:

    >>> import matplotlib.pyplot as plt
    >>> xnew = np.arange(-5.01, 5.01, 1e-2)
    >>> ynew = np.arange(-5.01, 5.01, 1e-2)
    >>> znew = f(xnew, ynew)
    >>> plt.plot(x, z[0, :], 'ro-', xnew, znew[0, :], 'b-')
    >>> plt.show()

    """

    def __init__(self, x, y, z, kind='linear', copy=True, bounds_error=False,
                 fill_value=None):
        x = ravel(x)
        y = ravel(y)
        z = asarray(z)
        # z covers a full len(x)-by-len(y) grid exactly when its size matches;
        # otherwise x, y, z are treated as scattered point coordinates.
        rectangular_grid = (z.size == len(x) * len(y))
        if rectangular_grid:
            if z.ndim == 2:
                if z.shape != (len(y), len(x)):
                    raise ValueError("When on a regular grid with x.size = m "
                                     "and y.size = n, if z.ndim == 2, then z "
                                     "must have shape (n, m)")
            # Put both coordinate axes into ascending order for the gridded
            # fitting routine below, permuting z to stay consistent.
            if not np.all(x[1:] >= x[:-1]):
                j = np.argsort(x)
                x = x[j]
                z = z[:, j]
            if not np.all(y[1:] >= y[:-1]):
                j = np.argsort(y)
                y = y[j]
                z = z[j, :]
            # Flatten the transposed grid for the regrid_smth call.
            z = ravel(z.T)
        else:
            z = ravel(z)
            if len(x) != len(y):
                raise ValueError(
                    "x and y must have equal lengths for non rectangular grid")
            if len(z) != len(x):
                raise ValueError(
                    "Invalid length for input z for non rectangular grid")
        # Map the interpolation kind to the spline degree used on both axes.
        try:
            kx = ky = {'linear': 1,
                       'cubic': 3,
                       'quintic': 5}[kind]
        except KeyError:
            raise ValueError("Unsupported interpolation type.")
        if not rectangular_grid:
            # TODO: surfit is really not meant for interpolation!
            # s=0.0 forces the spline through the data points.
            self.tck = fitpack.bisplrep(x, y, z, kx=kx, ky=ky, s=0.0)
        else:
            nx, tx, ny, ty, c, fp, ier = dfitpack.regrid_smth(
                x, y, z, None, None, None, None,
                kx=kx, ky=ky, s=0.0)
            # Trim the returned knot/coefficient arrays to the lengths
            # actually filled in by regrid_smth.
            self.tck = (tx[:nx], ty[:ny], c[:(nx - kx - 1) * (ny - ky - 1)],
                        kx, ky)
        self.bounds_error = bounds_error
        self.fill_value = fill_value
        self.x, self.y, self.z = [array(a, copy=copy) for a in (x, y, z)]
        # Cache the data extent for the bounds checks in __call__.
        self.x_min, self.x_max = np.amin(x), np.amax(x)
        self.y_min, self.y_max = np.amin(y), np.amax(y)

    def __call__(self, x, y, dx=0, dy=0, assume_sorted=False):
        """Interpolate the function.

        Parameters
        ----------
        x : 1D array
            x-coordinates of the mesh on which to interpolate.
        y : 1D array
            y-coordinates of the mesh on which to interpolate.
        dx : int >= 0, < kx
            Order of partial derivatives in x.
        dy : int >= 0, < ky
            Order of partial derivatives in y.
        assume_sorted : bool, optional
            If False, values of `x` and `y` can be in any order and they are
            sorted first.
            If True, `x` and `y` have to be arrays of monotonically
            increasing values.

        Returns
        -------
        z : 2D array with shape (len(y), len(x))
            The interpolated values.
        """
        x = atleast_1d(x)
        y = atleast_1d(y)
        if x.ndim != 1 or y.ndim != 1:
            raise ValueError("x and y should both be 1-D arrays")
        # Sort evaluation points unless the caller guarantees ascending order.
        if not assume_sorted:
            x = np.sort(x)
            y = np.sort(y)
        if self.bounds_error or self.fill_value is not None:
            # Masks of evaluation points outside the original data extent.
            out_of_bounds_x = (x < self.x_min) | (x > self.x_max)
            out_of_bounds_y = (y < self.y_min) | (y > self.y_max)
            any_out_of_bounds_x = np.any(out_of_bounds_x)
            any_out_of_bounds_y = np.any(out_of_bounds_y)
        if self.bounds_error and (any_out_of_bounds_x or any_out_of_bounds_y):
            raise ValueError("Values out of range; x must be in %r, y in %r"
                             % ((self.x_min, self.x_max),
                                (self.y_min, self.y_max)))
        z = fitpack.bisplev(x, y, self.tck, dx, dy)
        z = atleast_2d(z)
        # Transpose so the output has shape (len(y), len(x)) as documented.
        z = transpose(z)
        # Overwrite out-of-bounds rows/columns with the fill value, if any.
        if self.fill_value is not None:
            if any_out_of_bounds_x:
                z[:, out_of_bounds_x] = self.fill_value
            if any_out_of_bounds_y:
                z[out_of_bounds_y, :] = self.fill_value
        # Collapse a single-row result to 1-D.
        if len(z) == 1:
            z = z[0]
        return array(z)
def _check_broadcast_up_to(arr_from, shape_to, name):
"""Helper to check that arr_from broadcasts up to shape_to"""
shape_from = arr_from.shape
if len(shape_to) >= len(shape_from):
for t, f in zip(shape_to[::-1], shape_from[::-1]):
if f != 1 and f != t:
break
else: # all checks pass, do the upcasting that we need later
if arr_from.size != 1 and arr_from.shape != shape_to:
arr_from = np.ones(shape_to, arr_from.dtype) * arr_from
return arr_from.ravel()
# at least one check failed
raise ValueError('%s argument must be able to broadcast up '
'to shape %s but had shape %s'
% (name, shape_to, shape_from))
def _do_extrapolate(fill_value):
    """Return True when `fill_value` is the literal string 'extrapolate'.

    The isinstance check avoids elementwise-comparison warnings that a
    bare ``fill_value == 'extrapolate'`` would trigger for array inputs.
    """
    if not isinstance(fill_value, string_types):
        return False
    return fill_value == 'extrapolate'
class interp1d(_Interpolator1D):
    """
    Interpolate a 1-D function.

    `x` and `y` are arrays of values used to approximate some function f:
    ``y = f(x)``. This class returns a function whose call method uses
    interpolation to find the value of new points.

    Note that calling `interp1d` with NaNs present in input values results in
    undefined behaviour.

    Parameters
    ----------
    x : (N,) array_like
        A 1-D array of real values.
    y : (...,N,...) array_like
        A N-D array of real values. The length of `y` along the interpolation
        axis must be equal to the length of `x`.
    kind : str or int, optional
        Specifies the kind of interpolation as a string
        ('linear', 'nearest', 'zero', 'slinear', 'quadratic', 'cubic'
        where 'zero', 'slinear', 'quadratic' and 'cubic' refer to a spline
        interpolation of zeroth, first, second or third order) or as an
        integer specifying the order of the spline interpolator to use.
        Default is 'linear'.
    axis : int, optional
        Specifies the axis of `y` along which to interpolate.
        Interpolation defaults to the last axis of `y`.
    copy : bool, optional
        If True, the class makes internal copies of x and y.
        If False, references to `x` and `y` are used. The default is to copy.
    bounds_error : bool, optional
        If True, a ValueError is raised any time interpolation is attempted on
        a value outside of the range of x (where extrapolation is
        necessary). If False, out of bounds values are assigned `fill_value`.
        By default, an error is raised unless `fill_value="extrapolate"`.
    fill_value : array-like or (array-like, array_like) or "extrapolate", optional
        - if a ndarray (or float), this value will be used to fill in for
          requested points outside of the data range. If not provided, then
          the default is NaN. The array-like must broadcast properly to the
          dimensions of the non-interpolation axes.
        - If a two-element tuple, then the first element is used as a
          fill value for ``x_new < x[0]`` and the second element is used for
          ``x_new > x[-1]``. Anything that is not a 2-element tuple (e.g.,
          list or ndarray, regardless of shape) is taken to be a single
          array-like argument meant to be used for both bounds as
          ``below, above = fill_value, fill_value``.

          .. versionadded:: 0.17.0
        - If "extrapolate", then points outside the data range will be
          extrapolated.

          .. versionadded:: 0.17.0
    assume_sorted : bool, optional
        If False, values of `x` can be in any order and they are sorted first.
        If True, `x` has to be an array of monotonically increasing values.

    Methods
    -------
    __call__

    See Also
    --------
    splrep, splev
        Spline interpolation/smoothing based on FITPACK.
    UnivariateSpline : An object-oriented wrapper of the FITPACK routines.
    interp2d : 2-D interpolation

    Examples
    --------
    >>> import numpy as np
    >>> import matplotlib.pyplot as plt
    >>> from scipy import interpolate
    >>> x = np.arange(0, 10)
    >>> y = np.exp(-x/3.0)
    >>> f = interpolate.interp1d(x, y)

    >>> xnew = np.arange(0, 9, 0.1)
    >>> ynew = f(xnew)   # use interpolation function returned by `interp1d`
    >>> plt.plot(x, y, 'o', xnew, ynew, '-')
    >>> plt.show()
    """

    def __init__(self, x, y, kind='linear', axis=-1,
                 copy=True, bounds_error=None, fill_value=np.nan,
                 assume_sorted=False):
        """ Initialize a 1D linear interpolation class."""
        _Interpolator1D.__init__(self, x, y, axis=axis)

        self.bounds_error = bounds_error  # used by fill_value setter
        self.copy = copy

        # Normalize `kind`: named spline kinds and integer orders both map
        # onto the generic 'spline' path with an explicit order.
        if kind in ['zero', 'slinear', 'quadratic', 'cubic']:
            order = {'zero': 0, 'slinear': 1,
                     'quadratic': 2, 'cubic': 3}[kind]
            kind = 'spline'
        elif isinstance(kind, int):
            order = kind
            kind = 'spline'
        elif kind not in ('linear', 'nearest'):
            raise NotImplementedError("%s is unsupported: Use fitpack "
                                      "routines for other types." % kind)

        x = array(x, copy=self.copy)
        y = array(y, copy=self.copy)

        if not assume_sorted:
            # Sort x and permute y along the interpolation axis to match.
            ind = np.argsort(x)
            x = x[ind]
            y = np.take(y, ind, axis=axis)

        if x.ndim != 1:
            raise ValueError("the x array must have exactly one dimension.")
        if y.ndim == 0:
            raise ValueError("the y array must have at least one dimension.")

        # Force-cast y to a floating-point type, if it's not yet one
        if not issubclass(y.dtype.type, np.inexact):
            y = y.astype(np.float_)

        # Backward compatibility
        self.axis = axis % y.ndim

        # Interpolation goes internally along the first axis
        self.y = y
        self._y = self._reshape_yi(self.y)
        self.x = x
        del y, x  # clean up namespace to prevent misuse; use attributes
        self._kind = kind
        self.fill_value = fill_value  # calls the setter, can modify bounds_err

        # Adjust to interpolation kind; store reference to *unbound*
        # interpolation methods, in order to avoid circular references to self
        # stored in the bound instance methods, and therefore delayed garbage
        # collection.  See: http://docs.python.org/2/reference/datamodel.html
        if kind in ('linear', 'nearest'):
            # Make a "view" of the y array that is rotated to the
            # interpolation axis.
            minval = 2

            if kind == 'nearest':
                # Midpoints between breakpoints decide which neighbour wins.
                # Do division before addition to prevent possible integer
                # overflow.
                self.x_bds = self.x / 2.0
                self.x_bds = self.x_bds[1:] + self.x_bds[:-1]

                self._call = self.__class__._call_nearest
            else:
                # Check if we can delegate to numpy.interp (2x-10x faster).
                cond = self.x.dtype == np.float_ and self.y.dtype == np.float_
                cond = cond and self.y.ndim == 1
                cond = cond and not _do_extrapolate(fill_value)

                if cond:
                    self._call = self.__class__._call_linear_np
                else:
                    self._call = self.__class__._call_linear
        else:
            minval = order + 1

            rewrite_nan = False
            xx, yy = self.x, self._y
            if order > 1:
                # Quadratic or cubic spline. If input contains even a single
                # nan, then the output is all nans. We cannot just feed data
                # with nans to make_interp_spline because it calls LAPACK.
                # So, we make up a bogus x and y with no nans and use it
                # to get the correct shape of the output, which we then fill
                # with nans.
                # For slinear or zero order spline, we just pass nans through.
                if np.isnan(self.x).any():
                    xx = np.linspace(min(self.x), max(self.x), len(self.x))
                    rewrite_nan = True
                if np.isnan(self._y).any():
                    yy = np.ones_like(self._y)
                    rewrite_nan = True

            self._spline = make_interp_spline(xx, yy, k=order,
                                              check_finite=False)
            if rewrite_nan:
                self._call = self.__class__._call_nan_spline
            else:
                self._call = self.__class__._call_spline

        if len(self.x) < minval:
            raise ValueError("x and y arrays must have at "
                             "least %d entries" % minval)

    @property
    def fill_value(self):
        # backwards compat: mimic a public attribute
        return self._fill_value_orig

    @fill_value.setter
    def fill_value(self, fill_value):
        # extrapolation only works for nearest neighbor and linear methods
        if _do_extrapolate(fill_value):
            if self.bounds_error:
                raise ValueError("Cannot extrapolate and raise "
                                 "at the same time.")
            self.bounds_error = False
            self._extrapolate = True
        else:
            # Fill values must broadcast against the non-interpolation axes.
            broadcast_shape = (self.y.shape[:self.axis] +
                               self.y.shape[self.axis + 1:])
            if len(broadcast_shape) == 0:
                broadcast_shape = (1,)
            # it's either a pair (_below_range, _above_range) or a single
            # value for both above and below range
            if isinstance(fill_value, tuple) and len(fill_value) == 2:
                below_above = [np.asarray(fill_value[0]),
                               np.asarray(fill_value[1])]
                names = ('fill_value (below)', 'fill_value (above)')
                for ii in range(2):
                    below_above[ii] = _check_broadcast_up_to(
                        below_above[ii], broadcast_shape, names[ii])
            else:
                fill_value = np.asarray(fill_value)
                below_above = [_check_broadcast_up_to(
                    fill_value, broadcast_shape, 'fill_value')] * 2
            self._fill_value_below, self._fill_value_above = below_above
            self._extrapolate = False
            if self.bounds_error is None:
                self.bounds_error = True
        # backwards compat: fill_value was a public attr; make it writeable
        self._fill_value_orig = fill_value

    def _call_linear_np(self, x_new):
        """Fast path: delegate linear interpolation to numpy.interp."""
        # Note that out-of-bounds values are taken care of in self._evaluate
        return np.interp(x_new, self.x, self.y)

    def _call_linear(self, x_new):
        """General linear interpolation along the first axis of self._y."""
        # 2. Find where in the orignal data, the values to interpolate
        #    would be inserted.
        #    Note: If x_new[n] == x[m], then m is returned by searchsorted.
        x_new_indices = searchsorted(self.x, x_new)

        # 3. Clip x_new_indices so that they are within the range of
        #    self.x indices and at least 1. Removes mis-interpolation
        #    of x_new[n] = x[0]
        x_new_indices = x_new_indices.clip(1, len(self.x)-1).astype(int)

        # 4. Calculate the slope of regions that each x_new value falls in.
        lo = x_new_indices - 1
        hi = x_new_indices

        x_lo = self.x[lo]
        x_hi = self.x[hi]
        y_lo = self._y[lo]
        y_hi = self._y[hi]

        # Note that the following two expressions rely on the specifics of the
        # broadcasting semantics.
        slope = (y_hi - y_lo) / (x_hi - x_lo)[:, None]

        # 5. Calculate the actual value for each entry in x_new.
        y_new = slope*(x_new - x_lo)[:, None] + y_lo

        return y_new

    def _call_nearest(self, x_new):
        """ Find nearest neighbour interpolated y_new = f(x_new)."""
        # 2. Find where in the averaged data the values to interpolate
        #    would be inserted.
        #    Note: use side='left' (right) to searchsorted() to define the
        #    halfway point to be nearest to the left (right) neighbour
        x_new_indices = searchsorted(self.x_bds, x_new, side='left')

        # 3. Clip x_new_indices so that they are within the range of x
        #    indices.
        x_new_indices = x_new_indices.clip(0, len(self.x)-1).astype(intp)

        # 4. Calculate the actual value for each entry in x_new.
        y_new = self._y[x_new_indices]

        return y_new

    def _call_spline(self, x_new):
        """Evaluate the precomputed B-spline at x_new."""
        return self._spline(x_new)

    def _call_nan_spline(self, x_new):
        """Spline path for NaN-containing data: shape comes from a bogus
        spline fit (see __init__), but every value is NaN by definition."""
        out = self._spline(x_new)
        out[...] = np.nan
        return out

    def _evaluate(self, x_new):
        # 1. Handle values in x_new that are outside of x. Throw error,
        #    or return a list of mask array indicating the outofbounds values.
        #    The behavior is set by the bounds_error variable.
        x_new = asarray(x_new)
        # self._call holds an *unbound* method (see __init__), hence the
        # explicit `self` argument.
        y_new = self._call(self, x_new)
        if not self._extrapolate:
            below_bounds, above_bounds = self._check_bounds(x_new)
            if len(y_new) > 0:
                # Note fill_value must be broadcast up to the proper size
                # and flattened to work here
                y_new[below_bounds] = self._fill_value_below
                y_new[above_bounds] = self._fill_value_above
        return y_new

    def _check_bounds(self, x_new):
        """Check the inputs for being in the bounds of the interpolated data.

        Parameters
        ----------
        x_new : array

        Returns
        -------
        out_of_bounds : bool array
            The mask on x_new of values that are out of the bounds.
        """
        # If self.bounds_error is True, we raise an error if any x_new values
        # fall outside the range of x. Otherwise, we return an array
        # indicating which values are outside the boundary region.
        below_bounds = x_new < self.x[0]
        above_bounds = x_new > self.x[-1]

        # !! Could provide more information about which values are out of
        # bounds
        if self.bounds_error and below_bounds.any():
            raise ValueError("A value in x_new is below the interpolation "
                             "range.")
        if self.bounds_error and above_bounds.any():
            raise ValueError("A value in x_new is above the interpolation "
                             "range.")

        # !! Should we emit a warning if some values are out of bounds?
        # !! matlab does not.
        return below_bounds, above_bounds
class _PPolyBase(object):
    """Base class for piecewise polynomials.

    Stores the coefficient array ``c`` (interpolation axis rolled to the
    front), the breakpoints ``x``, and the extrapolation mode; concrete
    subclasses supply ``_evaluate`` for their particular basis.
    """
    __slots__ = ('c', 'x', 'extrapolate', 'axis')

    def __init__(self, c, x, extrapolate=None, axis=0):
        self.c = np.asarray(c)
        self.x = np.ascontiguousarray(x, dtype=np.float64)

        # extrapolate is True, False, or the string 'periodic'.
        if extrapolate is None:
            extrapolate = True
        elif extrapolate != 'periodic':
            extrapolate = bool(extrapolate)
        self.extrapolate = extrapolate

        if self.c.ndim < 2:
            raise ValueError("Coefficients array must be at least "
                             "2-dimensional.")

        if not (0 <= axis < self.c.ndim - 1):
            raise ValueError("axis=%s must be between 0 and %s" %
                             (axis, self.c.ndim-1))

        self.axis = axis
        if axis != 0:
            # roll the interpolation axis to be the first one in self.c
            # More specifically, the target shape for self.c is (k, m, ...),
            # and axis != 0 means that we have c.shape (..., k, m, ...)
            #                                                ^
            #                                               axis
            # So we roll two of them.
            self.c = np.rollaxis(self.c, axis+1)
            self.c = np.rollaxis(self.c, axis+1)

        if self.x.ndim != 1:
            raise ValueError("x must be 1-dimensional")
        if self.x.size < 2:
            raise ValueError("at least 2 breakpoints are needed")
        if self.c.ndim < 2:
            raise ValueError("c must have at least 2 dimensions")
        if self.c.shape[0] == 0:
            raise ValueError("polynomial must be at least of order 0")
        if self.c.shape[1] != self.x.size-1:
            raise ValueError("number of coefficients != len(x)-1")
        dx = np.diff(self.x)
        # Breakpoints may be either increasing or decreasing, but must be
        # monotone.
        if not (np.all(dx >= 0) or np.all(dx <= 0)):
            raise ValueError("`x` must be strictly increasing or decreasing.")

        dtype = self._get_dtype(self.c.dtype)
        self.c = np.ascontiguousarray(self.c, dtype=dtype)

    def _get_dtype(self, dtype):
        # Coefficients are stored as either complex or float, never as
        # integer dtypes.
        if np.issubdtype(dtype, np.complexfloating) \
               or np.issubdtype(self.c.dtype, np.complexfloating):
            return np.complex_
        else:
            return np.float_

    @classmethod
    def construct_fast(cls, c, x, extrapolate=None, axis=0):
        """
        Construct the piecewise polynomial without making checks.

        Takes the same parameters as the constructor. Input arguments
        `c` and `x` must be arrays of the correct shape and type. The
        `c` array can only be of dtypes float and complex, and `x`
        array must have dtype float.
        """
        self = object.__new__(cls)
        self.c = c
        self.x = x
        self.axis = axis
        if extrapolate is None:
            extrapolate = True
        self.extrapolate = extrapolate
        return self

    def _ensure_c_contiguous(self):
        """
        c and x may be modified by the user. The Cython code expects
        that they are C contiguous.
        """
        if not self.x.flags.c_contiguous:
            self.x = self.x.copy()
        if not self.c.flags.c_contiguous:
            self.c = self.c.copy()

    def extend(self, c, x, right=None):
        """
        Add additional breakpoints and coefficients to the polynomial.

        Parameters
        ----------
        c : ndarray, size (k, m, ...)
            Additional coefficients for polynomials in intervals. Note that
            the first additional interval will be formed using one of the
            `self.x` end points.
        x : ndarray, size (m,)
            Additional breakpoints. Must be sorted in the same order as
            `self.x` and either to the right or to the left of the current
            breakpoints.
        right
            Deprecated argument. Has no effect.

            .. deprecated:: 0.19
        """
        if right is not None:
            warnings.warn("`right` is deprecated and will be removed.")

        c = np.asarray(c)
        x = np.asarray(x)

        if c.ndim < 2:
            raise ValueError("invalid dimensions for c")
        if x.ndim != 1:
            raise ValueError("invalid dimensions for x")
        if x.shape[0] != c.shape[1]:
            raise ValueError("x and c have incompatible sizes")
        if c.shape[2:] != self.c.shape[2:] or c.ndim != self.c.ndim:
            raise ValueError("c and self.c have incompatible shapes")

        if c.size == 0:
            return

        dx = np.diff(x)
        if not (np.all(dx >= 0) or np.all(dx <= 0)):
            raise ValueError("`x` is not sorted.")

        # Decide whether the new breakpoints attach before or after the
        # current ones; both increasing and decreasing self.x are handled.
        if self.x[-1] >= self.x[0]:
            if not x[-1] >= x[0]:
                raise ValueError("`x` is in the different order "
                                 "than `self.x`.")

            if x[0] >= self.x[-1]:
                action = 'append'
            elif x[-1] <= self.x[0]:
                action = 'prepend'
            else:
                raise ValueError("`x` is neither on the left or on the right "
                                 "from `self.x`.")
        else:
            if not x[-1] <= x[0]:
                raise ValueError("`x` is in the different order "
                                 "than `self.x`.")

            if x[0] <= self.x[-1]:
                action = 'append'
            elif x[-1] >= self.x[0]:
                action = 'prepend'
            else:
                raise ValueError("`x` is neither on the left or on the right "
                                 "from `self.x`.")

        # Allocate a coefficient array big enough for the larger of the two
        # orders; lower-order coefficients sit at the bottom rows (the
        # leading rows are implicitly zero).
        dtype = self._get_dtype(c.dtype)

        k2 = max(c.shape[0], self.c.shape[0])
        c2 = np.zeros((k2, self.c.shape[1] + c.shape[1]) + self.c.shape[2:],
                      dtype=dtype)

        if action == 'append':
            c2[k2-self.c.shape[0]:, :self.c.shape[1]] = self.c
            c2[k2-c.shape[0]:, self.c.shape[1]:] = c
            self.x = np.r_[self.x, x]
        elif action == 'prepend':
            c2[k2-self.c.shape[0]:, :c.shape[1]] = c
            c2[k2-c.shape[0]:, c.shape[1]:] = self.c
            self.x = np.r_[x, self.x]

        self.c = c2

    def __call__(self, x, nu=0, extrapolate=None):
        """
        Evaluate the piecewise polynomial or its derivative.

        Parameters
        ----------
        x : array_like
            Points to evaluate the interpolant at.
        nu : int, optional
            Order of derivative to evaluate. Must be non-negative.
        extrapolate : {bool, 'periodic', None}, optional
            If bool, determines whether to extrapolate to out-of-bounds points
            based on first and last intervals, or to return NaNs.
            If 'periodic', periodic extrapolation is used.
            If None (default), use `self.extrapolate`.

        Returns
        -------
        y : array_like
            Interpolated values. Shape is determined by replacing
            the interpolation axis in the original array with the shape of x.

        Notes
        -----
        Derivatives are evaluated piecewise for each polynomial
        segment, even if the polynomial is not differentiable at the
        breakpoints. The polynomial intervals are considered half-open,
        ``[a, b)``, except for the last interval which is closed
        ``[a, b]``.
        """
        if extrapolate is None:
            extrapolate = self.extrapolate
        x = np.asarray(x)
        x_shape, x_ndim = x.shape, x.ndim
        x = np.ascontiguousarray(x.ravel(), dtype=np.float_)

        # With periodic extrapolation we map x to the segment
        # [self.x[0], self.x[-1]].
        if extrapolate == 'periodic':
            x = self.x[0] + (x - self.x[0]) % (self.x[-1] - self.x[0])
            extrapolate = False

        out = np.empty((len(x), prod(self.c.shape[2:])), dtype=self.c.dtype)
        self._ensure_c_contiguous()
        # _evaluate is provided by the subclass (power or Bernstein basis).
        self._evaluate(x, nu, extrapolate, out)
        out = out.reshape(x_shape + self.c.shape[2:])
        if self.axis != 0:
            # transpose to move the calculated values to the interpolation
            # axis
            l = list(range(out.ndim))
            l = l[x_ndim:x_ndim+self.axis] + l[:x_ndim] + l[x_ndim+self.axis:]
            out = out.transpose(l)
        return out
class PPoly(_PPolyBase):
    """
    Piecewise polynomial in terms of coefficients and breakpoints

    The polynomial between ``x[i]`` and ``x[i + 1]`` is written in the
    local power basis::

        S = sum(c[m, i] * (xp - x[i])**(k-m) for m in range(k+1))

    where ``k`` is the degree of the polynomial.

    Parameters
    ----------
    c : ndarray, shape (k, m, ...)
        Polynomial coefficients, order `k` and `m` intervals
    x : ndarray, shape (m+1,)
        Polynomial breakpoints. Must be sorted in either increasing or
        decreasing order.
    extrapolate : bool or 'periodic', optional
        If bool, determines whether to extrapolate to out-of-bounds points
        based on first and last intervals, or to return NaNs. If 'periodic',
        periodic extrapolation is used. Default is True.
    axis : int, optional
        Interpolation axis. Default is zero.

    Attributes
    ----------
    x : ndarray
        Breakpoints.
    c : ndarray
        Coefficients of the polynomials. They are reshaped
        to a 3-dimensional array with the last dimension representing
        the trailing dimensions of the original coefficient array.
    axis : int
        Interpolation axis.

    Methods
    -------
    __call__
    derivative
    antiderivative
    integrate
    solve
    roots
    extend
    from_spline
    from_bernstein_basis
    construct_fast

    See also
    --------
    BPoly : piecewise polynomials in the Bernstein basis

    Notes
    -----
    High-order polynomials in the power basis can be numerically
    unstable. Precision problems can start to appear for orders
    larger than 20-30.
    """

    def _evaluate(self, x, nu, extrapolate, out):
        # Delegate to the compiled evaluator; coefficients are viewed as 3-D
        # (order, intervals, trailing) as _ppoly expects.
        _ppoly.evaluate(self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
                        self.x, x, nu, bool(extrapolate), out)

    def derivative(self, nu=1):
        """
        Construct a new piecewise polynomial representing the derivative.

        Parameters
        ----------
        nu : int, optional
            Order of derivative to evaluate. Default is 1, i.e. compute the
            first derivative. If negative, the antiderivative is returned.

        Returns
        -------
        pp : PPoly
            Piecewise polynomial of order k2 = k - n representing the
            derivative of this polynomial.

        Notes
        -----
        Derivatives are evaluated piecewise for each polynomial
        segment, even if the polynomial is not differentiable at the
        breakpoints. The polynomial intervals are considered half-open,
        ``[a, b)``, except for the last interval which is closed
        ``[a, b]``.
        """
        if nu < 0:
            return self.antiderivative(-nu)

        # reduce order
        if nu == 0:
            c2 = self.c.copy()
        else:
            c2 = self.c[:-nu, :].copy()

        if c2.shape[0] == 0:
            # derivative of order 0 is zero
            c2 = np.zeros((1,) + c2.shape[1:], dtype=c2.dtype)

        # multiply by the correct rising factorials
        factor = spec.poch(np.arange(c2.shape[0], 0, -1), nu)
        c2 *= factor[(slice(None),) + (None,)*(c2.ndim-1)]

        # construct a compatible polynomial
        return self.construct_fast(c2, self.x, self.extrapolate, self.axis)

    def antiderivative(self, nu=1):
        """
        Construct a new piecewise polynomial representing the antiderivative.

        Antiderivative is also the indefinite integral of the function,
        and derivative is its inverse operation.

        Parameters
        ----------
        nu : int, optional
            Order of antiderivative to evaluate. Default is 1, i.e. compute
            the first integral. If negative, the derivative is returned.

        Returns
        -------
        pp : PPoly
            Piecewise polynomial of order k2 = k + n representing
            the antiderivative of this polynomial.

        Notes
        -----
        The antiderivative returned by this function is continuous and
        continuously differentiable to order n-1, up to floating point
        rounding error.

        If antiderivative is computed and ``self.extrapolate='periodic'``,
        it will be set to False for the returned instance. This is done
        because the antiderivative is no longer periodic and its correct
        evaluation outside of the initially given x interval is difficult.
        """
        if nu <= 0:
            return self.derivative(-nu)

        # Raise the order: prepend `nu` zero rows of coefficients.
        c = np.zeros((self.c.shape[0] + nu, self.c.shape[1]) +
                     self.c.shape[2:], dtype=self.c.dtype)
        c[:-nu] = self.c

        # divide by the correct rising factorials
        factor = spec.poch(np.arange(self.c.shape[0], 0, -1), nu)
        c[:-nu] /= factor[(slice(None),) + (None,)*(c.ndim-1)]

        # fix continuity of added degrees of freedom
        self._ensure_c_contiguous()
        _ppoly.fix_continuity(c.reshape(c.shape[0], c.shape[1], -1),
                              self.x, nu - 1)

        if self.extrapolate == 'periodic':
            extrapolate = False
        else:
            extrapolate = self.extrapolate

        # construct a compatible polynomial
        return self.construct_fast(c, self.x, extrapolate, self.axis)

    def integrate(self, a, b, extrapolate=None):
        """
        Compute a definite integral over a piecewise polynomial.

        Parameters
        ----------
        a : float
            Lower integration bound
        b : float
            Upper integration bound
        extrapolate : {bool, 'periodic', None}, optional
            If bool, determines whether to extrapolate to out-of-bounds points
            based on first and last intervals, or to return NaNs.
            If 'periodic', periodic extrapolation is used.
            If None (default), use `self.extrapolate`.

        Returns
        -------
        ig : array_like
            Definite integral of the piecewise polynomial over [a, b]
        """
        if extrapolate is None:
            extrapolate = self.extrapolate

        # Swap integration bounds if needed
        sign = 1
        if b < a:
            a, b = b, a
            sign = -1

        range_int = np.empty((prod(self.c.shape[2:]),), dtype=self.c.dtype)
        self._ensure_c_contiguous()

        # Compute the integral.
        if extrapolate == 'periodic':
            # Split the integral into the part over period (can be several
            # of them) and the remaining part.

            xs, xe = self.x[0], self.x[-1]
            period = xe - xs
            interval = b - a
            n_periods, left = divmod(interval, period)

            if n_periods > 0:
                # Whole periods contribute n_periods copies of the integral
                # over one full period.
                _ppoly.integrate(
                    self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
                    self.x, xs, xe, False, out=range_int)
                range_int *= n_periods
            else:
                range_int.fill(0)

            # Map a to [xs, xe], b is always a + left.
            a = xs + (a - xs) % period
            b = a + left

            # If b <= xe then we need to integrate over [a, b], otherwise
            # over [a, xe] and from xs to what is remained.
            remainder_int = np.empty_like(range_int)
            if b <= xe:
                _ppoly.integrate(
                    self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
                    self.x, a, b, False, out=remainder_int)
                range_int += remainder_int
            else:
                _ppoly.integrate(
                    self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
                    self.x, a, xe, False, out=remainder_int)
                range_int += remainder_int

                _ppoly.integrate(
                    self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
                    self.x, xs, xs + left + a - xe, False,
                    out=remainder_int)
                range_int += remainder_int
        else:
            _ppoly.integrate(
                self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
                self.x, a, b, bool(extrapolate), out=range_int)

        # Return
        range_int *= sign
        return range_int.reshape(self.c.shape[2:])

    def solve(self, y=0., discontinuity=True, extrapolate=None):
        """
        Find real solutions of the equation ``pp(x) == y``.

        Parameters
        ----------
        y : float, optional
            Right-hand side. Default is zero.
        discontinuity : bool, optional
            Whether to report sign changes across discontinuities at
            breakpoints as roots.
        extrapolate : {bool, 'periodic', None}, optional
            If bool, determines whether to return roots from the polynomial
            extrapolated based on first and last intervals, 'periodic' works
            the same as False. If None (default), use `self.extrapolate`.

        Returns
        -------
        roots : ndarray
            Roots of the polynomial(s).

            If the PPoly object describes multiple polynomials, the
            return value is an object array whose each element is an
            ndarray containing the roots.

        Notes
        -----
        This routine works only on real-valued polynomials.

        If the piecewise polynomial contains sections that are
        identically zero, the root list will contain the start point
        of the corresponding interval, followed by a ``nan`` value.

        If the polynomial is discontinuous across a breakpoint, and
        there is a sign change across the breakpoint, this is reported
        if the `discont` parameter is True.

        Examples
        --------
        Finding roots of ``[x**2 - 1, (x - 1)**2]`` defined on intervals
        ``[-2, 1], [1, 2]``:

        >>> from scipy.interpolate import PPoly
        >>> pp = PPoly(np.array([[1, -4, 3], [1, 0, 0]]).T, [-2, 1, 2])
        >>> pp.roots()
        array([-1.,  1.])
        """
        if extrapolate is None:
            extrapolate = self.extrapolate

        self._ensure_c_contiguous()

        if np.issubdtype(self.c.dtype, np.complexfloating):
            raise ValueError("Root finding is only for "
                             "real-valued polynomials")

        y = float(y)
        r = _ppoly.real_roots(self.c.reshape(self.c.shape[0],
                                             self.c.shape[1], -1),
                              self.x, y, bool(discontinuity),
                              bool(extrapolate))
        if self.c.ndim == 2:
            return r[0]
        else:
            r2 = np.empty(prod(self.c.shape[2:]), dtype=object)
            # this for-loop is equivalent to ``r2[...] = r``, but that's
            # broken in numpy 1.6.0
            for ii, root in enumerate(r):
                r2[ii] = root

            return r2.reshape(self.c.shape[2:])

    def roots(self, discontinuity=True, extrapolate=None):
        """
        Find real roots of the piecewise polynomial.

        Parameters
        ----------
        discontinuity : bool, optional
            Whether to report sign changes across discontinuities at
            breakpoints as roots.
        extrapolate : {bool, 'periodic', None}, optional
            If bool, determines whether to return roots from the polynomial
            extrapolated based on first and last intervals, 'periodic' works
            the same as False. If None (default), use `self.extrapolate`.

        Returns
        -------
        roots : ndarray
            Roots of the polynomial(s).

            If the PPoly object describes multiple polynomials, the
            return value is an object array whose each element is an
            ndarray containing the roots.

        See Also
        --------
        PPoly.solve
        """
        return self.solve(0, discontinuity, extrapolate)

    @classmethod
    def from_spline(cls, tck, extrapolate=None):
        """
        Construct a piecewise polynomial from a spline

        Parameters
        ----------
        tck
            A spline, as returned by `splrep` or a BSpline object.
        extrapolate : bool or 'periodic', optional
            If bool, determines whether to extrapolate to out-of-bounds points
            based on first and last intervals, or to return NaNs.
            If 'periodic', periodic extrapolation is used. Default is True.
        """
        if isinstance(tck, BSpline):
            t, c, k = tck.tck
            if extrapolate is None:
                extrapolate = tck.extrapolate
        else:
            t, c, k = tck

        # Convert by evaluating derivatives of the spline at each knot:
        # cvals[k - m, :] holds the m-th derivative divided by m!.
        cvals = np.empty((k + 1, len(t)-1), dtype=c.dtype)
        for m in xrange(k, -1, -1):
            y = fitpack.splev(t[:-1], tck, der=m)
            cvals[k - m, :] = y/spec.gamma(m+1)

        return cls.construct_fast(cvals, t, extrapolate)

    @classmethod
    def from_bernstein_basis(cls, bp, extrapolate=None):
        """
        Construct a piecewise polynomial in the power basis
        from a polynomial in Bernstein basis.

        Parameters
        ----------
        bp : BPoly
            A Bernstein basis polynomial, as created by BPoly
        extrapolate : bool or 'periodic', optional
            If bool, determines whether to extrapolate to out-of-bounds points
            based on first and last intervals, or to return NaNs.
            If 'periodic', periodic extrapolation is used. Default is True.
        """
        dx = np.diff(bp.x)
        k = bp.c.shape[0] - 1  # polynomial order

        rest = (None,)*(bp.c.ndim-2)

        # Expand each Bernstein basis polynomial into powers of (x - x[i])
        # via the binomial theorem and accumulate the power coefficients.
        c = np.zeros_like(bp.c)
        for a in range(k+1):
            factor = (-1)**a * comb(k, a) * bp.c[a]
            for s in range(a, k+1):
                val = comb(k-a, s-a) * (-1)**s
                c[k-s] += factor * val / dx[(slice(None),)+rest]**s

        if extrapolate is None:
            extrapolate = bp.extrapolate

        return cls.construct_fast(c, bp.x, extrapolate, bp.axis)
class BPoly(_PPolyBase):
"""Piecewise polynomial in terms of coefficients and breakpoints.
The polynomial between ``x[i]`` and ``x[i + 1]`` is written in the
Bernstein polynomial basis::
S = sum(c[a, i] * b(a, k; x) for a in range(k+1)),
where ``k`` is the degree of the polynomial, and::
b(a, k; x) = binom(k, a) * t**a * (1 - t)**(k - a),
with ``t = (x - x[i]) / (x[i+1] - x[i])`` and ``binom`` is the binomial
coefficient.
Parameters
----------
c : ndarray, shape (k, m, ...)
Polynomial coefficients, order `k` and `m` intervals
x : ndarray, shape (m+1,)
Polynomial breakpoints. Must be sorted in either increasing or
decreasing order.
extrapolate : bool, optional
If bool, determines whether to extrapolate to out-of-bounds points
based on first and last intervals, or to return NaNs. If 'periodic',
periodic extrapolation is used. Default is True.
axis : int, optional
Interpolation axis. Default is zero.
Attributes
----------
x : ndarray
Breakpoints.
c : ndarray
Coefficients of the polynomials. They are reshaped
to a 3-dimensional array with the last dimension representing
the trailing dimensions of the original coefficient array.
axis : int
Interpolation axis.
Methods
-------
__call__
extend
derivative
antiderivative
integrate
construct_fast
from_power_basis
from_derivatives
See also
--------
PPoly : piecewise polynomials in the power basis
Notes
-----
Properties of Bernstein polynomials are well documented in the literature.
Here's a non-exhaustive list:
.. [1] http://en.wikipedia.org/wiki/Bernstein_polynomial
.. [2] Kenneth I. Joy, Bernstein polynomials,
http://www.idav.ucdavis.edu/education/CAGDNotes/Bernstein-Polynomials.pdf
.. [3] E. H. Doha, A. H. Bhrawy, and M. A. Saker, Boundary Value Problems,
vol 2011, article ID 829546, :doi:`10.1155/2011/829543`.
Examples
--------
>>> from scipy.interpolate import BPoly
>>> x = [0, 1]
>>> c = [[1], [2], [3]]
>>> bp = BPoly(c, x)
This creates a 2nd order polynomial
.. math::
B(x) = 1 \\times b_{0, 2}(x) + 2 \\times b_{1, 2}(x) + 3 \\times b_{2, 2}(x) \\\\
= 1 \\times (1-x)^2 + 2 \\times 2 x (1 - x) + 3 \\times x^2
"""
def _evaluate(self, x, nu, extrapolate, out):
    # Delegate to the compiled Bernstein-basis evaluator; coefficients are
    # viewed as 3-D (order, intervals, trailing) as _ppoly expects.
    _ppoly.evaluate_bernstein(
        self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
        self.x, x, nu, bool(extrapolate), out)
def derivative(self, nu=1):
    """
    Construct a new piecewise polynomial representing the derivative.

    Parameters
    ----------
    nu : int, optional
        Order of derivative to evaluate. Default is 1, i.e. compute the
        first derivative. If negative, the antiderivative is returned.

    Returns
    -------
    bp : BPoly
        Piecewise polynomial of order k - nu representing the derivative of
        this polynomial.
    """
    if nu < 0:
        return self.antiderivative(-nu)

    if nu > 1:
        # Higher orders are obtained by repeated first derivatives.
        bp = self
        for k in range(nu):
            bp = bp.derivative()
        return bp

    # reduce order
    if nu == 0:
        c2 = self.c.copy()
    else:
        # For a polynomial
        #    B(x) = \sum_{a=0}^{k} c_a b_{a, k}(x),
        # we use the fact that
        #    b'_{a, k} = k ( b_{a-1, k-1} - b_{a, k-1} ),
        # which leads to
        #    B'(x) = \sum_{a=0}^{k-1} (c_{a+1} - c_a) b_{a, k-1}
        #
        # finally, for an interval [y, y + dy] with dy != 1,
        # we need to correct for an extra power of dy
        rest = (None,)*(self.c.ndim-2)

        k = self.c.shape[0] - 1
        dx = np.diff(self.x)[(None, slice(None))+rest]
        c2 = k * np.diff(self.c, axis=0) / dx

    if c2.shape[0] == 0:
        # derivative of order 0 is zero
        c2 = np.zeros((1,) + c2.shape[1:], dtype=c2.dtype)

    # construct a compatible polynomial
    return self.construct_fast(c2, self.x, self.extrapolate, self.axis)
    def antiderivative(self, nu=1):
        """
        Construct a new piecewise polynomial representing the antiderivative.

        Parameters
        ----------
        nu : int, optional
            Order of antiderivative to evaluate. Default is 1, i.e. compute
            the first integral. If negative, the derivative is returned.

        Returns
        -------
        bp : BPoly
            Piecewise polynomial of order k + nu representing the
            antiderivative of this polynomial.

        Notes
        -----
        If antiderivative is computed and ``self.extrapolate='periodic'``,
        it will be set to False for the returned instance. This is done because
        the antiderivative is no longer periodic and its correct evaluation
        outside of the initially given x interval is difficult.
        """
        if nu <= 0:
            return self.derivative(-nu)

        if nu > 1:
            # Higher-order antiderivatives via repeated single integration.
            bp = self
            for k in range(nu):
                bp = bp.antiderivative()
            return bp

        # Construct the indefinite integrals on individual intervals
        c, x = self.c, self.x
        k = c.shape[0]
        c2 = np.zeros((k+1,) + c.shape[1:], dtype=c.dtype)

        # On each interval the antiderivative's Bernstein coefficients are a
        # cumulative sum of the originals, scaled by the interval width.
        c2[1:, ...] = np.cumsum(c, axis=0) / k
        delta = x[1:] - x[:-1]
        c2 *= delta[(None, slice(None)) + (None,)*(c.ndim-2)]

        # Now fix continuity: on the very first interval, take the integration
        # constant to be zero; on an interval [x_j, x_{j+1}) with j>0,
        # the integration constant is then equal to the jump of the `bp` at x_j.
        # The latter is given by the coefficient of B_{n+1, n+1}
        # *on the previous interval* (other B. polynomials are zero at the
        # breakpoint). Finally, use the fact that BPs form a partition of unity.
        c2[:,1:] += np.cumsum(c2[k, :], axis=0)[:-1]

        if self.extrapolate == 'periodic':
            extrapolate = False
        else:
            extrapolate = self.extrapolate

        return self.construct_fast(c2, x, extrapolate, axis=self.axis)
    def integrate(self, a, b, extrapolate=None):
        """
        Compute a definite integral over a piecewise polynomial.

        Parameters
        ----------
        a : float
            Lower integration bound
        b : float
            Upper integration bound
        extrapolate : {bool, 'periodic', None}, optional
            Whether to extrapolate to out-of-bounds points based on first
            and last intervals, or to return NaNs. If 'periodic', periodic
            extrapolation is used. If None (default), use `self.extrapolate`.

        Returns
        -------
        array_like
            Definite integral of the piecewise polynomial over [a, b]
        """
        # The definite integral is evaluated as the difference of the
        # antiderivative at the two bounds.
        # XXX: can probably use instead the fact that
        # \int_0^{1} B_{j, n}(x) \dx = 1/(n+1)
        ib = self.antiderivative()
        if extrapolate is None:
            extrapolate = self.extrapolate

        # ib.extrapolate shouldn't be 'periodic', it is converted to
        # False for 'periodic' in the antiderivative() call.
        if extrapolate != 'periodic':
            ib.extrapolate = extrapolate

        if extrapolate == 'periodic':
            # Split the integral into the part over period (can be several
            # of them) and the remaining part.

            # For simplicity and clarity convert to a <= b case.
            if a <= b:
                sign = 1
            else:
                a, b = b, a
                sign = -1

            xs, xe = self.x[0], self.x[-1]
            period = xe - xs
            interval = b - a
            n_periods, left = divmod(interval, period)
            res = n_periods * (ib(xe) - ib(xs))

            # Map a and b to [xs, xe].
            a = xs + (a - xs) % period
            b = a + left

            # If b <= xe then we need to integrate over [a, b], otherwise
            # over [a, xe] and from xs to what is remained.
            if b <= xe:
                res += ib(b) - ib(a)
            else:
                res += ib(xe) - ib(a) + ib(xs + left + a - xe) - ib(xs)

            return sign * res
        else:
            return ib(b) - ib(a)
    def extend(self, c, x, right=None):
        # The existing and the appended pieces may have different Bernstein
        # degrees; raise both to the common (maximum) degree before
        # delegating to the base-class implementation.
        k = max(self.c.shape[0], c.shape[0])
        self.c = self._raise_degree(self.c, k - self.c.shape[0])
        c = self._raise_degree(c, k - c.shape[0])
        return _PPolyBase.extend(self, c, x, right)
    # Inherit the docstring from the base class verbatim.
    extend.__doc__ = _PPolyBase.extend.__doc__
    @classmethod
    def from_power_basis(cls, pp, extrapolate=None):
        """
        Construct a piecewise polynomial in Bernstein basis
        from a power basis polynomial.

        Parameters
        ----------
        pp : PPoly
            A piecewise polynomial in the power basis
        extrapolate : bool or 'periodic', optional
            If bool, determines whether to extrapolate to out-of-bounds points
            based on first and last intervals, or to return NaNs.
            If 'periodic', periodic extrapolation is used. Default is True.
        """
        dx = np.diff(pp.x)
        k = pp.c.shape[0] - 1  # polynomial order

        rest = (None,)*(pp.c.ndim-2)

        c = np.zeros_like(pp.c)
        # Basis change: each scaled monomial term (x - x_i)**(k-a) expands
        # into Bernstein polynomials b_{j, k} with binomial weights.
        for a in range(k+1):
            factor = pp.c[a] / comb(k, k-a) * dx[(slice(None),)+rest]**(k-a)
            for j in range(k-a, k+1):
                c[j] += factor * comb(j, k-a)

        if extrapolate is None:
            extrapolate = pp.extrapolate

        return cls.construct_fast(c, pp.x, extrapolate, pp.axis)
@classmethod
def from_derivatives(cls, xi, yi, orders=None, extrapolate=None):
"""Construct a piecewise polynomial in the Bernstein basis,
compatible with the specified values and derivatives at breakpoints.
Parameters
----------
xi : array_like
sorted 1D array of x-coordinates
yi : array_like or list of array_likes
``yi[i][j]`` is the ``j``-th derivative known at ``xi[i]``
orders : None or int or array_like of ints. Default: None.
Specifies the degree of local polynomials. If not None, some
derivatives are ignored.
extrapolate : bool or 'periodic', optional
If bool, determines whether to extrapolate to out-of-bounds points
based on first and last intervals, or to return NaNs.
If 'periodic', periodic extrapolation is used. Default is True.
Notes
-----
If ``k`` derivatives are specified at a breakpoint ``x``, the
constructed polynomial is exactly ``k`` times continuously
differentiable at ``x``, unless the ``order`` is provided explicitly.
In the latter case, the smoothness of the polynomial at
the breakpoint is controlled by the ``order``.
Deduces the number of derivatives to match at each end
from ``order`` and the number of derivatives available. If
possible it uses the same number of derivatives from
each end; if the number is odd it tries to take the
extra one from y2. In any case if not enough derivatives
are available at one end or another it draws enough to
make up the total from the other end.
If the order is too high and not enough derivatives are available,
an exception is raised.
Examples
--------
>>> from scipy.interpolate import BPoly
>>> BPoly.from_derivatives([0, 1], [[1, 2], [3, 4]])
Creates a polynomial `f(x)` of degree 3, defined on `[0, 1]`
such that `f(0) = 1, df/dx(0) = 2, f(1) = 3, df/dx(1) = 4`
>>> BPoly.from_derivatives([0, 1, 2], [[0, 1], [0], [2]])
Creates a piecewise polynomial `f(x)`, such that
`f(0) = f(1) = 0`, `f(2) = 2`, and `df/dx(0) = 1`.
Based on the number of derivatives provided, the order of the
local polynomials is 2 on `[0, 1]` and 1 on `[1, 2]`.
Notice that no restriction is imposed on the derivatives at
`x = 1` and `x = 2`.
Indeed, the explicit form of the polynomial is::
f(x) = | x * (1 - x), 0 <= x < 1
| 2 * (x - 1), 1 <= x <= 2
So that f'(1-0) = -1 and f'(1+0) = 2
"""
xi = np.asarray(xi)
if len(xi) != len(yi):
raise ValueError("xi and yi need to have the same length")
if np.any(xi[1:] - xi[:1] <= 0):
raise ValueError("x coordinates are not in increasing order")
# number of intervals
m = len(xi) - 1
# global poly order is k-1, local orders are <=k and can vary
try:
k = max(len(yi[i]) + len(yi[i+1]) for i in range(m))
except TypeError:
raise ValueError("Using a 1D array for y? Please .reshape(-1, 1).")
if orders is None:
orders = [None] * m
else:
if isinstance(orders, (integer_types, np.integer)):
orders = [orders] * m
k = max(k, max(orders))
if any(o <= 0 for o in orders):
raise ValueError("Orders must be positive.")
c = []
for i in range(m):
y1, y2 = yi[i], yi[i+1]
if orders[i] is None:
n1, n2 = len(y1), len(y2)
else:
n = orders[i]+1
n1 = min(n//2, len(y1))
n2 = min(n - n1, len(y2))
n1 = min(n - n2, len(y2))
if n1+n2 != n:
mesg = ("Point %g has %d derivatives, point %g"
" has %d derivatives, but order %d requested" % (
xi[i], len(y1), xi[i+1], len(y2), orders[i]))
raise ValueError(mesg)
if not (n1 <= len(y1) and n2 <= len(y2)):
raise ValueError("`order` input incompatible with"
" length y1 or y2.")
b = BPoly._construct_from_derivatives(xi[i], xi[i+1],
y1[:n1], y2[:n2])
if len(b) < k:
b = BPoly._raise_degree(b, k - len(b))
c.append(b)
c = np.asarray(c)
return cls(c.swapaxes(0, 1), xi, extrapolate)
    @staticmethod
    def _construct_from_derivatives(xa, xb, ya, yb):
        r"""Compute the coefficients of a polynomial in the Bernstein basis
        given the values and derivatives at the edges.

        Return the coefficients of a polynomial in the Bernstein basis
        defined on `[xa, xb]` and having the values and derivatives at the
        endpoints ``xa`` and ``xb`` as specified by ``ya`` and ``yb``.
        The polynomial constructed is of the minimal possible degree, i.e.,
        if the lengths of ``ya`` and ``yb`` are ``na`` and ``nb``, the degree
        of the polynomial is ``na + nb - 1``.

        Parameters
        ----------
        xa : float
            Left-hand end point of the interval
        xb : float
            Right-hand end point of the interval
        ya : array_like
            Derivatives at ``xa``. ``ya[0]`` is the value of the function, and
            ``ya[i]`` for ``i > 0`` is the value of the ``i``-th derivative.
        yb : array_like
            Derivatives at ``xb``.

        Returns
        -------
        array
            coefficient array of a polynomial having specified derivatives

        Notes
        -----
        This uses several facts from life of Bernstein basis functions.
        First of all,

            .. math:: b'_{a, n} = n (b_{a-1, n-1} - b_{a, n-1})

        If B(x) is a linear combination of the form

            .. math:: B(x) = \sum_{a=0}^{n} c_a b_{a, n},

        then :math: B'(x) = n \sum_{a=0}^{n-1} (c_{a+1} - c_{a}) b_{a, n-1}.
        Iterating the latter one, one finds for the q-th derivative

            .. math:: B^{q}(x) = n!/(n-q)! \sum_{a=0}^{n-q} Q_a b_{a, n-q},

        with

          .. math:: Q_a = \sum_{j=0}^{q} (-)^{j+q} comb(q, j) c_{j+a}

        This way, only `a=0` contributes to :math: `B^{q}(x = xa)`, and
        `c_q` are found one by one by iterating `q = 0, ..., na`.

        At `x = xb` it's the same with `a = n - q`.

        """
        ya, yb = np.asarray(ya), np.asarray(yb)
        if ya.shape[1:] != yb.shape[1:]:
            raise ValueError('ya and yb have incompatible dimensions.')

        dta, dtb = ya.dtype, yb.dtype
        if (np.issubdtype(dta, np.complexfloating) or
                np.issubdtype(dtb, np.complexfloating)):
            # NOTE(review): np.complex_ / np.float_ are removed in NumPy 2.0;
            # on modern NumPy these should be complex128 / float64 — confirm
            # against the version this file targets.
            dt = np.complex_
        else:
            dt = np.float_

        na, nb = len(ya), len(yb)
        n = na + nb

        c = np.empty((na+nb,) + ya.shape[1:], dtype=dt)

        # compute coefficients of a polynomial degree na+nb-1
        # walk left-to-right: each step determines one new coefficient c_q
        # from the q-th derivative at xa and the previously found c_j.
        for q in range(0, na):
            c[q] = ya[q] / spec.poch(n - q, q) * (xb - xa)**q
            for j in range(0, q):
                c[q] -= (-1)**(j+q) * comb(q, j) * c[j]

        # now walk right-to-left, filling coefficients from the xb end
        for q in range(0, nb):
            c[-q-1] = yb[q] / spec.poch(n - q, q) * (-1)**q * (xb - xa)**q
            for j in range(0, q):
                c[-q-1] -= (-1)**(j+1) * comb(q, j+1) * c[-q+j]

        return c
@staticmethod
def _raise_degree(c, d):
r"""Raise a degree of a polynomial in the Bernstein basis.
Given the coefficients of a polynomial degree `k`, return (the
coefficients of) the equivalent polynomial of degree `k+d`.
Parameters
----------
c : array_like
coefficient array, 1D
d : integer
Returns
-------
array
coefficient array, 1D array of length `c.shape[0] + d`
Notes
-----
This uses the fact that a Bernstein polynomial `b_{a, k}` can be
identically represented as a linear combination of polynomials of
a higher degree `k+d`:
.. math:: b_{a, k} = comb(k, a) \sum_{j=0}^{d} b_{a+j, k+d} \
comb(d, j) / comb(k+d, a+j)
"""
if d == 0:
return c
k = c.shape[0] - 1
out = np.zeros((c.shape[0] + d,) + c.shape[1:], dtype=c.dtype)
for a in range(c.shape[0]):
f = c[a] * comb(k, a)
for j in range(d+1):
out[a+j] += f * comb(d, j) / comb(k+d, a+j)
return out
class NdPPoly(object):
    """
    Piecewise tensor product polynomial

    The value at point `xp = (x', y', z', ...)` is evaluated by first
    computing the interval indices `i` such that::

        x[0][i[0]] <= x' < x[0][i[0]+1]
        x[1][i[1]] <= y' < x[1][i[1]+1]
        ...

    and then computing::

        S = sum(c[k0-m0-1,...,kn-mn-1,i[0],...,i[n]]
                * (xp[0] - x[0][i[0]])**m0
                * ...
                * (xp[n] - x[n][i[n]])**mn
                for m0 in range(k[0]+1)
                ...
                for mn in range(k[n]+1))

    where ``k[j]`` is the degree of the polynomial in dimension j. This
    representation is the piecewise multivariate power basis.

    Parameters
    ----------
    c : ndarray, shape (k0, ..., kn, m0, ..., mn, ...)
        Polynomial coefficients, with polynomial order `kj` and
        `mj+1` intervals for each dimension `j`.
    x : ndim-tuple of ndarrays, shapes (mj+1,)
        Polynomial breakpoints for each dimension. These must be
        sorted in increasing order.
    extrapolate : bool, optional
        Whether to extrapolate to out-of-bounds points based on first
        and last intervals, or to return NaNs. Default: True.

    Attributes
    ----------
    x : tuple of ndarrays
        Breakpoints.
    c : ndarray
        Coefficients of the polynomials.

    Methods
    -------
    __call__
    construct_fast

    See also
    --------
    PPoly : piecewise polynomials in 1D

    Notes
    -----
    High-order polynomials in the power basis can be numerically
    unstable.

    """

    def __init__(self, c, x, extrapolate=None):
        # Breakpoints stored as one contiguous float64 1-D array per dimension.
        self.x = tuple(np.ascontiguousarray(v, dtype=np.float64) for v in x)
        self.c = np.asarray(c)
        if extrapolate is None:
            extrapolate = True
        self.extrapolate = bool(extrapolate)

        ndim = len(self.x)
        if any(v.ndim != 1 for v in self.x):
            raise ValueError("x arrays must all be 1-dimensional")
        if any(v.size < 2 for v in self.x):
            raise ValueError("x arrays must all contain at least 2 points")
        # NOTE(review): the checks below read the original `c` argument
        # (not self.c), so a plain list input would fail with AttributeError
        # before reaching the ValueError — confirm whether intentional.
        if c.ndim < 2*ndim:
            raise ValueError("c must have at least 2*len(x) dimensions")
        if any(np.any(v[1:] - v[:-1] < 0) for v in self.x):
            raise ValueError("x-coordinates are not in increasing order")
        if any(a != b.size - 1 for a, b in zip(c.shape[ndim:2*ndim], self.x)):
            raise ValueError("x and c do not agree on the number of intervals")

        dtype = self._get_dtype(self.c.dtype)
        self.c = np.ascontiguousarray(self.c, dtype=dtype)

    @classmethod
    def construct_fast(cls, c, x, extrapolate=None):
        """
        Construct the piecewise polynomial without making checks.

        Takes the same parameters as the constructor. Input arguments
        `c` and `x` must be arrays of the correct shape and type.  The
        `c` array can only be of dtypes float and complex, and `x`
        array must have dtype float.

        """
        self = object.__new__(cls)
        self.c = c
        self.x = x
        if extrapolate is None:
            extrapolate = True
        self.extrapolate = extrapolate
        return self

    def _get_dtype(self, dtype):
        # Promote to complex if either the input dtype or the stored
        # coefficients are complex; otherwise use float.
        # NOTE(review): np.complex_ / np.float_ are removed in NumPy 2.0.
        if np.issubdtype(dtype, np.complexfloating) \
               or np.issubdtype(self.c.dtype, np.complexfloating):
            return np.complex_
        else:
            return np.float_

    def _ensure_c_contiguous(self):
        # The compiled evaluation routines require C-contiguous coefficients
        # and a tuple of breakpoint arrays.
        if not self.c.flags.c_contiguous:
            self.c = self.c.copy()
        if not isinstance(self.x, tuple):
            self.x = tuple(self.x)

    def __call__(self, x, nu=None, extrapolate=None):
        """
        Evaluate the piecewise polynomial or its derivative

        Parameters
        ----------
        x : array-like
            Points to evaluate the interpolant at.
        nu : tuple, optional
            Orders of derivatives to evaluate. Each must be non-negative.
        extrapolate : bool, optional
            Whether to extrapolate to out-of-bounds points based on first
            and last intervals, or to return NaNs.

        Returns
        -------
        y : array-like
            Interpolated values. Shape is determined by replacing
            the interpolation axis in the original array with the shape of x.

        Notes
        -----
        Derivatives are evaluated piecewise for each polynomial
        segment, even if the polynomial is not differentiable at the
        breakpoints. The polynomial intervals are considered half-open,
        ``[a, b)``, except for the last interval which is closed
        ``[a, b]``.

        """
        if extrapolate is None:
            extrapolate = self.extrapolate
        else:
            extrapolate = bool(extrapolate)

        ndim = len(self.x)

        # Flatten the evaluation points to shape (npoints, ndim).
        x = _ndim_coords_from_arrays(x)
        x_shape = x.shape
        x = np.ascontiguousarray(x.reshape(-1, x.shape[-1]), dtype=np.float_)

        if nu is None:
            nu = np.zeros((ndim,), dtype=np.intc)
        else:
            nu = np.asarray(nu, dtype=np.intc)
            if nu.ndim != 1 or nu.shape[0] != ndim:
                raise ValueError("invalid number of derivative orders nu")

        # Collapse the coefficient array to 3D (orders, intervals, trailing)
        # as expected by the compiled routine.
        dim1 = prod(self.c.shape[:ndim])
        dim2 = prod(self.c.shape[ndim:2*ndim])
        dim3 = prod(self.c.shape[2*ndim:])
        ks = np.array(self.c.shape[:ndim], dtype=np.intc)

        out = np.empty((x.shape[0], dim3), dtype=self.c.dtype)
        self._ensure_c_contiguous()

        _ppoly.evaluate_nd(self.c.reshape(dim1, dim2, dim3),
                           self.x,
                           ks,
                           x,
                           nu,
                           bool(extrapolate),
                           out)

        return out.reshape(x_shape[:-1] + self.c.shape[2*ndim:])

    def _derivative_inplace(self, nu, axis):
        """
        Compute 1D derivative along a selected dimension in-place
        May result to non-contiguous c array.
        """
        if nu < 0:
            return self._antiderivative_inplace(-nu, axis)

        ndim = len(self.x)
        axis = axis % ndim

        # reduce order
        if nu == 0:
            # noop
            return
        else:
            # Drop the nu highest-order coefficients along `axis`.
            # NOTE(review): indexing with a list (rather than tuple(sl)) is
            # deprecated fancy indexing and errors on modern NumPy — verify
            # against the targeted NumPy version.
            sl = [slice(None)]*ndim
            sl[axis] = slice(None, -nu, None)
            c2 = self.c[sl]

        if c2.shape[axis] == 0:
            # derivative of order 0 is zero
            shp = list(c2.shape)
            shp[axis] = 1
            c2 = np.zeros(shp, dtype=c2.dtype)

        # multiply by the correct rising factorials
        factor = spec.poch(np.arange(c2.shape[axis], 0, -1), nu)
        sl = [None]*c2.ndim
        sl[axis] = slice(None)
        c2 *= factor[sl]

        self.c = c2

    def _antiderivative_inplace(self, nu, axis):
        """
        Compute 1D antiderivative along a selected dimension
        May result to non-contiguous c array.
        """
        if nu <= 0:
            return self._derivative_inplace(-nu, axis)

        ndim = len(self.x)
        axis = axis % ndim

        # Move the integration axis to the front so the 1D logic applies.
        perm = list(range(ndim))
        perm[0], perm[axis] = perm[axis], perm[0]
        perm = perm + list(range(ndim, self.c.ndim))

        c = self.c.transpose(perm)

        c2 = np.zeros((c.shape[0] + nu,) + c.shape[1:],
                      dtype=c.dtype)
        c2[:-nu] = c

        # divide by the correct rising factorials
        factor = spec.poch(np.arange(c.shape[0], 0, -1), nu)
        c2[:-nu] /= factor[(slice(None),) + (None,)*(c.ndim-1)]

        # fix continuity of added degrees of freedom
        perm2 = list(range(c2.ndim))
        perm2[1], perm2[ndim+axis] = perm2[ndim+axis], perm2[1]

        c2 = c2.transpose(perm2)
        c2 = c2.copy()
        _ppoly.fix_continuity(c2.reshape(c2.shape[0], c2.shape[1], -1),
                              self.x[axis], nu-1)

        # Undo both permutations (each is its own inverse swap).
        c2 = c2.transpose(perm2)
        c2 = c2.transpose(perm)

        # Done
        self.c = c2

    def derivative(self, nu):
        """
        Construct a new piecewise polynomial representing the derivative.

        Parameters
        ----------
        nu : ndim-tuple of int
            Order of derivatives to evaluate for each dimension.
            If negative, the antiderivative is returned.

        Returns
        -------
        pp : NdPPoly
            Piecewise polynomial of orders (k[0] - nu[0], ..., k[n] - nu[n])
            representing the derivative of this polynomial.

        Notes
        -----
        Derivatives are evaluated piecewise for each polynomial
        segment, even if the polynomial is not differentiable at the
        breakpoints. The polynomial intervals in each dimension are
        considered half-open, ``[a, b)``, except for the last interval
        which is closed ``[a, b]``.

        """
        # Work on a copy so `self` is left untouched.
        p = self.construct_fast(self.c.copy(), self.x, self.extrapolate)

        for axis, n in enumerate(nu):
            p._derivative_inplace(n, axis)

        p._ensure_c_contiguous()
        return p

    def antiderivative(self, nu):
        """
        Construct a new piecewise polynomial representing the antiderivative.

        Antiderivative is also the indefinite integral of the function,
        and derivative is its inverse operation.

        Parameters
        ----------
        nu : ndim-tuple of int
            Order of derivatives to evaluate for each dimension.
            If negative, the derivative is returned.

        Returns
        -------
        pp : PPoly
            Piecewise polynomial of order k2 = k + n representing
            the antiderivative of this polynomial.

        Notes
        -----
        The antiderivative returned by this function is continuous and
        continuously differentiable to order n-1, up to floating point
        rounding error.

        """
        # Work on a copy so `self` is left untouched.
        p = self.construct_fast(self.c.copy(), self.x, self.extrapolate)

        for axis, n in enumerate(nu):
            p._antiderivative_inplace(n, axis)

        p._ensure_c_contiguous()
        return p

    def integrate_1d(self, a, b, axis, extrapolate=None):
        r"""
        Compute NdPPoly representation for one dimensional definite integral

        The result is a piecewise polynomial representing the integral:

        .. math::

           p(y, z, ...) = \int_a^b dx\, p(x, y, z, ...)

        where the dimension integrated over is specified with the
        `axis` parameter.

        Parameters
        ----------
        a, b : float
            Lower and upper bound for integration.
        axis : int
            Dimension over which to compute the 1D integrals
        extrapolate : bool, optional
            Whether to extrapolate to out-of-bounds points based on first
            and last intervals, or to return NaNs.

        Returns
        -------
        ig : NdPPoly or array-like
            Definite integral of the piecewise polynomial over [a, b].
            If the polynomial was 1-dimensional, an array is returned,
            otherwise, an NdPPoly object.

        """
        if extrapolate is None:
            extrapolate = self.extrapolate
        else:
            extrapolate = bool(extrapolate)

        ndim = len(self.x)
        axis = int(axis) % ndim

        # reuse 1D integration routines: move the order axis and the
        # interval axis of the chosen dimension to the front, flatten the
        # rest, and integrate with PPoly.
        c = self.c
        swap = list(range(c.ndim))
        swap.insert(0, swap[axis])
        del swap[axis + 1]
        swap.insert(1, swap[ndim + axis])
        del swap[ndim + axis + 1]

        c = c.transpose(swap)
        p = PPoly.construct_fast(c.reshape(c.shape[0], c.shape[1], -1),
                                 self.x[axis],
                                 extrapolate=extrapolate)
        out = p.integrate(a, b, extrapolate=extrapolate)

        # Construct result
        if ndim == 1:
            return out.reshape(c.shape[2:])
        else:
            c = out.reshape(c.shape[2:])
            x = self.x[:axis] + self.x[axis+1:]
            return self.construct_fast(c, x, extrapolate=extrapolate)

    def integrate(self, ranges, extrapolate=None):
        """
        Compute a definite integral over a piecewise polynomial.

        Parameters
        ----------
        ranges : ndim-tuple of 2-tuples float
            Sequence of lower and upper bounds for each dimension,
            ``[(a[0], b[0]), ..., (a[ndim-1], b[ndim-1])]``
        extrapolate : bool, optional
            Whether to extrapolate to out-of-bounds points based on first
            and last intervals, or to return NaNs.

        Returns
        -------
        ig : array_like
            Definite integral of the piecewise polynomial over
            [a[0], b[0]] x ... x [a[ndim-1], b[ndim-1]]

        """
        ndim = len(self.x)

        if extrapolate is None:
            extrapolate = self.extrapolate
        else:
            extrapolate = bool(extrapolate)

        if not hasattr(ranges, '__len__') or len(ranges) != ndim:
            raise ValueError("Range not a sequence of correct length")

        self._ensure_c_contiguous()

        # Reuse 1D integration routine, integrating out one dimension at a
        # time.  After each pass the corresponding axes are consumed, which
        # is why the swap offset (ndim - n) shrinks with n.
        c = self.c
        for n, (a, b) in enumerate(ranges):
            swap = list(range(c.ndim))
            swap.insert(1, swap[ndim - n])
            del swap[ndim - n + 1]

            c = c.transpose(swap)

            p = PPoly.construct_fast(c, self.x[n], extrapolate=extrapolate)
            out = p.integrate(a, b, extrapolate=extrapolate)
            c = out.reshape(c.shape[2:])

        return c
class RegularGridInterpolator(object):
    """
    Interpolation on a regular grid in arbitrary dimensions

    The data must be defined on a regular grid; the grid spacing however may be
    uneven. Linear and nearest-neighbour interpolation are supported. After
    setting up the interpolator object, the interpolation method (*linear* or
    *nearest*) may be chosen at each evaluation.

    Parameters
    ----------
    points : tuple of ndarray of float, with shapes (m1, ), ..., (mn, )
        The points defining the regular grid in n dimensions.
    values : array_like, shape (m1, ..., mn, ...)
        The data on the regular grid in n dimensions.
    method : str, optional
        The method of interpolation to perform. Supported are "linear" and
        "nearest". This parameter will become the default for the object's
        ``__call__`` method. Default is "linear".
    bounds_error : bool, optional
        If True, when interpolated values are requested outside of the
        domain of the input data, a ValueError is raised.
        If False, then `fill_value` is used.
    fill_value : number, optional
        If provided, the value to use for points outside of the
        interpolation domain. If None, values outside
        the domain are extrapolated.

    Methods
    -------
    __call__

    Notes
    -----
    Contrary to LinearNDInterpolator and NearestNDInterpolator, this class
    avoids expensive triangulation of the input data by taking advantage of the
    regular grid structure.

    If any of `points` have a dimension of size 1, linear interpolation will
    return an array of `nan` values. Nearest-neighbor interpolation will work
    as usual in this case.

    .. versionadded:: 0.14

    Examples
    --------
    Evaluate a simple example function on the points of a 3D grid:

    >>> from scipy.interpolate import RegularGridInterpolator
    >>> def f(x, y, z):
    ...     return 2 * x**3 + 3 * y**2 - z
    >>> x = np.linspace(1, 4, 11)
    >>> y = np.linspace(4, 7, 22)
    >>> z = np.linspace(7, 9, 33)
    >>> data = f(*np.meshgrid(x, y, z, indexing='ij', sparse=True))

    ``data`` is now a 3D array with ``data[i,j,k] = f(x[i], y[j], z[k])``.
    Next, define an interpolating function from this data:

    >>> my_interpolating_function = RegularGridInterpolator((x, y, z), data)

    Evaluate the interpolating function at the two points
    ``(x,y,z) = (2.1, 6.2, 8.3)`` and ``(3.3, 5.2, 7.1)``:

    >>> pts = np.array([[2.1, 6.2, 8.3], [3.3, 5.2, 7.1]])
    >>> my_interpolating_function(pts)
    array([ 125.80469388,  146.30069388])

    which is indeed a close approximation to
    ``[f(2.1, 6.2, 8.3), f(3.3, 5.2, 7.1)]``.

    See also
    --------
    NearestNDInterpolator : Nearest neighbour interpolation on unstructured
                            data in N dimensions

    LinearNDInterpolator : Piecewise linear interpolant on unstructured data
                           in N dimensions

    References
    ----------
    .. [1] Python package *regulargrid* by Johannes Buchner, see
           https://pypi.python.org/pypi/regulargrid/
    .. [2] Trilinear interpolation. (2013, January 17). In Wikipedia, The Free
           Encyclopedia. Retrieved 27 Feb 2013 01:28.
           http://en.wikipedia.org/w/index.php?title=Trilinear_interpolation&oldid=533448871
    .. [3] Weiser, Alan, and Sergio E. Zarantonello. "A note on piecewise linear
           and multilinear table interpolation in many dimensions." MATH.
           COMPUT. 50.181 (1988): 189-196.
           http://www.ams.org/journals/mcom/1988-50-181/S0025-5718-1988-0917826-0/S0025-5718-1988-0917826-0.pdf

    """
    # this class is based on code originally programmed by Johannes Buchner,
    # see https://github.com/JohannesBuchner/regulargrid

    def __init__(self, points, values, method="linear", bounds_error=True,
                 fill_value=np.nan):
        if method not in ["linear", "nearest"]:
            raise ValueError("Method '%s' is not defined" % method)
        self.method = method
        self.bounds_error = bounds_error

        if not hasattr(values, 'ndim'):
            # allow reasonable duck-typed values
            values = np.asarray(values)

        if len(points) > values.ndim:
            raise ValueError("There are %d point arrays, but values has %d "
                             "dimensions" % (len(points), values.ndim))

        if hasattr(values, 'dtype') and hasattr(values, 'astype'):
            # promote integer/bool data to float so interpolation weights work
            if not np.issubdtype(values.dtype, np.inexact):
                values = values.astype(float)

        self.fill_value = fill_value
        if fill_value is not None:
            fill_value_dtype = np.asarray(fill_value).dtype
            if (hasattr(values, 'dtype') and not
                    np.can_cast(fill_value_dtype, values.dtype,
                                casting='same_kind')):
                raise ValueError("fill_value must be either 'None' or "
                                 "of a type compatible with values")

        for i, p in enumerate(points):
            if not np.all(np.diff(p) > 0.):
                raise ValueError("The points in dimension %d must be strictly "
                                 "ascending" % i)
            if not np.asarray(p).ndim == 1:
                raise ValueError("The points in dimension %d must be "
                                 "1-dimensional" % i)
            if not values.shape[i] == len(p):
                raise ValueError("There are %d points and %d values in "
                                 "dimension %d" % (len(p), values.shape[i], i))
        self.grid = tuple([np.asarray(p) for p in points])
        self.values = values

    def __call__(self, xi, method=None):
        """
        Interpolation at coordinates

        Parameters
        ----------
        xi : ndarray of shape (..., ndim)
            The coordinates to sample the gridded data at

        method : str
            The method of interpolation to perform. Supported are "linear" and
            "nearest".

        """
        method = self.method if method is None else method
        if method not in ["linear", "nearest"]:
            raise ValueError("Method '%s' is not defined" % method)

        ndim = len(self.grid)
        xi = _ndim_coords_from_arrays(xi, ndim=ndim)
        if xi.shape[-1] != len(self.grid):
            # BUG FIX: use xi.shape[-1] (the quantity actually tested above);
            # xi.shape[1] reported the wrong number and raised IndexError
            # for 1-D xi.
            raise ValueError("The requested sample points xi have dimension "
                             "%d, but this RegularGridInterpolator has "
                             "dimension %d" % (xi.shape[-1], ndim))

        xi_shape = xi.shape
        xi = xi.reshape(-1, xi_shape[-1])

        if self.bounds_error:
            for i, p in enumerate(xi.T):
                if not np.logical_and(np.all(self.grid[i][0] <= p),
                                      np.all(p <= self.grid[i][-1])):
                    raise ValueError("One of the requested xi is out of bounds "
                                     "in dimension %d" % i)

        indices, norm_distances, out_of_bounds = self._find_indices(xi.T)
        if method == "linear":
            result = self._evaluate_linear(indices,
                                           norm_distances,
                                           out_of_bounds)
        elif method == "nearest":
            result = self._evaluate_nearest(indices,
                                            norm_distances,
                                            out_of_bounds)
        if not self.bounds_error and self.fill_value is not None:
            result[out_of_bounds] = self.fill_value

        return result.reshape(xi_shape[:-1] + self.values.shape[ndim:])

    def _evaluate_linear(self, indices, norm_distances, out_of_bounds):
        # slice for broadcasting over trailing dimensions in self.values
        vslice = (slice(None),) + (None,)*(self.values.ndim - len(indices))

        # find relevant values
        # each i and i+1 represents a edge
        edges = itertools.product(*[[i, i + 1] for i in indices])
        values = 0.
        for edge_indices in edges:
            # multilinear weight: product over dimensions of either
            # (1 - t) or t depending on which edge of the cell is used
            weight = 1.
            for ei, i, yi in zip(edge_indices, indices, norm_distances):
                weight *= np.where(ei == i, 1 - yi, yi)
            values += np.asarray(self.values[edge_indices]) * weight[vslice]
        return values

    def _evaluate_nearest(self, indices, norm_distances, out_of_bounds):
        # round each normalized distance to the nearer cell edge
        idx_res = []
        for i, yi in zip(indices, norm_distances):
            idx_res.append(np.where(yi <= .5, i, i + 1))
        # BUG FIX: index with a tuple; indexing an ndarray with a *list* of
        # index arrays is deprecated fancy indexing and errors on modern
        # NumPy. A tuple is behavior-identical.
        return self.values[tuple(idx_res)]

    def _find_indices(self, xi):
        # find relevant edges between which xi are situated
        indices = []
        # compute distance to lower edge in unity units
        norm_distances = []
        # check for out of bounds xi
        out_of_bounds = np.zeros((xi.shape[1]), dtype=bool)
        # iterate through dimensions
        for x, grid in zip(xi, self.grid):
            i = np.searchsorted(grid, x) - 1
            # clip to valid cell range so extrapolation uses the edge cells
            i[i < 0] = 0
            i[i > grid.size - 2] = grid.size - 2
            indices.append(i)
            norm_distances.append((x - grid[i]) /
                                  (grid[i + 1] - grid[i]))
            if not self.bounds_error:
                out_of_bounds += x < grid[0]
                out_of_bounds += x > grid[-1]
        return indices, norm_distances, out_of_bounds
def interpn(points, values, xi, method="linear", bounds_error=True,
            fill_value=np.nan):
    """
    Multidimensional interpolation on regular grids.

    Parameters
    ----------
    points : tuple of ndarray of float, with shapes (m1, ), ..., (mn, )
        The points defining the regular grid in n dimensions.
    values : array_like, shape (m1, ..., mn, ...)
        The data on the regular grid in n dimensions.
    xi : ndarray of shape (..., ndim)
        The coordinates to sample the gridded data at
    method : str, optional
        The method of interpolation to perform. Supported are "linear" and
        "nearest", and "splinef2d". "splinef2d" is only supported for
        2-dimensional data.
    bounds_error : bool, optional
        If True, when interpolated values are requested outside of the
        domain of the input data, a ValueError is raised.
        If False, then `fill_value` is used.
    fill_value : number, optional
        If provided, the value to use for points outside of the
        interpolation domain. If None, values outside
        the domain are extrapolated.  Extrapolation is not supported by method
        "splinef2d".

    Returns
    -------
    values_x : ndarray, shape xi.shape[:-1] + values.shape[ndim:]
        Interpolated values at input coordinates.

    Notes
    -----

    .. versionadded:: 0.14

    See also
    --------
    NearestNDInterpolator : Nearest neighbour interpolation on unstructured
                            data in N dimensions

    LinearNDInterpolator : Piecewise linear interpolant on unstructured data
                           in N dimensions

    RegularGridInterpolator : Linear and nearest-neighbor Interpolation on a
                              regular grid in arbitrary dimensions

    RectBivariateSpline : Bivariate spline approximation over a rectangular mesh

    """
    # sanity check 'method' kwarg
    if method not in ["linear", "nearest", "splinef2d"]:
        raise ValueError("interpn only understands the methods 'linear', "
                         "'nearest', and 'splinef2d'. You provided %s." %
                         method)

    if not hasattr(values, 'ndim'):
        values = np.asarray(values)

    ndim = values.ndim
    if ndim > 2 and method == "splinef2d":
        raise ValueError("The method spline2fd can only be used for "
                         "2-dimensional input data")
    if not bounds_error and fill_value is None and method == "splinef2d":
        raise ValueError("The method spline2fd does not support extrapolation.")

    # sanity check consistency of input dimensions
    if len(points) > ndim:
        raise ValueError("There are %d point arrays, but values has %d "
                         "dimensions" % (len(points), ndim))
    if len(points) != ndim and method == 'splinef2d':
        raise ValueError("The method spline2fd can only be used for "
                         "scalar data with one point per coordinate")

    # sanity check input grid
    for i, p in enumerate(points):
        if not np.all(np.diff(p) > 0.):
            raise ValueError("The points in dimension %d must be strictly "
                             "ascending" % i)
        if not np.asarray(p).ndim == 1:
            raise ValueError("The points in dimension %d must be "
                             "1-dimensional" % i)
        if not values.shape[i] == len(p):
            raise ValueError("There are %d points and %d values in "
                             "dimension %d" % (len(p), values.shape[i], i))
    grid = tuple([np.asarray(p) for p in points])

    # sanity check requested xi
    xi = _ndim_coords_from_arrays(xi, ndim=len(grid))
    if xi.shape[-1] != len(grid):
        # BUG FIX: report xi.shape[-1] (the quantity tested above);
        # xi.shape[1] gave the wrong number and raised IndexError for 1-D xi.
        raise ValueError("The requested sample points xi have dimension "
                         "%d, but this RegularGridInterpolator has "
                         "dimension %d" % (xi.shape[-1], len(grid)))

    for i, p in enumerate(xi.T):
        if bounds_error and not np.logical_and(np.all(grid[i][0] <= p),
                                               np.all(p <= grid[i][-1])):
            raise ValueError("One of the requested xi is out of bounds "
                             "in dimension %d" % i)

    # perform interpolation
    if method == "linear":
        interp = RegularGridInterpolator(points, values, method="linear",
                                         bounds_error=bounds_error,
                                         fill_value=fill_value)
        return interp(xi)
    elif method == "nearest":
        interp = RegularGridInterpolator(points, values, method="nearest",
                                         bounds_error=bounds_error,
                                         fill_value=fill_value)
        return interp(xi)
    elif method == "splinef2d":
        xi_shape = xi.shape
        xi = xi.reshape(-1, xi.shape[-1])

        # RectBivariateSpline doesn't support fill_value; we need to wrap here
        idx_valid = np.all((grid[0][0] <= xi[:, 0], xi[:, 0] <= grid[0][-1],
                            grid[1][0] <= xi[:, 1], xi[:, 1] <= grid[1][-1]),
                           axis=0)
        result = np.empty_like(xi[:, 0])

        # make a copy of values for RectBivariateSpline
        interp = RectBivariateSpline(points[0], points[1], values[:])
        result[idx_valid] = interp.ev(xi[idx_valid, 0], xi[idx_valid, 1])
        result[np.logical_not(idx_valid)] = fill_value

        return result.reshape(xi_shape[:-1])
# backward compatibility wrapper
class _ppform(PPoly):
    """
    Deprecated piecewise polynomial class.

    New code should use the `PPoly` class instead.
    """

    def __init__(self, coeffs, breaks, fill=0.0, sort=False):
        # Kept only for backward compatibility; warn on every construction.
        warnings.warn("_ppform is deprecated -- use PPoly instead",
                      category=DeprecationWarning)

        if sort:
            breaks = np.sort(breaks)
        else:
            breaks = np.asarray(breaks)
        PPoly.__init__(self, coeffs, breaks)

        # Legacy attribute aliases expected by old callers.
        self.coeffs = self.c
        self.breaks = self.x
        self.K = self.coeffs.shape[0]   # number of polynomial coefficients
        self.fill = fill
        self.a = self.breaks[0]         # left edge of the domain
        self.b = self.breaks[-1]        # right edge of the domain

    def __call__(self, x):
        return PPoly.__call__(self, x, 0, False)

    def _evaluate(self, x, nu, extrapolate, out):
        PPoly._evaluate(self, x, nu, extrapolate, out)

        # Legacy behavior: points outside [a, b] get `fill` instead of
        # extrapolated values.
        out[~((x >= self.a) & (x <= self.b))] = self.fill

        return out

    @classmethod
    def fromspline(cls, xk, cvals, order, fill=0.0):
        """Build a _ppform from a (knots, coefficients, order) spline."""
        # Note: this spline representation is incompatible with FITPACK
        N = len(xk)-1
        sivals = np.empty((order+1, N), dtype=float)
        # Python-2-only `xrange` replaced with `range`: identical behavior
        # on Python 2, and no NameError on Python 3.
        for m in range(order, -1, -1):
            fact = spec.gamma(m+1)
            res = _fitpack._bspleval(xk[:-1], xk, cvals, order, m)
            res /= fact
            sivals[order-m, :] = res
        return cls(sivals, xk, fill=fill)
# The 3 private functions below can be called by splmake().
def _dot0(a, b):
"""Similar to numpy.dot, but sum over last axis of a and 1st axis of b"""
if b.ndim <= 2:
return dot(a, b)
else:
axes = list(range(b.ndim))
axes.insert(-1, 0)
axes.pop(0)
return dot(a, b.transpose(axes))
def _find_smoothest(xk, yk, order, conds=None, B=None):
    """Solve for spline coefficients minimizing a roughness norm.

    Finds c minimizing ||J c||_2 subject to the interpolation constraint
    B c = yk, via an SVD of B.  `conds` is accepted for interface
    compatibility with the other _find_* solvers but ignored.
    """
    # construct Bmatrix, and Jmatrix
    # e = J*c
    # minimize norm(e,2) given B*c=yk
    # if desired B can be given
    # conds is ignored
    N = len(xk)-1
    K = order
    if B is None:
        B = _fitpack._bsplmat(order, xk)
    J = _fitpack._bspldismat(order, xk)
    u, s, vh = scipy.linalg.svd(B)
    ind = K-1
    # Split the right singular vectors: V2 spans the null space of B (the
    # directions free to absorb the smoothness optimization), V1 the rest.
    V2 = vh[-ind:,:].T
    V1 = vh[:-ind,:].T
    A = dot(J.T,J)
    tmp = dot(V2.T,A)
    Q = dot(tmp,V2)
    p = scipy.linalg.solve(Q, tmp)
    # Assemble the solution operator step by step (tmp is reused).
    tmp = dot(V2,p)
    tmp = np.eye(N+K) - tmp
    tmp = dot(tmp,V1)
    tmp = dot(tmp,np.diag(1.0/s))
    tmp = dot(tmp,u.T)
    # _dot0 contracts over the first axis of yk, so yk may be N-d
    # (multiple curves sharing the same knots).
    return _dot0(tmp, yk)
# conds is a tuple of an array and a vector
# giving the left-hand and the right-hand side
# of the additional equations to add to B
def _find_user(xk, yk, order, conds, B):
lh = conds[0]
rh = conds[1]
B = np.concatenate((B, lh), axis=0)
w = np.concatenate((yk, rh), axis=0)
M, N = B.shape
if (M > N):
raise ValueError("over-specification of conditions")
elif (M < N):
return _find_smoothest(xk, yk, order, None, B)
else:
return scipy.linalg.solve(B, w)
# Remove the 3 private functions above as well when removing splmake
@np.deprecate(message="splmake is deprecated in scipy 0.19.0, "
              "use make_interp_spline instead.")
def splmake(xk, yk, order=3, kind='smoothest', conds=None):
    """
    Return a representation of a spline given data-points at internal knots

    Parameters
    ----------
    xk : array_like
        The input array of x values of rank 1
    yk : array_like
        The input array of y values of rank N. `yk` can be an N-d array to
        represent more than one curve, through the same `xk` points. The first
        dimension is assumed to be the interpolating dimension and is the same
        length of `xk`.
    order : int, optional
        Order of the spline
    kind : str, optional
        Can be 'smoothest', 'not_a_knot', 'fixed', 'clamped', 'natural',
        'periodic', 'symmetric', 'user', 'mixed' and it is ignored if order < 2
    conds : optional
        Conds

    Returns
    -------
    splmake : tuple
        Return a (`xk`, `cvals`, `k`) representation of a spline given
        data-points where the (internal) knots are at the data-points.

    Raises
    ------
    ValueError
        If `order` is negative.
    NotImplementedError
        If no solver named ``_find_<kind>`` exists in this module.
    """
    yk = np.asanyarray(yk)

    order = int(order)
    if order < 0:
        raise ValueError("order must not be negative")
    # Orders 0 and 1 need no solver: the data themselves (dropping the
    # last point for order 0) are the coefficients.
    if order == 0:
        return xk, yk[:-1], order
    elif order == 1:
        return xk, yk, order

    # Look up the solver function by name (e.g. _find_smoothest,
    # _find_user).  This replaces eval() guarded by a bare `except:`,
    # which was unsafe with arbitrary `kind` strings and silently hid
    # unrelated errors as NotImplementedError.
    func = globals().get('_find_%s' % kind)
    if func is None:
        raise NotImplementedError

    # the constraint matrix
    B = _fitpack._bsplmat(order, xk)
    coefs = func(xk, yk, order, conds, B)
    return xk, coefs, order
@np.deprecate(message="spleval is deprecated in scipy 0.19.0, "
              "use BSpline instead.")
def spleval(xck, xnew, deriv=0):
    """
    Evaluate a fixed spline represented by the given tuple at the new x-values

    The `xj` values are the interior knot points. The approximation
    region is `xj[0]` to `xj[-1]`. If N+1 is the length of `xj`, then `cvals`
    should have length N+k where `k` is the order of the spline.

    Parameters
    ----------
    (xj, cvals, k) : tuple
        Parameters that define the fixed spline
    xj : array_like
        Interior knot points
    cvals : array_like
        Curvature
    k : int
        Order of the spline
    xnew : array_like
        Locations to calculate spline
    deriv : int
        Deriv

    Returns
    -------
    spleval : ndarray
        If `cvals` represents more than one curve (`cvals.ndim` > 1) and/or
        `xnew` is N-d, then the result is `xnew.shape` + `cvals.shape[1:]`
        providing the interpolation of multiple curves.

    Notes
    -----
    Internally, an additional `k`-1 knot points are added on either side of
    the spline.
    """
    # Unpack the (knots, coefficients, order) spline representation.
    (xj, cvals, k) = xck
    # Flatten the evaluation points; the original shape is restored below.
    oldshape = np.shape(xnew)
    xx = np.ravel(xnew)
    # Trailing axes of cvals index independent curves sharing the knots.
    sh = cvals.shape[1:]
    res = np.empty(xx.shape + sh, dtype=cvals.dtype)
    for index in np.ndindex(*sh):
        sl = (slice(None),) + index
        if issubclass(cvals.dtype.type, np.complexfloating):
            # Complex coefficients: evaluate real and imaginary parts
            # independently, since _bspleval works on real data.
            res[sl].real = _fitpack._bspleval(xx,xj, cvals.real[sl], k, deriv)
            res[sl].imag = _fitpack._bspleval(xx,xj, cvals.imag[sl], k, deriv)
        else:
            res[sl] = _fitpack._bspleval(xx, xj, cvals[sl], k, deriv)
    # Reshape to xnew.shape + cvals.shape[1:] as documented above.
    res.shape = oldshape + sh
    return res
# When `spltopp` gets removed, also remove the _ppform class.
@np.deprecate(message="spltopp is deprecated in scipy 0.19.0, "
              "use PPoly.from_spline instead.")
def spltopp(xk, cvals, k):
    """Return a piece-wise polynomial object from a fixed-spline tuple."""
    # Thin shim over the deprecated _ppform class.
    pp = _ppform.fromspline(xk, cvals, k)
    return pp
@np.deprecate(message="spline is deprecated in scipy 0.19.0, "
              "use Bspline class instead.")
def spline(xk, yk, xnew, order=3, kind='smoothest', conds=None):
    """
    Interpolate a curve at new points using a spline fit

    Parameters
    ----------
    xk, yk : array_like
        The x and y values that define the curve.
    xnew : array_like
        The x values where spline should estimate the y values.
    order : int
        Default is 3.
    kind : string
        One of {'smoothest'}
    conds : Don't know
        Don't know

    Returns
    -------
    spline : ndarray
        An array of y values; the spline evaluated at the positions `xnew`.
    """
    # Build the (knots, coefficients, order) representation, then
    # evaluate it at the requested locations.
    representation = splmake(xk, yk, order=order, kind=kind, conds=conds)
    return spleval(representation, xnew)
|
waynebhayesREPO_NAMESpArcFiRePATH_START.@SpArcFiRe_extracted@SpArcFiRe-master@scripts@SpArcFiRe-pyvenv@lib@python2.7@site-packages@scipy@interpolate@interpolate.py@.PATH_END.py
|
{
"filename": "utils.py",
"repo_name": "johnh2o2/cuvarbase",
"repo_path": "cuvarbase_extracted/cuvarbase-master/cuvarbase/utils.py",
"type": "Python"
}
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import pkg_resources
def weights(err):
    """ generate observation weights from uncertainties """
    # Inverse-variance weighting: w_i proportional to 1 / err_i**2,
    # normalized so the weights sum to one.
    inv_var = np.power(err, -2)
    total = sum(inv_var)
    return inv_var / total
def find_kernel(name):
    """Return the filesystem path of the bundled CUDA kernel ``<name>.cu``."""
    resource = 'kernels/%s.cu' % (name)
    return pkg_resources.resource_filename('cuvarbase', resource)
def _module_reader(fname, cpp_defs=None):
txt = open(fname, 'r').read()
if cpp_defs is None:
return txt
preamble = ['#define {key} {value}'.format(key=key,
value=('' if value is None
else value))
for key, value in cpp_defs.items()]
txt = txt.replace('//{CPP_DEFS}', '\n'.join(preamble))
return txt
def tophat_window(t, t0, d):
    """Unit-height boxcar window: 1 where |t - t0| < d, else 0."""
    inside = np.absolute(t - t0) < d
    window = np.zeros_like(t)
    window[inside] += 1.
    # Normalize by the peak (1 whenever any sample falls inside the box).
    return window / max(window)
def gaussian_window(t, t0, d):
    """Gaussian window centered on t0 with width d, normalized to unit peak."""
    window = np.exp(-0.5 * np.power(t - t0, 2) / (d * d))
    # An empty array has no maximum; dividing by 1 keeps it empty.
    peak = 1. if len(window) == 0 else max(window)
    return window / peak
def autofrequency(t, nyquist_factor=5, samples_per_peak=5,
                  minimum_frequency=None,
                  maximum_frequency=None, **kwargs):
    """
    Determine a suitable frequency grid for data.

    The grid spacing assumes the peak width is set by the observational
    baseline — a good assumption when the baseline is much longer than the
    oscillation period, but poor when searching for periods longer than
    the baseline. The default maximum frequency is based on the "average
    Nyquist frequency", which may not be meaningful for irregular sampling;
    adjust it via `nyquist_factor` or `maximum_frequency`.

    Parameters
    ----------
    samples_per_peak : float (optional, default=5)
        The approximate number of desired samples across the typical peak
    nyquist_factor : float (optional, default=5)
        The multiple of the average nyquist frequency used to choose the
        maximum frequency if maximum_frequency is not provided.
    minimum_frequency : float (optional)
        If specified, then use this minimum frequency rather than one
        chosen based on the size of the baseline.
    maximum_frequency : float (optional)
        If specified, then use this maximum frequency rather than one
        chosen based on the average nyquist frequency.

    Returns
    -------
    frequency : ndarray or Quantity
        The heuristically-determined optimal frequency bin
    """
    span = max(t) - min(t)
    # Frequency spacing: `samples_per_peak` samples across a peak whose
    # width is set by the observational baseline.
    dfreq = 1. / (span * samples_per_peak)

    # Index of the first frequency bin (never zero frequency).
    first_bin = 1
    if minimum_frequency is not None:
        first_bin = max([first_bin, int(minimum_frequency / dfreq)])

    # Number of bins: from the requested maximum, or the average-Nyquist
    # heuristic scaled by nyquist_factor.
    if maximum_frequency is not None:
        nbins = int(maximum_frequency / dfreq) - first_bin
    else:
        nbins = int(0.5 * samples_per_peak * nyquist_factor * len(t))

    return dfreq * (first_bin + np.arange(nbins))
def dphase(dt, freq):
    """Phase separation of time lag `dt` at `freq`, folded onto [0, 0.5]."""
    cycles = dt * freq
    frac = cycles - np.floor(cycles)
    # Fold: a fractional phase of 0.7 is only 0.3 away from zero phase.
    if frac < 0.5:
        return frac
    return 1 - frac
def get_autofreqs(t, **kwargs):
    """Call `autofrequency`, forwarding only the keywords it understands."""
    accepted = ('minimum_frequency', 'maximum_frequency',
                'nyquist_factor', 'samples_per_peak')
    forwarded = {key: kwargs[key] for key in kwargs if key in accepted}
    return autofrequency(t, **forwarded)
|
johnh2o2REPO_NAMEcuvarbasePATH_START.@cuvarbase_extracted@cuvarbase-master@cuvarbase@utils.py@.PATH_END.py
|
{
"filename": "_hovertextsrc.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/choropleth/_hovertextsrc.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class HovertextsrcValidator(_plotly_utils.basevalidators.SrcValidator):
    """Validator for the `hovertextsrc` attribute of `choropleth` traces."""

    def __init__(self, plotly_name="hovertextsrc", parent_name="choropleth", **kwargs):
        # `edit_type` may be overridden by the caller; default is "none".
        edit_type = kwargs.pop("edit_type", "none")
        super(HovertextsrcValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@choropleth@_hovertextsrc.py@.PATH_END.py
|
{
"filename": "utils.py",
"repo_name": "LoganAMorrison/blackthorn",
"repo_path": "blackthorn_extracted/blackthorn-master/blackthorn/old/utils.py",
"type": "Python"
}
|
from typing import Dict, Union, List, Tuple
import numpy as np
from .rh_neutrino import Gen
from .fields import UpQuark, CharmQuark, TopQuark
from .fields import DownQuark, StrangeQuark, BottomQuark
from .fields import Electron, Muon, Tau
from .fields import ElectronNeutrino, MuonNeutrino, TauNeutrino
# (name, generation) pairs for the charged leptons, in generation order.
LEPTON_STR_GEN: List[Tuple[str, Gen]] = [
    ("e", Gen.Fst),
    ("mu", Gen.Snd),
    ("tau", Gen.Trd),
]
# (name, generation) pairs for the up-type quarks.
UP_QUARK_STR_GEN: List[Tuple[str, Gen]] = [
    ("u", Gen.Fst),
    ("c", Gen.Snd),
    ("t", Gen.Trd),
]
# (name, generation) pairs for the down-type quarks.
DOWN_QUARK_STR_GEN: List[Tuple[str, Gen]] = [
    ("d", Gen.Fst),
    ("s", Gen.Snd),
    ("b", Gen.Trd),
]

# Union aliases, each covering one full three-generation family.
UpQuarkType = Union[UpQuark, CharmQuark, TopQuark]
DownQuarkType = Union[DownQuark, StrangeQuark, BottomQuark]
ChargedLeptonType = Union[Electron, Muon, Tau]
NeutrinoType = Union[ElectronNeutrino, MuonNeutrino, TauNeutrino]

# Singleton field instances, ordered by generation (Fst, Snd, Trd) so
# they can be indexed with gen_to_index().
UP_QUARKS: Tuple[UpQuark, CharmQuark, TopQuark] = (UpQuark(), CharmQuark(), TopQuark())
DOWN_QUARKS: Tuple[DownQuark, StrangeQuark, BottomQuark] = (
    DownQuark(),
    StrangeQuark(),
    BottomQuark(),
)
CHARGED_LEPTONS: Tuple[Electron, Muon, Tau] = (Electron(), Muon(), Tau())
NEUTRINOS: Tuple[ElectronNeutrino, MuonNeutrino, TauNeutrino] = (
    ElectronNeutrino(),
    MuonNeutrino(),
    TauNeutrino(),
)

# Map from PDG Monte-Carlo particle codes to short string names; negative
# codes are the antiparticles (marked with a "~" suffix).
PDG_TO_NAME_DICT: Dict[int, str] = {
    # quarks
    1: "d",
    2: "u",
    3: "s",
    4: "c",
    5: "b",
    6: "t",
    # Leptons
    11: "e",
    12: "ve",
    13: "mu",
    14: "vmu",
    15: "tau",
    16: "vtau",
    # Bosons
    21: "g",
    22: "a",
    23: "z",
    24: "w",
    25: "h",
    # Anti quarks
    -1: "d~",
    -2: "u~",
    -3: "s~",
    -4: "c~",
    -5: "b~",
    -6: "t~",
    # Anti charged lepton
    -11: "e~",
    -13: "mu~",
    -15: "tau~",
    # Anti bosons
    -24: "w~",
}
def gen_to_index(gen: Gen) -> int:
    """Map a generation enum to a 0-based index (Fst->0, Snd->1, Trd->2)."""
    if gen == Gen.Fst:
        return 0
    if gen == Gen.Snd:
        return 1
    return 2
def gen_to_up_quark(gen: Gen) -> UpQuarkType:
    """Return the up-type quark field of generation `gen`."""
    idx = gen_to_index(gen)
    return UP_QUARKS[idx]


def gen_to_down_quark(gen: Gen) -> DownQuarkType:
    """Return the down-type quark field of generation `gen`."""
    idx = gen_to_index(gen)
    return DOWN_QUARKS[idx]


def gen_to_charged_lepton(gen: Gen) -> ChargedLeptonType:
    """Return the charged-lepton field of generation `gen`."""
    idx = gen_to_index(gen)
    return CHARGED_LEPTONS[idx]


def gen_to_neutrino(gen: Gen) -> NeutrinoType:
    """Return the neutrino field of generation `gen`."""
    idx = gen_to_index(gen)
    return NEUTRINOS[idx]
def kallen_lambda(a: float, b: float, c: float) -> float:
    """Kallen triangle function: a^2 + b^2 + c^2 - 2(ab + ac + bc)."""
    squares = a**2 + b**2 + c**2
    cross = a * b + a * c + b * c
    return squares - 2 * cross
def energies_two_body(q: float, m1: float, m2: float) -> Tuple[float, float]:
    """Energies of a two-body final state (masses m1, m2) in the frame
    where the total invariant mass is `q`; e1 + e2 == q."""
    e1 = (q**2 + m1**2 - m2**2) / (2 * q)
    # The partner energy carries the mass-squared difference with the
    # opposite sign, so the pair sums to q.
    e2 = (q**2 - m1**2 + m2**2) / (2 * q)
    return (e1, e2)
def pdg_to_name(pdg: int) -> str:
    """Translate a PDG particle code into its short string name."""
    label = PDG_TO_NAME_DICT.get(pdg)
    if label is None:
        raise ValueError(f"Invalid PDG code {pdg}.")
    return label
# LaTeX labels for final-state tokens used by state_to_latex().  Keys use
# the short naming scheme: "v*" neutrinos, "*bar" antiparticles, "i"/"j"
# generic generation indices, plus light mesons at the end.
TEX_DICT = {
    "v": r"$\nu$",
    "vi": r"$\nu_{i}$",
    "vj": r"$\nu_{j}$",
    "ve": r"$\nu_{e}$",
    "vmu": r"$\nu_{\mu}$",
    "vtau": r"$\nu_{\tau}$",
    "ell": r"$\ell^{\pm}$",
    "elli": r"$\ell_{i}^{\pm}$",
    "ellj": r"$\ell_{j}^{\pm}$",
    "e": r"$e^{\pm}$",
    "mu": r"$\mu^{\pm}$",
    "tau": r"$\tau^{\pm}$",
    "ellbar": r"$\ell^{\mp}$",
    "ellibar": r"$\ell_{i}^{\mp}$",
    "elljbar": r"$\ell_{j}^{\mp}$",
    "ebar": r"$e^{\mp}$",
    "mubar": r"$\mu^{\mp}$",
    "taubar": r"$\tau^{\mp}$",
    "u": r"$u$",
    "ui": r"$u_{i}$",
    "uj": r"$u_{j}$",
    "c": r"$c$",
    "t": r"$t$",
    "ubar": r"$\bar{u}$",
    "uibar": r"$\bar{u}_{i}$",
    "ujbar": r"$\bar{u}_{j}$",
    "cbar": r"$\bar{c}$",
    "tbar": r"$\bar{t}$",
    "d": r"$d$",
    "di": r"$d_{i}$",
    "dj": r"$d_{j}$",
    "s": r"$s$",
    "b": r"$b$",
    "dbar": r"$\bar{d}$",
    "dibar": r"$\bar{d}_{i}$",
    "djbar": r"$\bar{d}_{j}$",
    "sbar": r"$\bar{s}$",
    "bbar": r"$\bar{b}$",
    "h": r"$H$",
    "z": r"$Z$",
    "w": r"$W^{\pm}$",
    "a": r"$\gamma$",
    "wbar": r"$W^{\mp}$",
    "pi": r"$\pi^{\pm}$",
    "pibar": r"$\pi^{\mp}$",
    "k": r"$K^{\pm}$",
    "eta": r"$\eta$",
    "pi0": r"$\pi^{0}$",
    "k0": r"$K^{0}$",
    "k0bar": r"$\bar{K}^{0}$",
}
def state_to_latex(state: str) -> str:
    """
    Convert a string with space-separated states into a LaTeX equivalent.
    """
    pieces = [TEX_DICT[token] for token in state.split(" ")]
    return " + ".join(pieces)
|
LoganAMorrisonREPO_NAMEblackthornPATH_START.@blackthorn_extracted@blackthorn-master@blackthorn@old@utils.py@.PATH_END.py
|
{
"filename": "edmcmc.py",
"repo_name": "avanderburg/edmcmc",
"repo_path": "edmcmc_extracted/edmcmc-main/edmcmc.py",
"type": "Python"
}
|
import numpy as np
import time
import pdb
class mcmcstr:
    """Container for the results of an `edmcmc` run.

    Holds the raw walker chains plus burn-in-trimmed and flattened views,
    the negative log-likelihood history, and bookkeeping arrays recording
    which walker/link each flattened sample came from.
    """

    def __init__(self, chains, flatchains, allneglogl, flatneglogl, whichwalker, whichlink,
                 fullchains, fullneglogl, nburnin, nlink, nwalkers, npar, acceptancerate):
        self.acceptancerate = acceptancerate
        self.chains = chains
        self.flatchains = flatchains
        self.allneglogl = allneglogl
        self.flatneglogl = flatneglogl
        self.whichwalker = whichwalker
        self.whichlink = whichlink
        self.fullchains = fullchains
        self.nburnin = nburnin
        self.nlink = nlink
        self.nwalkers = nwalkers
        self.npar = npar
        # Final position of every walker (useful for restarting a run).
        self.lastpos = fullchains[:,nlink-1,:]
        self.fullneglogl = fullneglogl
        # Locate the single lowest neg-log-likelihood sample over all
        # walkers and links; its parameters are the best-fit estimate.
        mnll = np.unravel_index(np.argmin(fullneglogl, axis=None), fullneglogl.shape)
        self.bestpar = fullchains[mnll[0],mnll[1],:]

    def get_chains(self, nburnin = None, nthin = 1, flat=False, returnDiag=False):
        """Return the chains with burn-in removed and optional thinning.

        With flat=True the walker axis is folded into the sample axis;
        with returnDiag=True the matching link indices, walker indices and
        negative log-likelihoods are returned alongside the chains.
        """
        if nburnin == None:
            nburnin = self.nburnin
        # Link indices that survive burn-in removal and thinning.
        indices = nburnin + np.arange(np.floor((self.nlink - nburnin)/nthin)) * nthin
        cutchains = self.fullchains[:,[int(index) for index in indices],:]
        if returnDiag:
            cutneglogl = self.fullneglogl[:,[int(index) for index in indices]]
            # (nwalkers, nlink) grids labelling each sample's walker/link.
            fullwhichwalker = np.transpose(np.outer(np.ones(self.nlink), np.arange(self.nwalkers)))
            cutwhichwalker = fullwhichwalker[:,[int(index) for index in indices]]
            fullwhichlink = np.outer(np.ones(self.nwalkers), np.arange(self.nlink))
            cutwhichlink = fullwhichlink[:,[int(index) for index in indices]]
        if not flat:
            if not returnDiag:
                return cutchains
            if returnDiag:
                return cutchains,cutwhichlink, cutwhichwalker, cutneglogl
        if flat:
            # Fold the walker axis into the sample axis, one walker at a time.
            cutflatchains = np.zeros((self.nwalkers * (len(indices)), self.npar))
            if returnDiag:
                cutflatneglogl = np.zeros(self.nwalkers * (len(indices)))
                cutflatwhichwalker = np.zeros(self.nwalkers * (len(indices)))
                cutflatwhichlink = np.zeros(self.nwalkers * (len(indices)))
            for i in range(self.nwalkers):
                for j in range(self.npar):
                    cutflatchains[i * (len(indices)):(i + 1)* (len(indices)), j] = cutchains[i,:,j]
                if returnDiag:
                    cutflatneglogl[i * (len(indices)):(i + 1)* (len(indices))] = cutneglogl[i,:]
                    cutflatwhichwalker[i * (len(indices)):(i + 1)* (len(indices))] = cutwhichwalker[i,:]
                    cutflatwhichlink[i * (len(indices)):(i + 1)* (len(indices))] = cutwhichlink[i,:]
            if not returnDiag:
                return cutflatchains
            if returnDiag:
                return cutflatchains, cutflatwhichlink, cutflatwhichwalker, cutflatneglogl#, ...

    def onegelmanrubin(self, chain): #taken from http://joergdietrich.github.io/emcee-convergence.html
        """Gelman-Rubin R-hat for one parameter's (walker, link) chain."""
        # Within-chain variance W: mean of the per-walker sample variances.
        ssq = np.var(chain, axis=1, ddof=1)
        W = np.mean(ssq, axis=0)
        # Between-chain variance B from the per-walker means.
        thetab = np.mean(chain, axis=1)
        thetabb = np.mean(thetab, axis=0)
        m = chain.shape[0]
        n = chain.shape[1]
        B = n / (m - 1) * np.sum((thetabb - thetab)**2, axis=0)
        # Pooled variance estimate, then R-hat = sqrt(var_pooled / W).
        var_theta = (n - 1) / n * W + 1 / n * B
        rhat = np.sqrt(var_theta / W)
        return rhat

    def gelmanrubin(self, nburnin = None, nend = None):
        """Return the Gelman-Rubin R-hat statistic for every parameter."""
        if nburnin == None:
            nburnin = self.nburnin
        if nend == None:
            nend = self.nlink
        cutchains = self.fullchains[:,nburnin:nend,:]
        grstats = np.zeros(self.npar)
        for i in range(self.npar):
            grstats[i] = self.onegelmanrubin(cutchains[:,:,i])
        return grstats
def wrapfunctionmulti(theseinputs, likelihoodfunction, args):
    """Evaluate `likelihoodfunction` on every row of a parameter block.

    Used as the per-process worker when `edmcmc` runs with ncores > 1.

    Parameters
    ----------
    theseinputs : tuple
        One-element tuple whose first item is a 2-D array of parameter
        vectors, one vector per row.
    likelihoodfunction : callable
        Evaluated once per row; must return a scalar.
    args : tuple or None
        Extra positional arguments forwarded to `likelihoodfunction`,
        or None for no extra arguments.

    Returns
    -------
    numpy.ndarray
        1-D array with one function value per input row.
    """
    params = theseinputs[0]
    # Splatting an empty tuple makes the no-args call identical to the
    # original None branch, collapsing the duplicated loops; `is None`
    # replaces the non-idiomatic `!= None` comparison.
    extra = () if args is None else args
    outputs = np.zeros(params.shape[0])
    for i in range(len(outputs)):
        outputs[i] = likelihoodfunction(params[i], *extra)
    return outputs
def edmcmc(function, startparams_in, width_in, nwalkers=50, nlink=10000, nburnin=500, gamma_param=None,
           method='loglikelihood',parinfo=None, quiet=False, pos_in=None, args = None, ncores=1, bigjump=False,
           m1mac=True,adapt=False, dispersion_param = 1e-2):
    """Run a differential-evolution MCMC sampler and return an `mcmcstr`.

    `function` is evaluated per parameter vector and interpreted according
    to `method` ('loglikelihood', 'negloglikelihood', or 'chisq').  Each
    proposal moves a walker along the difference vector of two other
    randomly chosen walkers, scaled by `gamma_param`.
    """
    #method can be loglikelihood, chisq, or mpfit
    #outputs = pnew, perror, chains, whichwalker, whichlink, allneglogl,
    #pos_in is an array with size (nwalkers, npar) of starting positions.

    # --- resolve how many worker processes to use ------------------------
    if ncores ==1: ncorestouse = 1
    if ncores >1:
        # NOTE(review): presumably `multiprocess` (dill-based) is needed on
        # M1 Macs where stock `multiprocessing` cannot serialize the
        # likelihood function -- confirm.
        if not m1mac: import multiprocessing as mp
        if m1mac: import multiprocess as mp
        numcorestotal = mp.cpu_count()
        ncorestouse = round(ncores)
        if ncorestouse > numcorestotal:
            print('Asked for more cores than exist on the machine, limiting to ' + str(numcorestotal))
            ncorestouse = numcorestotal
        if ncorestouse < 1:
            print('Asked for too few cores, reverting to 1')
            ncorestouse = 1

    # --- validate the inputs ---------------------------------------------
    possiblemethods = ['loglikelihood', 'negloglikelihood', 'chisq']
    if not method in possiblemethods:
        method = 'loglikelihood'
        print('Method unrecognized, reverting to log likelihood')
    width = np.copy(width_in)
    startparams = np.copy(startparams_in)
    if len(width) != len(startparams):
        print('Length of width array not equal to length of startparams. Returning')
        return 0
    if nburnin > nlink:
        nburnin = np.floor(nlink/2.0)
    npar = len(startparams)
    if nwalkers < 3 * npar or nwalkers <10:
        print('Not enough walkers - increasing to 3x number of parameters or a minimum of 10.')
        nwalkers = max((3 * npar, 10))

    # The DE proposal needs >= 2 dimensions; pad a 1-D problem with a
    # dummy zero-width parameter and strip it again before returning.
    onedimension = False
    if npar == 1:
        onedimension = True
        npar = 2
        startparams = np.append(startparams, 0)
        width = np.append(width, 0)

    # --- parameter limits and fixed parameters from parinfo --------------
    limits = np.zeros((2,npar))
    limits[0,:] = -1*np.inf
    limits[1,:] = np.inf
    if parinfo != None:
        if len(parinfo) != len(startparams):
            print('Length of parinfo not equal to length of startparams. Returning')
            return 0
        for i in range(len(parinfo)):
            pi = parinfo[i]
            if pi['fixed']:
                width[i] = 0        # zero width freezes the parameter
            if pi['limited'][0]:
                limits[0,i] = pi['limits'][0]
            if pi['limited'][1]:
                limits[1,i] = pi['limits'][1]
    nfree = np.sum(width !=0)
    llimits = np.tile(limits[0,:], (nwalkers, 1))
    ulimits = np.tile(limits[1,:], (nwalkers, 1))
    # Standard differential-evolution scale factor if none given.
    if gamma_param == None: gamma_param = 2.38/np.sqrt(2.0*nfree)

    # --- allocate chain storage and initialize the walkers ---------------
    position = np.zeros((nwalkers, (nlink), npar))
    allneglogl = np.zeros((nwalkers, nlink))
    accepted = np.zeros((nlink)) + np.nan
    lastneglogl = np.zeros(nwalkers) + np.inf
    allneglogl[:,0] = lastneglogl
    thispos = np.zeros((nwalkers, npar))
    infs = np.zeros(nwalkers) + np.inf
    for i in range(nwalkers):
        if np.all(pos_in == None):
            # Draw random starts around startparams until one yields a
            # finite likelihood inside the limits.
            counter = 0
            while lastneglogl[i] == np.inf:
                counter = counter + 1
                if counter % 1000000 == 0:
                    print("Can't find good starting parameters: Attempt number ", counter)
                thispos[i,:] = np.random.normal(0,1,npar) * width + startparams
                lowerlimit = thispos[i,:] < limits[0,:]
                upperlimit = thispos[i,:] > limits[1,:]
                if not any(lowerlimit.tolist() + upperlimit.tolist()):
                    if args != None:
                        output = function(thispos[i,:], *args)
                    if args == None:
                        output = function(thispos[i,:])
                    if method == 'loglikelihood':
                        lastneglogl[i] = -1 * output
                    if method == 'negloglikelihood':
                        lastneglogl[i] = output
                    if method == 'chisq':
                        lastneglogl[i] = 0.5 * output
        if np.all(pos_in != None):
            # Caller supplied explicit starting positions.
            thispos[i,:] = pos_in[i,:]
            if args != None:
                output = function(thispos[i,:], *args)
            if args == None:
                output = function(thispos[i,:])
            if method == 'loglikelihood':
                lastneglogl[i] = -1 * output
            if method == 'negloglikelihood':
                lastneglogl[i] = output
            if method == 'chisq':
                lastneglogl[i] = 0.5 * output
        position[i,0,:] = thispos[i,:]

    # --- pre-draw all random numbers for the whole run -------------------
    starttime = time.time()
    lastprinttime = starttime
    naccept = 0.0
    ntotal = 0.0
    ninstant = 10       # window (in links) for the instantaneous rate
    instantrate = np.nan
    randintbank1 = np.random.randint(0, nwalkers-1, (nlink, nwalkers))
    randintbank2 = np.random.randint(0, nwalkers-2, (nlink, nwalkers))
    normalbank = np.random.normal(0,1,(nlink, nwalkers, npar))
    uniformbank = np.random.uniform(0,1,(nlink, nwalkers))
    if ncorestouse > 1: P = mp.Pool(processes=ncorestouse)

    # --- main sampling loop ----------------------------------------------
    for i in range(1, nlink):
        # Pick, for every walker k, two distinct partners j and j2 != k;
        # the index shifts keep the draws uniform over the allowed values.
        js = randintbank1[i,:]#random.randint(nwalkers-1)
        ks = np.arange(nwalkers)
        js = js + (js >= ks)
        j2s = randintbank2[i,:]
        j2s = j2s + (j2s >= np.minimum(ks,js))
        j2s = j2s + (j2s >= np.maximum(ks,js))
        jthpos = position[js,i-1,:]
        j2thpos = position[j2s,i-1,:]
        if adapt and i % 10 == 9 and np.isfinite(instantrate): #Make adaptive changes to the gamma parameter to optimize the acceptance rate near 0.234.
            ratedifference = instantrate / 0.234
            gamma_param = gamma_param * max([min([ratedifference, 1.5]), 0.5])
        thisgamma = gamma_param
        # Every 10th link optionally proposes with gamma = 1 ("big jump")
        # to allow mode swapping.
        if bigjump and i % 10 == 9: thisgamma = 1
        # Differential-evolution proposal with a small dispersion term.
        newpars = position[:,i-1,:] + thisgamma * (1 + normalbank[i,:,:] * dispersion_param) * (j2thpos-jthpos)
        lowerlimits = newpars < llimits
        upperlimits = newpars > ulimits
        outofranges = np.logical_or(np.any(lowerlimits, axis=1), np.any(upperlimits, axis=1))
        outputs = np.zeros(nwalkers)
        theseneglogls = np.zeros(nwalkers)
        if ncorestouse > 1:
            # Split the in-range proposals into per-core groups and farm
            # them out through the worker pool.
            tocalc = np.where(np.logical_not(outofranges))[0]
            wrapperfunction = wrapfunctionmulti
            groups = []
            npercore = np.ceil(len(tocalc)/ncorestouse)
            for iii in range(ncorestouse):
                thisgroup = tocalc[int(npercore*iii):int(np.minimum(npercore*(iii+1), len(tocalc)))]
                groups.append(thisgroup)
            inputs = []
            if args != None:
                for group in groups:
                    inputs.append(((newpars[group,:],),function,args))
            if args == None:
                for group in groups:
                    inputs.append(((newpars[group,:],),function,None))
            outputswrapped = P.starmap(wrapperfunction, inputs)
            outputswrappedconcat = np.zeros(0)
            for iii in range(len(outputswrapped)):
                outputswrappedconcat = np.concatenate((outputswrappedconcat, np.array(outputswrapped[iii])))
            outputs[tocalc] = np.array(outputswrappedconcat)
        if ncorestouse == 1:
            # Serial evaluation of the in-range proposals.
            tocalc = np.where(np.logical_not(outofranges))[0]
            if args != None:
                for k in tocalc:
                    outputs[k] = function(newpars[k,:], *args)
            else:
                for k in tocalc:
                    outputs[k] = function(newpars[k,:])
        # Out-of-range proposals get +inf so they are always rejected.
        if method == 'loglikelihood':
            theseneglogls = np.choose(outofranges, (-1 * outputs, infs))
        if method == 'negloglikelihood':
            theseneglogls = np.choose(outofranges, (outputs, infs))
        if method == 'chisq':
            theseneglogls = np.choose(outofranges, (0.5 * outputs, infs))
        # Metropolis acceptance: q = exp(-(new - old) neg-log-likelihood).
        qs = np.exp(lastneglogl - theseneglogls)
        rs = uniformbank[i,:]
        accept = np.transpose(np.tile(1*(rs <= qs),(npar,1)))
        thispos = np.choose(accept, (position[:,i-1,:], newpars))
        position[:,i,:] = thispos
        newneglogl = np.choose(accept[:,0], (lastneglogl, theseneglogls))
        lastneglogl = newneglogl
        allneglogl[:,i] = newneglogl
        naccept = naccept + np.sum(accept[:,0])
        accepted[i] = np.sum(accept[:,0])
        ntotal = ntotal + nwalkers
        # Progress reporting (throttled to ~100 prints/second).
        thistime = time.time()
        tremaining = (thistime - starttime)/float(i) * (nlink - i - 1.0)
        days = np.floor(tremaining / 3600.0 / 24.0)
        hours = np.floor(tremaining / 3600.0 - 24 * days)
        minutes = np.floor(tremaining/60 - 24 * 60 * days - 60 * hours)
        seconds = (tremaining - 24 * 3600 * days - 3600 * hours - 60*minutes)
        if adapt and i > ninstant:
            instantrate = round(np.sum(accepted[i-ninstant:i])/(nwalkers*ninstant),2)
            outstr = str(int(days)) + ' days, ' + str(int(hours)).zfill(2) + ':' + str(int(minutes)).zfill(2) + ':' + str(round(seconds, 1)).zfill(4) + ' remains. Link ' + str(i+1) + ' of ' +str(nlink) + '. Overall acceptance rate = ' + str(round(naccept/ntotal,2)) + ', instantaneous = ' + str(instantrate)
        if (not adapt) or i < ninstant:
            instantrate = np.nan
            outstr = str(int(days)) + ' days, ' + str(int(hours)).zfill(2) + ':' + str(int(minutes)).zfill(2) + ':' + str(round(seconds, 1)).zfill(4) + ' remains. Link ' + str(i+1) + ' of ' +str(nlink) + '. Acceptance rate = ' + str(round(naccept/ntotal,2))
        if not quiet and (thistime - lastprinttime > 0.01 or i == nlink -1):
            lastprinttime = thistime
            print(outstr,end="\r")
            if i == nlink -1:
                print(outstr)
    if ncorestouse > 1:
        P.close()
        P.join()

    # --- assemble the burn-in-trimmed and flattened outputs --------------
    if not onedimension:
        chainsout = position[:,nburnin:nlink,:]#np.zeros((nwalkers, (nlink - nburnin), npar))
        flatchainsout = np.zeros((nwalkers * (nlink - nburnin), npar))
        allnegloglout = allneglogl[:, nburnin:nlink]#np.zeros((nwalkers, nlink - nburnin))
        flatnegloglout = np.zeros((nlink - nburnin)*nwalkers)
        whichwalkerout = np.zeros((nlink - nburnin)*nwalkers)
        whichlinkout = np.zeros((nlink - nburnin)*nwalkers)
        for i in range(nwalkers):
            for j in range(npar):
                flatchainsout[i * (nlink - nburnin):(i + 1)* (nlink - nburnin), j] = position[i,nburnin:nlink,j]
            flatnegloglout[i * (nlink - nburnin):(i + 1)* (nlink - nburnin)] = allneglogl[i,nburnin:nlink]
            whichwalkerout[i * (nlink - nburnin):(i + 1)* (nlink - nburnin)] += i
            whichlinkout[i * (nlink - nburnin):(i + 1)* (nlink - nburnin)] = np.arange(nburnin, nlink)
    if onedimension:
        # Strip the dummy second parameter that was padded in above.
        npar = 1
        chainsout = position[:,nburnin:nlink,0]#np.zeros((nwalkers, (nlink - nburnin), npar))
        flatchainsout = np.zeros((nwalkers * (nlink - nburnin), 1))
        allnegloglout = allneglogl[:, nburnin:nlink]#np.zeros((nwalkers, nlink - nburnin))
        flatnegloglout = np.zeros((nlink - nburnin)*nwalkers)
        whichwalkerout = np.zeros((nlink - nburnin)*nwalkers)
        whichlinkout = np.zeros((nlink - nburnin)*nwalkers)
        for i in range(nwalkers):
            flatchainsout[i * (nlink - nburnin):(i + 1)* (nlink - nburnin), 0] = position[i,nburnin:nlink,0]
            flatnegloglout[i * (nlink - nburnin):(i + 1)* (nlink - nburnin)] = allneglogl[i,nburnin:nlink]
            whichwalkerout[i * (nlink - nburnin):(i + 1)* (nlink - nburnin)] += i
            whichlinkout[i * (nlink - nburnin):(i + 1)* (nlink - nburnin)] = np.arange(nburnin, nlink)
    return(mcmcstr(chainsout, flatchainsout, allnegloglout, flatnegloglout, whichwalkerout,
                   whichlinkout, position, allneglogl, nburnin, nlink, nwalkers, npar, acceptancerate=naccept/ntotal))
|
avanderburgREPO_NAMEedmcmcPATH_START.@edmcmc_extracted@edmcmc-main@edmcmc.py@.PATH_END.py
|
{
"filename": "spec.py",
"repo_name": "langchain-ai/langchain",
"repo_path": "langchain_extracted/langchain-master/libs/langchain/langchain/agents/agent_toolkits/openapi/spec.py",
"type": "Python"
}
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.agent_toolkits.openapi.spec import (
ReducedOpenAPISpec,
reduce_openapi_spec,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
# Both deprecated names now live in langchain_community; map each public
# name to its new home so the importer can forward lookups there.
DEPRECATED_LOOKUP = {
    "ReducedOpenAPISpec": "langchain_community.agent_toolkits.openapi.spec",
    "reduce_openapi_spec": "langchain_community.agent_toolkits.openapi.spec",
}

# Importer that resolves the names above lazily and raises a deprecation
# warning when they are accessed.
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)


def __getattr__(name: str) -> Any:
    """Look up attributes dynamically."""
    # Module-level __getattr__ (PEP 562): only called for names not found
    # in the module namespace, i.e. the deprecated re-exports above.
    return _import_attribute(name)


__all__ = [
    "ReducedOpenAPISpec",
    "reduce_openapi_spec",
]
|
langchain-aiREPO_NAMElangchainPATH_START.@langchain_extracted@langchain-master@libs@langchain@langchain@agents@agent_toolkits@openapi@spec.py@.PATH_END.py
|
{
"filename": "psoap_retrieve_ST3.py",
"repo_name": "iancze/PSOAP",
"repo_path": "PSOAP_extracted/PSOAP-master/scripts/psoap_retrieve_ST3.py",
"type": "Python"
}
|
#!/usr/bin/env python
import argparse
parser = argparse.ArgumentParser(description="Reconstruct the composite spectra for A and B component.")
args = parser.parse_args()
import numpy as np
import matplotlib.pyplot as plt
from astropy.io import ascii
from scipy.linalg import cho_factor, cho_solve
from psoap import constants as C
from psoap.data import redshift, lredshift, Chunk
from psoap import covariance
from psoap import orbit
from psoap import utils
import multiprocessing as mp
import yaml
# Read the run configuration; the script must be launched from a directory
# containing config.yaml.
try:
    f = open("config.yaml")
    config = yaml.load(f)
    f.close()
except FileNotFoundError as e:
    print("You need to copy a config.yaml file to this directory, and then edit the values to your particular case.")
    raise
# NOTE(review): yaml.load without an explicit Loader can execute arbitrary
# Python tags; config.yaml is user-authored here, but yaml.safe_load would
# be the safer choice.
# Load the list of chunks
chunks = ascii.read(config["chunk_file"])
def process_chunk(row):
    """Reconstruct and plot the three component spectra for one chunk.

    `row` is an (order, wl0, wl1) record from the chunk table.  Plots and
    .npy arrays are written into the chunk's plots_* directory.
    """
    order, wl0, wl1 = row
    print("Processing order {}, wl0: {:.1f}, wl1: {:.1f}".format(order, wl0, wl1))
    chunk = Chunk.open(order, wl0, wl1, limit=config["epoch_limit"])
    n_epochs = chunk.n_epochs
    n_pix = chunk.n_pix

    # Use the parameters specified in the yaml file to create the spectra
    pars = config["parameters"]
    # Inner orbit of the hierarchical triple.
    q_in = pars["q_in"]
    K_in = pars["K_in"] # km/s
    e_in = pars["e_in"] #
    omega_in = pars["omega_in"] # deg
    P_in = pars["P_in"] # days
    T0_in = pars["T0_in"] # epoch
    # Outer orbit.
    q_out = pars["q_out"]
    K_out = pars["K_out"] # km/s
    e_out = pars["e_out"] #
    omega_out = pars["omega_out"] # deg
    P_out = pars["P_out"] # days
    T0_out = pars["T0_out"] # epoch
    gamma = pars["gamma"] # km/s
    # Gaussian-process amplitude and length-scale for each component
    # spectrum f (A), g (B), h (C).
    amp_f = pars["amp_f"] # flux
    l_f = pars["l_f"] # km/s
    amp_g = pars["amp_g"] # flux
    l_g = pars["l_g"] # km/s
    amp_h = pars["amp_h"] # flux
    l_h = pars["l_h"] # km/s

    dates = chunk.date1D
    orb = orbit.ST3(q_in, K_in, e_in, omega_in, P_in, T0_in, q_out, K_out, e_out, omega_out, P_out, T0_out, gamma, obs_dates=dates)

    # predict velocities for each epoch
    vAs, vBs, vCs = orb.get_component_velocities()

    # shift wavelengths according to these velocities to rest-frame of A component
    wls = chunk.wl
    lwls = chunk.lwl
    lwls_A = lredshift(lwls, -vAs[:,np.newaxis])
    lwls_B = lredshift(lwls, -vBs[:,np.newaxis])
    lwls_C = lredshift(lwls, -vCs[:,np.newaxis])

    # Drop masked pixels from all three rest-frame wavelength grids.
    chunk.apply_mask()
    lwls_A = lwls_A[chunk.mask]
    lwls_B = lwls_B[chunk.mask]
    lwls_C = lwls_C[chunk.mask]

    # reload this, including the masked data
    fl = chunk.fl
    sigma = chunk.sigma
    dates = chunk.date1D

    # Spectra onto which we want to predict new spectra.
    # These are 2X finely spaced as the data, and span the maximum range of the spectra at 0
    # velocity (barycentric frame).
    n_pix_predict = 2 * n_pix
    # These are the same input wavelegths.
    lwls_A_predict = np.linspace(np.min(lwls_A), np.max(lwls_A), num=n_pix_predict)
    wls_A_predict = np.exp(lwls_A_predict)
    lwls_B_predict = lwls_A_predict
    wls_B_predict = wls_A_predict
    lwls_C_predict = lwls_A_predict
    wls_C_predict = wls_A_predict

    # Joint GP prediction of the stacked component spectra (f, g, h).
    mu, Sigma = covariance.predict_f_g_h(lwls_A.flatten(), lwls_B.flatten(), lwls_C.flatten(), fl.flatten(), sigma.flatten(), lwls_A_predict, lwls_B_predict, lwls_C_predict, mu_f=0.0, mu_g=0.0, mu_h=0.0, amp_f=amp_f, l_f=l_f, amp_g=amp_g, l_g=l_g, amp_h=amp_h, l_h=l_h)
    sigma_diag = np.sqrt(np.diag(Sigma))

    # Split the stacked mean/uncertainty vectors into the f, g, h pieces.
    mu_f = mu[0:n_pix_predict]
    sigma_f = sigma_diag[0:n_pix_predict]
    mu_g = mu[n_pix_predict:2 * n_pix_predict]
    sigma_g = sigma_diag[n_pix_predict: 2 * n_pix_predict]
    mu_h = mu[2 * n_pix_predict:]
    sigma_h = sigma_diag[2 * n_pix_predict:]

    # Three-panel summary plot of the reconstructed spectra.
    fig, ax = plt.subplots(nrows=3, sharex=True)
    ax[0].plot(wls_A_predict, mu_f, "b")
    ax[0].set_ylabel(r"$f$")
    ax[1].plot(wls_B_predict, mu_g, "g")
    ax[1].set_ylabel(r"$g$")
    ax[2].plot(wls_C_predict, mu_h, "r")
    ax[2].set_ylabel(r"$h$")
    ax[-1].set_xlabel(r"$\lambda\,[\AA]$")

    plots_dir = "plots_" + C.chunk_fmt.format(order, wl0, wl1)
    fig.savefig(plots_dir + "/reconstructed.png", dpi=300)
    plt.close("all")

    # Persist the per-component spectra and the full posterior.
    np.save(plots_dir + "/f.npy", np.vstack((wls_A_predict, mu_f, sigma_f)))
    np.save(plots_dir + "/g.npy", np.vstack((wls_B_predict, mu_g, sigma_g)))
    np.save(plots_dir + "/h.npy", np.vstack((wls_C_predict, mu_h, sigma_h)))

    np.save(plots_dir + "/mu.npy", mu)
    np.save(plots_dir + "/Sigma.npy", Sigma)
# A laptop (e.g., mine) doesn't have enough memory to do this in parallel, so only serial for now
for chunk in chunks:
process_chunk(chunk)
|
ianczeREPO_NAMEPSOAPPATH_START.@PSOAP_extracted@PSOAP-master@scripts@psoap_retrieve_ST3.py@.PATH_END.py
|
{
"filename": "qphot.py",
"repo_name": "vterron/lemon",
"repo_path": "lemon_extracted/lemon-master/qphot.py",
"type": "Python"
}
|
#! /usr/bin/env python2
# Copyright (c) 2012 Victor Terron. All rights reserved.
# Institute of Astrophysics of Andalusia, IAA-CSIC
#
# This file is part of LEMON.
#
# LEMON is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import division
"""
This module implements a wrapper for the IRAF tasks 'qphot' (quick aperture
photometer) and 'txdump' (which print fields from selected records in an
APPHOT/DAOPHOT text database). The routines implemented in this module provide
a way to automatically do photometry on an image returning the records in a
qphot.QPhot instance. Temporary files are used for both qphot and txdump, but
they are automatically removed, so the entire process takes place in memory,
from the user's perspective.
"""
import collections
import functools
import itertools
import logging
import math
import os
import os.path
import re
import sys
import tempfile
import warnings
# LEMON modules
import fitsimage
import util
# Tell PyRAF to skip all graphics initialization and run in terminal-only mode.
# Otherwise we will get annoying warning messages (such as "could not open
# XWindow display" or "No graphics display available for this session") when
# working at a remote terminal or at a terminal without any X Windows support.
# Any tasks which attempt to display graphics will fail, of course, but we are
# not going to make use of any of them, anyway.
# Must be set *before* PyRAF is imported below, as PyRAF reads it at import
# time to decide whether to initialize graphics.
os.environ["PYRAF_NO_DISPLAY"] = "1"
# When PyRAF is imported, it creates, unless it already exists, a pyraf/
# directory for cache in the current working directory. It also complains that
# "Warning: no login.cl found" if this IRAF file cannot be found either. Avoid
# these two annoying messages, and do not clutter the filesystem with pyraf/
# directories, by temporarily changing the current working directory to that of
# LEMON, where the pyraf/ directory and login.cl were generated by setup.py.
# Import PyRAF with LEMON's own directory as the working directory, so that
# the pyraf/ cache directory and login.cl generated by setup.py are found
# (see the explanation in the comment block above).
with util.tmp_chdir(os.path.dirname(os.path.abspath(__file__))):
    import pyraf.iraf
    from pyraf.iraf import digiphot, apphot  # 'digiphot.apphot' package

    # Turn PyRAF process caching off; otherwise, if we spawn multiple processes
    # and run them in parallel, each one of them would use the same IRAF running
    # executable, which could sometimes result in the most arcane of errors.
    pyraf.iraf.prcacheOff()
# Decorate pyraf.subproc.Subprocess.__del__() to catch the SubprocessError
# exception that it occasionally raises (when the process is not gone after
# sending the TERM and KILL signals to it) and log it with level DEBUG on the
# root logger. As explained in the Python data model, uncaught exceptions in
# __del__() are ignored and a warning message, which we want to get rid of,
# such as the following, printed to the standard error instead:
#
# Exception pyraf.subproc.SubprocessError: SubprocessError("Failed
# kill of subproc 24600, '/iraf/iraf/bin.linux/x_images.e -c', with
# signals ['TERM', 'KILL']",) in <bound method Subprocess.__del__
# of <Subprocess '/iraf/iraf/bin.linux/x_images.e -c', at
# 7f9f3f408710>> ignored
def log_uncaught_exceptions(func):
    """Decorator to log uncaught exceptions with level DEBUG.

    This decorator catches any exception raised by the decorated function and
    logs it with level DEBUG on the root logger, returning None instead of
    propagating it. Only subclasses of Exception are caught, as we do not want
    to swallow SystemExit or KeyboardInterrupt. The usage of this decorator
    makes probably only sense when the function raising the uncaught exception
    cannot be fixed, for example when working with a third-party library.
    """

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except Exception as e:
            # The original unpacked sys.exc_info() into locals named 'type'
            # and 'traceback', shadowing the built-ins; the caught exception
            # object already provides everything the log message needs.
            msg = "%s raised %s('%s')" % (func.__name__, type(e).__name__, e)
            logging.debug(msg)

    return wrapper
# Monkey-patch Subprocess.__del__() so that the SubprocessError it may raise
# (see the explanation in the comment block above) is logged at level DEBUG
# instead of being printed to standard error as an "ignored exception".
pyraf.subproc.Subprocess.__del__ = log_uncaught_exceptions(
    pyraf.subproc.Subprocess.__del__
)
class MissingFITSKeyword(RuntimeWarning):
    """Warning about keywords that cannot be read from a header (non-fatal).

    Issued by QPhot.run() when qphot reports that the exposure-time keyword
    is missing from the FITS header: photometry still runs, but the computed
    magnitudes are then not normalized to a one-time-unit exposure.
    """

    pass
typename = "QPhotResult"
field_names = "x, y, mag, sum, flux, stdev"


class QPhotResult(collections.namedtuple(typename, field_names)):
    """A single photometric measurement, one per astronomical object.

    Each of the per-object records output by IRAF's qphot is parsed into one
    of these objects:

    x, y  - the x- and y-coordinates of the center of the object.
    mag   - the instrumental magnitude of the object in the aperture.
    sum   - the total number of counts in the aperture *including* the sky.
    flux  - the total number of counts in the aperture *excluding* the sky.
    stdev - the standard deviation of the best estimate of the sky value,
            per pixel.
    """

    def snr(self, gain):
        """Return the signal-to-noise ratio of the photometric measurement.

        The S/N quantifies how accurately the object was observed: it is the
        ratio of the desired signal to the underlying noise, which for photon
        arrivals follows Poisson statistics. When the sky is not negligible
        the formula is S/N = (total counts - sky counts) / sqrt(total
        counts), here computed as (flux * gain) / sqrt(sum * gain). A S/N of
        ~100 (noise at one percent of the signal, i.e. an observational error
        of ~0.01 mag) is typically considered a good photoelectric
        measurement [Henden, Astronomical Photometry, 1982].

        'gain' is the CCD gain in e-/ADU: the formulas apply to numbers of
        electrons, and using ADUs directly would introduce an error equal to
        the square root of the actual gain. If the gain is unknown, a value
        of one may be used, with the caveat that the result is then only
        approximate. ValueError is raised if 'gain' is not positive.

        Edge cases: a zero 'sum' (e.g., photometry on coordinates with no
        visible object) yields exactly 0.0, avoiding a division by zero. A
        negative 'sum' -- which qphot can legitimately return, for example
        when bias subtraction produced negative pixels -- yields a negative
        S/N, a red flag that something went wrong with this measurement.
        """

        if gain <= 0:
            raise ValueError("CCD gain must be a positive value")

        # Guard against division by zero: both 'sum' and 'flux' may be zero
        # when the measured coordinates contain no (visible) object.
        if not self.sum:
            return 0.0

        signal = self.flux * gain
        noise = math.sqrt(abs(self.sum * gain))
        if self.sum < 0.0:
            # Negative total counts: force a negative S/N as a warning sign.
            return -abs(signal) / noise
        return signal / noise
class QPhot(list):
    """The photometry of an image, as returned by IRAF's qphot.

    This class stores the result of the photometry done by IRAF's qphot (quick
    aperture photometer) on an image. A QPhotResult object is created for each
    object listed in the text file: after calling QPhot.run(), this subclass of
    the built-in list contains the photometric measurement of each astronomical
    object. The order of these QPhotResult objects is guaranteed to respect
    that in which coordinates are listed in the text file. In other words: the
    i-th QPhotResult object corresponds to the i-th astronomical object.

    """

    def __init__(self, img_path, coords_path):
        """Instantiation method for the QPhot class.

        img_path - path to the FITS image on which to do photometry.
        coords_path - path to the text file with the celestial coordinates
                      (right ascension and declination) of the astronomical
                      objects to be measured. These objects must be listed one
                      per line, in two columns. Note that this class does *not*
                      apply proper-motion correction, so the coordinates must
                      be corrected before being written to the file. In case
                      the proper motions of the objects are listed in the file,
                      in columns third and fourth, ValueError is raised.

        """

        # [FIXED] The original code called super(list, self).__init__(),
        # which looks up __init__() *past* 'list' in the MRO and therefore
        # never invokes the list initializer. Start the cooperative chain at
        # our own class instead.
        super(QPhot, self).__init__()
        self.image = fitsimage.FITSImage(img_path)
        self.coords_path = coords_path

        for ra, dec, pm_ra, pm_dec in util.load_coordinates(self.coords_path):

            if ra == 0 and dec == 0:
                msg = (
                    "the right ascension and declination of one or more "
                    "astronomical objects in '%s' is zero. This is a very bad "
                    "sign: these are the celestial coordinates that SExtractor "
                    "uses for sources detected on a FITS image that has not been "
                    "calibrated astrometrically (may that be your case?), and "
                    "without that it is impossible to do photometry on the "
                    "desired coordinates" % self.coords_path
                )
                # Emit warning only once
                warnings.warn(msg)
                break

            if pm_ra is not None or pm_dec is not None:
                msg = (
                    "at least one object in the '%s' file lists its proper "
                    "motions. This is not allowed. The coordinates must be "
                    "written to the file already adjusted for their proper "
                    "motions, as this class cannot apply any correction"
                    % self.coords_path
                )
                raise ValueError(msg)

    @property
    def path(self):
        """ Return the path to the FITS image. """
        return self.image.path

    def clear(self):
        """ Remove all the photometric measurements. """
        del self[:]

    def run(self, annulus, dannulus, aperture, exptimek, cbox=0):
        """Run IRAF's qphot on the FITS image.

        This method is a wrapper, equivalent to (1) running 'qphot' on a FITS
        image and (2) using 'txdump' in order to extract some fields from the
        resulting text database. It allows to easily do photometry on a FITS
        image, storing the photometric measurements as a sequence of
        QPhotResult objects, and returns the number of astronomical objects
        on which photometry has been done.

        No matter how convenient, QPhot.run() should still be seen as a
        low-level method: INDEF objects will have a magnitude and standard
        deviation of None, but it pays no attention to saturation levels --
        the sole reason being that IRAF's qphot, per se, provides no way of
        knowing if one or more pixels in the aperture are above some
        saturation level. For this very reason, the recommended approach to
        doing photometry is the module-level run() convenience function.

        Photometry is done on the objects whose coordinates are listed, one
        per line, in the text file given to QPhot.__init__(). The qphot
        output is saved to a temporary APPHOT text database (a), from which
        'txdump' extracts the needed fields to another temporary file (b),
        which is then parsed into one QPhotResult object per record. All
        previous photometric measurements are lost every time this method is
        run. Both temporary files (a, b) are guaranteed to be deleted on
        exit, even if an error is encountered.

        An important note: although the *input* that this method accepts are
        celestial coordinates, the resulting QPhotResult objects store the x-
        and y-coordinates of the object centers. IRAF's qphot supports
        'world' as the input coordinate system, but the output options are
        only 'logical', 'tv' and 'physical':
        http://stsdas.stsci.edu/cgi-bin/gethelp.cgi?qphot

        The x- and y-coordinates of the object centers may be reported as -1
        if the celestial coordinates fall considerably (approximately >= 100
        degrees) off the image: as of v.2.16.1, IRAF may then output an
        invalid floating-point number (such as '-299866.375-58'). Bug report
        submitted to the IRAF development team:
        [URL] http://iraf.net/forum/viewtopic.php?showtopic=1468373

        Arguments:
        annulus - the inner radius of the sky annulus, in pixels.
        dannulus - the width of the sky annulus, in pixels.
        aperture - the aperture radius, in pixels.
        exptimek - the image header keyword containing the exposure time, in
                   seconds. Needed by qphot in order to normalize the computed
                   magnitudes to an exposure time of one time unit. In case it
                   cannot be read from the FITS header, the MissingFITSKeyword
                   warning is issued and the default value of '' used instead.
                   Although non-fatal, this means that magnitudes will not be
                   normalized, which probably is not what you want.
        cbox - the width of the centering box, in pixels. Accurate centers for
               each astronomical object are computed using the centroid
               centering algorithm. This means that, unless this argument is
               zero (the default value), photometry is not done exactly on
               the specified coordinates, but where IRAF has determined that
               the actual, accurate center of each object is. This is usually
               a good thing, and helps improve the photometry.

        """

        self.clear()  # empty object

        try:

            # Temporary file to which the APPHOT text database produced by
            # qphot will be saved. Even if empty, it must be deleted before
            # calling qphot. Otherwise, an error message, stating that the
            # operation "would overwrite existing file", will be thrown.
            output_fd, qphot_output = tempfile.mkstemp(
                prefix=os.path.basename(self.path), suffix=".qphot_output", text=True
            )
            os.close(output_fd)
            os.unlink(qphot_output)

            # In case the FITS image does not contain the keyword for the
            # exposure time, qphot() shows a message such as: "Warning: Image
            # NGC2264-mosaic.fits Keyword: EXPTIME not found". This, however,
            # is not a warning, but a simple message written to standard error.
            # Filter this stream so that, if this message is written, it is
            # captured and issued as a MissingFITSKeyword warning instead.

            # Note the two whitespaces before 'Keyword'
            regexp = "Warning: Image (?P<msg>{0}  Keyword: {1} not found)".format(
                self.path, exptimek
            )
            args = sys.stderr, regexp, MissingFITSKeyword
            stderr = util.StreamToWarningFilter(*args)

            # Run qphot on the image and save the output to our temporary file.
            kwargs = dict(
                cbox=cbox,
                annulus=annulus,
                dannulus=dannulus,
                aperture=aperture,
                coords=self.coords_path,
                output=qphot_output,
                exposure=exptimek,
                wcsin="world",
                interactive="no",
                Stderr=stderr,
            )
            apphot.qphot(self.path, **kwargs)

            # Make sure the output was written to where we said
            assert os.path.exists(qphot_output)

            # Now extract the records from the APPHOT text database. We need
            # the path of another temporary file to which to save them. Even
            # if empty, we need to delete the temporary file created by
            # mkstemp(), as IRAF will not overwrite it.
            txdump_fd, txdump_output = tempfile.mkstemp(
                prefix=os.path.basename(self.path), suffix=".qphot_txdump", text=True
            )
            os.close(txdump_fd)
            os.unlink(txdump_output)

            # The type casting of Stdout to string is needed as txdump will not
            # work with unicode, if we happen to come across it: PyRAF requires
            # that redirection be to a file handle or string.
            txdump_fields = ["xcenter", "ycenter", "mag", "sum", "flux", "stdev"]
            pyraf.iraf.txdump(
                qphot_output,
                fields=",".join(txdump_fields),
                Stdout=str(txdump_output),
                expr="yes",
            )

            # Now open the output file again and parse the output of txdump,
            # creating a QPhotResult object for each record.
            with open(txdump_output, "rt") as txdump_fd:
                for line in txdump_fd:
                    fields = line.split()

                    # As of IRAF v.2.16.1, the qphot task may output an invalid
                    # floating-point number (such as "-299866.375-58") when the
                    # coordinates of the object to be measured fall considerably
                    # off (>= ~100 degrees) the image. That raises ValueError
                    # ("invalid literal for float()") when we attempt to convert
                    # the output xcenter or ycenter to float. In those cases, we
                    # use -1 as the fallback value.

                    # [MODERNIZED] 'except ValueError, e' is Python 2-only
                    # syntax; the 'as' form below is valid in Python >= 2.6
                    # as well, so the python2 shebang is still honored.
                    try:
                        xcenter_str = fields[0]
                        xcenter = float(xcenter_str)
                        msg = "%s: xcenter = %.8f" % (self.path, xcenter)
                        logging.debug(msg)
                    except ValueError as e:
                        msg = "%s: can't convert xcenter = '%s' to float (%s)"
                        logging.debug(msg % (self.path, xcenter_str, str(e)))
                        msg = "%s: xcenter set to -1 (fallback value)"
                        logging.debug(msg % self.path)
                        xcenter = -1

                    try:
                        ycenter_str = fields[1]
                        ycenter = float(ycenter_str)
                        msg = "%s: ycenter = %.8f" % (self.path, ycenter)
                        logging.debug(msg)
                    except ValueError as e:
                        msg = "%s: can't convert ycenter = '%s' to float (%s)"
                        logging.debug(msg % (self.path, ycenter_str, str(e)))
                        msg = "%s: ycenter set to -1 (fallback value)"
                        logging.debug(msg % self.path)
                        ycenter = -1

                    try:
                        mag_str = fields[2]
                        mag = float(mag_str)
                        msg = "%s: mag = %.5f" % (self.path, mag)
                        logging.debug(msg)
                    except ValueError:  # float("INDEF")
                        assert mag_str == "INDEF"
                        msg = "%s: mag = None ('INDEF')" % self.path
                        logging.debug(msg)
                        mag = None

                    sum_ = float(fields[3])
                    msg = "%s: sum = %.5f" % (self.path, sum_)
                    logging.debug(msg)

                    flux = float(fields[4])
                    msg = "%s: flux = %.5f" % (self.path, flux)
                    logging.debug(msg)

                    try:
                        stdev_str = fields[5]
                        stdev = float(stdev_str)
                        msg = "%s: stdev = %.5f" % (self.path, stdev)
                        logging.debug(msg)
                    except ValueError:  # float("INDEF")
                        assert stdev_str == "INDEF"
                        msg = "%s: stdev = None ('INDEF')" % self.path
                        logging.debug(msg)
                        stdev = None

                    args = xcenter, ycenter, mag, sum_, flux, stdev
                    self.append(QPhotResult(*args))

        finally:
            # Remove temporary files. The try-except is necessary because an
            # exception may be raised before 'qphot_output' and 'txdump_output'
            # have been defined.
            try:
                util.clean_tmp_files(qphot_output)
            except NameError:
                pass
            try:
                util.clean_tmp_files(txdump_output)
            except NameError:
                pass

        return len(self)
def get_coords_file(coordinates, year, epoch):
    """Return a coordinates file with the exact positions of the objects.

    Loop over 'coordinates', an iterable of astromatic.Coordinates objects, and
    apply proper motion correction, obtaining their exact positions for a given
    date. These proper-motion corrected coordinates are written to a temporary
    text file, listed one astronomical object per line and in two columns:
    right ascension and declination. Returns the path to the temporary file.
    The user of this function is responsible for deleting the file when done
    with it.

    Both 'year' and 'epoch' may be decimal numbers, such as 2014.25 for April
    1, 2014 (since, in common years, April 1 is the 91st day of the year, and
    91 / 365 = 0.24931507 = ~0.25). Please refer to the documentation of the
    Coordinates.get_exact_coordinates() method for further information.

    """

    kwargs = dict(prefix="%f_" % year, suffix="_J%d.coords" % epoch, text=True)
    fd, path = tempfile.mkstemp(**kwargs)
    fmt = "\t".join(["%.10f", "%.10f\n"])

    # [FIXED] Wrap the raw descriptor in a file object: os.write() of a text
    # string is fragile (it requires bytes under Python 3) and the descriptor
    # leaked if an exception was raised mid-loop. The 'with' block guarantees
    # the descriptor is closed in every case.
    with os.fdopen(fd, "w") as coords_fd:
        for coord in coordinates:
            # Do not apply any correction if pm_ra and pm_dec are None (which
            # means that the proper motion of the object is unknown) or zero
            # (because in this case the coordinates are always the same). Make
            # sure also that either none or both proper motions are None: we
            # cannot know one but not the other!
            if None in (coord.pm_ra, coord.pm_dec):
                assert coord.pm_ra is None
                assert coord.pm_dec is None
            if coord.pm_ra or coord.pm_dec:
                coord = coord.get_exact_coordinates(year, epoch=epoch)
            coords_fd.write(fmt % tuple(coord[:2]))

    return path
def run(
    img,
    coordinates,
    epoch,
    aperture,
    annulus,
    dannulus,
    maximum,
    datek,
    timek,
    exptimek,
    uncimgk,
    cbox=0,
):
    """Do photometry on a FITS image.

    This convenience function does photometry on a FITSImage object, applying
    proper-motion correction to a series of astronomical objects and measuring
    them. The FITS image must have been previously calibrated astrometrically,
    so that right ascensions and declinations are meaningful. Returns a QPhot
    object, using None as the magnitude of those astronomical objects that are
    INDEF (i.e., so faint that qphot could not measure anything) and positive
    infinity if they are saturated (i.e., if one or more pixels in the aperture
    are above the saturation level).

    Arguments:
    img - the fitsimage.FITSImage object on which to do photometry.
    coordinates - an iterable of astromatic.Coordinates objects, one for each
                  astronomical object to be measured.
    epoch - the epoch of the coordinates of the astronomical objects, used to
            compute the proper-motion correction. Must be an integer, such as
            2000 for J2000.
    aperture - the aperture radius, in pixels.
    annulus - the inner radius of the sky annulus, in pixels.
    dannulus - the width of the sky annulus, in pixels.
    maximum - number of ADUs at which saturation arises. If one or more pixels
              in the aperture are above this value, the magnitude of the
              astronomical object is set to positive infinity.
    datek - the image header keyword containing the date of the observation,
            in the format specified in the FITS Standard ('yy/mm/dd' for dates
            from 1900 through 1999, or the Y2K-compliant 'yyyy-mm-dd' /
            'yyyy-mm-ddTHH:MM:SS[.sss]'). Not necessary -- and not even read
            from the header -- if no Coordinates object has a proper motion.
    timek - the image header keyword containing the time at which the
            observation started, in the format HH:MM:SS[.sss]. Ignored if the
            time is included directly in the 'datek' keyword value, and, as
            with 'datek', if no object has a proper motion.
    exptimek - the image header keyword containing the exposure time. Needed
               by qphot in order to normalize the computed magnitudes to an
               exposure time of one time unit.
    uncimgk - the image header keyword containing the path to the image used
              to check for saturation. It is expected to be the original FITS
              file (that is, before any calibration step, since corrections
              such as flat-fielding may move a saturated pixel below the
              saturation level) of the very image on which photometry is
              done. If this argument is an empty string or None, saturation
              is checked for on 'img' itself.
    cbox - the width of the centering box, in pixels. Unless zero (the
           default), accurate centers for each object are computed with the
           centroid centering algorithm, so photometry is done on those
           centers rather than exactly on the specified coordinates. This is
           usually a good thing, and helps improve the photometry.

    """

    kwargs = dict(date_keyword=datek, time_keyword=timek, exp_keyword=exptimek)

    # The date of observation is only actually needed when we need to apply
    # proper motion corrections. Therefore, don't call FITSImage.year() unless
    # one or more of the astromatic.Coordinates objects have a proper motion.
    # This avoids an unnecessary KeyError exception when we do photometry on a
    # FITS image without the 'datek' or 'timek' keywords (for example, a mosaic
    # created with IPAC's Montage): when that happens we cannot apply proper
    # motion corrections, that's right, but that's not an issue if none of our
    # objects have a known proper motion.
    for coord in coordinates:
        if coord.pm_ra or coord.pm_dec:
            try:
                year = img.year(**kwargs)
                break
            except KeyError as e:
                # Include the missing FITS keyword in the exception message
                regexp = "keyword '(?P<keyword>.*?)' not found"
                match = re.search(regexp, str(e))
                assert match is not None
                msg = (
                    "{0}: keyword '{1}' not found. It is needed in order "
                    "to be able to apply proper-motion correction, as one "
                    "or more astronomical objects have known proper motions".format(
                        img.path, match.group("keyword")
                    )
                )
                raise KeyError(msg)
    else:
        # No object has a known proper motion, so don't call
        # FITSImage.year(). Use the same value as the epoch, so that when
        # get_coords_file() below applies the proper motion correction the
        # input and output coordinates are the same.
        year = epoch

    # The proper-motion corrected objects coordinates
    coords_path = get_coords_file(coordinates, year, epoch)
    img_qphot = QPhot(img.path, coords_path)
    img_qphot.run(annulus, dannulus, aperture, exptimek, cbox=cbox)

    # How do we know whether one or more pixels in the aperture are above a
    # saturation threshold? As suggested by Frank Valdes at the IRAF.net
    # forums, we can make a mask of the saturated values, on which we can do
    # photometry using the same aperture. If we get a non-zero flux, we know it
    # has saturation: http://iraf.net/forum/viewtopic.php?showtopic=1466068
    if not uncimgk:
        orig_img_path = img.path
    else:
        orig_img_path = img.read_keyword(uncimgk)
        if not os.path.exists(orig_img_path):
            msg = "image %s (keyword '%s' of image %s) does not exist"
            args = orig_img_path, uncimgk, img.path
            raise IOError(msg % args)

    try:

        # Temporary file to which the saturation mask is saved
        basename = os.path.basename(orig_img_path)
        mkstemp_prefix = "%s_satur_mask_%d_ADUS_" % (basename, maximum)
        kwargs = dict(prefix=mkstemp_prefix, suffix=".fits", text=True)
        mask_fd, satur_mask_path = tempfile.mkstemp(**kwargs)
        os.close(mask_fd)

        # IRAF's imexpr won't overwrite the file. Instead, it will raise an
        # IrafError exception stating that "IRAF task terminated abnormally
        # ERROR (1121, "FXF: EOF encountered while reading FITS file".
        os.unlink(satur_mask_path)

        # The expression that will be given to 'imexpr'. The space after the
        # colon is needed to avoid sexigesimal interpretation. 'a' is the first
        # and only operand, linked to our image at the invokation of the task.
        expr = "a>%d ? 1 : 0" % maximum
        logging.debug("%s: imexpr = '%s'" % (img.path, expr))
        logging.debug("%s: a = %s" % (img.path, orig_img_path))
        logging.info("%s: Running IRAF's imexpr..." % img.path)
        pyraf.iraf.images.imexpr(
            expr,
            a=orig_img_path,
            output=satur_mask_path,
            verbose="yes",
            Stdout=util.LoggerWriter("debug"),
        )
        assert os.path.exists(satur_mask_path)
        msg = "%s: IRAF's imexpr OK" % img.path
        logging.info(msg)
        msg = "%s: IRAF's imexpr output = %s"
        logging.debug(msg % (img.path, satur_mask_path))

        # Now we just do photometry again, on the same pixels, but this time on
        # the saturation mask. Those objects for which we get a non-zero flux
        # will be known to be saturated and their magnitude set to infinity.

        # If 'cbox' is other than zero, the center of each object may have been
        # recentered by qphot using the centroid centering algorithm. When that
        # is the case, we need to feed run() with these more accurate centers.
        # Since the QPhotResult objects contain x- and y-coordinates (qphot
        # does not support 'world' coordinates as the output system), we need
        # to convert them back to right ascension and declination.
        if cbox:
            root, _ = os.path.splitext(os.path.basename(img.path))
            kwargs = dict(prefix=root + "_", suffix="_satur.coords", text=True)
            os.unlink(coords_path)
            fd, coords_path = tempfile.mkstemp(**kwargs)
            # [FIXED] Write through a file object (see get_coords_file()):
            # os.write() of text is not bytes-safe and leaked the descriptor
            # if img.pix2world() raised an exception mid-loop.
            with os.fdopen(fd, "w") as coords_fd:
                for object_phot in img_qphot:
                    ra, dec = img.pix2world(object_phot.x, object_phot.y)
                    coords_fd.write("{0} {1}\n".format(ra, dec))

        mask_qphot = QPhot(satur_mask_path, coords_path)
        # No centering this time: if cbox != 0 the accurate centers for each
        # astronomical object have been computed using the centroid centering
        # algorithm, so we're already feeding run() with the accurate values.
        mask_qphot.run(annulus, dannulus, aperture, exptimek, cbox=0)
        os.unlink(coords_path)

        assert len(img_qphot) == len(mask_qphot)
        # zip() is used instead of the Python 2-only itertools.izip(): both
        # sequences are in-memory lists of equal (asserted) length.
        for index, (object_phot, object_mask) in enumerate(zip(img_qphot, mask_qphot)):

            if __debug__:
                # If cbox != 0 we cannot expect the coordinates to be the exact
                # same: the previous call to run() returned x and y coordinates
                # that we converted to celestial coordinates, and now qphot is
                # giving as output image coordinates again. It is unavoidable
                # to lose some precision. Anyway, this does not affect the
                # result: photometry was still done on almost the absolute
                # exact coordinates that we wanted it to.
                if not cbox:
                    assert object_phot.x == object_mask.x
                    assert object_phot.y == object_mask.y

            if object_mask.flux > 0:
                # [FIXED] namedtuple._replace() returns a *new* object, so the
                # result must be stored back into the QPhot list. The original
                # code merely rebound the loop variable, silently discarding
                # the infinite magnitude: saturated objects were returned with
                # their measured magnitude instead of +inf.
                img_qphot[index] = object_phot._replace(mag=float("infinity"))

    finally:
        # Remove saturation mask. The try-except is necessary because an
        # exception may be raised before 'satur_mask_path' is defined.
        try:
            util.clean_tmp_files(satur_mask_path)
        except NameError:
            pass

    return img_qphot
|
vterronREPO_NAMElemonPATH_START.@lemon_extracted@lemon-master@qphot.py@.PATH_END.py
|
{
"filename": "move_report_db_done_pngs.py",
"repo_name": "HETDEX/elixer",
"repo_path": "elixer_extracted/elixer-main/elixer/move_report_db_done_pngs.py",
"type": "Python"
}
|
"""
open all 3 db files
get list of inserted detectIDs that are in all 3
move those corresponding files out of all_pngs to done_pngs
"""
#todo: right now this is just a clone of make_report_db
#from hetdex_api import sqlite_utils as sql
import sqlite3 as sql
import numpy as np
#import argparse
import shutil
import os
import glob
# def parse_commandline(auto_force=False):
# desc = "move already processed image files"
# #parser = argparse.ArgumentParser(description=desc)
# #parser.add_argument('--prefix', help="report prefix", required=True, type=str)
# #parser.add_argument('--img_dir', help="Directory with the images", required=True, type=str)
# #parser.add_argument('--img_name', help="Wildcard image name", required=True, type=str)
# #parser.add_argument('--move_dir', help="Directory to move processed images", required=True, type=str)
# #parser.add_argument('--mv2dir', help="mv to directory (leaves in cwd if not provided)",required=False,type=str)
#
# args = parser.parse_args()
#
# return args
def get_db_connection(fn, readonly=True):
    """Return a SQLite3 database connection for the given database filename.

    Assumes the file exists; any sqlite3 error is trapped, printed, and None
    is returned instead.

    :param fn: path to the database file (None yields None)
    :param readonly: open in read-only mode via the URI "mode=ro" flag
    :return: sqlite3 connection object, or None
    """
    conn = None
    try:
        if fn is not None:
            if readonly:
                # The URI form is required to pass the read-only mode flag.
                # [FIXED] the module imports sqlite3 as 'sql', so the original
                # references to the bare names 'sqlite3' and 'Error' raised
                # NameError instead of ever opening a connection.
                conn = sql.connect("file:" + fn + "?mode=ro", uri=True)
            else:
                conn = sql.connect(fn)
    except sql.Error as e:
        print(e)

    return conn
def fetch_all_detectids(conn):
    """Return all detectids stored in a report database.

    Runs "SELECT detectid from report" over the given connection and returns
    the detectids as a flat list, one per row. If 'conn' is not a sqlite3
    connection, or the query fails, the error (if any) is printed and None is
    returned, preserving the original best-effort behavior.

    :param conn: open sqlite3 connection to an ELiXer report database
    :return: list of detectids, or None
    """
    try:
        # [FIXED] the module imports sqlite3 as 'sql', so the original
        # reference to the bare name 'sqlite3' raised NameError; also use
        # isinstance() instead of an exact type() comparison.
        if isinstance(conn, sql.Connection):
            cursor = conn.cursor()
            sql_statement = """SELECT detectid from report"""
            cursor.execute(sql_statement)
            rows_detections = cursor.fetchall()
            cursor.close()
            return [r[0] for r in rows_detections]
    except Exception as e:
        print(e)
def main():
    """Move fully-inserted detection PNGs from all_pngs/ to done_pngs/.

    Opens the three ELiXer report databases in the current directory
    (standard, neighbors and mini), collects the detectids present in *all
    three*, and moves the corresponding <id>.png, <id>_nei.png and
    <id>_mini.png files from all_pngs/ to done_pngs/. Exits with -1 if the
    expected directories or databases are missing, or if too many individual
    moves fail.
    """

    # Make sure both the source and destination directories exist.
    if os.path.exists("all_pngs"):
        if os.path.exists("done_pngs"):
            pass  # all good
        else:
            print("done_pngs not found")
            exit(-1)
    else:
        print("all_pngs not found")
        exit(-1)

    db_std = glob.glob("elixer_reports_*[0-9].db")  # should be exactly one
    db_nei = glob.glob("elixer_reports_[0-9]*_nei.db")  # should be exactly one
    db_mini = glob.glob("elixer_reports_[0-9]*_mini.db")  # should be exactly one

    # [FIXED] The original chained comparison (len(a) != len(b) != len(c) != 1)
    # only tested *pairwise* inequality, so e.g. three empty glob results
    # slipped through and crashed on the [0] indexing below. Require exactly
    # one match per pattern instead.
    if not (len(db_std) == len(db_nei) == len(db_mini) == 1):
        print("Error locating databases")
        exit(-1)

    db_std = db_std[0]
    db_nei = db_nei[0]
    db_mini = db_mini[0]

    # Open the three databases (one at a time is fine) and get the detectids.
    conn = get_db_connection(db_std)
    dets_std = fetch_all_detectids(conn)
    conn.close()

    conn = get_db_connection(db_nei)
    dets_nei = fetch_all_detectids(conn)
    conn.close()

    conn = get_db_connection(db_mini)
    dets_mini = fetch_all_detectids(conn)
    conn.close()

    dets_std = np.array(dets_std)
    dets_nei = np.array(dets_nei)
    dets_mini = np.array(dets_mini)

    # Keep only the detectids present in all three databases.
    dets_intersect = np.intersect1d(dets_std, dets_nei)
    dets_intersect = np.intersect1d(dets_intersect, dets_mini)

    # Move the three report images of every fully-inserted detection. A
    # missing source file is tolerated (printed and counted), but a large
    # number of failures indicates something is systematically wrong.
    exception_count = 0
    print(f"Moving {len(dets_intersect)} x3 detection files ... ")
    for d in dets_intersect:
        for suffix in ("", "_nei", "_mini"):
            srcfn = f"all_pngs/{d}{suffix}.png"
            destfn = f"done_pngs/{d}{suffix}.png"
            try:
                shutil.move(srcfn, destfn)
            except Exception as e:
                print(e)
                exception_count += 1

        if exception_count > 100:
            print("too many exceptions, something wrong")
            exit(-1)

    print(f"complete")
# Script entry point: run the migration only when executed directly.
if __name__ == '__main__':
    main()
|
HETDEXREPO_NAMEelixerPATH_START.@elixer_extracted@elixer-main@elixer@move_report_db_done_pngs.py@.PATH_END.py
|
{
"filename": "fitsrec.py",
"repo_name": "waynebhayes/SpArcFiRe",
"repo_path": "SpArcFiRe_extracted/SpArcFiRe-master/scripts/SpArcFiRe-pyvenv/lib/python2.7/site-packages/astropy/io/fits/fitsrec.py",
"type": "Python"
}
|
# Licensed under a 3-clause BSD style license - see PYFITS.rst
import copy
import operator
import warnings
import weakref
from functools import reduce
import numpy as np
from numpy import char as chararray
from .column import (ASCIITNULL, FITS2NUMPY, ASCII2NUMPY, ASCII2STR, ColDefs,
_AsciiColDefs, _FormatX, _FormatP, _VLF, _get_index,
_wrapx, _unwrapx, _makep, Delayed)
from .util import decode_ascii, encode_ascii
from ...extern.six import string_types, PY2
from ...extern.six.moves import range, zip
from ...utils import lazyproperty
from ...utils.compat import suppress
class FITS_record(object):
    """
    FITS record class.

    `FITS_record` is used to access records of the `FITS_rec` object.
    This will allow us to deal with scaled columns.  It also handles
    conversion/scaling of columns in ASCII tables.  The `FITS_record`
    class expects a `FITS_rec` object as input.
    """

    def __init__(self, input, row=0, start=None, end=None, step=None,
                 base=None, **kwargs):
        """
        Parameters
        ----------
        input : array
            The array to wrap.
        row : int, optional
            The starting logical row of the array.
        start : int, optional
            The starting column in the row associated with this object.
            Used for subsetting the columns of the `FITS_rec` object.
        end : int, optional
            The ending column in the row associated with this object.
            Used for subsetting the columns of the `FITS_rec` object.
        step : int, optional
            Step between selected columns, as in a slice.
        base : FITS_record, optional
            Parent record of which this one is a column-subset view.
        """
        self.array = input
        self.row = row
        # Number of fields in the view we are slicing from: the parent
        # record if given, otherwise the full underlying FITS_rec.
        if base:
            width = len(base)
        else:
            width = self.array._nfields

        s = slice(start, end, step).indices(width)
        self.start, self.end, self.step = s
        self.base = base

    def __getitem__(self, key):
        if isinstance(key, string_types):
            indx = _get_index(self.array.names, key)

            if indx < self.start or indx > self.end - 1:
                raise KeyError("Key '{}' does not exist.".format(key))
        elif isinstance(key, slice):
            # A slice returns a sub-record view chained to this one
            return type(self)(self.array, self.row, key.start, key.stop,
                              key.step, self)
        else:
            indx = self._get_index(key)

            if indx > self.array._nfields - 1:
                raise IndexError('Index out of bounds')

        return self.array.field(indx)[self.row]

    def __setitem__(self, key, value):
        if isinstance(key, string_types):
            indx = _get_index(self.array.names, key)

            if indx < self.start or indx > self.end - 1:
                raise KeyError("Key '{}' does not exist.".format(key))
        elif isinstance(key, slice):
            # BUGFIX: this branch previously iterated
            # ``range(slice.start, slice.stop, slice.step)`` -- attribute
            # lookups on the builtin ``slice`` *type*, not on ``key`` --
            # and called a non-existent ``self._get_indx``; any slice
            # assignment raised TypeError/AttributeError.  ``key.indices``
            # also normalizes None/negative bounds.
            for indx in range(*key.indices(len(self))):
                indx = self._get_index(indx)
                self.array.field(indx)[self.row] = value
            # All requested fields were assigned in the loop above.
            return
        else:
            indx = self._get_index(key)

            if indx > self.array._nfields - 1:
                raise IndexError('Index out of bounds')

        self.array.field(indx)[self.row] = value

    def __len__(self):
        return len(range(self.start, self.end, self.step))

    def __repr__(self):
        """
        Display a single row.
        """
        outlist = []
        for idx in range(len(self)):
            outlist.append(repr(self[idx]))
        return '({})'.format(', '.join(outlist))

    def field(self, field):
        """
        Get the field data of the record.
        """
        return self.__getitem__(field)

    def setfield(self, field, value):
        """
        Set the field data of the record.
        """
        self.__setitem__(field, value)

    @lazyproperty
    def _bases(self):
        # Chain of parent records, innermost view first.
        bases = [weakref.proxy(self)]
        base = self.base
        while base:
            bases.append(base)
            base = base.base
        return bases

    def _get_index(self, indx):
        # Map an index within this (possibly nested) column-subset view
        # back to an absolute field index in the underlying FITS_rec by
        # re-applying each ancestor's slice, outermost first.
        indices = np.ogrid[:self.array._nfields]
        for base in reversed(self._bases):
            if base.step < 1:
                s = slice(base.start, None, base.step)
            else:
                s = slice(base.start, base.end, base.step)
            indices = indices[s]
        return indices[indx]
class FITS_rec(np.recarray):
"""
FITS record array class.
`FITS_rec` is the data part of a table HDU's data part. This is a layer
over the `~numpy.recarray`, so we can deal with scaled columns.
It inherits all of the standard methods from `numpy.ndarray`.
"""
_record_type = FITS_record
    def __new__(subtype, input):
        """
        Construct a FITS record array from a recarray.
        """
        # input should be a record array
        if input.dtype.subdtype is None:
            self = np.recarray.__new__(subtype, input.shape, input.dtype,
                                       buf=input.data)
        else:
            # dtype has a subarray: carry the strides over as well so the
            # new view lines up with the original buffer
            self = np.recarray.__new__(subtype, input.shape, input.dtype,
                                       buf=input.data, strides=input.strides)

        self._init()
        if self.dtype.fields:
            self._nfields = len(self.dtype.fields)

        return self
def __setstate__(self, state):
meta = state[-1]
column_state = state[-2]
state = state[:-2]
super(FITS_rec, self).__setstate__(state)
self._col_weakrefs = weakref.WeakSet()
for attr, value in zip(meta, column_state):
setattr(self, attr, value)
    def __reduce__(self):
        """
        Return a 3-tuple for pickling a FITS_rec. Use the super-class
        functionality but then add in a tuple of FITS_rec-specific
        values that get used in __setstate__.
        """
        reconst_func, reconst_func_args, state = \
            super(FITS_rec, self).__reduce__()

        # Define FITS_rec-specific attrs that get added to state
        column_state = []
        meta = []

        for attrs in ['_converted', '_heapoffset', '_heapsize', '_nfields',
                      '_gap', '_uint', 'parnames', '_coldefs']:
            # Attributes that are absent (e.g. parnames on non-random-groups
            # data) are simply skipped thanks to suppress().
            with suppress(AttributeError):
                # _coldefs can be Delayed, and file objects cannot be
                # pickled, it needs to be deepcopied first
                if attrs == '_coldefs':
                    column_state.append(self._coldefs.__deepcopy__(None))
                else:
                    column_state.append(getattr(self, attrs))
                meta.append(attrs)

        state = state + (column_state, meta)

        return reconst_func, reconst_func_args, state
    def __array_finalize__(self, obj):
        # Called by numpy whenever a new view/copy of this array is made;
        # propagate or (re)build the FITS-specific bookkeeping.
        if obj is None:
            return

        if isinstance(obj, FITS_rec) and obj.dtype == self.dtype:
            # View of an existing FITS_rec with an identical dtype: share
            # its bookkeeping directly.
            self._converted = obj._converted
            self._heapoffset = obj._heapoffset
            self._heapsize = obj._heapsize
            self._col_weakrefs = obj._col_weakrefs
            self._coldefs = obj._coldefs
            self._nfields = obj._nfields
            self._gap = obj._gap
            self._uint = obj._uint
        elif self.dtype.fields is not None:
            # This will allow regular ndarrays with fields, rather than
            # just other FITS_rec objects
            self._nfields = len(self.dtype.fields)
            self._converted = {}
            self._heapoffset = getattr(obj, '_heapoffset', 0)
            self._heapsize = getattr(obj, '_heapsize', 0)
            self._gap = getattr(obj, '_gap', 0)
            self._uint = getattr(obj, '_uint', False)
            self._col_weakrefs = weakref.WeakSet()
            self._coldefs = ColDefs(self)
            # Work around chicken-egg problem. Column.array relies on the
            # _coldefs attribute to set up ref back to parent FITS_rec; however
            # in the above line the self._coldefs has not been assigned yet so
            # this fails. This patches that up...
            for col in self._coldefs:
                del col.array
                col._parent_fits_rec = weakref.ref(self)
        else:
            # Not a structured array at all: plain default initialization.
            self._init()
def _init(self):
"""Initializes internal attributes specific to FITS-isms."""
self._nfields = 0
self._converted = {}
self._heapoffset = 0
self._heapsize = 0
self._col_weakrefs = weakref.WeakSet()
self._coldefs = None
self._gap = 0
self._uint = False
    @classmethod
    def from_columns(cls, columns, nrows=0, fill=False):
        """
        Given a `ColDefs` object of unknown origin, initialize a new `FITS_rec`
        object.

        .. note::

            This was originally part of the ``new_table`` function in the table
            module but was moved into a class method since most of its
            functionality always had more to do with initializing a `FITS_rec`
            object than anything else, and much of it also overlapped with
            ``FITS_rec._scale_back``.

        Parameters
        ----------
        columns : sequence of `Column` or a `ColDefs`
            The columns from which to create the table data. If these
            columns have data arrays attached that data may be used in
            initializing the new table. Otherwise the input columns
            will be used as a template for a new table with the requested
            number of rows.

        nrows : int
            Number of rows in the new table. If the input columns have data
            associated with them, the size of the largest input column is used.
            Otherwise the default is 0.

        fill : bool
            If `True`, will fill all cells with zeros or blanks. If
            `False`, copy the data from input, undefined cells will still
            be filled with zeros/blanks.
        """
        if not isinstance(columns, ColDefs):
            columns = ColDefs(columns)

        # read the delayed data
        for column in columns:
            arr = column.array
            if isinstance(arr, Delayed):
                if arr.hdu.data is None:
                    column.array = None
                else:
                    column.array = _get_recarray_field(arr.hdu.data,
                                                       arr.field)
        # Reset columns._arrays (which we may want to just do away with
        # altogether
        del columns._arrays

        # use the largest column shape as the shape of the record
        if nrows == 0:
            for arr in columns._arrays:
                if arr is not None:
                    dim = arr.shape[0]
                else:
                    dim = 0
                if dim > nrows:
                    nrows = dim

        # Allocate the raw row buffer and pre-fill it with the table's
        # padding byte before viewing it as a record array.
        raw_data = np.empty(columns.dtype.itemsize * nrows, dtype=np.uint8)
        raw_data.fill(ord(columns._padding_byte))
        data = np.recarray(nrows, dtype=columns.dtype, buf=raw_data).view(cls)

        # Make sure the data is a listener for changes to the columns
        columns._add_listener(data)

        # Previously this assignment was made from hdu.columns, but that's a
        # bug since if a _TableBaseHDU has a FITS_rec in its .data attribute
        # the _TableBaseHDU.columns property is actually returned from
        # .data._coldefs, so this assignment was circular! Don't make that
        # mistake again.
        # All of this is an artifact of the fragility of the FITS_rec class,
        # and that it can't just be initialized by columns...
        data._coldefs = columns

        # If fill is True we don't copy anything from the column arrays. We're
        # just using them as a template, and returning a table filled with
        # zeros/blanks
        if fill:
            return data

        # Otherwise we have to fill the recarray with data from the input
        # columns
        for idx, column in enumerate(columns):
            # For each column in the ColDef object, determine the number of
            # rows in that column. This will be either the number of rows in
            # the ndarray associated with the column, or the number of rows
            # given in the call to this function, which ever is smaller. If
            # the input FILL argument is true, the number of rows is set to
            # zero so that no data is copied from the original input data.
            arr = column.array

            if arr is None:
                array_size = 0
            else:
                array_size = len(arr)

            n = min(array_size, nrows)

            # TODO: At least *some* of this logic is mostly redundant with the
            # _convert_foo methods in this class; see if we can eliminate some
            # of that duplication.

            if not n:
                # The input column had an empty array, so just use the fill
                # value
                continue

            field = _get_recarray_field(data, idx)
            name = column.name
            fitsformat = column.format
            recformat = fitsformat.recformat

            outarr = field[:n]
            inarr = arr[:n]

            if isinstance(recformat, _FormatX):
                # Data is a bit array
                if inarr.shape[-1] == recformat.repeat:
                    _wrapx(inarr, outarr, recformat.repeat)
                continue
            elif isinstance(recformat, _FormatP):
                # Variable-length column: build the heap descriptors
                data._cache_field(name, _makep(inarr, field, recformat,
                                               nrows=nrows))
                continue
            # TODO: Find a better way of determining that the column is meant
            # to be FITS L formatted
            elif recformat[-2:] == FITS2NUMPY['L'] and inarr.dtype == bool:
                # column is boolean
                # The raw data field should be filled with either 'T' or 'F'
                # (not 0). Use 'F' as a default
                field[:] = ord('F')
                # Also save the original boolean array in data._converted so
                # that it doesn't have to be re-converted
                converted = np.zeros(field.shape, dtype=bool)
                converted[:n] = inarr
                data._cache_field(name, converted)
                # TODO: Maybe this step isn't necessary at all if _scale_back
                # will handle it?
                inarr = np.where(inarr == np.False_, ord('F'), ord('T'))
            elif (columns[idx]._physical_values and
                    columns[idx]._pseudo_unsigned_ints):
                # Temporary hack...
                bzero = column.bzero
                converted = np.zeros(field.shape, dtype=inarr.dtype)
                converted[:n] = inarr
                data._cache_field(name, converted)
                if n < nrows:
                    # Pre-scale rows below the input data
                    field[n:] = -bzero

                inarr = inarr - bzero
            elif isinstance(columns, _AsciiColDefs):
                # Regardless whether the format is character or numeric, if the
                # input array contains characters then it's already in the raw
                # format for ASCII tables
                if fitsformat._pseudo_logical:
                    # Hack to support converting from 8-bit T/F characters
                    # Normally the column array is a chararray of 1 character
                    # strings, but we need to view it as a normal ndarray of
                    # 8-bit ints to fill it with ASCII codes for 'T' and 'F'
                    outarr = field.view(np.uint8, np.ndarray)[:n]
                elif arr.dtype.kind not in ('S', 'U'):
                    # Set up views of numeric columns with the appropriate
                    # numeric dtype
                    # Fill with the appropriate blanks for the column format
                    data._cache_field(name, np.zeros(nrows, dtype=arr.dtype))
                    outarr = data._converted[name][:n]

                outarr[:] = inarr
                continue

            if inarr.shape != outarr.shape:
                if (inarr.dtype.kind == outarr.dtype.kind and
                        inarr.dtype.kind in ('U', 'S') and
                        inarr.dtype != outarr.dtype):
                    inarr_rowsize = inarr[0].size
                    inarr = inarr.flatten().view(outarr.dtype)

                # This is a special case to handle input arrays with
                # non-trivial TDIMn.
                # By design each row of the outarray is 1-D, while each row of
                # the input array may be n-D
                if outarr.ndim > 1:
                    # The normal case where the first dimension is the rows
                    inarr_rowsize = inarr[0].size
                    inarr = inarr.reshape((n, inarr_rowsize))
                    outarr[:, :inarr_rowsize] = inarr
                else:
                    # Special case for strings where the out array only has one
                    # dimension (the second dimension is rolled up into the
                    # strings
                    outarr[:n] = inarr.ravel()
            else:
                outarr[:] = inarr

        # Now replace the original column array references with the new
        # fields
        # This is required to prevent the issue reported in
        # https://github.com/spacetelescope/PyFITS/issues/99
        for idx in range(len(columns)):
            columns._arrays[idx] = data.field(idx)

        return data
    def __repr__(self):
        """Represent the table using the plain ndarray repr."""
        # Force use of the normal ndarray repr (rather than the new
        # one added for recarray in Numpy 1.10) for backwards compat
        return np.ndarray.__repr__(self)
    def __getitem__(self, key):
        """Return a field (string key), a single-row record (scalar index),
        or a sliced FITS_rec view (slice/fancy index)."""
        if self._coldefs is None:
            return super(FITS_rec, self).__getitem__(key)

        if isinstance(key, string_types):
            return self.field(key)

        # Have to view as a recarray then back as a FITS_rec, otherwise the
        # circular reference fix/hack in FITS_rec.field() won't preserve
        # the slice.
        out = self.view(np.recarray)[key]
        if type(out) is not np.recarray:
            # Oops, we got a single element rather than a view. In that case,
            # return a Record, which has no __getstate__ and is more efficient.
            return self._record_type(self, key)

        # We got a view; change it back to our class, and add stuff
        out = out.view(type(self))
        out._coldefs = ColDefs(self._coldefs)
        arrays = []
        out._converted = {}
        for idx, name in enumerate(self._coldefs.names):
            #
            # Store the new arrays for the _coldefs object
            #
            arrays.append(self._coldefs._arrays[idx][key])

            # Ensure that the sliced FITS_rec will view the same scaled
            # columns as the original; this is one of the few cases where
            # it is not necessary to use _cache_field()
            if name in self._converted:
                dummy = self._converted[name]
                field = np.ndarray.__getitem__(dummy, key)
                out._converted[name] = field

        out._coldefs._arrays = arrays
        return out
    def __setitem__(self, key, value):
        """Assign to a column (string key), a range of rows (slice), or a
        single row (anything else)."""
        if self._coldefs is None:
            return super(FITS_rec, self).__setitem__(key, value)

        if isinstance(key, string_types):
            self[key][:] = value
            return

        if isinstance(key, slice):
            # NOTE(review): negative slice bounds are clamped to 0 rather
            # than wrapped from the end, and any slice step is ignored --
            # confirm this is the intended semantics.
            end = min(len(self), key.stop or len(self))
            end = max(0, end)
            start = max(0, key.start or 0)
            end = min(end, start + len(value))

            for idx in range(start, end):
                self.__setitem__(idx, value[idx - start])
            return

        # Single-row assignment: value supplies one entry per field.
        if isinstance(value, FITS_record):
            for idx in range(self._nfields):
                self.field(self.names[idx])[key] = value.field(self.names[idx])
        elif isinstance(value, (tuple, list, np.void)):
            if self._nfields == len(value):
                for idx in range(self._nfields):
                    self.field(idx)[key] = value[idx]
            else:
                raise ValueError('Input tuple or list required to have {} '
                                 'elements.'.format(self._nfields))
        else:
            raise TypeError('Assignment requires a FITS_record, tuple, or '
                            'list as input.')
    if PY2:
        # Python 2 still routes simple slices on ndarray subclasses through
        # __getslice__/__setslice__; delegate to the item protocol so slicing
        # behaves the same on both major versions.
        # avoid falling back through to ndarray.__getslice__
        def __getslice__(self, start, end):
            return self.__getitem__(slice(start, end))

        def __setslice__(self, start, end, value):
            self.__setitem__(slice(start, end), value)
def copy(self, order='C'):
"""
The Numpy documentation lies; `numpy.ndarray.copy` is not equivalent to
`numpy.copy`. Differences include that it re-views the copied array as
self's ndarray subclass, as though it were taking a slice; this means
``__array_finalize__`` is called and the copy shares all the array
attributes (including ``._converted``!). So we need to make a deep
copy of all those attributes so that the two arrays truly do not share
any data.
"""
new = super(FITS_rec, self).copy(order=order)
new.__dict__ = copy.deepcopy(self.__dict__)
return new
    @property
    def columns(self):
        """
        A user-visible accessor for the coldefs.

        See https://aeon.stsci.edu/ssb/trac/pyfits/ticket/44
        """
        return self._coldefs
    @property
    def _coldefs(self):
        # This used to be a normal internal attribute, but it was changed to a
        # property as a quick and transparent way to work around the reference
        # leak bug fixed in https://github.com/astropy/astropy/pull/4539
        #
        # See the long comment in the Column.array property for more details
        # on this. But in short, FITS_rec now has a ._col_weakrefs attribute
        # which is a WeakSet of weakrefs to each Column in _coldefs.
        #
        # So whenever ._coldefs is set we also add each Column in the ColDefs
        # to the weakrefs set. This is an easy way to find out if a Column has
        # any references to it external to the FITS_rec (i.e. a user assigned a
        # column to a variable). If the column is still in _col_weakrefs then
        # there are other references to it external to this FITS_rec. We use
        # that information in __del__ to save off copies of the array data
        # for those columns to their Column.array property before our memory
        # is freed.
        return self.__dict__.get('_coldefs')

    @_coldefs.setter
    def _coldefs(self, cols):
        self.__dict__['_coldefs'] = cols
        if isinstance(cols, ColDefs):
            # Track each Column weakly; see the getter comment for why.
            for col in cols.columns:
                self._col_weakrefs.add(col)

    @_coldefs.deleter
    def _coldefs(self):
        try:
            del self.__dict__['_coldefs']
        except KeyError as exc:
            # Surface a missing entry as AttributeError so ``del x._coldefs``
            # behaves like deleting any other missing attribute.
            raise AttributeError(exc.args[0])
    def __del__(self):
        # On teardown, any Column still referenced outside this FITS_rec
        # (i.e. still alive in _col_weakrefs) gets a private copy of its
        # data before our buffer is freed.
        try:
            del self._coldefs
            if self.dtype.fields is not None:
                for col in self._col_weakrefs:
                    if col.array is not None:
                        col.array = col.array.copy()
        # See issues #4690 and #4912
        except (AttributeError, TypeError):  # pragma: no cover
            pass
@property
def names(self):
"""List of column names."""
if self.dtype.fields:
return list(self.dtype.names)
elif getattr(self, '_coldefs', None) is not None:
return self._coldefs.names
else:
return None
@property
def formats(self):
"""List of column FITS formats."""
if getattr(self, '_coldefs', None) is not None:
return self._coldefs.formats
return None
@property
def _raw_itemsize(self):
"""
Returns the size of row items that would be written to the raw FITS
file, taking into account the possibility of unicode columns being
compactified.
Currently for internal use only.
"""
if _has_unicode_fields(self):
total_itemsize = 0
for field in self.dtype.fields.values():
itemsize = field[0].itemsize
if field[0].kind == 'U':
itemsize = itemsize // 4
total_itemsize += itemsize
return total_itemsize
else:
# Just return the normal itemsize
return self.itemsize
    def field(self, key):
        """
        A view of a `Column`'s data as an array.
        """
        # NOTE: The *column* index may not be the same as the field index in
        # the recarray, if the column is a phantom column
        column = self.columns[key]
        name = column.name
        format = column.format

        if format.dtype.itemsize == 0:
            warnings.warn(
                'Field {!r} has a repeat count of 0 in its format code, '
                'indicating an empty field.'.format(key))
            return np.array([], dtype=format.dtype)

        # If field's base is a FITS_rec, we can run into trouble because it
        # contains a reference to the ._coldefs object of the original data;
        # this can lead to a circular reference; see ticket #49
        base = self
        while (isinstance(base, FITS_rec) and
                isinstance(base.base, np.recarray)):
            base = base.base
        # base could still be a FITS_rec in some cases, so take care to
        # use rec.recarray.field to avoid a potential infinite
        # recursion
        field = _get_recarray_field(base, name)

        if name not in self._converted:
            recformat = format.recformat
            # TODO: If we're now passing the column to these subroutines, do we
            # really need to pass them the recformat?
            if isinstance(recformat, _FormatP):
                # for P format (variable-length arrays read from the heap)
                converted = self._convert_p(column, field, recformat)
            else:
                # Handle all other column data types which are fixed-width
                # fields
                converted = self._convert_other(column, field, recformat)

            # Note: Never assign values directly into the self._converted dict;
            # always go through self._cache_field; this way self._converted is
            # only used to store arrays that are not already direct views of
            # our own data.
            self._cache_field(name, converted)
            return converted

        return self._converted[name]
    def _cache_field(self, name, field):
        """
        Do not store fields in _converted if one of its bases is self,
        or if it has a common base with self.

        This results in a reference cycle that cannot be broken since
        ndarrays do not participate in cyclic garbage collection.
        """
        # Walk the .base chain of ``field``; for each ancestor, walk the
        # .base chain of ``self``.  If the chains ever meet, ``field`` is a
        # view of our own buffer and caching it would create a cycle.
        base = field
        while True:
            self_base = self
            while True:
                if self_base is base:
                    return

                if getattr(self_base, 'base', None) is not None:
                    self_base = self_base.base
                else:
                    break

            if getattr(base, 'base', None) is not None:
                base = base.base
            else:
                break

        self._converted[name] = field
def _update_column_attribute_changed(self, column, idx, attr, old_value,
new_value):
"""
Update how the data is formatted depending on changes to column
attributes initiated by the user through the `Column` interface.
Dispatches column attribute change notifications to individual methods
for each attribute ``_update_column_<attr>``
"""
method_name = '_update_column_{0}'.format(attr)
if hasattr(self, method_name):
# Right now this is so we can be lazy and not implement updaters
# for every attribute yet--some we may not need at all, TBD
getattr(self, method_name)(column, idx, old_value, new_value)
def _update_column_name(self, column, idx, old_name, name):
"""Update the dtype field names when a column name is changed."""
dtype = self.dtype
# Updating the names on the dtype should suffice
dtype.names = dtype.names[:idx] + (name,) + dtype.names[idx + 1:]
    def _convert_x(self, field, recformat):
        """Convert a raw table column to a bit array as specified by the
        FITS X format.
        """
        # One boolean per bit, recformat.repeat bits per row; _unwrapx
        # unpacks the packed bytes in ``field`` into ``dummy``.
        dummy = np.zeros(self.shape + (recformat.repeat,), dtype=np.bool_)
        _unwrapx(field, dummy, recformat.repeat)
        return dummy
    def _convert_p(self, column, field, recformat):
        """Convert a raw table column of FITS P or Q format descriptors
        to a VLA column with the array data returned from the heap.
        """
        dummy = _VLF([None] * len(self), dtype=recformat.dtype)
        raw_data = self._get_raw_data()

        if raw_data is None:
            raise IOError(
                "Could not find heap data for the {!r} variable-length "
                "array column.".format(column.name))

        for idx in range(len(self)):
            # Each descriptor row is a (count, offset) pair; offsets are
            # relative to the start of the heap.
            offset = field[idx, 1] + self._heapoffset
            count = field[idx, 0]

            if recformat.dtype == 'a':
                # Character VLA: view as single bytes, then re-chunk into
                # one string of ``count`` characters and decode to str.
                dt = np.dtype(recformat.dtype + str(1))
                arr_len = count * dt.itemsize
                da = raw_data[offset:offset + arr_len].view(dt)
                da = np.char.array(da.view(dtype=dt), itemsize=count)
                dummy[idx] = decode_ascii(da)
            else:
                dt = np.dtype(recformat.dtype)
                arr_len = count * dt.itemsize
                dummy[idx] = raw_data[offset:offset + arr_len].view(dt)
                dummy[idx].dtype = dummy[idx].dtype.newbyteorder('>')
                # Each array in the field may now require additional
                # scaling depending on the other scaling parameters
                # TODO: The same scaling parameters apply to every
                # array in the column so this is currently very slow; we
                # really only need to check once whether any scaling will
                # be necessary and skip this step if not
                # TODO: Test that this works for X format; I don't think
                # that it does--the recformat variable only applies to the P
                # format not the X format
                dummy[idx] = self._convert_other(column, dummy[idx],
                                                 recformat)

        return dummy
    def _convert_ascii(self, column, field):
        """
        Special handling for ASCII table columns to convert columns containing
        numeric types to actual numeric arrays from the string representation.
        """
        format = column.format
        recformat = ASCII2NUMPY[format[0]]
        # if the string = TNULL, return ASCIITNULL
        nullval = str(column.null).strip().encode('ascii')
        if len(nullval) > format.width:
            nullval = nullval[:format.width]

        # Before using .replace make sure that any trailing bytes in each
        # column are filled with spaces, and *not*, say, nulls; this causes
        # functions like replace to potentially leave gibberish bytes in the
        # array buffer.
        dummy = np.char.ljust(field, format.width)
        # FITS allows Fortran-style 'D' exponents; normalize to 'E'.
        dummy = np.char.replace(dummy, encode_ascii('D'), encode_ascii('E'))
        null_fill = encode_ascii(str(ASCIITNULL).rjust(format.width))

        # Convert all fields equal to the TNULL value (nullval) to empty fields.
        # TODO: These fields really should be converted to NaN or something else
        # undefined. Currently they are converted to empty fields, which are
        # then set to zero.
        dummy = np.where(np.char.strip(dummy) == nullval, null_fill, dummy)

        # always replace empty fields, see https://github.com/astropy/astropy/pull/5394
        if nullval != b'':
            dummy = np.where(np.char.strip(dummy) == b'', null_fill, dummy)

        try:
            dummy = np.array(dummy, dtype=recformat)
        except ValueError as exc:
            indx = self.names.index(column.name)
            raise ValueError(
                '{}; the header may be missing the necessary TNULL{} '
                'keyword or the table contains invalid data'.format(
                    exc, indx + 1))

        return dummy
def _convert_other(self, column, field, recformat):
"""Perform conversions on any other fixed-width column data types.
This may not perform any conversion at all if it's not necessary, in
which case the original column array is returned.
"""
if isinstance(recformat, _FormatX):
# special handling for the X format
return self._convert_x(field, recformat)
(_str, _bool, _number, _scale, _zero, bscale, bzero, dim) = \
self._get_scale_factors(column)
indx = self.names.index(column.name)
# ASCII table, convert strings to numbers
# TODO:
# For now, check that these are ASCII columns by checking the coldefs
# type; in the future all columns (for binary tables, ASCII tables, or
# otherwise) should "know" what type they are already and how to handle
# converting their data from FITS format to native format and vice
# versa...
if not _str and isinstance(self._coldefs, _AsciiColDefs):
field = self._convert_ascii(column, field)
# Test that the dimensions given in dim are sensible; otherwise
# display a warning and ignore them
if dim:
# See if the dimensions already match, if not, make sure the
# number items will fit in the specified dimensions
if field.ndim > 1:
actual_shape = field.shape[1:]
if _str:
actual_shape = actual_shape + (field.itemsize,)
else:
actual_shape = field.shape[0]
if dim == actual_shape:
# The array already has the correct dimensions, so we
# ignore dim and don't convert
dim = None
else:
nitems = reduce(operator.mul, dim)
if _str:
actual_nitems = field.itemsize
elif len(field.shape) == 1: # No repeat count in TFORMn, equivalent to 1
actual_nitems = 1
else:
actual_nitems = field.shape[1]
if nitems > actual_nitems:
warnings.warn(
'TDIM{} value {:d} does not fit with the size of '
'the array items ({:d}). TDIM{:d} will be ignored.'
.format(indx + 1, self._coldefs[indx].dims,
actual_nitems, indx + 1))
dim = None
# further conversion for both ASCII and binary tables
# For now we've made columns responsible for *knowing* whether their
# data has been scaled, but we make the FITS_rec class responsible for
# actually doing the scaling
# TODO: This also needs to be fixed in the effort to make Columns
# responsible for scaling their arrays to/from FITS native values
if not column.ascii and column.format.p_format:
format_code = column.format.p_format
else:
# TODO: Rather than having this if/else it might be nice if the
# ColumnFormat class had an attribute guaranteed to give the format
# of actual values in a column regardless of whether the true
# format is something like P or Q
format_code = column.format.format
if (_number and (_scale or _zero) and not column._physical_values):
# This is to handle pseudo unsigned ints in table columns
# TODO: For now this only really works correctly for binary tables
# Should it work for ASCII tables as well?
if self._uint:
if bzero == 2**15 and format_code == 'I':
field = np.array(field, dtype=np.uint16)
elif bzero == 2**31 and format_code == 'J':
field = np.array(field, dtype=np.uint32)
elif bzero == 2**63 and format_code == 'K':
field = np.array(field, dtype=np.uint64)
bzero64 = np.uint64(2 ** 63)
else:
field = np.array(field, dtype=np.float64)
else:
field = np.array(field, dtype=np.float64)
if _scale:
np.multiply(field, bscale, field)
if _zero:
if self._uint and format_code == 'K':
# There is a chance of overflow, so be careful
test_overflow = field.copy()
try:
test_overflow += bzero64
except OverflowError:
warnings.warn(
"Overflow detected while applying TZERO{0:d}. "
"Returning unscaled data.".format(indx + 1))
else:
field = test_overflow
else:
field += bzero
elif _bool and field.dtype != bool:
field = np.equal(field, ord('T'))
elif _str:
with suppress(UnicodeDecodeError):
field = decode_ascii(field)
if dim:
# Apply the new field item dimensions
nitems = reduce(operator.mul, dim)
if field.ndim > 1:
field = field[:, :nitems]
if _str:
fmt = field.dtype.char
dtype = ('|{}{}'.format(fmt, dim[-1]), dim[:-1])
field.dtype = dtype
else:
field.shape = (field.shape[0],) + dim
return field
def _get_heap_data(self):
"""
Returns a pointer into the table's raw data to its heap (if present).
This is returned as a numpy byte array.
"""
if self._heapsize:
raw_data = self._get_raw_data().view(np.ubyte)
heap_end = self._heapoffset + self._heapsize
return raw_data[self._heapoffset:heap_end]
else:
return np.array([], dtype=np.ubyte)
    def _get_raw_data(self):
        """
        Returns the base array of self that "raw data array" that is the
        array in the format that it was first read from a file before it was
        sliced or viewed as a different type in any way.

        This is determined by walking through the bases until finding one that
        has at least the same number of bytes as self, plus the heapsize. This
        may be the immediate .base but is not always. This is used primarily
        for variable-length array support which needs to be able to find the
        heap (the raw data *may* be larger than nbytes + heapsize if it
        contains a gap or padding).

        May return ``None`` if no array resembling the "raw data" according to
        the stated criteria can be found.
        """
        raw_data_bytes = self.nbytes + self._heapsize
        base = self
        # Walk up the chain of views until one is large enough to hold the
        # whole original buffer (table data plus heap).
        while hasattr(base, 'base') and base.base is not None:
            base = base.base
            if hasattr(base, 'nbytes') and base.nbytes >= raw_data_bytes:
                return base
def _get_scale_factors(self, column):
"""Get all the scaling flags and factors for one column."""
# TODO: Maybe this should be a method/property on Column? Or maybe
# it's not really needed at all...
_str = column.format.format == 'A'
_bool = column.format.format == 'L'
_number = not (_bool or _str)
bscale = column.bscale
bzero = column.bzero
_scale = bscale not in ('', None, 1)
_zero = bzero not in ('', None, 0)
# ensure bscale/bzero are numbers
if not _scale:
bscale = 1
if not _zero:
bzero = 0
# column._dims gives a tuple, rather than column.dim which returns the
# original string format code from the FITS header...
dim = column._dims
return (_str, _bool, _number, _scale, _zero, bscale, bzero, dim)
def _scale_back(self, update_heap_pointers=True):
    """
    Update the parent array, using the (latest) scaled array.

    If ``update_heap_pointers`` is `False`, this will leave all the heap
    pointers in P/Q columns as they are verbatim--it only makes sense to do
    this if there is already data on the heap and it can be guaranteed that
    that data has not been modified, and there is not new data to add to
    the heap.  Currently this is only used as an optimization for
    CompImageHDU that does its own handling of the heap.
    """
    # Running total for the new heap size
    heapsize = 0
    for indx, name in enumerate(self.dtype.names):
        column = self._coldefs[indx]
        recformat = column.format.recformat
        raw_field = _get_recarray_field(self, indx)

        # add the location offset of the heap area for each
        # variable length column
        if isinstance(recformat, _FormatP):
            # Irritatingly, this can return a different dtype than just
            # doing np.dtype(recformat.dtype); but this returns the results
            # that we want.  For example if recformat.dtype is 'a' we want
            # an array of characters.
            dtype = np.array([], dtype=recformat.dtype).dtype

            if update_heap_pointers and name in self._converted:
                # The VLA has potentially been updated, so we need to
                # update the array descriptors
                raw_field[:] = 0  # reset
                npts = [len(arr) for arr in self._converted[name]]

                raw_field[:len(npts), 0] = npts
                # Each row's heap offset is the cumulative byte count of
                # all preceding rows, shifted by the running heap total.
                raw_field[1:, 1] = (np.add.accumulate(raw_field[:-1, 0]) *
                                    dtype.itemsize)
                raw_field[:, 1][:] += heapsize

            heapsize += raw_field[:, 0].sum() * dtype.itemsize
            # Even if this VLA has not been read or updated, we need to
            # include the size of its constituent arrays in the heap size
            # total

        if isinstance(recformat, _FormatX) and name in self._converted:
            _wrapx(self._converted[name], raw_field, recformat.repeat)
            continue

        _str, _bool, _number, _scale, _zero, bscale, bzero, _ = \
            self._get_scale_factors(column)

        field = self._converted.get(name, raw_field)

        # conversion for both ASCII and binary tables
        if _number or _str:
            if _number and (_scale or _zero) and column._physical_values:
                # Undo scaling: storage value = (physical - bzero) / bscale.
                # Work on a copy so the converted (physical) view survives.
                dummy = field.copy()
                if _zero:
                    dummy -= bzero
                if _scale:
                    dummy /= bscale
                # This will set the raw values in the recarray back to
                # their non-physical storage values, so the column should
                # be marked as not scaled
                column._physical_values = False
            elif _str or isinstance(self._coldefs, _AsciiColDefs):
                dummy = field
            else:
                continue

            # ASCII table, convert numbers to strings
            if isinstance(self._coldefs, _AsciiColDefs):
                self._scale_back_ascii(indx, dummy, raw_field)
            # binary table string column
            elif isinstance(raw_field, chararray.chararray):
                self._scale_back_strings(indx, dummy, raw_field)
            # all other binary table columns
            else:
                if len(raw_field) and isinstance(raw_field[0],
                                                 np.integer):
                    dummy = np.around(dummy)

                if raw_field.shape == dummy.shape:
                    raw_field[:] = dummy
                else:
                    # Reshaping the data is necessary in cases where the
                    # TDIMn keyword was used to shape a column's entries
                    # into arrays
                    raw_field[:] = dummy.ravel().view(raw_field.dtype)

            del dummy

        # ASCII table does not have Boolean type
        elif _bool and name in self._converted:
            # FITS logical columns store ASCII 'T'/'F' bytes.
            choices = (np.array([ord('F')], dtype=np.int8)[0],
                       np.array([ord('T')], dtype=np.int8)[0])
            raw_field[:] = np.choose(field, choices)

    # Store the updated heapsize
    self._heapsize = heapsize
def _scale_back_strings(self, col_idx, input_field, output_field):
    """
    Write string data from a converted column (``input_field``) back into
    the raw record array column (``output_field``), ASCII-encoding unicode
    data where required by FITS.
    """
    # There are a few possibilities this has to be able to handle properly
    # The input_field, which comes from the _converted column is of dtype
    # 'Sn' (where n in string length) on Python 2--this is to maintain the
    # existing user expectation of not being returned Python 2-style
    # unicode strings.  On Python 3 the array in _converted is of dtype
    # 'Un' so that elements read out of the array are normal Python 3 str
    # objects (i.e. unicode strings)
    #
    # At the other end the *output_field* may also be of type 'S' or of
    # type 'U'.  It will *usually* be of type 'S' (regardless of Python
    # version) because when reading an existing FITS table the raw data is
    # just ASCII strings, and represented in Numpy as an S array.
    # However, when a user creates a new table from scratch, they *might*
    # pass in a column containing unicode strings (dtype 'U'), especially
    # on Python 3 where this will be the default.  Therefore the
    # output_field of the raw array is actually a unicode array.  But we
    # still want to make sure the data is encodable as ASCII.  Later when
    # we write out the array we use, in the dtype 'U' case, a different
    # write routine that writes row by row and encodes any 'U' columns to
    # ASCII.

    # If the output_field is non-ASCII we will worry about ASCII encoding
    # later when writing; otherwise we can do it right here
    if input_field.dtype.kind == 'U' and output_field.dtype.kind == 'S':
        try:
            _ascii_encode(input_field, out=output_field)
        except _UnicodeArrayEncodeError as exc:
            # Re-raise as a ValueError naming the offending column and
            # element so users can locate the bad data.
            raise ValueError(
                "Could not save column '{0}': Contains characters that "
                "cannot be encoded as ASCII as required by FITS, starting "
                "at the index {1!r} of the column, and the index {2} of "
                "the string at that location.".format(
                    self._coldefs[col_idx].name,
                    exc.index[0] if len(exc.index) == 1 else exc.index,
                    exc.start))
    else:
        # Otherwise go ahead and do a direct copy into--if both are type
        # 'U' we'll handle encoding later
        input_field = input_field.flatten().view(output_field.dtype)
        output_field.flat[:] = input_field

    # Ensure that blanks at the end of each string are
    # converted to nulls instead of spaces, see Trac #15
    # and #111
    _rstrip_inplace(output_field)
def _scale_back_ascii(self, col_idx, input_field, output_field):
    """
    Convert internal array values back to ASCII table representation.

    The ``input_field`` is the internal representation of the values, and
    the ``output_field`` is the character array representing the ASCII
    output that will be written.

    Raises
    ------
    ValueError
        If a formatted value does not fit within its column's field width.
    """
    starts = self._coldefs.starts[:]
    spans = self._coldefs.spans
    format = self._coldefs[col_idx].format

    # The index of the "end" column of the record, beyond
    # which we can't write
    end = super(FITS_rec, self).field(-1).itemsize
    starts.append(end + starts[-1])

    if col_idx > 0:
        lead = starts[col_idx] - starts[col_idx - 1] - spans[col_idx - 1]
    else:
        lead = 0

    if lead < 0:
        warnings.warn('Column {!r} starting point overlaps the previous '
                      'column.'.format(col_idx + 1))

    trail = starts[col_idx + 1] - starts[col_idx] - spans[col_idx]

    if trail < 0:
        warnings.warn('Column {!r} ending point overlaps the next '
                      'column.'.format(col_idx + 1))

    # TODO: It would be nice if these string column formatting
    # details were left to a specialized class, as is the case
    # with FormatX and FormatP
    # String ('A') columns are left-justified; numbers are right-justified.
    if 'A' in format:
        _pc = '{:'
    else:
        _pc = '{:>'

    fmt = ''.join([_pc, format[1:], ASCII2STR[format[0]], '}',
                   (' ' * trail)])

    # Even if the format precision is 0, we should output a decimal point
    # as long as there is space to do so--not including a decimal point in
    # a float value is discouraged by the FITS Standard
    trailing_decimal = (format.precision == 0 and
                        format.format in ('F', 'E', 'D'))

    # not using numarray.strings's num2char because the
    # result is not allowed to expand (as C/Python does).
    for jdx, value in enumerate(input_field):
        value = fmt.format(value)
        if len(value) > starts[col_idx + 1] - starts[col_idx]:
            raise ValueError(
                "Value {!r} does not fit into the output's itemsize of "
                "{}.".format(value, spans[col_idx]))

        if trailing_decimal and value[0] == ' ':
            # We have some extra space in the field for the trailing
            # decimal point
            value = value[1:] + '.'

        output_field[jdx] = value

    # Replace exponent separator in floating point numbers.
    # chararray.replace() returns a *new* array rather than modifying in
    # place, so assign the result back into output_field; previously the
    # result was discarded and 'D'-format exponents were never converted.
    if 'D' in format:
        output_field[:] = output_field.replace(encode_ascii('E'),
                                               encode_ascii('D'))
def _get_recarray_field(array, key):
"""
Compatibility function for using the recarray base class's field method.
This incorporates the legacy functionality of returning string arrays as
Numeric-style chararray objects.
"""
# Numpy >= 1.10.dev recarray no longer returns chararrays for strings
# This is currently needed for backwards-compatibility and for
# automatic truncation of trailing whitespace
field = np.recarray.field(array, key)
if (field.dtype.char in ('S', 'U') and
not isinstance(field, chararray.chararray)):
field = field.view(chararray.chararray)
return field
def _rstrip_inplace(array, chars=None):
"""
Performs an in-place rstrip operation on string arrays.
This is necessary since the built-in `np.char.rstrip` in Numpy does not
perform an in-place calculation. This can be removed if ever
https://github.com/numpy/numpy/issues/6303 is implemented (however, for
the purposes of this module the only in-place vectorized string functions
we need are rstrip and encode).
"""
for item in np.nditer(array, flags=['zerosize_ok'],
op_flags=['readwrite']):
item[...] = item.item().rstrip(chars)
class _UnicodeArrayEncodeError(UnicodeEncodeError):
def __init__(self, encoding, object_, start, end, reason, index):
super(_UnicodeArrayEncodeError, self).__init__(encoding, object_,
start, end, reason)
self.index = index
def _ascii_encode(inarray, out=None):
    """
    Takes a unicode array and fills the output string array with the ASCII
    encodings (if possible) of the elements of the input array.  The two
    arrays must be the same size (though not necessarily the same shape).

    This is like an inplace version of `np.char.encode` though simpler since
    it's only limited to ASCII, and hence the size of each character is
    guaranteed to be 1 byte.

    If any strings are non-ASCII an UnicodeArrayEncodeError is raised--this
    is just a `UnicodeEncodeError` with an additional attribute for the
    index of the item that couldn't be encoded.
    """
    # A 'U' dtype stores 4 bytes per character, so the equivalent byte-string
    # dtype has itemsize // 4 characters.
    out_dtype = np.dtype(('S{0}'.format(inarray.dtype.itemsize // 4),
                          inarray.dtype.shape))
    if out is not None:
        # Reinterpret the caller's buffer so nditer writes bytes into it.
        out = out.view(out_dtype)

    op_dtypes = [inarray.dtype, out_dtype]
    # 'allocate' lets nditer create the output array when out is None.
    op_flags = [['readonly'], ['writeonly', 'allocate']]
    it = np.nditer([inarray, out], op_dtypes=op_dtypes,
                   op_flags=op_flags, flags=['zerosize_ok'])

    try:
        for initem, outitem in it:
            outitem[...] = initem.item().encode('ascii')
    except UnicodeEncodeError as exc:
        # Attach the (multi-dimensional) index of the failing element so
        # callers can produce a precise error message.
        index = np.unravel_index(it.iterindex, inarray.shape)
        raise _UnicodeArrayEncodeError(*(exc.args + (index,)))

    return it.operands[1]
def _has_unicode_fields(array):
"""
Returns True if any fields in a structured array have Unicode dtype.
"""
dtypes = (d[0] for d in array.dtype.fields.values())
return any(d.kind == 'U' for d in dtypes)
|
waynebhayesREPO_NAMESpArcFiRePATH_START.@SpArcFiRe_extracted@SpArcFiRe-master@scripts@SpArcFiRe-pyvenv@lib@python2.7@site-packages@astropy@io@fits@fitsrec.py@.PATH_END.py
|
{
"filename": "core.py",
"repo_name": "dstein64/kmeans1d",
"repo_path": "kmeans1d_extracted/kmeans1d-master/kmeans1d/core.py",
"type": "Python"
}
|
from collections import namedtuple
import ctypes
import os
from typing import Sequence
import kmeans1d._core # type: ignore
# Result record: `clusters` maps each input index to a cluster id and
# `centroids` holds the cluster centers.
Clustered = namedtuple('Clustered', 'clusters centroids')

# The clustering algorithm lives in a compiled extension; load it as a plain
# shared library so it can be invoked through ctypes.
_DLL = ctypes.cdll.LoadLibrary(kmeans1d._core.__file__)

# The package version is stored in a data file shipped next to this module.
version_txt = os.path.join(os.path.dirname(__file__), 'version.txt')
with open(version_txt, 'r') as f:
    __version__ = f.read().strip()
def cluster(array: Sequence[float], k: int) -> Clustered:
    """
    Cluster a one-dimensional sequence into k groups via the C extension.

    :param array: A sequence of floats
    :param k: Number of clusters (int)
    :return: A tuple with (clusters, centroids)
    """
    assert k > 0, f'Invalid k: {k}'
    num_points = len(array)
    assert num_points > 0, f'Invalid len(array): {num_points}'
    # Never request more clusters than there are points.
    num_clusters = min(k, num_points)

    # Marshal inputs and output buffers as ctypes arrays for the C call.
    values_buf = (ctypes.c_double * num_points)(*array)
    clusters_buf = (ctypes.c_ulong * num_points)()
    centroids_buf = (ctypes.c_double * num_clusters)()
    _DLL.cluster(
        values_buf,
        ctypes.c_ulong(num_points),
        ctypes.c_ulong(num_clusters),
        clusters_buf,
        centroids_buf,
    )
    return Clustered(clusters=list(clusters_buf), centroids=list(centroids_buf))
|
dstein64REPO_NAMEkmeans1dPATH_START.@kmeans1d_extracted@kmeans1d-master@kmeans1d@core.py@.PATH_END.py
|
{
"filename": "pipeline.py",
"repo_name": "alixviolet/PAWS",
"repo_path": "PAWS_extracted/PAWS-main/pipeline.py",
"type": "Python"
}
|
import os
import shutil
from astropy.io import fits
import sys
import numpy as np
import logging
import multiprocessing
from multiprocessing import Pool
import csv
import pandas as pd
import matplotlib.pyplot as plt
import math
from statistics import mean
import ipywidgets as widgets
from ipywidgets import HBox, VBox
from IPython.display import display
from setup import datapathWidget,number,coadd, res_widget,ispecpathWidget, input_widget, osx
# Normalize the user-supplied iSpec install path so it always ends with a
# slash and can be used as a prefix for relative resource paths below.
ispec_dir = ispecpathWidget.value
if ispec_dir[-1] != '/':
    ispec_dir=ispec_dir+'/'
# NOTE(review): assumes the `ispec` package is already importable here
# (presumably made so by the setup module) -- confirm.
import ispec

# Equivalent-width backend differs by platform: MOOG on Mac/iOS, WIDTH
# everywhere else.
oss = osx.value
if oss == 'Mac/iOS':
    code = 'moog'
else:
    code = 'width'
def parameters_from_ew(b):
    """
    Widget callback: derive stellar atmospheric parameters from equivalent
    widths (EWs) for one or many prepared spectra and write results to CSV
    files alongside each spectrum.

    Parameters
    ----------
    b : the ipywidgets Button that triggered the callback (unused).
    """
    resolution=res_widget.value
    mask = input_widget.value
    to_resolution = resolution
    # Collect one directory per target, or just the single chosen directory.
    if number.value=='Multiple':
        paths = [ f.path for f in os.scandir(datapathWidget.value) if f.is_dir()]
    if number.value=='Single':
        paths = [datapathWidget.value]
    file_name = '/prepared_1_.fits'
    for p in paths:
        #first estimating the vsini from FWHM to determine whether EW will work
        spectrum= ispec.read_spectrum(p+file_name)
        template = ispec.read_spectrum(ispec_dir + "/input/spectra/templates/NARVAL.Sun.370_1048nm/template.txt.gz") #for template matching
        models, ccf = ispec.cross_correlate_with_template(spectrum, template, \
                lower_velocity_limit=-200, upper_velocity_limit=200, \
                velocity_step=1.0, fourier=False)
        c = 299792458.0 # m/s
        # FWHM of the CCF peak from its Gaussian sigma.
        fwhmm = models[0].sig() * (2*np.sqrt(2*np.log(2)))
        vsini = 0.662557*fwhmm - 6.119825 #from calibration - see Freckelton et al 2024
        if vsini > 10:
            # Fast rotator: EW analysis is unreliable, so fall back to canned
            # parameters for the chosen spectral-type mask.
            print('vsini estimated to be too high for Equivalent Widths - saving estimates based on spectral type instead. Continue to synthesis :) ')
            if mask =='G2':
                t = 5700
                l = 4.0
                feh = 0.05
                a = 0.00
                vmic =ispec.estimate_vmic(t, l,feh)
            if mask == 'K5':
                t=4440
                l=4.6
                feh = 0.5
                a = 0.00
                vmic = ispec.estimate_vmic(t, l,feh)
            if mask == 'F3':
                t=6750
                l=4.2
                feh = 0.00
                a = 0.00
                vmic = ispec.estimate_vmic(t, l,feh)
            params = ['teff','logg','MH','alpha','vmic']
            values = [t,l,feh,a,vmic]
            errs = [0,0,0,0,0]
            df = pd.DataFrame(zip(params,values), columns=['Parameter','Value'])
            print(df)
            df['Errors'] = errs
            df.to_csv(p +'/params.csv', index=False)
        else:
            ####=============fitting line regions===========================
            # Choose a line-region list matching the spectrograph resolution.
            if resolution <= 50000:
                line_regions_with_atomic_data = ispec.read_line_regions(ispec_dir + "input/regions/47000_GES/{}_ew_ispec_good_for_params_all_extended.txt".format(code))
            elif 50000< resolution <= 90000:
                line_regions_with_atomic_data = ispec.read_line_regions('solar/SOPHIE/solar_line_regions_all_ew.txt')
            elif resolution > 90000:
                line_regions_with_atomic_data = ispec.read_line_regions('solar/HARPS/solar_line_regions_all_ew_12.txt')
            # Keep only the iron lines (Fe 1 / Fe 2).
            line_regions_with_atomic_data = line_regions_with_atomic_data[np.logical_or(line_regions_with_atomic_data['element'] == "Fe 1", line_regions_with_atomic_data['element'] == "Fe 2")]
            smoothed_star_spectrum = ispec.convolve_spectrum(spectrum, 2*to_resolution)
            line_regions_with_atomic_data = ispec.adjust_linemasks(smoothed_star_spectrum, line_regions_with_atomic_data, max_margin=0.5)
            star_continuum_model = ispec.fit_continuum(spectrum, fixed_value=1.0, model="Fixed value")
            #--- Fit the lines but do NOT cross-match with any atomic linelist since they already have that information
            linemasks = ispec.fit_lines(line_regions_with_atomic_data, spectrum , star_continuum_model,\
                    atomic_linelist = None, \
                    max_atomic_wave_diff = 0.005,\
                    check_derivatives = False, \
                    discard_gaussian=False, \
                    smoothed_spectrum=None, \
                    discard_voigt=True, \
                    free_mu=True, crossmatch_with_mu=False, closest_match=False)
            # Exclude lines that have not been successfully cross matched with the atomic data
            # because we cannot calculate the chemical abundance (it will crash the corresponding routines)
            rejected_by_atomic_line_not_found = (linemasks['wave_nm'] == 0)
            linemasks = linemasks[~rejected_by_atomic_line_not_found]
            ###=============discarding any bad masks===========================
            flux_peak = spectrum['flux'][linemasks['peak']]
            flux_base = spectrum['flux'][linemasks['base']]
            flux_top = spectrum['flux'][linemasks['top']]
            # A sane absorption line has its peak between its edges and
            # below the flux at both edges.
            bad_mask = np.logical_or(linemasks['wave_peak'] <= linemasks['wave_base'], linemasks['wave_peak'] >= linemasks['wave_top'])
            bad_mask = np.logical_or(bad_mask, flux_peak >= flux_base)
            bad_mask = np.logical_or(bad_mask, flux_peak >= flux_top)
            linemasks = linemasks[~bad_mask]
            #================Exclude lines with EW equal to zero=========
            rejected_by_zero_ew = (linemasks['ew'] == 0)
            linemasks = linemasks[~rejected_by_zero_ew]
            #================Exclude lines that may be affected by tellurics========
            rejected_by_telluric_line = (linemasks['telluric_wave_peak'] != 0)
            linemasks = linemasks[~rejected_by_telluric_line]
            #================Model spectra from EW===========================
            # Initial guesses from the chosen spectral-type mask.
            if mask =='G2':
                initial_teff = 5700
                initial_logg = 4.0
                initial_MH = 0.05
                initial_alpha = 0.00
                initial_vmic =ispec.estimate_vmic(initial_teff, initial_logg, initial_MH)
            if mask == 'K5':
                initial_teff=4440
                initial_logg=4.6
                initial_MH = 0.5
                initial_alpha = 0.00
                initial_vmic = ispec.estimate_vmic(initial_teff, initial_logg, initial_MH)
            if mask == 'F3':
                initial_teff=6750
                initial_logg=4.2
                initial_MH = 0.00
                initial_alpha = 0.00
                initial_vmic = ispec.estimate_vmic(initial_teff, initial_logg, initial_MH)
            max_iterations = 15
            model = ispec_dir + "input/atmospheres/ATLAS9.Castelli/"
            atomic_linelist_file= ispec_dir +"/input/linelists/transitions/SPECTRUM.300_1100nm/atomic_lines.tsv"
            solar_abundances_file = ispec_dir + "/input/abundances/Grevesse.1998/stdatom.dat"
            # Load model atmospheres
            modeled_layers_pack = ispec.load_modeled_layers_pack(model)
            # Load SPECTRUM abundances
            solar_abundances = ispec.read_solar_abundances(solar_abundances_file)
            # Validate parameters
            if not ispec.valid_atmosphere_target(modeled_layers_pack, {'teff':initial_teff, 'logg':initial_logg, 'MH':initial_MH, 'alpha':initial_alpha}):
                msg = "The specified effective temperature, gravity (log g) and metallicity [M/H] \
fall out of theatmospheric models."
                print(msg)
            # Reduced equivalent width
            # Filter too weak/strong lines
            # * Criteria presented in paper of GALA
            efilter = np.logical_and(linemasks['ewr'] >= -6.0, linemasks['ewr'] <= -4.3)
            # Filter high excitation potential lines
            # * Criteria from Eric J. Bubar "Equivalent Width Abundance Analysis In Moog"
            efilter = np.logical_and(efilter, linemasks['lower_state_eV'] <= 5.0)
            efilter = np.logical_and(efilter, linemasks['lower_state_eV'] >= 0.5)
            ## Filter also bad fits
            efilter = np.logical_and(efilter, linemasks['rms'] < 1.00)
            # no flux
            noflux = spectrum['flux'][linemasks['peak']] < 1.0e-10
            efilter = np.logical_and(efilter, np.logical_not(noflux))
            unfitted = linemasks['fwhm'] == 0
            efilter = np.logical_and(efilter, np.logical_not(unfitted))
            results = ispec.model_spectrum_from_ew(linemasks[efilter], modeled_layers_pack, \
                    solar_abundances, initial_teff, initial_logg, initial_MH, initial_alpha, initial_vmic, \
                    free_params=["teff", "logg","vmic"], \
                    adjust_model_metalicity=True, \
                    max_iterations=max_iterations, \
                    enhance_abundances=True, \
                    #outliers_detection = "robust", \
                    #outliers_weight_limit = 0.90, \
                    outliers_detection = "sigma_clipping", \
                    #sigma_level = 3, \
                    tmp_dir = None, \
                    code=code)
            params, errors, status, x_over_h, selected_x_over_h, fitted_lines_params, used_linemasks = results
            # Persist every output of the fit next to the input spectrum.
            data = []
            for key,value in params.items():
                data.append([key,value])
            errs=[]
            for key,value in errors.items():
                errs.append(value)
            df = pd.DataFrame(data, columns=['Parameter','Value'])
            df['Errors']=errs
            df.to_csv(p +'/params.csv', index=False)
            stat=[]
            for key,value in status.items():
                stat.append([key,value])
            df=pd.DataFrame(stat)
            df.to_csv(p +'/status.csv', index=False)
            df =pd.DataFrame(fitted_lines_params)
            df.to_csv(p +'/fitted_line_params.csv', index=False)
            df =pd.DataFrame(used_linemasks)
            df.to_csv(p +'/used_linemasks.csv', index=False)
            np.savetxt(p + '/x_over_h_ew.csv',x_over_h, delimiter=',')
            file=open(p +'/selected_x_over_h.txt', 'w')
            for element in selected_x_over_h:
                file.write(str(element)+ '\n')
            # NOTE(review): `file` is never closed -- presumably relies on GC;
            # confirm and consider a `with` block upstream.
            x_h = [i for i in x_over_h if str(i) != 'nan']
            lower_state_e=[row[4] for row in used_linemasks]
            element = [row[0] for row in used_linemasks]
            lower_state_ev_1 = [lower_state_e[i] for i in range(len(lower_state_e)) if element[i]=='Fe 1']
            lower_state_ev_2 = [lower_state_e[i] for i in range(len(lower_state_e)) if element[i]=='Fe 2' ]
            avg = [mean(x_h) for i in range(len(lower_state_e))]
            x_h_1 = [x_h[i] for i in range(len(x_h)) if element[i]=='Fe 1' ]
            x_h_2 = [x_h[i] for i in range(len(x_h)) if element[i]=='Fe 2' ]
    # NOTE(review): original indentation was lost; this bare return is
    # reconstructed at function level (after all paths) -- confirm upstream.
    return
# Wire up the notebook UI: a button that triggers the EW analysis and an
# output area for its messages.
run = widgets.Button(description='Run Analysis Preparation')
out9 = widgets.Output(layout={'border': '1px solid black'})
box9 = widgets.VBox([widgets.VBox([run]),out9])
run.on_click(parameters_from_ew)
display(box9)
|
alixvioletREPO_NAMEPAWSPATH_START.@PAWS_extracted@PAWS-main@pipeline.py@.PATH_END.py
|
{
"filename": "_samples_generator.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/scikit-learn/py3/sklearn/datasets/_samples_generator.py",
"type": "Python"
}
|
"""
Generate samples of synthetic data sets.
"""
# Authors: B. Thirion, G. Varoquaux, A. Gramfort, V. Michel, O. Grisel,
# G. Louppe, J. Nothman
# License: BSD 3 clause
import array
import numbers
import warnings
from collections.abc import Iterable
from numbers import Integral, Real
import numpy as np
import scipy.sparse as sp
from scipy import linalg
from ..preprocessing import MultiLabelBinarizer
from ..utils import check_array, check_random_state
from ..utils import shuffle as util_shuffle
from ..utils._param_validation import Hidden, Interval, StrOptions, validate_params
from ..utils.random import sample_without_replacement
def _generate_hypercube(samples, dimensions, rng):
    """Returns distinct binary samples of length dimensions."""
    if dimensions > 30:
        # Too wide for the exact-distinct scheme below: draw the extra
        # dimensions as plain random bits and recurse for the first 30,
        # whose distinctness guarantees the rows stay distinct.
        extra_bits = rng.randint(2, size=(samples, dimensions - 30))
        return np.hstack([extra_bits, _generate_hypercube(samples, 30, rng)])
    # Sample distinct integer codes in [0, 2**dimensions) ...
    codes = sample_without_replacement(
        2**dimensions, samples, random_state=rng
    ).astype(dtype=">u4", copy=False)
    # ... and expand each 32-bit code into its bits, keeping the low
    # `dimensions` bits as the binary sample.
    bits = np.unpackbits(codes.view(">u1")).reshape((-1, 32))
    return bits[:, -dimensions:]
@validate_params(
    {
        "n_samples": [Interval(Integral, 1, None, closed="left")],
        "n_features": [Interval(Integral, 1, None, closed="left")],
        "n_informative": [Interval(Integral, 1, None, closed="left")],
        "n_redundant": [Interval(Integral, 0, None, closed="left")],
        "n_repeated": [Interval(Integral, 0, None, closed="left")],
        "n_classes": [Interval(Integral, 1, None, closed="left")],
        "n_clusters_per_class": [Interval(Integral, 1, None, closed="left")],
        "weights": ["array-like", None],
        "flip_y": [Interval(Real, 0, 1, closed="both")],
        "class_sep": [Interval(Real, 0, None, closed="neither")],
        "hypercube": ["boolean"],
        "shift": [Interval(Real, None, None, closed="neither"), "array-like", None],
        "scale": [Interval(Real, 0, None, closed="neither"), "array-like", None],
        "shuffle": ["boolean"],
        "random_state": ["random_state"],
    },
    prefer_skip_nested_validation=True,
)
def make_classification(
    n_samples=100,
    n_features=20,
    *,
    n_informative=2,
    n_redundant=2,
    n_repeated=0,
    n_classes=2,
    n_clusters_per_class=2,
    weights=None,
    flip_y=0.01,
    class_sep=1.0,
    hypercube=True,
    shift=0.0,
    scale=1.0,
    shuffle=True,
    random_state=None,
):
    """Generate a random n-class classification problem.

    This initially creates clusters of points normally distributed (std=1)
    about vertices of an ``n_informative``-dimensional hypercube with sides of
    length ``2*class_sep`` and assigns an equal number of clusters to each
    class. It introduces interdependence between these features and adds
    various types of further noise to the data.

    Without shuffling, ``X`` horizontally stacks features in the following
    order: the primary ``n_informative`` features, followed by ``n_redundant``
    linear combinations of the informative features, followed by ``n_repeated``
    duplicates, drawn randomly with replacement from the informative and
    redundant features. The remaining features are filled with random noise.
    Thus, without shuffling, all useful features are contained in the columns
    ``X[:, :n_informative + n_redundant + n_repeated]``.

    For an example of usage, see
    :ref:`sphx_glr_auto_examples_datasets_plot_random_dataset.py`.

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    n_samples : int, default=100
        The number of samples.

    n_features : int, default=20
        The total number of features. These comprise ``n_informative``
        informative features, ``n_redundant`` redundant features,
        ``n_repeated`` duplicated features and
        ``n_features-n_informative-n_redundant-n_repeated`` useless features
        drawn at random.

    n_informative : int, default=2
        The number of informative features. Each class is composed of a number
        of gaussian clusters each located around the vertices of a hypercube
        in a subspace of dimension ``n_informative``. For each cluster,
        informative features are drawn independently from N(0, 1) and then
        randomly linearly combined within each cluster in order to add
        covariance. The clusters are then placed on the vertices of the
        hypercube.

    n_redundant : int, default=2
        The number of redundant features. These features are generated as
        random linear combinations of the informative features.

    n_repeated : int, default=0
        The number of duplicated features, drawn randomly from the informative
        and the redundant features.

    n_classes : int, default=2
        The number of classes (or labels) of the classification problem.

    n_clusters_per_class : int, default=2
        The number of clusters per class.

    weights : array-like of shape (n_classes,) or (n_classes - 1,),\
              default=None
        The proportions of samples assigned to each class. If None, then
        classes are balanced. Note that if ``len(weights) == n_classes - 1``,
        then the last class weight is automatically inferred.
        More than ``n_samples`` samples may be returned if the sum of
        ``weights`` exceeds 1. Note that the actual class proportions will
        not exactly match ``weights`` when ``flip_y`` isn't 0.

    flip_y : float, default=0.01
        The fraction of samples whose class is assigned randomly. Larger
        values introduce noise in the labels and make the classification
        task harder. Note that the default setting flip_y > 0 might lead
        to less than ``n_classes`` in y in some cases.

    class_sep : float, default=1.0
        The factor multiplying the hypercube size. Larger values spread
        out the clusters/classes and make the classification task easier.

    hypercube : bool, default=True
        If True, the clusters are put on the vertices of a hypercube. If
        False, the clusters are put on the vertices of a random polytope.

    shift : float, ndarray of shape (n_features,) or None, default=0.0
        Shift features by the specified value. If None, then features
        are shifted by a random value drawn in [-class_sep, class_sep].

    scale : float, ndarray of shape (n_features,) or None, default=1.0
        Multiply features by the specified value. If None, then features
        are scaled by a random value drawn in [1, 100]. Note that scaling
        happens after shifting.

    shuffle : bool, default=True
        Shuffle the samples and the features.

    random_state : int, RandomState instance or None, default=None
        Determines random number generation for dataset creation. Pass an int
        for reproducible output across multiple function calls.
        See :term:`Glossary <random_state>`.

    Returns
    -------
    X : ndarray of shape (n_samples, n_features)
        The generated samples.

    y : ndarray of shape (n_samples,)
        The integer labels for class membership of each sample.

    See Also
    --------
    make_blobs : Simplified variant.
    make_multilabel_classification : Unrelated generator for multilabel tasks.

    Notes
    -----
    The algorithm is adapted from Guyon [1] and was designed to generate
    the "Madelon" dataset.

    References
    ----------
    .. [1] I. Guyon, "Design of experiments for the NIPS 2003 variable
           selection benchmark", 2003.

    Examples
    --------
    >>> from sklearn.datasets import make_classification
    >>> X, y = make_classification(random_state=42)
    >>> X.shape
    (100, 20)
    >>> y.shape
    (100,)
    >>> list(y[:5])
    [0, 0, 1, 1, 0]
    """
    generator = check_random_state(random_state)

    # Count features, clusters and samples
    if n_informative + n_redundant + n_repeated > n_features:
        raise ValueError(
            "Number of informative, redundant and repeated "
            "features must sum to less than the number of total"
            " features"
        )
    # Use log2 to avoid overflow errors
    if n_informative < np.log2(n_classes * n_clusters_per_class):
        msg = "n_classes({}) * n_clusters_per_class({}) must be"
        msg += " smaller or equal 2**n_informative({})={}"
        raise ValueError(
            msg.format(
                n_classes, n_clusters_per_class, n_informative, 2**n_informative
            )
        )

    if weights is not None:
        if len(weights) not in [n_classes, n_classes - 1]:
            raise ValueError(
                "Weights specified but incompatible with number of classes."
            )
        if len(weights) == n_classes - 1:
            # Infer the last class weight so the weights sum to 1.
            if isinstance(weights, list):
                weights = weights + [1.0 - sum(weights)]
            else:
                weights = np.resize(weights, n_classes)
                weights[-1] = 1.0 - sum(weights[:-1])
    else:
        weights = [1.0 / n_classes] * n_classes

    n_useless = n_features - n_informative - n_redundant - n_repeated
    n_clusters = n_classes * n_clusters_per_class

    # Distribute samples among clusters by weight
    n_samples_per_cluster = [
        int(n_samples * weights[k % n_classes] / n_clusters_per_class)
        for k in range(n_clusters)
    ]

    # Hand out any rounding remainder one sample at a time.
    for i in range(n_samples - sum(n_samples_per_cluster)):
        n_samples_per_cluster[i % n_clusters] += 1

    # Initialize X and y
    X = np.zeros((n_samples, n_features))
    y = np.zeros(n_samples, dtype=int)

    # Build the polytope whose vertices become cluster centroids
    centroids = _generate_hypercube(n_clusters, n_informative, generator).astype(
        float, copy=False
    )
    centroids *= 2 * class_sep
    centroids -= class_sep
    if not hypercube:
        centroids *= generator.uniform(size=(n_clusters, 1))
        centroids *= generator.uniform(size=(1, n_informative))

    # Initially draw informative features from the standard normal
    X[:, :n_informative] = generator.standard_normal(size=(n_samples, n_informative))

    # Create each cluster; a variant of make_blobs
    stop = 0
    for k, centroid in enumerate(centroids):
        start, stop = stop, stop + n_samples_per_cluster[k]
        y[start:stop] = k % n_classes  # assign labels
        X_k = X[start:stop, :n_informative]  # slice a view of the cluster

        A = 2 * generator.uniform(size=(n_informative, n_informative)) - 1
        X_k[...] = np.dot(X_k, A)  # introduce random covariance

        X_k += centroid  # shift the cluster to a vertex

    # Create redundant features
    if n_redundant > 0:
        B = 2 * generator.uniform(size=(n_informative, n_redundant)) - 1
        X[:, n_informative : n_informative + n_redundant] = np.dot(
            X[:, :n_informative], B
        )

    # Repeat some features
    if n_repeated > 0:
        n = n_informative + n_redundant
        indices = ((n - 1) * generator.uniform(size=n_repeated) + 0.5).astype(np.intp)
        X[:, n : n + n_repeated] = X[:, indices]

    # Fill useless features
    if n_useless > 0:
        X[:, -n_useless:] = generator.standard_normal(size=(n_samples, n_useless))

    # Randomly replace labels
    if flip_y >= 0.0:
        flip_mask = generator.uniform(size=n_samples) < flip_y
        y[flip_mask] = generator.randint(n_classes, size=flip_mask.sum())

    # Randomly shift and scale
    if shift is None:
        shift = (2 * generator.uniform(size=n_features) - 1) * class_sep
    X += shift

    if scale is None:
        scale = 1 + 100 * generator.uniform(size=n_features)
    X *= scale

    if shuffle:
        # Randomly permute samples
        X, y = util_shuffle(X, y, random_state=generator)

        # Randomly permute features
        indices = np.arange(n_features)
        generator.shuffle(indices)
        X[:, :] = X[:, indices]

    return X, y
@validate_params(
    {
        "n_samples": [Interval(Integral, 1, None, closed="left")],
        "n_features": [Interval(Integral, 1, None, closed="left")],
        "n_classes": [Interval(Integral, 1, None, closed="left")],
        "n_labels": [Interval(Integral, 0, None, closed="left")],
        "length": [Interval(Integral, 1, None, closed="left")],
        "allow_unlabeled": ["boolean"],
        "sparse": ["boolean"],
        "return_indicator": [StrOptions({"dense", "sparse"}), "boolean"],
        "return_distributions": ["boolean"],
        "random_state": ["random_state"],
    },
    prefer_skip_nested_validation=True,
)
def make_multilabel_classification(
    n_samples=100,
    n_features=20,
    *,
    n_classes=5,
    n_labels=2,
    length=50,
    allow_unlabeled=True,
    sparse=False,
    return_indicator="dense",
    return_distributions=False,
    random_state=None,
):
    """Generate a random multilabel classification problem.
    For each sample, the generative process is:
    - pick the number of labels: n ~ Poisson(n_labels)
    - n times, choose a class c: c ~ Multinomial(theta)
    - pick the document length: k ~ Poisson(length)
    - k times, choose a word: w ~ Multinomial(theta_c)
    In the above process, rejection sampling is used to make sure that
    n is never zero or more than `n_classes`, and that the document length
    is never zero. Likewise, we reject classes which have already been chosen.
    For an example of usage, see
    :ref:`sphx_glr_auto_examples_datasets_plot_random_multilabel_dataset.py`.
    Read more in the :ref:`User Guide <sample_generators>`.
    Parameters
    ----------
    n_samples : int, default=100
        The number of samples.
    n_features : int, default=20
        The total number of features.
    n_classes : int, default=5
        The number of classes of the classification problem.
    n_labels : int, default=2
        The average number of labels per instance. More precisely, the number
        of labels per sample is drawn from a Poisson distribution with
        ``n_labels`` as its expected value, but samples are bounded (using
        rejection sampling) by ``n_classes``, and must be nonzero if
        ``allow_unlabeled`` is False.
    length : int, default=50
        The sum of the features (number of words if documents) is drawn from
        a Poisson distribution with this expected value.
    allow_unlabeled : bool, default=True
        If ``True``, some instances might not belong to any class.
    sparse : bool, default=False
        If ``True``, return a sparse feature matrix.
        .. versionadded:: 0.17
           parameter to allow *sparse* output.
    return_indicator : {'dense', 'sparse'} or False, default='dense'
        If ``'dense'`` return ``Y`` in the dense binary indicator format. If
        ``'sparse'`` return ``Y`` in the sparse binary indicator format.
        ``False`` returns a list of lists of labels.
    return_distributions : bool, default=False
        If ``True``, return the prior class probability and conditional
        probabilities of features given classes, from which the data was
        drawn.
    random_state : int, RandomState instance or None, default=None
        Determines random number generation for dataset creation. Pass an int
        for reproducible output across multiple function calls.
        See :term:`Glossary <random_state>`.
    Returns
    -------
    X : ndarray of shape (n_samples, n_features)
        The generated samples.
    Y : {ndarray, sparse matrix} of shape (n_samples, n_classes)
        The label sets. Sparse matrix should be of CSR format.
    p_c : ndarray of shape (n_classes,)
        The probability of each class being drawn. Only returned if
        ``return_distributions=True``.
    p_w_c : ndarray of shape (n_features, n_classes)
        The probability of each feature being drawn given each class.
        Only returned if ``return_distributions=True``.
    Examples
    --------
    >>> from sklearn.datasets import make_multilabel_classification
    >>> X, y = make_multilabel_classification(n_labels=3, random_state=42)
    >>> X.shape
    (100, 20)
    >>> y.shape
    (100, 5)
    >>> list(y[:3])
    [array([1, 1, 0, 1, 0]), array([0, 1, 1, 1, 0]), array([0, 1, 0, 0, 0])]
    """
    generator = check_random_state(random_state)
    # Class prior p(c): random positive weights normalized to sum to one.
    p_c = generator.uniform(size=n_classes)
    p_c /= p_c.sum()
    # CDF of p(c), used below for inverse-transform sampling of class labels.
    cumulative_p_c = np.cumsum(p_c)
    # Per-class word distributions p(w|c); each column sums to one.
    p_w_c = generator.uniform(size=(n_features, n_classes))
    p_w_c /= np.sum(p_w_c, axis=0)
    def sample_example():
        """Draw one (word indices, label list) pair from the generative model."""
        _, n_classes = p_w_c.shape
        # pick a nonzero number of labels per document by rejection sampling
        # Start out of range so the loop body executes at least once.
        y_size = n_classes + 1
        while (not allow_unlabeled and y_size == 0) or y_size > n_classes:
            y_size = generator.poisson(n_labels)
        # pick n classes
        y = set()
        while len(y) != y_size:
            # pick a class with probability P(c)
            # Inverse-CDF sampling; the set silently rejects duplicate draws.
            c = np.searchsorted(cumulative_p_c, generator.uniform(size=y_size - len(y)))
            y.update(c)
        y = list(y)
        # pick a non-zero document length by rejection sampling
        n_words = 0
        while n_words == 0:
            n_words = generator.poisson(length)
        # generate a document of length n_words
        if len(y) == 0:
            # if sample does not belong to any class, generate noise word
            words = generator.randint(n_features, size=n_words)
            return words, y
        # sample words with replacement from selected classes
        # Mixture of the selected classes' word distributions, as a CDF.
        cumulative_p_w_sample = p_w_c.take(y, axis=1).sum(axis=1).cumsum()
        cumulative_p_w_sample /= cumulative_p_w_sample[-1]
        words = np.searchsorted(cumulative_p_w_sample, generator.uniform(size=n_words))
        return words, y
    # Assemble X directly in CSR form: one row of word counts per sample.
    X_indices = array.array("i")
    X_indptr = array.array("i", [0])
    Y = []
    for i in range(n_samples):
        words, y = sample_example()
        X_indices.extend(words)
        X_indptr.append(len(X_indices))
        Y.append(y)
    X_data = np.ones(len(X_indices), dtype=np.float64)
    X = sp.csr_matrix((X_data, X_indices, X_indptr), shape=(n_samples, n_features))
    # Repeated draws of the same word become counts > 1 once duplicates merge.
    X.sum_duplicates()
    if not sparse:
        X = X.toarray()
    # return_indicator can be True due to backward compatibility
    if return_indicator in (True, "sparse", "dense"):
        lb = MultiLabelBinarizer(sparse_output=(return_indicator == "sparse"))
        # Fit on the full label range so Y always has n_classes columns.
        Y = lb.fit([range(n_classes)]).transform(Y)
    if return_distributions:
        return X, Y, p_c, p_w_c
    return X, Y
@validate_params(
    {
        "n_samples": [Interval(Integral, 1, None, closed="left")],
        "random_state": ["random_state"],
    },
    prefer_skip_nested_validation=True,
)
def make_hastie_10_2(n_samples=12000, *, random_state=None):
    """Generate data for binary classification used in Hastie et al. 2009, Example 10.2.
    The ten features are standard independent Gaussian and
    the target ``y`` is defined by::
    y[i] = 1 if np.sum(X[i] ** 2) > 9.34 else -1
    Read more in the :ref:`User Guide <sample_generators>`.
    Parameters
    ----------
    n_samples : int, default=12000
        The number of samples.
    random_state : int, RandomState instance or None, default=None
        Determines random number generation for dataset creation. Pass an int
        for reproducible output across multiple function calls.
        See :term:`Glossary <random_state>`.
    Returns
    -------
    X : ndarray of shape (n_samples, 10)
        The input samples.
    y : ndarray of shape (n_samples,)
        The output values.
    See Also
    --------
    make_gaussian_quantiles : A generalization of this dataset approach.
    References
    ----------
    .. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
       Learning Ed. 2", Springer, 2009.
    """
    rng = check_random_state(random_state)
    # Ten i.i.d. standard-normal features per sample.
    X = rng.normal(size=(n_samples, 10))
    # Label is +1 when the squared radius exceeds 9.34 (roughly the median of
    # a chi-squared variable with 10 degrees of freedom), -1 otherwise.
    y = np.where((X**2.0).sum(axis=1) > 9.34, 1.0, -1.0)
    return X, y
@validate_params(
    {
        "n_samples": [Interval(Integral, 1, None, closed="left")],
        "n_features": [Interval(Integral, 1, None, closed="left")],
        "n_informative": [Interval(Integral, 0, None, closed="left")],
        "n_targets": [Interval(Integral, 1, None, closed="left")],
        "bias": [Interval(Real, None, None, closed="neither")],
        "effective_rank": [Interval(Integral, 1, None, closed="left"), None],
        "tail_strength": [Interval(Real, 0, 1, closed="both")],
        "noise": [Interval(Real, 0, None, closed="left")],
        "shuffle": ["boolean"],
        "coef": ["boolean"],
        "random_state": ["random_state"],
    },
    prefer_skip_nested_validation=True,
)
def make_regression(
    n_samples=100,
    n_features=100,
    *,
    n_informative=10,
    n_targets=1,
    bias=0.0,
    effective_rank=None,
    tail_strength=0.5,
    noise=0.0,
    shuffle=True,
    coef=False,
    random_state=None,
):
    """Generate a random regression problem.
    The input set can either be well conditioned (by default) or have a low
    rank-fat tail singular profile. See :func:`make_low_rank_matrix` for
    more details.
    The output is generated by applying a (potentially biased) random linear
    regression model with `n_informative` nonzero regressors to the previously
    generated input and some gaussian centered noise with some adjustable
    scale.
    Read more in the :ref:`User Guide <sample_generators>`.
    Parameters
    ----------
    n_samples : int, default=100
        The number of samples.
    n_features : int, default=100
        The number of features.
    n_informative : int, default=10
        The number of informative features, i.e., the number of features used
        to build the linear model used to generate the output.
    n_targets : int, default=1
        The number of regression targets, i.e., the dimension of the y output
        vector associated with a sample. By default, the output is a scalar.
    bias : float, default=0.0
        The bias term in the underlying linear model.
    effective_rank : int, default=None
        If not None:
            The approximate number of singular vectors required to explain most
            of the input data by linear combinations. Using this kind of
            singular spectrum in the input allows the generator to reproduce
            the correlations often observed in practice.
        If None:
            The input set is well conditioned, centered and gaussian with
            unit variance.
    tail_strength : float, default=0.5
        The relative importance of the fat noisy tail of the singular values
        profile if `effective_rank` is not None. When a float, it should be
        between 0 and 1.
    noise : float, default=0.0
        The standard deviation of the gaussian noise applied to the output.
    shuffle : bool, default=True
        Shuffle the samples and the features.
    coef : bool, default=False
        If True, the coefficients of the underlying linear model are returned.
    random_state : int, RandomState instance or None, default=None
        Determines random number generation for dataset creation. Pass an int
        for reproducible output across multiple function calls.
        See :term:`Glossary <random_state>`.
    Returns
    -------
    X : ndarray of shape (n_samples, n_features)
        The input samples.
    y : ndarray of shape (n_samples,) or (n_samples, n_targets)
        The output values.
    coef : ndarray of shape (n_features,) or (n_features, n_targets)
        The coefficient of the underlying linear model. It is returned only if
        coef is True.
    Examples
    --------
    >>> from sklearn.datasets import make_regression
    >>> X, y = make_regression(n_samples=5, n_features=2, noise=1, random_state=42)
    >>> X
    array([[ 0.4967..., -0.1382... ],
        [ 0.6476...,  1.523...],
        [-0.2341..., -0.2341...],
        [-0.4694...,  0.5425...],
        [ 1.579...,  0.7674...]])
    >>> y
    array([  6.737...,  37.79..., -10.27...,   0.4017...,  42.22...])
    """
    # There cannot be more informative features than features.
    n_informative = min(n_features, n_informative)
    generator = check_random_state(random_state)
    if effective_rank is None:
        # Randomly generate a well conditioned input set
        X = generator.standard_normal(size=(n_samples, n_features))
    else:
        # Randomly generate a low rank, fat tail input set
        X = make_low_rank_matrix(
            n_samples=n_samples,
            n_features=n_features,
            effective_rank=effective_rank,
            tail_strength=tail_strength,
            random_state=generator,
        )
    # Generate a ground truth model with only n_informative features being non
    # zeros (the other features are not correlated to y and should be ignored
    # by a sparsifying regularizers such as L1 or elastic net)
    ground_truth = np.zeros((n_features, n_targets))
    ground_truth[:n_informative, :] = 100 * generator.uniform(
        size=(n_informative, n_targets)
    )
    y = np.dot(X, ground_truth) + bias
    # Add noise
    if noise > 0.0:
        y += generator.normal(scale=noise, size=y.shape)
    # Randomly permute samples and features
    if shuffle:
        X, y = util_shuffle(X, y, random_state=generator)
        # Apply the same feature permutation to the coefficients so that
        # X @ ground_truth still reproduces y (up to noise).
        indices = np.arange(n_features)
        generator.shuffle(indices)
        X[:, :] = X[:, indices]
        ground_truth = ground_truth[indices]
    # Collapse the (n_samples, 1) case to a 1-D target for n_targets == 1.
    y = np.squeeze(y)
    if coef:
        return X, y, np.squeeze(ground_truth)
    else:
        return X, y
@validate_params(
    {
        "n_samples": [Interval(Integral, 0, None, closed="left"), tuple],
        "shuffle": ["boolean"],
        "noise": [Interval(Real, 0, None, closed="left"), None],
        "random_state": ["random_state"],
        "factor": [Interval(Real, 0, 1, closed="left")],
    },
    prefer_skip_nested_validation=True,
)
def make_circles(
    n_samples=100, *, shuffle=True, noise=None, random_state=None, factor=0.8
):
    """Make a large circle containing a smaller circle in 2d.
    A simple toy dataset to visualize clustering and classification
    algorithms.
    Read more in the :ref:`User Guide <sample_generators>`.
    Parameters
    ----------
    n_samples : int or tuple of shape (2,), dtype=int, default=100
        If int, it is the total number of points generated.
        For odd numbers, the inner circle will have one point more than the
        outer circle.
        If two-element tuple, number of points in outer circle and inner
        circle.
        .. versionchanged:: 0.23
           Added two-element tuple.
    shuffle : bool, default=True
        Whether to shuffle the samples.
    noise : float, default=None
        Standard deviation of Gaussian noise added to the data.
    random_state : int, RandomState instance or None, default=None
        Determines random number generation for dataset shuffling and noise.
        Pass an int for reproducible output across multiple function calls.
        See :term:`Glossary <random_state>`.
    factor : float, default=.8
        Scale factor between inner and outer circle in the range `[0, 1)`.
    Returns
    -------
    X : ndarray of shape (n_samples, 2)
        The generated samples.
    y : ndarray of shape (n_samples,)
        The integer labels (0 or 1) for class membership of each sample.
    Examples
    --------
    >>> from sklearn.datasets import make_circles
    >>> X, y = make_circles(random_state=42)
    >>> X.shape
    (100, 2)
    >>> y.shape
    (100,)
    >>> list(y[:5])
    [1, 1, 1, 0, 0]
    """
    # Split the requested total between the two circles; a tuple gives
    # explicit per-circle counts.
    if isinstance(n_samples, numbers.Integral):
        n_out = n_samples // 2
        n_in = n_samples - n_out
    else:  # n_samples is a tuple
        if len(n_samples) != 2:
            raise ValueError("When a tuple, n_samples must have exactly two elements.")
        n_out, n_in = n_samples
    generator = check_random_state(random_state)
    # Angles are sampled with endpoint=False so the first and last points of
    # each circle do not coincide.
    theta_out = np.linspace(0, 2 * np.pi, n_out, endpoint=False)
    theta_in = np.linspace(0, 2 * np.pi, n_in, endpoint=False)
    # Outer circle has unit radius; inner circle is scaled down by `factor`.
    X = np.vstack(
        [
            np.append(np.cos(theta_out), np.cos(theta_in) * factor),
            np.append(np.sin(theta_out), np.sin(theta_in) * factor),
        ]
    ).T
    # Label 0 for the outer circle, 1 for the inner circle.
    y = np.hstack(
        [np.zeros(n_out, dtype=np.intp), np.ones(n_in, dtype=np.intp)]
    )
    if shuffle:
        X, y = util_shuffle(X, y, random_state=generator)
    if noise is not None:
        X += generator.normal(scale=noise, size=X.shape)
    return X, y
@validate_params(
    {
        "n_samples": [Interval(Integral, 1, None, closed="left"), tuple],
        "shuffle": ["boolean"],
        "noise": [Interval(Real, 0, None, closed="left"), None],
        "random_state": ["random_state"],
    },
    prefer_skip_nested_validation=True,
)
def make_moons(n_samples=100, *, shuffle=True, noise=None, random_state=None):
    """Make two interleaving half circles.
    A simple toy dataset to visualize clustering and classification
    algorithms. Read more in the :ref:`User Guide <sample_generators>`.
    Parameters
    ----------
    n_samples : int or tuple of shape (2,), dtype=int, default=100
        If int, the total number of points generated.
        If two-element tuple, number of points in each of two moons.
        .. versionchanged:: 0.23
           Added two-element tuple.
    shuffle : bool, default=True
        Whether to shuffle the samples.
    noise : float, default=None
        Standard deviation of Gaussian noise added to the data.
    random_state : int, RandomState instance or None, default=None
        Determines random number generation for dataset shuffling and noise.
        Pass an int for reproducible output across multiple function calls.
        See :term:`Glossary <random_state>`.
    Returns
    -------
    X : ndarray of shape (n_samples, 2)
        The generated samples.
    y : ndarray of shape (n_samples,)
        The integer labels (0 or 1) for class membership of each sample.
    """
    # Split the requested total between the two moons; a tuple gives explicit
    # per-moon counts.
    if isinstance(n_samples, numbers.Integral):
        n_out = n_samples // 2
        n_in = n_samples - n_out
    else:
        try:
            n_out, n_in = n_samples
        except ValueError as e:
            raise ValueError(
                "`n_samples` can be either an int or a two-element tuple."
            ) from e
    generator = check_random_state(random_state)
    angles_out = np.linspace(0, np.pi, n_out)
    angles_in = np.linspace(0, np.pi, n_in)
    # Outer moon: upper half of the unit circle. Inner moon: the half circle
    # reflected through (0.5, 0.25) so the two arcs interleave.
    X = np.vstack(
        [
            np.append(np.cos(angles_out), 1 - np.cos(angles_in)),
            np.append(np.sin(angles_out), 1 - np.sin(angles_in) - 0.5),
        ]
    ).T
    # Label 0 for the outer moon, 1 for the inner moon.
    y = np.hstack(
        [np.zeros(n_out, dtype=np.intp), np.ones(n_in, dtype=np.intp)]
    )
    if shuffle:
        X, y = util_shuffle(X, y, random_state=generator)
    if noise is not None:
        X += generator.normal(scale=noise, size=X.shape)
    return X, y
@validate_params(
    {
        "n_samples": [Interval(Integral, 1, None, closed="left"), "array-like"],
        "n_features": [Interval(Integral, 1, None, closed="left")],
        "centers": [Interval(Integral, 1, None, closed="left"), "array-like", None],
        "cluster_std": [Interval(Real, 0, None, closed="left"), "array-like"],
        "center_box": [tuple],
        "shuffle": ["boolean"],
        "random_state": ["random_state"],
        "return_centers": ["boolean"],
    },
    prefer_skip_nested_validation=True,
)
def make_blobs(
    n_samples=100,
    n_features=2,
    *,
    centers=None,
    cluster_std=1.0,
    center_box=(-10.0, 10.0),
    shuffle=True,
    random_state=None,
    return_centers=False,
):
    """Generate isotropic Gaussian blobs for clustering.
    For an example of usage, see
    :ref:`sphx_glr_auto_examples_datasets_plot_random_dataset.py`.
    Read more in the :ref:`User Guide <sample_generators>`.
    Parameters
    ----------
    n_samples : int or array-like, default=100
        If int, it is the total number of points equally divided among
        clusters.
        If array-like, each element of the sequence indicates
        the number of samples per cluster.
        .. versionchanged:: v0.20
            one can now pass an array-like to the ``n_samples`` parameter
    n_features : int, default=2
        The number of features for each sample.
    centers : int or array-like of shape (n_centers, n_features), default=None
        The number of centers to generate, or the fixed center locations.
        If n_samples is an int and centers is None, 3 centers are generated.
        If n_samples is array-like, centers must be
        either None or an array of length equal to the length of n_samples.
    cluster_std : float or array-like of float, default=1.0
        The standard deviation of the clusters.
    center_box : tuple of float (min, max), default=(-10.0, 10.0)
        The bounding box for each cluster center when centers are
        generated at random.
    shuffle : bool, default=True
        Shuffle the samples.
    random_state : int, RandomState instance or None, default=None
        Determines random number generation for dataset creation. Pass an int
        for reproducible output across multiple function calls.
        See :term:`Glossary <random_state>`.
    return_centers : bool, default=False
        If True, then return the centers of each cluster.
        .. versionadded:: 0.23
    Returns
    -------
    X : ndarray of shape (n_samples, n_features)
        The generated samples.
    y : ndarray of shape (n_samples,)
        The integer labels for cluster membership of each sample.
    centers : ndarray of shape (n_centers, n_features)
        The centers of each cluster. Only returned if
        ``return_centers=True``.
    See Also
    --------
    make_classification : A more intricate variant.
    Examples
    --------
    >>> from sklearn.datasets import make_blobs
    >>> X, y = make_blobs(n_samples=10, centers=3, n_features=2,
    ...                   random_state=0)
    >>> print(X.shape)
    (10, 2)
    >>> y
    array([0, 0, 1, 0, 2, 2, 2, 1, 1, 0])
    >>> X, y = make_blobs(n_samples=[3, 3, 4], centers=None, n_features=2,
    ...                   random_state=0)
    >>> print(X.shape)
    (10, 2)
    >>> y
    array([0, 1, 2, 0, 2, 2, 2, 1, 1, 0])
    """
    generator = check_random_state(random_state)
    if isinstance(n_samples, numbers.Integral):
        # Set n_centers by looking at centers arg
        if centers is None:
            centers = 3
        if isinstance(centers, numbers.Integral):
            # Draw the requested number of centers uniformly inside center_box.
            n_centers = centers
            centers = generator.uniform(
                center_box[0], center_box[1], size=(n_centers, n_features)
            )
        else:
            # Fixed center locations; they override n_features.
            centers = check_array(centers)
            n_features = centers.shape[1]
            n_centers = centers.shape[0]
    else:
        # Set n_centers by looking at [n_samples] arg
        # Per-cluster sample counts were given, so their length fixes the
        # number of clusters.
        n_centers = len(n_samples)
        if centers is None:
            centers = generator.uniform(
                center_box[0], center_box[1], size=(n_centers, n_features)
            )
        if not isinstance(centers, Iterable):
            raise ValueError(
                "Parameter `centers` must be array-like. Got {!r} instead".format(
                    centers
                )
            )
        if len(centers) != n_centers:
            raise ValueError(
                "Length of `n_samples` not consistent with number of "
                f"centers. Got n_samples = {n_samples} and centers = {centers}"
            )
        centers = check_array(centers)
        n_features = centers.shape[1]
    # stds: if cluster_std is given as list, it must be consistent
    # with the n_centers
    if hasattr(cluster_std, "__len__") and len(cluster_std) != n_centers:
        raise ValueError(
            "Length of `clusters_std` not consistent with "
            "number of centers. Got centers = {} "
            "and cluster_std = {}".format(centers, cluster_std)
        )
    # Broadcast a scalar std to one std per cluster.
    if isinstance(cluster_std, numbers.Real):
        cluster_std = np.full(len(centers), cluster_std)
    if isinstance(n_samples, Iterable):
        n_samples_per_center = n_samples
    else:
        # Divide the total as evenly as possible; the first (n_samples %
        # n_centers) clusters get one extra sample.
        n_samples_per_center = [int(n_samples // n_centers)] * n_centers
        for i in range(n_samples % n_centers):
            n_samples_per_center[i] += 1
    cum_sum_n_samples = np.cumsum(n_samples_per_center)
    X = np.empty(shape=(sum(n_samples_per_center), n_features), dtype=np.float64)
    y = np.empty(shape=(sum(n_samples_per_center),), dtype=int)
    # Fill contiguous row ranges of X, one isotropic Gaussian per cluster.
    for i, (n, std) in enumerate(zip(n_samples_per_center, cluster_std)):
        start_idx = cum_sum_n_samples[i - 1] if i > 0 else 0
        end_idx = cum_sum_n_samples[i]
        X[start_idx:end_idx] = generator.normal(
            loc=centers[i], scale=std, size=(n, n_features)
        )
        y[start_idx:end_idx] = i
    if shuffle:
        X, y = util_shuffle(X, y, random_state=generator)
    if return_centers:
        return X, y, centers
    else:
        return X, y
@validate_params(
    {
        "n_samples": [Interval(Integral, 1, None, closed="left")],
        "n_features": [Interval(Integral, 5, None, closed="left")],
        "noise": [Interval(Real, 0.0, None, closed="left")],
        "random_state": ["random_state"],
    },
    prefer_skip_nested_validation=True,
)
def make_friedman1(n_samples=100, n_features=10, *, noise=0.0, random_state=None):
    """Generate the "Friedman #1" regression problem.
    This dataset is described in Friedman [1] and Breiman [2].
    Inputs `X` are independent features uniformly distributed on the interval
    [0, 1]. The output `y` is created according to the formula::
    y(X) = 10 * sin(pi * X[:, 0] * X[:, 1]) + 20 * (X[:, 2] - 0.5) ** 2 \
+ 10 * X[:, 3] + 5 * X[:, 4] + noise * N(0, 1).
    Out of the `n_features` features, only 5 are actually used to compute
    `y`. The remaining features are independent of `y`.
    The number of features has to be >= 5.
    Read more in the :ref:`User Guide <sample_generators>`.
    Parameters
    ----------
    n_samples : int, default=100
        The number of samples.
    n_features : int, default=10
        The number of features. Should be at least 5.
    noise : float, default=0.0
        The standard deviation of the gaussian noise applied to the output.
    random_state : int, RandomState instance or None, default=None
        Determines random number generation for dataset noise. Pass an int
        for reproducible output across multiple function calls.
        See :term:`Glossary <random_state>`.
    Returns
    -------
    X : ndarray of shape (n_samples, n_features)
        The input samples.
    y : ndarray of shape (n_samples,)
        The output values.
    References
    ----------
    .. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
           of Statistics 19 (1), pages 1-67, 1991.
    .. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
           pages 123-140, 1996.
    Examples
    --------
    >>> from sklearn.datasets import make_friedman1
    >>> X, y = make_friedman1(random_state=42)
    >>> X.shape
    (100, 10)
    >>> y.shape
    (100,)
    >>> list(y[:3])
    [16.8..., 5.8..., 9.4...]
    """
    generator = check_random_state(random_state)
    # All features are i.i.d. uniform on [0, 1]; only the first five enter y.
    X = generator.uniform(size=(n_samples, n_features))
    # Accumulate the Friedman #1 terms left to right (same association as the
    # reference formula), then add Gaussian output noise.
    y = 10 * np.sin(np.pi * X[:, 0] * X[:, 1])
    y = y + 20 * (X[:, 2] - 0.5) ** 2
    y = y + 10 * X[:, 3]
    y = y + 5 * X[:, 4]
    y = y + noise * generator.standard_normal(size=(n_samples))
    return X, y
@validate_params(
    {
        "n_samples": [Interval(Integral, 1, None, closed="left")],
        "noise": [Interval(Real, 0, None, closed="left")],
        "random_state": ["random_state"],
    },
    prefer_skip_nested_validation=True,
)
def make_friedman2(n_samples=100, *, noise=0.0, random_state=None):
    """Generate the "Friedman #2" regression problem.
    This dataset is described in Friedman [1] and Breiman [2].
    Inputs `X` are 4 independent features uniformly distributed on the
    intervals::
    0 <= X[:, 0] <= 100,
    40 * pi <= X[:, 1] <= 560 * pi,
    0 <= X[:, 2] <= 1,
    1 <= X[:, 3] <= 11.
    The output `y` is created according to the formula::
    y(X) = (X[:, 0] ** 2 + (X[:, 1] * X[:, 2] \
- 1 / (X[:, 1] * X[:, 3])) ** 2) ** 0.5 + noise * N(0, 1).
    Read more in the :ref:`User Guide <sample_generators>`.
    Parameters
    ----------
    n_samples : int, default=100
        The number of samples.
    noise : float, default=0.0
        The standard deviation of the gaussian noise applied to the output.
    random_state : int, RandomState instance or None, default=None
        Determines random number generation for dataset noise. Pass an int
        for reproducible output across multiple function calls.
        See :term:`Glossary <random_state>`.
    Returns
    -------
    X : ndarray of shape (n_samples, 4)
        The input samples.
    y : ndarray of shape (n_samples,)
        The output values.
    References
    ----------
    .. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
           of Statistics 19 (1), pages 1-67, 1991.
    .. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
           pages 123-140, 1996.
    Examples
    --------
    >>> from sklearn.datasets import make_friedman2
    >>> X, y = make_friedman2(random_state=42)
    >>> X.shape
    (100, 4)
    >>> y.shape
    (100,)
    >>> list(y[:3])
    [1229.4..., 27.0..., 65.6...]
    """
    generator = check_random_state(random_state)
    # Draw all four features uniform on [0, 1], then rescale columns 0, 1 and
    # 3 in place to their documented intervals (column 2 stays on [0, 1]).
    X = generator.uniform(size=(n_samples, 4))
    X[:, 0] *= 100
    X[:, 1] *= 520 * np.pi
    X[:, 1] += 40 * np.pi
    X[:, 3] *= 10
    X[:, 3] += 1
    # Friedman #2 target: Euclidean norm of (X0, X1*X2 - 1/(X1*X3)),
    # plus Gaussian output noise.
    impedance_term = X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])
    y = (
        X[:, 0] ** 2 + impedance_term**2
    ) ** 0.5 + noise * generator.standard_normal(size=(n_samples))
    return X, y
@validate_params(
    {
        "n_samples": [Interval(Integral, 1, None, closed="left")],
        "noise": [Interval(Real, 0, None, closed="left")],
        "random_state": ["random_state"],
    },
    prefer_skip_nested_validation=True,
)
def make_friedman3(n_samples=100, *, noise=0.0, random_state=None):
    """Generate the "Friedman #3" regression problem.
    This dataset is described in Friedman [1] and Breiman [2].
    Inputs `X` are 4 independent features uniformly distributed on the
    intervals::
    0 <= X[:, 0] <= 100,
    40 * pi <= X[:, 1] <= 560 * pi,
    0 <= X[:, 2] <= 1,
    1 <= X[:, 3] <= 11.
    The output `y` is created according to the formula::
    y(X) = arctan((X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) \
/ X[:, 0]) + noise * N(0, 1).
    Read more in the :ref:`User Guide <sample_generators>`.
    Parameters
    ----------
    n_samples : int, default=100
        The number of samples.
    noise : float, default=0.0
        The standard deviation of the gaussian noise applied to the output.
    random_state : int, RandomState instance or None, default=None
        Determines random number generation for dataset noise. Pass an int
        for reproducible output across multiple function calls.
        See :term:`Glossary <random_state>`.
    Returns
    -------
    X : ndarray of shape (n_samples, 4)
        The input samples.
    y : ndarray of shape (n_samples,)
        The output values.
    References
    ----------
    .. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
           of Statistics 19 (1), pages 1-67, 1991.
    .. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
           pages 123-140, 1996.
    Examples
    --------
    >>> from sklearn.datasets import make_friedman3
    >>> X, y = make_friedman3(random_state=42)
    >>> X.shape
    (100, 4)
    >>> y.shape
    (100,)
    >>> list(y[:3])
    [1.5..., 0.9..., 0.4...]
    """
    generator = check_random_state(random_state)
    # Draw all four features uniform on [0, 1], then rescale columns 0, 1 and
    # 3 in place to their documented intervals (column 2 stays on [0, 1]).
    X = generator.uniform(size=(n_samples, 4))
    X[:, 0] *= 100
    X[:, 1] *= 520 * np.pi
    X[:, 1] += 40 * np.pi
    X[:, 3] *= 10
    X[:, 3] += 1
    # Friedman #3 target: arctangent of a phase-like ratio, plus Gaussian
    # output noise.
    phase_ratio = (X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) / X[:, 0]
    y = np.arctan(phase_ratio) + noise * generator.standard_normal(size=(n_samples))
    return X, y
@validate_params(
    {
        "n_samples": [Interval(Integral, 1, None, closed="left")],
        "n_features": [Interval(Integral, 1, None, closed="left")],
        "effective_rank": [Interval(Integral, 1, None, closed="left")],
        "tail_strength": [Interval(Real, 0, 1, closed="both")],
        "random_state": ["random_state"],
    },
    prefer_skip_nested_validation=True,
)
def make_low_rank_matrix(
    n_samples=100,
    n_features=100,
    *,
    effective_rank=10,
    tail_strength=0.5,
    random_state=None,
):
    """Generate a mostly low rank matrix with bell-shaped singular values.
    Most of the variance can be explained by a bell-shaped curve of width
    effective_rank: the low rank part of the singular values profile is::
    (1 - tail_strength) * exp(-1.0 * (i / effective_rank) ** 2)
    The remaining singular values' tail is fat, decreasing as::
    tail_strength * exp(-0.1 * i / effective_rank).
    The low rank part of the profile can be considered the structured
    signal part of the data while the tail can be considered the noisy
    part of the data that cannot be summarized by a low number of linear
    components (singular vectors).
    This kind of singular profiles is often seen in practice, for instance:
    - gray level pictures of faces
    - TF-IDF vectors of text documents crawled from the web
    Read more in the :ref:`User Guide <sample_generators>`.
    Parameters
    ----------
    n_samples : int, default=100
        The number of samples.
    n_features : int, default=100
        The number of features.
    effective_rank : int, default=10
        The approximate number of singular vectors required to explain most of
        the data by linear combinations.
    tail_strength : float, default=0.5
        The relative importance of the fat noisy tail of the singular values
        profile. The value should be between 0 and 1.
    random_state : int, RandomState instance or None, default=None
        Determines random number generation for dataset creation. Pass an int
        for reproducible output across multiple function calls.
        See :term:`Glossary <random_state>`.
    Returns
    -------
    X : ndarray of shape (n_samples, n_features)
        The matrix.
    """
    generator = check_random_state(random_state)
    n = min(n_samples, n_features)
    # Random (ortho normal) vectors: economy-size QR of Gaussian matrices
    # yields u (n_samples, n) and v (n_features, n) with orthonormal columns.
    u, _ = linalg.qr(
        generator.standard_normal(size=(n_samples, n)),
        mode="economic",
        check_finite=False,
    )
    v, _ = linalg.qr(
        generator.standard_normal(size=(n_features, n)),
        mode="economic",
        check_finite=False,
    )
    # Index of the singular values
    singular_ind = np.arange(n, dtype=np.float64)
    # Build the singular profile by assembling signal and noise components:
    # a Gaussian bump of width effective_rank plus a slowly decaying fat tail.
    low_rank = (1 - tail_strength) * np.exp(-1.0 * (singular_ind / effective_rank) ** 2)
    tail = tail_strength * np.exp(-0.1 * singular_ind / effective_rank)
    s = low_rank + tail
    # Assemble X = u @ diag(s) @ v.T. Multiplying by a diagonal matrix is just
    # a per-column scaling of u, so scale with broadcasting (`u * s`) instead
    # of materializing an (n, n) diagonal matrix and paying for a dense
    # matmul against mostly-zero entries.
    return np.dot(u * s, v.T)
@validate_params(
    {
        "n_samples": [Interval(Integral, 1, None, closed="left")],
        "n_components": [Interval(Integral, 1, None, closed="left")],
        "n_features": [Interval(Integral, 1, None, closed="left")],
        "n_nonzero_coefs": [Interval(Integral, 1, None, closed="left")],
        "random_state": ["random_state"],
        "data_transposed": ["boolean", Hidden(StrOptions({"deprecated"}))],
    },
    prefer_skip_nested_validation=True,
)
def make_sparse_coded_signal(
    n_samples,
    *,
    n_components,
    n_features,
    n_nonzero_coefs,
    random_state=None,
    data_transposed="deprecated",
):
    """Generate a signal as a sparse combination of dictionary elements.
    Returns a matrix `Y = DX`, such that `D` is of shape `(n_features, n_components)`,
    `X` is of shape `(n_components, n_samples)` and each column of `X` has exactly
    `n_nonzero_coefs` non-zero elements.
    Read more in the :ref:`User Guide <sample_generators>`.
    Parameters
    ----------
    n_samples : int
        Number of samples to generate.
    n_components : int
        Number of components in the dictionary.
    n_features : int
        Number of features of the dataset to generate.
    n_nonzero_coefs : int
        Number of active (non-zero) coefficients in each sample.
    random_state : int, RandomState instance or None, default=None
        Determines random number generation for dataset creation. Pass an int
        for reproducible output across multiple function calls.
        See :term:`Glossary <random_state>`.
    data_transposed : bool, default=False
        By default, Y, D and X are not transposed.
        .. versionadded:: 1.1
        .. versionchanged:: 1.3
            Default value changed from True to False.
        .. deprecated:: 1.3
            `data_transposed` is deprecated and will be removed in 1.5.
    Returns
    -------
    data : ndarray of shape (n_features, n_samples) or (n_samples, n_features)
        The encoded signal (Y). The shape is `(n_samples, n_features)` if
        `data_transposed` is False, otherwise it's `(n_features, n_samples)`.
    dictionary : ndarray of shape (n_features, n_components) or \
            (n_components, n_features)
        The dictionary with normalized components (D). The shape is
        `(n_components, n_features)` if `data_transposed` is False, otherwise it's
        `(n_features, n_components)`.
    code : ndarray of shape (n_components, n_samples) or (n_samples, n_components)
        The sparse code such that each column of this matrix has exactly
        n_nonzero_coefs non-zero items (X). The shape is `(n_samples, n_components)`
        if `data_transposed` is False, otherwise it's `(n_components, n_samples)`.
    """
    generator = check_random_state(random_state)
    # generate dictionary
    # Gaussian atoms, each column normalized to unit Euclidean norm.
    D = generator.standard_normal(size=(n_features, n_components))
    D /= np.sqrt(np.sum((D**2), axis=0))
    # generate code
    # For each sample, pick n_nonzero_coefs distinct atoms (shuffle + slice)
    # and give them Gaussian coefficients; all other entries stay zero.
    X = np.zeros((n_components, n_samples))
    for i in range(n_samples):
        idx = np.arange(n_components)
        generator.shuffle(idx)
        idx = idx[:n_nonzero_coefs]
        X[idx, i] = generator.standard_normal(size=n_nonzero_coefs)
    # encode signal
    Y = np.dot(D, X)
    # TODO(1.5) remove data_transposed
    # raise warning if data_transposed is not passed explicitly
    if data_transposed != "deprecated":
        warnings.warn(
            "data_transposed was deprecated in version 1.3 and will be removed in 1.5.",
            FutureWarning,
        )
    else:
        data_transposed = False
    # transpose if needed
    if not data_transposed:
        Y, D, X = Y.T, D.T, X.T
    # np.squeeze drops singleton dimensions (e.g. n_samples == 1); callers
    # unpack the resulting map as a 3-tuple: Y, D, X = make_sparse_coded_signal(...)
    return map(np.squeeze, (Y, D, X))
@validate_params(
    {
        "n_samples": [Interval(Integral, 1, None, closed="left")],
        "n_features": [Interval(Integral, 1, None, closed="left")],
        "random_state": ["random_state"],
    },
    prefer_skip_nested_validation=True,
)
def make_sparse_uncorrelated(n_samples=100, n_features=10, *, random_state=None):
    """Generate a random regression problem with sparse uncorrelated design.

    The dataset follows Celeux et al [1]::

        X ~ N(0, 1)
        y(X) = X[:, 0] + 2 * X[:, 1] - 2 * X[:, 2] - 1.5 * X[:, 3]

    Only the first 4 features are informative; the remaining features carry
    no signal.

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    n_samples : int, default=100
        The number of samples.

    n_features : int, default=10
        The number of features.

    random_state : int, RandomState instance or None, default=None
        Determines random number generation for dataset creation. Pass an int
        for reproducible output across multiple function calls.
        See :term:`Glossary <random_state>`.

    Returns
    -------
    X : ndarray of shape (n_samples, n_features)
        The input samples.

    y : ndarray of shape (n_samples,)
        The output values.

    References
    ----------
    .. [1] G. Celeux, M. El Anbari, J.-M. Marin, C. P. Robert,
           "Regularization in regression: comparing Bayesian and frequentist
           methods in a poorly informative situation", 2009.
    """
    rng = check_random_state(random_state)
    # Full design matrix drawn from a standard normal distribution.
    X = rng.normal(loc=0, scale=1, size=(n_samples, n_features))
    # Only the first four columns contribute to the target mean; every other
    # feature is pure noise.
    informative_mean = X[:, 0] + 2 * X[:, 1] - 2 * X[:, 2] - 1.5 * X[:, 3]
    y = rng.normal(loc=informative_mean, scale=np.ones(n_samples))
    return X, y
@validate_params(
    {
        "n_dim": [Interval(Integral, 1, None, closed="left")],
        "random_state": ["random_state"],
    },
    prefer_skip_nested_validation=True,
)
def make_spd_matrix(n_dim, *, random_state=None):
    """Generate a random symmetric, positive-definite matrix.

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    n_dim : int
        The matrix dimension.

    random_state : int, RandomState instance or None, default=None
        Determines random number generation for dataset creation. Pass an int
        for reproducible output across multiple function calls.
        See :term:`Glossary <random_state>`.

    Returns
    -------
    X : ndarray of shape (n_dim, n_dim)
        The random symmetric, positive-definite matrix.

    See Also
    --------
    make_sparse_spd_matrix: Generate a sparse symmetric definite positive matrix.

    Examples
    --------
    >>> from sklearn.datasets import make_spd_matrix
    >>> make_spd_matrix(n_dim=2, random_state=42)
    array([[2.09..., 0.34...],
           [0.34..., 0.21...]])
    """
    rng = check_random_state(random_state)
    # Build a random Gram matrix A.T @ A (symmetric PSD) and take its
    # singular vectors as an orthonormal basis.
    A = rng.uniform(size=(n_dim, n_dim))
    U, _, Vt = linalg.svd(np.dot(A.T, A), check_finite=False)
    # Re-assemble with diagonal entries shifted into [1, 2) so the result is
    # strictly positive definite.
    shifted_diag = 1.0 + np.diag(rng.uniform(size=n_dim))
    return np.dot(np.dot(U, shifted_diag), Vt)
@validate_params(
    {
        "n_dim": [Hidden(None), Interval(Integral, 1, None, closed="left")],
        "alpha": [Interval(Real, 0, 1, closed="both")],
        "norm_diag": ["boolean"],
        "smallest_coef": [Interval(Real, 0, 1, closed="both")],
        "largest_coef": [Interval(Real, 0, 1, closed="both")],
        "sparse_format": [
            StrOptions({"bsr", "coo", "csc", "csr", "dia", "dok", "lil"}),
            None,
        ],
        "random_state": ["random_state"],
        "dim": [
            Interval(Integral, 1, None, closed="left"),
            Hidden(StrOptions({"deprecated"})),
        ],
    },
    prefer_skip_nested_validation=True,
)
def make_sparse_spd_matrix(
    n_dim=None,
    *,
    alpha=0.95,
    norm_diag=False,
    smallest_coef=0.1,
    largest_coef=0.9,
    sparse_format=None,
    random_state=None,
    dim="deprecated",
):
    """Generate a sparse symmetric definite positive matrix.

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    n_dim : int, default=1
        The size of the random matrix to generate.

        .. versionchanged:: 1.4
            Renamed from ``dim`` to ``n_dim``.

    alpha : float, default=0.95
        The probability that a coefficient is zero (see notes). Larger values
        enforce more sparsity. The value should be in the range 0 and 1.

    norm_diag : bool, default=False
        Whether to normalize the output matrix to make the leading diagonal
        elements all 1.

    smallest_coef : float, default=0.1
        The value of the smallest coefficient between 0 and 1.

    largest_coef : float, default=0.9
        The value of the largest coefficient between 0 and 1.

    sparse_format : str, default=None
        String representing the output sparse format, such as 'csc', 'csr', etc.
        If ``None``, return a dense numpy ndarray.

        .. versionadded:: 1.4

    random_state : int, RandomState instance or None, default=None
        Determines random number generation for dataset creation. Pass an int
        for reproducible output across multiple function calls.
        See :term:`Glossary <random_state>`.

    dim : int, default=1
        The size of the random matrix to generate.

        .. deprecated:: 1.4
            `dim` is deprecated and will be removed in 1.6.

    Returns
    -------
    prec : ndarray or sparse matrix of shape (dim, dim)
        The generated matrix. If ``sparse_format=None``, this would be an ndarray.
        Otherwise, this will be a sparse matrix of the specified format.

    See Also
    --------
    make_spd_matrix : Generate a random symmetric, positive-definite matrix.

    Notes
    -----
    The sparsity is actually imposed on the cholesky factor of the matrix.
    Thus alpha does not translate directly into the filling fraction of
    the matrix itself.

    Examples
    --------
    >>> from sklearn.datasets import make_sparse_spd_matrix
    >>> make_sparse_spd_matrix(n_dim=4, norm_diag=False, random_state=42)
    array([[1., 0., 0., 0.],
           [0., 1., 0., 0.],
           [0., 0., 1., 0.],
           [0., 0., 0., 1.]])
    """
    random_state = check_random_state(random_state)

    # TODO(1.6): remove in 1.6
    # Also make sure to change `n_dim` default back to 1 and deprecate None
    if n_dim is not None and dim != "deprecated":
        raise ValueError(
            "`dim` and `n_dim` cannot be both specified. Please use `n_dim` only "
            "as `dim` is deprecated in v1.4 and will be removed in v1.6."
        )

    if dim != "deprecated":
        warnings.warn(
            (
                "dim was deprecated in version 1.4 and will be removed in 1.6."
                "Please use ``n_dim`` instead."
            ),
            FutureWarning,
        )
        _n_dim = dim
    elif n_dim is None:
        # Legacy default while `n_dim` still accepts None during the
        # deprecation window.
        _n_dim = 1
    else:
        _n_dim = n_dim

    # Start the Cholesky factor at -I; sparse off-diagonal entries are
    # added to it below.
    chol = -sp.eye(_n_dim)
    # Random entries with expected density 1 - alpha, each drawn uniformly
    # from [smallest_coef, largest_coef].
    aux = sp.random(
        m=_n_dim,
        n=_n_dim,
        density=1 - alpha,
        data_rvs=lambda x: random_state.uniform(
            low=smallest_coef, high=largest_coef, size=x
        ),
        random_state=random_state,
    )

    # We need to avoid "coo" format because it does not support slicing
    aux = sp.tril(aux, k=-1, format="csc")

    # Permute the lines: we don't want to have asymmetries in the final
    # SPD matrix
    permutation = random_state.permutation(_n_dim)
    aux = aux[permutation].T[permutation]
    chol += aux
    # chol.T @ chol is symmetric positive definite by construction.
    prec = chol.T @ chol

    if norm_diag:
        # Form the diagonal vector into a row matrix
        d = sp.diags(1.0 / np.sqrt(prec.diagonal()))
        prec = d @ prec @ d

    if sparse_format is None:
        return prec.toarray()
    else:
        return prec.asformat(sparse_format)
@validate_params(
    {
        "n_samples": [Interval(Integral, 1, None, closed="left")],
        "noise": [Interval(Real, 0, None, closed="left")],
        "random_state": ["random_state"],
        "hole": ["boolean"],
    },
    prefer_skip_nested_validation=True,
)
def make_swiss_roll(n_samples=100, *, noise=0.0, random_state=None, hole=False):
    """Generate a swiss roll dataset.

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    n_samples : int, default=100
        The number of sample points on the Swiss Roll.

    noise : float, default=0.0
        The standard deviation of the gaussian noise.

    random_state : int, RandomState instance or None, default=None
        Determines random number generation for dataset creation. Pass an int
        for reproducible output across multiple function calls.
        See :term:`Glossary <random_state>`.

    hole : bool, default=False
        If True generates the swiss roll with hole dataset.

    Returns
    -------
    X : ndarray of shape (n_samples, 3)
        The points.

    t : ndarray of shape (n_samples,)
        The univariate position of the sample according to the main dimension
        of the points in the manifold.

    Notes
    -----
    The algorithm is from Marsland [1].

    References
    ----------
    .. [1] S. Marsland, "Machine Learning: An Algorithmic Perspective", 2nd edition,
           Chapter 6, 2014.
           https://homepages.ecs.vuw.ac.nz/~marslast/Code/Ch6/lle.py
    """
    rng = check_random_state(random_state)

    if not hole:
        # Roll parameter t in [1.5*pi, 4.5*pi] and a uniform height y.
        t = 1.5 * np.pi * (1 + 2 * rng.uniform(size=n_samples))
        y = 21 * rng.uniform(size=n_samples)
    else:
        # Tile the (t, y) rectangle with a 3x3 grid of patches and drop the
        # central patch (index 4) to punch a hole in the manifold.
        corners = np.array(
            [[np.pi * (1.5 + i), j * 7] for i in range(3) for j in range(3)]
        )
        corners = np.delete(corners, 4, axis=0)
        corner_index = rng.choice(8, n_samples)
        parameters = rng.uniform(size=(2, n_samples)) * np.array([[np.pi], [7]])
        t, y = corners[corner_index].T + parameters

    # Embed the 2D (t, y) manifold in 3D by rolling along t.
    x = t * np.cos(t)
    z = t * np.sin(t)

    X = np.vstack((x, y, z))
    # Isotropic Gaussian jitter drawn as (3, n_samples) to keep the random
    # stream consumption unchanged.
    X += noise * rng.standard_normal(size=(3, n_samples))
    X = X.T
    return X, np.squeeze(t)
@validate_params(
    {
        "n_samples": [Interval(Integral, 1, None, closed="left")],
        "noise": [Interval(Real, 0, None, closed="left")],
        "random_state": ["random_state"],
    },
    prefer_skip_nested_validation=True,
)
def make_s_curve(n_samples=100, *, noise=0.0, random_state=None):
    """Generate an S curve dataset.

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    n_samples : int, default=100
        The number of sample points on the S curve.

    noise : float, default=0.0
        The standard deviation of the gaussian noise.

    random_state : int, RandomState instance or None, default=None
        Determines random number generation for dataset creation. Pass an int
        for reproducible output across multiple function calls.
        See :term:`Glossary <random_state>`.

    Returns
    -------
    X : ndarray of shape (n_samples, 3)
        The points.

    t : ndarray of shape (n_samples,)
        The univariate position of the sample according to the main dimension
        of the points in the manifold.
    """
    rng = check_random_state(random_state)
    # Curve parameter in [-1.5*pi, 1.5*pi]; kept as shape (1, n_samples) so
    # the column assignments below broadcast as before.
    t = 3 * np.pi * (rng.uniform(size=(1, n_samples)) - 0.5)

    X = np.empty(shape=(n_samples, 3), dtype=np.float64)
    X[:, 0] = np.sin(t)
    X[:, 1] = 2.0 * rng.uniform(size=n_samples)
    X[:, 2] = np.sign(t) * (np.cos(t) - 1)

    # Isotropic Gaussian jitter; drawn as (3, n_samples) then transposed so
    # the same random stream is consumed.
    X += noise * rng.standard_normal(size=(3, n_samples)).T
    return X, np.squeeze(t)
@validate_params(
    {
        "mean": ["array-like", None],
        "cov": [Interval(Real, 0, None, closed="left")],
        "n_samples": [Interval(Integral, 1, None, closed="left")],
        "n_features": [Interval(Integral, 1, None, closed="left")],
        "n_classes": [Interval(Integral, 1, None, closed="left")],
        "shuffle": ["boolean"],
        "random_state": ["random_state"],
    },
    prefer_skip_nested_validation=True,
)
def make_gaussian_quantiles(
    *,
    mean=None,
    cov=1.0,
    n_samples=100,
    n_features=2,
    n_classes=3,
    shuffle=True,
    random_state=None,
):
    r"""Generate isotropic Gaussian and label samples by quantile.

    Samples are drawn from a multi-dimensional normal distribution; classes
    are the nested concentric multi-dimensional shells separating roughly
    equal numbers of samples (quantiles of the :math:`\chi^2` distribution).

    For an example of usage, see
    :ref:`sphx_glr_auto_examples_datasets_plot_random_dataset.py`.

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    mean : array-like of shape (n_features,), default=None
        The mean of the multi-dimensional normal distribution.
        If None then use the origin (0, 0, ...).

    cov : float, default=1.0
        The covariance matrix will be this value times the unit matrix. This
        dataset only produces symmetric normal distributions.

    n_samples : int, default=100
        The total number of points equally divided among classes.

    n_features : int, default=2
        The number of features for each sample.

    n_classes : int, default=3
        The number of classes.

    shuffle : bool, default=True
        Shuffle the samples.

    random_state : int, RandomState instance or None, default=None
        Determines random number generation for dataset creation. Pass an int
        for reproducible output across multiple function calls.
        See :term:`Glossary <random_state>`.

    Returns
    -------
    X : ndarray of shape (n_samples, n_features)
        The generated samples.

    y : ndarray of shape (n_samples,)
        The integer labels for quantile membership of each sample.

    Notes
    -----
    The dataset is from Zhu et al [1].

    References
    ----------
    .. [1] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.

    Examples
    --------
    >>> from sklearn.datasets import make_gaussian_quantiles
    >>> X, y = make_gaussian_quantiles(random_state=42)
    >>> X.shape
    (100, 2)
    >>> y.shape
    (100,)
    >>> list(y[:5])
    [2, 0, 1, 0, 2]
    """
    if n_samples < n_classes:
        raise ValueError("n_samples must be at least n_classes")

    rng = check_random_state(random_state)
    mean = np.zeros(n_features) if mean is None else np.array(mean)

    # Draw an isotropic Gaussian sample around ``mean``.
    X = rng.multivariate_normal(mean, cov * np.identity(n_features), (n_samples,))

    # Order the samples by squared distance from the mean...
    idx = np.argsort(np.sum((X - mean[np.newaxis, :]) ** 2, axis=1))
    X = X[idx, :]

    # ...and cut that ordering into n_classes equal-sized shells; any
    # remainder is absorbed by the outermost class.
    step = n_samples // n_classes
    y = np.hstack(
        [
            np.repeat(np.arange(n_classes), step),
            np.repeat(n_classes - 1, n_samples - step * n_classes),
        ]
    )

    if shuffle:
        X, y = util_shuffle(X, y, random_state=rng)
    return X, y
def _shuffle(data, random_state=None):
    """Permute the rows and columns of ``data``.

    Returns the shuffled 2D array together with the row and column
    permutations that were applied, so cluster labels can be remapped.
    """
    rng = check_random_state(random_state)
    n_rows, n_cols = data.shape
    row_idx = rng.permutation(n_rows)
    col_idx = rng.permutation(n_cols)
    return data[row_idx][:, col_idx], row_idx, col_idx
@validate_params(
    {
        "shape": [tuple],
        "n_clusters": [Interval(Integral, 1, None, closed="left")],
        "noise": [Interval(Real, 0, None, closed="left")],
        "minval": [Interval(Real, None, None, closed="neither")],
        "maxval": [Interval(Real, None, None, closed="neither")],
        "shuffle": ["boolean"],
        "random_state": ["random_state"],
    },
    prefer_skip_nested_validation=True,
)
def make_biclusters(
    shape,
    n_clusters,
    *,
    noise=0.0,
    minval=10,
    maxval=100,
    shuffle=True,
    random_state=None,
):
    """Generate a constant block diagonal structure array for biclustering.

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    shape : tuple of shape (n_rows, n_cols)
        The shape of the result.

    n_clusters : int
        The number of biclusters.

    noise : float, default=0.0
        The standard deviation of the gaussian noise.

    minval : float, default=10
        Minimum value of a bicluster.

    maxval : float, default=100
        Maximum value of a bicluster.

    shuffle : bool, default=True
        Shuffle the samples.

    random_state : int, RandomState instance or None, default=None
        Determines random number generation for dataset creation. Pass an int
        for reproducible output across multiple function calls.
        See :term:`Glossary <random_state>`.

    Returns
    -------
    X : ndarray of shape `shape`
        The generated array.

    rows : ndarray of shape (n_clusters, X.shape[0])
        The indicators for cluster membership of each row.

    cols : ndarray of shape (n_clusters, X.shape[1])
        The indicators for cluster membership of each column.

    See Also
    --------
    make_checkerboard: Generate an array with block checkerboard structure for
        biclustering.

    References
    ----------
    .. [1] Dhillon, I. S. (2001, August). Co-clustering documents and
        words using bipartite spectral graph partitioning. In Proceedings
        of the seventh ACM SIGKDD international conference on Knowledge
        discovery and data mining (pp. 269-274). ACM.
    """
    generator = check_random_state(random_state)
    n_rows, n_cols = shape
    # One constant value per bicluster, drawn uniformly in [minval, maxval).
    consts = generator.uniform(minval, maxval, n_clusters)

    # row and column clusters of approximately equal sizes
    row_sizes = generator.multinomial(n_rows, np.repeat(1.0 / n_clusters, n_clusters))
    col_sizes = generator.multinomial(n_cols, np.repeat(1.0 / n_clusters, n_clusters))

    # Expand the sizes into a per-row / per-column cluster label.
    row_labels = np.hstack(
        [np.repeat(val, rep) for val, rep in zip(range(n_clusters), row_sizes)]
    )
    col_labels = np.hstack(
        [np.repeat(val, rep) for val, rep in zip(range(n_clusters), col_sizes)]
    )

    # Fill each bicluster with its constant; the outer products are disjoint
    # because row/column labels partition the axes.
    result = np.zeros(shape, dtype=np.float64)
    for i in range(n_clusters):
        selector = np.outer(row_labels == i, col_labels == i)
        result[selector] += consts[i]

    if noise > 0:
        result += generator.normal(scale=noise, size=result.shape)

    if shuffle:
        # Labels are permuted alongside the data so the returned indicator
        # matrices still match the shuffled array.
        result, row_idx, col_idx = _shuffle(result, random_state)
        row_labels = row_labels[row_idx]
        col_labels = col_labels[col_idx]

    rows = np.vstack([row_labels == c for c in range(n_clusters)])
    cols = np.vstack([col_labels == c for c in range(n_clusters)])

    return result, rows, cols
@validate_params(
    {
        "shape": [tuple],
        "n_clusters": [Interval(Integral, 1, None, closed="left"), "array-like"],
        "noise": [Interval(Real, 0, None, closed="left")],
        "minval": [Interval(Real, None, None, closed="neither")],
        "maxval": [Interval(Real, None, None, closed="neither")],
        "shuffle": ["boolean"],
        "random_state": ["random_state"],
    },
    prefer_skip_nested_validation=True,
)
def make_checkerboard(
    shape,
    n_clusters,
    *,
    noise=0.0,
    minval=10,
    maxval=100,
    shuffle=True,
    random_state=None,
):
    """Generate an array with block checkerboard structure for biclustering.

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    shape : tuple of shape (n_rows, n_cols)
        The shape of the result.

    n_clusters : int or array-like or shape (n_row_clusters, n_column_clusters)
        The number of row and column clusters.

    noise : float, default=0.0
        The standard deviation of the gaussian noise.

    minval : float, default=10
        Minimum value of a bicluster.

    maxval : float, default=100
        Maximum value of a bicluster.

    shuffle : bool, default=True
        Shuffle the samples.

    random_state : int, RandomState instance or None, default=None
        Determines random number generation for dataset creation. Pass an int
        for reproducible output across multiple function calls.
        See :term:`Glossary <random_state>`.

    Returns
    -------
    X : ndarray of shape `shape`
        The generated array.

    rows : ndarray of shape (n_clusters, X.shape[0])
        The indicators for cluster membership of each row.

    cols : ndarray of shape (n_clusters, X.shape[1])
        The indicators for cluster membership of each column.

    See Also
    --------
    make_biclusters : Generate an array with constant block diagonal structure
        for biclustering.

    References
    ----------
    .. [1] Kluger, Y., Basri, R., Chang, J. T., & Gerstein, M. (2003).
        Spectral biclustering of microarray data: coclustering genes
        and conditions. Genome research, 13(4), 703-716.
    """
    generator = check_random_state(random_state)

    # A scalar n_clusters means the same count along both axes.
    if hasattr(n_clusters, "__len__"):
        n_row_clusters, n_col_clusters = n_clusters
    else:
        n_row_clusters = n_col_clusters = n_clusters

    # row and column clusters of approximately equal sizes
    n_rows, n_cols = shape
    row_sizes = generator.multinomial(
        n_rows, np.repeat(1.0 / n_row_clusters, n_row_clusters)
    )
    col_sizes = generator.multinomial(
        n_cols, np.repeat(1.0 / n_col_clusters, n_col_clusters)
    )

    # Expand the sizes into a per-row / per-column cluster label.
    row_labels = np.hstack(
        [np.repeat(val, rep) for val, rep in zip(range(n_row_clusters), row_sizes)]
    )
    col_labels = np.hstack(
        [np.repeat(val, rep) for val, rep in zip(range(n_col_clusters), col_sizes)]
    )

    # Every (row cluster, column cluster) cell gets its own constant value.
    result = np.zeros(shape, dtype=np.float64)
    for i in range(n_row_clusters):
        for j in range(n_col_clusters):
            selector = np.outer(row_labels == i, col_labels == j)
            result[selector] += generator.uniform(minval, maxval)

    if noise > 0:
        result += generator.normal(scale=noise, size=result.shape)

    if shuffle:
        # Labels are permuted alongside the data so the returned indicator
        # matrices still match the shuffled array.
        result, row_idx, col_idx = _shuffle(result, random_state)
        row_labels = row_labels[row_idx]
        col_labels = col_labels[col_idx]

    # One indicator row per bicluster, i.e. per (row cluster, column cluster)
    # pair, hence the n_row_clusters * n_col_clusters repetitions.
    rows = np.vstack(
        [
            row_labels == label
            for label in range(n_row_clusters)
            for _ in range(n_col_clusters)
        ]
    )
    cols = np.vstack(
        [
            col_labels == label
            for _ in range(n_row_clusters)
            for label in range(n_col_clusters)
        ]
    )

    return result, rows, cols
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@scikit-learn@py3@sklearn@datasets@_samples_generator.py@.PATH_END.py
|
{
"filename": "test_hbias.py",
"repo_name": "LSSTDESC/CCL",
"repo_path": "CCL_extracted/CCL-master/pyccl/tests/test_hbias.py",
"type": "Python"
}
|
import numpy as np
import pytest
import pyccl as ccl
# Fiducial cosmology shared by every test below; BBKS transfer + linear
# power keeps the setup cheap since only halo-bias values are exercised.
COSMO = ccl.Cosmology(
    Omega_c=0.27, Omega_b=0.045, h=0.67, sigma8=0.8, n_s=0.96,
    transfer_function='bbks', matter_power_spectrum='linear')
# Halo-bias parametrizations under test.
HBFS = [ccl.halos.HaloBiasSheth99,
        ccl.halos.HaloBiasSheth01,
        ccl.halos.HaloBiasTinker10,
        ccl.halos.HaloBiasBhattacharya11]
# Mass inputs: scalar, list and ndarray, to check shape handling.
MS = [1E13, [1E12, 1E15], np.array([1E12, 1E15])]
MFOF = ccl.halos.MassDef('fof', 'matter')
MVIR = ccl.halos.MassDef('vir', 'critical')
# Paired element-wise with HBFS in test_bM_mdef_raises; each pairing is
# expected to be rejected with a ValueError by the constructor.
MDFS = [MVIR, MVIR, MFOF, MVIR]
@pytest.mark.parametrize('bM_class', HBFS)
def test_bM_subclasses_smoke(bM_class):
    """Every halo-bias subclass returns finite, shape-preserving values."""
    bias = bM_class()
    for mass in MS:
        value = bias(COSMO, mass, 0.9)
        assert np.all(np.isfinite(value))
        assert np.shape(value) == np.shape(mass)
@pytest.mark.parametrize('bM_pair', zip(HBFS, MDFS))
def test_bM_mdef_raises(bM_pair):
    """Constructing a bias model with an incompatible mass def fails."""
    bias_class, mass_def = bM_pair
    with pytest.raises(ValueError):
        bias_class(mass_def=mass_def)
def test_bM_SO_allgood():
    """Tinker10 accepts the virial (SO, critical) mass definition."""
    bias = ccl.halos.HaloBiasTinker10(mass_def=MVIR)
    for mass in MS:
        value = bias(COSMO, mass, 0.9)
        assert np.all(np.isfinite(value))
        assert np.shape(value) == np.shape(mass)
@pytest.mark.parametrize('name', ['Tinker10', 'Sheth99'])
def test_bM_from_string(name):
    """Bias models can be looked up by name and behave like direct use."""
    bias = ccl.halos.HaloBias.from_name(name)()
    for mass in MS:
        value = bias(COSMO, mass, 0.9)
        assert np.all(np.isfinite(value))
        assert np.shape(value) == np.shape(mass)
def test_bM_from_string_raises():
    """An unknown model name raises a KeyError."""
    with pytest.raises(KeyError):
        ccl.halos.HaloBias.from_name('Tinker11')
|
LSSTDESCREPO_NAMECCLPATH_START.@CCL_extracted@CCL-master@pyccl@tests@test_hbias.py@.PATH_END.py
|
{
"filename": "_points.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/violin/_points.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class PointsValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    # Validator for the ``violin.points`` attribute: one of "all",
    # "outliers", "suspectedoutliers", or False (hide sample points).
    def __init__(self, plotly_name="points", parent_name="violin", **kwargs):
        super(PointsValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            # Defaults below may be overridden by the caller via kwargs.
            edit_type=kwargs.pop("edit_type", "calc"),
            values=kwargs.pop(
                "values", ["all", "outliers", "suspectedoutliers", False]
            ),
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@violin@_points.py@.PATH_END.py
|
{
"filename": "drSpecSpectra.py",
"repo_name": "lwa-project/lsl",
"repo_path": "lsl_extracted/lsl-main/scripts/drSpecSpectra.py",
"type": "Python"
}
|
#!/usr/bin/env python3
"""
Given a DR spectrometer file, plot the time averaged spectra for each
polarization product.
"""
import os
import sys
import math
import numpy as np
import argparse
from lsl.reader.ldp import LWADataFile, DRSpecFile
from lsl.misc import parser as aph
import matplotlib.pyplot as plt
from lsl.misc import telemetry
telemetry.track_script()
def _best_freq_units(freq):
"""Given a numpy array of frequencies in Hz, return a new array with the
frequencies in the best units possible (kHz, MHz, etc.)."""
# Figure out how large the data are
scale = int(math.log10(freq.max()))
if scale >= 9:
divis = 1e9
units = 'GHz'
elif scale >= 6:
divis = 1e6
units = 'MHz'
elif scale >= 3:
divis = 1e3
units = 'kHz'
else:
divis = 1
units = 'Hz'
# Convert the frequency
newFreq = freq / divis
# Return units and freq
return (newFreq, units)
def main(args):
    """Plot the time-averaged spectra of a DR spectrometer file, one panel
    per tuning/polarization product, optionally saving the figure.

    Fixes relative to the previous revision:
      * tuning selection used true division (``i/len(products)``), so every
        spectrum except the first was plotted against tuning 2's frequency
        axis; integer division restores the intended split (the panel titles
        below already used ``i//len(products)``);
      * per-chunk difference curves were plotted when ``--disable-chunks``
        was given instead of when it was absent.
    """
    idf = LWADataFile(args.filename)
    if not isinstance(idf, DRSpecFile):
        raise RuntimeError("File '%s' does not appear to be a valid DR spectrometer file" % os.path.basename(args.filename))

    # Basic file information
    nFramesFile = idf.get_info('nframe')
    srate = idf.get_info('sample_rate')
    beam = idf.get_info('beam')
    beampols = idf.get_info('nbeampol')
    tInt = idf.get_info('tint')
    LFFT = idf.get_info('LFFT')
    products = idf.get_info('data_products')

    # Offset in frames for beampols beam/tuning/pol. sets
    args.skip = idf.offset(args.skip)

    # Number of frames to integrate over; round the requested averaging time
    # down to a whole number of integrations
    maxFrames = 10000
    nFrames = int(args.average / tInt)
    args.average = nFrames * tInt

    # Number of chunks needed to keep each read below maxFrames integrations
    nChunks = int(math.ceil(1.0*(nFrames)/maxFrames))

    # Date & Central Frequency
    beginDate = idf.get_info('start_time').datetime
    central_freq1 = idf.get_info('freq1')
    central_freq2 = idf.get_info('freq2')
    freq = np.fft.fftfreq(LFFT, d=1.0/srate)
    freq = np.fft.fftshift(freq)

    # File summary
    print(f"Filename: {args.filename}")
    print(f"Date of First Frame: {str(beginDate)}")
    print(f"Beam: {beam}")
    print(f"Tune/Pols: {beampols}")
    print(f"Sample Rate: {srate} Hz")
    print(f"Tuning Frequency: {central_freq1:.3f} Hz (1); {central_freq2:.3f} Hz (2)")
    print(f"Frames: {nFramesFile} ({nFramesFile*tInt:.3f} s)")
    print("---")
    print(f"Transform Length: {LFFT} channels")
    print(f"Integration Time: {tInt:.3f} s")
    print("---")
    print(f"Offset: {args.skip:.3f} s ({args.skip*srate*beampols/4096} frames)")
    print(f"Integration: {args.average:.3f} s ({nFrames} frames; {nFrames} frames per beam/tune/pol)")
    print(f"Chunks: {nChunks}")

    # Sanity check
    if args.skip/tInt > nFramesFile:
        raise RuntimeError("Requested offset is greater than file length")
    if nFrames > (nFramesFile - args.skip/tInt):
        raise RuntimeError("Requested integration time+offset is greater than file length")

    # Master loop over all of the file chunks
    masterWeight = np.zeros((nChunks, 2*len(products), LFFT))
    masterSpectra = np.zeros((nChunks, 2*len(products), LFFT))
    for i in range(nChunks):
        print(f"Working on chunk #{i+1} of {nChunks}")

        try:
            readT, t, data = idf.read(args.average/nChunks)
        except Exception as e:
            print(f"Error: {str(e)}")
            continue

        ## Integrate up the chunk
        data = data.mean(axis=1)

        ## Save, tracking the number of FFT windows contributing as the weight
        for stand in range(data.shape[0]):
            masterSpectra[i,stand,:] = data[stand,:]
            masterWeight[i,stand,:] = int(readT*srate/LFFT)

        ## We don't really need the data array anymore, so delete it
        del data

    # Now that we have read through all of the chunks, perform the final
    # weighted average over chunks
    spec = np.squeeze( (masterWeight*masterSpectra).sum(axis=0) / masterWeight.sum(axis=0) )

    # Frequencies
    freq1 = freq + central_freq1
    freq2 = freq + central_freq2

    # The plots: This is setup for the current configuration of 20 beampols
    fig = plt.figure()
    figsX = int(round(math.sqrt(2*len(products))))
    figsY = 2*len(products) // figsX

    # Put the frequencies in the best units possible
    freq1, units1 = _best_freq_units(freq1)
    freq2, units2 = _best_freq_units(freq2)

    for i in range(masterSpectra.shape[1]):
        # The first len(products) spectra belong to tuning 1, the rest to
        # tuning 2 -- integer division, matching the panel titles below.
        if i // len(products) == 0:
            freq = freq1
            units = units1
        else:
            freq = freq2
            units = units2

        ax = fig.add_subplot(figsX,figsY,i+1)
        currSpectra = np.squeeze( np.log10(spec[i,:])*10.0 )
        ax.plot(freq, currSpectra, label=f"{i+1} (avg)")

        # If there is more than one chunk, plot the difference between the
        # global average and each chunk (unless the user disabled it)
        if nChunks > 1 and not args.disable_chunks:
            for j in range(nChunks):
                # Some files are padded by zeros at the end and, thus, carry no
                # weight in the average spectra.  Skip over those.
                if masterWeight[j,i,:].sum() == 0:
                    continue

                # Calculate the difference between the spectra and plot
                subspectra = np.squeeze( np.log10(masterSpectra[j,i,:])*10.0 )
                diff = subspectra - currSpectra
                ax.plot(freq, diff, label='%i' % j)

        ax.set_title(f"Beam {beam}, Tune. {i//len(products)}, {products[i % len(products)]}")
        ax.set_xlabel(f"Frequency [{units}]")
        ax.set_ylabel('P.S.D. [dB/RBW]')
        ax.set_xlim([freq.min(), freq.max()])
        ax.legend(loc=0)

        print(f"For beam {beam}, tune. {i//len(products)}, {products[i % len(products)]} maximum in PSD at {freq[np.where(spec[i,:]==spec[i,:].max())][0]:.3f} {units}")

    print(f"RBW: {freq[1]-freq[0]:.4f} {units}")
    plt.subplots_adjust(hspace=0.35, wspace=0.30)
    plt.show()

    # Save spectra image if requested
    if args.output is not None:
        fig.savefig(args.output)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description='read in DR spectrometer files and create a collection of time-averaged spectra',
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument('filename', type=str,
help='filename to process')
parser.add_argument('-s', '--skip', type=aph.positive_or_zero_float, default=0.0,
help='skip the specified number of seconds at the beginning of the file')
parser.add_argument('-a', '--average', type=aph.positive_float, default=10.0,
help='number of seconds of data to average for spectra')
parser.add_argument('-q', '--quiet', dest='verbose', action='store_false',
help='run %(prog)s in silent mode')
parser.add_argument('-d', '--disable-chunks', action='store_true',
help='disable plotting chunks in addition to the global average')
parser.add_argument('-o', '--output', type=str,
help='output file name for spectra image')
args = parser.parse_args()
main(args)
|
lwa-projectREPO_NAMElslPATH_START.@lsl_extracted@lsl-main@scripts@drSpecSpectra.py@.PATH_END.py
|
{
"filename": "_color.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/candlestick/increasing/line/_color.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class ColorValidator(_plotly_utils.basevalidators.ColorValidator):
    # Validator for the ``candlestick.increasing.line.color`` attribute.
    def __init__(
        self, plotly_name="color", parent_name="candlestick.increasing.line", **kwargs
    ):
        super(ColorValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            # Default edit type; callers may override via kwargs.
            edit_type=kwargs.pop("edit_type", "style"),
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@candlestick@increasing@line@_color.py@.PATH_END.py
|
{
"filename": "strict_mode.py",
"repo_name": "tensorflow/tensorflow",
"repo_path": "tensorflow_extracted/tensorflow-master/tensorflow/python/framework/strict_mode.py",
"type": "Python"
}
|
# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python strict deprecation mode enabler."""
from tensorflow.python.util.tf_export import tf_export
# Module-level flag flipped by ``enable_strict_mode`` below; presumably
# consulted by the deprecation utilities -- confirm at the call sites.
STRICT_MODE = False


@tf_export("experimental.enable_strict_mode")
def enable_strict_mode():
  """If called, enables strict mode for all behaviors.

  Used to switch all deprecation warnings to raise errors instead.
  """
  # One-way switch: no public API resets STRICT_MODE to False.
  global STRICT_MODE
  STRICT_MODE = True
|
tensorflowREPO_NAMEtensorflowPATH_START.@tensorflow_extracted@tensorflow-master@tensorflow@python@framework@strict_mode.py@.PATH_END.py
|
{
"filename": "split_saved_model_gen.py",
"repo_name": "tensorflow/tensorflow",
"repo_path": "tensorflow_extracted/tensorflow-master/tensorflow/tools/proto_splitter/testdata/split_saved_model_gen.py",
"type": "Python"
}
|
# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Generates SavedModel test data for Merger.
Constructs chunked proto test data containing a SavedModel.
Example command:
bazel run tensorflow/tools/proto_splitter/testdata:split_saved_model_gen -- \
--path /tmp \
--saved_model_type=split-standard \
--export=pb,cpb
"""
from collections.abc import Sequence
import os
from absl import app
from absl import flags
from absl import logging
from tensorflow.core.protobuf import saved_model_pb2
from tensorflow.python.lib.io import file_io
from tensorflow.tools.proto_splitter import constants
from tensorflow.tools.proto_splitter.python import saved_model as split_saved_model
from tensorflow.tools.proto_splitter.python import test_util
STANDARD_SIZES = [100, 100, 1000, 100, 1000, 500, 100, 100, 100]
def _split_and_write(
    path: str,
    saved_model: saved_model_pb2.SavedModel,
    max_size: int,
    export_files: Sequence[str],
):
  """Exports a SavedModel in the requested formats.

  Args:
    path: Output path prefix; the format extension is appended.
    saved_model: SavedModel proto to export.
    max_size: Maximum chunk size configured before splitting.
    export_files: Formats to emit; any of "pb", "pbtxt", "cpb".

  Raises:
    RuntimeError: If "cpb" was requested but the proto did not chunk.
  """
  constants.debug_set_max_size(max_size)
  if "pbtxt" in export_files:
    pbtxt_path = f"{path}.pbtxt"
    file_io.write_string_to_file(pbtxt_path, str(saved_model))
    logging.info(" %s written", pbtxt_path)
  if "pb" in export_files:
    pb_path = f"{path}.pb"
    file_io.write_string_to_file(pb_path, saved_model.SerializeToString())
    logging.info(" %s written", pb_path)
  if "cpb" in export_files:
    splitter = split_saved_model.SavedModelSplitter(saved_model)
    splitter.write(path)
    chunks, _ = splitter.split()
    # A single chunk means no splitting happened and no .cpb was produced.
    if len(chunks) <= 1:
      raise RuntimeError(
          "For some reason this graph was not chunked, so a .cpb file was not"
          " produced. Raising an error since this should not be the case."
      )
    logging.info(" %s.cpb written", path)
def split_standard(path: str, export_files: Sequence[str]):
  """Builds and splits a standard SavedModel test proto."""
  chunk_limit = 500
  constants.debug_set_max_size(chunk_limit)
  # Constant-node sizes for the main graph come from STANDARD_SIZES; the
  # fn1..fn4 keyword lists size the nodes of the attached functions.
  graph_def = test_util.make_graph_def_with_constant_nodes(
      STANDARD_SIZES,
      fn1=[100, 100, 100],
      fn2=[100, 500],
      fn3=[100],
      fn4=[100, 100],
  )
  model = saved_model_pb2.SavedModel()
  model.meta_graphs.add().graph_def.CopyFrom(graph_def)
  _split_and_write(path, model, chunk_limit, export_files)
# Registry mapping each --saved_model_type value to its generator function.
VALID_SAVED_MODEL_TYPES = {
    "split-standard": split_standard,
}
# Comma-separated list of valid types, used in help text and error messages.
ALL_SAVED_MODEL_TYPES = ", ".join(VALID_SAVED_MODEL_TYPES.keys())
# Directory into which the generated testdata files are written.
SPLITTER_TESTDATA_PATH = flags.DEFINE_string(
    "path", None, help="Path to testdata directory."
)
# Saved-model type(s) to generate; "all" selects every registered type.
SAVED_MODEL_TYPES = flags.DEFINE_multi_string(
    "saved_model_type",
    "all",
    help=(
        "Type(s) of saved model to export. Valid types: all, "
        f"{ALL_SAVED_MODEL_TYPES}"
    ),
)
# File formats to write; "all" selects pb, pbtxt and cpb.
EXPORT_FILES = flags.DEFINE_multi_string(
    "export",
    "all",
    help="List of files to export. Valid options: all, pb, pbtxt, cpb",
)
def main(argv: Sequence[str]) -> None:
  """Generates the requested SavedModel testdata files."""
  if len(argv) > 1:
    raise app.UsageError("Too many command-line arguments.")
  # Normalize the flag values: "all" expands to every option.
  requested_files = EXPORT_FILES.value
  if "all" in requested_files:
    requested_files = ["pb", "pbtxt", "cpb"]
  requested_types = SAVED_MODEL_TYPES.value
  if "all" in requested_types:
    requested_types = VALID_SAVED_MODEL_TYPES.keys()
  for model_type in requested_types:
    if model_type not in VALID_SAVED_MODEL_TYPES:
      raise ValueError(
          "Invalid flag passed to `saved_model_type`: "
          f"{model_type}\nValid saved model types:"
          f" {ALL_SAVED_MODEL_TYPES}"
      )
    logging.info("Generating saved model %s", model_type)
    generator = VALID_SAVED_MODEL_TYPES[model_type]
    generator(
        os.path.join(SPLITTER_TESTDATA_PATH.value, model_type), requested_files
    )


if __name__ == "__main__":
  app.run(main)
|
tensorflowREPO_NAMEtensorflowPATH_START.@tensorflow_extracted@tensorflow-master@tensorflow@tools@proto_splitter@testdata@split_saved_model_gen.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "aewallin/allantools",
"repo_path": "allantools_extracted/allantools-master/tests/Keysight53230A_ti_noise_floor/__init__.py",
"type": "Python"
}
|
# for python import
|
aewallinREPO_NAMEallantoolsPATH_START.@allantools_extracted@allantools-master@tests@Keysight53230A_ti_noise_floor@__init__.py@.PATH_END.py
|
{
"filename": "_size.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/funnelarea/textfont/_size.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class SizeValidator(_plotly_utils.basevalidators.NumberValidator):
    """Validator for the ``funnelarea.textfont.size`` property."""

    def __init__(self, plotly_name="size", parent_name="funnelarea.textfont", **kwargs):
        # Pull plotly's defaults out of kwargs unless the caller overrides them.
        array_ok = kwargs.pop("array_ok", True)
        edit_type = kwargs.pop("edit_type", "plot")
        minimum = kwargs.pop("min", 1)
        super(SizeValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            array_ok=array_ok,
            edit_type=edit_type,
            min=minimum,
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@funnelarea@textfont@_size.py@.PATH_END.py
|
{
"filename": "test_lvc_candidates.py",
"repo_name": "virajkaram/emgwcave",
"repo_path": "emgwcave_extracted/emgwcave-main/tests/test_lvc_candidates.py",
"type": "Python"
}
|
"""
Test for ZTF in EMGWCave
"""
from emgwcave.__main__ import filter_candidates, setup_output_directories
import unittest
from emgwcave.skymap_utils import get_mjd_from_skymap
from astropy.time import Time
# Test fixture: LVC skymap and the event time derived from it.
skymap_path = 'data/skymaps/2023-04-17T22-42-11_bayestar.multiorder.fits'
mjd_event = get_mjd_from_skymap(skymap_path=skymap_path)
start_date_jd = mjd_event + 2400000.5  # MJD -> JD offset
time_window_days = 3.0
end_date_jd = Time('2023-04-25T22:42:11').jd  # Different from start_date_jd +
# time_window_days, deliberately, so the explicit end-date path is exercised.
outdir = 'data/output'
# Expected results of filtering this skymap over this time range.
NUM_CANDIDATES = 5
CANDIDATE_NAMES = ['ZTF23aagpsii', 'ZTF23aagpuvg', 'ZTF23aagvwth', 'ZTF23aahbnkn',
                   'ZTF23aahpjkh']
class TestLVCFiltering(unittest.TestCase):
    """Filtering of ZTF candidates against an LVC skymap."""

    def test_filtering(self):
        """Run the full filter and verify the expected candidates come back."""
        setup_output_directories(outdir)
        candidates = filter_candidates(skymap_path=skymap_path,
                                       cumprob=0.9,
                                       mjd_event=mjd_event,
                                       start_date_jd=start_date_jd,
                                       end_date_jd=end_date_jd,
                                       time_window_days=time_window_days,
                                       outdir=outdir,
                                       )
        self.assertEqual(len(candidates), NUM_CANDIDATES)
        self.assertEqual([c['objectId'] for c in candidates], CANDIDATE_NAMES)
if __name__ == '__main__':
    # Ad-hoc driver: run the filter once and print the result summary.
    setup_output_directories(outdir)
    candidates = filter_candidates(skymap_path=skymap_path,
                                   cumprob=0.9,
                                   mjd_event=mjd_event,
                                   start_date_jd=start_date_jd,
                                   end_date_jd=end_date_jd,
                                   time_window_days=time_window_days,
                                   outdir=outdir,
                                   )
    print("Num candidates:", len(candidates))
    print(f"Candidate names: {[c['objectId'] for c in candidates]}")
|
virajkaramREPO_NAMEemgwcavePATH_START.@emgwcave_extracted@emgwcave-main@tests@test_lvc_candidates.py@.PATH_END.py
|
{
"filename": "unix.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/platformdirs/platformdirs/unix.py",
"type": "Python"
}
|
"""Unix."""
from __future__ import annotations
import os
import sys
from configparser import ConfigParser
from pathlib import Path
from typing import Iterator, NoReturn
from .api import PlatformDirsABC
# os.getuid does not exist on Windows; install a stub that fails loudly so the
# runtime-dir properties raise a clear error instead of an AttributeError.
if sys.platform == "win32":
    def getuid() -> NoReturn:
        """Stub for Windows, where POSIX user ids are unavailable."""
        msg = "should only be used on Unix"
        raise RuntimeError(msg)
else:
    from os import getuid
class Unix(PlatformDirsABC):  # noqa: PLR0904
    """
    On Unix/Linux, we follow the `XDG Basedir Spec <https://specifications.freedesktop.org/basedir-spec/basedir-spec-
    latest.html>`_.
    The spec allows overriding directories with environment variables. The examples shown are the default values,
    alongside the name of the environment variable that overrides them. Makes use of the `appname
    <platformdirs.api.PlatformDirsABC.appname>`, `version <platformdirs.api.PlatformDirsABC.version>`, `multipath
    <platformdirs.api.PlatformDirsABC.multipath>`, `opinion <platformdirs.api.PlatformDirsABC.opinion>`, `ensure_exists
    <platformdirs.api.PlatformDirsABC.ensure_exists>`.
    """
    @property
    def user_data_dir(self) -> str:
        """
        :return: data directory tied to the user, e.g. ``~/.local/share/$appname/$version`` or
            ``$XDG_DATA_HOME/$appname/$version``
        """
        # A set-but-empty/whitespace env var falls through to the XDG default.
        path = os.environ.get("XDG_DATA_HOME", "")
        if not path.strip():
            path = os.path.expanduser("~/.local/share")  # noqa: PTH111
        return self._append_app_name_and_version(path)
    @property
    def _site_data_dirs(self) -> list[str]:
        # Raw $XDG_DATA_DIRS entries (appname/version appended); helper for site_data_dir.
        path = os.environ.get("XDG_DATA_DIRS", "")
        if not path.strip():
            path = f"/usr/local/share{os.pathsep}/usr/share"
        return [self._append_app_name_and_version(p) for p in path.split(os.pathsep)]
    @property
    def site_data_dir(self) -> str:
        """
        :return: data directories shared by users (if `multipath <platformdirs.api.PlatformDirsABC.multipath>` is
            enabled and ``XDG_DATA_DIRS`` is set and a multi path the response is also a multi path separated by the
            OS path separator), e.g. ``/usr/local/share/$appname/$version`` or ``/usr/share/$appname/$version``
        """
        # XDG default for $XDG_DATA_DIRS; only first, if multipath is False
        dirs = self._site_data_dirs
        if not self.multipath:
            return dirs[0]
        return os.pathsep.join(dirs)
    @property
    def user_config_dir(self) -> str:
        """
        :return: config directory tied to the user, e.g. ``~/.config/$appname/$version`` or
            ``$XDG_CONFIG_HOME/$appname/$version``
        """
        path = os.environ.get("XDG_CONFIG_HOME", "")
        if not path.strip():
            path = os.path.expanduser("~/.config")  # noqa: PTH111
        return self._append_app_name_and_version(path)
    @property
    def _site_config_dirs(self) -> list[str]:
        # Raw $XDG_CONFIG_DIRS entries (appname/version appended); helper for site_config_dir.
        path = os.environ.get("XDG_CONFIG_DIRS", "")
        if not path.strip():
            path = "/etc/xdg"
        return [self._append_app_name_and_version(p) for p in path.split(os.pathsep)]
    @property
    def site_config_dir(self) -> str:
        """
        :return: config directories shared by users (if `multipath <platformdirs.api.PlatformDirsABC.multipath>`
            is enabled and ``XDG_CONFIG_DIRS`` is set and a multi path the response is also a multi path separated by
            the OS path separator), e.g. ``/etc/xdg/$appname/$version``
        """
        # XDG default for $XDG_CONFIG_DIRS only first, if multipath is False
        dirs = self._site_config_dirs
        if not self.multipath:
            return dirs[0]
        return os.pathsep.join(dirs)
    @property
    def user_cache_dir(self) -> str:
        """
        :return: cache directory tied to the user, e.g. ``~/.cache/$appname/$version`` or
            ``~/$XDG_CACHE_HOME/$appname/$version``
        """
        path = os.environ.get("XDG_CACHE_HOME", "")
        if not path.strip():
            path = os.path.expanduser("~/.cache")  # noqa: PTH111
        return self._append_app_name_and_version(path)
    @property
    def site_cache_dir(self) -> str:
        """:return: cache directory shared by users, e.g. ``/var/cache/$appname/$version``"""
        return self._append_app_name_and_version("/var/cache")
    @property
    def user_state_dir(self) -> str:
        """
        :return: state directory tied to the user, e.g. ``~/.local/state/$appname/$version`` or
            ``$XDG_STATE_HOME/$appname/$version``
        """
        path = os.environ.get("XDG_STATE_HOME", "")
        if not path.strip():
            path = os.path.expanduser("~/.local/state")  # noqa: PTH111
        return self._append_app_name_and_version(path)
    @property
    def user_log_dir(self) -> str:
        """:return: log directory tied to the user, same as `user_state_dir` if not opinionated else ``log`` in it"""
        path = self.user_state_dir
        if self.opinion:
            path = os.path.join(path, "log")  # noqa: PTH118
            self._optionally_create_directory(path)
        return path
    @property
    def user_documents_dir(self) -> str:
        """:return: documents directory tied to the user, e.g. ``~/Documents``"""
        return _get_user_media_dir("XDG_DOCUMENTS_DIR", "~/Documents")
    @property
    def user_downloads_dir(self) -> str:
        """:return: downloads directory tied to the user, e.g. ``~/Downloads``"""
        return _get_user_media_dir("XDG_DOWNLOAD_DIR", "~/Downloads")
    @property
    def user_pictures_dir(self) -> str:
        """:return: pictures directory tied to the user, e.g. ``~/Pictures``"""
        return _get_user_media_dir("XDG_PICTURES_DIR", "~/Pictures")
    @property
    def user_videos_dir(self) -> str:
        """:return: videos directory tied to the user, e.g. ``~/Videos``"""
        return _get_user_media_dir("XDG_VIDEOS_DIR", "~/Videos")
    @property
    def user_music_dir(self) -> str:
        """:return: music directory tied to the user, e.g. ``~/Music``"""
        return _get_user_media_dir("XDG_MUSIC_DIR", "~/Music")
    @property
    def user_desktop_dir(self) -> str:
        """:return: desktop directory tied to the user, e.g. ``~/Desktop``"""
        return _get_user_media_dir("XDG_DESKTOP_DIR", "~/Desktop")
    @property
    def user_runtime_dir(self) -> str:
        """
        :return: runtime directory tied to the user, e.g. ``/run/user/$(id -u)/$appname/$version`` or
            ``$XDG_RUNTIME_DIR/$appname/$version``.
        For FreeBSD/OpenBSD/NetBSD, it would return ``/var/run/user/$(id -u)/$appname/$version`` if
        exists, otherwise ``/tmp/runtime-$(id -u)/$appname/$version``, if ``$XDG_RUNTIME_DIR``
        is not set.
        """
        path = os.environ.get("XDG_RUNTIME_DIR", "")
        if not path.strip():
            # The BSDs have no /run/user; prefer /var/run/user, else a /tmp fallback.
            if sys.platform.startswith(("freebsd", "openbsd", "netbsd")):
                path = f"/var/run/user/{getuid()}"
                if not Path(path).exists():
                    path = f"/tmp/runtime-{getuid()}"  # noqa: S108
            else:
                path = f"/run/user/{getuid()}"
        return self._append_app_name_and_version(path)
    @property
    def site_runtime_dir(self) -> str:
        """
        :return: runtime directory shared by users, e.g. ``/run/$appname/$version`` or \
        ``$XDG_RUNTIME_DIR/$appname/$version``.
        Note that this behaves almost exactly like `user_runtime_dir` if ``$XDG_RUNTIME_DIR`` is set, but will
        fall back to paths associated to the root user instead of a regular logged-in user if it's not set.
        If you wish to ensure that a logged-in root user path is returned e.g. ``/run/user/0``, use `user_runtime_dir`
        instead.
        For FreeBSD/OpenBSD/NetBSD, it would return ``/var/run/$appname/$version`` if ``$XDG_RUNTIME_DIR`` is not set.
        """
        path = os.environ.get("XDG_RUNTIME_DIR", "")
        if not path.strip():
            if sys.platform.startswith(("freebsd", "openbsd", "netbsd")):
                path = "/var/run"
            else:
                path = "/run"
        return self._append_app_name_and_version(path)
    @property
    def site_data_path(self) -> Path:
        """:return: data path shared by users. Only return the first item, even if ``multipath`` is set to ``True``"""
        return self._first_item_as_path_if_multipath(self.site_data_dir)
    @property
    def site_config_path(self) -> Path:
        """:return: config path shared by the users, returns the first item, even if ``multipath`` is set to ``True``"""
        return self._first_item_as_path_if_multipath(self.site_config_dir)
    @property
    def site_cache_path(self) -> Path:
        """:return: cache path shared by users. Only return the first item, even if ``multipath`` is set to ``True``"""
        return self._first_item_as_path_if_multipath(self.site_cache_dir)
    def iter_config_dirs(self) -> Iterator[str]:
        """:yield: all user and site configuration directories."""
        yield self.user_config_dir
        yield from self._site_config_dirs
    def iter_data_dirs(self) -> Iterator[str]:
        """:yield: all user and site data directories."""
        yield self.user_data_dir
        yield from self._site_data_dirs
def _get_user_media_dir(env_var: str, fallback_tilde_path: str) -> str:
    """Resolve a media dir: user-dirs.dirs first, then the environment, then a tilde fallback."""
    from_config = _get_user_dirs_folder(env_var)
    if from_config is not None:
        return from_config
    from_env = os.environ.get(env_var, "").strip()
    if from_env:
        return from_env
    return os.path.expanduser(fallback_tilde_path)  # noqa: PTH111
def _get_user_dirs_folder(key: str) -> str | None:
    """
    Look *key* up in the XDG ``user-dirs.dirs`` config file.
    See https://freedesktop.org/wiki/Software/xdg-user-dirs/.
    """
    config_file = Path(Unix().user_config_dir) / "user-dirs.dirs"
    if not config_file.exists():
        return None
    parser = ConfigParser()
    with config_file.open() as stream:
        # Prepend a fake section header so ConfigParser accepts the file.
        parser.read_string(f"[top]\n{stream.read()}")
    if key not in parser["top"]:
        return None
    raw_path = parser["top"][key].strip('"')
    # Expand $HOME so relative home paths become absolute.
    return raw_path.replace("$HOME", os.path.expanduser("~"))  # noqa: PTH111
# Public API of this module.
__all__ = [
    "Unix",
]
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@platformdirs@platformdirs@unix.py@.PATH_END.py
|
{
"filename": "test_datasets.py",
"repo_name": "cta-observatory/ctapipe",
"repo_path": "ctapipe_extracted/ctapipe-main/src/ctapipe/utils/tests/test_datasets.py",
"type": "Python"
}
|
import json
import os
from pathlib import Path
import pytest
import yaml
from ctapipe.utils import datasets
def test_find_datasets():
    """find_all_matching_datasets locates camera-geometry files."""
    matches = datasets.find_all_matching_datasets(r"(.*)\.camgeom\.fits\.gz")
    # Several camera geometry datasets should be available.
    assert len(matches) > 3
    # Resolving one of the matches by name yields an existing file.
    assert datasets.get_dataset_path(matches[0].name).exists()
    # With regexp_group=1 only the captured stem is returned, without the extension.
    stems = datasets.find_all_matching_datasets(
        r"(.*)\.camgeom\.fits\.gz", regexp_group=1
    )
    assert not str(stems[0]).endswith("gz")
def test_datasets_in_custom_path(tmpdir_factory):
    """
    check that a dataset in a user-defined CTAPIPE_SVC_PATH is located
    """
    first_dir = tmpdir_factory.mktemp("datasets1")
    second_dir = tmpdir_factory.mktemp("datasets2")
    os.environ["CTAPIPE_SVC_PATH"] = ":".join([str(first_dir), str(second_dir)])
    # create a dummy dataset to search for:
    dataset_name = "test_dataset_1.txt"
    dataset_path = str(first_dir.join(dataset_name))
    with open(dataset_path, "w") as fp:
        fp.write("test test test")
    # the dummy dataset should be found via the service path
    assert datasets.get_dataset_path(dataset_name) == Path(dataset_path)
    with pytest.raises(FileNotFoundError):
        datasets.get_dataset_path("does_not_exist")
    # find_all_matching_datasets should also see it
    found = datasets.find_all_matching_datasets(
        "test.*", searchpath=os.environ["CTAPIPE_SVC_PATH"]
    )
    assert dataset_name in {entry.name for entry in found}
def test_structured_datasets(tmpdir):
    """get_structured_dataset reads both JSON and YAML files."""
    payload = {"x": [1, 2, 3, 4, 5], "y": "test_json"}
    os.environ["CTAPIPE_SVC_PATH"] = ":".join([str(tmpdir)])
    # JSON variant
    with tmpdir.join("data_test.json").open(mode="w") as fp:
        json.dump(payload, fp)
    loaded = datasets.get_structured_dataset("data_test")
    assert loaded["x"] == [1, 2, 3, 4, 5]
    assert loaded["y"] == "test_json"
    tmpdir.join("data_test.json").remove()
    # YAML variant
    payload["y"] = "test_yaml"
    with tmpdir.join("data_test.yaml").open(mode="w") as fp:
        yaml.dump(payload, fp)
    loaded = datasets.get_structured_dataset("data_test")
    assert loaded["x"] == [1, 2, 3, 4, 5]
    assert loaded["y"] == "test_yaml"
|
cta-observatoryREPO_NAMEctapipePATH_START.@ctapipe_extracted@ctapipe-main@src@ctapipe@utils@tests@test_datasets.py@.PATH_END.py
|
{
"filename": "test_quantity_interaction.py",
"repo_name": "astropy/astropy",
"repo_path": "astropy_extracted/astropy-main/astropy/time/tests/test_quantity_interaction.py",
"type": "Python"
}
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import functools
import numpy as np
import pytest
from astropy import units as u
from astropy.table import Column
from astropy.time import Time, TimeDelta
def allclose_sec(a, b):
    """Element-wise closeness at double precision: rtol of one ulp, atol of ~20 ps in seconds."""
    return np.allclose(a, b, rtol=2.0**-52, atol=2.0**-52 * 24 * 3600)
class TestTimeQuantity:
    """Test Interaction of Time with Quantities"""
    def test_valid_quantity_input(self):
        """Test Time formats that are allowed to take quantity input."""
        q = 2450000.125 * u.day
        t1 = Time(q, format="jd", scale="utc")
        assert t1.value == q.value
        # Any time unit is accepted; it is converted to the format's unit.
        q2 = q.to(u.second)
        t2 = Time(q2, format="jd", scale="utc")
        assert t2.value == q.value == q2.to_value(u.day)
        q3 = q - 2400000.5 * u.day
        t3 = Time(q3, format="mjd", scale="utc")
        assert t3.value == q3.value
        # test we can deal with two quantity arguments, with different units
        qs = 24.0 * 36.0 * u.second
        t4 = Time(q3, qs, format="mjd", scale="utc")
        assert t4.value == (q3 + qs).to_value(u.day)
        qy = 1990.0 * u.yr
        ty1 = Time(qy, format="jyear", scale="utc")
        assert ty1.value == qy.value
        ty2 = Time(qy.to(u.day), format="jyear", scale="utc")
        assert ty2.value == qy.value
        # Epoch-seconds formats accept any time unit as well.
        qy2 = 10.0 * u.yr
        tcxc = Time(qy2, format="cxcsec")
        assert tcxc.value == qy2.to_value(u.second)
        tgps = Time(qy2, format="gps")
        assert tgps.value == qy2.to_value(u.second)
        tunix = Time(qy2, format="unix")
        assert tunix.value == qy2.to_value(u.second)
        qd = 2000.0 * 365.0 * u.day
        tplt = Time(qd, format="plot_date", scale="utc")
        assert tplt.value == qd.value
    def test_invalid_quantity_input(self):
        """Non-time units (or dimensionless) must be rejected."""
        with pytest.raises(u.UnitsError):
            Time(2450000.0 * u.m, format="jd", scale="utc")
        with pytest.raises(u.UnitsError):
            Time(2450000.0 * u.dimensionless_unscaled, format="jd", scale="utc")
    def test_column_with_and_without_units(self):
        """Ensure a Column without a unit is treated as an array [#3648]"""
        a = np.arange(50000.0, 50010.0)
        ta = Time(a, format="mjd")
        c1 = Column(np.arange(50000.0, 50010.0), name="mjd")
        tc1 = Time(c1, format="mjd")
        assert np.all(ta == tc1)
        c2 = Column(np.arange(50000.0, 50010.0), name="mjd", unit="day")
        tc2 = Time(c2, format="mjd")
        assert np.all(ta == tc2)
        # A Column with a non-time unit must be rejected like a bad Quantity.
        c3 = Column(np.arange(50000.0, 50010.0), name="mjd", unit="m")
        with pytest.raises(u.UnitsError):
            Time(c3, format="mjd")
    def test_no_quantity_input_allowed(self):
        """Time formats that are not allowed to take Quantity input."""
        qy = 1990.0 * u.yr
        for fmt in ("iso", "yday", "datetime", "byear", "byear_str", "jyear_str"):
            with pytest.raises(ValueError):
                Time(qy, format=fmt, scale="utc")
    def test_valid_quantity_operations(self):
        """Check that adding a time-valued quantity to a Time gives a Time"""
        t0 = Time(100000.0, format="cxcsec")
        q1 = 10.0 * u.second
        t1 = t0 + q1
        assert isinstance(t1, Time)
        assert t1.value == t0.value + q1.to_value(u.second)
        q2 = 1.0 * u.day
        t2 = t0 - q2
        assert allclose_sec(t2.value, t0.value - q2.to_value(u.second))
        # check broadcasting
        q3 = np.arange(15.0).reshape(3, 5) * u.hour
        t3 = t0 - q3
        assert t3.shape == q3.shape
        assert allclose_sec(t3.value, t0.value - q3.to_value(u.second))
    def test_invalid_quantity_operations(self):
        """Check that comparisons of Time with quantities does not work
        (even for time-like, since we cannot compare Time to TimeDelta)"""
        with pytest.raises(TypeError):
            Time(100000.0, format="cxcsec") > 10.0 * u.m  # noqa: B015
        with pytest.raises(TypeError):
            Time(100000.0, format="cxcsec") > 10.0 * u.second  # noqa: B015
class TestTimeDeltaQuantity:
    """Test interaction of TimeDelta with Quantities"""
    def test_valid_quantity_input(self):
        """Test that TimeDelta can take quantity input."""
        q = 500.25 * u.day
        dt1 = TimeDelta(q, format="jd")
        assert dt1.value == q.value
        dt2 = TimeDelta(q, format="sec")
        assert dt2.value == q.to_value(u.second)
        # Without an explicit format the quantity's own unit is used.
        dt3 = TimeDelta(q)
        assert dt3.value == q.value
    def test_invalid_quantity_input(self):
        """Non-time units and comparisons with them must be rejected."""
        with pytest.raises(u.UnitsError):
            TimeDelta(2450000.0 * u.m, format="jd")
        with pytest.raises(u.UnitsError):
            Time(2450000.0 * u.dimensionless_unscaled, format="jd", scale="utc")
        with pytest.raises(TypeError):
            TimeDelta(100, format="sec") > 10.0 * u.m  # noqa: B015
    def test_quantity_output(self):
        """to()/to_value() accept units, unit strings, and format names."""
        q = 500.25 * u.day
        dt = TimeDelta(q)
        assert dt.to(u.day) == q
        assert dt.to_value(u.day) == q.value
        assert dt.to_value("day") == q.value
        assert dt.to(u.second).value == q.to_value(u.second)
        assert dt.to_value(u.second) == q.to_value(u.second)
        assert dt.to_value("s") == q.to_value(u.second)
        # Following goes through "format", but should be the same.
        assert dt.to_value("sec") == q.to_value(u.second)
    def test_quantity_output_errors(self):
        """Bad units/format combinations raise informative errors."""
        dt = TimeDelta(250.0, format="sec")
        with pytest.raises(u.UnitsError):
            dt.to(u.m)
        with pytest.raises(u.UnitsError):
            dt.to_value(u.m)
        with pytest.raises(u.UnitsError):
            dt.to_value(unit=u.m)
        with pytest.raises(
            ValueError,
            match="not one of the known formats.*failed to parse as a unit",
        ):
            dt.to_value("parrot")
        with pytest.raises(TypeError):
            dt.to_value("sec", unit=u.s)
        with pytest.raises(
            ValueError,
            match=r"cannot specify 'subfmt' and positional arg.*not a valid format",
        ):
            dt.to_value(u.s, subfmt="str")
    def test_valid_quantity_operations1(self):
        """Check adding/subtracting/comparing a time-valued quantity works
        with a TimeDelta. Addition/subtraction should give TimeDelta"""
        t0 = TimeDelta(106400.0, format="sec")
        q1 = 10.0 * u.second
        t1 = t0 + q1
        assert isinstance(t1, TimeDelta)
        assert t1.value == t0.value + q1.to_value(u.second)
        q2 = 1.0 * u.day
        t2 = t0 - q2
        assert isinstance(t2, TimeDelta)
        assert allclose_sec(t2.value, t0.value - q2.to_value(u.second))
        # now comparisons
        assert t0 > q1
        assert t0 < 1.0 * u.yr
        # and broadcasting
        q3 = np.arange(12.0).reshape(4, 3) * u.hour
        t3 = t0 + q3
        assert isinstance(t3, TimeDelta)
        assert t3.shape == q3.shape
        assert allclose_sec(t3.value, t0.value + q3.to_value(u.second))
    def test_valid_quantity_operations2(self):
        """Check that TimeDelta is treated as a quantity where possible."""
        t0 = TimeDelta(100000.0, format="sec")
        f = 1.0 / t0
        assert isinstance(f, u.Quantity)
        assert f.unit == 1.0 / u.day
        g = 10.0 * u.m / u.second**2
        v = t0 * g
        assert isinstance(v, u.Quantity)
        assert u.allclose(v, t0.sec * g.value * u.m / u.second)
        q = np.log10(t0 / u.second)
        assert isinstance(q, u.Quantity)
        assert q.value == np.log10(t0.sec)
        s = 1.0 * u.m
        v = s / t0
        assert isinstance(v, u.Quantity)
        assert u.allclose(v, 1.0 / t0.sec * u.m / u.s)
        t = 1.0 * u.s
        t2 = t0 * t
        assert isinstance(t2, u.Quantity)
        assert u.allclose(t2, t0.sec * u.s**2)
        t3 = [1] / t0
        assert isinstance(t3, u.Quantity)
        assert u.allclose(t3, 1 / (t0.sec * u.s))
        # broadcasting
        t1 = TimeDelta(np.arange(100000.0, 100012.0).reshape(6, 2), format="sec")
        f = np.array([1.0, 2.0]) * u.cycle * u.Hz
        phase = f * t1
        assert isinstance(phase, u.Quantity)
        assert phase.shape == t1.shape
        assert u.allclose(phase, t1.sec * f.value * u.cycle)
        q = t0 * t1
        assert isinstance(q, u.Quantity)
        assert np.all(q == t0.to(u.day) * t1.to(u.day))
        q = t1 / t0
        assert isinstance(q, u.Quantity)
        assert np.all(q == t1.to(u.day) / t0.to(u.day))
    def test_valid_quantity_operations3(self):
        """Test a TimeDelta remains one if possible."""
        t0 = TimeDelta(10.0, format="jd")
        q = 10.0 * u.one
        t1 = q * t0
        assert isinstance(t1, TimeDelta)
        assert t1 == TimeDelta(100.0, format="jd")
        t2 = t0 * q
        assert isinstance(t2, TimeDelta)
        assert t2 == TimeDelta(100.0, format="jd")
        t3 = t0 / q
        assert isinstance(t3, TimeDelta)
        assert t3 == TimeDelta(1.0, format="jd")
        q2 = 1.0 * u.percent
        t4 = t0 * q2
        assert isinstance(t4, TimeDelta)
        assert abs(t4 - TimeDelta(0.1, format="jd")) < 1.0 * u.ns
        q3 = 1.0 * u.hr / (36.0 * u.s)
        t5 = q3 * t0
        # Bug fix: this previously asserted isinstance(t4, TimeDelta) (a
        # copy-paste of the block above), leaving t5's type unchecked.
        assert isinstance(t5, TimeDelta)
        assert abs(t5 - TimeDelta(1000.0, format="jd")) < 1.0 * u.ns
        # Test multiplication with a unit.
        t6 = t0 * u.one
        assert isinstance(t6, TimeDelta)
        assert t6 == TimeDelta(10.0, format="jd")
        t7 = u.one * t0
        assert isinstance(t7, TimeDelta)
        assert t7 == TimeDelta(10.0, format="jd")
        t8 = t0 * ""
        assert isinstance(t8, TimeDelta)
        assert t8 == TimeDelta(10.0, format="jd")
        t9 = "" * t0
        assert isinstance(t9, TimeDelta)
        assert t9 == TimeDelta(10.0, format="jd")
        t10 = t0 / u.one
        assert isinstance(t10, TimeDelta)
        assert t6 == TimeDelta(10.0, format="jd")
        t11 = t0 / ""
        assert isinstance(t11, TimeDelta)
        assert t11 == TimeDelta(10.0, format="jd")
        t12 = t0 / [1]
        assert isinstance(t12, TimeDelta)
        assert t12 == TimeDelta(10.0, format="jd")
        t13 = [1] * t0
        assert isinstance(t13, TimeDelta)
        assert t13 == TimeDelta(10.0, format="jd")
    def test_invalid_quantity_operations(self):
        """Check comparisons of TimeDelta with non-time quantities fails."""
        with pytest.raises(TypeError):
            TimeDelta(100000.0, format="sec") > 10.0 * u.m  # noqa: B015
    def test_invalid_quantity_operations2(self):
        """Check that operations with non-time/quantity fail."""
        td = TimeDelta(100000.0, format="sec")
        with pytest.raises(TypeError):
            td * object()
        with pytest.raises(TypeError):
            td / object()
    def test_invalid_quantity_broadcast(self):
        """Check broadcasting rules in interactions with Quantity."""
        t0 = TimeDelta(np.arange(12.0).reshape(4, 3), format="sec")
        with pytest.raises(ValueError):
            t0 + np.arange(4.0) * u.s
class TestDeltaAttributes:
    # delta_ut1_utc and delta_tdb_tt accept plain time Quantities as well as
    # TimeDelta instances, and reject non-time units.
    def test_delta_ut1_utc(self):
        """Setting delta_ut1_utc from Quantity or TimeDelta adjusts UT1."""
        t = Time("2010-01-01 00:00:00", format="iso", scale="utc", precision=6)
        t.delta_ut1_utc = 0.3 * u.s
        assert t.ut1.iso == "2010-01-01 00:00:00.300000"
        t.delta_ut1_utc = 0.4 / 60.0 * u.minute
        assert t.ut1.iso == "2010-01-01 00:00:00.400000"
        with pytest.raises(u.UnitsError):
            t.delta_ut1_utc = 0.4 * u.m
        # Also check that a TimeDelta works.
        t.delta_ut1_utc = TimeDelta(0.3, format="sec")
        assert t.ut1.iso == "2010-01-01 00:00:00.300000"
        t.delta_ut1_utc = TimeDelta(0.5 / 24.0 / 3600.0, format="jd")
        assert t.ut1.iso == "2010-01-01 00:00:00.500000"
    def test_delta_tdb_tt(self):
        """Setting delta_tdb_tt from Quantity or TimeDelta adjusts TDB."""
        t = Time("2010-01-01 00:00:00", format="iso", scale="tt", precision=6)
        t.delta_tdb_tt = 20.0 * u.second
        assert t.tdb.iso == "2010-01-01 00:00:20.000000"
        t.delta_tdb_tt = 30.0 / 60.0 * u.minute
        assert t.tdb.iso == "2010-01-01 00:00:30.000000"
        with pytest.raises(u.UnitsError):
            t.delta_tdb_tt = 0.4 * u.m
        # Also check that a TimeDelta works.
        t.delta_tdb_tt = TimeDelta(40.0, format="sec")
        assert t.tdb.iso == "2010-01-01 00:00:40.000000"
        t.delta_tdb_tt = TimeDelta(50.0 / 24.0 / 3600.0, format="jd")
        assert t.tdb.iso == "2010-01-01 00:00:50.000000"
@pytest.mark.parametrize(
    "q1, q2",
    (
        (5e8 * u.s, None),
        (5e17 * u.ns, None),
        (4e8 * u.s, 1e17 * u.ns),
        (4e14 * u.us, 1e17 * u.ns),
    ),
)
def test_quantity_conversion_rounding(q1, q2):
    """Check that no rounding errors are incurred by unit conversion.
    This occurred before as quantities in seconds were converted to days
    before trying to split them into two-part doubles. See gh-7622.
    """
    t = Time("2001-01-01T00:00:00.", scale="tai")
    expected = Time("2016-11-05T00:53:20.", scale="tai")
    # All parametrizations represent the same 5e8 s offset, split differently.
    if q2 is None:
        t0 = t + q1
    else:
        t0 = t + q1 + q2
    assert abs(t0 - expected) < 20 * u.ps
    dt1 = TimeDelta(q1, q2)
    t1 = t + dt1
    assert abs(t1 - expected) < 20 * u.ps
    # Same input, but forced through the "sec" format path.
    dt2 = TimeDelta(q1, q2, format="sec")
    t2 = t + dt2
    assert abs(t2 - expected) < 20 * u.ps
|
astropyREPO_NAMEastropyPATH_START.@astropy_extracted@astropy-main@astropy@time@tests@test_quantity_interaction.py@.PATH_END.py
|
{
"filename": "telluric_remove.py",
"repo_name": "msiebert1/UCSC_spectral_pipeline",
"repo_path": "UCSC_spectral_pipeline_extracted/UCSC_spectral_pipeline-master/spectral_reduction/tmath/pydux/telluric_remove.py",
"type": "Python"
}
|
def telluric_remove(bstarwave, bstar, bairmass, wave, object, airmass, variance, spectrum, yes= False,shift=None):
    """Divide telluric (atmospheric absorption) bands out of an object spectrum.

    The B-star spectrum is rebinned onto the object's wavelength grid, scaled
    by the airmass ratio, and divided into the object. Unless *shift* is
    given, the B-star is first cross-correlated against the object around the
    ~6250 A, B (~6870 A) and A (~7600 A) telluric bands to find a wavelength
    shift, which the user can inspect/override interactively.

    Arguments (types inferred from usage -- confirm with callers):
      bstarwave, bstar: wavelength grid and flux of the B-star spectrum.
      bairmass: airmass of the B-star observation.
      wave, object: wavelength grid and flux used for the cross-correlation.
      airmass: airmass of the object observation.
      variance: variance spectrum, divided by the band model alongside the flux.
      spectrum: flux spectrum that is actually corrected and returned.
      yes: if truthy, accept interactive prompts automatically.
      shift: optional shift in Angstroms; skips the cross-correlation step.

    Returns:
      (newobject, bvar, angshift): corrected flux, corrected variance, and the
      applied B-star shift in Angstroms.
    """
    import numpy as np
    import pdb
    import matplotlib.pyplot as plt
    from tmath.wombat.inputter import inputter
    from tmath.wombat.yesno import yesno
    from tmath.wombat.womscipyrebin import womscipyrebin
    from tmath.wombat.womget_element import womget_element
    from tmath.pydux.xcor import xcor
    from tmath.pydux.finalscaler import finalscaler
    # Rebin the B-star onto the object's wavelength grid.
    bstartmp=womscipyrebin(bstarwave,bstar,wave)
#    plt.cla()
#    plt.plot(bstarwave,bstartmp)
#    plt.pause(0.01)
#    answer=yesno('y')
    print('\nThe ratio of airmasses (object/B-star) is {}'.format(airmass/bairmass))
    if (airmass/bairmass > 3.0) or (airmass/bairmass < 0.33):
        print('\nWARNING: OBJECT AND B-STAR HAVE WILDLY DIFFERENT')
        print('AIRMASSES: ATMOSPHERIC BAND DIVISION MAY BE LOUSY\n')
    wmin=wave[0]
    wmax=wave[-1]
    npix=len(object)
    wdelt=wave[1]-wave[0]
    print('wdelt',wdelt)
    # One lag per telluric band (6250 A, B band, A band); lagflag records
    # which bands were actually measurable in this wavelength range.
    lag=np.zeros(3)
    lagflag=[False]*3
    xfactor=10
    maxlag=200
    if not shift:
        print('\nCross-correlating object with B-star spectrum\n')
        fig=plt.figure()
        axarr=fig.subplots(2,1)
        if (wmin < 6200) and (wmax > 6400) and (wmax < 6900):
            indblue=womget_element(wave,6200)
            indred=womget_element(wave,6400)
            lag[0]=xcor(object[indblue:indred+1],bstartmp[indblue:indred+1],xfactor,maxlag)
            lagflag[0]=True
            print('The shift at the 6250A band is {} angstroms'.format(lag[0]*wdelt))
        if (wmin < 6800) and (wmax > 6500):
            indblue=womget_element(wave,6800)
            indred=womget_element(wave,6950)
            # Normalize the object so both spectra plot on a common scale.
            scale = 1./np.max(object[indblue:indred+1])
            obb=scale*object[indblue:indred+1]
            bb=bstartmp[indblue:indred+1]
            lag[1]=xcor(obb,bb,xfactor,maxlag)
            lagflag[1]=True
            print('The shift at the B band is {} angstroms'.format(lag[1]*wdelt))
            plt.cla()
            # ymin,ymax=finalscaler(object)
            # plt.plot(wave,object,drawstyle='steps-mid',color='r')
            # plt.plot(wave,newobject,drawstyle='steps-mid',color='k')
            ymin,ymax=finalscaler(bstartmp[indblue:indred+1])
            axarr[0].plot(wave[indblue:indred+1], scale*object[indblue:indred+1],drawstyle='steps-mid',color='r')
            axarr[0].plot(wave[indblue:indred+1], bstartmp[indblue:indred+1],drawstyle='steps-mid',color='k')
            axarr[0].plot(wave[indblue:indred+1]+lag[1]*wdelt, bstartmp[indblue:indred+1],drawstyle='steps-mid',color='g')
            props = dict(boxstyle='round', facecolor='wheat', alpha=0.5)
            axarr[0].text(0.05, 0.95, "Shift: {:.2} Å".format(lag[1]*wdelt),transform=axarr[0].transAxes, fontsize=14,verticalalignment='top', bbox=props)
            plt.pause(0.01)
        if (wmin < 7500) and (wmax > 8000):
            indblue=womget_element(wave,7500)
            indred=womget_element(wave,8000)
            scale = 1./np.max(object[indblue:indred+1])
            lag[2]=xcor(scale*object[indblue:indred+1],bstartmp[indblue:indred+1],xfactor,maxlag)
            print('The shift at the A band is {} angstroms'.format(lag[2]*wdelt))
            lagflag[2]=True
            # ymin,ymax=finalscaler(object)
            # plt.plot(wave,object,drawstyle='steps-mid',color='r')
            # plt.plot(wave,newobject,drawstyle='steps-mid',color='k')
            ymin,ymax=finalscaler(bstartmp[indblue:indred+1])
            axarr[1].plot(wave[indblue:indred+1], scale*object[indblue:indred+1],drawstyle='steps-mid',color='r')
            axarr[1].plot(wave[indblue:indred+1], bstartmp[indblue:indred+1],drawstyle='steps-mid',color='k')
            axarr[1].plot(wave[indblue:indred+1]+lag[2]*wdelt, bstartmp[indblue:indred+1],drawstyle='steps-mid',color='g')
            props = dict(boxstyle='round', facecolor='wheat', alpha=0.5)
            axarr[1].text(0.05, 0.95, "Shift: {:.2} Å".format(lag[2]*wdelt),transform=axarr[1].transAxes, fontsize=14,verticalalignment='top', bbox=props)
            plt.pause(0.01)
        check=inputter('Check plot [enter when done]: ','string',False,yes=yes)
        # Average the shifts of whichever bands were measured.
        if (sum(lagflag) > 0):
            avglag=np.sum(lag)/sum(lagflag)
            angshift=avglag*wdelt
            print('The mean shift is {} Angstroms'.format(angshift))
        else:
            angshift=0.0
        plt.close()
    else:
        angshift=shift
    fig = plt.figure(figsize = [9,5])
    telluric_done = False
    bstartmpcopy=bstartmp.copy()
    # Iterate until the user accepts the correction (or immediately in
    # non-interactive mode): shift, scale by airmass ratio, divide.
    while (not telluric_done):
        print('Applying a shift of {} Angstroms'.format(angshift))
        bstartmp=bstartmpcopy.copy()
        tmp=womscipyrebin(wave+angshift,bstartmp,wave)
        bstartmp=tmp.copy()
        # Exponent 0.55: empirical scaling of telluric band strength with
        # airmass ratio -- presumably standard practice here; TODO confirm.
        bstartmp=bstartmp**((airmass/bairmass)**0.55)
        # newobject=object/bstartmp
        newobject=spectrum/bstartmp
        bvar=variance/bstartmp
        print('\nPlotting before and after atmospheric band correction\n')
        plt.cla()
        # ymin,ymax=finalscaler(object)
        # plt.plot(wave,object,drawstyle='steps-mid',color='r')
        # plt.plot(wave,newobject,drawstyle='steps-mid',color='k')
        ymin,ymax=finalscaler(spectrum)
        plt.plot(wave,spectrum,drawstyle='steps-mid',color='r')
        plt.plot(wave,newobject,drawstyle='steps-mid',color='k')
        plt.ylim([ymin,ymax])
        plt.pause(0.01)
        if not shift:
            # print('Is this OK?')
            # answer=yesno('y')
            if yes:
                answer=yes
                plt.savefig("plots/shift.pdf")
            else:
                answer = input('Is this ok? [y]/n: ') or 'y'
            if (answer == 'n'):
                angshift=inputter('Enter B-star shift in Angstroms: ','float',False)
            else:
                telluric_done = True
        else:
            check=inputter('Check plot [enter when done]: ','string',False)
            telluric_done = True
    plt.close()
    return newobject, bvar, angshift
|
msiebert1REPO_NAMEUCSC_spectral_pipelinePATH_START.@UCSC_spectral_pipeline_extracted@UCSC_spectral_pipeline-master@spectral_reduction@tmath@pydux@telluric_remove.py@.PATH_END.py
|
{
"filename": "slack_webhook.py",
"repo_name": "lsst-uk/lasair-lsst",
"repo_path": "lasair-lsst_extracted/lasair-lsst-main/common/src/slack_webhook.py",
"type": "Python"
}
|
import argparse
import json
import os
import sys
import warnings
from typing import Optional

import requests
class SlackWebhook():
    """Represents a Slack app or integration that we can send messages to."""

    def __init__(self, url: str, channel: Optional[str] = None):
        """Store the webhook *url* and an optional default *channel*.

        The channel default is ``None`` (let Slack use the webhook's own
        channel), so the annotation is ``Optional[str]`` rather than the
        original, misleading ``str``.
        """
        self.url = url
        self.channel = channel

    def send(self, message: str):
        """POST *message* to the webhook, using the stored channel (if any)."""
        _send(self.url, message, self.channel)
def send(url, message):
    """Deprecated module-level entry point.

    Emits a DeprecationWarning, then forwards *message* to ``_send`` with
    the legacy '#general' channel.
    """
    deprecation_msg = "Direct use of send is deprecated, please use LasairLogging."
    warnings.warn(deprecation_msg, DeprecationWarning, stacklevel=2)
    _send(url, message, '#general')
def _send(url, message, channel):
    """POST *message* to the Slack webhook at *url*.

    When *channel* is not None it is included in the payload so Slack routes
    the message there. Raises ValueError on any non-200 response.
    """
    payload = {'text': message}
    if channel is not None:
        payload['channel'] = channel
    response = requests.post(
        url,
        data=json.dumps(payload),
        headers={'Content-Type': 'application/json'},
    )
    if response.status_code != 200:
        raise ValueError(
            'Request to slack returned an error %s, the response is:\n%s'
            % (response.status_code, response.text)
        )
if __name__ == "__main__":
    # CLI mode: read messages from stdin and post each line to the webhook.
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument('-u', '--url', required=True, type=str, help='Webhook URL')
    args = parser.parse_args()
    for raw_line in sys.stdin:
        send(args.url, raw_line.rstrip())
|
lsst-ukREPO_NAMElasair-lsstPATH_START.@lasair-lsst_extracted@lasair-lsst-main@common@src@slack_webhook.py@.PATH_END.py
|
{
"filename": "conf.py",
"repo_name": "SimonPfeifer/cows",
"repo_path": "cows_extracted/cows-master/docs/conf.py",
"type": "Python"
}
|
# -*- coding: utf-8 -*-
# Sphinx configuration for the cows documentation build.
from __future__ import unicode_literals

import os

import cows  # NOTE(review): presumably imported so autodoc/autosummary can resolve the package — confirm

# Sphinx extensions enabled for this build.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.autosummary',
    'sphinx.ext.coverage',
    'sphinx.ext.doctest',
    'sphinx.ext.extlinks',
    'sphinx.ext.ifconfig',
    'sphinx.ext.napoleon',
    'sphinx.ext.todo',
    'sphinx.ext.viewcode',
    # 'nbsphinx',
    'myst_nb',
]
exclude_patterns = ['_build']
source_suffix = '.rst'
master_doc = 'index'

# Project metadata.
project = 'cows'
year = '2022'
author = 'Simon Pfeifer'
copyright = '{0}, {1}'.format(year, author)
version = release = '0.0.2'

pygments_style = 'trac'
templates_path = ['.']
# Shorthand roles for linking to GitHub issues and pull requests,
# e.g. :issue:`12` -> #12.
extlinks = {
    'issue': ('https://github.com/SimonPfeifer/python-cows/issues/%s', '#'),
    'pr': ('https://github.com/SimonPfeifer/python-cows/pull/%s', 'PR #'),
}
# on_rtd is whether we are on readthedocs.org
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd:  # only set the theme if we're building docs locally
    html_theme = 'sphinx_rtd_theme'

# HTML output options.
html_use_smartypants = True
html_last_updated_fmt = '%b %d, %Y'
html_split_index = False
html_sidebars = {
    '**': ['searchbox.html', 'globaltoc.html', 'sourcelink.html'],
}
html_short_title = '%s-%s' % (project, version)

# Napoleon (Google/NumPy-style docstring) parsing options.
napoleon_use_ivar = True
napoleon_use_rtype = False
napoleon_use_param = False
|
SimonPfeiferREPO_NAMEcowsPATH_START.@cows_extracted@cows-master@docs@conf.py@.PATH_END.py
|
{
"filename": "plotting_scripts.py",
"repo_name": "eliotayache-nv/GAMMA",
"repo_path": "GAMMA_extracted/GAMMA-master/bin/Tools/plotting_scripts.py",
"type": "Python"
}
|
# -*- coding: utf-8 -*-
# @Author: eliotayache
# @Date: 2020-05-14 16:24:48
# @Last Modified by: Eliot Ayache
# @Last Modified time: 2022-03-22 16:22:32
'''
This file contains functions used to print GAMMA outputs. These functions
should be run from the ./bin/Tools directory.
This can be run from a jupyter or iPython notebook:
$run plotting_scripts.py
'''
# Imports
# --------------------------------------------------------------------------------------------------
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
# MPL options
# --------------------------------------------------------------------------------------------------
# Global matplotlib defaults applied when this module is imported:
# serif 12 pt text for ticks/legend and 200 dpi saved figures.
plt.rc('font', family='serif', size=12)
plt.rc('xtick', labelsize=12)
plt.rc('ytick', labelsize=12)
plt.rc('legend', fontsize=12)
plt.rcParams['savefig.dpi'] = 200
# IO functions
# --------------------------------------------------------------------------------------------------
def readData(key, it=None, sequence=False):
    """Load a GAMMA output file from ../../results into a DataFrame.

    Args:
    -----
    key: string. Run name, or a file name relative to ../../results when
        *it* is None and *sequence* is False.
    it: int. Iteration number used to build the file name.
    sequence: bool. Use the zero-padded physXXXXXXXXXX.out naming scheme.
    """
    if sequence:
        path = '../../results/%s/phys%010d.out' % (key, it)
    elif it is None:
        path = '../../results/%s' % (key,)
    else:
        path = '../../results/%s%d.out' % (key, it)
    return pd.read_csv(path, sep=" ")
def pivot(data, key):
    """Return column *key* reshaped to a 2-D (j, i) numpy array."""
    table = data.pivot(index="j", columns="i", values=key)
    return table.to_numpy()
# Plotting functions
# --------------------------------------------------------------------------------------------------
def plotMulti(data, keys, jtrack=None, log=None, labels=None, **kwargs):
    '''
    Plots multiple variables for a single 1D track in the same figure.

    Args:
    -----
    data: pandas dataframe. Output data.
    keys: list of string. Variables to plot (one stacked panel each;
        requires at least two keys, since plt.subplots only returns an
        iterable axes array for more than one row).
    jtrack: int or None. Track index forwarded to plot1D (for 2D outputs).

    kwargs:
    -------
    log: list of strings. keys of variables to be plotted in logspace.
    labels: dict. Maps a key to the y-axis label used for its panel.
    Remaining kwargs are forwarded to plot1D.

    Returns:
    --------
    f: pyplot figure.
    axes: list of axes contained in the figure.

    Example usage:
    --------------
    f, axes = plotMulti(data, ["rho","p","lfac"],
        tracer=False,
        line=False,
        labels={"rho":"$\\rho/\\rho_0$", "p":"$p/p_0$","lfac":"$\\gamma$"},
        x_norm=RShock)
    '''
    # Normalise here instead of using mutable default arguments ([]/{}),
    # which would be shared across calls.
    log = [] if log is None else log
    labels = {} if labels is None else labels

    Nk = len(keys)
    f, axes = plt.subplots(Nk, 1, sharex=True, figsize=(6, 2*Nk))
    for key, ax in zip(keys, axes):
        plot1D(data, key, ax, jtrack=jtrack,
               log=(key in log),
               label=labels.get(key),
               **kwargs)
    plt.tight_layout()
    return f, axes
def plot1D(data, key, ax=None, mov="x", log=False, v1min=None, tracer=True,
           line=True, r2=False, x_norm=None, jtrack=None, label=None,
           **kwargs):
    '''
    Plot a single 1D GAMMA output track.

    Handles both 1D outputs and single tracks of 2D outputs (pass *jtrack*
    to select the row of a 2D dataset).

    Args:
    -----
    data: pandas dataframe. Output data.
    key: string. Variable to plot ("lfac" is derived from the "vx" column).

    kwargs:
    -------
    ax: matplotlib axes to draw on (a new figure is created when None).
    log: bool. Use a logarithmic y-axis.
    line: bool. Draw a connecting line under the markers.
    tracer: bool. Colour the markers by the "trac" passive tracer value.
    r2: bool. Multiply the variable by x**2 before plotting.
    x_norm: float. Normalise the x-coordinate by this value.
    label: string. y-axis label (defaults to *key*).
    mov, v1min, **kwargs: accepted for interface compatibility; unused here.

    Example usage:
    --------------
    data = readData('Last/phys0000000000.out')
    plot1D(data, "rho", log=True, jtrack=0)
    '''
    # "lfac" is computed from vx; every other key maps to a column directly.
    column = "vx" if key == "lfac" else key

    if jtrack is not None:
        yvals = pivot(data, column)[jtrack, :]
        xvals = pivot(data, "x")[jtrack, :]
        tracvals = pivot(data, "trac")[jtrack, :]
    else:
        yvals = data[column].to_numpy()
        xvals = np.copy(data["x"].to_numpy())
        tracvals = data["trac"].to_numpy()

    if x_norm is not None:
        xvals /= x_norm
    if key == "lfac":
        yvals = 1. / np.sqrt(1 - yvals**2)
    if r2:
        yvals *= xvals**2

    if ax is None:
        plt.figure()
        ax = plt.gca()

    ax.set_ylabel(label if label is not None else key)
    if log:
        ax.set_yscale('log')
    if line:
        ax.plot(xvals, yvals, 'k', zorder=1)
    ax.scatter(xvals, yvals, c='None', edgecolors='k', lw=2, zorder=2, label="numerical")
    if tracer:
        ax.scatter(xvals, yvals, c=tracvals, edgecolors='None', zorder=3, cmap='cividis')
def plot2D(data, key, z_override=None, mov="x", log=False, v1min=None,
           geometry="cartesian", quiver=False, color=None, edges='None',
           invert=False, r2=False, cmap='magma', tlayout=False, colorbar=True,
           slick=False, phi=0., fig=None, label=None, axis=None, thetaobs=0.,
           nuobs=1.e17, shrink=0.6, expand=False):
    '''
    Plots 2D outputs from GAMMA as a pcolor map, row of cells by row of cells.

    Args:
    -----
    data: pandas dataframe. Output data.
    key: string. Variable to plot ("lfac" is derived from "vx" and "vy").

    Returns:
    --------
    xmin: double. Minimum coordinate x in data.
    xmax: double. Maximum coordinate x in data.
    thetamax: double. Highest track angle in polar geometry.
    im: pyplot.image. 2D map of the requested variable (last row drawn).

    Example usage:
    --------------
    data = readData('Last/phys0000000000.out')

    # On specific axes
    f = plt.figure()
    ax = plt.axes(projection='polar')
    plot2D(data, "rho", fig=f, axis=ax, **kwargs)

    # On axes of its own
    plot2D(data, "rho", geometry='polar', **kwargs)
    '''
    if z_override is not None:
        z = z_override
    # NOTE(review): z is unconditionally recomputed just below, so
    # z_override appears to be clobbered — confirm the intended precedence.
    if key == "lfac":
        # Lorentz factor from both velocity components.
        vx = data.pivot(index='j', columns='i', values="vx").to_numpy()
        vy = data.pivot(index='j', columns='i', values="vy").to_numpy()
        z = 1./np.sqrt(1 - (vx**2+vy**2))
    else:
        z = data.pivot(index='j', columns='i', values=key).to_numpy()
    # Cell centres (x, y) and cell sizes (dx, dy) on the (j, i) grid.
    x = data.pivot(index='j', columns='i', values='x').to_numpy()
    dx = data.pivot(index='j', columns='i', values='dx').to_numpy()
    y = data.pivot(index='j', columns='i', values='y').to_numpy()
    dy = data.pivot(index='j', columns='i', values='dy').to_numpy()

    # duplicating last row for plotting
    z = np.append(z, np.expand_dims(z[-1, :], axis=0), axis=0)
    x = np.append(x, np.expand_dims(x[-1, :], axis=0), axis=0)
    dx = np.append(dx, np.expand_dims(dx[-1, :], axis=0), axis=0)
    y = np.append(y, np.expand_dims(y[-1, :], axis=0), axis=0)
    dy = np.append(dy, np.expand_dims(dy[-1, :], axis=0), axis=0)

    # duplicating first column for plotting
    z = np.append(z, np.expand_dims(z[:, -1], axis=1), axis=1)
    x = np.append(x, np.expand_dims(x[:, -1], axis=1), axis=1)
    dx = np.append(dx, np.expand_dims(dx[:, -1], axis=1), axis=1)
    y = np.append(y, np.expand_dims(y[:, -1], axis=1), axis=1)
    dy = np.append(dy, np.expand_dims(dy[:, -1], axis=1), axis=1)

    # Number of active (non-NaN) cells in each track; tracks can have
    # different lengths in GAMMA's moving-mesh output.
    nact = np.array([np.count_nonzero(~np.isnan(xj)) for xj in x])

    if (quiver):
        # NOTE(review): vx is prepared/masked here but never drawn in this
        # function — quiver rendering looks unfinished; confirm.
        vx = data.pivot(index='j', columns='i', values='vx').to_numpy()
        vx = np.append(vx, np.expand_dims(vx[-1, :], axis=0), axis=0)
        vx = np.ma.masked_array(vx, np.isnan(vx))

    if r2:
        z *= x**2

    # Global coordinate and colour ranges (NaN-aware).
    xmin = np.nanmin(x)
    xmax = np.nanmax(x)
    ymin = np.nanmin(y)
    ymax = np.nanmax(y)
    # NOTE(review): vmax skips the first 4 rows — presumably to ignore
    # boundary/ghost tracks; confirm.
    vmax = np.nanmax(z[4:, :])
    vmin = np.nanmin(z)
    if log:
        # LogNorm needs a strictly positive lower bound.
        vmin = np.nanmin(z[z > 0])
    if v1min:
        vmin = v1min

    if geometry == "polar":
        projection = "polar"
    else:
        projection = None
    if axis is None:
        f = plt.figure()
        ax = plt.axes(projection=projection)
    else:
        # Caller-supplied figure/axes: draw onto them instead.
        f = fig
        ax = axis
    if geometry == "polar" or axis is not None:
        # Limit the polar wedge to the angular extent of the data.
        ax.set_thetamax(ymax*180./np.pi)
        ax.set_thetamin(ymin*180./np.pi)
        if invert:
            # Mirror the wedge to negative angles.
            ax.set_thetamin(-ymax*180./np.pi)
    if slick:
        # Bare image: no axes, ticks or frame.
        ax.axis("off")

    # Draw each track (row j) as its own pcolor strip, since cell edges are
    # not aligned across tracks on the moving mesh.
    for j in range(z.shape[0]-1):
        # Lower-left cell corners for this pass.
        xj = x - dx/2.
        yj = y - dy/2.
        dyj = dy
        # Extend the last active cell of track j to its outer edge.
        xj[j, nact[j]-1] += dx[j, nact[j]-1]
        if mov == 'y':
            # Moving dimension is y: swap the roles of the two coordinates.
            tmp = np.copy(xj)
            xj = yj
            yj = np.copy(tmp)
            dyj = dx
        # Build the second edge row of the strip from track j's far edge.
        xj[j+1, :] = xj[j, :]
        yj[j+1, :] = yj[j, :]+dyj[j, :]
        xj = xj[j:j+2, :]
        yj = yj[j:j+2, :]
        zj = z[j:j+2, :]
        if invert:
            yj *= -1
        if log:
            im = ax.pcolor(yj, xj, zj,
                           norm=LogNorm(vmin=vmin, vmax=vmax),
                           edgecolors=edges,
                           cmap=cmap,
                           facecolor=color)
        else:
            im = ax.pcolor(yj, xj, zj,
                           vmin=vmin, vmax=vmax,
                           edgecolors=edges,
                           cmap=cmap,
                           facecolor=color)

    if geometry != "polar":
        ax.set_aspect('equal')
    if geometry == "polar" or axis is not None:
        # Start the radial axis at the inner edge of the data.
        ax.set_rorigin(0)
        ax.set_rmin(xmin)
        ax.set_rticks([xmin, xmax])
    if colorbar:
        cb = f.colorbar(im, ax=ax, orientation='vertical', shrink=shrink, pad=0.1)
        if label is None:
            label = key
        cb.set_label(label, fontsize=14)
    if tlayout:
        f.tight_layout()

    thetamax = ymax*180./np.pi
    return xmin, xmax, thetamax, im
|
eliotayache-nvREPO_NAMEGAMMAPATH_START.@GAMMA_extracted@GAMMA-master@bin@Tools@plotting_scripts.py@.PATH_END.py
|
{
"filename": "_parcats.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/graph_objs/_parcats.py",
"type": "Python"
}
|
from plotly.basedatatypes import BaseTraceType as _BaseTraceType
import copy as _copy
class Parcats(_BaseTraceType):
# class properties
# --------------------
_parent_path_str = ""
_path_str = "parcats"
_valid_props = {
"arrangement",
"bundlecolors",
"counts",
"countssrc",
"dimensiondefaults",
"dimensions",
"domain",
"hoverinfo",
"hoveron",
"hovertemplate",
"labelfont",
"line",
"meta",
"metasrc",
"name",
"sortpaths",
"stream",
"tickfont",
"type",
"uid",
"uirevision",
"visible",
}
# arrangement
# -----------
@property
def arrangement(self):
"""
Sets the drag interaction mode for categories and dimensions.
If `perpendicular`, the categories can only move along a line
perpendicular to the paths. If `freeform`, the categories can
freely move on the plane. If `fixed`, the categories and
dimensions are stationary.
The 'arrangement' property is an enumeration that may be specified as:
- One of the following enumeration values:
['perpendicular', 'freeform', 'fixed']
Returns
-------
Any
"""
return self["arrangement"]
@arrangement.setter
def arrangement(self, val):
self["arrangement"] = val
# bundlecolors
# ------------
@property
def bundlecolors(self):
"""
Sort paths so that like colors are bundled together within each
category.
The 'bundlecolors' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["bundlecolors"]
@bundlecolors.setter
def bundlecolors(self, val):
self["bundlecolors"] = val
# counts
# ------
@property
def counts(self):
"""
The number of observations represented by each state. Defaults
to 1 so that each state represents one observation
The 'counts' property is a number and may be specified as:
- An int or float in the interval [0, inf]
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|float|numpy.ndarray
"""
return self["counts"]
@counts.setter
def counts(self, val):
self["counts"] = val
# countssrc
# ---------
@property
def countssrc(self):
"""
Sets the source reference on Chart Studio Cloud for counts .
The 'countssrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["countssrc"]
@countssrc.setter
def countssrc(self, val):
self["countssrc"] = val
# dimensions
# ----------
@property
def dimensions(self):
"""
The dimensions (variables) of the parallel categories diagram.
The 'dimensions' property is a tuple of instances of
Dimension that may be specified as:
- A list or tuple of instances of plotly.graph_objs.parcats.Dimension
- A list or tuple of dicts of string/value properties that
will be passed to the Dimension constructor
Supported dict properties:
categoryarray
Sets the order in which categories in this
dimension appear. Only has an effect if
`categoryorder` is set to "array". Used with
`categoryorder`.
categoryarraysrc
Sets the source reference on Chart Studio Cloud
for categoryarray .
categoryorder
Specifies the ordering logic for the categories
in the dimension. By default, plotly uses
"trace", which specifies the order that is
present in the data supplied. Set
`categoryorder` to *category ascending* or
*category descending* if order should be
determined by the alphanumerical order of the
category names. Set `categoryorder` to "array"
to derive the ordering from the attribute
`categoryarray`. If a category is not found in
the `categoryarray` array, the sorting behavior
for that attribute will be identical to the
"trace" mode. The unspecified categories will
follow the categories in `categoryarray`.
displayindex
The display index of dimension, from left to
right, zero indexed, defaults to dimension
index.
label
The shown name of the dimension.
ticktext
Sets alternative tick labels for the categories
in this dimension. Only has an effect if
`categoryorder` is set to "array". Should be an
array the same length as `categoryarray` Used
with `categoryorder`.
ticktextsrc
Sets the source reference on Chart Studio Cloud
for ticktext .
values
Dimension values. `values[n]` represents the
category value of the `n`th point in the
dataset, therefore the `values` vector for all
dimensions must be the same (longer vectors
will be truncated).
valuessrc
Sets the source reference on Chart Studio Cloud
for values .
visible
Shows the dimension when set to `true` (the
default). Hides the dimension for `false`.
Returns
-------
tuple[plotly.graph_objs.parcats.Dimension]
"""
return self["dimensions"]
@dimensions.setter
def dimensions(self, val):
self["dimensions"] = val
# dimensiondefaults
# -----------------
@property
def dimensiondefaults(self):
"""
When used in a template (as
layout.template.data.parcats.dimensiondefaults), sets the
default property values to use for elements of
parcats.dimensions
The 'dimensiondefaults' property is an instance of Dimension
that may be specified as:
- An instance of :class:`plotly.graph_objs.parcats.Dimension`
- A dict of string/value properties that will be passed
to the Dimension constructor
Supported dict properties:
Returns
-------
plotly.graph_objs.parcats.Dimension
"""
return self["dimensiondefaults"]
@dimensiondefaults.setter
def dimensiondefaults(self, val):
self["dimensiondefaults"] = val
# domain
# ------
@property
def domain(self):
"""
The 'domain' property is an instance of Domain
that may be specified as:
- An instance of :class:`plotly.graph_objs.parcats.Domain`
- A dict of string/value properties that will be passed
to the Domain constructor
Supported dict properties:
column
If there is a layout grid, use the domain for
this column in the grid for this parcats trace
.
row
If there is a layout grid, use the domain for
this row in the grid for this parcats trace .
x
Sets the horizontal domain of this parcats
trace (in plot fraction).
y
Sets the vertical domain of this parcats trace
(in plot fraction).
Returns
-------
plotly.graph_objs.parcats.Domain
"""
return self["domain"]
@domain.setter
def domain(self, val):
self["domain"] = val
# hoverinfo
# ---------
@property
def hoverinfo(self):
"""
Determines which trace information appear on hover. If `none`
or `skip` are set, no information is displayed upon hovering.
But, if `none` is set, click and hover events are still fired.
The 'hoverinfo' property is a flaglist and may be specified
as a string containing:
- Any combination of ['count', 'probability'] joined with '+' characters
(e.g. 'count+probability')
OR exactly one of ['all', 'none', 'skip'] (e.g. 'skip')
Returns
-------
Any
"""
return self["hoverinfo"]
@hoverinfo.setter
def hoverinfo(self, val):
self["hoverinfo"] = val
# hoveron
# -------
@property
def hoveron(self):
"""
Sets the hover interaction mode for the parcats diagram. If
`category`, hover interaction take place per category. If
`color`, hover interactions take place per color per category.
If `dimension`, hover interactions take place across all
categories per dimension.
The 'hoveron' property is an enumeration that may be specified as:
- One of the following enumeration values:
['category', 'color', 'dimension']
Returns
-------
Any
"""
return self["hoveron"]
@hoveron.setter
def hoveron(self, val):
self["hoveron"] = val
# hovertemplate
# -------------
@property
def hovertemplate(self):
"""
Template string used for rendering the information that appear
on hover box. Note that this will override `hoverinfo`.
Variables are inserted using %{variable}, for example "y:
%{y}". Numbers are formatted using d3-format's syntax
%{variable:d3-format}, for example "Price: %{y:$.2f}".
https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format for details on
the formatting syntax. Dates are formatted using d3-time-
format's syntax %{variable|d3-time-format}, for example "Day:
%{2019-01-01|%A}". https://github.com/d3/d3-time-
format#locale_format for details on the date formatting syntax.
The variables available in `hovertemplate` are the ones emitted
as event data described at this link
https://plotly.com/javascript/plotlyjs-events/#event-data.
Additionally, every attributes that can be specified per-point
(the ones that are `arrayOk: true`) are available. variables
`count`, `probability`, `category`, `categorycount`,
`colorcount` and `bandcolorcount`. Anything contained in tag
`<extra>` is displayed in the secondary box, for example
"<extra>{fullData.name}</extra>". To hide the secondary box
completely, use an empty tag `<extra></extra>`.
The 'hovertemplate' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["hovertemplate"]
@hovertemplate.setter
def hovertemplate(self, val):
self["hovertemplate"] = val
# labelfont
# ---------
@property
def labelfont(self):
"""
Sets the font for the `dimension` labels.
The 'labelfont' property is an instance of Labelfont
that may be specified as:
- An instance of :class:`plotly.graph_objs.parcats.Labelfont`
- A dict of string/value properties that will be passed
to the Labelfont constructor
Supported dict properties:
color
family
HTML font family - the typeface that will be
applied by the web browser. The web browser
will only be able to apply a font if it is
available on the system which it operates.
Provide multiple font families, separated by
commas, to indicate the preference in which to
apply fonts if they aren't available on the
system. The Chart Studio Cloud (at
https://chart-studio.plotly.com or on-premise)
generates images on a server, where only a
select number of fonts are installed and
supported. These include "Arial", "Balto",
"Courier New", "Droid Sans",, "Droid Serif",
"Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
size
Returns
-------
plotly.graph_objs.parcats.Labelfont
"""
return self["labelfont"]
@labelfont.setter
def labelfont(self, val):
self["labelfont"] = val
# line
# ----
@property
def line(self):
"""
The 'line' property is an instance of Line
that may be specified as:
- An instance of :class:`plotly.graph_objs.parcats.Line`
- A dict of string/value properties that will be passed
to the Line constructor
Supported dict properties:
autocolorscale
Determines whether the colorscale is a default
palette (`autocolorscale: true`) or the palette
determined by `line.colorscale`. Has an effect
only if in `line.color`is set to a numerical
array. In case `colorscale` is unspecified or
`autocolorscale` is true, the default palette
will be chosen according to whether numbers in
the `color` array are all positive, all
negative or mixed.
cauto
Determines whether or not the color domain is
computed with respect to the input data (here
in `line.color`) or the bounds set in
`line.cmin` and `line.cmax` Has an effect only
if in `line.color`is set to a numerical array.
Defaults to `false` when `line.cmin` and
`line.cmax` are set by the user.
cmax
Sets the upper bound of the color domain. Has
an effect only if in `line.color`is set to a
numerical array. Value should have the same
units as in `line.color` and if set,
`line.cmin` must be set as well.
cmid
Sets the mid-point of the color domain by
scaling `line.cmin` and/or `line.cmax` to be
equidistant to this point. Has an effect only
if in `line.color`is set to a numerical array.
Value should have the same units as in
`line.color`. Has no effect when `line.cauto`
is `false`.
cmin
Sets the lower bound of the color domain. Has
an effect only if in `line.color`is set to a
numerical array. Value should have the same
units as in `line.color` and if set,
`line.cmax` must be set as well.
color
Sets thelinecolor. It accepts either a specific
color or an array of numbers that are mapped to
the colorscale relative to the max and min
values of the array or relative to `line.cmin`
and `line.cmax` if set.
coloraxis
Sets a reference to a shared color axis.
References to these shared color axes are
"coloraxis", "coloraxis2", "coloraxis3", etc.
Settings for these shared color axes are set in
the layout, under `layout.coloraxis`,
`layout.coloraxis2`, etc. Note that multiple
color scales can be linked to the same color
axis.
colorbar
:class:`plotly.graph_objects.parcats.line.Color
Bar` instance or dict with compatible
properties
colorscale
Sets the colorscale. Has an effect only if in
`line.color`is set to a numerical array. The
colorscale must be an array containing arrays
mapping a normalized value to an rgb, rgba,
hex, hsl, hsv, or named color string. At
minimum, a mapping for the lowest (0) and
highest (1) values are required. For example,
`[[0, 'rgb(0,0,255)'], [1, 'rgb(255,0,0)']]`.
To control the bounds of the colorscale in
color space, use`line.cmin` and `line.cmax`.
Alternatively, `colorscale` may be a palette
name string of the following list: Greys,YlGnBu
,Greens,YlOrRd,Bluered,RdBu,Reds,Blues,Picnic,R
ainbow,Portland,Jet,Hot,Blackbody,Earth,Electri
c,Viridis,Cividis.
colorsrc
Sets the source reference on Chart Studio Cloud
for color .
hovertemplate
Template string used for rendering the
information that appear on hover box. Note that
this will override `hoverinfo`. Variables are
inserted using %{variable}, for example "y:
%{y}". Numbers are formatted using d3-format's
syntax %{variable:d3-format}, for example
"Price: %{y:$.2f}".
https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format
for details on the formatting syntax. Dates are
formatted using d3-time-format's syntax
%{variable|d3-time-format}, for example "Day:
%{2019-01-01|%A}".
https://github.com/d3/d3-time-
format#locale_format for details on the date
formatting syntax. The variables available in
`hovertemplate` are the ones emitted as event
data described at this link
https://plotly.com/javascript/plotlyjs-
events/#event-data. Additionally, every
attributes that can be specified per-point (the
ones that are `arrayOk: true`) are available.
variables `count` and `probability`. Anything
contained in tag `<extra>` is displayed in the
secondary box, for example
"<extra>{fullData.name}</extra>". To hide the
secondary box completely, use an empty tag
`<extra></extra>`.
reversescale
Reverses the color mapping if true. Has an
effect only if in `line.color`is set to a
numerical array. If true, `line.cmin` will
correspond to the last color in the array and
`line.cmax` will correspond to the first color.
shape
Sets the shape of the paths. If `linear`, paths
are composed of straight lines. If `hspline`,
paths are composed of horizontal curved splines
showscale
Determines whether or not a colorbar is
displayed for this trace. Has an effect only if
in `line.color`is set to a numerical array.
Returns
-------
plotly.graph_objs.parcats.Line
"""
return self["line"]
@line.setter
def line(self, val):
self["line"] = val
# meta
# ----
@property
def meta(self):
"""
Assigns extra meta information associated with this trace that
can be used in various text attributes. Attributes such as
trace `name`, graph, axis and colorbar `title.text`, annotation
`text` `rangeselector`, `updatemenues` and `sliders` `label`
text all support `meta`. To access the trace `meta` values in
an attribute in the same trace, simply use `%{meta[i]}` where
`i` is the index or key of the `meta` item in question. To
access trace `meta` in layout attributes, use
`%{data[n[.meta[i]}` where `i` is the index or key of the
`meta` and `n` is the trace index.
The 'meta' property accepts values of any type
Returns
-------
Any|numpy.ndarray
"""
return self["meta"]
@meta.setter
def meta(self, val):
self["meta"] = val
# metasrc
# -------
@property
def metasrc(self):
"""
Sets the source reference on Chart Studio Cloud for meta .
The 'metasrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["metasrc"]
@metasrc.setter
def metasrc(self, val):
self["metasrc"] = val
# name
# ----
@property
def name(self):
"""
Sets the trace name. The trace name appear as the legend item
and on hover.
The 'name' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["name"]
@name.setter
def name(self, val):
self["name"] = val
# sortpaths
# ---------
@property
def sortpaths(self):
"""
Sets the path sorting algorithm. If `forward`, sort paths based
on dimension categories from left to right. If `backward`, sort
paths based on dimensions categories from right to left.
The 'sortpaths' property is an enumeration that may be specified as:
- One of the following enumeration values:
['forward', 'backward']
Returns
-------
Any
"""
return self["sortpaths"]
@sortpaths.setter
def sortpaths(self, val):
self["sortpaths"] = val
# stream
# ------
@property
def stream(self):
"""
The 'stream' property is an instance of Stream
that may be specified as:
- An instance of :class:`plotly.graph_objs.parcats.Stream`
- A dict of string/value properties that will be passed
to the Stream constructor
Supported dict properties:
maxpoints
Sets the maximum number of points to keep on
the plots from an incoming stream. If
`maxpoints` is set to 50, only the newest 50
points will be displayed on the plot.
token
The stream id number links a data trace on a
plot with a stream. See https://chart-
studio.plotly.com/settings for more details.
Returns
-------
plotly.graph_objs.parcats.Stream
"""
return self["stream"]
@stream.setter
def stream(self, val):
self["stream"] = val
# tickfont
# --------
@property
def tickfont(self):
"""
Sets the font for the `category` labels.
The 'tickfont' property is an instance of Tickfont
that may be specified as:
- An instance of :class:`plotly.graph_objs.parcats.Tickfont`
- A dict of string/value properties that will be passed
to the Tickfont constructor
Supported dict properties:
color
family
HTML font family - the typeface that will be
applied by the web browser. The web browser
will only be able to apply a font if it is
available on the system which it operates.
Provide multiple font families, separated by
commas, to indicate the preference in which to
apply fonts if they aren't available on the
system. The Chart Studio Cloud (at
https://chart-studio.plotly.com or on-premise)
generates images on a server, where only a
select number of fonts are installed and
supported. These include "Arial", "Balto",
"Courier New", "Droid Sans",, "Droid Serif",
"Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
size
Returns
-------
plotly.graph_objs.parcats.Tickfont
"""
return self["tickfont"]
@tickfont.setter
def tickfont(self, val):
self["tickfont"] = val
# uid
# ---
@property
def uid(self):
"""
Assign an id to this trace, Use this to provide object
constancy between traces during animations and transitions.
The 'uid' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["uid"]
@uid.setter
def uid(self, val):
self["uid"] = val
# uirevision
# ----------
@property
def uirevision(self):
"""
Controls persistence of some user-driven changes to the trace:
`constraintrange` in `parcoords` traces, as well as some
`editable: true` modifications such as `name` and
`colorbar.title`. Defaults to `layout.uirevision`. Note that
other user-driven trace attribute changes are controlled by
`layout` attributes: `trace.visible` is controlled by
`layout.legend.uirevision`, `selectedpoints` is controlled by
`layout.selectionrevision`, and `colorbar.(x|y)` (accessible
with `config: {editable: true}`) is controlled by
`layout.editrevision`. Trace changes are tracked by `uid`,
which only falls back on trace index if no `uid` is provided.
So if your app can add/remove traces before the end of the
`data` array, such that the same trace has a different index,
you can still preserve user-driven changes if you give each
trace a `uid` that stays with it as it moves.
The 'uirevision' property accepts values of any type
Returns
-------
Any
"""
return self["uirevision"]
@uirevision.setter
def uirevision(self, val):
self["uirevision"] = val
# visible
# -------
@property
def visible(self):
"""
Determines whether or not this trace is visible. If
"legendonly", the trace is not drawn, but can appear as a
legend item (provided that the legend itself is visible).
The 'visible' property is an enumeration that may be specified as:
- One of the following enumeration values:
[True, False, 'legendonly']
Returns
-------
Any
"""
return self["visible"]
@visible.setter
def visible(self, val):
self["visible"] = val
# type
# ----
@property
def type(self):
return self._props["type"]
    # Self properties description
    # ---------------------------
    @property
    def _prop_descriptions(self):
        # Human-readable summary of every settable property. The text is a
        # runtime string literal consumed by the base class when building
        # help text and error messages, so it is kept verbatim.
        return """\
        arrangement
            Sets the drag interaction mode for categories and
            dimensions. If `perpendicular`, the categories can only
            move along a line perpendicular to the paths. If
            `freeform`, the categories can freely move on the
            plane. If `fixed`, the categories and dimensions are
            stationary.
        bundlecolors
            Sort paths so that like colors are bundled together
            within each category.
        counts
            The number of observations represented by each state.
            Defaults to 1 so that each state represents one
            observation
        countssrc
            Sets the source reference on Chart Studio Cloud for
            counts .
        dimensions
            The dimensions (variables) of the parallel categories
            diagram.
        dimensiondefaults
            When used in a template (as
            layout.template.data.parcats.dimensiondefaults), sets
            the default property values to use for elements of
            parcats.dimensions
        domain
            :class:`plotly.graph_objects.parcats.Domain` instance
            or dict with compatible properties
        hoverinfo
            Determines which trace information appear on hover. If
            `none` or `skip` are set, no information is displayed
            upon hovering. But, if `none` is set, click and hover
            events are still fired.
        hoveron
            Sets the hover interaction mode for the parcats
            diagram. If `category`, hover interaction take place
            per category. If `color`, hover interactions take place
            per color per category. If `dimension`, hover
            interactions take place across all categories per
            dimension.
        hovertemplate
            Template string used for rendering the information that
            appear on hover box. Note that this will override
            `hoverinfo`. Variables are inserted using %{variable},
            for example "y: %{y}". Numbers are formatted using
            d3-format's syntax %{variable:d3-format}, for example
            "Price: %{y:$.2f}". https://github.com/d3/d3-3.x-api-
            reference/blob/master/Formatting.md#d3_format for
            details on the formatting syntax. Dates are formatted
            using d3-time-format's syntax %{variable|d3-time-
            format}, for example "Day: %{2019-01-01|%A}".
            https://github.com/d3/d3-time-format#locale_format for
            details on the date formatting syntax. The variables
            available in `hovertemplate` are the ones emitted as
            event data described at this link
            https://plotly.com/javascript/plotlyjs-events/#event-
            data. Additionally, every attributes that can be
            specified per-point (the ones that are `arrayOk: true`)
            are available. variables `count`, `probability`,
            `category`, `categorycount`, `colorcount` and
            `bandcolorcount`. Anything contained in tag `<extra>`
            is displayed in the secondary box, for example
            "<extra>{fullData.name}</extra>". To hide the secondary
            box completely, use an empty tag `<extra></extra>`.
        labelfont
            Sets the font for the `dimension` labels.
        line
            :class:`plotly.graph_objects.parcats.Line` instance or
            dict with compatible properties
        meta
            Assigns extra meta information associated with this
            trace that can be used in various text attributes.
            Attributes such as trace `name`, graph, axis and
            colorbar `title.text`, annotation `text`
            `rangeselector`, `updatemenues` and `sliders` `label`
            text all support `meta`. To access the trace `meta`
            values in an attribute in the same trace, simply use
            `%{meta[i]}` where `i` is the index or key of the
            `meta` item in question. To access trace `meta` in
            layout attributes, use `%{data[n[.meta[i]}` where `i`
            is the index or key of the `meta` and `n` is the trace
            index.
        metasrc
            Sets the source reference on Chart Studio Cloud for
            meta .
        name
            Sets the trace name. The trace name appear as the
            legend item and on hover.
        sortpaths
            Sets the path sorting algorithm. If `forward`, sort
            paths based on dimension categories from left to right.
            If `backward`, sort paths based on dimensions
            categories from right to left.
        stream
            :class:`plotly.graph_objects.parcats.Stream` instance
            or dict with compatible properties
        tickfont
            Sets the font for the `category` labels.
        uid
            Assign an id to this trace, Use this to provide object
            constancy between traces during animations and
            transitions.
        uirevision
            Controls persistence of some user-driven changes to the
            trace: `constraintrange` in `parcoords` traces, as well
            as some `editable: true` modifications such as `name`
            and `colorbar.title`. Defaults to `layout.uirevision`.
            Note that other user-driven trace attribute changes are
            controlled by `layout` attributes: `trace.visible` is
            controlled by `layout.legend.uirevision`,
            `selectedpoints` is controlled by
            `layout.selectionrevision`, and `colorbar.(x|y)`
            (accessible with `config: {editable: true}`) is
            controlled by `layout.editrevision`. Trace changes are
            tracked by `uid`, which only falls back on trace index
            if no `uid` is provided. So if your app can add/remove
            traces before the end of the `data` array, such that
            the same trace has a different index, you can still
            preserve user-driven changes if you give each trace a
            `uid` that stays with it as it moves.
        visible
            Determines whether or not this trace is visible. If
            "legendonly", the trace is not drawn, but can appear as
            a legend item (provided that the legend itself is
            visible).
        """
    def __init__(
        self,
        arg=None,
        arrangement=None,
        bundlecolors=None,
        counts=None,
        countssrc=None,
        dimensions=None,
        dimensiondefaults=None,
        domain=None,
        hoverinfo=None,
        hoveron=None,
        hovertemplate=None,
        labelfont=None,
        line=None,
        meta=None,
        metasrc=None,
        name=None,
        sortpaths=None,
        stream=None,
        tickfont=None,
        uid=None,
        uirevision=None,
        visible=None,
        **kwargs
    ):
        """
        Construct a new Parcats object
        Parallel categories diagram for multidimensional categorical
        data.
        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of :class:`plotly.graph_objs.Parcats`
        arrangement
            Sets the drag interaction mode for categories and
            dimensions. If `perpendicular`, the categories can only
            move along a line perpendicular to the paths. If
            `freeform`, the categories can freely move on the
            plane. If `fixed`, the categories and dimensions are
            stationary.
        bundlecolors
            Sort paths so that like colors are bundled together
            within each category.
        counts
            The number of observations represented by each state.
            Defaults to 1 so that each state represents one
            observation
        countssrc
            Sets the source reference on Chart Studio Cloud for
            counts .
        dimensions
            The dimensions (variables) of the parallel categories
            diagram.
        dimensiondefaults
            When used in a template (as
            layout.template.data.parcats.dimensiondefaults), sets
            the default property values to use for elements of
            parcats.dimensions
        domain
            :class:`plotly.graph_objects.parcats.Domain` instance
            or dict with compatible properties
        hoverinfo
            Determines which trace information appear on hover. If
            `none` or `skip` are set, no information is displayed
            upon hovering. But, if `none` is set, click and hover
            events are still fired.
        hoveron
            Sets the hover interaction mode for the parcats
            diagram. If `category`, hover interaction take place
            per category. If `color`, hover interactions take place
            per color per category. If `dimension`, hover
            interactions take place across all categories per
            dimension.
        hovertemplate
            Template string used for rendering the information that
            appear on hover box. Note that this will override
            `hoverinfo`. Variables are inserted using %{variable},
            for example "y: %{y}". Numbers are formatted using
            d3-format's syntax %{variable:d3-format}, for example
            "Price: %{y:$.2f}". https://github.com/d3/d3-3.x-api-
            reference/blob/master/Formatting.md#d3_format for
            details on the formatting syntax. Dates are formatted
            using d3-time-format's syntax %{variable|d3-time-
            format}, for example "Day: %{2019-01-01|%A}".
            https://github.com/d3/d3-time-format#locale_format for
            details on the date formatting syntax. The variables
            available in `hovertemplate` are the ones emitted as
            event data described at this link
            https://plotly.com/javascript/plotlyjs-events/#event-
            data. Additionally, every attributes that can be
            specified per-point (the ones that are `arrayOk: true`)
            are available. variables `count`, `probability`,
            `category`, `categorycount`, `colorcount` and
            `bandcolorcount`. Anything contained in tag `<extra>`
            is displayed in the secondary box, for example
            "<extra>{fullData.name}</extra>". To hide the secondary
            box completely, use an empty tag `<extra></extra>`.
        labelfont
            Sets the font for the `dimension` labels.
        line
            :class:`plotly.graph_objects.parcats.Line` instance or
            dict with compatible properties
        meta
            Assigns extra meta information associated with this
            trace that can be used in various text attributes.
            Attributes such as trace `name`, graph, axis and
            colorbar `title.text`, annotation `text`
            `rangeselector`, `updatemenus` and `sliders` `label`
            text all support `meta`. To access the trace `meta`
            values in an attribute in the same trace, simply use
            `%{meta[i]}` where `i` is the index or key of the
            `meta` item in question. To access trace `meta` in
            layout attributes, use `%{data[n].meta[i]}` where `i`
            is the index or key of the `meta` and `n` is the trace
            index.
        metasrc
            Sets the source reference on Chart Studio Cloud for
            meta .
        name
            Sets the trace name. The trace name appear as the
            legend item and on hover.
        sortpaths
            Sets the path sorting algorithm. If `forward`, sort
            paths based on dimension categories from left to right.
            If `backward`, sort paths based on dimensions
            categories from right to left.
        stream
            :class:`plotly.graph_objects.parcats.Stream` instance
            or dict with compatible properties
        tickfont
            Sets the font for the `category` labels.
        uid
            Assign an id to this trace, Use this to provide object
            constancy between traces during animations and
            transitions.
        uirevision
            Controls persistence of some user-driven changes to the
            trace: `constraintrange` in `parcoords` traces, as well
            as some `editable: true` modifications such as `name`
            and `colorbar.title`. Defaults to `layout.uirevision`.
            Note that other user-driven trace attribute changes are
            controlled by `layout` attributes: `trace.visible` is
            controlled by `layout.legend.uirevision`,
            `selectedpoints` is controlled by
            `layout.selectionrevision`, and `colorbar.(x|y)`
            (accessible with `config: {editable: true}`) is
            controlled by `layout.editrevision`. Trace changes are
            tracked by `uid`, which only falls back on trace index
            if no `uid` is provided. So if your app can add/remove
            traces before the end of the `data` array, such that
            the same trace has a different index, you can still
            preserve user-driven changes if you give each trace a
            `uid` that stays with it as it moves.
        visible
            Determines whether or not this trace is visible. If
            "legendonly", the trace is not drawn, but can appear as
            a legend item (provided that the legend itself is
            visible).
        Returns
        -------
        Parcats
        """
        super(Parcats, self).__init__("parcats")
        # Internal construction path: a parent container builds the child
        # in-place, so skip all validation/population below.
        if "_parent" in kwargs:
            self._parent = kwargs["_parent"]
            return
        # Validate arg
        # ------------
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            # Shallow copy so pops below do not mutate the caller's dict.
            arg = _copy.copy(arg)
        else:
            raise ValueError(
                """\
The first argument to the plotly.graph_objs.Parcats
constructor must be a dict or
an instance of :class:`plotly.graph_objs.Parcats`"""
            )
        # Handle skip_invalid
        # -------------------
        self._skip_invalid = kwargs.pop("skip_invalid", False)
        self._validate = kwargs.pop("_validate", True)
        # Populate data dict with properties
        # ----------------------------------
        # For every property: an explicit keyword argument wins over the
        # corresponding entry in `arg`.
        _v = arg.pop("arrangement", None)
        _v = arrangement if arrangement is not None else _v
        if _v is not None:
            self["arrangement"] = _v
        _v = arg.pop("bundlecolors", None)
        _v = bundlecolors if bundlecolors is not None else _v
        if _v is not None:
            self["bundlecolors"] = _v
        _v = arg.pop("counts", None)
        _v = counts if counts is not None else _v
        if _v is not None:
            self["counts"] = _v
        _v = arg.pop("countssrc", None)
        _v = countssrc if countssrc is not None else _v
        if _v is not None:
            self["countssrc"] = _v
        _v = arg.pop("dimensions", None)
        _v = dimensions if dimensions is not None else _v
        if _v is not None:
            self["dimensions"] = _v
        _v = arg.pop("dimensiondefaults", None)
        _v = dimensiondefaults if dimensiondefaults is not None else _v
        if _v is not None:
            self["dimensiondefaults"] = _v
        _v = arg.pop("domain", None)
        _v = domain if domain is not None else _v
        if _v is not None:
            self["domain"] = _v
        _v = arg.pop("hoverinfo", None)
        _v = hoverinfo if hoverinfo is not None else _v
        if _v is not None:
            self["hoverinfo"] = _v
        _v = arg.pop("hoveron", None)
        _v = hoveron if hoveron is not None else _v
        if _v is not None:
            self["hoveron"] = _v
        _v = arg.pop("hovertemplate", None)
        _v = hovertemplate if hovertemplate is not None else _v
        if _v is not None:
            self["hovertemplate"] = _v
        _v = arg.pop("labelfont", None)
        _v = labelfont if labelfont is not None else _v
        if _v is not None:
            self["labelfont"] = _v
        _v = arg.pop("line", None)
        _v = line if line is not None else _v
        if _v is not None:
            self["line"] = _v
        _v = arg.pop("meta", None)
        _v = meta if meta is not None else _v
        if _v is not None:
            self["meta"] = _v
        _v = arg.pop("metasrc", None)
        _v = metasrc if metasrc is not None else _v
        if _v is not None:
            self["metasrc"] = _v
        _v = arg.pop("name", None)
        _v = name if name is not None else _v
        if _v is not None:
            self["name"] = _v
        _v = arg.pop("sortpaths", None)
        _v = sortpaths if sortpaths is not None else _v
        if _v is not None:
            self["sortpaths"] = _v
        _v = arg.pop("stream", None)
        _v = stream if stream is not None else _v
        if _v is not None:
            self["stream"] = _v
        _v = arg.pop("tickfont", None)
        _v = tickfont if tickfont is not None else _v
        if _v is not None:
            self["tickfont"] = _v
        _v = arg.pop("uid", None)
        _v = uid if uid is not None else _v
        if _v is not None:
            self["uid"] = _v
        _v = arg.pop("uirevision", None)
        _v = uirevision if uirevision is not None else _v
        if _v is not None:
            self["uirevision"] = _v
        _v = arg.pop("visible", None)
        _v = visible if visible is not None else _v
        if _v is not None:
            self["visible"] = _v
        # Read-only literals
        # ------------------
        self._props["type"] = "parcats"
        arg.pop("type", None)
        # Process unknown kwargs
        # ----------------------
        self._process_kwargs(**dict(arg, **kwargs))
        # Reset skip_invalid
        # ------------------
        self._skip_invalid = False
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@graph_objs@_parcats.py@.PATH_END.py
|
{
"filename": "_pathbar.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/graph_objs/treemap/_pathbar.py",
"type": "Python"
}
|
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Pathbar(_BaseTraceHierarchyType):
    """Pathbar styling for treemap traces (auto-generated plotly graph object)."""
    # class properties
    # --------------------
    _parent_path_str = "treemap"
    _path_str = "treemap.pathbar"
    _valid_props = {"edgeshape", "side", "textfont", "thickness", "visible"}
    # edgeshape
    # ---------
    @property
    def edgeshape(self):
        """
        Determines which shape is used for edges between `barpath`
        labels.
        The 'edgeshape' property is an enumeration that may be specified as:
          - One of the following enumeration values:
                ['>', '<', '|', '/', '\\']
        Returns
        -------
        Any
        """
        return self["edgeshape"]
    @edgeshape.setter
    def edgeshape(self, val):
        self["edgeshape"] = val
    # side
    # ----
    @property
    def side(self):
        """
        Determines on which side of the treemap the `pathbar`
        should be presented.
        The 'side' property is an enumeration that may be specified as:
          - One of the following enumeration values:
                ['top', 'bottom']
        Returns
        -------
        Any
        """
        return self["side"]
    @side.setter
    def side(self, val):
        self["side"] = val
    # textfont
    # --------
    @property
    def textfont(self):
        """
        Sets the font used inside `pathbar`.
        The 'textfont' property is an instance of Textfont
        that may be specified as:
          - An instance of :class:`plotly.graph_objs.treemap.pathbar.Textfont`
          - A dict of string/value properties that will be passed
            to the Textfont constructor
            Supported dict properties:
                color
                colorsrc
                    Sets the source reference on Chart Studio Cloud
                    for `color`.
                family
                    HTML font family - the typeface that will be
                    applied by the web browser. The web browser
                    will only be able to apply a font if it is
                    available on the system which it operates.
                    Provide multiple font families, separated by
                    commas, to indicate the preference in which to
                    apply fonts if they aren't available on the
                    system. The Chart Studio Cloud (at
                    https://chart-studio.plotly.com or on-premise)
                    generates images on a server, where only a
                    select number of fonts are installed and
                    supported. These include "Arial", "Balto",
                    "Courier New", "Droid Sans", "Droid Serif",
                    "Droid Sans Mono", "Gravitas One", "Old
                    Standard TT", "Open Sans", "Overpass", "PT Sans
                    Narrow", "Raleway", "Times New Roman".
                familysrc
                    Sets the source reference on Chart Studio Cloud
                    for `family`.
                lineposition
                    Sets the kind of decoration line(s) with text,
                    such as an "under", "over" or "through" as well
                    as combinations e.g. "under+over", etc.
                linepositionsrc
                    Sets the source reference on Chart Studio Cloud
                    for `lineposition`.
                shadow
                    Sets the shape and color of the shadow behind
                    text. "auto" places minimal shadow and applies
                    contrast text font color. See
                    https://developer.mozilla.org/en-
                    US/docs/Web/CSS/text-shadow for additional
                    options.
                shadowsrc
                    Sets the source reference on Chart Studio Cloud
                    for `shadow`.
                size
                sizesrc
                    Sets the source reference on Chart Studio Cloud
                    for `size`.
                style
                    Sets whether a font should be styled with a
                    normal or italic face from its family.
                stylesrc
                    Sets the source reference on Chart Studio Cloud
                    for `style`.
                textcase
                    Sets capitalization of text. It can be used to
                    make text appear in all-uppercase or all-
                    lowercase, or with each word capitalized.
                textcasesrc
                    Sets the source reference on Chart Studio Cloud
                    for `textcase`.
                variant
                    Sets the variant of the font.
                variantsrc
                    Sets the source reference on Chart Studio Cloud
                    for `variant`.
                weight
                    Sets the weight (or boldness) of the font.
                weightsrc
                    Sets the source reference on Chart Studio Cloud
                    for `weight`.
        Returns
        -------
        plotly.graph_objs.treemap.pathbar.Textfont
        """
        return self["textfont"]
    @textfont.setter
    def textfont(self, val):
        self["textfont"] = val
    # thickness
    # ---------
    @property
    def thickness(self):
        """
        Sets the thickness of `pathbar` (in px). If not specified the
        `pathbar.textfont.size` is used with 3 pixels extra padding on
        each side.
        The 'thickness' property is a number and may be specified as:
          - An int or float in the interval [12, inf]
        Returns
        -------
        int|float
        """
        return self["thickness"]
    @thickness.setter
    def thickness(self, val):
        self["thickness"] = val
    # visible
    # -------
    @property
    def visible(self):
        """
        Determines if the path bar is drawn i.e. outside the trace
        `domain` and with one pixel gap.
        The 'visible' property must be specified as a bool
        (either True, or False)
        Returns
        -------
        bool
        """
        return self["visible"]
    @visible.setter
    def visible(self, val):
        self["visible"] = val
    # Self properties description
    # ---------------------------
    @property
    def _prop_descriptions(self):
        # Runtime string literal consumed by the base class; kept verbatim.
        return """\
        edgeshape
            Determines which shape is used for edges between
            `barpath` labels.
        side
            Determines on which side of the the treemap the
            `pathbar` should be presented.
        textfont
            Sets the font used inside `pathbar`.
        thickness
            Sets the thickness of `pathbar` (in px). If not
            specified the `pathbar.textfont.size` is used with 3
            pixles extra padding on each side.
        visible
            Determines if the path bar is drawn i.e. outside the
            trace `domain` and with one pixel gap.
        """
    def __init__(
        self,
        arg=None,
        edgeshape=None,
        side=None,
        textfont=None,
        thickness=None,
        visible=None,
        **kwargs,
    ):
        """
        Construct a new Pathbar object
        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of
            :class:`plotly.graph_objs.treemap.Pathbar`
        edgeshape
            Determines which shape is used for edges between
            `barpath` labels.
        side
            Determines on which side of the treemap the
            `pathbar` should be presented.
        textfont
            Sets the font used inside `pathbar`.
        thickness
            Sets the thickness of `pathbar` (in px). If not
            specified the `pathbar.textfont.size` is used with 3
            pixels extra padding on each side.
        visible
            Determines if the path bar is drawn i.e. outside the
            trace `domain` and with one pixel gap.
        Returns
        -------
        Pathbar
        """
        super(Pathbar, self).__init__("pathbar")
        # Internal construction path: a parent container builds the child
        # in-place, so skip all validation/population below.
        if "_parent" in kwargs:
            self._parent = kwargs["_parent"]
            return
        # Validate arg
        # ------------
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            # Shallow copy so pops below do not mutate the caller's dict.
            arg = _copy.copy(arg)
        else:
            raise ValueError(
                """\
The first argument to the plotly.graph_objs.treemap.Pathbar
constructor must be a dict or
an instance of :class:`plotly.graph_objs.treemap.Pathbar`"""
            )
        # Handle skip_invalid
        # -------------------
        self._skip_invalid = kwargs.pop("skip_invalid", False)
        self._validate = kwargs.pop("_validate", True)
        # Populate data dict with properties
        # ----------------------------------
        # For every property: an explicit keyword argument wins over the
        # corresponding entry in `arg`.
        _v = arg.pop("edgeshape", None)
        _v = edgeshape if edgeshape is not None else _v
        if _v is not None:
            self["edgeshape"] = _v
        _v = arg.pop("side", None)
        _v = side if side is not None else _v
        if _v is not None:
            self["side"] = _v
        _v = arg.pop("textfont", None)
        _v = textfont if textfont is not None else _v
        if _v is not None:
            self["textfont"] = _v
        _v = arg.pop("thickness", None)
        _v = thickness if thickness is not None else _v
        if _v is not None:
            self["thickness"] = _v
        _v = arg.pop("visible", None)
        _v = visible if visible is not None else _v
        if _v is not None:
            self["visible"] = _v
        # Process unknown kwargs
        # ----------------------
        self._process_kwargs(**dict(arg, **kwargs))
        # Reset skip_invalid
        # ------------------
        self._skip_invalid = False
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@graph_objs@treemap@_pathbar.py@.PATH_END.py
|
{
"filename": "mrf.py",
"repo_name": "lucabaldini/ixpeobssim",
"repo_path": "ixpeobssim_extracted/ixpeobssim-main/ixpeobssim/irf/mrf.py",
"type": "Python"
}
|
# Copyright (C) 2018--2022, the ixpeobssim team.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""Modulation respose function.
"""
from __future__ import print_function, division
from ixpeobssim.irf.base import xSpecRespBase
# pylint: disable=invalid-name
class xModulationResponse(xSpecRespBase):
    """Class describing the modulation response, i.e., the product of the
    effective area times the modulation factor.
    """
    # Y-axis units and label used by the base-class plotting machinery;
    # the '$...$' is matplotlib mathtext.
    Y_UNITS = 'cm$^2$'
    Y_LABEL = 'Modulation response function [%s]' % Y_UNITS
    def __init__(self, file_path):
        """Constructor.
        Loads the response from `file_path`, tagging the extension as 'mrf'.
        """
        xSpecRespBase.__init__(self, file_path, 'mrf')
    def plot(self):
        """Plot the modulation response (logarithmic y axis).
        """
        # pylint: disable=arguments-differ
        self.plot_base(logy=True)
|
lucabaldiniREPO_NAMEixpeobssimPATH_START.@ixpeobssim_extracted@ixpeobssim-main@ixpeobssim@irf@mrf.py@.PATH_END.py
|
{
"filename": "performgc.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/matplotlib/py2/matplotlib/testing/_nose/plugins/performgc.py",
"type": "Python"
}
|
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import gc
import os
from nose.plugins import Plugin
class PerformGC(Plugin):
    """This plugin adds option to call ``gc.collect`` after each test"""
    # Disabled unless --perform-gc (or the PERFORM_GC env var) is given.
    enabled = False
    def options(self, parser, env=os.environ):
        """Register the --perform-gc command line option with nose."""
        env_opt = 'PERFORM_GC'
        parser.add_option('--perform-gc', action='store_true',
                          dest='performGC', default=env.get(env_opt, False),
                          help='Call gc.collect() after each test')
    def configure(self, options, conf):
        """Enable the plugin if the option was set and nose allows it."""
        if not self.can_configure:
            return
        self.enabled = getattr(options, 'performGC', False)
    def afterTest(self, test):
        # nose hook: force a full garbage collection after every test.
        gc.collect()
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@matplotlib@py2@matplotlib@testing@_nose@plugins@performgc.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "yt-project/yt",
"repo_path": "yt_extracted/yt-main/yt/frontends/enzo_e/__init__.py",
"type": "Python"
}
|
yt-projectREPO_NAMEytPATH_START.@yt_extracted@yt-main@yt@frontends@enzo_e@__init__.py@.PATH_END.py
|
|
{
"filename": "setup.py",
"repo_name": "statsmodels/statsmodels",
"repo_path": "statsmodels_extracted/statsmodels-main/setup.py",
"type": "Python"
}
|
"""
To build with coverage of Cython files
export SM_CYTHON_COVERAGE=1
python -m pip install -e .
pytest --cov=statsmodels statsmodels
coverage html
"""
from setuptools import Command, Extension, find_packages, setup
from setuptools.dist import Distribution
from collections import defaultdict
import fnmatch
import inspect
import os
from os.path import dirname, join as pjoin, relpath
from pathlib import Path
import shutil
import sys
import numpy as np
from packaging.version import parse
# Detect whether Cython is available; SM_FORCE_C=1 simulates its absence
# so the pre-generated C sources are exercised in CI.
try:
    # SM_FORCE_C is a testing shim to force setup to use C source files
    FORCE_C = int(os.environ.get("SM_FORCE_C", 0))
    if FORCE_C:
        raise ImportError("Force import error for testing")
    from Cython import Tempita, __version__ as cython_version
    from Cython.Build import cythonize
    from Cython.Distutils import build_ext
    HAS_CYTHON = True
    CYTHON_3 = parse(cython_version) >= parse("3.0")
except ImportError:
    # Fall back to the plain setuptools build_ext and pre-generated C files.
    from setuptools.command.build_ext import build_ext  # noqa: F401
    HAS_CYTHON = CYTHON_3 = False
SETUP_DIR = Path(__file__).parent.resolve()
###############################################################################
# Key Values that Change Each Release
###############################################################################
# These are strictly installation requirements. Builds requirements are
# managed in pyproject.toml
# Read requirement pins from the text files, stripping trailing '#' comments.
INSTALL_REQUIRES = []
with open("requirements.txt", encoding="utf-8") as req:
    for line in req.readlines():
        INSTALL_REQUIRES.append(line.split("#")[0].strip())
DEVELOP_REQUIRES = []
with open("requirements-dev.txt", encoding="utf-8") as req:
    for line in req.readlines():
        DEVELOP_REQUIRES.append(line.split("#")[0].strip())
CYTHON_MIN_VER = "3.0.10" # minimum Cython accepted for source builds
EXTRAS_REQUIRE = {
    "build": ["cython>=" + CYTHON_MIN_VER],
    "develop": ["cython>=" + CYTHON_MIN_VER] + DEVELOP_REQUIRES,
    "docs": [
        "sphinx",
        "nbconvert",
        "jupyter_client",
        "ipykernel",
        "matplotlib",
        "nbformat",
        "numpydoc",
        "pandas-datareader",
    ],
}
###############################################################################
# Values that rarely change
###############################################################################
DISTNAME = "statsmodels"
DESCRIPTION = "Statistical computations and models for Python"
README = SETUP_DIR.joinpath("README.rst").read_text()
LONG_DESCRIPTION = README
MAINTAINER = "statsmodels Developers"
MAINTAINER_EMAIL = "pystatsmodels@googlegroups.com"
URL = "https://www.statsmodels.org/"
LICENSE = "BSD License"
DOWNLOAD_URL = ""
PROJECT_URLS = {
    "Bug Tracker": "https://github.com/statsmodels/statsmodels/issues",
    "Documentation": "https://www.statsmodels.org/stable/index.html",
    "Source Code": "https://github.com/statsmodels/statsmodels",
}
CLASSIFIERS = [
    "Development Status :: 4 - Beta",
    "Environment :: Console",
    "Programming Language :: Cython",
    "Programming Language :: Python :: 3.9",
    "Programming Language :: Python :: 3.10",
    "Programming Language :: Python :: 3.11",
    "Programming Language :: Python :: 3.12",
    "Operating System :: OS Independent",
    "Intended Audience :: End Users/Desktop",
    "Intended Audience :: Developers",
    "Intended Audience :: Science/Research",
    "Natural Language :: English",
    "License :: OSI Approved :: BSD License",
    "Topic :: Office/Business :: Financial",
    "Topic :: Scientific/Engineering",
]
# Copy top-level files (license, config) into the package directory so they
# ship inside the wheel; the copies are tracked for inclusion below.
FILES_TO_INCLUDE_IN_PACKAGE = ["LICENSE.txt", "setup.cfg"]
FILES_COPIED_TO_PACKAGE = []
for filename in FILES_TO_INCLUDE_IN_PACKAGE:
    if os.path.exists(filename):
        dest = os.path.join("statsmodels", filename)
        shutil.copy2(filename, dest)
        FILES_COPIED_TO_PACKAGE.append(dest)
STATESPACE_RESULTS = "statsmodels.tsa.statespace.tests.results"
# Non-Python data files bundled with each (sub)package, keyed by package name.
ADDITIONAL_PACKAGE_DATA = {
    "statsmodels": FILES_TO_INCLUDE_IN_PACKAGE,
    "statsmodels.datasets.tests": ["*.zip"],
    "statsmodels.iolib.tests.results": ["*.dta"],
    "statsmodels.stats.tests.results": ["*.json"],
    "statsmodels.tsa.stl.tests.results": ["*.csv"],
    "statsmodels.tsa.vector_ar.tests.results": ["*.npz", "*.dat"],
    "statsmodels.stats.tests": ["*.txt"],
    "statsmodels.stats.libqsturng": ["*.r", "*.txt", "*.dat"],
    "statsmodels.stats.libqsturng.tests": ["*.csv", "*.dat"],
    "statsmodels.sandbox.regression.tests": ["*.dta", "*.csv"],
    STATESPACE_RESULTS: ["*.pkl", "*.csv"],
    STATESPACE_RESULTS + ".frbny_nowcast": ["test*.mat"],
    STATESPACE_RESULTS + ".frbny_nowcast.Nowcasting.data.US": ["*.csv"],
}
##############################################################################
# Extension Building
##############################################################################
# SM_CYTHON_COVERAGE may arrive as "1", "true" or a quoted "true" depending
# on the shell; normalize to a bool, then to the "0"/"1" string the
# CYTHON_TRACE_NOGIL macro expects.
CYTHON_COVERAGE = os.environ.get("SM_CYTHON_COVERAGE", False)
CYTHON_COVERAGE = CYTHON_COVERAGE in ("1", "true", '"true"')
CYTHON_TRACE_NOGIL = str(int(CYTHON_COVERAGE))
if CYTHON_COVERAGE:
    print("Building with coverage for Cython code")
COMPILER_DIRECTIVES = {"linetrace": CYTHON_COVERAGE}
DEFINE_MACROS = [
    ("CYTHON_TRACE_NOGIL", CYTHON_TRACE_NOGIL),
    ("NPY_NO_DEPRECATED_API", "NPY_1_7_API_VERSION"),
]
# Simple (non-statespace) extensions: name -> config; ".pyx.in" sources are
# Tempita templates expanded by process_tempita below.
exts = dict(
    _stl={"source": "statsmodels/tsa/stl/_stl.pyx"},
    _exponential_smoothers={
        "source": "statsmodels/tsa/holtwinters/_exponential_smoothers.pyx"
    }, # noqa: E501
    _ets_smooth={
        "source": "statsmodels/tsa/exponential_smoothing/_ets_smooth.pyx"
    }, # noqa: E501
    _innovations={"source": "statsmodels/tsa/_innovations.pyx"},
    _hamilton_filter={
        "source": "statsmodels/tsa/regime_switching/_hamilton_filter.pyx.in"
    }, # noqa: E501
    _kim_smoother={
        "source": "statsmodels/tsa/regime_switching/_kim_smoother.pyx.in"
    }, # noqa: E501
    _arma_innovations={
        "source": "statsmodels/tsa/innovations/_arma_innovations.pyx.in"
    }, # noqa: E501
    linbin={"source": "statsmodels/nonparametric/linbin.pyx"},
    _qn={"source": "statsmodels/robust/_qn.pyx"},
    _smoothers_lowess={
        "source": "statsmodels/nonparametric/_smoothers_lowess.pyx"
    }, # noqa: E501
)
# Statespace extensions share extra include dirs and are handled separately.
statespace_exts = [
    "statsmodels/tsa/statespace/_initialization.pyx.in",
    "statsmodels/tsa/statespace/_representation.pyx.in",
    "statsmodels/tsa/statespace/_kalman_filter.pyx.in",
    "statsmodels/tsa/statespace/_filters/_conventional.pyx.in",
    "statsmodels/tsa/statespace/_filters/_inversions.pyx.in",
    "statsmodels/tsa/statespace/_filters/_univariate.pyx.in",
    "statsmodels/tsa/statespace/_filters/_univariate_diffuse.pyx.in",
    "statsmodels/tsa/statespace/_kalman_smoother.pyx.in",
    "statsmodels/tsa/statespace/_smoothers/_alternative.pyx.in",
    "statsmodels/tsa/statespace/_smoothers/_classical.pyx.in",
    "statsmodels/tsa/statespace/_smoothers/_conventional.pyx.in",
    "statsmodels/tsa/statespace/_smoothers/_univariate.pyx.in",
    "statsmodels/tsa/statespace/_smoothers/_univariate_diffuse.pyx.in",
    "statsmodels/tsa/statespace/_simulation_smoother.pyx.in",
    "statsmodels/tsa/statespace/_cfa_simulation_smoother.pyx.in",
    "statsmodels/tsa/statespace/_tools.pyx.in",
]
class CleanCommand(Command):
    """Refuse ``python setup.py clean`` and point users at ``git clean``."""
    user_options = []
    def initialize_options(self) -> None:
        # Required Command hook; nothing to initialize.
        pass
    def finalize_options(self) -> None:
        # Required Command hook; nothing to finalize.
        pass
    def run(self) -> None:
        # Print guidance and exit non-zero so builds do not continue.
        msg = """
python setup.py clean is not supported.
Use one of:
* `git clean -xdf` to clean all untracked files
* `git clean -Xdf` to clean untracked files ignored by .gitignore
"""
        print(msg)
        sys.exit(1)
cmdclass = {"clean": CleanCommand}
def check_source(source_name):
    """Chooses C or pyx source files, and raises if C is needed but missing"""
    # With Cython available we build straight from the (templated) pyx file.
    if HAS_CYTHON:
        return source_name, ".pyx"
    # Without Cython, fall back to the pre-generated C translation, which
    # must already exist next to the pyx source.
    c_source = source_name.replace(".pyx.in", ".c").replace(".pyx", ".c")
    if not os.path.exists(c_source):
        raise OSError(
            "C source not found. You must have Cython installed to "
            "build if the C source files have not been generated."
        )
    return c_source, ".c"
def process_tempita(source_name):
    """Render a ``.pyx.in`` tempita template to ``.pyx``; pass others through.

    Returns the name of the file to compile.  The rendered file inherits the
    template's timestamps so dependency tracking still works.
    """
    if not source_name.endswith("pyx.in"):
        # Not a template: nothing to render.
        return source_name

    with open(source_name, encoding="utf-8") as templated:
        rendered = Tempita.sub(templated.read())

    # Strip the trailing ".in" to get the .pyx output name.
    pyx_filename = source_name[:-3]
    with open(pyx_filename, "w", encoding="utf-8") as pyx_file:
        pyx_file.write(rendered)

    # Copy access/modification times from the template onto the output.
    stats = os.stat(source_name)
    try:
        os.utime(pyx_filename, ns=(stats.st_atime_ns, stats.st_mtime_ns))
    except AttributeError:
        # Very old interpreters lack the ``ns`` keyword.
        os.utime(pyx_filename, (stats.st_atime, stats.st_mtime))
    return pyx_filename
# Candidate NumPy include directories, deduplicated for reproducible builds.
# NOTE(review): ``np.core`` is a private alias in NumPy >= 2.0; confirm the
# supported NumPy range before relying on it.
NUMPY_INCLUDES = sorted(
    {np.get_include(), pjoin(dirname(inspect.getfile(np.core)), "include")}
)
# Compile/link settings for extensions that use NumPy's npymath library.
NUMPY_MATH_LIBS = {
    "include_dirs": [np.get_include()],
    "library_dirs": [os.path.join(np.get_include(), "..", "lib")],
    "libraries": ["npymath"],
}
# Build one setuptools Extension per configured Cython module.
extensions = []
for config in exts.values():
    source, ext = check_source(config["source"])
    source = process_tempita(source)
    # Module name: path separators become dots, file extension is dropped.
    name = source.replace("/", ".").replace(ext, "")
    include_dirs = config.get("include_dirs", [])
    depends = config.get("depends", [])
    libraries = config.get("libraries", [])
    library_dirs = config.get("library_dirs", [])
    # NOTE(review): read but never used below -- presumably meant to gate the
    # npymath merge; confirm against the original build script.
    uses_numpy_libraries = config.get("numpy_libraries", False)
    # Every extension links against npymath; sorting keeps builds reproducible.
    include_dirs = sorted(set(include_dirs + NUMPY_MATH_LIBS["include_dirs"]))
    libraries = sorted(set(libraries + NUMPY_MATH_LIBS["libraries"]))
    library_dirs = sorted(set(library_dirs + NUMPY_MATH_LIBS["library_dirs"]))
    # ``ext`` is rebound here from the file-extension string to the Extension.
    ext = Extension(
        name,
        [source],
        include_dirs=include_dirs,
        depends=depends,
        libraries=libraries,
        library_dirs=library_dirs,
        define_macros=DEFINE_MACROS,
    )
    extensions.append(ext)
# Statespace extensions share one configuration: statsmodels' C headers
# plus NumPy's npymath library.
for source in statespace_exts:
    source, ext = check_source(source)
    source = process_tempita(source)
    name = source.replace("/", ".").replace(ext, "")
    ext = Extension(
        name,
        [source],
        include_dirs=["statsmodels/src"] + NUMPY_MATH_LIBS["include_dirs"],
        depends=[],
        libraries=NUMPY_MATH_LIBS["libraries"],
        library_dirs=NUMPY_MATH_LIBS["library_dirs"],
        define_macros=DEFINE_MACROS,
    )
    extensions.append(ext)
# cpow is a Cython compiler directive controlling complex/integer power
# semantics -- NOTE(review): confirm the intended behaviour for this codebase.
COMPILER_DIRECTIVES["cpow"] = True
extensions = cythonize(
    extensions,
    compiler_directives=COMPILER_DIRECTIVES,
    language_level=3,
    # Force regeneration when measuring Cython coverage.
    force=CYTHON_COVERAGE,
)
##############################################################################
# Construct package data
##############################################################################
package_data = defaultdict(list)
filetypes = ["*.csv", "*.txt", "*.dta"]
# Register data-file glob patterns for every dataset directory that contains
# at least one matching file.  Note that the *patterns* are stored, not the
# matched file names.
for root, _, filenames in os.walk(
    pjoin(os.getcwd(), "statsmodels", "datasets")
):  # noqa: E501
    matches = []
    for filetype in filetypes:
        for filename in fnmatch.filter(filenames, filetype):
            matches.append(filename)
    if matches:
        package_data[".".join(relpath(root).split(os.path.sep))] = filetypes
# Every "results" directory (test fixtures) ships the same patterns.
for root, _, _ in os.walk(pjoin(os.getcwd(), "statsmodels")):
    if root.endswith("results"):
        package_data[".".join(relpath(root).split(os.path.sep))] = filetypes
# Explicitly listed extra data; note this loop rebinds ``filetypes``.
for path, filetypes in ADDITIONAL_PACKAGE_DATA.items():
    package_data[path].extend(filetypes)
# A stale MANIFEST can confuse sdist generation; remove it.
if os.path.exists("MANIFEST"):
    os.unlink("MANIFEST")
class BinaryDistribution(Distribution):
    """Distribution subclass that always reports platform-specific builds."""

    def is_pure(self):
        # Compiled extension modules make this a platform (non-pure) wheel.
        return False
# Hand everything to setuptools; distclass=BinaryDistribution forces a
# platform wheel because of the compiled extensions built above.
setup(
    name=DISTNAME,
    maintainer=MAINTAINER,
    ext_modules=extensions,
    maintainer_email=MAINTAINER_EMAIL,
    description=DESCRIPTION,
    license=LICENSE,
    url=URL,
    download_url=DOWNLOAD_URL,
    project_urls=PROJECT_URLS,
    long_description=LONG_DESCRIPTION,
    classifiers=CLASSIFIERS,
    platforms="any",
    cmdclass=cmdclass,
    packages=find_packages(),
    package_data=package_data,
    distclass=BinaryDistribution,
    include_package_data=False,  # True will install all files in repo
    install_requires=INSTALL_REQUIRES,
    extras_require=EXTRAS_REQUIRE,
    zip_safe=False,
    python_requires=">=3.9",
)

# Clean-up copied files
for copy in FILES_COPIED_TO_PACKAGE:
    os.unlink(copy)
|
statsmodelsREPO_NAMEstatsmodelsPATH_START.@statsmodels_extracted@statsmodels-main@setup.py@.PATH_END.py
|
{
"filename": "reverse_sequence.py",
"repo_name": "tensorflow/tensorflow",
"repo_path": "tensorflow_extracted/tensorflow-master/tensorflow/lite/testing/op_tests/reverse_sequence.py",
"type": "Python"
}
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test configs for reverse_sequence."""
import tensorflow as tf
from tensorflow.lite.testing.zip_test_utils import create_tensor_data
from tensorflow.lite.testing.zip_test_utils import make_zip_of_tests
from tensorflow.lite.testing.zip_test_utils import register_make_test_function
@register_make_test_function()
def make_reverse_sequence_tests(options):
  """Make a set of tests to do reverse_sequence."""

  # Each dict is expanded by make_zip_of_tests into the cartesian product of
  # its value lists, so one entry describes a whole family of test cases.
  test_parameters = [
      {
          "input_dtype": [tf.float32, tf.int32, tf.int64],
          "input_shape": [[8, 4, 5, 5, 6], [4, 4, 3, 5]],
          "seq_lengths": [[2, 2, 2, 2], [2, 1, 1, 0]],
          "seq_axis": [0, 3],
          "batch_axis": [1],
      },
      {
          "input_dtype": [tf.float32],
          "input_shape": [[2, 4, 5, 5, 6]],
          "seq_lengths": [[2, 1]],
          "seq_axis": [2],
          "batch_axis": [0],
      },
      {
          "input_dtype": [tf.float32],
          "input_shape": [[4, 2]],
          "seq_lengths": [[3, 1]],
          "seq_axis": [0],
          "batch_axis": [1],
      },
  ]

  def build_graph(parameters):
    """Build the reverse_sequence graph for one parameter combination."""
    input_tensor = tf.compat.v1.placeholder(
        dtype=parameters["input_dtype"],
        name="input",
        shape=parameters["input_shape"])
    reversed_tensor = tf.reverse_sequence(
        input=input_tensor,
        seq_lengths=parameters["seq_lengths"],
        batch_axis=parameters["batch_axis"],
        seq_axis=parameters["seq_axis"])
    return [input_tensor], [reversed_tensor]

  def build_inputs(parameters, sess, inputs, outputs):
    """Feed random data through the graph and capture the outputs."""
    feed_value = create_tensor_data(parameters["input_dtype"],
                                    parameters["input_shape"])
    return [feed_value], sess.run(
        outputs, feed_dict=dict(zip(inputs, [feed_value])))

  make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
|
tensorflowREPO_NAMEtensorflowPATH_START.@tensorflow_extracted@tensorflow-master@tensorflow@lite@testing@op_tests@reverse_sequence.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "astropy/photutils",
"repo_path": "photutils_extracted/photutils-main/photutils/detection/tests/__init__.py",
"type": "Python"
}
|
astropyREPO_NAMEphotutilsPATH_START.@photutils_extracted@photutils-main@photutils@detection@tests@__init__.py@.PATH_END.py
|
|
{
"filename": "exceptions.py",
"repo_name": "tardis-sn/tardis",
"repo_path": "tardis_extracted/tardis-main/tardis/plasma/exceptions.py",
"type": "Python"
}
|
class PlasmaException(Exception):
    """Base class for all plasma-calculation errors."""
class IncompleteAtomicData(PlasmaException):
    """Raised when the atomic data set lacks a quantity the plasma needs.

    Parameters
    ----------
    atomic_data_name : str
        Name of the missing atomic-data quantity; included in the message
        so the user knows what to supply.
    """

    def __init__(self, atomic_data_name):
        # Bug fix: the original called ``super(PlasmaException, self)``,
        # naming the wrong class and thereby skipping PlasmaException in the
        # MRO.  Plain super() follows the normal resolution order.
        message = (
            "The current plasma calculation requires {0}, "
            "which is not provided by the given atomic data".format(
                atomic_data_name
            )
        )
        super().__init__(message)
class PlasmaMissingModule(PlasmaException):
    """Plasma error signalling a missing module."""
    pass


class PlasmaIsolatedModule(PlasmaException):
    """Plasma error signalling an isolated module."""
    pass


class NotInitializedModule(PlasmaException):
    """Plasma error signalling an uninitialized module."""
    pass


class PlasmaIonizationError(PlasmaException):
    """Plasma error raised for ionization failures."""
    pass


class PlasmaConfigError(PlasmaException):
    """Plasma error raised for invalid configuration."""
    pass
|
tardis-snREPO_NAMEtardisPATH_START.@tardis_extracted@tardis-main@tardis@plasma@exceptions.py@.PATH_END.py
|
{
"filename": "contingency.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/scipy/py2/scipy/stats/contingency.py",
"type": "Python"
}
|
"""Some functions for working with contingency tables (i.e. cross tabulations).
"""
from __future__ import division, print_function, absolute_import
from functools import reduce
import numpy as np
from .stats import power_divergence
__all__ = ['margins', 'expected_freq', 'chi2_contingency']
def margins(a):
    """Return a list of the marginal sums of the array `a`.

    Parameters
    ----------
    a : ndarray
        The array for which to compute the marginal sums.

    Returns
    -------
    margsums : list of ndarrays
        A list of length ``a.ndim``; element ``k`` is `a` summed over every
        axis except ``k``.  Each element keeps ``a.ndim`` dimensions, with
        all axes other than ``k`` reduced to length 1, so the results
        broadcast against `a`.

    Examples
    --------
    >>> a = np.arange(12).reshape(2, 6)
    >>> m0, m1 = margins(a)
    >>> m0
    array([[15],
           [51]])
    >>> m1
    array([[ 6,  8, 10, 12, 14, 16]])
    """
    axes = list(range(a.ndim))
    # For each retained axis, sum over all of the other axes.
    return [
        np.apply_over_axes(np.sum, a, [other for other in axes if other != keep])
        for keep in axes
    ]
def expected_freq(observed):
    """
    Compute the expected frequencies from a contingency table.

    Given an n-dimensional contingency table of observed frequencies, compute
    the expected frequencies under the assumption that the groups associated
    with each dimension are independent.

    Parameters
    ----------
    observed : array_like
        The table of observed frequencies.  Generally at least 2-D; the 1-D
        case is trivial.

    Returns
    -------
    expected : ndarray of float64
        The expected frequencies, based on the marginal sums of the table.
        Same shape as `observed`.

    Examples
    --------
    >>> observed = np.array([[10, 10, 20],[20, 20, 20]])
    >>> expected_freq(observed)
    array([[ 12.,  12.,  16.],
           [ 18.,  18.,  24.]])
    """
    # Integer tables with many dimensions or large counts could overflow
    # during the products below, so switch to float64 immediately.
    table = np.asarray(observed, dtype=np.float64)
    # Marginal sums keep table.ndim axes each, so their product broadcasts
    # back to the full table shape.
    margsums = margins(table)
    total = table.sum()
    return reduce(np.multiply, margsums) / total ** (table.ndim - 1)
def chi2_contingency(observed, correction=True, lambda_=None):
    """Chi-square test of independence of variables in a contingency table.

    Computes the chi-square statistic and p-value for the hypothesis test of
    independence of the observed frequencies in the contingency table [1]_
    `observed`.  Expected frequencies are computed from the marginal sums
    under the independence assumption (see
    `scipy.stats.contingency.expected_freq`).  The number of degrees of
    freedom is ``observed.size - sum(observed.shape) + observed.ndim - 1``.

    Parameters
    ----------
    observed : array_like
        The contingency table of observed frequencies (an "R x C table" in
        the two-dimensional case).
    correction : bool, optional
        If True, *and* the degrees of freedom is 1, apply Yates' correction
        for continuity: each observed value is adjusted 0.5 towards the
        corresponding expected value.
    lambda_ : float or str, optional.
        By default Pearson's chi-squared statistic [2]_ is computed.
        `lambda_` selects a statistic from the Cressie-Read power divergence
        family [3]_ instead; see `power_divergence` for details.

    Returns
    -------
    chi2 : float
        The test statistic.
    p : float
        The p-value of the test.
    dof : int
        Degrees of freedom.
    expected : ndarray, same shape as `observed`
        The expected frequencies, based on the marginal sums of the table.

    See Also
    --------
    contingency.expected_freq
    fisher_exact
    chisquare
    power_divergence

    Notes
    -----
    The test is only meaningful when `observed` has at least two nontrivial
    dimensions; for a 1-D table ``expected == observed`` and the statistic
    is 0.  An often quoted validity guideline is that every observed and
    expected frequency should be at least 5.  Masked arrays are not handled,
    because the calculation does not make sense with missing values.

    If the expected frequencies and degrees of freedom were already known and
    Yates' correction were not needed, ``stats.chisquare`` with
    ``ddof=obs.size - 1 - dof`` gives the same result.

    References
    ----------
    .. [1] "Contingency table",
           https://en.wikipedia.org/wiki/Contingency_table
    .. [2] "Pearson's chi-squared test",
           https://en.wikipedia.org/wiki/Pearson%27s_chi-squared_test
    .. [3] Cressie, N. and Read, T. R. C., "Multinomial Goodness-of-Fit
           Tests", J. Royal Stat. Soc. Series B, Vol. 46, No. 3 (1984),
           pp. 440-464.
    """
    observed = np.asarray(observed)
    if np.any(observed < 0):
        raise ValueError("All values in `observed` must be nonnegative.")
    if observed.size == 0:
        raise ValueError("No data; `observed` has size 0.")

    expected = expected_freq(observed)
    if np.any(expected == 0):
        # Report one offending position to make the failure actionable.
        zeropos = list(zip(*np.nonzero(expected == 0)))[0]
        raise ValueError("The internally computed table of expected "
                         "frequencies has a zero element at %s." % (zeropos,))

    dof = expected.size - sum(expected.shape) + expected.ndim - 1

    if dof == 0:
        # Degenerate case: only one nontrivial dimension, so observed equals
        # expected exactly and the statistic is 0 with p-value 1.
        return 0.0, 1.0, dof, expected

    if dof == 1 and correction:
        # Yates' correction for continuity: move each observed value 0.5
        # towards its expected value.
        observed = observed + 0.5 * np.sign(expected - observed)

    chi2, p = power_divergence(observed, expected,
                               ddof=observed.size - 1 - dof, axis=None,
                               lambda_=lambda_)
    return chi2, p, dof, expected
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@scipy@py2@scipy@stats@contingency.py@.PATH_END.py
|
{
"filename": "batman_transit_subset.py",
"repo_name": "LucaMalavolta/PyORBIT",
"repo_path": "PyORBIT_extracted/PyORBIT-main/pyorbit/deprecated/batman_transit_subset.py",
"type": "Python"
}
|
from pyorbit.subroutines.common import np, OrderedSet
from pyorbit.models.abstract_model import AbstractModel
from pyorbit.models.abstract_transit import AbstractTransit
try:
import batman
except (ModuleNotFoundError,ImportError):
pass
class Batman_Transit_TTV_Subset(AbstractModel, AbstractTransit):
    """Transit model (batman backend) whose selected parameters are fitted
    independently for each subset of a dataset.

    ``subset_parameters`` (from the configuration) are removed from the
    common planet parameters and re-registered once per dataset subset,
    suffixed with the subset index (e.g. ``Tc_0``, ``Tc_1``, ...).
    """

    def __init__(self, *args, **kwargs):
        # this calls all constructors up to AbstractModel
        super().__init__(*args, **kwargs)
        super(AbstractModel, self).__init__(*args, **kwargs)
        # Fail early and loudly if the batman backend is not importable.
        try:
            import batman
        except (ModuleNotFoundError,ImportError):
            print("ERROR: batman not installed, this will not work")
            quit()
        self.list_pams_common = OrderedSet([
            'P',  # Period, log-uniform prior
            'e',  # eccentricity, uniform prior
            'omega',  # argument of pericenter (in radians)
            'R_Rs',  # planet radius (in units of stellar radii)
        ])
        # Filled lazily by initialize_model / initialize_model_dataset:
        self.batman_params = None  # shared batman.TransitParams instance
        self.batman_models = {}    # one TransitModel per dataset subset
        self.code_options = {}     # per-dataset sampling options

    def initialize_model(self, mc, **kwargs):
        """ Force the use of the time of inferior conjunction """
        mc.common_models[self.planet_ref].use_time_inferior_conjunction = True
        self._prepare_planetary_parameters(mc, **kwargs)
        self._prepare_limb_darkening_coefficients(mc, **kwargs)
        # Cap the requested thread count at the available CPUs.
        self.code_options['nthreads'] = kwargs.get('nthreads', 1)
        try:
            import multiprocessing
            if self.code_options['nthreads'] > multiprocessing.cpu_count():
                print('Batman nthreads automatically lowered to the maximum CPU count')
                self.code_options['nthreads'] = multiprocessing.cpu_count()
        except:
            # NOTE(review): bare except silently falls back to one thread.
            self.code_options['nthreads'] = 1
        """ We pick the subset parameters of choice,
        we remove them from the common parameters,
        and add them back as a dataset-specific parameter
        """
        self.subset_parameters = np.atleast_1d(kwargs.get('subset_parameters', None))
        for par in self.subset_parameters:
            self.list_pams_common.discard(par)
        self.batman_params = batman.TransitParams()
        """ Initialization with random transit parameters"""
        self.batman_params.t0 = 0.  # time of inferior conjunction
        self.batman_params.per = 1.  # orbital period
        # planet radius (in units of stellar radii)
        self.batman_params.rp = 0.1
        # semi-major axis (in units of stellar radii)
        self.batman_params.a = 15.
        self.batman_params.inc = 87.  # orbital inclination (in degrees)
        self.batman_params.ecc = 0.  # eccentricity
        self.batman_params.w = 90.  # longitude of periastron (in degrees)
        """ Setting up the limb darkening calculation"""
        self.batman_params.limb_dark = kwargs['limb_darkening_model']
        self.batman_params.u = np.ones(kwargs['limb_darkening_ncoeff'],
                                       dtype=np.double) * 0.1  # limb darkening coefficients

    def initialize_model_dataset(self, mc, dataset, **kwargs):
        """ Reading some code-specific keywords from the configuration file"""
        self._prepare_dataset_options(mc, dataset, **kwargs)
        # One batman model per subset, evaluated on that subset's epochs only.
        for i_sub in range(0, dataset.submodel_flag):
            sub_dataset = dataset.x[(dataset.submodel_id == i_sub)]
            for par_original in self.subset_parameters:
                par_subset = par_original+'_'+repr(i_sub)
                self.transfer_parameter_properties(mc, dataset, par_original, par_subset, dataset_pam=True)
                if par_original != 'Tc':
                    continue
                # Default Tc boundaries: the time span of this subset.
                if kwargs[dataset.name_ref].get('boundaries', False):
                    par_update = kwargs[dataset.name_ref]['boundaries'].get(
                        par_subset, [min(sub_dataset), max(sub_dataset)])
                elif kwargs.get('boundaries', False):
                    par_update = kwargs['boundaries'].get(par_subset, [min(sub_dataset), max(sub_dataset)])
                else:
                    par_update = [min(sub_dataset), max(sub_dataset)]
                # NOTE(review): ``par_update`` is computed but never applied
                # here -- presumably boundaries should be written back; confirm.
            self.batman_models[dataset.name_ref + '_'+repr(i_sub)] = \
                batman.TransitModel(self.batman_params,
                                    sub_dataset,
                                    supersample_factor=self.code_options[dataset.name_ref]['sample_factor'],
                                    exp_time=self.code_options[dataset.name_ref]['exp_time'],
                                    nthreads=self.code_options['nthreads'])

    def compute(self, parameter_values, dataset, x0_input=None):
        """
        :param parameter_values:
        :param dataset:
        :param x0_input:
        :return:
        """
        """
        From the batman manual:
        Reinitializing the model is by far the slowest component of batman,
        because it calculates the optimal step size
        for the integration starting from a very small value.
        -> However, we estimated the optimal step size from random parameters,
        so at some point we'll need to
        reinitialize the model so that the correct step size is computed.
        """
        random_selector = np.random.randint(1000)
        # NOTE(review): the line below overrides the random draw, so the
        # "== 50" re-initialization branch runs on *every* call -- looks like
        # debug leftover; confirm intended behaviour before changing.
        random_selector = 50
        if x0_input is None:
            y_output = np.zeros(dataset.n)
        else:
            y_output = x0_input * 0.
        for i_sub in range(0,dataset.submodel_flag):
            # Map each subset-specific parameter back onto its common name.
            for par_original in self.subset_parameters:
                par_subset = par_original+'_'+repr(i_sub)
                parameter_values[par_original] = parameter_values[par_subset]
            #if self.compute_inclination:
            #    if parameter_values['b'] > 1. + parameter_values['R_Rs'] :
            #        return y_output
            self.update_parameter_values(parameter_values, dataset.Tref)
            # Any NaN parameter invalidates the whole model evaluation.
            for key, key_val in parameter_values.items():
                if np.isnan(key_val):
                    return 0.
            self.batman_params.a = parameter_values['a_Rs']
            self.batman_params.inc = parameter_values['i']
            self.batman_params.per = parameter_values['P']  # orbital period
            # planet radius (in units of stellar radii)
            self.batman_params.rp = parameter_values['R_Rs']
            self.batman_params.ecc = parameter_values['e']  # eccentricity
            # longitude of periastron (in degrees)
            self.batman_params.w = parameter_values['omega']
            """
            print 'a ', self.batman_params.a
            print 'inc ', self.batman_params.inc
            print 't0 ', self.batman_params.t0
            print 'per ', self.batman_params.per
            print 'rp ', self.batman_params.rp
            print 'ecc ', self.batman_params.ecc
            print 'w ', self.batman_params.w
            print 'u ', self.batman_params.u
            """
            for par, i_par in self.ldvars.items():
                self.batman_params.u[i_par] = parameter_values[par]
            if x0_input is None:
                # Evaluate on the dataset's own epochs for this subset.
                sel_data = (dataset.submodel_id==i_sub)
                if random_selector == 50:
                    self.batman_models[dataset.name_ref + '_'+repr(i_sub)] = \
                        batman.TransitModel(self.batman_params,
                                            dataset.x0[sel_data],
                                            supersample_factor=self.code_options[
                                                dataset.name_ref]['sample_factor'],
                                            exp_time=self.code_options[dataset.name_ref]['exp_time'],
                                            nthreads=self.code_options['nthreads'])
                y_output[sel_data] = self.batman_models[dataset.name_ref+ '_'+repr(i_sub)].light_curve(self.batman_params) - 1.
            else:
                # Evaluate on an arbitrary grid restricted to the time span
                # covered by this subset's original epochs.
                original_dataset = dataset.x0[(dataset.submodel_id==i_sub)]
                sel_data = (x0_input >= np.amin(original_dataset)) & (x0_input <= np.amax(original_dataset))
                temporary_model = batman.TransitModel(self.batman_params,
                                                      x0_input[sel_data],
                                                      supersample_factor=self.code_options[
                                                          dataset.name_ref]['sample_factor'],
                                                      exp_time=self.code_options[dataset.name_ref]['exp_time'],
                                                      nthreads=self.code_options['nthreads'])
                y_output[sel_data] = temporary_model.light_curve(self.batman_params) - 1.
        return y_output
|
LucaMalavoltaREPO_NAMEPyORBITPATH_START.@PyORBIT_extracted@PyORBIT-main@pyorbit@deprecated@batman_transit_subset.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/graph_objs/scatterpolargl/__init__.py",
"type": "Python"
}
|
import sys
from typing import TYPE_CHECKING

# Static type checkers (and Python < 3.7, which lacks module-level
# __getattr__) get eager imports; at runtime the submodules are loaded
# lazily through plotly's relative_import helper to keep import time down.
if sys.version_info < (3, 7) or TYPE_CHECKING:
    from ._hoverlabel import Hoverlabel
    from ._legendgrouptitle import Legendgrouptitle
    from ._line import Line
    from ._marker import Marker
    from ._selected import Selected
    from ._stream import Stream
    from ._textfont import Textfont
    from ._unselected import Unselected
    from . import hoverlabel
    from . import legendgrouptitle
    from . import marker
    from . import selected
    from . import unselected
else:
    from _plotly_utils.importers import relative_import

    __all__, __getattr__, __dir__ = relative_import(
        __name__,
        [".hoverlabel", ".legendgrouptitle", ".marker", ".selected", ".unselected"],
        [
            "._hoverlabel.Hoverlabel",
            "._legendgrouptitle.Legendgrouptitle",
            "._line.Line",
            "._marker.Marker",
            "._selected.Selected",
            "._stream.Stream",
            "._textfont.Textfont",
            "._unselected.Unselected",
        ],
    )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@graph_objs@scatterpolargl@__init__.py@.PATH_END.py
|
{
"filename": "make_catalog.py",
"repo_name": "ChrisBeaumont/brut",
"repo_path": "brut_extracted/brut-master/bubbly/data/make_catalog.py",
"type": "Python"
}
|
# NOTE(review): legacy Python 2 script (cPickle; map() returning a list).
from MySQLdb import connect
import numpy as np
import cPickle as pickle

# Output path for the pickled bubble catalog.
pth = 'catalog.pkl'

db = connect(host='localhost', user='beaumont', db='mwp')
cursor = db.cursor()
cursor.execute('select lon, lat, angle, semi_major, semi_minor, '
               'thickness, hit_rate from clean_bubbles_anna')
# One row per bubble, every column coerced to float.
cat = np.array([map(float, row) for row in cursor.fetchall()])

# NOTE(review): pickle data is binary; opening with 'wb' would be safer
# even on Python 2 -- confirm before changing.
with open(pth, 'w') as outfile:
    pickle.dump(cat, outfile)
|
ChrisBeaumontREPO_NAMEbrutPATH_START.@brut_extracted@brut-master@bubbly@data@make_catalog.py@.PATH_END.py
|
{
"filename": "box.py",
"repo_name": "GalSim-developers/GalSim",
"repo_path": "GalSim_extracted/GalSim-main/galsim/box.py",
"type": "Python"
}
|
# Copyright (c) 2012-2023 by the GalSim developers team on GitHub
# https://github.com/GalSim-developers
#
# This file is part of GalSim: The modular galaxy image simulation toolkit.
# https://github.com/GalSim-developers/GalSim
#
# GalSim is free software: redistribution and use in source and binary forms,
# with or without modification, are permitted provided that the following
# conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions, and the disclaimer given in the accompanying LICENSE
# file.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the disclaimer given in the documentation
# and/or other materials provided with the distribution.
#
__all__ = [ 'Pixel', 'Box', 'TopHat' ]
import math
from . import _galsim
from .gsobject import GSObject
from .gsparams import GSParams
from .utilities import lazy_property, doc_inherit
class Box(GSObject):
    """A class describing a box profile.  This is just a 2D top-hat function, where the
    width and height are allowed to be different.

    Parameters:
        width:      The width of the Box.
        height:     The height of the Box.
        flux:       The flux (in photons/cm^2/s) of the profile. [default: 1]
        gsparams:   An optional `GSParams` argument. [default: None]
    """
    _req_params = { "width" : float, "height" : float }
    _opt_params = { "flux" : float }

    # Profile traits consumed by the GSObject machinery.
    _has_hard_edges = True
    _is_axisymmetric = False
    _is_analytic_x = True
    _is_analytic_k = True

    def __init__(self, width, height, flux=1., gsparams=None):
        self._width = float(width)
        self._height = float(height)
        self._flux = float(flux)
        self._gsparams = GSParams.check(gsparams)
        # Constant surface brightness inside the box.
        self._norm = self._flux / (self._width * self._height)

    @lazy_property
    def _sbp(self):
        # C-layer implementation object; built on first use and dropped from
        # the pickled state in __getstate__.
        return _galsim.SBBox(self._width, self._height, self._flux, self.gsparams._gsp)

    @property
    def width(self):
        """The width of the `Box`.
        """
        return self._width

    @property
    def height(self):
        """The height of the `Box`.
        """
        return self._height

    def __eq__(self, other):
        return (self is other or
                (isinstance(other, Box) and
                 self.width == other.width and
                 self.height == other.height and
                 self.flux == other.flux and
                 self.gsparams == other.gsparams))

    def __hash__(self):
        return hash(("galsim.Box", self.width, self.height, self.flux, self.gsparams))

    def __repr__(self):
        return 'galsim.Box(width=%r, height=%r, flux=%r, gsparams=%r)'%(
            self.width, self.height, self.flux, self.gsparams)

    def __str__(self):
        s = 'galsim.Box(width=%s, height=%s'%(self.width, self.height)
        if self.flux != 1.0:
            s += ', flux=%s'%self.flux
        s += ')'
        return s

    def __getstate__(self):
        # The C-layer _sbp object is not picklable; it is rebuilt lazily.
        d = self.__dict__.copy()
        d.pop('_sbp',None)
        return d

    def __setstate__(self, d):
        self.__dict__ = d

    @property
    def _maxk(self):
        return 2. / (self.gsparams.maxk_threshold * min(self.width, self.height))

    @property
    def _stepk(self):
        return math.pi / max(self.width, self.height)

    @property
    def _max_sb(self):
        return self._norm

    def _xValue(self, pos):
        # Strictly inside the rectangle -> constant surface brightness.
        if 2.*abs(pos.x) < self._width and 2.*abs(pos.y) < self._height:
            return self._norm
        else:
            return 0.

    def _kValue(self, kpos):
        return self._sbp.kValue(kpos._p)

    def _drawReal(self, image, jac=None, offset=(0.,0.), flux_scaling=1.):
        # Pass the raw data pointer of the 2x2 Jacobian (or 0 for identity).
        _jac = 0 if jac is None else jac.__array_interface__['data'][0]
        dx,dy = offset
        self._sbp.draw(image._image, image.scale, _jac, dx, dy, flux_scaling)

    def _shoot(self, photons, rng):
        self._sbp.shoot(photons._pa, rng._rng)

    def _drawKImage(self, image, jac=None):
        _jac = 0 if jac is None else jac.__array_interface__['data'][0]
        self._sbp.drawK(image._image, image.scale, _jac)

    @doc_inherit
    def withFlux(self, flux):
        return Box(width=self.width, height=self.height, flux=flux, gsparams=self.gsparams)
class Pixel(Box):
    """A class describing a pixel profile.  This is just a 2D square top-hat function.

    This class is typically used to represent a pixel response function.  It is used
    internally by the `GSObject.drawImage` function, but there may be cases where the
    user would want to use this profile directly.

    Parameters:
        scale:      The linear scale size of the pixel.  Typically given in arcsec.
        flux:       The flux (in photons/cm^2/s) of the profile.  This should almost
                    certainly be left at the default value of 1. [default: 1]
        gsparams:   An optional `GSParams` argument. [default: None]
    """
    _req_params = { "scale" : float }
    _opt_params = { "flux" : float }

    def __init__(self, scale, flux=1., gsparams=None):
        # A Pixel is exactly a square Box with width == height == scale.
        super().__init__(width=scale, height=scale, flux=flux, gsparams=gsparams)

    @property
    def scale(self):
        """The linear scale size of the `Pixel`.
        """
        return self.width

    def __repr__(self):
        return 'galsim.Pixel(scale=%r, flux=%r, gsparams=%r)'%(
            self.scale, self.flux, self.gsparams)

    def __str__(self):
        parts = ['galsim.Pixel(scale=%s'%self.scale]
        if self.flux != 1.0:
            parts.append(', flux=%s'%self.flux)
        parts.append(')')
        return ''.join(parts)

    @doc_inherit
    def withFlux(self, flux):
        return Pixel(scale=self.scale, flux=flux, gsparams=self.gsparams)
class TopHat(GSObject):
    """A class describing a radial tophat profile.  This profile is a constant value within
    some radius, and zero outside this radius.

    Parameters:
        radius:     The radius of the TopHat, where the surface brightness drops to 0.
        flux:       The flux (in photons/cm^2/s) of the profile. [default: 1]
        gsparams:   An optional `GSParams` argument. [default: None]
    """
    _req_params = { "radius" : float }
    _opt_params = { "flux" : float }

    # Profile traits consumed by the GSObject machinery.
    _has_hard_edges = True
    _is_axisymmetric = True
    _is_analytic_x = True
    _is_analytic_k = True

    def __init__(self, radius, flux=1., gsparams=None):
        self._radius = float(radius)
        self._flux = float(flux)
        self._gsparams = GSParams.check(gsparams)
        # Cache radius^2 for the distance test in _xValue.
        self._rsq = self._radius**2
        # Constant surface brightness inside the disk.
        self._norm = self._flux / (math.pi * self._rsq)

    @lazy_property
    def _sbp(self):
        # C-layer implementation object; rebuilt lazily after unpickling.
        return _galsim.SBTopHat(self._radius, self._flux, self.gsparams._gsp)

    @property
    def radius(self):
        """The radius of the `TopHat` profile.
        """
        return self._radius

    def __eq__(self, other):
        return (self is other or
                (isinstance(other, TopHat) and
                 self.radius == other.radius and
                 self.flux == other.flux and
                 self.gsparams == other.gsparams))

    def __hash__(self):
        return hash(("galsim.TopHat", self.radius, self.flux, self.gsparams))

    def __repr__(self):
        return 'galsim.TopHat(radius=%r, flux=%r, gsparams=%r)'%(
            self.radius, self.flux, self.gsparams)

    def __str__(self):
        s = 'galsim.TopHat(radius=%s'%self.radius
        if self.flux != 1.0:
            s += ', flux=%s'%self.flux
        s += ')'
        return s

    def __getstate__(self):
        # The C-layer _sbp object is not picklable; drop it and rebuild on demand.
        d = self.__dict__.copy()
        d.pop('_sbp',None)
        return d

    def __setstate__(self, d):
        self.__dict__ = d

    @property
    def _maxk(self):
        return self._sbp.maxK()

    @property
    def _stepk(self):
        return math.pi / self._radius

    @property
    def _max_sb(self):
        return self._norm

    def _xValue(self, pos):
        # Strictly inside the disk -> constant surface brightness.
        rsq = pos.x**2 + pos.y**2
        if rsq < self._rsq:
            return self._norm
        else:
            return 0.

    def _kValue(self, kpos):
        return self._sbp.kValue(kpos._p)

    def _drawReal(self, image, jac=None, offset=(0.,0.), flux_scaling=1.):
        # Pass the raw data pointer of the 2x2 Jacobian (or 0 for identity).
        _jac = 0 if jac is None else jac.__array_interface__['data'][0]
        dx,dy = offset
        self._sbp.draw(image._image, image.scale, _jac, dx, dy, flux_scaling)

    def _shoot(self, photons, rng):
        self._sbp.shoot(photons._pa, rng._rng)

    def _drawKImage(self, image, jac=None):
        _jac = 0 if jac is None else jac.__array_interface__['data'][0]
        self._sbp.drawK(image._image, image.scale, _jac)

    @doc_inherit
    def withFlux(self, flux):
        return TopHat(radius=self.radius, flux=flux, gsparams=self.gsparams)
|
GalSim-developersREPO_NAMEGalSimPATH_START.@GalSim_extracted@GalSim-main@galsim@box.py@.PATH_END.py
|
{
"filename": "test_move.py",
"repo_name": "langchain-ai/langchain",
"repo_path": "langchain_extracted/langchain-master/libs/community/tests/unit_tests/tools/file_management/test_move.py",
"type": "Python"
}
|
"""Test the FileMove tool."""
from pathlib import Path
from tempfile import TemporaryDirectory
from langchain_community.tools.file_management.move import MoveFileTool
from langchain_community.tools.file_management.utils import (
INVALID_PATH_TEMPLATE,
)
def test_move_file_with_root_dir() -> None:
    """Test the FileMove tool when a root dir is specified."""
    with TemporaryDirectory() as temp_dir:
        tool = MoveFileTool(root_dir=temp_dir)
        src = Path(temp_dir) / "source.txt"
        dst = Path(temp_dir) / "destination.txt"
        src.write_text("Hello, world!")
        # Paths are interpreted relative to the tool's root_dir.
        tool.run({"source_path": "source.txt", "destination_path": "destination.txt"})
        assert not src.exists()
        assert dst.exists()
        assert dst.read_text() == "Hello, world!"
def test_move_file_errs_outside_root_dir() -> None:
    """Test the FileMove tool when a root dir is specified."""
    with TemporaryDirectory() as temp_dir:
        tool = MoveFileTool(root_dir=temp_dir)
        # Escaping the sandbox via ".." must be rejected with the
        # standard invalid-path message rather than performing the move.
        outcome = tool.run(
            {
                "source_path": "../source.txt",
                "destination_path": "../destination.txt",
            }
        )
        expected = INVALID_PATH_TEMPLATE.format(
            arg_name="source_path", value="../source.txt"
        )
        assert outcome == expected
def test_move_file() -> None:
    """Test the FileMove tool."""
    with TemporaryDirectory() as temp_dir:
        tool = MoveFileTool()
        src = Path(temp_dir) / "source.txt"
        dst = Path(temp_dir) / "destination.txt"
        src.write_text("Hello, world!")
        # Without a root_dir the tool operates on absolute paths directly.
        tool.run({"source_path": str(src), "destination_path": str(dst)})
        assert not src.exists()
        assert dst.exists()
        assert dst.read_text() == "Hello, world!"
|
langchain-aiREPO_NAMElangchainPATH_START.@langchain_extracted@langchain-master@libs@community@tests@unit_tests@tools@file_management@test_move.py@.PATH_END.py
|
{
"filename": "ffs_specmat_noBB.py",
"repo_name": "carronj/LensIt",
"repo_path": "LensIt_extracted/LensIt-master/lensit/ffs_covs/ffs_specmat_noBB.py",
"type": "Python"
}
|
"""
Basically same as spectralmatrices_wtensors but set BB to zero.
"""
import numpy as np
typs = ['T', 'QU', 'TQU']
def _rootCMBcls(cmbCls):
"""
Symmetric square root of
(T E B) spectral matrix
TT TE 0
TE EE 0
0 0 BB
This assumes TB = EB == 0
"""
s = np.sqrt(cmbCls['tt'] * cmbCls['ee'] - cmbCls['te'] ** 2)
t = np.sqrt(cmbCls['tt'] + cmbCls['ee'] + 2 * s)
ctt = np.zeros(len(cmbCls['tt']))
cee = np.zeros(len(cmbCls['ee']))
cte = np.zeros(len(cmbCls['te']))
ii = np.where(t > 0.)
ctt[ii] = (cmbCls['tt'][ii] + s[ii]) / t[ii]
cee[ii] = (cmbCls['ee'][ii] + s[ii]) / t[ii]
cte[ii] = cmbCls['te'][ii] / t[ii]
return {'tt': ctt, 'ee': cee, 'te': cte}
def _clpinv(cl):
ret = np.zeros_like(cl)
ret[np.where(cl != 0.)] = 1. / cl[np.where(cl != 0.)]
return ret
def TE2TQUlms(typ, lib_alm, TElms):
    """Rotate (T, E) harmonic coefficients into (T, Q, U) space.

        T   =  A  T
        Q         E
        U         B

    where A is

        1   0    0
        0  cos -sin
        0  sin  cos

    With BB == 0 (this module's assumption) the B column never contributes.
    """
    assert typ in typs
    if typ == 'T':
        return np.array(TElms).copy()
    cos, sin = lib_alm.get_cossin_2iphi()
    if typ == 'QU':
        return np.array([cos * TElms[0], sin * TElms[0]])
    # typ == 'TQU'
    return np.array([TElms[0], cos * TElms[1], sin * TElms[1]])
def TQU2TElms(typ, lib_alm, TQUlms):
    """Project (T, Q, U) harmonic coefficients back onto (T, E).

    Inverse of `TE2TQUlms` under the BB == 0 assumption: the B component
    is dropped.
    """
    assert typ in typs
    assert len(TQUlms) == len(typ)
    if typ == 'T':
        return np.array(TQUlms).copy()
    cos, sin = lib_alm.get_cossin_2iphi()
    if typ == 'QU':
        return np.array([cos * TQUlms[0] + sin * TQUlms[1]])
    if typ == 'TQU':
        return np.array([TQUlms[0], cos * TQUlms[1] + sin * TQUlms[2]])
    assert 0, (typ, typs)
def apply_rootTEmat(typ, lib_alm, cmb_cls, TElms):
    """Multiply the TE alm's by the symmetric square root of the spectral
    matrix.

    Assumes TB = EB = BB = 0.
    """
    assert (typ in typs)
    assert ('tb' not in cmb_cls.keys()) and ('eb' not in cmb_cls.keys()), cmb_cls.keys()
    if typ == 'T':
        return np.array([lib_alm.almxfl(TElms[0], np.sqrt(cmb_cls['tt']))])
    if typ == 'QU':
        return np.array([lib_alm.almxfl(TElms[0], np.sqrt(cmb_cls['ee']))])
    # typ == 'TQU': apply the 2x2 TE block of the matrix square root.
    root = _rootCMBcls(cmb_cls)
    mul = lambda i, spec: lib_alm.almxfl(TElms[i], root[spec])
    return np.array([mul(0, 'tt') + mul(1, 'te'), mul(0, 'te') + mul(1, 'ee')])
def apply_TEmat(typ, lib_alm, cmb_cls, TElms):
    """Multiply the TE alm's by the (T, E) spectral matrix.

    Assumes TB = EB = 0.

    NOTE(review): in this noBB module the 'QU' branch multiplies a second
    component by cmb_cls['bb'], while TE2TQUlms / TQU2TElms / apply_rootTEmat
    treat the 'QU' alm set as a single (E-only) component.  Confirm callers
    really pass a B entry here, or whether this branch should mirror
    apply_rootTEmat.
    """
    assert (typ in typs)
    assert ('tb' not in cmb_cls.keys()) and ('eb' not in cmb_cls.keys()), cmb_cls.keys()
    # almxfl multiplies the id-th alm array by the given spectrum.
    fl = lambda id, _f: lib_alm.almxfl(TElms[id], cmb_cls[_f])
    if typ == 'T':
        return np.array([fl(0, 'tt')])
    elif typ == 'QU':
        return np.array([fl(0, 'ee'), fl(1, 'bb')])
    elif typ == 'TQU':
        # 2x2 TE block: (tt te; te ee) applied to (T, E).
        return np.array([fl(0, 'tt') + fl(1, 'te'), fl(0, 'te') + fl(1, 'ee')])
    else:
        assert 0, (typ, typs)
def apply_pinvTEmat(typ, lib_alm, cmb_cls, TElms):
    """Multiply the TE alm's by the pseudo-inverse of the spectral matrix.

    Assumes TB = EB = 0.  The pseudo-inverse is set to zero wherever a
    variable carries no power (e.g. unlensed BB, or ell = 0, 1 in pol.).
    """
    assert (typ in typs)
    # (The original repeated this assertion twice; the duplicate was removed.)
    assert ('tb' not in cmb_cls.keys()) and ('eb' not in cmb_cls.keys()), cmb_cls.keys()
    # almxfl multiplies the id-th alm array by the given spectrum.
    fl = lambda id, cl: lib_alm.almxfl(TElms[id], cl)
    if typ == 'T':
        return np.array([fl(0, _clpinv(cmb_cls['tt']))])
    elif typ == 'QU':
        return np.array([fl(0, _clpinv(cmb_cls['ee']))])
    elif typ == 'TQU':
        # Apply the 2x2 TE block of the pseudo-inverse, computed once.
        cli = get_pinvTEcls(typ, cmb_cls)
        return np.array([fl(0, cli['tt']) + fl(1, cli['te']), fl(0, cli['te']) + fl(1, cli['ee'])])
    else:
        assert 0
def get_pinvTEcls(typ, cmb_cls):
    """Return the spectra of the pseudo-inverse spectral matrix for ``typ``."""
    if typ == 'T':
        return {'tt': _clpinv(cmb_cls['tt'])}
    if typ == 'QU':
        return {'ee': _clpinv(cmb_cls['ee'])}
    if typ == 'TQU':
        # FIXME rewrite this
        # Invert the 2x2 TE block where its determinant is positive; fall
        # back to the diagonal pseudo-inverse where it is not.
        deti = _clpinv(cmb_cls['tt'] * cmb_cls['ee'] - cmb_cls['te'] ** 2)
        good = deti > 0
        return {
            'tt': np.where(good, cmb_cls['ee'] * deti, _clpinv(cmb_cls['tt'])),
            'te': np.where(good, -cmb_cls['te'] * deti, np.zeros(len(cmb_cls['te']))),
            'ee': np.where(good, cmb_cls['tt'] * deti, _clpinv(cmb_cls['ee'])),
        }
    assert 0
|
carronjREPO_NAMELensItPATH_START.@LensIt_extracted@LensIt-master@lensit@ffs_covs@ffs_specmat_noBB.py@.PATH_END.py
|
{
"filename": "_dtickrange.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/scattergeo/marker/colorbar/tickformatstop/_dtickrange.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class DtickrangeValidator(_plotly_utils.basevalidators.InfoArrayValidator):
    """Validator for the ``dtickrange`` attribute of
    ``scattergeo.marker.colorbar.tickformatstop``: a two-element
    [min, max] array bounding the tick spacing for which the format
    stop applies.
    """

    def __init__(
        self,
        plotly_name="dtickrange",
        parent_name="scattergeo.marker.colorbar.tickformatstop",
        **kwargs,
    ):
        super(DtickrangeValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            # Changing the range triggers a full recalculation.
            edit_type=kwargs.pop("edit_type", "calc"),
            # Two free-form entries: the lower and upper dtick bounds.
            items=kwargs.pop(
                "items",
                [
                    {"editType": "calc", "valType": "any"},
                    {"editType": "calc", "valType": "any"},
                ],
            ),
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@scattergeo@marker@colorbar@tickformatstop@_dtickrange.py@.PATH_END.py
|
{
"filename": "imshow_utils.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/express/imshow_utils.py",
"type": "Python"
}
|
"""Vendored code from scikit-image in order to limit the number of dependencies
Extracted from scikit-image/skimage/exposure/exposure.py
"""
import numpy as np
from warnings import warn
# NumPy integer scalar types for which we tabulate (min, max) limits below.
_integer_types = (
    np.byte,
    np.ubyte,  # 8 bits
    np.short,
    np.ushort,  # 16 bits
    np.intc,
    np.uintc,  # 16 or 32 or 64 bits
    np.int_,
    np.uint,  # 32 or 64 bits
    np.longlong,
    np.ulonglong,
)  # 64 bits
_integer_ranges = {t: (np.iinfo(t).min, np.iinfo(t).max) for t in _integer_types}
# Canonical (min, max) value range for each supported dtype; floats are
# conventionally mapped to [-1, 1] and bools to [False, True].
dtype_range = {
    np.bool_: (False, True),
    np.bool8: (False, True),
    np.float16: (-1, 1),
    np.float32: (-1, 1),
    np.float64: (-1, 1),
}
dtype_range.update(_integer_ranges)
DTYPE_RANGE = dtype_range.copy()
# Allow lookups by dtype *name* (string keys) as well as by type object...
DTYPE_RANGE.update((d.__name__, limits) for d, limits in dtype_range.items())
# ...plus a few non-canonical bit-depth aliases.
DTYPE_RANGE.update(
    {
        "uint10": (0, 2 ** 10 - 1),
        "uint12": (0, 2 ** 12 - 1),
        "uint14": (0, 2 ** 14 - 1),
        "bool": dtype_range[np.bool_],
        "float": dtype_range[np.float64],
    }
)
def intensity_range(image, range_values="image", clip_negative=False):
"""Return image intensity range (min, max) based on desired value type.
Parameters
----------
image : array
Input image.
range_values : str or 2-tuple, optional
The image intensity range is configured by this parameter.
The possible values for this parameter are enumerated below.
'image'
Return image min/max as the range.
'dtype'
Return min/max of the image's dtype as the range.
dtype-name
Return intensity range based on desired `dtype`. Must be valid key
in `DTYPE_RANGE`. Note: `image` is ignored for this range type.
2-tuple
Return `range_values` as min/max intensities. Note that there's no
reason to use this function if you just want to specify the
intensity range explicitly. This option is included for functions
that use `intensity_range` to support all desired range types.
clip_negative : bool, optional
If True, clip the negative range (i.e. return 0 for min intensity)
even if the image dtype allows negative values.
"""
if range_values == "dtype":
range_values = image.dtype.type
if range_values == "image":
i_min = np.min(image)
i_max = np.max(image)
elif range_values in DTYPE_RANGE:
i_min, i_max = DTYPE_RANGE[range_values]
if clip_negative:
i_min = 0
else:
i_min, i_max = range_values
return i_min, i_max
def _output_dtype(dtype_or_range):
"""Determine the output dtype for rescale_intensity.
The dtype is determined according to the following rules:
- if ``dtype_or_range`` is a dtype, that is the output dtype.
- if ``dtype_or_range`` is a dtype string, that is the dtype used, unless
it is not a NumPy data type (e.g. 'uint12' for 12-bit unsigned integers),
in which case the data type that can contain it will be used
(e.g. uint16 in this case).
- if ``dtype_or_range`` is a pair of values, the output data type will be
float.
Parameters
----------
dtype_or_range : type, string, or 2-tuple of int/float
The desired range for the output, expressed as either a NumPy dtype or
as a (min, max) pair of numbers.
Returns
-------
out_dtype : type
The data type appropriate for the desired output.
"""
if type(dtype_or_range) in [list, tuple, np.ndarray]:
# pair of values: always return float.
return np.float_
if type(dtype_or_range) == type:
# already a type: return it
return dtype_or_range
if dtype_or_range in DTYPE_RANGE:
# string key in DTYPE_RANGE dictionary
try:
# if it's a canonical numpy dtype, convert
return np.dtype(dtype_or_range).type
except TypeError: # uint10, uint12, uint14
# otherwise, return uint16
return np.uint16
else:
raise ValueError(
"Incorrect value for out_range, should be a valid image data "
"type or a pair of values, got %s." % str(dtype_or_range)
)
def rescale_intensity(image, in_range="image", out_range="dtype"):
    """Return image after stretching or shrinking its intensity levels.

    The desired intensity ranges of the input and output, `in_range` and
    `out_range`, are used to stretch or shrink the intensity range of the
    input image.

    Parameters
    ----------
    image : array
        Image array.
    in_range, out_range : str or 2-tuple, optional
        Min and max intensity values of input and output image:
        'image' uses the image min/max, 'dtype' the limits of the image's
        dtype, a dtype name the limits of that dtype, and a 2-tuple gives
        the min/max explicitly.

    Returns
    -------
    out : array
        Image array after rescaling its intensity.  The dtype matches the
        output range (float when `out_range` is a pair of values).

    Examples
    --------
    >>> image = np.array([51, 102, 153], dtype=np.uint8)
    >>> rescale_intensity(image)
    array([  0, 127, 255], dtype=uint8)
    >>> rescale_intensity(1.0 * image, in_range=(0, 255))
    array([0.2, 0.4, 0.6])
    """
    if out_range in ["dtype", "image"]:
        # Output spans the full range of the input's dtype.
        out_dtype = _output_dtype(image.dtype.type)
    else:
        out_dtype = _output_dtype(out_range)
    imin, imax = map(float, intensity_range(image, in_range))
    # Clip the output's negative range only when the input range is
    # non-negative, so signed inputs keep their sign.
    omin, omax = map(
        float, intensity_range(image, out_range, clip_negative=(imin >= 0))
    )
    if np.any(np.isnan([imin, imax, omin, omax])):
        warn(
            "One or more intensity levels are NaN. Rescaling will broadcast "
            "NaN to the full image. Provide intensity levels yourself to "
            "avoid this. E.g. with np.nanmin(image), np.nanmax(image).",
            stacklevel=2,
        )
    image = np.clip(image, imin, imax)
    if imin != imax:
        # Affine map of [imin, imax] onto [omin, omax].
        image = (image - imin) / (imax - imin)
        return np.asarray(image * (omax - omin) + omin, dtype=out_dtype)
    else:
        # Constant image: just clip into the output range.
        return np.clip(image, omin, omax).astype(out_dtype)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@express@imshow_utils.py@.PATH_END.py
|
{
"filename": "test_cube.py",
"repo_name": "FRBs/zdm",
"repo_path": "zdm_extracted/zdm-main/zdm/tests/test_cube.py",
"type": "Python"
}
|
"""
File to test that cubing produces the expected output
"""
import os
import pytest
from pkg_resources import resource_filename
import pandas
from zdm import iteration as it
from zdm import loading
from zdm import io
from zdm.tests import tstutils
import numpy as np
from IPython import embed
def check_accuracy(x1, x2, thresh=1e-4):
    """Return True if x1 and x2 agree to within `thresh`, relative to their
    mean, else False."""
    avg = 0.5 * (x1 + x2)
    rel = np.abs((x1 - x2) / avg)
    return True if rel < thresh else False
def test_cube_run():
    """Run one cell of the likelihood cube end-to-end and check that the
    CSV output's log-likelihood decompositions are internally consistent."""
    # use default parameters
    # Initialise survey and grid
    # For this purpose, we only need two different surveys
    # the defaults are below - passing these will do nothing
    survey_names = ['CRAFT/FE',
                    'CRAFT_ICS_1632',
                    'CRAFT_ICS_892',
                    'CRAFT_ICS_1300',
                    'PKS/Mb']
    #sdir = os.path.join(resource_filename('zdm', 'data'), 'Surveys')
    #surveys=[]
    #grids=[]
    '''
    # We should be using real_loading
    for name in names:
        s,g = loading.survey_and_grid(
            survey_name=name,NFRB=None,sdir=sdir) # should be equal to actual number of FRBs, but for this purpose it doesn't matter
        surveys.append(s)
        grids.append(g)
    '''
    surveys, grids = loading.surveys_and_grids(
        nz=500, ndm=1400) # Small number to keep this cheap
    ### gets cube files
    pfile= tstutils.data_path('real_mini_cube.json')
    input_dict=io.process_jfile(pfile)
    # Deconstruct the input_dict
    state_dict, cube_dict, vparam_dict = it.parse_input_dict(input_dict)
    outfile= tstutils.data_path('output.csv')
    # check that the output file does not already exist
    #assert not os.path.exists(outfile)
    run=1
    howmany=1
    #run=2 + 2*4 + 5*4*4 + 2*10*4*4 + 1*4*10*4*4 + 5*3*4*10*4*4 + 5*10*3*4*10*4*4
    # I have chosen this to pick normal values of most parameters, and a high Emax
    # should prevent any nans appearing...
    #run = 5 + 1*10 + 9*3*10 + 1*10*3*10 + 1*4*10*3*10 + 1*4*4*10*3*10 + 6*4*4*10*3*10
    # Set cube shape
    ####### counters for each dimensions ######
    parameter_order = cube_dict['parameter_order']
    PARAMS = list(vparam_dict.keys())
    order, iorder = it.set_orders(parameter_order, PARAMS)
    # Shape of the grid (ignoring the constant, lC)
    cube_shape = it.set_cube_shape(vparam_dict, order)
    # Choose parameters in the middle to avoid NANs
    current = [item//2 for item in cube_shape]
    run = np.ravel_multi_index(current, cube_shape, order='F')
    #pytest.set_trace()
    it.cube_likelihoods(grids,surveys,vparam_dict, cube_dict,run,howmany,outfile)
    # now we check that the output file exists
    #assert os.path.exists(outfile)
    # output is
    # ith run (counter)
    # variables: 8 of these (Emax, H0, alpha, gamma, n, lmean, lsigma, C)
    # 5 pieces of info for each surveys (ll_survey, Pn, Ps, Pzdm, expected_N)
    # 8 pieces of info at the end (lltot, Pntot, Ps_tot, Pzdm_tot,
    #    pDM|z_tot, pz_tot,pz|DM_tot, pDM_tot
    # =18+Nsurveys*5
    ns=len(surveys)
    # Load with pandas to assess
    df = pandas.read_csv(outfile)
    ds = df.iloc[0]
    # lls: the total must equal both the per-survey sum and the
    # P_zDM + P_n + P_s decomposition.
    lltot_v0= ds.lls
    lltot_v1=0
    for j in np.arange(ns):
        lltot_v1 += ds[f'lls{j}']
    lltot_v2= np.sum(
        [ds[key] for key in ['P_zDM', 'P_n', 'P_s']])
    assert check_accuracy(lltot_v0,lltot_v1)
    assert check_accuracy(lltot_v0,lltot_v2)
    # zdm: p(z|DM)p(DM) must equal p(DM|z)p(z).
    zdm_v1= ds.p_zgDM + ds.p_DM
    zdm_v2= ds.p_DMgz + ds.p_z
    assert check_accuracy(zdm_v1,zdm_v2)
if __name__ == "__main__":
    # Allow running this module directly as a script; pytest collects the
    # function itself, so don't execute it unconditionally at import time
    # (the original ran the expensive test once on import and again under
    # pytest collection).
    test_cube_run()
|
FRBsREPO_NAMEzdmPATH_START.@zdm_extracted@zdm-main@zdm@tests@test_cube.py@.PATH_END.py
|
{
"filename": "tests_on_impact_parameter.ipynb",
"repo_name": "cta-observatory/cta-lstchain",
"repo_path": "cta-lstchain_extracted/cta-lstchain-main/notebooks/tests_on_impact_parameter.ipynb",
"type": "Jupyter Notebook"
}
|
<font size="5">__cta-lstchain: Notebook for testing the effects of impact parameters on the energy reconstruction__</font>
<font size="4">
To run this notebook you will need the last version of cta-lstchain:
git clone https://github.com/cta-observatory/cta-lstchain
<br>
<br>
**If you have ctapipe already installed in a conda environment:**
<br><br>
source activate cta-dev
<br>
python setup.py install
<br>
<font size="4">
**If you don't have ctapipe installed:**</font>
<br><br>
conda env create -f environment.yml
<br>
source activate cta-dev
<br>
python setup.py install
Also, you will need the datafiles from **cta-lstchain-extra:**
git clone https://github.com/cta-observatory/cta-lstchain-extra
**Content:**
- Definition of two functions for presenting the energy resolution:
- plot_e_resolution: For plotting the STD and Bias of Erec-Etrue in several energy bins.
- calc_resolution: For calculating the overall energy resolution in terms of the 68% area.
- Plotting energy vs. intensity to check linearity.
- Training RF without cuts in Impact Parameter.
- Training RF only with events with Impact Parameter between 40 m and 100 m.
- Training RF with all events, but including Impact Parameter as a feature.
<font size="4">
**Some imports...**
```python
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from lstchain.reco import dl1_to_dl2
from lstchain.visualization import plot_dl2
from lstchain.reco import utils
import scipy
from matplotlib import gridspec
%matplotlib inline
plt.rcParams['figure.figsize'] = (10, 5)
plt.rcParams['font.size'] = 14
```
/Users/garciaenrique/anaconda3/envs/cta-dev/lib/python3.7/site-packages/sklearn/externals/joblib/__init__.py:15: DeprecationWarning: sklearn.externals.joblib is deprecated in 0.21 and will be removed in 0.23. Please import this functionality directly from joblib, which can be installed with: pip install joblib. If this warning is raised when loading pickled models, you may need to re-serialize those models with scikit-learn 0.21+.
warnings.warn(msg, category=DeprecationWarning)
<font size="4">
**Define two functions to show results later**
```python
def plot_e_resolution(data,Nbins):
plt.rcParams['figure.figsize'] = (30, 10)
plt.rcParams['font.size'] = 14
#difE = ((data['mc_energy']-data['reco_energy'])*np.log(10))
difE = np.log(10**data['reco_energy']/10**data['mc_energy'])
means_result = scipy.stats.binned_statistic(
data['mc_energy'],[difE,difE**2],
bins=Nbins,range=(1,6),statistic='mean')
means, means2 = means_result.statistic
standard_deviations = np.sqrt(means2 - means**2)
bin_edges = means_result.bin_edges
bin_centers = (bin_edges[:-1] + bin_edges[1:])/2.
gs0 = gridspec.GridSpec(1,2,width_ratios=[1,2])
subplot = plt.subplot(gs0[0])
gs = gridspec.GridSpecFromSubplotSpec(2, 1,height_ratios=[1, 1],subplot_spec=subplot)
ax0 = plt.subplot(gs[0])
plot0 = ax0.errorbar(x=bin_centers, y=means, yerr=standard_deviations,linestyle='none', marker='.')
plt.ylabel('Bias',fontsize=24)
plt.grid()
ax1 = plt.subplot(gs[1],sharex = ax0)
plot1 = ax1.plot(bin_centers,standard_deviations,
marker='+',linestyle='None')
plt.ylabel('STD',fontsize=24)
plt.xlabel('$log_{10}E_{true}(GeV)$',fontsize=24)
plt.grid()
subplot2 = plt.subplot(gs0[1])
#Lines for setting the configuration of the subplots depending on Nbins
import math
sqrtNbins = np.sqrt(Nbins)
a = int(math.ceil(sqrtNbins))
dif = a - sqrtNbins
b=a
if dif > 0.5:
b=a-1
gs2 = gridspec.GridSpecFromSubplotSpec(a, b,subplot_spec=subplot2)
for nbin in range(0,Nbins):
ax = plt.subplot(gs2[nbin])
plt.hist(difE[means_result.binnumber==nbin+1],50,label='$logE_{center}$ '+'%.2f' % bin_centers[nbin])
plt.legend()
plt.subplots_adjust(hspace=.25)
plt.subplots_adjust(wspace=.5)
```
```python
def calc_resolution(data):
difE = np.log(10**data['reco_energy']/10**data['mc_energy'])
n , bins, _ = plt.hist(difE,bins=500)
mu,sigma = scipy.stats.norm.fit(difE)
print(mu,sigma)
bin_width = bins[1] - bins[0]
total = bin_width*sum(n)*0.68
idx = np.abs(bins - mu).argmin()
x = 0
mindif = 1e10
xpos=0
integral=0
while integral <= total:
integral = bin_width*sum(n[idx-x:idx+x])
x = x+1
print(x,integral,total)
sigma = bins[idx+x-1]
plt.plot(bins,integral*scipy.stats.norm.pdf(bins, mu, sigma),linewidth=4,color='red',linestyle='--')
plt.xlabel("$log(E_{rec}/E_{true})$")
print(mu,sigma)
return mu,sigma
```
```python
from IPython.core.display import display, HTML
display(HTML("<style>.container { width:90% !important; }</style>"))
```
<style>.container { width:90% !important; }</style>
<font size="4">
**Get event DL1 file for training.**
<br>
Gammas are pointlike.
```python
PATH_EVENTS = "../../cta-lstchain-extra/reco/sample_data/dl1/"
gammafile = PATH_EVENTS+"/gamma_events_point_tiny.h5"
df_gammas = pd.read_hdf(gammafile)
```
<font size="4">
We read the file as pandas dataframes:
```python
df_gammas.keys()
```
Index(['obs_id', 'event_id', 'mc_energy', 'mc_alt', 'mc_az', 'mc_core_x',
'mc_core_y', 'mc_h_first_int', 'mc_type', 'gps_time', 'width', 'length',
'wl', 'phi', 'psi', 'r', 'x', 'y', 'intensity', 'skewness', 'kurtosis',
'mc_alt_tel', 'mc_az_tel', 'impact', 'mc_x_max', 'time_gradient',
'intercept', 'src_x', 'src_y', 'disp', 'hadroness', 'disp_norm'],
dtype='object')
<font size="4">
We can keep only bright showers:
```python
df_gammas = df_gammas[df_gammas['intensity']>np.log10(300)]
df_gammas.describe()
```
<div>
<style scoped>
.dataframe tbody tr th:only-of-type {
vertical-align: middle;
}
.dataframe tbody tr th {
vertical-align: top;
}
.dataframe thead th {
text-align: right;
}
</style>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>obs_id</th>
<th>event_id</th>
<th>mc_energy</th>
<th>mc_alt</th>
<th>mc_az</th>
<th>mc_core_x</th>
<th>mc_core_y</th>
<th>mc_h_first_int</th>
<th>mc_type</th>
<th>gps_time</th>
<th>...</th>
<th>mc_az_tel</th>
<th>impact</th>
<th>mc_x_max</th>
<th>time_gradient</th>
<th>intercept</th>
<th>src_x</th>
<th>src_y</th>
<th>disp</th>
<th>hadroness</th>
<th>disp_norm</th>
</tr>
</thead>
<tbody>
<tr>
<th>count</th>
<td>12104.000000</td>
<td>1.210400e+04</td>
<td>12104.000000</td>
<td>12104.00000</td>
<td>12104.000000</td>
<td>12104.000000</td>
<td>12104.000000</td>
<td>12104.000000</td>
<td>12104.0</td>
<td>1.210400e+04</td>
<td>...</td>
<td>12104.0</td>
<td>12104.000000</td>
<td>12104.000000</td>
<td>12104.000000</td>
<td>12104.000000</td>
<td>1.210400e+04</td>
<td>1.210400e+04</td>
<td>12104.000000</td>
<td>12104.0</td>
<td>12104.000000</td>
</tr>
<tr>
<th>mean</th>
<td>11031.168952</td>
<td>4.989209e+06</td>
<td>2.554061</td>
<td>1.22173</td>
<td>6.283185</td>
<td>-86.720668</td>
<td>-6.565950</td>
<td>25053.149677</td>
<td>0.0</td>
<td>1.445304e+09</td>
<td>...</td>
<td>0.0</td>
<td>186.785022</td>
<td>284.256873</td>
<td>0.059951</td>
<td>8.027671</td>
<td>4.103384e-13</td>
<td>-2.892042e-06</td>
<td>0.565944</td>
<td>0.0</td>
<td>0.565944</td>
</tr>
<tr>
<th>std</th>
<td>658.132843</td>
<td>2.888803e+06</td>
<td>0.566735</td>
<td>0.00000</td>
<td>0.000000</td>
<td>139.670505</td>
<td>151.915065</td>
<td>8234.162795</td>
<td>0.0</td>
<td>1.213798e+05</td>
<td>...</td>
<td>0.0</td>
<td>89.291014</td>
<td>66.506206</td>
<td>11.092541</td>
<td>3.644383</td>
<td>5.048918e-29</td>
<td>8.470679e-22</td>
<td>0.236171</td>
<td>0.0</td>
<td>0.236171</td>
</tr>
<tr>
<th>min</th>
<td>10147.000000</td>
<td>1.705000e+03</td>
<td>1.258275</td>
<td>1.22173</td>
<td>6.283185</td>
<td>-567.871460</td>
<td>-488.114166</td>
<td>6860.260742</td>
<td>0.0</td>
<td>1.445077e+09</td>
<td>...</td>
<td>0.0</td>
<td>2.954935</td>
<td>-20.000000</td>
<td>-49.847715</td>
<td>-13.069206</td>
<td>4.103384e-13</td>
<td>-2.892042e-06</td>
<td>0.005040</td>
<td>0.0</td>
<td>0.005040</td>
</tr>
<tr>
<th>25%</th>
<td>10385.000000</td>
<td>2.462403e+06</td>
<td>2.137222</td>
<td>1.22173</td>
<td>6.283185</td>
<td>-188.298092</td>
<td>-114.189913</td>
<td>19422.038086</td>
<td>0.0</td>
<td>1.445192e+09</td>
<td>...</td>
<td>0.0</td>
<td>120.070513</td>
<td>238.879780</td>
<td>-7.183782</td>
<td>6.429323</td>
<td>4.103384e-13</td>
<td>-2.892042e-06</td>
<td>0.375532</td>
<td>0.0</td>
<td>0.375532</td>
</tr>
<tr>
<th>50%</th>
<td>11495.000000</td>
<td>4.967105e+06</td>
<td>2.460539</td>
<td>1.22173</td>
<td>6.283185</td>
<td>-87.699158</td>
<td>-7.989395</td>
<td>23709.536133</td>
<td>0.0</td>
<td>1.445336e+09</td>
<td>...</td>
<td>0.0</td>
<td>175.091805</td>
<td>277.393021</td>
<td>-0.027170</td>
<td>9.068647</td>
<td>4.103384e-13</td>
<td>-2.892042e-06</td>
<td>0.546132</td>
<td>0.0</td>
<td>0.546132</td>
</tr>
<tr>
<th>75%</th>
<td>11681.000000</td>
<td>7.502930e+06</td>
<td>2.861781</td>
<td>1.22173</td>
<td>6.283185</td>
<td>12.928750</td>
<td>100.033356</td>
<td>29086.712891</td>
<td>0.0</td>
<td>1.445408e+09</td>
<td>...</td>
<td>0.0</td>
<td>247.107099</td>
<td>321.322357</td>
<td>7.200042</td>
<td>10.495006</td>
<td>4.103384e-13</td>
<td>-2.892042e-06</td>
<td>0.769557</td>
<td>0.0</td>
<td>0.769557</td>
</tr>
<tr>
<th>max</th>
<td>11715.000000</td>
<td>9.999503e+06</td>
<td>5.344416</td>
<td>1.22173</td>
<td>6.283185</td>
<td>361.726654</td>
<td>473.510223</td>
<td>92663.656250</td>
<td>0.0</td>
<td>1.445554e+09</td>
<td>...</td>
<td>0.0</td>
<td>508.237991</td>
<td>775.039368</td>
<td>56.879713</td>
<td>21.090128</td>
<td>4.103384e-13</td>
<td>-2.892042e-06</td>
<td>1.079062</td>
<td>0.0</td>
<td>1.079062</td>
</tr>
</tbody>
</table>
<p>8 rows × 32 columns</p>
</div>
<font size="4">
Energy should be proportional to intensity:
```python
h = plt.hist2d(df_gammas['mc_energy'],df_gammas['intensity'],bins=100)
plt.colorbar(h[3])
```
<matplotlib.colorbar.Colorbar at 0x12b182278>

<font size="4">
Let's choose events with a closer impact parameter (>40m, <100m)
```python
df_gammas['mc_core_distance'] = df_gammas['impact'] #Uncomment if you are using an old file without the "mc_core_distance key"
df_gammas.mc_core_distance.hist(bins=100);
plt.xlabel('Impact distance [m]');
```

```python
filter_impact = (df_gammas.mc_core_distance > 40) & (df_gammas.mc_core_distance < 100)
closer = df_gammas[filter_impact]
c = plt.hist2d(closer['mc_energy'],closer['intensity'],bins=100)
plt.colorbar(c[3]);
```

<font size="4">
Correlation is much more clear for this range.
<br><br>
Let's see how this cut affect to the energy reconstruction.
<br><br>
First of all, let's train a Random Forest with all events, **without any cut** and without using any mc information.
<br>
Choose the features for training the random forest (Hillas and Timing parameters)
```python
features = ['intensity',
'time_gradient',
'width',
'length',
'wl',
'phi',
'psi',
'skewness',
'kurtosis']
```
<font size="4">
Split data into train and test sets.
```python
from sklearn.model_selection import train_test_split
np.random.seed(0)
train, test = train_test_split(df_gammas, train_size=0.8)
print("Training datasets: {} events \nTest dataset: {} events".format(len(train), len(test)))
```
Training datasets: 9683 events
Test dataset: 2421 events
<font size="4">
And train Random Forests for Energy and Disp reconstruction.
```python
from lstchain.io.config import get_standard_config
custom_config = get_standard_config()
custom_config['regression_features'] = features
RFreg_Energy, RFreg_Disp = dl1_to_dl2.train_reco(train,custom_config)
```
Given features: ['intensity', 'time_gradient', 'width', 'length', 'wl', 'phi', 'psi', 'skewness', 'kurtosis']
Number of events for training: 9683
Training Random Forest Regressor for Energy Reconstruction...
Random Forest trained!
Training Random Forest Regressor for disp_norm Reconstruction...
Random Forest trained!
Done!
<font size="4">
Apply RF to test data to reconstruct Energy.
```python
from lstchain.visualization.plot_dl2 import plot_importances
plt.figure(figsize=(22,5))
plot_importances(RFreg_Energy, features);
```
Feature importances (gini index)
1. intensity (0.403711)
2. skewness (0.244341)
3. time_gradient (0.134707)
4. length (0.074602)
5. kurtosis (0.043110)
6. wl (0.037891)
7. width (0.025218)
8. psi (0.019826)
9. phi (0.016593)

```python
test['reco_energy'] = RFreg_Energy.predict(test[features])
```
/Users/garciaenrique/anaconda3/envs/cta-dev/lib/python3.7/site-packages/ipykernel_launcher.py:1: SettingWithCopyWarning:
A value is trying to be set on a copy of a slice from a DataFrame.
Try using .loc[row_indexer,col_indexer] = value instead
See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
"""Entry point for launching an IPython kernel.
```python
plt.figure(figsize=(7,5))
plt.grid()
#plot_dl2.plot_e(test,True)
calc_resolution(test)
```
-0.00273987640278246 0.5723908670962217
28 27.474974067162062 27.003880780470187
-0.00273987640278246 0.43770893316385173
(-0.00273987640278246, 0.43770893316385173)

```python
plot_e_resolution(test,15)
```

<font size="4">
Now, lets do the cuts on impact parameter to have closer events.
```python
train.mc_core_distance.hist(bins=100);
plt.xlabel('Impact distance [m]');
```

```python
train_cut = train[(train.mc_core_distance>40) & (train.mc_core_distance<200)]
test_cut = test[(test.mc_core_distance>40) & (test.mc_core_distance<200)]
```
<font size="4">
Train the RF again.
```python
RFreg_Energy, RFreg_Disp = dl1_to_dl2.train_reco(train_cut, custom_config)
```
Given features: ['intensity', 'time_gradient', 'width', 'length', 'wl', 'phi', 'psi', 'skewness', 'kurtosis']
Number of events for training: 5546
Training Random Forest Regressor for Energy Reconstruction...
Random Forest trained!
Training Random Forest Regressor for disp_norm Reconstruction...
Random Forest trained!
Done!
```python
plt.figure(figsize=(22,5))
plot_importances(RFreg_Energy, features);
```
Feature importances (gini index)
1. intensity (0.870297)
2. time_gradient (0.029438)
3. skewness (0.019631)
4. psi (0.014398)
5. kurtosis (0.014120)
6. width (0.013922)
7. length (0.013680)
8. wl (0.012567)
9. phi (0.011948)

<font size="4">
And reconstruct the energy.
```python
test_cut['reco_energy'] = RFreg_Energy.predict(test_cut[features])
```
/Users/garciaenrique/anaconda3/envs/cta-dev/lib/python3.7/site-packages/ipykernel_launcher.py:1: SettingWithCopyWarning:
A value is trying to be set on a copy of a slice from a DataFrame.
Try using .loc[row_indexer,col_indexer] = value instead
See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
"""Entry point for launching an IPython kernel.
```python
plt.figure(figsize=(7,5))
plt.grid()
#plot_dl2.plot_e(test_cut,True)
calc_resolution(test_cut)
```
-0.007815518393407455 0.3658399639109474
55 6.165240098541984 6.148507714140931
-0.007815518393407455 0.3426903061453519
(-0.007815518393407455, 0.3426903061453519)

```python
plot_e_resolution(test_cut,20)
```

<font size="4">
Let's do a last test. We will use all events, but using the impact parameter as a feature.
```python
features = ['intensity',
'time_gradient',
'width',
'length',
'wl',
'phi',
'psi',
'skewness',
'kurtosis',
'mc_core_distance']
custom_config['regression_features'] = features
```
<font size="4">
And train Random Forests for Energy and Disp reconstruction.
```python
RFreg_Energy, RFreg_Disp = dl1_to_dl2.train_reco(train,custom_config)
```
Given features: ['intensity', 'time_gradient', 'width', 'length', 'wl', 'phi', 'psi', 'skewness', 'kurtosis', 'mc_core_distance']
Number of events for training: 9683
Training Random Forest Regressor for Energy Reconstruction...
Random Forest trained!
Training Random Forest Regressor for disp_norm Reconstruction...
Random Forest trained!
Done!
```python
plt.figure(figsize=(22,5))
plot_importances(RFreg_Energy, features);
```
Feature importances (gini index)
1. intensity (0.443297)
2. mc_core_distance (0.424355)
3. skewness (0.057328)
4. wl (0.022012)
5. length (0.015750)
6. kurtosis (0.009861)
7. phi (0.009326)
8. psi (0.007694)
9. width (0.005406)
10. time_gradient (0.004969)

<font size="4">
Apply RF to test data to reconstruct Energy.
```python
test['reco_energy'] = RFreg_Energy.predict(test[features])
plt.figure(figsize=(7,5))
#plot_dl2.plot_e(test,True)
calc_resolution(test)
#plt.savefig("gaussian_fit.png")
```
/Users/garciaenrique/anaconda3/envs/cta-dev/lib/python3.7/site-packages/ipykernel_launcher.py:1: SettingWithCopyWarning:
A value is trying to be set on a copy of a slice from a DataFrame.
Try using .loc[row_indexer,col_indexer] = value instead
See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
"""Entry point for launching an IPython kernel.
-0.006847706834342981 0.31655786439959555
29 16.277286936463565 15.96958995099001
-0.006847706834342981 0.2669369367047296
(-0.006847706834342981, 0.2669369367047296)

```python
plt.figure(figsize=(19,8))
plot_e_resolution(test,15)
```

```python
```
|
cta-observatoryREPO_NAMEcta-lstchainPATH_START.@cta-lstchain_extracted@cta-lstchain-main@notebooks@tests_on_impact_parameter.ipynb@.PATH_END.py
|
{
"filename": "ModelMetadataAddAuthor.md",
"repo_name": "tensorflow/tensorflow",
"repo_path": "tensorflow_extracted/tensorflow-master/tensorflow/lite/g3doc/api_docs/python/tflite_support/metadata_schema_py_generated/ModelMetadataAddAuthor.md",
"type": "Markdown"
}
|
page_type: reference
<link rel="stylesheet" href="/site-assets/css/style.css">
<!-- DO NOT EDIT! Automatically generated file. -->
<div itemscope itemtype="http://developers.google.com/ReferenceObject">
<meta itemprop="name" content="tflite_support.metadata_schema_py_generated.ModelMetadataAddAuthor" />
<meta itemprop="path" content="Stable" />
</div>
# tflite_support.metadata_schema_py_generated.ModelMetadataAddAuthor
<!-- Insert buttons and diff -->
<table class="tfo-notebook-buttons tfo-api nocontent" align="left">
<td>
<a target="_blank" href="https://github.com/tensorflow/tflite-support/blob/v0.4.4/tensorflow_lite_support/metadata/metadata_schema_py_generated.py#L3120-L3121">
<img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />
View source on GitHub
</a>
</td>
</table>
<pre class="devsite-click-to-copy prettyprint lang-py tfo-signature-link">
<code>tflite_support.metadata_schema_py_generated.ModelMetadataAddAuthor(
builder, author
)
</code></pre>
<!-- Placeholder for "Used in" -->
|
tensorflowREPO_NAMEtensorflowPATH_START.@tensorflow_extracted@tensorflow-master@tensorflow@lite@g3doc@api_docs@python@tflite_support@metadata_schema_py_generated@ModelMetadataAddAuthor.md@.PATH_END.py
|
{
"filename": "rfi_excision_tools.py",
"repo_name": "CHIME-Pulsar-Timing/CHIME-Pulsar_automated_filterbank",
"repo_path": "CHIME-Pulsar_automated_filterbank_extracted/CHIME-Pulsar_automated_filterbank-main/rfi_excision_tools.py",
"type": "Python"
}
|
import numpy as np
def get_divisors(n):
    """Return all positive divisors of ``n`` as a sorted numpy array.

    Uses paired trial division up to sqrt(n) (O(sqrt(n)) instead of the
    original O(n) scan); for each small divisor i the partner n // i is
    recorded, and the two halves are merged in ascending order.
    """
    small = []   # divisors <= sqrt(n), found in ascending order
    large = []   # matching co-divisors > sqrt(n), found in descending order
    i = 1
    while i * i <= n:
        if n % i == 0:
            small.append(i)
            if i != n // i:  # avoid listing a perfect-square root twice
                large.append(n // i)
        i += 1
    return np.array(small + large[::-1])
def mad(data, axis=None):
    """Median absolute deviation of *data* along *axis* (masked-array aware)."""
    center = np.ma.median(data, axis=axis)
    deviations = np.abs(data - center)
    return np.ma.median(deviations, axis=axis)
def mad2std(m):
    """Scale a MAD value to the equivalent Gaussian standard deviation."""
    return m * 1.4826
def detect_rfi_mad(spectra, stat_window=64, tfact=1, thresh=5.0):
    """
    This function detects RFI in frequency by computing the median and median absolute deviation
    of channels from adjacent channels. This helps to recognize dropouts due to packet loss or
    GPU node failures, while also picking up on some RFI that spectral kurtosis may miss.
    Adapted from Bradley Meyers code developed for slow pulsar search
    This function is based on functions used to estimate calibration information for CHIME/FRB,
    written by Bridget Anderson.

    Parameters
    ----------
    spectra : np.array of floats
        The dynamic spectrum with a shape (nchan, M) where M is the number
        of time steps to be combined.
    stat_window : int
        The size (in chunks) of the sliding window in frequency over which
        statistics are calculated.
    tfact : int
        Time downsampling factor; the per-block time step is 1024 // tfact
        samples.
    thresh : float
        The number of standard deviations the median of a channel
        must differ from the median to be flagged.

    Returns
    -------
    final_mask : np.array of bool
        The output RFI mask. It is the same shape as the input dynamic spectrum.
        Indices that are flagged are denoted by a value of True.
    """
    # silence divide/invalid warnings from masked or zero data; restored at exit
    old_np_settings = np.seterr(divide='ignore', invalid='ignore')
    final_mask = np.zeros_like(spectra, dtype=bool)
    nchan, ntimes = spectra.shape
    step = 1024 // tfact
    # do this for every time block of `step` samples
    for i in range(ntimes // step):
        # predetermine the time-index slice
        tsl = slice(i * step, (i + 1) * step)
        # time-averaged spectrum of this block
        intensity = np.ma.mean(spectra[:, tsl], axis=1)
        for j in range(stat_window, nchan + stat_window, stat_window):
            # predetermine the frequency-index slice based on stat_window,
            # clipped to the valid channel range at both ends
            upper_index = j if j < nchan else nchan
            lower_index = j - stat_window if (j - stat_window) > 0 else 0
            fsl = slice(lower_index, upper_index)
            spec_chunk = intensity[fsl]
            # calculate the median and the median absolute deviation
            # (NOTE: this local name shadows the module-level mad() helper)
            med = np.ma.median(spec_chunk)
            mad = np.ma.median(np.abs(spec_chunk - med))
            # convert the MAD to an equivalent standard deviation
            std = mad2std(mad)
            # filter out samples outside the nominal threshold defined by thresh
            pthresh = med + thresh * std
            nthresh = med - thresh * std
            filt = np.logical_or(spec_chunk > pthresh, spec_chunk < nthresh)
            # update the entries of the final data mask: broadcast the
            # per-channel flags across all `step` time samples of this block
            final_mask[fsl, tsl] = np.tile(filt, (step, 1)).T
    # also do the median filtering based on the time-averaged spectrum
    mspec = np.ma.mean(spectra, axis=-1)
    mspec_med = np.ma.median(mspec)
    mspec_mad = np.ma.median(np.abs(mspec - mspec_med))
    mspec_std = mad2std(mspec_mad)
    # asymmetric cut: 3 sigma above and 1.5 sigma below the median
    pthresh = mspec_med + 3 * mspec_std
    nthresh = mspec_med - 1.5 * mspec_std
    mspec_mask = np.logical_or(mspec > pthresh, mspec < nthresh)
    # combine the mean-spectrum mask with the per-time-block mask
    final_mask = np.logical_or(final_mask, mspec_mask[:, np.newaxis])
    np.seterr(**old_np_settings)
    return final_mask
def detect_rfi_sk(spectra, thresh=3.0, ffact=1, plot=False):
    """
    Calculate the generalized spectral kurtosis from a dynamic spectrum in order to flag RFI.
    This function operates under the assumption that the `spectra` (which can be a masked array)
    passed has shape (nchan, M), which means that the organization of data needs to be done prior
    to calling this function.
    Adapted from Bradley Meyers code developed for slow pulsar search

    Parameters
    ----------
    spectra : np.ma.MaskedArray of floats
        The dynamic spectrum with a shape (nchan, M) where M is the number
        of time steps to be combined.  Must be a masked array (its ``.mask``
        is combined into the output).
    thresh : float
        The number of standard deviations from the mean (E[SK] = 1) above
        and below which channels will be masked (symmetric cut).
    ffact : int
        Frequency downsampling factor used to rescale the hard-coded
        clean-band channel ranges below.
    plot : bool
        Unused; kept for interface compatibility.

    Returns
    -------
    combined_mask : np.array of bool
        The output RFI mask. It is the same shape as the input dynamic spectrum.
        Indices that are flagged are denoted by a value of True.
    """
    old_np_settings = np.seterr(divide='ignore', invalid='ignore')
    nchan, m = spectra.shape
    # perform a summation over the m power measurements and their square
    s1_sq = (np.ma.sum(spectra, axis=1).astype(float))**2
    s2 = np.ma.sum(spectra**2, axis=1)
    # generalized spectral kurtosis estimator (Nita & Gary 2010, eq. 7)
    spec_sk = ((m + 1) / float(m - 1)) * (m * (s2 / s1_sq) - 1)
    # calculate the mean of the SK estimator in clean parts of the spectrum
    # (channel ranges assumed to be RFI-free -- presumably instrument-specific,
    #  TODO confirm against the telescope band layout)
    spec_sk_norm = np.ma.mean([
        np.ma.median(spec_sk[3100//ffact:3800//ffact]),
        np.ma.median(spec_sk[6300//ffact:7000//ffact])])
    # calculate the appropriate d so that the mean value of SK is 1
    #d = (np.ma.mean(s1_sq) * ((m - 1) * spec_sk_norm + 1) - m * np.ma.mean(s2)) / (m * (m * np.ma.mean(s2) - np.ma.mean(s1_sq))))
    d = 1.0 / spec_sk_norm  # approximately
    #print("recalculated d = {0}".format(d))
    # recalculate the SK with the d correction. Solve for X in:
    #
    # X * ((m + 1) / (m - 1)) * (m * (s2 / s1_sq) - 1) = ((d * m + 1) / (m - 1)) * (m * (s2 / s1_sq) - 1)
    # X = (d * m + 1) / (m + 1)
    spec_sk = ((d * m + 1) / float(m + 1)) * spec_sk
    # the expectation values for a correctly scaled SK measurement (Nita & Gary 2010, eq. 9 & 10)
    expectation_mean = 1.0
    expectation_std = np.sqrt((2.0 / m) * (1.0 + 1.0 / d))
    # based on the desired threshold (equivalent to Gaussian sigma) for RFI removal
    nthresh = expectation_mean - thresh * expectation_std
    pthresh = expectation_mean + thresh * expectation_std
    # create RFI mask from SK estimator
    sk_mask = np.ma.logical_or(spec_sk > pthresh, spec_sk < nthresh)
    #sk_mask = np.tile(sk_mask.astype(bool), (m, 1))
    # combine the SK mask with the original spectra mask (from the message packets themselves)
    combined_mask = np.logical_or(spectra.mask, sk_mask[:, np.newaxis])
    np.seterr(**old_np_settings)
    return combined_mask
|
CHIME-Pulsar-TimingREPO_NAMECHIME-Pulsar_automated_filterbankPATH_START.@CHIME-Pulsar_automated_filterbank_extracted@CHIME-Pulsar_automated_filterbank-main@rfi_excision_tools.py@.PATH_END.py
|
{
"filename": "_fill.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/volume/surface/_fill.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class FillValidator(_plotly_utils.basevalidators.NumberValidator):
    """Number validator for the ``fill`` property of ``volume.surface`` (0..1)."""

    def __init__(self, plotly_name="fill", parent_name="volume.surface", **kwargs):
        # Supply defaults only when the caller has not overridden them.
        kwargs.setdefault("edit_type", "calc")
        kwargs.setdefault("max", 1)
        kwargs.setdefault("min", 0)
        super(FillValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@volume@surface@_fill.py@.PATH_END.py
|
{
"filename": "_opacity.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/scatterpolargl/unselected/marker/_opacity.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class OpacityValidator(_plotly_utils.basevalidators.NumberValidator):
    """Number validator for ``scatterpolargl.unselected.marker.opacity`` (0..1)."""

    def __init__(
        self,
        plotly_name="opacity",
        parent_name="scatterpolargl.unselected.marker",
        **kwargs,
    ):
        # Supply defaults only when the caller has not overridden them.
        kwargs.setdefault("edit_type", "style")
        kwargs.setdefault("max", 1)
        kwargs.setdefault("min", 0)
        super(OpacityValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@scatterpolargl@unselected@marker@_opacity.py@.PATH_END.py
|
{
"filename": "sdss_dr16_qso_bao_dmdh.py",
"repo_name": "ggalloni/cobaya",
"repo_path": "cobaya_extracted/cobaya-master/cobaya/likelihoods/bao/sdss_dr16_qso_bao_dmdh.py",
"type": "Python"
}
|
from cobaya.likelihoods.base_classes import BAO
class sdss_dr16_qso_bao_dmdh(BAO):
    r"""
    Likelihood of the QSO BAO from SDSS DR16 \cite{Alam:2020sor}.
    """
    # The docstring is the class body; the redundant ``pass`` is dropped.
|
ggalloniREPO_NAMEcobayaPATH_START.@cobaya_extracted@cobaya-master@cobaya@likelihoods@bao@sdss_dr16_qso_bao_dmdh.py@.PATH_END.py
|
{
"filename": "_hoverlabel.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/cone/_hoverlabel.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class HoverlabelValidator(_plotly_utils.basevalidators.CompoundValidator):
    """Compound validator for the ``hoverlabel`` property of ``cone`` traces.

    Auto-generated plotly validator; ``data_docs`` is the user-facing help
    text for each sub-property of the ``Hoverlabel`` data class.
    """

    def __init__(self, plotly_name="hoverlabel", parent_name="cone", **kwargs):
        super(HoverlabelValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            data_class_str=kwargs.pop("data_class_str", "Hoverlabel"),
            data_docs=kwargs.pop(
                "data_docs",
                """
            align
                Sets the horizontal alignment of the text
                content within hover label box. Has an effect
                only if the hover label text spans more two or
                more lines
            alignsrc
                Sets the source reference on Chart Studio Cloud
                for  align .
            bgcolor
                Sets the background color of the hover labels
                for this trace
            bgcolorsrc
                Sets the source reference on Chart Studio Cloud
                for  bgcolor .
            bordercolor
                Sets the border color of the hover labels for
                this trace.
            bordercolorsrc
                Sets the source reference on Chart Studio Cloud
                for  bordercolor .
            font
                Sets the font used in hover labels.
            namelength
                Sets the default length (in number of
                characters) of the trace name in the hover
                labels for all traces. -1 shows the whole name
                regardless of length. 0-3 shows the first 0-3
                characters, and an integer >3 will show the
                whole name if it is less than that many
                characters, but if it is longer, will truncate
                to `namelength - 3` characters and add an
                ellipsis.
            namelengthsrc
                Sets the source reference on Chart Studio Cloud
                for  namelength .
""",
            ),
            **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@cone@_hoverlabel.py@.PATH_END.py
|
{
"filename": "Comparing_HITEMP_and_ExoMol.ipynb",
"repo_name": "HajimeKawahara/exojax",
"repo_path": "exojax_extracted/exojax-master/documents/tutorials/Comparing_HITEMP_and_ExoMol.ipynb",
"type": "Jupyter Notebook"
}
|
## Comparing HITEMP and ExoMol
```python
from exojax.spec.hitran import line_strength, doppler_sigma, gamma_hitran, gamma_natural
from exojax.spec.exomol import gamma_exomol
from exojax.spec import api
import numpy as np
import matplotlib.pyplot as plt
```
First of all, set a wavenumber bin in the unit of wavenumber (cm-1).
Here we set the wavenumber range as $1000 \le \nu \le 10000$ (1/cm) with the resolution of 0.01 (1/cm).
We call moldb instance with the path of par file.
If the par file does not exist, moldb will try to download it from HITRAN website.
```python
# Setting wavenumber bins and loading HITEMP database
wav = np.linspace(22930.0, 23000.0, 4000, dtype=np.float64) # AA
nus = 1.0e8 / wav[::-1] # cm-1
```
```python
mdbCO_HITEMP = api.MdbHitemp(
"CO", nus, isotope=1, gpu_transfer=True
) # we use istope=1 for comparison
```
radis engine = vaex
Downloading 05_HITEMP2019.par.bz2 for CO (1/1).
Download complete. Parsing CO database to /home/kawahara/exojax/documents/tutorials/CO-05_HITEMP2019.hdf5
```python
emf = "CO/12C-16O/Li2015" # this is isotope=1 12C-16O
mdbCO_Li2015 = api.MdbExomol(emf, nus, gpu_transfer=True)
```
/home/kawahara/exojax/src/exojax/utils/molname.py:197: FutureWarning: e2s will be replaced to exact_molname_exomol_to_simple_molname.
warnings.warn(
/home/kawahara/exojax/src/exojax/utils/molname.py:91: FutureWarning: exojax.utils.molname.exact_molname_exomol_to_simple_molname will be replaced to radis.api.exomolapi.exact_molname_exomol_to_simple_molname.
warnings.warn(
/home/kawahara/exojax/src/exojax/utils/molname.py:91: FutureWarning: exojax.utils.molname.exact_molname_exomol_to_simple_molname will be replaced to radis.api.exomolapi.exact_molname_exomol_to_simple_molname.
warnings.warn(
HITRAN exact name= (12C)(16O)
radis engine = vaex
Molecule: CO
Isotopologue: 12C-16O
Background atmosphere: H2
ExoMol database: None
Local folder: CO/12C-16O/Li2015
Transition files:
=> File 12C-16O__Li2015.trans
Broadening code level: a0
/home/kawahara/exojax/src/radis/radis/api/exomolapi.py:685: AccuracyWarning: The default broadening parameter (alpha = 0.07 cm^-1 and n = 0.5) are used for J'' > 80 up to J'' = 152
warnings.warn(
Define molecular weight of CO ($\sim 12+16=28$), temperature (K), and pressure (bar).
Also, we here assume the 100 % CO atmosphere, i.e. the partial pressure = pressure.
```python
from exojax.spec import molinfo
molecular_mass = molinfo.molmass("CO") # molecular weight
Tfix = 1300.0 # we assume T=1300K
Pfix = 0.99 # we compute P=1 bar=0.99+0.01
Ppart = 0.01 # partial pressure of CO. here we assume a 1% CO atmosphere (very few).
```
partition function ratio $q(T)$ is defined by
$q(T) = Q(T)/Q(T_{ref})$; $T_{ref}$=296 K
Here, we use the partition function from HAPI
```python
# mdbCO_HITEMP.ExomolQT(emf) #use Q(T) from Exomol/Li2015
from exojax.utils.constants import Tref_original
qt_HITEMP = mdbCO_HITEMP.qr_interp(1, Tfix, Tref_original)
qt_Li2015 = mdbCO_Li2015.qr_interp(Tfix, Tref_original)
```
Let us compute the line strength S(T) at temperature of Tfix.
$S (T;s_0,\nu_0,E_l,q(T)) = S_0 \frac{Q(T_{ref})}{Q(T)} \frac{e^{- h c E_l /k_B T}}{e^{- h c E_l /k_B T_{ref}}} \frac{1- e^{- h c \nu /k_B T}}{1-e^{- h c \nu /k_B T_{ref}}}= q_r(T)^{-1} e^{ s_0 - c_2 E_l (T^{-1} - T_{ref}^{-1})} \frac{1- e^{- c_2 \nu_0/ T}}{1-e^{- c_2 \nu_0/T_{ref}}}$
$s_0=\log_{e} S_0$ : logsij0
$\nu_0$: nu_lines
$E_l$ : elower
Why the input is $s_0 = \log_{e} S_0$ instead of $S_0$ in SijT? This is because the direct value of $S_0$ is quite small and we need to use float32 for jax.
```python
Sij_HITEMP = line_strength(
Tfix,
mdbCO_HITEMP.logsij0,
mdbCO_HITEMP.nu_lines,
mdbCO_HITEMP.elower,
qt_HITEMP,
Tref_original,
)
Sij_Li2015 = line_strength(
Tfix,
mdbCO_Li2015.logsij0,
mdbCO_Li2015.nu_lines,
mdbCO_Li2015.elower,
qt_Li2015,
Tref_original,
)
```
Then, compute the Lorentz gamma factor (pressure+natural broadening)
$\gamma_L = \gamma^p_L + \gamma^n_L$
where the pressure broadening (HITEMP)
$\gamma^p_L = (T/296K)^{-n_{air}} [ \alpha_{air} ( P - P_{part})/P_{atm} + \alpha_{self} P_{part}/P_{atm}]$
$P_{atm}$: 1 atm in the unit of bar (i.e. = 1.01325)
or
the pressure broadening (ExoMol)
$\gamma^p_L = \alpha_{ref} ( T/T_{ref} )^{-n_{texp}} ( P/P_{ref}), $
and the natural broadening
$\gamma^n_L = \frac{A}{4 \pi c}$
```python
gammaL_HITEMP = gamma_hitran(
Pfix,
Tfix,
Ppart,
mdbCO_HITEMP.n_air,
mdbCO_HITEMP.gamma_air,
mdbCO_HITEMP.gamma_self,
) + gamma_natural(mdbCO_HITEMP.A)
gammaL_Li2015 = gamma_exomol(
Pfix, Tfix, mdbCO_Li2015.n_Texp, mdbCO_Li2015.alpha_ref
) + gamma_natural(mdbCO_Li2015.A)
```
Thermal broadening
$\sigma_D^{t} = \sqrt{\frac{k_B T}{M m_u}} \frac{\nu_0}{c}$
```python
# thermal doppler sigma
sigmaD_HITEMP = doppler_sigma(mdbCO_HITEMP.nu_lines, Tfix, molecular_mass)
sigmaD_Li2015 = doppler_sigma(mdbCO_Li2015.nu_lines, Tfix, molecular_mass)
```
Then, the line center...
In HITRAN database, a slight pressure shift can be included using $\delta_{air}$:
$\nu_0(P) = \nu_0 + \delta_{air} P$. But this shift is quite small.
```python
# line center
nu0_HITEMP = mdbCO_HITEMP.nu_lines
nu0_Li2015 = mdbCO_Li2015.nu_lines
```
We use Direct LFP.
```python
from exojax.spec.initspec import init_lpf
from exojax.spec.lpf import xsvector
numatrix_HITEMP = init_lpf(mdbCO_HITEMP.nu_lines, nus)
xsv_HITEMP = xsvector(numatrix_HITEMP, sigmaD_HITEMP, gammaL_HITEMP, Sij_HITEMP)
numatrix_Li2015 = init_lpf(mdbCO_Li2015.nu_lines, nus)
xsv_Li2015 = xsvector(numatrix_Li2015, sigmaD_Li2015, gammaL_Li2015, Sij_Li2015)
```
```python
fig = plt.figure(figsize=(10, 4))
ax = fig.add_subplot(111)
plt.plot(wav[::-1], xsv_HITEMP, lw=2, label="HITEMP2019")
plt.plot(wav[::-1], xsv_Li2015, lw=2, ls="dashed", label="Exomol w/ .broad")
plt.xlim(22970, 22976)
plt.xlabel("wavelength ($\AA$)", fontsize=14)
plt.ylabel("cross section ($cm^{2}$)", fontsize=14)
plt.legend(loc="upper left", fontsize=14)
plt.tick_params(labelsize=12)
plt.savefig("co_comparison.pdf", bbox_inches="tight", pad_inches=0.0)
plt.savefig("co_comparison.png", bbox_inches="tight", pad_inches=0.0)
plt.title("T=1300K,P=1bar")
plt.show()
```

```python
```
|
HajimeKawaharaREPO_NAMEexojaxPATH_START.@exojax_extracted@exojax-master@documents@tutorials@Comparing_HITEMP_and_ExoMol.ipynb@.PATH_END.py
|
{
"filename": "cattools.py",
"repo_name": "desihub/LSS",
"repo_path": "LSS_extracted/LSS-main/Sandbox/mkCat_singletile/cattools.py",
"type": "Python"
}
|
'''
python functions to do various useful date processing/manipulation
'''
import numpy as np
import fitsio
import glob
import astropy.io.fits as fits
from astropy.table import Table,join,unique,vstack
from matplotlib import pyplot as plt
import desimodel.footprint
import desimodel.focalplane
from random import random
def combspecdata(tile,night,coaddir ):
    """Combine per-spectrograph zbest data for one tile and night.

    Probes which of the 10 spectrographs produced a
    zbest-<spec>-<tile>-<night>.fits file, stacks their ZBEST tables, and
    joins on the deduplicated fibermap to attach LOCATION, FIBERSTATUS and
    PRIORITY.  Returns the joined astropy Table.
    """
    #put data from different spectrographs together, one table for fibermap, other for z
    specs = []
    #find out which spectrograph have data
    for si in range(0,10):
        try:
            fitsio.read(coaddir+str(tile)+'/'+night+'/zbest-'+str(si)+'-'+str(tile)+'-'+night+'.fits')
            specs.append(si)
        except:
            # file missing or unreadable for this spectrograph -> skip it
            print('no spectrograph '+str(si)+ ' on night '+night)
    print('spectrographs with data:')
    print(specs)
    # seed the stacks with the first spectrograph that has data
    tspec = Table.read(coaddir+str(tile)+'/'+night+'/zbest-'+str(specs[0])+'-'+str(tile)+'-'+night+'.fits',hdu='ZBEST')
    tf = Table.read(coaddir+str(tile)+'/'+night+'/zbest-'+str(specs[0])+'-'+str(tile)+'-'+night+'.fits',hdu='FIBERMAP')
    for i in range(1,len(specs)):
        tn = Table.read(coaddir+str(tile)+'/'+night+'/zbest-'+str(specs[i])+'-'+str(tile)+'-'+night+'.fits',hdu='ZBEST')
        tnf = Table.read(coaddir+str(tile)+'/'+night+'/zbest-'+str(specs[i])+'-'+str(tile)+'-'+night+'.fits',hdu='FIBERMAP')
        tspec = vstack([tspec,tn])
        tf = vstack([tf,tnf])
    # one fibermap row per target, keeping only the columns needed downstream
    tf = unique(tf,keys=['TARGETID'])
    tf.keep_columns(['TARGETID','LOCATION','FIBERSTATUS','PRIORITY'])
    tspec = join(tspec,tf,keys=['TARGETID'])
    print(len(tspec),len(tf))
    #tspec['LOCATION'] = tf['LOCATION']
    #tspec['FIBERSTATUS'] = tf['FIBERSTATUS']
    #tspec['PRIORITY'] = tf['PRIORITY']
    return tspec
def goodlocdict(tf):
    '''
    Build a LOCATION -> PRIORITY map (for all rows) and return it together
    with the list of good locations, i.e. those with FIBERSTATUS == 0.
    '''
    good = tf['FIBERSTATUS'] == 0
    print(str(len(tf[good])) + ' locations with FIBERSTATUS 0')
    goodloc = tf[good]['LOCATION']
    # to be used later for the randoms
    pdict = {loc: pri for loc, pri in zip(tf['LOCATION'], tf['PRIORITY'])}
    return pdict, goodloc
def gettarinfo_type(fadir,tile,goodloc,mtlf,tarbit,tp='CMX_TARGET'):
    """Collect target info for one tile, restricted to one target class.

    Joins the fiberassign FAVAIL (reachable-targets) table with the MTL,
    keeps only targets whose ``tp`` bitmask has bit ``tarbit`` set, and
    marks which of them were actually assigned a fiber.  Returns the joined
    table with LOCATION_ASSIGNED converted to a 0/1 flag.
    """
    #get target info
    tfa = Table.read(fadir+'fba-0'+str(tile)+'.fits',hdu='FAVAIL')
    tft = unique(tfa,keys=['TARGETID'])
    # keep only targets reachable from a good (FIBERSTATUS==0) location
    wgt = (np.isin(tfa['LOCATION'],goodloc))
    print(str(len(np.unique(tfa[wgt]['LOCATION']))) + ' good locations')
    print('comparison of number targets, number of targets with good locations')
    print(len(tfa),len(tfa[wgt]))
    tfa = unique(tfa[wgt],keys=['TARGETID'])
    tt = Table.read(mtlf)
    # Z/ZWARN will come from the spectroscopic reductions instead
    tt.remove_columns(['Z','ZWARN'])
    # select the requested target class via its bitmask bit
    wtype = ((tt[tp] & 2**tarbit) > 0)
    tt = tt[wtype]
    tfa = join(tfa,tt,keys=['TARGETID'])
    tft = join(tft,tt,keys=['TARGETID'])
    print(str(len(tfa)) +' unique targets with good locations and at '+str(len(np.unique(tfa['LOCATION'])))+' unique locations and '+str(len(tft))+ ' total unique targets at '+str(len(np.unique(tft['LOCATION']))) +' unique locations ')
    #Mark targets that actually got assigned fibers
    tfall = Table.read(fadir+'fba-0'+str(tile)+'.fits',hdu='FASSIGN')
    tfall.keep_columns(['TARGETID','LOCATION'])
    tfa = join(tfa,tfall,keys=['TARGETID'],join_type='left',table_names = ['', '_ASSIGNED'], uniq_col_name='{col_name}{table_name}')
    wgl = np.isin(tfa['LOCATION_ASSIGNED'],goodloc)
    wtype = ((tfa[tp] & 2**tarbit) > 0)
    wtfa = wgl & wtype
    print('number of assigned fibers at good locations '+str(len(tfa[wtfa])))
    # rows with a non-masked LOCATION_ASSIGNED (x*0 == 0 is False for masked entries)
    wal = tfa['LOCATION_ASSIGNED']*0 == 0
    print('number of assigned fibers '+str(len(tfa[wal])))
    # convert LOCATION_ASSIGNED into a 0/1 "was assigned" flag
    tfa['LOCATION_ASSIGNED'] = np.zeros(len(tfa),dtype=int)
    tfa['LOCATION_ASSIGNED'][wal] = 1
    wal = tfa['LOCATION_ASSIGNED'] == 1
    print('number of assigned fibers '+str(len(tfa[wal])))
    return tfa
def mkfullran(tile,goodloc,pdict,randir):
    """Build the full random catalog for one tile.

    Keeps randoms whose fiberassign LOCATION is in ``goodloc``, joins back
    the full column set from the tile's random file, and attaches the
    PRIORITY of each random's location via ``pdict``.  Returns the joined
    table.
    """
    ranf = randir+'fba-0'+str(tile)+'.fits'
    f1 = fitsio.read(ranf)  # NOTE(review): read but unused; kept as-is
    f2 = fitsio.read(ranf,ext=2)  # NOTE(review): read but unused; kept as-is
    f3 = fitsio.read(ranf,ext=3)
    # randoms reachable from a good location
    goodranw = np.isin(f3['LOCATION'],goodloc)
    goodranid = np.unique(f3[goodranw]['TARGETID'])  # NOTE(review): unused; kept as-is
    t2 = Table.read(ranf,hdu=2)
    tj = Table()
    tj['TARGETID'] = f3[goodranw]['TARGETID']
    tj['LOCATION'] = f3[goodranw]['LOCATION']
    tj['FIBER'] = f3[goodranw]['FIBER']
    tj = unique(tj,keys=['TARGETID'])
    # these columns will be re-derived; drop them before the join
    t2.remove_columns(['PRIORITY','OBSCONDITIONS','SUBPRIORITY'])
    rant = join(tj,t2,keys=['TARGETID'],join_type='left')
    #now match back to randoms with all columns
    tall = Table.read(randir+'tilenofa-'+str(tile)+'.fits')
    tall.remove_columns(['NUMOBS_MORE','PRIORITY','OBSCONDITIONS','SUBPRIORITY','NUMOBS_INIT'])
    ranall = join(rant,tall,keys=['TARGETID'],join_type='left')
    print('number of randoms:')
    print(len(ranall))
    # map each random's LOCATION to that location's fiberassign PRIORITY
    ranall['PRIORITY'] = np.vectorize(pdict.__getitem__)(ranall['LOCATION'])
    return ranall
def mkclusdat(ffd,fcd,zfailmd= 'zwarn',weightmd= 'wloc',maskbits=[]):
    """Write the clustering data catalog for one tile.

    Reads the full data catalog ``ffd``, applies the imaging veto
    (``cutphotmask``), selects good redshifts, attaches per-location
    completeness weights, and writes RA/DEC/Z/WEIGHT to ``fcd``.

    Returns (maxp, loc_fail): the maximum PRIORITY in the data and the
    LOCATIONs of redshift failures, both used by mkclusran to cut the
    randoms consistently.
    """
    dd = fitsio.read(ffd)
    ddm = cutphotmask(dd,maskbits)
    #print(np.unique(dd['ZWARN']))
    # NOTE(review): maxp is taken from dd (pre imaging veto), not ddm -- confirm intended
    maxp = np.max(dd['PRIORITY'])
    if zfailmd == 'zwarn':
        # ZWARN == 999999 marks "no data"; any other non-zero ZWARN is a failure
        wfail = (dd['ZWARN'] != 999999) & (dd['ZWARN'] > 0)
        wg = (ddm['ZWARN'] == 0)
        loc_fail = dd[wfail]['LOCATION']
        print(' number of redshift failures:')
        print(len(loc_fail))
    # per-location multiplicity, used for the completeness weights
    nl = countloc(ddm)
    # good-redshift sample
    ddzg = ddm[wg]
    print('clustering catalog will have '+str(len(ddzg))+ ' objects in it')
    ddclus = Table()
    ddclus['RA'] = ddzg['RA']
    ddclus['DEC'] = ddzg['DEC']
    ddclus['Z'] = ddzg['Z']
    if weightmd == 'wloc':
        ddclus['WEIGHT'] = assignweights(ddzg,nl)
    print('minimum,maximum weight')
    print(np.min(ddclus['WEIGHT']),np.max(ddclus['WEIGHT']))
    ddclus.write(fcd,format='fits',overwrite=True)
    print('write clustering data file to '+fcd)
    return maxp,loc_fail
def mkclusran(ffr,fcr,fcd,maxp,loc_fail,maskbits=[]):
    """Write the clustering random catalog for one tile.

    Applies the imaging veto to the full randoms ``ffr``, removes randoms
    at priorities above ``maxp`` or at redshift-failure locations, assigns
    each surviving random the (Z, WEIGHT) of a data galaxy drawn at random
    from ``fcd``, and writes RA/DEC/Z/WEIGHT to ``fcr``.
    """
    dr = fitsio.read(ffr)
    drm = cutphotmask(dr,maskbits)
    # keep randoms at locations that could have hosted this sample, and
    # drop locations where the data redshift failed
    wpr = drm['PRIORITY'] <= maxp
    wzf = np.isin(drm['LOCATION'],loc_fail)
    wzt = wpr & ~wzf
    drmz = drm[wzt]
    print(str(len(drmz))+' after cutting based on failures and priority')
    rclus = Table()
    rclus['RA'] = drmz['RA']
    rclus['DEC'] = drmz['DEC']
    dd = fitsio.read(fcd)
    #rclus['Z'] = 0
    #rclus['WEIGHT'] = 1
    zl = []
    wl = []
    ndz = 0
    naz = 0
    # draw a (Z, WEIGHT) pair from a random data row for every random point
    for ii in range(0,len(rclus)):
        ind = int(random()*len(dd))
        zr = dd[ind]['Z']
        if zr == 0:
            # count draws that landed on Z==0 data rows; both counters should
            # match the final len(rclus[wz]) sanity check below
            ndz += 1.
            naz += 1
        wr = dd[ind]['WEIGHT']
        #rclus[ii]['Z'] = zr
        #rclus[ii]['WEIGHT'] = wr
        zl.append(zr)
        wl.append(wr)
    zl = np.array(zl)
    wl = np.array(wl)
    rclus['Z'] = zl
    rclus['WEIGHT'] = wl
    wz = rclus['Z'] == 0
    # sanity check: all three counts of Z==0 draws should agree
    print(ndz,naz,len(rclus[wz]))
    rclus.write(fcr,format='fits',overwrite=True)
    print('write clustering random file to '+fcr)
def cutphotmask(aa, bits):
    '''
    Apply the imaging vetoes: require at least one observation in each of
    g, r and z, and require that none of the requested MASKBITS are set.
    Returns the surviving rows.
    '''
    print(str(len(aa)) + ' before imaging veto')
    keep = (aa['NOBS_G'] > 0) & (aa['NOBS_R'] > 0) & (aa['NOBS_Z'] > 0)
    for bit in bits:
        keep &= (aa['MASKBITS'] & (1 << bit)) == 0
    aa = aa[keep]
    print(str(len(aa)) + ' after imaging veto')
    return aa
def countloc(aa):
    """Count how many rows of ``aa`` fall on each LOCATION.

    Returns a float array ``nl`` of length max(LOCATION)+1 with
    ``nl[loc]`` = number of entries at that location (0 for unused ones).
    """
    locs = np.asarray(aa['LOCATION'])
    # np.bincount does the per-location tally in C instead of a Python loop;
    # astype(float) preserves the original float return type.
    return np.bincount(locs, minlength=int(np.max(locs)) + 1).astype(float)
def assignweights(aa, nl):
    """Look up the per-location multiplicity weight for every row of ``aa``.

    ``nl`` is the array produced by ``countloc``; the returned array has one
    weight per row, ``nl[LOCATION]``.
    """
    # Fancy indexing replaces the original O(n) Python loop; dtype float
    # matches the original np.ones-based result.
    return np.asarray(nl, dtype=float)[np.asarray(aa['LOCATION'])]
def mkran4fa(N=None,fout='random_mtl.fits',dirout=''):
    '''
    cut imaging random file to first N (or take all if N is None) entries and add columns necessary for fiberassignment routines
    '''
    if N is None:
        rall = fitsio.read('/global/cfs/cdirs/desi/target/catalogs/dr9m/0.44.0/randoms/resolve/randoms-1-0.fits')
    else:
        # rows= reads only the first N rows, avoiding a full-file load
        rall = fitsio.read('/global/cfs/cdirs/desi/target/catalogs/dr9m/0.44.0/randoms/resolve/randoms-1-0.fits',rows=np.arange(N))
    print('read '+str(len(rall))+ ' rows from random file')
    rmtl = Table()
    # copy every imaging-random column into an astropy Table
    for name in rall.dtype.names:
        rmtl[name] = rall[name]
    # dummy targeting/bookkeeping columns that fiberassign expects
    rmtl['TARGETID'] = np.arange(len(rall))
    rmtl['DESI_TARGET'] = np.ones(len(rall),dtype=int)*2
    rmtl['SV1_DESI_TARGET'] = np.ones(len(rall),dtype=int)*2
    rmtl['NUMOBS_INIT'] = np.zeros(len(rall),dtype=int)
    rmtl['NUMOBS_MORE'] = np.ones(len(rall),dtype=int)
    rmtl['PRIORITY'] = np.ones(len(rall),dtype=int)*3400
    rmtl['OBSCONDITIONS'] = np.ones(len(rall),dtype=int)
    rmtl['SUBPRIORITY'] = np.random.random(len(rall))
    print('added columns, writing to '+dirout+fout)
    # free the raw array before the (potentially large) write
    del rall
    rmtl.write(dirout+fout,format='fits', overwrite=True)
def randomtiles(tilef):
    '''
    Cut the pre-built random MTL to each tile footprint and write one
    tilenofa-<TILEID>.fits file per tile.
    '''
    tiles = fitsio.read(tilef)
    # NOTE(review): minisvdir is a module-level path defined elsewhere in this file
    rt = fitsio.read(minisvdir+'random/random_mtl.fits')
    print('loaded random file')
    indsa = desimodel.footprint.find_points_in_tiles(tiles,rt['RA'], rt['DEC'])
    print('got indexes')
    for i in range(0,len(indsa)):
        # Bug fix: index the per-tile TILEID.  The original used the whole
        # column (tiles['TILEID']), so every iteration built the same
        # array-valued filename and overwrote a single file.
        tile = tiles['TILEID'][i]
        fname = minisvdir+'random/tilenofa-'+str(tile)+'.fits'
        inds = indsa[i]
        fitsio.write(fname,rt[inds],clobber=True)
        print('wrote tile '+str(tile))
def randomtilesi(tilef ,dirout,ii):
    """Cut imaging randoms file ``ii`` to each tile footprint and write per-tile
    MTL-style random files (tilenofa-<TILEID>.fits) under ``dirout``/<ii>/.
    """
    tiles = fitsio.read(tilef)
    trad = desimodel.focalplane.get_tile_radius_deg()*1.1 #make 10% greater just in case
    print(trad)
    rt = fitsio.read('/global/cfs/cdirs/desi/target/catalogs/dr9m/0.44.0/randoms/resolve/randoms-1-'+str(ii)+'.fits')
    #rt = fitsio.read(minisvdir+'random/random_mtl.fits')
    print('loaded random file')
    for i in range(0,len(tiles)):
        tile = tiles['TILEID'][i]
        fname = dirout+str(ii)+'/tilenofa-'+str(tile)+'.fits'
        tdec = tiles['DEC'][i]
        # coarse declination pre-cut before the exact radial match
        decmin = tdec - trad
        decmax = tdec + trad
        wdec = (rt['DEC'] > decmin) & (rt['DEC'] < decmax)
        print(len(rt[wdec]))
        inds = desimodel.footprint.find_points_radec(tiles['RA'][i], tdec,rt[wdec]['RA'], rt[wdec]['DEC'])
        print('got indexes')
        #fitsio.write(fname,rt[wdec][inds],clobber=True)
        #print('wrote tile '+str(tile))
        #rmtl = Table.read(fname)
        rtw = rt[wdec][inds]
        rmtl = Table(rtw)
        # dummy targeting/bookkeeping columns that fiberassign expects
        rmtl['TARGETID'] = np.arange(len(rmtl))
        rmtl['DESI_TARGET'] = np.ones(len(rmtl),dtype=int)*2
        rmtl['SV1_DESI_TARGET'] = np.ones(len(rmtl),dtype=int)*2
        rmtl['NUMOBS_INIT'] = np.zeros(len(rmtl),dtype=int)
        rmtl['NUMOBS_MORE'] = np.ones(len(rmtl),dtype=int)
        rmtl['PRIORITY'] = np.ones(len(rmtl),dtype=int)*3400
        rmtl['OBSCONDITIONS'] = np.ones(len(rmtl),dtype=int)*tiles['OBSCONDITIONS'][i]
        rmtl['SUBPRIORITY'] = np.random.random(len(rmtl))
        print('added columns, writing to '+fname)
        rmtl.write(fname,format='fits', overwrite=True)
def ELGtilesi(tilef ):
    """Cut the SV0 ELG target MTL to each tile footprint and write per-tile files.

    NOTE(review): ``minisvdir`` is a module-level path defined elsewhere in
    this file.
    """
    tiles = fitsio.read(tilef)
    trad = desimodel.focalplane.get_tile_radius_deg()*1.1 #make 10% greater just in case
    print(trad)
    rt = fitsio.read(minisvdir+'targets/MTL_all_SV0_ELG_tiles_0.37.0.fits')
    print('loaded random file')
    # NOTE(review): starts at index 3, skipping the first three tiles -- confirm intended
    for i in range(3,len(tiles)):
        tile = tiles['TILEID'][i]
        fname = minisvdir+'targets/MTL_TILE_ELG_'+str(tile)+'_0.37.0.fits'
        tdec = tiles['DEC'][i]
        # coarse declination pre-cut before the exact radial match
        decmin = tdec - trad
        decmax = tdec + trad
        wdec = (rt['DEC'] > decmin) & (rt['DEC'] < decmax)
        print(len(rt[wdec]))
        inds = desimodel.footprint.find_points_radec(tiles['RA'][i], tdec,rt[wdec]['RA'], rt[wdec]['DEC'])
        print('got indexes')
        fitsio.write(fname,rt[wdec][inds],clobber=True)
        print('wrote tile '+str(tile))
def targtilesi(type,tilef ):
    """Cut the per-type DR8 target file to each tile footprint and write per-tile files.

    NOTE(review): ``tardir`` is a module-level path defined elsewhere in this
    file; the parameter ``type`` shadows the builtin of the same name.
    """
    tiles = fitsio.read(tilef)
    trad = desimodel.focalplane.get_tile_radius_deg()*1.1 #make 10% greater just in case
    print(trad)
    rt = fitsio.read(tardir+type+'allDR8targinfo.fits')
    print('loaded random file')
    for i in range(0,len(tiles)):
        tile = tiles['TILEID'][i]
        fname = tardir+type+str(tile)+'.fits'
        tdec = tiles['DEC'][i]
        # coarse declination pre-cut before the exact radial match
        decmin = tdec - trad
        decmax = tdec + trad
        wdec = (rt['DEC'] > decmin) & (rt['DEC'] < decmax)
        print(len(rt[wdec]))
        inds = desimodel.footprint.find_points_radec(tiles['RA'][i], tdec,rt[wdec]['RA'], rt[wdec]['DEC'])
        print('got indexes')
        fitsio.write(fname,rt[wdec][inds],clobber=True)
        print('wrote tile '+str(tile))
def mktilef_date(dirout,fout='msvtiles.fits'):
    '''
    make a tile file for a date that Anand made tiles
    TBD
    '''
    # The body was a verbatim copy of mkminisvtilef; delegate to it so the
    # two stay in sync until the date-specific logic (TBD above) lands.
    mkminisvtilef(dirout, fout)
def mkminisvtilef(dirout,fout='msvtiles.fits'):
    '''
    manually make tile fits file for sv tiles
    '''
    svt = Table()
    svt['TILEID'] = np.array([70000,70001,70002,70003,70004,70005,70006],dtype=int)
    svt['RA'] = np.array([119.,133.,168.,214.75,116.,158.,214.75])
    svt['DEC'] = np.array([50.,26.5,27.6,53.4,20.7,25.,53.4])
    svt['PASS'] = np.zeros(7,dtype=int)
    svt['IN_DESI'] = np.ones(7,dtype=int)
    # 65535 = all observing-condition bits set
    svt['OBSCONDITIONS'] = np.ones(7,dtype=int)*65535
    program = [b'DARK' for _ in range(7)]
    svt['PROGRAM'] = np.array(program,dtype='|S6')
    svt.write(dirout+fout,format='fits', overwrite=True)
def mkminisvtilef_SV0(dirout,fout='msv0tiles.fits'):
    '''
    manually make tile fits file for minisv0 tiles
    '''
    ntile = 5
    tab = Table()
    tab['TILEID'] = np.array([68000,68001,68002,67142,67230],dtype=int)
    tab['RA'] = np.array([214.75,214.76384,202.,204.136476102484,138.997356099811])
    tab['DEC'] = np.array([53.4,53.408,8.25,5.90422737037591,0.574227370375913])
    tab['PASS'] = np.zeros(ntile,dtype=int)
    tab['IN_DESI'] = np.ones(ntile,dtype=int)
    # 65535 = all observing-condition bits set
    tab['OBSCONDITIONS'] = np.ones(ntile,dtype=int)*65535
    tab['PROGRAM'] = np.array([b'DARK']*ntile,dtype='|S6')
    tab.write(dirout+fout,format='fits', overwrite=True)
def plotdatran(type,tile,night):
    """Plot data points (sized by their weights) over randoms for one tile/night.

    Parameters
    ----------
    type : str
        Target class ('LRG', 'ELG', ...); controls marker color/shape.
    tile : int or str
        Tile identifier used in the catalog file names.
    night : str
        Observing night string used in the catalog file names.
    """
    # The file paths already used str(tile); the title/savefig lines below
    # concatenated the raw value and broke for integer tiles, so normalize once.
    tile = str(tile)
    df = fitsio.read(dircat+type +tile+'_'+night+'_clustering.dat.fits')
    rf = fitsio.read(dircat+type +tile+'_'+night+'_clustering.ran.fits')
    plt.plot(rf['RA'],rf['DEC'],'k,')
    # Defaults fix a NameError for types other than LRG/ELG, which previously
    # reached plt.scatter with pc/pt undefined.
    pc = 'k'
    pt = '.'
    if type == 'LRG':
        pc = 'r'
        pt = 'o'
    if type == 'ELG':
        pc = 'b'
        pt = '*'
    plt.scatter(df['RA'],df['DEC'],s=df['WEIGHT']*3,c=pc,marker=pt)
    plt.xlabel('RA')
    plt.ylabel('DEC')
    plt.title(type + ' '+tile+' '+night)
    plt.savefig('dataran'+type+tile+night+'.png')
    plt.show()
def gathertargets(type):
    """Concatenate all target FITS files under targroot into one catalog for *type*.

    Reads every file matching targroot+'*.fits', keeps only rows whose
    DESI_TARGET bit corresponds to the requested class, and writes the
    combined catalog to tardir+type+'allDR8targinfo.fits'.

    Parameters
    ----------
    type : str
        One of 'ELG', 'LRG', 'QSO'; selects the DESI_TARGET bit below.
    """
    fns = glob.glob(targroot+'*.fits')
    # columns propagated to the combined catalog
    keys = ['RA', 'DEC', 'BRICKNAME','MORPHTYPE','DCHISQ','FLUX_G', 'FLUX_R', 'FLUX_Z','MW_TRANSMISSION_G', 'MW_TRANSMISSION_R', 'MW_TRANSMISSION_Z','NOBS_G', 'NOBS_R', 'NOBS_Z','PSFDEPTH_G', 'PSFDEPTH_R', 'PSFDEPTH_Z', 'GALDEPTH_G', 'GALDEPTH_R',\
    'GALDEPTH_Z','FIBERFLUX_G', 'FIBERFLUX_R', 'FIBERFLUX_Z', 'FIBERTOTFLUX_G', 'FIBERTOTFLUX_R', 'FIBERTOTFLUX_Z',\
    'MASKBITS', 'EBV', 'PHOTSYS','TARGETID','DESI_TARGET']
    #put information together, takes a couple of minutes
    ncat = len(fns)
    # accumulate each column as a growing Python list, one entry per key
    mydict = {}
    for key in keys:
        mydict[key] = []
    # DESI_TARGET bit positions for each target class
    if type == 'ELG':
        bit = 1 #target bit for ELGs
    if type == 'LRG':
        bit = 0
    if type == 'QSO':
        bit = 2
    for i in range(0,ncat):
        data = fitsio.read(fns[i],columns=keys)
        # keep only rows flagged as the requested target class
        data = data[(data['DESI_TARGET'] & 2**bit)>0]
        for key in keys:
            mydict[key] += data[key].tolist()
        print(i)
    outf = tardir+type+'allDR8targinfo.fits'
    collist = []
    for key in keys:
        # copy each column's FITS format code from the first input file so the
        # output preserves the original on-disk column types
        fmt = fits.open(fns[0])[1].columns[key].format
        collist.append(fits.Column(name=key,format=fmt,array=mydict[key]))
        print(key)
    hdu = fits.BinTableHDU.from_columns(fits.ColDefs(collist))
    hdu.writeto(outf,overwrite=True)
    print('wrote to '+outf)
|
desihubREPO_NAMELSSPATH_START.@LSS_extracted@LSS-main@Sandbox@mkCat_singletile@cattools.py@.PATH_END.py
|
{
"filename": "TRES_setup.py",
"repo_name": "amusecode/TRES",
"repo_path": "TRES_extracted/TRES-main/TRES_setup.py",
"type": "Python"
}
|
from amuse.community.seba.interface import SeBa
from amuse.datamodel import Particles
from amuse.units import units
from seculartriple_TPS.interface import SecularTriple
from TRES_options import max_mass, absolute_min_mass
from interactions import *
from tidal_friction_constant import *
import numpy as np
#-------
#to initialize the triple object
def make_stars(inner_primary_mass, inner_secondary_mass, outer_mass):
    """Create the three stellar Particles of the triple.

    Guarantees the inner primary is the more massive inner component by
    swapping the two inner masses when necessary. Both ``mass`` and
    ``initial_mass`` are set to the supplied values.
    """
    # enforce m1 >= m2 for the inner binary
    if inner_primary_mass < inner_secondary_mass:
        inner_primary_mass, inner_secondary_mass = (
            inner_secondary_mass, inner_primary_mass)

    stars = Particles(3)
    stars.is_star = True
    stars.is_donor = False

    masses = (inner_primary_mass, inner_secondary_mass, outer_mass)
    for index, mass in enumerate(masses):
        stars[index].mass = mass
        stars[index].initial_mass = mass

    return stars
def make_bins(stars, inner_semimajor_axis, outer_semimajor_axis,
        inner_eccentricity, outer_eccentricity,
        inner_argument_of_pericenter, outer_argument_of_pericenter,
        inner_longitude_of_ascending_node, outer_longitude_of_ascending_node):
    """Create the two binary Particles describing the hierarchical triple.

    bins[0] is the inner binary (stars[0] + stars[1]); bins[1] is the outer
    orbit pairing the tertiary stars[2] with the inner binary as its second
    child. Parent links are set on the children so the tree can be walked
    both ways. Returns the two-element Particles set of binaries.
    """
    bins = Particles(2)
    bins.is_star = False
    # mass transfer assumed stable until evaluated elsewhere
    bins.is_mt_stable = True
    bins.part_dt_mt = 1.
    bins.bin_type = bin_type['unknown'] #Unknown

    # inner binary: the two inner stars
    bins[0].child1 = stars[0]
    bins[0].child2 = stars[1]
    bins[0].child1.parent = bins[0]
    bins[0].child2.parent = bins[0]

    bins[0].semimajor_axis = inner_semimajor_axis
    bins[0].eccentricity = inner_eccentricity
    bins[0].argument_of_pericenter = inner_argument_of_pericenter
    bins[0].longitude_of_ascending_node = inner_longitude_of_ascending_node

    bins[0].mass_transfer_rate = 0.0 | units.MSun/units.yr
    bins[0].accretion_efficiency_mass_transfer = 1.0
    bins[0].accretion_efficiency_wind_child1_to_child2 = 0.0
    bins[0].accretion_efficiency_wind_child2_to_child1 = 0.0

    # outer orbit: tertiary star around the inner binary
    bins[1].child1 = stars[2]
    bins[1].child2 = bins[0]
    bins[1].child1.parent = bins[1]
    bins[1].child2.parent = bins[1]

    bins[1].semimajor_axis = outer_semimajor_axis
    bins[1].eccentricity = outer_eccentricity
    bins[1].argument_of_pericenter = outer_argument_of_pericenter
    bins[1].longitude_of_ascending_node = outer_longitude_of_ascending_node

    bins[1].mass_transfer_rate = 0.0 | units.MSun/units.yr
    bins[1].accretion_efficiency_mass_transfer = 1.0
    bins[1].accretion_efficiency_wind_child1_to_child2 = 0.0
    bins[1].accretion_efficiency_wind_child2_to_child1 = 0.0

    # binary evolutionary settings
    bins[0].specific_AM_loss_mass_transfer = 2.5
    bins[1].specific_AM_loss_mass_transfer = 2.5

    return bins
def test_initial_parameters(inner_primary_mass, inner_secondary_mass, outer_mass,
        inner_semimajor_axis, outer_semimajor_axis,
        inner_eccentricity, outer_eccentricity,
        relative_inclination,
        inner_argument_of_pericenter, outer_argument_of_pericenter,
        inner_longitude_of_ascending_node):
    """Validate the initial triple parameters.

    Returns a 3-tuple ``(ok, inner_eccentricity, outer_eccentricity)``:
    ``ok`` is False (with eccentricities 0, 0) when any parameter is out of
    range; otherwise True with the eccentricities floored at
    minimum_eccentricity.

    NOTE(review): minimum_eccentricity is not imported explicitly here —
    presumably it arrives via one of the star imports at the top of the
    file; verify it is actually in scope.
    """
    # NOTE(review): the mass checks below test max(primary, outer) and
    # min(secondary, outer) only — this looks like it assumes primary >=
    # secondary, but the swap enforcing that happens later in make_stars;
    # confirm a too-large secondary / too-small primary cannot slip through.
    if max(inner_primary_mass, outer_mass) > max_mass:
        print('error: masses not in allowed range')
        print('m1=',inner_primary_mass, 'm2=',inner_secondary_mass, 'm3=',outer_mass)
        print('should be below:', max_mass)
        print('max_mass settable in TRES_options.py')
        return False, 0,0

    if min(inner_secondary_mass, outer_mass) <= absolute_min_mass:
        print('error: masses not in allowed range')
        print('m1=',inner_primary_mass, 'm2=',inner_secondary_mass, 'm3=',outer_mass)
        print('should be at least above:', absolute_min_mass)
        print('absolute_min_mass settable in TRES_options.py')
        print('substellar objects can be included through EXCLUDE_SSO in TRES_options.py')
        return False, 0,0

    # the orbits must be hierarchical: inner orbit inside the outer orbit
    if inner_semimajor_axis >= outer_semimajor_axis:
        print('error input parameters, should be:')
        print('inner_semimajor_axis < outer_semimajor_axis' )
        return False, 0,0
    if (inner_semimajor_axis < 0.|units.RSun):
        print('error: inner separation not in allowed range')
        return False, 0,0
    if (outer_semimajor_axis < 0.|units.RSun):
        print('error: outer separation not in allowed range')
        return False, 0,0

    if (inner_eccentricity < 0.) or (inner_eccentricity > 1.):
        print('error: inner eccentricity not in allowed range')
        return False, 0,0
    if (outer_eccentricity < 0.) or (outer_eccentricity > 1.):
        print('error: outer eccentricity not in allowed range')
        return False, 0,0
    # floor tiny eccentricities to keep the secular equations well behaved
    if (inner_eccentricity < minimum_eccentricity):
        inner_eccentricity = minimum_eccentricity
    if (outer_eccentricity < minimum_eccentricity):
        outer_eccentricity = minimum_eccentricity

    # angles are expected in radians
    if (relative_inclination < 0.) or (relative_inclination > np.pi):
        print('error: relative inclination not in allowed range')
        return False, 0,0

    if (inner_argument_of_pericenter < -1.*np.pi) or (inner_argument_of_pericenter > np.pi):
        print('error: inner argument of pericenter not in allowed range')
        return False, 0,0
    if (outer_argument_of_pericenter < -1.*np.pi) or (outer_argument_of_pericenter > np.pi):
        print('error: outer argument of pericenter not in allowed range')
        return False, 0,0

    if (inner_longitude_of_ascending_node < -1.*np.pi) or (inner_longitude_of_ascending_node > np.pi):
        print('error: inner longitude of ascending node not in allowed range')
        return False, 0,0

    return True, inner_eccentricity, outer_eccentricity
def make_particle_sets(inner_primary_mass, inner_secondary_mass, outer_mass,
        inner_semimajor_axis, outer_semimajor_axis,
        inner_eccentricity, outer_eccentricity,
        relative_inclination,
        inner_argument_of_pericenter, outer_argument_of_pericenter,
        inner_longitude_of_ascending_node):
    """Validate the parameters and build the star and binary Particles sets.

    Returns ``(stars, bins, correct_params)``. Note that the sets are
    constructed even when validation fails (with both eccentricities forced
    to 0 by the failed check) — the caller is responsible for checking
    ``correct_params`` before using them.
    """
    correct_params, inner_eccentricity, outer_eccentricity = test_initial_parameters(inner_primary_mass, inner_secondary_mass, outer_mass,
        inner_semimajor_axis, outer_semimajor_axis, inner_eccentricity, outer_eccentricity,
        relative_inclination, inner_argument_of_pericenter, outer_argument_of_pericenter,
        inner_longitude_of_ascending_node)

    # outer node fixed opposite the inner node (difference of pi)
    outer_longitude_of_ascending_node = inner_longitude_of_ascending_node - np.pi

    stars = make_stars(inner_primary_mass, inner_secondary_mass, outer_mass)
    bins = make_bins(stars, inner_semimajor_axis, outer_semimajor_axis,
        inner_eccentricity, outer_eccentricity,
        inner_argument_of_pericenter, outer_argument_of_pericenter,
        inner_longitude_of_ascending_node, outer_longitude_of_ascending_node)

    return stars, bins, correct_params
#-------
#setup community codes
def setup_stellar_code(stellar_code, stars):
    """Add the stars to the stellar evolution code and return the code."""
    stellar_code.particles.add_particles(stars)
    return stellar_code
def setup_secular_code(triple, secular_code, stop_at_semisecular_regime):
    """Register the triple with the secular code and configure its parameters.

    Adds *triple* to the secular code, synchronizes the model time, and
    switches on/off the physical terms and stopping conditions used by TRES.
    Returns the configured secular code.

    Parameters
    ----------
    triple : Particle
        Root particle of the triple hierarchy.
    secular_code : SecularTriple
        The secular evolution code instance to configure.
    stop_at_semisecular_regime : bool
        Whether to enable the semisecular-regime stopping condition.
    """
    triple_set = triple.as_set()
    triple_time = triple_set.time
    secular_code.triples.add_particles(triple_set)
    secular_code.parameters.verbose = False
#    secular_code.parameters.verbose = True

    #needed for initialisation in some circumstances
    secular_code.model_time = triple_time

    secular_code.parameters.equations_of_motion_specification = 0
    secular_code.parameters.roche_radius_specification = 0
    #0: eccentric eggleton, 1: sepinsky, 2: classical circular eggleton
    secular_code.parameters.stability_limit_specification = 0
    #for stars 0, 5-6, for exoplanets 1-4
    #0: mardling & aarseth 2001, 1:petrovich et al. 2015 simple, 2:petrovich et al. 2015
    #3: holman et al. 98 s-type, 4: holman et al. 98 p-type,
    #5: vynatheya+ 22
    #6: tory+ 22

    # which dynamical terms are included in the equations of motion
    secular_code.parameters.ignore_tertiary = False

    secular_code.parameters.include_quadrupole_terms = True
    secular_code.parameters.include_octupole_terms = True
    secular_code.parameters.include_inner_wind_terms = True
    secular_code.parameters.include_outer_wind_terms = True
    secular_code.parameters.include_inner_RLOF_terms = True
    secular_code.parameters.include_outer_RLOF_terms = True
    secular_code.parameters.include_magnetic_braking_terms = False # not tested

    secular_code.parameters.include_inner_tidal_terms = True
    secular_code.parameters.include_outer_tidal_terms = True

    secular_code.parameters.include_1PN_inner_terms = True
    secular_code.parameters.include_1PN_outer_terms = True
    secular_code.parameters.include_1PN_inner_outer_terms = False ### warning: probably broken
    secular_code.parameters.include_25PN_inner_terms = True
    secular_code.parameters.include_25PN_outer_terms = True

    # stopping conditions checked by the secular code
    secular_code.parameters.check_for_dynamical_stability = True
    secular_code.parameters.check_for_dynamical_stability_at_initialisation = True

    secular_code.parameters.check_for_semisecular_regime = stop_at_semisecular_regime
    secular_code.parameters.check_for_semisecular_regime_at_initialisation = stop_at_semisecular_regime

    secular_code.parameters.check_for_inner_collision = True
    secular_code.parameters.check_for_outer_collision = True

    secular_code.parameters.check_for_inner_RLOF = True
    secular_code.parameters.check_for_outer_RLOF = True

    secular_code.parameters.include_spin_radius_mass_coupling_terms_star1 = True
    secular_code.parameters.include_spin_radius_mass_coupling_terms_star2 = True
    secular_code.parameters.include_spin_radius_mass_coupling_terms_star3 = True

    # accuracy of secular code
#    secular_code.parameters.input_precision = 1.0e-10#1.0e-5
#    secular_code.parameters.relative_tolerance = 1.0e-10
#    secular_code.parameters.threshold_value_of_e_in_for_setting_tidal_e_in_dot_zero = 1.0e-12
    secular_code.parameters.threshold_value_of_spin_angular_frequency_for_setting_spin_angular_frequency_dot_moment_of_inertia_plus_wind_changes_zero = 1.0e-7|units.Myr**-1

    secular_code.parameters.include_linear_mass_change = True #needed for Jspin conservation
    secular_code.parameters.include_linear_radius_change = True #needed for Jspin conservation

#    channel_from_secular = secular_code.triples.new_channel_to(triple_set)
#    channel_to_secular = triple_set.new_channel_to(secular_code.triples)
    return secular_code
#-------
|
amusecodeREPO_NAMETRESPATH_START.@TRES_extracted@TRES-main@TRES_setup.py@.PATH_END.py
|
{
"filename": "_sizesrc.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/table/hoverlabel/font/_sizesrc.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class SizesrcValidator(_plotly_utils.basevalidators.SrcValidator):
    """Validator for the ``sizesrc`` property of ``table.hoverlabel.font``."""

    def __init__(
        self, plotly_name="sizesrc", parent_name="table.hoverlabel.font", **kwargs
    ):
        # pull the defaults out of kwargs so explicit values win
        edit_type = kwargs.pop("edit_type", "none")
        role = kwargs.pop("role", "info")
        super(SizesrcValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            role=role,
            **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@table@hoverlabel@font@_sizesrc.py@.PATH_END.py
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.