metadata
dict | text
stringlengths 0
40.6M
| id
stringlengths 14
255
|
|---|---|---|
{
"filename": "feature_request.md",
"repo_name": "smirik/resonances",
"repo_path": "resonances_extracted/resonances-main/.github/ISSUE_TEMPLATE/feature_request.md",
"type": "Markdown"
}
|
---
name: Feature request
about: Suggest an idea for this project
title: ''
labels: ''
assignees: ''
---
**Is your feature request related to a problem? Please describe.**
A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
**Describe the solution you'd like**
A clear and concise description of what you want to happen.
**Describe alternatives you've considered**
A clear and concise description of any alternative solutions or features you've considered.
**Additional context**
Add any other context or screenshots about the feature request here.
|
smirikREPO_NAMEresonancesPATH_START.@resonances_extracted@resonances-main@.github@ISSUE_TEMPLATE@feature_request.md@.PATH_END.py
|
{
"filename": "_open.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/ohlc/_open.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class OpenValidator(_plotly_utils.basevalidators.DataArrayValidator):
    """Validator for the ``open`` data array of an ``ohlc`` trace."""

    def __init__(self, plotly_name="open", parent_name="ohlc", **kwargs):
        # Pop defaults out of kwargs so explicit caller overrides win and
        # the remaining kwargs pass through untouched.
        edit_type = kwargs.pop("edit_type", "calc")
        role = kwargs.pop("role", "data")
        super(OpenValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            role=role,
            **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@ohlc@_open.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/indicator/stream/__init__.py",
"type": "Python"
}
|
import sys
from typing import TYPE_CHECKING
# Lazy-import shim for this validators subpackage:
# - On Python < 3.7 there is no module-level __getattr__ (PEP 562), and
#   static type checkers need real imports, so import validators eagerly.
# - Otherwise defer loading via relative_import, which installs __all__,
#   __getattr__, and __dir__ so each validator module is only imported on
#   first attribute access (keeps `import plotly` fast).
if sys.version_info < (3, 7) or TYPE_CHECKING:
    from ._token import TokenValidator
    from ._maxpoints import MaxpointsValidator
else:
    from _plotly_utils.importers import relative_import
    __all__, __getattr__, __dir__ = relative_import(
        __name__, [], ["._token.TokenValidator", "._maxpoints.MaxpointsValidator"]
    )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@indicator@stream@__init__.py@.PATH_END.py
|
{
"filename": "flavor_transformation.py",
"repo_name": "SNEWS2/snewpy",
"repo_path": "snewpy_extracted/snewpy-main/python/snewpy/flavor_transformation.py",
"type": "Python"
}
|
# -*- coding: utf-8 -*-
"""Supernova oscillation physics for flavors e, X, e-bar, X-bar.
For measured mixing angles and latest global analysis results, visit
http://www.nu-fit.org/.
"""
from abc import abstractmethod, ABC
import numpy as np
from astropy import units as u
from astropy import constants as c
from .neutrino import MassHierarchy, MixingParameters
class FlavorTransformation(ABC):
    """Generic interface to compute neutrino and antineutrino survival probability.

    Every probability method below shares one signature:

    Parameters
    ----------
    t : float or ndarray
        List of times.
    E : float or ndarray
        List of energies.

    Returns
    -------
    float or ndarray
        Transition probability.
    """

    @abstractmethod
    def prob_ee(self, t, E):
        """Electron neutrino survival probability."""

    @abstractmethod
    def prob_ex(self, t, E):
        """X -> e neutrino transition probability."""

    @abstractmethod
    def prob_xx(self, t, E):
        """Flavor X neutrino survival probability."""

    @abstractmethod
    def prob_xe(self, t, E):
        """e -> X neutrino transition probability."""

    @abstractmethod
    def prob_eebar(self, t, E):
        """Electron antineutrino survival probability."""

    @abstractmethod
    def prob_exbar(self, t, E):
        """X -> e antineutrino transition probability."""

    @abstractmethod
    def prob_xxbar(self, t, E):
        """X -> X antineutrino survival probability."""

    @abstractmethod
    def prob_xebar(self, t, E):
        """e -> X antineutrino transition probability."""
class NoTransformation(FlavorTransformation):
    """Survival probabilities for no oscillation case.

    Every ``prob_*`` method takes times ``t`` (float or ndarray) and
    energies ``E`` (float or ndarray) and returns the transition
    probability as a float or ndarray.
    """

    def __init__(self):
        pass

    def prob_ee(self, t, E):
        """Electron neutrino survival probability."""
        # With no oscillations, a nu_e always stays a nu_e.
        return 1.

    def prob_ex(self, t, E):
        """X -> e neutrino transition probability."""
        p = self.prob_ee(t, E)
        return 1. - p

    def prob_xx(self, t, E):
        """Flavor X neutrino survival probability."""
        p = self.prob_ee(t, E)
        return (1. + p) / 2.

    def prob_xe(self, t, E):
        """e -> X neutrino transition probability."""
        p = self.prob_ee(t, E)
        return (1. - p) / 2.

    def prob_eebar(self, t, E):
        """Electron antineutrino survival probability."""
        return 1.

    def prob_exbar(self, t, E):
        """X -> e antineutrino transition probability."""
        p = self.prob_eebar(t, E)
        return 1. - p

    def prob_xxbar(self, t, E):
        """X -> X antineutrino survival probability."""
        p = self.prob_eebar(t, E)
        return (1. + p) / 2.

    def prob_xebar(self, t, E):
        """e -> X antineutrino transition probability."""
        p = self.prob_eebar(t, E)
        return (1. - p) / 2.
class CompleteExchange(FlavorTransformation):
    """Survival probabilities for the case when the electron flavors are completely exchanged with the x flavor.

    Every ``prob_*`` method takes times ``t`` (float or ndarray) and
    energies ``E`` (float or ndarray) and returns the transition
    probability as a float or ndarray.
    """

    def __init__(self):
        pass

    def prob_ee(self, t, E):
        """Electron neutrino survival probability."""
        # Complete exchange: no nu_e survives as nu_e.
        return 0.

    def prob_ex(self, t, E):
        """X -> e neutrino transition probability."""
        p = self.prob_ee(t, E)
        return 1. - p

    def prob_xx(self, t, E):
        """Flavor X neutrino survival probability."""
        p = self.prob_ee(t, E)
        return (1. + p) / 2.

    def prob_xe(self, t, E):
        """e -> X neutrino transition probability."""
        p = self.prob_ee(t, E)
        return (1. - p) / 2.

    def prob_eebar(self, t, E):
        """Electron antineutrino survival probability."""
        return 0.

    def prob_exbar(self, t, E):
        """X -> e antineutrino transition probability."""
        p = self.prob_eebar(t, E)
        return 1. - p

    def prob_xxbar(self, t, E):
        """X -> X antineutrino survival probability."""
        p = self.prob_eebar(t, E)
        return (1. + p) / 2.

    def prob_xebar(self, t, E):
        """e -> X antineutrino transition probability."""
        p = self.prob_eebar(t, E)
        return (1. - p) / 2.
class AdiabaticMSW(FlavorTransformation):
    """Adiabatic MSW effect.

    Every ``prob_*`` method takes times ``t`` (float or ndarray) and
    energies ``E`` (float or ndarray) and returns the transition
    probability as a float or ndarray.
    """

    def __init__(self, mix_angles=None, mh=MassHierarchy.NORMAL):
        """Initialize transformation matrix.

        Parameters
        ----------
        mix_angles : tuple or None
            If not None, override default mixing angles using tuple (theta12, theta13, theta23).
        mh : MassHierarchy
            MassHierarchy.NORMAL or MassHierarchy.INVERTED.

        Raises
        ------
        TypeError
            If `mh` is not a MassHierarchy member.
        """
        # isinstance is the idiomatic type check; `type(x) == T` needlessly
        # rejects subclasses and is flagged by linters.
        if not isinstance(mh, MassHierarchy):
            raise TypeError('mh must be of type MassHierarchy')
        self.mass_order = mh
        if mix_angles is not None:
            theta12, theta13, theta23 = mix_angles
        else:
            pars = MixingParameters(mh)
            theta12, theta13, theta23 = pars.get_mixing_angles()
        # |U_ei|^2: electron-flavor content of each mass eigenstate.
        self.De1 = float((np.cos(theta12) * np.cos(theta13))**2)
        self.De2 = float((np.sin(theta12) * np.cos(theta13))**2)
        self.De3 = float(np.sin(theta13)**2)

    def prob_ee(self, t, E):
        """Electron neutrino survival probability."""
        if self.mass_order == MassHierarchy.NORMAL:
            return self.De3
        else:
            return self.De2

    def prob_ex(self, t, E):
        """X -> e neutrino transition probability."""
        return 1. - self.prob_ee(t, E)

    def prob_xx(self, t, E):
        """Flavor X neutrino survival probability."""
        return (1. + self.prob_ee(t, E)) / 2.

    def prob_xe(self, t, E):
        """e -> X neutrino transition probability."""
        return (1. - self.prob_ee(t, E)) / 2.

    def prob_eebar(self, t, E):
        """Electron antineutrino survival probability."""
        if self.mass_order == MassHierarchy.NORMAL:
            return self.De1
        else:
            return self.De3

    def prob_exbar(self, t, E):
        """X -> e antineutrino transition probability."""
        return 1. - self.prob_eebar(t, E)

    def prob_xxbar(self, t, E):
        """X -> X antineutrino survival probability."""
        return (1. + self.prob_eebar(t, E)) / 2.

    def prob_xebar(self, t, E):
        """e -> X antineutrino transition probability."""
        return (1. - self.prob_eebar(t, E)) / 2.
class NonAdiabaticMSWH(FlavorTransformation):
    """Nonadiabatic MSW effect.

    Every ``prob_*`` method takes times ``t`` (float or ndarray) and
    energies ``E`` (float or ndarray) and returns the transition
    probability as a float or ndarray.
    """

    def __init__(self, mix_angles=None, mh=MassHierarchy.NORMAL):
        """Initialize transformation matrix.

        Parameters
        ----------
        mix_angles : tuple or None
            If not None, override default mixing angles using tuple (theta12, theta13, theta23).
        mh : MassHierarchy
            MassHierarchy.NORMAL or MassHierarchy.INVERTED.

        Raises
        ------
        TypeError
            If `mh` is not a MassHierarchy member.
        """
        # isinstance is the idiomatic type check; `type(x) == T` needlessly
        # rejects subclasses and is flagged by linters.
        if not isinstance(mh, MassHierarchy):
            raise TypeError('mh must be of type MassHierarchy')
        self.mass_order = mh
        if mix_angles is not None:
            theta12, theta13, theta23 = mix_angles
        else:
            pars = MixingParameters(mh)
            theta12, theta13, theta23 = pars.get_mixing_angles()
        # |U_ei|^2: electron-flavor content of each mass eigenstate.
        self.De1 = float((np.cos(theta12) * np.cos(theta13))**2)
        self.De2 = float((np.sin(theta12) * np.cos(theta13))**2)
        self.De3 = float(np.sin(theta13)**2)

    def prob_ee(self, t, E):
        """Electron neutrino survival probability."""
        # Nonadiabatic H resonance: survival probability is De2 in
        # both orderings.
        return self.De2

    def prob_ex(self, t, E):
        """X -> e neutrino transition probability."""
        return 1. - self.prob_ee(t, E)

    def prob_xx(self, t, E):
        """Flavor X neutrino survival probability."""
        return (1. + self.prob_ee(t, E)) / 2.

    def prob_xe(self, t, E):
        """e -> X neutrino transition probability."""
        return (1. - self.prob_ee(t, E)) / 2.

    def prob_eebar(self, t, E):
        """Electron antineutrino survival probability."""
        return self.De1

    def prob_exbar(self, t, E):
        """X -> e antineutrino transition probability."""
        return 1. - self.prob_eebar(t, E)

    def prob_xxbar(self, t, E):
        """X -> X antineutrino survival probability."""
        return (1. + self.prob_eebar(t, E)) / 2.

    def prob_xebar(self, t, E):
        """e -> X antineutrino transition probability."""
        return (1. - self.prob_eebar(t, E)) / 2.
class TwoFlavorDecoherence(FlavorTransformation):
    """Star-earth transit survival probability: two flavor case.

    Every ``prob_*`` method takes times ``t`` (float or ndarray) and
    energies ``E`` (float or ndarray) and returns the transition
    probability as a float or ndarray.
    """

    def __init__(self, mix_angles=None, mh=MassHierarchy.NORMAL):
        """Initialize transformation matrix.

        Parameters
        ----------
        mix_angles : tuple or None
            If not None, override default mixing angles using tuple (theta12, theta13, theta23).
        mh : MassHierarchy
            MassHierarchy.NORMAL or MassHierarchy.INVERTED.

        Raises
        ------
        TypeError
            If `mh` is not a MassHierarchy member.
        """
        # isinstance is the idiomatic type check; `type(x) == T` needlessly
        # rejects subclasses and is flagged by linters.
        if not isinstance(mh, MassHierarchy):
            raise TypeError('mh must be of type MassHierarchy')
        self.mass_order = mh
        if mix_angles is not None:
            theta12, theta13, theta23 = mix_angles
        else:
            pars = MixingParameters(mh)
            theta12, theta13, theta23 = pars.get_mixing_angles()
        # |U_ei|^2: electron-flavor content of each mass eigenstate.
        self.De1 = float((np.cos(theta12) * np.cos(theta13))**2)
        self.De2 = float((np.sin(theta12) * np.cos(theta13))**2)
        self.De3 = float(np.sin(theta13)**2)

    def prob_ee(self, t, E):
        """Electron neutrino survival probability."""
        # NMO: decoherence equally mixes the two resonant states (2, 3).
        if self.mass_order == MassHierarchy.NORMAL:
            return (self.De2 + self.De3) / 2.
        else:
            return self.De2

    def prob_ex(self, t, E):
        """X -> e neutrino transition probability."""
        return 1. - self.prob_ee(t, E)

    def prob_xx(self, t, E):
        """Flavor X neutrino survival probability."""
        return (1. + self.prob_ee(t, E)) / 2.

    def prob_xe(self, t, E):
        """e -> X neutrino transition probability."""
        return (1. - self.prob_ee(t, E)) / 2.

    def prob_eebar(self, t, E):
        """Electron antineutrino survival probability."""
        # IMO: decoherence equally mixes the two resonant states (1, 3).
        if self.mass_order == MassHierarchy.NORMAL:
            return self.De1
        else:
            return (self.De1 + self.De3) / 2.

    def prob_exbar(self, t, E):
        """X -> e antineutrino transition probability."""
        return 1. - self.prob_eebar(t, E)

    def prob_xxbar(self, t, E):
        """X -> X antineutrino survival probability."""
        return (1. + self.prob_eebar(t, E)) / 2.

    def prob_xebar(self, t, E):
        """e -> X antineutrino transition probability."""
        return (1. - self.prob_eebar(t, E)) / 2.
class ThreeFlavorDecoherence(FlavorTransformation):
    """Star-earth transit survival probability: three flavor case.

    Every ``prob_*`` method takes times ``t`` (float or ndarray) and
    energies ``E`` (float or ndarray) and returns the transition
    probability as a float or ndarray.
    """

    def __init__(self):
        pass

    def prob_ee(self, t, E):
        """Electron neutrino survival probability."""
        # Full decoherence equipartitions the three flavors.
        return 1./3.

    def prob_ex(self, t, E):
        """X -> e neutrino transition probability."""
        p = self.prob_ee(t, E)
        return 1. - p

    def prob_xx(self, t, E):
        """Flavor X neutrino survival probability."""
        p = self.prob_ee(t, E)
        return (1. + p) / 2.

    def prob_xe(self, t, E):
        """e -> X neutrino transition probability."""
        p = self.prob_ee(t, E)
        return (1. - p) / 2.

    def prob_eebar(self, t, E):
        """Electron antineutrino survival probability."""
        return 1./3.

    def prob_exbar(self, t, E):
        """X -> e antineutrino transition probability."""
        p = self.prob_eebar(t, E)
        return 1. - p

    def prob_xxbar(self, t, E):
        """X -> X antineutrino survival probability."""
        p = self.prob_eebar(t, E)
        return (1. + p) / 2.

    def prob_xebar(self, t, E):
        """e -> X antineutrino transition probability."""
        p = self.prob_eebar(t, E)
        return (1. - p) / 2.
class NeutrinoDecay(FlavorTransformation):
    """Decay effect, where the heaviest neutrino decays to the lightest
    neutrino. For a description and typical parameters, see A. de Gouvêa et al.,
    PRD 101:043013, 2020, arXiv:1910.01127.

    Every ``prob_*`` method takes times ``t`` (float or ndarray) and
    energies ``E`` (float or ndarray) and returns the transition
    probability as a float or ndarray.
    """

    def __init__(self, mix_angles=None, mass=1*u.eV/c.c**2, tau=1*u.day, dist=10*u.kpc, mh=MassHierarchy.NORMAL):
        """Initialize transformation matrix.

        Parameters
        ----------
        mix_angles : tuple or None
            If not None, override default mixing angles using tuple (theta12, theta13, theta23).
        mass : astropy.units.quantity.Quantity
            Mass of the heaviest neutrino; expect in eV/c^2.
        tau : astropy.units.quantity.Quantity
            Lifetime of the heaviest neutrino.
        dist : astropy.units.quantity.Quantity
            Distance to the supernova.
        mh : MassHierarchy
            MassHierarchy.NORMAL or MassHierarchy.INVERTED.

        Raises
        ------
        TypeError
            If `mh` is not a MassHierarchy member.
        """
        # isinstance is the idiomatic type check; `type(x) == T` needlessly
        # rejects subclasses and is flagged by linters.
        if not isinstance(mh, MassHierarchy):
            raise TypeError('mh must be of type MassHierarchy')
        self.mass_order = mh
        if mix_angles is not None:
            theta12, theta13, theta23 = mix_angles
        else:
            pars = MixingParameters(mh)
            theta12, theta13, theta23 = pars.get_mixing_angles()
        # |U_ei|^2: electron-flavor content of each mass eigenstate.
        self.De1 = float((np.cos(theta12) * np.cos(theta13))**2)
        self.De2 = float((np.sin(theta12) * np.cos(theta13))**2)
        self.De3 = float(np.sin(theta13)**2)
        self.m = mass
        self.tau = tau
        self.d = dist

    def gamma(self, E):
        """Decay width of the heaviest neutrino mass.

        Parameters
        ----------
        E : float
            Energy of the nu3.

        Returns
        -------
        Gamma : float
            Decay width of the neutrino mass, in units of 1/length.

        :meta private:
        """
        return self.m*c.c / (E*self.tau)

    def _decay_weight(self, E):
        """Fraction of the heaviest mass state that survives the flight
        over distance ``self.d`` without decaying.

        :meta private:
        """
        return np.exp(-self.gamma(E)*self.d)

    def prob_ee(self, t, E):
        """Electron neutrino survival probability."""
        survive = self._decay_weight(E)
        # NMO: heaviest state nu3 decays to lightest nu1.
        if self.mass_order == MassHierarchy.NORMAL:
            pe_array = self.De1*(1-survive) + self.De3*survive
        # IMO: heaviest state nu2 decays to lightest nu3.
        else:
            pe_array = self.De2*survive + self.De3*(1-survive)
        return pe_array

    def prob_ex(self, t, E):
        """X -> e neutrino transition probability."""
        if self.mass_order == MassHierarchy.NORMAL:
            return self.De1 + self.De3
        else:
            return self.De1 + self.De2

    def prob_xx(self, t, E):
        """Flavor X neutrino survival probability."""
        # prob_ex is shared between the two X flavors, hence the /2.
        return 1. - self.prob_ex(t, E) / 2.

    def prob_xe(self, t, E):
        """e -> X neutrino transition probability."""
        return (1. - self.prob_ee(t, E)) / 2.

    def prob_eebar(self, t, E):
        """Electron antineutrino survival probability."""
        return self.De3

    def prob_exbar(self, t, E):
        """X -> e antineutrino transition probability."""
        survive = self._decay_weight(E)
        if self.mass_order == MassHierarchy.NORMAL:
            pxbar_array = self.De1*(1-survive) + self.De2 + self.De3*survive
        else:
            pxbar_array = self.De1 + self.De2*survive + self.De3*(1-survive)
        return pxbar_array

    def prob_xxbar(self, t, E):
        """X -> X antineutrino survival probability."""
        return 1. - self.prob_exbar(t, E) / 2.

    def prob_xebar(self, t, E):
        """e -> X antineutrino transition probability."""
        return (1. - self.prob_eebar(t, E)) / 2.
class AdiabaticMSWes(FlavorTransformation):
    """A four-neutrino mixing prescription. The assumptions used are that:

    1. the fourth neutrino mass is the heaviest but not so large that the electron-sterile resonances
       are inside the neutrinosphere;
    2. the "outer" or H' electron-sterile MSW resonance is adiabatic;
    3. the "inner" or H'' electron-sterile MSW resonance (where the electron fraction = 1/3) is non-adiabatic.

    For further insight see, for example, Esmaili, Peres, and Serpico, Phys. Rev. D 90, 033013 (2014).

    Every ``prob_*`` method takes times ``t`` (float or ndarray) and
    energies ``E`` (float or ndarray) and returns the transition
    probability as a float or ndarray.
    """

    def __init__(self, mix_angles, mh=MassHierarchy.NORMAL):
        """Initialize transformation matrix.

        Parameters
        ----------
        mix_angles : tuple
            Values for mixing angles (theta12, theta13, theta23, theta14).
        mh : MassHierarchy
            MassHierarchy.NORMAL or MassHierarchy.INVERTED.

        Raises
        ------
        TypeError
            If `mh` is not a MassHierarchy member.
        """
        # isinstance is the idiomatic type check; `type(x) == T` needlessly
        # rejects subclasses and is flagged by linters.
        if not isinstance(mh, MassHierarchy):
            raise TypeError('mh must be of type MassHierarchy')
        self.mass_order = mh
        theta12, theta13, theta23, theta14 = mix_angles
        # |U_ei|^2 and |U_si|^2: electron- and sterile-flavor content of
        # each of the four mass eigenstates.
        self.De1 = float((np.cos(theta12) * np.cos(theta13) * np.cos(theta14))**2)
        self.De2 = float((np.sin(theta12) * np.cos(theta13) * np.cos(theta14))**2)
        self.De3 = float((np.sin(theta13) * np.cos(theta14))**2)
        self.De4 = float((np.sin(theta14))**2)
        self.Ds1 = float((np.cos(theta12) * np.cos(theta13) * np.sin(theta14))**2)
        self.Ds2 = float((np.sin(theta12) * np.cos(theta13) * np.sin(theta14))**2)
        self.Ds3 = float((np.sin(theta13) * np.sin(theta14))**2)
        self.Ds4 = float((np.cos(theta14))**2)

    def prob_ee(self, t, E):
        """e -> e neutrino transition probability."""
        # Adiabatic conversion through the outer resonance lands nu_e
        # in the fourth mass state.
        return self.De4

    def prob_ex(self, t, E):
        """x -> e neutrino transition probability."""
        if self.mass_order == MassHierarchy.NORMAL:
            return self.De1 + self.De2
        else:
            return self.De1 + self.De3

    def prob_xx(self, t, E):
        """x -> x neutrino transition probability."""
        if self.mass_order == MassHierarchy.NORMAL:
            return ( 2 - self.De1 - self.De2 - self.Ds1 - self.Ds2 ) / 2
        else:
            return ( 2 - self.De1 - self.De3 - self.Ds1 - self.Ds3 ) / 2

    def prob_xe(self, t, E):
        """e -> x neutrino transition probability."""
        return ( 1 - self.De4 - self.Ds4 )/2

    def prob_eebar(self, t, E):
        """e -> e antineutrino transition probability."""
        if self.mass_order == MassHierarchy.NORMAL:
            return self.De1
        else:
            return self.De3

    def prob_exbar(self, t, E):
        """x -> e antineutrino transition probability."""
        if self.mass_order == MassHierarchy.NORMAL:
            return self.De3 + self.De4
        else:
            return self.De2 + self.De4

    def prob_xxbar(self, t, E):
        """x -> x antineutrino transition probability."""
        if self.mass_order == MassHierarchy.NORMAL:
            return ( 2 - self.De3 - self.De4 - self.Ds3 - self.Ds4 ) / 2
        else:
            return ( 2 - self.De2 - self.De4 - self.Ds2 - self.Ds4 ) / 2

    def prob_xebar(self, t, E):
        """e -> x antineutrino transition probability."""
        if self.mass_order == MassHierarchy.NORMAL:
            return ( 1 - self.De1 - self.Ds1 ) / 2
        else:
            return ( 1 - self.De3 - self.Ds3 ) / 2
class NonAdiabaticMSWes(FlavorTransformation):
    """A four-neutrino mixing prescription. The assumptions used are that:

    1. the fourth neutrino mass is the heaviest but not so large that the electron-sterile resonances
       are inside the neutrinosphere;
    2. the "outer" or H' electron-sterile MSW resonance is non-adiabatic;
    3. the "inner" or H'' electron-sterile MSW resonance (where the electron fraction = 1/3) is non-adiabatic.

    For further insight see, for example, Esmaili, Peres, and Serpico, Phys. Rev. D 90, 033013 (2014).

    Every ``prob_*`` method takes times ``t`` (float or ndarray) and
    energies ``E`` (float or ndarray) and returns the transition
    probability as a float or ndarray.
    """

    def __init__(self, mix_angles, mh=MassHierarchy.NORMAL):
        """Initialize transformation matrix.

        Parameters
        ----------
        mix_angles : tuple
            Values for mixing angles (theta12, theta13, theta23, theta14).
        mh : MassHierarchy
            MassHierarchy.NORMAL or MassHierarchy.INVERTED.

        Raises
        ------
        TypeError
            If `mh` is not a MassHierarchy member.
        """
        # isinstance is the idiomatic type check; `type(x) == T` needlessly
        # rejects subclasses and is flagged by linters.
        if not isinstance(mh, MassHierarchy):
            raise TypeError('mh must be of type MassHierarchy')
        self.mass_order = mh
        theta12, theta13, theta23, theta14 = mix_angles
        # |U_ei|^2 and |U_si|^2: electron- and sterile-flavor content of
        # each of the four mass eigenstates.
        self.De1 = float((np.cos(theta12) * np.cos(theta13) * np.cos(theta14))**2)
        self.De2 = float((np.sin(theta12) * np.cos(theta13) * np.cos(theta14))**2)
        self.De3 = float((np.sin(theta13) * np.cos(theta14))**2)
        self.De4 = float((np.sin(theta14))**2)
        self.Ds1 = float((np.cos(theta12) * np.cos(theta13) * np.sin(theta14))**2)
        self.Ds2 = float((np.sin(theta12) * np.cos(theta13) * np.sin(theta14))**2)
        self.Ds3 = float((np.sin(theta13) * np.sin(theta14))**2)
        self.Ds4 = float((np.cos(theta14))**2)

    def prob_ee(self, t, E):
        """e -> e neutrino transition probability."""
        if self.mass_order == MassHierarchy.NORMAL:
            return self.De3
        else:
            return self.De2

    def prob_ex(self, t, E):
        """x -> e neutrino transition probability."""
        if self.mass_order == MassHierarchy.NORMAL:
            return self.De1 + self.De2
        else:
            return self.De1 + self.De3

    def prob_xx(self, t, E):
        """x -> x neutrino transition probability."""
        if self.mass_order == MassHierarchy.NORMAL:
            return ( 2 - self.De1 - self.De2 - self.Ds1 - self.Ds2 ) / 2
        else:
            return ( 2 - self.De1 - self.De3 - self.Ds1 - self.Ds3 ) / 2

    def prob_xe(self, t, E):
        """e -> x neutrino transition probability."""
        if self.mass_order == MassHierarchy.NORMAL:
            return (1 - self.De3 - self.Ds3)/2
        else:
            return (1 - self.De2 - self.Ds2) / 2

    def prob_eebar(self, t, E):
        """e -> e antineutrino transition probability."""
        if self.mass_order == MassHierarchy.NORMAL:
            return self.De1
        else:
            return self.De3

    def prob_exbar(self, t, E):
        """x -> e antineutrino transition probability."""
        if self.mass_order == MassHierarchy.NORMAL:
            return self.De2 + self.De3
        else:
            return self.De1 + self.De2

    def prob_xxbar(self, t, E):
        """x -> x antineutrino transition probability."""
        if self.mass_order == MassHierarchy.NORMAL:
            return ( 2 - self.De2 - self.De3 - self.Ds2 - self.Ds3 ) / 2
        else:
            return ( 2 - self.De1 - self.De2 - self.Ds1 - self.Ds2 ) / 2

    def prob_xebar(self, t, E):
        """e -> x antineutrino transition probability."""
        if self.mass_order == MassHierarchy.NORMAL:
            return ( 1 - self.De1 - self.Ds1 ) / 2
        else:
            return ( 1 - self.De3 - self.Ds3 ) / 2
class QuantumDecoherence(FlavorTransformation):
    """Quantum Decoherence, where propagation in vacuum leads to equipartition
    of states. For a description and typical parameters, see M. V. dos Santos et al.,
    2023, arXiv:2306.17591.
    """
    def __init__(self, mix_angles=None, Gamma3=1e-27*u.eV, Gamma8=1e-27*u.eV, dist=10*u.kpc, n=0, E0=10*u.MeV, mh=MassHierarchy.NORMAL):
        """Initialize transformation matrix.

        Parameters
        ----------
        mix_angles : tuple or None
            If not None, override default mixing angles using tuple (theta12, theta13, theta23).
        Gamma3 : astropy.units.quantity.Quantity
            Quantum decoherence parameter; expect in eV.
        Gamma8 : astropy.units.quantity.Quantity
            Quantum decoherence parameter; expect in eV.
        dist : astropy.units.quantity.Quantity
            Distance to the supernova.
        n : float
            Exponent of power law for energy dependent quantum decoherence parameters,
            i.e. Gamma = Gamma0*(E/E0)**n. If not specified, it is taken as zero.
        E0 : astropy.units.quantity.Quantity
            Reference energy in the power law Gamma = Gamma0*(E/E0)**n. If not
            specified, it is taken as 10 MeV. Note that if n = 0, quantum
            decoherence parameters are independent of E0.
        mh : MassHierarchy
            MassHierarchy.NORMAL or MassHierarchy.INVERTED.

        Raises
        ------
        TypeError
            If `mh` is not a MassHierarchy member.
        """
        # isinstance (rather than an exact type() comparison) is the
        # conventional check and also accepts subclasses of MassHierarchy.
        if isinstance(mh, MassHierarchy):
            self.mass_order = mh
        else:
            raise TypeError('mh must be of type MassHierarchy')

        if mix_angles is not None:
            theta12, theta13, theta23 = mix_angles
        else:
            pars = MixingParameters(mh)
            theta12, theta13, theta23 = pars.get_mixing_angles()

        # Dei = |U_ei|^2: squared electron-row elements of the mixing matrix.
        self.De1 = float((np.cos(theta12) * np.cos(theta13))**2)
        self.De2 = float((np.sin(theta12) * np.cos(theta13))**2)
        self.De3 = float(np.sin(theta13)**2)

        # Convert the decoherence parameters from energy to inverse length,
        # so that Gamma * d in the damping exponents is dimensionless.
        self.Gamma3 = (Gamma3 / (c.hbar.to('eV s') * c.c)).to('1/kpc')
        self.Gamma8 = (Gamma8 / (c.hbar.to('eV s') * c.c)).to('1/kpc')
        self.d = dist
        self.n = n
        self.E0 = E0

    def _damping_factors(self, E):
        """Return the two exponential damping factors (e3, e8) at energy E.

        e3 = exp(-(Gamma3 + Gamma8/3) * (E/E0)**n * d) and
        e8 = exp(-Gamma8 * (E/E0)**n * d) are shared by all mass-state
        transition probabilities below; computing them in one place avoids
        re-evaluating the energy power law several times per call.

        :meta private:
        """
        scale = (E/self.E0)**self.n * self.d
        e3 = np.exp(-(self.Gamma3 + self.Gamma8 / 3) * scale)
        e8 = np.exp(-self.Gamma8 * scale)
        return e3, e8

    def P11(self, E):
        """Survival probability of state nu1 in vacuum.

        Parameters
        ----------
        E : float
            Energy.

        Returns
        -------
        P11 : float
            Survival probability of state nu1 in vacuum.

        :meta private:
        """
        e3, e8 = self._damping_factors(E)
        return 1/3 + 1/2 * e3 + 1/6 * e8

    def P21(self, E):
        """Transition probability from the state nu2 to nu1 in vacuum.

        Parameters
        ----------
        E : float
            Energy.

        Returns
        -------
        P21 : float
            Transition probability from the state nu2 to nu1 in vacuum.
            Note that P21 = P12.

        :meta private:
        """
        e3, e8 = self._damping_factors(E)
        return 1/3 - 1/2 * e3 + 1/6 * e8

    def P22(self, E):
        """Survival probability of state nu2 in vacuum.

        Parameters
        ----------
        E : float
            Energy.

        Returns
        -------
        P22 : float
            Survival probability of state nu2 in vacuum (equal to P11).

        :meta private:
        """
        return self.P11(E)

    def P31(self, E):
        """Transition probability from the state nu3 to nu1 in vacuum.

        Parameters
        ----------
        E : float
            Energy.

        Returns
        -------
        P31 : float
            Transition probability from the state nu3 to nu1 in vacuum.
            Note that P31 = P13.

        :meta private:
        """
        _, e8 = self._damping_factors(E)
        return 1/3 - 1/3 * e8

    def P32(self, E):
        """Transition probability from the state nu3 to nu2 in vacuum.

        Parameters
        ----------
        E : float
            Energy.

        Returns
        -------
        P32 : float
            Transition probability from the state nu3 to nu2 in vacuum
            (equal to P31). Note that P32 = P23.

        :meta private:
        """
        return self.P31(E)

    def P33(self, E):
        """Survival probability of state nu3 in vacuum.

        Parameters
        ----------
        E : float
            Energy.

        Returns
        -------
        P33 : float
            Survival probability of state nu3 in vacuum.

        :meta private:
        """
        _, e8 = self._damping_factors(E)
        return 1/3 + 2/3 * e8

    def prob_ee(self, t, E):
        """Electron neutrino survival probability.

        Parameters
        ----------
        t : float or ndarray
            List of times.
        E : float or ndarray
            List of energies.

        Returns
        -------
        prob : float or ndarray
            Transition probability.
        """
        # NMO case.
        if self.mass_order == MassHierarchy.NORMAL:
            pe_array = self.P31(E)*self.De1 + self.P32(E)*self.De2 + self.P33(E)*self.De3
        # IMO case.
        else:
            pe_array = self.P22(E)*self.De2 + self.P21(E)*self.De1 + self.P32(E)*self.De3
        return pe_array

    def prob_ex(self, t, E):
        """X -> e neutrino transition probability.

        Parameters
        ----------
        t : float or ndarray
            List of times.
        E : float or ndarray
            List of energies.

        Returns
        -------
        prob : float or ndarray
            Transition probability.
        """
        return 1. - self.prob_ee(t,E)

    def prob_xx(self, t, E):
        """Flavor X neutrino survival probability.

        Parameters
        ----------
        t : float or ndarray
            List of times.
        E : float or ndarray
            List of energies.

        Returns
        -------
        prob : float or ndarray
            Transition probability.
        """
        return 1. - self.prob_ex(t,E) / 2.

    def prob_xe(self, t, E):
        """e -> X neutrino transition probability.

        Parameters
        ----------
        t : float or ndarray
            List of times.
        E : float or ndarray
            List of energies.

        Returns
        -------
        prob : float or ndarray
            Transition probability.
        """
        return (1. - self.prob_ee(t,E)) / 2.

    def prob_eebar(self, t, E):
        """Electron antineutrino survival probability.

        Parameters
        ----------
        t : float or ndarray
            List of times.
        E : float or ndarray
            List of energies.

        Returns
        -------
        prob : float or ndarray
            Transition probability.
        """
        # NMO case.
        if self.mass_order == MassHierarchy.NORMAL:
            pe_array = self.P11(E)*self.De1 + self.P21(E)*self.De2 + self.P31(E)*self.De3
        # IMO case.
        else:
            pe_array = self.P31(E)*self.De1 + self.P32(E)*self.De2 + self.P33(E)*self.De3
        return pe_array

    def prob_exbar(self, t, E):
        """X -> e antineutrino transition probability.

        Parameters
        ----------
        t : float or ndarray
            List of times.
        E : float or ndarray
            List of energies.

        Returns
        -------
        prob : float or ndarray
            Transition probability.
        """
        return 1. - self.prob_eebar(t,E)

    def prob_xxbar(self, t, E):
        """X -> X antineutrino survival probability.

        Parameters
        ----------
        t : float or ndarray
            List of times.
        E : float or ndarray
            List of energies.

        Returns
        -------
        prob : float or ndarray
            Transition probability.
        """
        return 1. - self.prob_exbar(t,E) / 2.

    def prob_xebar(self, t, E):
        """e -> X antineutrino transition probability.

        Parameters
        ----------
        t : float or ndarray
            List of times.
        E : float or ndarray
            List of energies.

        Returns
        -------
        prob : float or ndarray
            Transition probability.
        """
        return (1. - self.prob_eebar(t,E)) / 2.
|
SNEWS2REPO_NAMEsnewpyPATH_START.@snewpy_extracted@snewpy-main@python@snewpy@flavor_transformation.py@.PATH_END.py
|
{
"filename": "test_base.py",
"repo_name": "langchain-ai/langchain",
"repo_path": "langchain_extracted/langchain-master/libs/partners/openai/tests/integration_tests/embeddings/test_base.py",
"type": "Python"
}
|
"""Test OpenAI embeddings."""
import numpy as np
import openai
from langchain_openai.embeddings.base import OpenAIEmbeddings
def test_langchain_openai_embedding_documents() -> None:
    """Embedding one document yields exactly one non-empty vector."""
    texts = ["foo bar"]
    vectors = OpenAIEmbeddings().embed_documents(texts)
    assert len(vectors) == 1
    assert len(vectors[0]) > 0
def test_langchain_openai_embedding_query() -> None:
    """Embedding a single query yields a non-empty vector."""
    vector = OpenAIEmbeddings().embed_query("foo bar")
    assert len(vector) > 0
def test_langchain_openai_embeddings_dimensions() -> None:
    """The ``dimensions`` parameter controls the embedding width."""
    model = OpenAIEmbeddings(model="text-embedding-3-small", dimensions=128)
    vectors = model.embed_documents(["foo bar"])
    assert len(vectors) == 1
    assert len(vectors[0]) == 128
def test_langchain_openai_embeddings_equivalent_to_raw() -> None:
    """LangChain output matches the raw OpenAI client on special-token input."""
    texts = ["disallowed special token '<|endoftext|>'"]
    wrapper = OpenAIEmbeddings()
    via_langchain = wrapper.embed_documents(texts)[0]
    response = openai.OpenAI().embeddings.create(input=texts, model=wrapper.model)
    via_client = response.data[0].embedding
    assert np.allclose(via_langchain, via_client, atol=0.001)
async def test_langchain_openai_embeddings_equivalent_to_raw_async() -> None:
    """Async LangChain output matches the raw async OpenAI client."""
    texts = ["disallowed special token '<|endoftext|>'"]
    wrapper = OpenAIEmbeddings()
    via_langchain = (await wrapper.aembed_documents(texts))[0]
    response = await openai.AsyncOpenAI().embeddings.create(
        input=texts, model=wrapper.model
    )
    via_client = response.data[0].embedding
    assert np.allclose(via_langchain, via_client, atol=0.001)
def test_langchain_openai_embeddings_dimensions_large_num() -> None:
    """Batching path: 2000 documents all return the requested width."""
    texts = [f"foo bar {i}" for i in range(2000)]
    model = OpenAIEmbeddings(model="text-embedding-3-small", dimensions=128)
    vectors = model.embed_documents(texts)
    assert len(vectors) == 2000
    assert len(vectors[0]) == 128
|
langchain-aiREPO_NAMElangchainPATH_START.@langchain_extracted@langchain-master@libs@partners@openai@tests@integration_tests@embeddings@test_base.py@.PATH_END.py
|
{
"filename": "test_dataset.py",
"repo_name": "n-claes/legolas",
"repo_path": "legolas_extracted/legolas-master/tests/pylbo_tests/test_dataset.py",
"type": "Python"
}
|
import numpy as np
import pytest
from pylbo.exceptions import BackgroundNotPresent, MatricesNotPresent
# Reference eigenvalue (and its index in the spectrum) used by the
# eigenfunction tests below; both belong to the ds_v112 fixture dataset.
ds_v112_ev_guess = -0.14360602 + 0.00688731j
ds_v112_ev_idx = 158
# --- basic dataset / eigenfunction bookkeeping (ds_v112 & ds_v100 fixtures) ---
def test_ds_iterable(ds_v112):
    # a single dataset is iterable and yields only itself
    gen = [i for i in ds_v112]
    assert len(gen) == 1
    assert gen.pop() == ds_v112
def test_ds_has_efs(ds_v112):
    # the v1.1.2 fixture was saved with eigenfunctions present
    assert isinstance(ds_v112.has_efs, bool)
    assert ds_v112.has_efs
def test_ds_ef_names(ds_v112):
    assert isinstance(ds_v112.ef_names, list)
def test_ds_ef_names_not_present(ds_v100):
    # the v1.0.0 fixture has no eigenfunctions, so no names either
    assert ds_v100.ef_names is None
def test_ds_ef_grid(ds_v112):
    grid = ds_v112.ef_grid
    assert grid is not None
    # check increasing and monotone
    assert np.all(grid[1:] > grid[:-1])
def test_ds_ef_grid_none(ds_v100):
    # no eigenfunctions saved -> no eigenfunction grid
    grid = ds_v100.ef_grid
    assert grid is None
# --- characteristic speeds; numbers are regression values pinned to fixtures ---
def test_ds_sound_speed_invalid(ds_v112):
    # "max" is not an accepted reduction keyword (valid form is "maximum")
    with pytest.raises(ValueError):
        ds_v112.get_sound_speed(which_values="max")
def test_ds_sound_speed(ds_v112):
    cs = ds_v112.get_sound_speed()
    # spot-check a few grid points against known fixture values
    idxs = [0, 10, 21, -2]
    values = [0.40806667, 0.39936673, 0.37869727, 0.29759189]
    assert np.all(np.isclose(cs[idxs], values))
def test_ds_sound_speed_avg(ds_v112):
    cs_avg = ds_v112.get_sound_speed(which_values="average")
    assert np.isclose(cs_avg, 0.35804914)
def test_ds_sound_speed_min(ds_v112):
    cs_min = ds_v112.get_sound_speed(which_values="minimum")
    assert np.isclose(cs_min, 0.29617802)
def test_ds_sound_speed_max(ds_v112):
    cs_max = ds_v112.get_sound_speed(which_values="maximum")
    assert np.isclose(cs_max, 0.40806667)
def test_ds_alfven_speed(ds_v112):
    ca = ds_v112.get_alfven_speed()
    idxs = [2, 8, 15, -1]
    values = [0.94652052, 0.93669782, 0.91177547, 0.61700952]
    assert np.all(np.isclose(ca[idxs], values))
def test_ds_tube_speed(ds_v112):
    ct = ds_v112.get_tube_speed()
    idxs = [3, 17, -3, -7]
    values = [0.37351949, 0.35630853, 0.27120232, 0.27910677]
    assert np.all(np.isclose(ct[idxs], values))
def test_ds_tube_speed_cartesian(ds_v112_eta):
    # for the Cartesian fixture no tube speed is available, None is returned
    ct = ds_v112_eta.get_tube_speed()
    assert ct is None
# --- (magnetic) Reynolds numbers require resistivity; k0 is purely geometric ---
def test_ds_reynolds_no_eta(ds_v112):
    # fixture without resistivity -> Reynolds number unavailable
    reynolds = ds_v112.get_reynolds_nb()
    assert reynolds is None
def test_ds_reynolds(ds_v112_eta):
    reynolds = ds_v112_eta.get_reynolds_nb()
    assert np.all(np.isclose(reynolds, 3535.53390593))
def test_ds_magnetic_reynolds_no_eta(ds_v112):
    magnetic_reynolds = ds_v112.get_magnetic_reynolds_nb()
    assert magnetic_reynolds is None
def test_ds_magnetic_reynolds(ds_v112_eta):
    magnetic_reynolds = ds_v112_eta.get_magnetic_reynolds_nb()
    assert np.all(np.isclose(magnetic_reynolds, 1e4))
def test_ds_k0_squared(ds_v112):
    # expected value matches 1 + (-1.2)**2, i.e. the fixture's wave numbers
    k02 = ds_v112.get_k0_squared()
    assert np.isclose(k02, 1 + (-1.2) ** 2)
# --- matrices are only stored in some runs; stored as sparse triplets ---
def test_ds_no_matrices(ds_v112):
    # the v1.1.2 fixture was saved without matrices
    with pytest.raises(MatricesNotPresent):
        ds_v112.get_matrix_B()
    with pytest.raises(MatricesNotPresent):
        ds_v112.get_matrix_A()
def test_ds_matrix_B(ds_v100):
    # B comes back as (rows, cols, values) triplets with real values
    rows, cols, vals = ds_v100.get_matrix_B()
    assert len(rows) == len(cols) == len(vals)
    assert np.all([isinstance(i, np.integer) for i in rows])
    assert np.all([isinstance(i, np.integer) for i in cols])
    assert np.all(np.isreal(vals))
def test_ds_matrix_A(ds_v100):
    # A comes back as (rows, cols, values) triplets with complex values
    rows, cols, vals = ds_v100.get_matrix_A()
    assert len(rows) == len(cols) == len(vals)
    assert np.all([isinstance(i, np.integer) for i in rows])
    assert np.all([isinstance(i, np.integer) for i in cols])
    assert np.all([isinstance(i, complex) for i in vals])
# --- eigenfunction retrieval by eigenvalue guess or by spectrum index ---
def test_ds_get_efs_invalid_guesses(ds_v112):
    # supplying both a guess and an index positionally raises a ValueError
    with pytest.raises(ValueError):
        ds_v112.get_eigenfunctions(3 + 2j, 10)
def test_ds_get_efs_guess_single(ds_v112):
    guess = ds_v112_ev_guess
    efs = ds_v112.get_eigenfunctions(ev_guesses=guess)
    assert isinstance(efs, np.ndarray)
    (ef,) = efs
    # np.nan default can never compare close to the guess, so a missing
    # "eigenvalue" key fails the assertion.  (np.nan replaces the np.NaN
    # alias, which was removed in NumPy 2.0.)
    assert np.isclose(guess, ef.get("eigenvalue", np.nan))
def test_ds_get_efs_guess_list(ds_v112):
    guess = [ds_v112_ev_guess] * 2
    efs = ds_v112.get_eigenfunctions(ev_guesses=guess)
    assert isinstance(efs, np.ndarray)
    for i, ef in enumerate(efs):
        assert np.isclose(guess[i], ef.get("eigenvalue", np.nan))
def test_ds_get_efs_idx(ds_v112):
    efs = ds_v112.get_eigenfunctions(ev_idxs=ds_v112_ev_idx)
    assert isinstance(efs, np.ndarray)
    (ef,) = efs
    assert np.isclose(ds_v112_ev_guess, ef.get("eigenvalue", np.nan))
def test_ds_get_efs_idx_list(ds_v112):
    efs = ds_v112.get_eigenfunctions(ev_idxs=[ds_v112_ev_idx] * 2)
    assert isinstance(efs, np.ndarray)
    for ef in efs:
        assert np.isclose(ds_v112_ev_guess, ef.get("eigenvalue", np.nan))
def test_ds_get_evs(ds_v112):
    # the nearest eigenvalue to the reference guess has the reference index
    idxs, evs = ds_v112.get_nearest_eigenvalues(ds_v112_ev_guess)
    assert isinstance(idxs, np.ndarray)
    assert idxs[0] == ds_v112_ev_idx
    assert isinstance(evs, np.ndarray)
    assert np.isclose(evs[0], ds_v112_ev_guess)
def test_ds_get_continua(ds_v112):
    # one continuum value per gauss-grid point, for every continuum kind
    continua = ds_v112.continua
    assert isinstance(continua, dict)
    for value in continua.values():
        assert isinstance(value, np.ndarray)
        assert len(value) == len(ds_v112.grid_gauss)
def test_ds_get_parameters(ds_v112):
    # parameters are a flat mapping of scalar values
    params = ds_v112.parameters
    assert isinstance(params, dict)
    for value in params.values():
        assert np.isscalar(value)
# --- datasets saved without a background: background queries must fail ---
def test_ds_nobg_empty_equilibria_dict(ds_v200_tear_nobg):
    # equilibria dict still exists but is empty when no background is saved
    bg = ds_v200_tear_nobg.equilibria
    assert not ds_v200_tear_nobg.has_background
    assert isinstance(bg, dict)
    assert not bg
def test_ds_nobg_continua(ds_v200_tear_nobg):
    assert ds_v200_tear_nobg.continua is None
def test_ds_nobg_soundspeed(ds_v200_tear_nobg):
    with pytest.raises(BackgroundNotPresent):
        ds_v200_tear_nobg.get_sound_speed()
def test_ds_nobg_alfvenspeed(ds_v200_tear_nobg):
    with pytest.raises(BackgroundNotPresent):
        ds_v200_tear_nobg.get_alfven_speed()
def test_ds_nobg_tube_speed(ds_v200_tear_nobg):
    with pytest.raises(BackgroundNotPresent):
        ds_v200_tear_nobg.get_tube_speed()
def test_ds_nobg_reynolds(ds_v200_tear_nobg):
    with pytest.raises(BackgroundNotPresent):
        ds_v200_tear_nobg.get_reynolds_nb()
def test_ds_nobg_magnetic_reynolds(ds_v200_tear_nobg):
    with pytest.raises(BackgroundNotPresent):
        ds_v200_tear_nobg.get_magnetic_reynolds_nb()
n-claesREPO_NAMElegolasPATH_START.@legolas_extracted@legolas-master@tests@pylbo_tests@test_dataset.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "aneeshnaik/spam",
"repo_path": "spam_extracted/spam-master/data/__init__.py",
"type": "Python"
}
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created: 2018
Author: A. P. Naik
Description: 'data' submodule of spam package. See README for details and usage
examples.
Attributes
----------
SPARCGalaxy : class
Main object containing all relevant data for each SPARC galaxy.
names_full : list of strings, length 147
List of names of SPARC galaxies in 'full' sample, i.e. 147 galaxies
remaining after first 4 data cuts described in Naik et al. (2019).
names_standard : list of strings, length 85
List of names of SPARC galaxies in 'standard' sample, i.e. 85 galaxies
remaining after all data cuts described in Naik et al. (2019). Difference
between 'standard' and 'full' samples are that in the 'standard' case,
environmentally screened galaxies have additionally been cut from the
sample.
"""
import os as _os
import numpy as _np
from scipy.constants import parsec as _pc
# physical constants (SI units)
_kpc = 1e+3*_pc  # kiloparsec in metres
_Mpc = 1e+6*_pc  # megaparsec in metres
_Msun = 1.989e+30  # solar mass in kilograms
class SPARCGalaxy:
    """
    Class containing all relevant data for a given galaxy. All data come from
    the SPARC database (http://astroweb.cwru.edu/SPARC/), with the following
    exceptions:

    - gas_radius : calculated in spam.data.fit_gas_disc.py
    - hernquist_radius : calculated in spam.data.fit_stellar_bulge.py
    - hernquist_rho_0 : ditto
    - stellar_expdisc_sigma_0 : calculated in spam.data.fit_stellar_disc.py
    - stellar_expdisc_R_d : ditto
    - ext_potential : calculated via the screening map of Desmond et al.
    - ext_potential_lower : ditto
    - ext_potential_upper : ditto

    Parameters
    ----------
    name : str
        Name of galaxy matching name in SPARC database, e.g. F574-1 or CamB.

    Attributes
    ----------
    name : str
        As above.
    hubble_type : str
        Hubble classification of galaxy.
    distance : float
        Distance to galaxy. UNITS: m
    distance_err : float
        Error on distance to galaxy. UNITS: m
    distance_method : int, {1, 2, 3, 4, 5}
        Method used to determine distance to galaxy (see SPARC database for
        meanings of numbers).
    inclination : float
        Inclination of galaxy. UNITS: degrees
    inclination_err : float
        Error on inclination. UNITS: degrees
    luminosity_tot : float
        Total luminosity of galaxy at 3.6mu. UNITS: 10^9 L_sun.
    luminosity_err : float
        Error on total luminosity. UNITS: 10^9 L_sun.
    disc_scale : float
        Scale length of disc fit to photometry data. UNITS: m
    disc_SB : float
        Central surface brightness of disc fit to photometry data.
        UNITS: L_sun/m^2
    HI_mass : float
        Total mass of HI gas. UNITS: kg
    Q_flag : int
        Quality flag (see SPARC database).
    StellarBulge : bool
        Whether a bulge component is detected.
    R : 1D numpy.ndarray
        Radii of rotation curve measurements. UNITS: kpc
    v : 1D numpy.ndarray, shape same as R
        Rotation curve measurements. UNITS: km/s
    v_err : 1D numpy.ndarray, shape same as R
        Errors on rotation curve. UNITS: km/s
    v_gas : 1D numpy.ndarray, shape same as R
        Gas contribution to rotation curve. UNITS: km/s
    v_disc : 1D numpy.ndarray, shape same as R
        Stellar disc contribution to rotation curve, assuming mass-to-light
        ratio of 1 M_sun/L_sun. UNITS: km/s
    v_bul : 1D numpy.ndarray, shape same as R
        Stellar bulge contribution to rotation curve, assuming mass-to-light
        ratio of 1 M_sun/L_sun. Zero everywhere if StellarBulge is False.
        UNITS: km/s
    coords_RA : float
        Right ascension of galaxy. UNITS: degrees
    coords_DEC : float
        Declination of galaxy. UNITS: degrees
    gas_radius : float
        Best fit radius of gas disc, calculated in spam.data.fit_gas_disc.py.
        UNITS: m
    hernquist_radius : float or None
        Best fit radius of Hernquist bulge, calculated in
        spam.data.fit_stellar_bulge.py; None if StellarBulge is False.
        UNITS: m
    hernquist_rho_0 : float or None
        Best fit central density of Hernquist bulge, calculated in
        spam.data.fit_stellar_bulge.py; None if StellarBulge is False.
        UNITS: kg/m^3
    stellar_expdisc_sigma_0 : float
        Best fit central density of stellar disc, calculated in
        spam.data.fit_stellar_disc.py. UNITS: kg/m^2
    stellar_expdisc_R_d : float
        Best fit scale length of stellar disc, calculated in
        spam.data.fit_stellar_disc.py. UNITS: m
    ext_potential : 1D numpy.ndarray
        Maximum posterior external potential (specifically, log10(phi/c^2))
        calculated via the screening map of Desmond et al. (see Naik et al.,
        2019 for details and refs); one value per screening-map realisation.
    ext_potential_lower : 1D numpy.ndarray
        1 sigma lower bound on external potential.
    ext_potential_upper : 1D numpy.ndarray
        1 sigma upper bound on external potential.
    """

    @staticmethod
    def _read_lines(path):
        """Read all lines of *path*, closing the file promptly (even on error)."""
        with open(path, 'r') as f:
            return f.readlines()

    def __init__(self, name):
        """Load all SPARC (and derived) data for galaxy *name* from disk."""
        self.name = name
        datadir = _os.path.dirname(_os.path.realpath(__file__))+"/SPARCData"

        # loading metadata; galaxy row index 'ind' is reused for every file,
        # which all list galaxies in the same order
        data = self._read_lines(datadir+"/metadata.txt")
        names = [line.split()[0] for line in data]
        ind = names.index(self.name)
        fields = data[ind].split()
        htypes = {0: 'S0', 1: 'Sa', 2: 'Sab', 3: 'Sb', 4: 'Sbc', 5: 'Sc',
                  6: 'Scd', 7: 'Sd', 8: 'Sdm', 9: 'Sm', 10: 'Im', 11: 'BCD'}
        self.hubble_type = htypes[int(fields[1])]
        self.distance = float(fields[2])*_Mpc  # metres
        self.distance_err = float(fields[3])*_Mpc  # metres
        self.distance_method = int(fields[4])
        self.inclination = float(fields[5])  # degrees
        self.inclination_err = float(fields[6])  # degrees
        self.luminosity_tot = float(fields[7])  # 1e+9 Lsun
        self.luminosity_err = float(fields[8])  # 1e+9 Lsun
        self.disc_scale = float(fields[11])*_kpc  # metres
        self.disc_SB = float(fields[12])/_pc**2  # Lsun/m^2
        self.HI_mass = float(fields[13])*1e+9*_Msun  # kg
        self.Q_flag = int(fields[17])

        # loading main SPARC data (rotation curve decomposition);
        # the first 3 lines of the rotmod file are header
        self.filename = datadir+"/data/"+name+"_rotmod.dat"
        rows = [line.split() for line in self._read_lines(self.filename)[3:]]
        n_pts = len(rows)
        self.R = _np.zeros((n_pts,))
        self.v = _np.zeros((n_pts,))
        self.v_err = _np.zeros((n_pts,))
        self.v_gas = _np.zeros((n_pts,))
        self.v_disc = _np.zeros((n_pts,))
        self.v_bul = _np.zeros((n_pts,))
        for i, row in enumerate(rows):
            self.R[i] = float(row[0])
            self.v[i] = float(row[1])
            self.v_err[i] = float(row[2])
            self.v_gas[i] = float(row[3])
            self.v_disc[i] = float(row[4])
            self.v_bul[i] = float(row[5])
        # bulge component is present iff any bulge velocity is non-zero
        self.StellarBulge = not (self.v_bul == 0).all()

        # loading coords; first line of the file is a header
        data = self._read_lines(datadir+"/coords.txt")[1:]
        fields = data[ind].split()
        assert fields[0] == self.name
        self.coords_RA = float(fields[2])
        self.coords_DEC = float(fields[3])

        # loading gas radius
        data = self._read_lines(datadir+"/gas_radii.txt")
        fields = data[ind].split()
        assert fields[0] == self.name
        self.gas_radius = float(fields[1])

        # loading hernquist parameters (only defined for bulge galaxies)
        if self.StellarBulge:
            data = self._read_lines(datadir+"/hernquist_parameters.txt")
            fields = data[ind].split()
            assert fields[0] == self.name
            self.hernquist_rho_0 = float(fields[1])
            self.hernquist_radius = float(fields[2])
        else:
            self.hernquist_rho_0 = None
            self.hernquist_radius = None

        # loading stellar disc fit parameters
        data = self._read_lines(datadir+"/stellar_disc_parameters.txt")
        fields = data[ind].split()
        assert fields[0] == self.name
        self.stellar_expdisc_sigma_0 = float(fields[1])  # kg/m^2
        self.stellar_expdisc_R_d = float(fields[2])  # metres

        # loading external potential data: one value per screening-map file
        potential_dir = datadir+"/SPARC_potentials"
        lower = []
        central = []
        upper = []
        for i in range(20):
            data = self._read_lines(potential_dir+"/SPARC_screen_"+str(i)+".dat")
            fields = data[ind].split()
            assert fields[0] == self.name
            lower.append(float(fields[1]))
            central.append(float(fields[2]))
            upper.append(float(fields[3]))
        self.ext_potential_lower = _np.array(lower, dtype=_np.float64)
        self.ext_potential = _np.array(central, dtype=_np.float64)
        self.ext_potential_upper = _np.array(upper, dtype=_np.float64)
        return
# getting list of galaxy names for the 'full' and 'standard' samples
# (see module docstring); files are read once, at import time
_datadir = _os.path.dirname(_os.path.realpath(__file__))+"/SPARCData"


def _read_names(path):
    # one galaxy per line; the first whitespace-separated token is its name
    with open(path, 'r') as f:
        return [line.split()[0] for line in f]


names_full = _read_names(_datadir+"/names_full.txt")
names_standard = _read_names(_datadir+"/names_standard.txt")
__all__ = ['SPARCGalaxy', 'names_full', 'names_standard']
|
aneeshnaikREPO_NAMEspamPATH_START.@spam_extracted@spam-master@data@__init__.py@.PATH_END.py
|
{
"filename": "finite_diff.py",
"repo_name": "neuraloperator/neuraloperator",
"repo_path": "neuraloperator_extracted/neuraloperator-main/neuralop/losses/finite_diff.py",
"type": "Python"
}
|
import torch
"""
finite_diff.py implements utilities for computing derivatives via finite-difference method
"""
#Set fix{x,y,z}_bnd if function is non-periodic in {x,y,z} direction
#x: (*, s)
#y: (*, s)
def central_diff_1d(x, h, fix_x_bnd=False):
    """central_diff_1d computes the first spatial derivative
    of x using central finite-difference

    Parameters
    ----------
    x : torch.Tensor
        input data on a regular 1d grid, such that
        x[i] = f(x_i)
    h : float
        discretization size of input x
    fix_x_bnd : bool, optional
        whether to replace the wrap-around (periodic) boundary values with
        one-sided first-order differences, for non-periodic data,
        by default False

    Returns
    -------
    dx
        output tensor of df(x)/dx at each point
    """
    # torch.roll wraps around, so the interior entries are true central
    # differences while the two endpoints implicitly assume periodicity
    dx = (torch.roll(x, -1, dims=-1) - torch.roll(x, 1, dims=-1))/(2.0*h)

    if fix_x_bnd:
        # one-sided differences at the two ends for non-periodic functions
        dx[...,0] = (x[...,1] - x[...,0])/h
        dx[...,-1] = (x[...,-1] - x[...,-2])/h

    return dx
#x: (*, s1, s2)
#y: (*, s1, s2)
def central_diff_2d(x, h, fix_x_bnd=False, fix_y_bnd=False):
    """central_diff_2d computes derivatives
    df(x,y)/dx and df(x,y)/dy for f(x,y) defined
    on a regular 2d grid using finite-difference

    Parameters
    ----------
    x : torch.Tensor
        input function defined x[:,i,j] = f(x_i, y_j)
    h : float or list
        discretization size of grid for each dimension; a scalar is
        broadcast to both dimensions
    fix_x_bnd : bool, optional
        whether to use one-sided differences for dx on the x boundaries
        (instead of periodic wrap-around), by default False
    fix_y_bnd : bool, optional
        whether to use one-sided differences for dy on the y boundaries,
        by default False

    Returns
    -------
    dx, dy
        tuple such that dx[:, i,j]= df(x_i,y_j)/dx
        and dy[:, i,j]= df(x_i,y_j)/dy
    """
    # accept int as well as float for a scalar spacing; previously an int h
    # fell through and crashed on h[0] below
    if isinstance(h, (int, float)):
        h = [h, h]

    dx = (torch.roll(x, -1, dims=-2) - torch.roll(x, 1, dims=-2))/(2.0*h[0])
    dy = (torch.roll(x, -1, dims=-1) - torch.roll(x, 1, dims=-1))/(2.0*h[1])

    if fix_x_bnd:
        dx[...,0,:] = (x[...,1,:] - x[...,0,:])/h[0]
        dx[...,-1,:] = (x[...,-1,:] - x[...,-2,:])/h[0]

    if fix_y_bnd:
        dy[...,:,0] = (x[...,:,1] - x[...,:,0])/h[1]
        dy[...,:,-1] = (x[...,:,-1] - x[...,:,-2])/h[1]

    return dx, dy
#x: (*, s1, s2, s3)
#y: (*, s1, s2, s3)
def central_diff_3d(x, h, fix_x_bnd=False, fix_y_bnd=False, fix_z_bnd=False):
    """central_diff_3d computes derivatives
    df(x,y,z)/dx, df(x,y,z)/dy and df(x,y,z)/dz for f(x,y,z) defined
    on a regular 3d grid using finite-difference

    Parameters
    ----------
    x : torch.Tensor
        input function defined x[:,i,j,k] = f(x_i, y_j,z_k)
    h : float or list
        discretization size of grid for each dimension; a scalar is
        broadcast to all three dimensions
    fix_x_bnd : bool, optional
        whether to use one-sided differences for dx on the x boundaries
        (instead of periodic wrap-around), by default False
    fix_y_bnd : bool, optional
        whether to use one-sided differences for dy on the y boundaries,
        by default False
    fix_z_bnd : bool, optional
        whether to use one-sided differences for dz on the z boundaries,
        by default False

    Returns
    -------
    dx, dy, dz
        tuple such that dx[:, i,j,k]= df(x_i,y_j,z_k)/dx
        and dy[:, i,j,k]= df(x_i,y_j,z_k)/dy
        and dz[:, i,j,k]= df(x_i,y_j,z_k)/dz
    """
    # accept int as well as float for a scalar spacing; previously an int h
    # fell through and crashed on h[0] below
    if isinstance(h, (int, float)):
        h = [h, h, h]

    dx = (torch.roll(x, -1, dims=-3) - torch.roll(x, 1, dims=-3))/(2.0*h[0])
    dy = (torch.roll(x, -1, dims=-2) - torch.roll(x, 1, dims=-2))/(2.0*h[1])
    dz = (torch.roll(x, -1, dims=-1) - torch.roll(x, 1, dims=-1))/(2.0*h[2])

    if fix_x_bnd:
        dx[...,0,:,:] = (x[...,1,:,:] - x[...,0,:,:])/h[0]
        dx[...,-1,:,:] = (x[...,-1,:,:] - x[...,-2,:,:])/h[0]

    if fix_y_bnd:
        dy[...,:,0,:] = (x[...,:,1,:] - x[...,:,0,:])/h[1]
        dy[...,:,-1,:] = (x[...,:,-1,:] - x[...,:,-2,:])/h[1]

    if fix_z_bnd:
        dz[...,:,:,0] = (x[...,:,:,1] - x[...,:,:,0])/h[2]
        dz[...,:,:,-1] = (x[...,:,:,-1] - x[...,:,:,-2])/h[2]

    return dx, dy, dz
|
neuraloperatorREPO_NAMEneuraloperatorPATH_START.@neuraloperator_extracted@neuraloperator-main@neuralop@losses@finite_diff.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "clwainwright/CosmoTransitions",
"repo_path": "CosmoTransitions_extracted/CosmoTransitions-master/cosmoTransitions/__init__.py",
"type": "Python"
}
|
# Package version string.
__version__ = "2.0.2"
|
clwainwrightREPO_NAMECosmoTransitionsPATH_START.@CosmoTransitions_extracted@CosmoTransitions-master@cosmoTransitions@__init__.py@.PATH_END.py
|
{
"filename": "representation.py",
"repo_name": "astro-informatics/s2scat",
"repo_path": "s2scat_extracted/s2scat-main/s2scat/representation.py",
"type": "Python"
}
|
from jax import jit
import jax.numpy as jnp
from functools import partial
from typing import List
import s2wav
from s2scat.utility import statistics, reorder, normalisation
from s2scat.operators import spherical
from s2scat import compression
@partial(jit, static_argnums=(1, 2, 3, 4, 7, 8, 9))
def scatter(
    flm: jnp.ndarray,
    L: int,
    N: int,
    J_min: int = 0,
    reality: bool = True,
    config: List[jnp.ndarray] = None,
    norm: List[jnp.ndarray] = None,
    recursive: bool = False,
    isotropic: bool = False,
    delta_j: int = None,
) -> List[jnp.ndarray]:
    r"""Compute directional scattering covariances on the sphere.

    Args:
        flm (jnp.ndarray): Spherical harmonic coefficients.
        L (int): Spherical harmonic bandlimit.
        N (int): Azimuthal bandlimit (directionality).
        J_min (int, optional): Minimum dyadic wavelet scale to consider. Defaults to 0.
        reality (bool, optional): Whether :math:`f \in \mathbb{R}`, if True exploits
            hermitian symmetry of harmonic coefficients. Defaults to True.
        config (List[jnp.ndarray], optional): All necessary precomputed arrays
            ``(filters, Q, precomps)``. Defaults to None.
        norm (List[jnp.ndarray], optional): Covariance normalisation values.
            Defaults to None.
        recursive (bool, optional): Whether to perform a memory efficient recursive transform,
            or a faster but less memory efficient fully precompute transform. Defaults to False.
        isotropic (bool, optional): Whether to return isotropic coefficients, i.e. average
            over directionality. Defaults to False.
        delta_j (int, optional): Range of wavelet scales over which to compute covariances.
            If None, covariances between all scales will be considered. Defaults to None.

    Raises:
        ValueError: If one does not pass configuration arrays.

    Returns:
        Tuple[jnp.ndarray]: Directional scattering covariance statistics
        ``(mean, var, S1, P00, C01, C11)``.

    Notes:
        The recursive transform, outlined in `Price & McEwen (2023) <https://arxiv.org/pdf/2311.14670>`_,
        requires :math:`\mathcal{O}(NL^2)` memory overhead and can scale to high bandlimits :math:`L`.
        Conversely, the fully precompute transform requires :math:`\mathcal{O}(NL^3)` memory overhead
        which can be large. However, the transform will be much faster. For applications at
        :math:`L \leq 512` the precompute approach is a better choice, beyond which we recommend the
        users switch to recursive transforms or the C backend functionality.

        If isotropic is true, the statistics will be contracted across :math:`n`. This will
        dramatically compress the covariance representation, but will be somewhat less
        sensitive to directional structure.
    """
    if config is None:
        raise ValueError("Must provide precomputed kernels for this transform!")
    filters, Q, precomps = config
    ### Configure maximum scale, impose reality, define quadrature
    J_max = s2wav.samples.j_max(L)
    Q = spherical.quadrature(L, J_min) if Q is None else Q
    flm = spherical.make_flm_full(flm, L) if reality else flm
    ### Compute: mean and variance of the input field
    mean, var = statistics.compute_mean_variance(flm, L)
    ### Perform first wavelet transform W_j2 = f * Psi_j2
    W = spherical._first_flm_to_analysis(
        flm, L, N, J_min, reality, filters, precomps, recursive
    )
    ### Compute S1, P00, and Nj1j2 scale by scale
    Nj1j2, S1, P00 = [], [], []
    for j2 in range(J_min, J_max + 1):
        # Each scale j2 has its own (reduced) bandlimit Lj2.
        Lj2 = s2wav.samples.wav_j_bandlimit(L, j2, 2.0, True)
        ### Compute: Mlm = SHT(|W|), harmonic coefficients of the modulus
        Mlm = spherical._forward_harmonic_vect(
            jnp.abs(W[j2 - J_min]), j2, Lj2, J_min, J_max, reality, precomps, recursive
        )
        ### Compute: S1 and P00 statistics
        S1 = statistics.add_to_S1(S1, Mlm, Lj2)
        P00 = statistics.add_to_P00(P00, W[j2 - J_min], Q[j2 - J_min])
        ### Compute: Nj1j2, the second wavelet transform of |W_j2| over scales j1 < j2
        if j2 > J_min:
            val = spherical._flm_to_analysis_vect(
                Mlm,
                j2,
                Lj2,
                L,
                N,
                J_min,
                j2 - 1,
                reality,
                filters,
                precomps,
                recursive,
                delta_j,
            )
            Nj1j2.append(val)
    ### Reorder and flatten Njjprime, convert to JAX arrays for C01/C11
    Nj1j2_flat = reorder.nested_list_to_list_of_arrays(Nj1j2, J_min, J_max, delta_j)
    ### Compute: Higher order covariances C01/C11
    C01, C11 = statistics.compute_C01_and_C11(Nj1j2_flat, W, Q, J_min, J_max)
    ### Normalize the coefficients
    if norm is not None:
        S1, P00, C01, C11 = normalisation.apply_norm(
            S1, P00, C01, C11, norm, J_min, J_max
        )
    ### Compress covariances to isotropic coefficients
    if isotropic:
        C01, C11 = compression.C01_C11_to_isotropic(C01, C11, J_min, J_max)
    ### Return 1D jnp arrays for synthesis
    S1, P00, C01, C11 = reorder.list_to_array(S1, P00, C01, C11)
    return mean, var, S1, P00, C01, C11
def scatter_c(
    flm: jnp.ndarray,
    L: int,
    N: int,
    J_min: int = 0,
    reality: bool = False,
    config: List[jnp.ndarray] = None,
    norm: List[jnp.ndarray] = None,
    isotropic: bool = False,
    delta_j: int = None,
) -> List[jnp.ndarray]:
    r"""Compute directional scattering covariances on the sphere using a custom C backend.

    Args:
        flm (jnp.ndarray): Spherical harmonic coefficients.
        L (int): Spherical harmonic bandlimit.
        N (int): Azimuthal bandlimit (directionality).
        J_min (int, optional): Minimum dyadic wavelet scale to consider. Defaults to 0.
        reality (bool, optional): Whether :math:`f \in \mathbb{R}`, if True exploits
            hermitian symmetry of harmonic coefficients. Defaults to False.
        config (List[jnp.ndarray], optional): All necessary precomputed arrays
            ``(filters, Q, precomps)``; precomps are unused by the C backend. Defaults to None.
        norm (List[jnp.ndarray], optional): Covariance normalisation values.
            Defaults to None.
        isotropic (bool, optional): Whether to return isotropic coefficients, i.e. average
            over directionality. Defaults to False.
        delta_j (int, optional): Range of wavelet scales over which to compute covariances.
            If None, covariances between all scales will be considered. Defaults to None.

    Raises:
        ValueError: If one does not pass an array of wavelet filters.

    Returns:
        Tuple[jnp.ndarray]: Directional scattering covariance statistics
        ``(mean, var, S1, P00, C01, C11)``.

    Notes:
        This variant of the directional scattering covariance transform leverages the
        JAX frontend for highly optimised C spherical harmonic libraries provided by
        `S2FFT <https://github.com/astro-informatics/s2fft/tree/main>`_. As such, it is
        currently limited to CPU compute and cannot be JIT compiled. However, this approach
        can still be very fast as the underlying spherical harmonic libraries are extremely
        optimised. Reverse mode gradient functionality is supported, peak memory overhead is
        :math:`\mathcal{O}(NL^2)`, and this variant can scale to very high :math:`L \geq 4096`.

        If isotropic is true, the statistics will be contracted across :math:`n`. This will
        dramatically compress the covariance representation, but will be somewhat less
        sensitive to directional structure.
    """
    if config is None:
        raise ValueError("Must provide precomputed kernels for this transform!")
    # precomps are not needed here: harmonic transforms run through the C backend.
    filters, Q, _ = config
    ### Configure maximum scale, impose reality, define quadrature
    J_max = s2wav.samples.j_max(L)
    Q = spherical.quadrature(L, J_min) if Q is None else Q
    flm = spherical.make_flm_full(flm, L) if reality else flm
    ### Compute: mean and variance of the input field
    mean, var = statistics.compute_mean_variance(flm, L)
    ### Perform first wavelet transform W_j2 = f * Psi_j2
    W = spherical._first_flm_to_analysis(
        flm, L, N, J_min, reality, filters, use_c_backend=True
    )
    ### Compute S1, P00, and Nj1j2 scale by scale
    Nj1j2, S1, P00 = [], [], []
    for j2 in range(J_min, J_max + 1):
        # Each scale j2 has its own (reduced) bandlimit Lj2.
        Lj2 = s2wav.samples.wav_j_bandlimit(L, j2, 2.0, True)
        ### Compute: Mlm = SHT(|W|), harmonic coefficients of the modulus
        Mlm = spherical._forward_harmonic_looped(jnp.abs(W[j2 - J_min]), Lj2, N)
        ### Compute: S1 and P00 statistics
        S1 = statistics.add_to_S1(S1, Mlm, Lj2)
        P00 = statistics.add_to_P00(P00, W[j2 - J_min], Q[j2 - J_min])
        ### Compute: Nj1j2, the second wavelet transform of |W_j2| over scales j1 < j2
        if j2 > J_min:
            val = spherical._flm_to_analysis_looped(
                Mlm, j2, Lj2, L, N, J_min, j2 - 1, filters, delta_j
            )
            Nj1j2.append(val)
    ### Reorder and flatten Njjprime, convert to JAX arrays for C01/C11
    Nj1j2_flat = reorder.nested_list_to_list_of_arrays(Nj1j2, J_min, J_max, delta_j)
    ### Compute: Higher order covariances C01/C11
    C01, C11 = statistics.compute_C01_and_C11(Nj1j2_flat, W, Q, J_min, J_max)
    ### Normalize the coefficients
    if norm is not None:
        S1, P00, C01, C11 = normalisation.apply_norm(
            S1, P00, C01, C11, norm, J_min, J_max
        )
    ### Compress covariances to isotropic coefficients
    if isotropic:
        C01, C11 = compression.C01_C11_to_isotropic(C01, C11, J_min, J_max)
    ### Return 1D jnp arrays for synthesis
    S1, P00, C01, C11 = reorder.list_to_array(S1, P00, C01, C11)
    return mean, var, S1, P00, C01, C11
|
astro-informaticsREPO_NAMEs2scatPATH_START.@s2scat_extracted@s2scat-main@s2scat@representation.py@.PATH_END.py
|
{
"filename": "_subunitwidth.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/layout/geo/_subunitwidth.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class SubunitwidthValidator(_plotly_utils.basevalidators.NumberValidator):
    """Number validator for the ``layout.geo.subunitwidth`` property."""

    def __init__(self, plotly_name="subunitwidth", parent_name="layout.geo", **kwargs):
        # Supply the property defaults unless the caller overrides them.
        property_defaults = {"edit_type": "plot", "min": 0, "role": "style"}
        for option, default in property_defaults.items():
            kwargs.setdefault(option, default)
        super(SubunitwidthValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@layout@geo@_subunitwidth.py@.PATH_END.py
|
{
"filename": "analysis_test_matiasfile_18092020.py",
"repo_name": "kumikokotera/GRAND_tools",
"repo_path": "GRAND_tools_extracted/GRAND_tools-master/grid_shape/tools/analysis_test_matiasfile_18092020.py",
"type": "Python"
}
|
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
from matplotlib.colors import LogNorm
import numpy as np
import os
import json
from grid_shape import diff_spec as diff_spec
from grid_shape import grids as grids
from grid_shape import utils_analysis as ua
from grid_shape import layout as layout
from grid_shape import masks as masks
# Marker symbols cycled per bin index, and a small grayscale palette.
SYM_LIST = ['.','o','v','*','s','.','o','v','*','s','.','o','v','*','s','.','o','v','*','s','.','o','v','*','s','.','o','v','*','s','.','o','v','*','s']
MYC = ['0','0.20','0.4','0.6','0.8']
### Commented analysis script
# NOTE(review): absolute user-specific paths — adjust before running elsewhere.
path = "/Users/benoitl/Documents/GRAND/Data_grids/20200918/"
plot_path = '/Users/benoitl/Documents/GRAND/Data_grids/20200918/plots'
os.makedirs(plot_path, exist_ok=True)
#path = "/Users/kotera/BROQUE/Data_GRAND/Matias/Trihex"
primary = "Proton"
input_n_ring = 10
threshold = 30 # trigger threshold for individual antennas in muV
n_trig_thres = 5 # number of triggered antennas required to trigger an event
### "Creating" the layout
pos, offset = grids.create_grid_univ("trihex", 125, do_prune=False, input_n_ring=10)
## Creating the pruned layout
pos2, offset2, mask2 = grids.create_grid_univ("trihex", 125, do_prune=True, input_n_ring=10)
# creating the all mask (all the antennas)
mask = masks.make_all_mask(input_n_ring=10)
lay1 = layout.Layout(path, pos, mask, "all", threshold, n_trig_thres, input_n_ring, primary)
lay2 = layout.Layout(path, pos2, mask2, "simple", threshold, n_trig_thres, input_n_ring, primary)
# creating a random mask with only 5% of the n_ring=10 antennas kept
mask_rand_5 = masks.make_mask_random(input_n_ring=10, n_keep_ratio=0.05)
# creating the layout associated with this mask
lay_rand_5 = layout.Layout(path, pos, mask_rand_5, "rand_5", threshold, n_trig_thres, input_n_ring, primary)
# creating the trihex 250 grid out of the trixhex 125
mask_tri250 = masks.make_trihex_new_out_of_125(pos, 250, 10)
lay_tri250 = layout.Layout(path, pos, mask_tri250, "tri250", threshold, n_trig_thres, input_n_ring, primary)
lay_tri250.plot_layout()
# creating the trihex 500 grid out of the trixhex 125
mask_tri500 = masks.make_trihex_new_out_of_125(pos, 500, 10)
lay_tri500 = layout.Layout(path, pos, mask_tri500, "tri500", threshold, n_trig_thres, input_n_ring, primary)
lay_tri500.plot_layout()
# creating the trihex 1000 grid out of the trixhex 125
mask_tri1000 = masks.make_trihex_new_out_of_125(pos, 1000, 10)
lay_tri1000 = layout.Layout(path, pos, mask_tri1000, "tri1000", threshold, n_trig_thres, input_n_ring, primary)
lay_tri1000.plot_layout()
# central-island variants carved out of the 125 m trihex grid
mask_island1 = masks.make_centralisland_out_of_125(pos, 10)
lay_island1 = layout.Layout(path, pos, mask_island1, "island1", threshold, n_trig_thres, input_n_ring, primary)
lay_island1.plot_layout()
mask_island2 = masks.make_centralisland_out_of_125_v2(pos, 10)
lay_island2 = layout.Layout(path, pos, mask_island2, "island2", threshold, n_trig_thres, input_n_ring, primary)
lay_island2.plot_layout()
mini_island_mask = masks.make_mini_island()
lay_mini_island = layout.Layout(
    path,
    pos,
    mini_island_mask,
    "mini_island",
    threshold, n_trig_thres, input_n_ring, primary
)
mini_island2_mask = masks.make_mini_island2()
lay_mini_island2 = layout.Layout(
    path,
    pos,
    mini_island2_mask,
    "mini_island2",
    threshold, n_trig_thres, input_n_ring, primary
)
# Figure 3: triggered event rate vs energy, one marker style per zenith bin,
# one colour per layout.
plt.figure(3, figsize=(8,6))
plt.clf()
n_zenith_bins = len(lay1.zenith_bins_centers)
n_bins_to_plot = 6
# Pick n_bins_to_plot roughly evenly spaced zenith-bin indices.
i_bins = [(i) * n_zenith_bins // (n_bins_to_plot+1) for i in range(n_bins_to_plot)]
#i_bins = [0, 1, 2, 3, 4]
for k, i_bin in enumerate(i_bins):
    # Only the first (C1) curve carries the zenith-range legend label.
    plt.errorbar(
        np.log10(lay1.energy_bins_centers*1e18),
        lay1.detection_rate[:,i_bin],
        fmt=SYM_LIST[i_bin],
        capsize=2,
        alpha=0.7,
        ms=8,
        color = "C1",
        ls = '-',
        label='%4.0f > zen >%4.0f deg'%(lay1.zenith_bins_limits[i_bin],lay1.zenith_bins_limits[i_bin+1])
    )
    plt.errorbar(
        np.log10(lay1.energy_bins_centers*1e18),
        lay_island1.detection_rate[:,i_bin],
        fmt=SYM_LIST[i_bin],
        capsize=2,
        alpha=0.7,
        ms=8,
        color="C2",
        ls = '-',
        # label='%4.0f > zen >%4.0f deg'%(lay1.zenith_bins_limits[izen],lay1.zenith_bins_limits[izen+1])
    )
    plt.errorbar(
        np.log10(lay1.energy_bins_centers*1e18),
        lay_mini_island2.detection_rate[:,i_bin],
        fmt=SYM_LIST[i_bin],
        capsize=2,
        alpha=0.7,
        ms=8,
        color="C3",
        ls = '-',
        # label='%4.0f > zen >%4.0f deg'%(lay1.zenith_bins_limits[izen],lay1.zenith_bins_limits[izen+1])
    )
    plt.errorbar(
        np.log10(lay1.energy_bins_centers*1e18),
        lay_tri500.detection_rate[:,i_bin],
        fmt=SYM_LIST[i_bin],
        capsize=2,
        alpha=0.7,
        ms=8,
        color="C4",
        ls = '-',
        # label='%4.0f > zen >%4.0f deg'%(lay1.zenith_bins_limits[izen],lay1.zenith_bins_limits[izen+1])
    )
    plt.errorbar(
        np.log10(lay1.energy_bins_centers*1e18),
        lay_tri1000.detection_rate[:,i_bin],
        fmt=SYM_LIST[i_bin],
        capsize=2,
        alpha=0.7,
        ms=8,
        color="C5",
        ls = '-',
        # label='%4.0f > zen >%4.0f deg'%(lay1.zenith_bins_limits[izen],lay1.zenith_bins_limits[izen+1])
    )
plt.yscale('log')
plt.ylabel('triggered event rate over array '+'$\\nu_{ev}\, [day^{-1} km^{-2}]$')
plt.xlabel('log E/eV')
legend1 = plt.legend(loc=1)
#splt.xscale('log')
# Second legend mapping colours to layouts (the first maps markers to zenith bins).
custom_lines = [
    Line2D([0], [0], color="C1", lw=4),
    Line2D([0], [0], color="C2", lw=4),
    Line2D([0], [0], color="C3", lw=4),
    Line2D([0], [0], color="C4", lw=4),
    Line2D([0], [0], color="C5", lw=4)
]
legend2 = plt.legend(custom_lines, ['trihex all', 'island 1',"mini island2", "trihex500", "trihex1000"], loc=3)
plt.gca().add_artist(legend1)
plt.gca().add_artist(legend2)
plt.savefig(os.path.join(lay1.plot_path, "detection_rate_vs_energy.png"))
# Figure 4: triggered event rate vs zenith, one marker style per energy bin,
# one colour per layout (same colour convention as figure 3).
plt.figure(4, figsize=(8,6))
plt.clf()
n_energy_bins = len(lay1.energy_bins_centers)
n_bins_to_plot = 4
# Pick n_bins_to_plot roughly evenly spaced energy-bin indices (skipping bin 0).
i_bins = [(i+1) * n_energy_bins // (n_bins_to_plot+1) for i in range(n_bins_to_plot)]
#i_bins = [0, 1, 2, 3, 4]
for k, i_bin in enumerate(i_bins):
    #for iener in range(0, len(lay1.energy_bins_limits)-1):
    # Only the first (C1) curve carries the energy-range legend label.
    plt.errorbar(
        lay1.zenith_bins_centers,
        lay1.detection_rate[i_bin,:],
        fmt=SYM_LIST[i_bin],
        capsize=2,
        alpha=0.7,
        ms=10,
        ls = '-',
        color="C1",
        label='%4.2f > log E/eV >%4.2f'%(
            np.log10(lay1.energy_bins_limits[i_bin]*1e18),
            np.log10(lay1.energy_bins_limits[i_bin+1]*1e18)
        )
    )
    plt.errorbar(
        lay1.zenith_bins_centers,
        lay_island1.detection_rate[i_bin,:],
        fmt=SYM_LIST[i_bin],
        capsize=2,
        alpha=0.7,
        ms=10,
        ls = '-',
        color="C2"
    )
    plt.errorbar(
        lay_mini_island2.zenith_bins_centers,
        lay_mini_island2.detection_rate[i_bin,:],
        fmt=SYM_LIST[i_bin],
        capsize=2,
        alpha=0.7,
        ms=10,
        ls = '-',
        color="C3"
    )
    plt.errorbar(
        lay_tri500.zenith_bins_centers,
        lay_tri500.detection_rate[i_bin,:],
        fmt=SYM_LIST[i_bin],
        capsize=2,
        alpha=0.7,
        ms=10,
        ls = '-',
        color="C4"
    )
    plt.errorbar(
        lay_tri1000.zenith_bins_centers,
        lay_tri1000.detection_rate[i_bin,:],
        fmt=SYM_LIST[i_bin],
        capsize=2,
        alpha=0.7,
        ms=10,
        ls = '-',
        color="C5"
    )
plt.yscale('log')
plt.ylabel('triggered event rate over array '+'$\\nu_{ev}\, [day^{-1} km^{-2}]$')
plt.xlabel('Zenith [deg]')
#plt.title('%s, %4.2f > zenith > %4.2f deg'%(lay1.mask_name, lay1.[izen], thetal[izen+1]))
legend1 = plt.legend(loc=1)
# Second legend mapping colours to layouts.
custom_lines = [
    Line2D([0], [0], color="C1", lw=4),
    Line2D([0], [0], color="C2", lw=4),
    Line2D([0], [0], color="C3", lw=4),
    Line2D([0], [0], color="C4", lw=4),
    Line2D([0], [0], color="C5", lw=4)
]
legend2 = plt.legend(custom_lines, ['trihex all', 'island1',"mini island2", "trihex500", "trihex1000"], loc=3)
plt.gca().add_artist(legend1)
plt.gca().add_artist(legend2)
plt.savefig(os.path.join(lay1.plot_path, "detection_rate_vs_zenith.png"))
### Save a snapshot image of each layout geometry to the plot directory.
plt.figure(45)
# NOTE(review): unlike the calls below, no fig kwarg is passed here — confirm
# plot_layout() draws into the current figure in that case.
lay1.plot_layout()
plt.savefig(os.path.join(lay1.plot_path, "lay1.png"))
plt.figure(46)
lay_rand_5.plot_layout(fig=46)
plt.savefig(os.path.join(lay1.plot_path, "lay_rand_5.png"))
plt.figure(47)
lay_tri250.plot_layout(fig=47)
plt.savefig(os.path.join(lay1.plot_path, "lay_tri250.png"))
plt.figure(48)
lay_tri500.plot_layout(fig=48)
plt.savefig(os.path.join(lay1.plot_path, "lay_tri500.png"))
plt.figure(49)
lay_tri1000.plot_layout(fig=49)
plt.savefig(os.path.join(lay1.plot_path, "lay_tri1000.png"))
plt.figure(50)
lay_island1.plot_layout(fig=50)
plt.savefig(os.path.join(lay1.plot_path, "lay_island1.png"))
plt.figure(51)
lay_island2.plot_layout(fig=51)
plt.savefig(os.path.join(lay1.plot_path, "lay_island2.png"))
plt.figure(52)
# Bug fix: this call previously passed fig=51 (copy-paste slip), which drew
# lay_mini_island into the lay_island2 figure and saved the wrong image.
lay_mini_island.plot_layout(fig=52)
plt.savefig(os.path.join(lay1.plot_path, "lay_mini_island.png"))
plt.figure(53)
lay_mini_island2.plot_layout(fig=53)
plt.savefig(os.path.join(lay1.plot_path, "lay_mini_island2.png"))
# Differential event rates for a 200 km^2 detector area, per layout.
lay1.make_diff_event_rate(200)
lay_tri1000.make_diff_event_rate(200)
lay_tri500.make_diff_event_rate(200)
lay_tri250.make_diff_event_rate(200)
lay_island1.make_diff_event_rate(200)
lay_mini_island.make_diff_event_rate(200)
lay_mini_island2.make_diff_event_rate(200)
plt.figure(345)
plt.clf()
plt.plot(lay1.energy_bins_centers, lay1.integrated_ev_rate_no_trig, label="trig efficiency = 1")
plt.plot(lay1.energy_bins_centers, lay1.integrated_ev_rate, label="trihex all")
plt.plot(lay_tri250.energy_bins_centers, lay_tri250.integrated_ev_rate, label="trihex250")
plt.plot(lay_tri500.energy_bins_centers, lay_tri500.integrated_ev_rate, label="trihex500")
plt.plot(lay_tri1000.energy_bins_centers, lay_tri1000.integrated_ev_rate, label="trihex1000")
plt.plot(lay_island1.energy_bins_centers, lay_island1.integrated_ev_rate, label="island1")
plt.plot(lay_mini_island.energy_bins_centers, lay_mini_island.integrated_ev_rate, label="mini island")
plt.plot(lay_mini_island2.energy_bins_centers, lay_mini_island2.integrated_ev_rate, label="mini island2")
plt.ylabel('Differential event rate [day'+"$^{-1}$"+"PeV"+"$^{-1}$"+']')
plt.title('Detector area 200 km'+"$^2$")
plt.xlabel('Energy [EeV]')
plt.xscale('log')
plt.yscale('log')
plt.legend(loc=0)
# NOTE(review): saved to the current working directory, not plot_path — confirm intended.
plt.savefig('diff_rate_200km2.png')
#### hist 2d
# 2-D (energy x zenith) detection-rate, differential-rate and mean-multiplicity
# maps for each layout.
lay1.plot_2D_detection_rate()
lay1.plot_2D_differential_rate()
lay1.plot_mean_n_trig()
lay_tri250.plot_2D_detection_rate()
lay_tri250.plot_2D_differential_rate()
lay_tri250.plot_mean_n_trig()
lay_tri500.plot_2D_detection_rate()
lay_tri500.plot_2D_differential_rate()
lay_tri500.plot_mean_n_trig()
lay_tri1000.plot_2D_detection_rate()
lay_tri1000.plot_2D_differential_rate()
lay_tri1000.plot_mean_n_trig()
lay_island1.plot_2D_detection_rate()
lay_island1.plot_2D_differential_rate()
lay_island1.plot_mean_n_trig()
lay_island2.plot_2D_detection_rate()
lay_island2.plot_2D_differential_rate()
lay_island2.plot_mean_n_trig()
lay_mini_island2.plot_2D_detection_rate()
lay_mini_island2.plot_2D_differential_rate()
lay_mini_island2.plot_mean_n_trig()
|
kumikokoteraREPO_NAMEGRAND_toolsPATH_START.@GRAND_tools_extracted@GRAND_tools-master@grid_shape@tools@analysis_test_matiasfile_18092020.py@.PATH_END.py
|
{
"filename": "T01test_python_vs_cpp.py",
"repo_name": "nu-radio/NuRadioMC",
"repo_path": "NuRadioMC_extracted/NuRadioMC-master/NuRadioMC/test/SignalProp/T01test_python_vs_cpp.py",
"type": "Python"
}
|
import matplotlib.pyplot as plt
import numpy as np
import time
from NuRadioMC.SignalProp import analyticraytracing as ray
from NuRadioMC.utilities import medium
from NuRadioReco.utilities import units
import logging
from numpy import testing
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger('test_raytracing')
# Compare the C++ and pure-Python ray tracers on identical random geometries.
ice = medium.southpole_simple()
np.random.seed(0)  # set seed to have reproducible results
n_events = int(1e3)
# Emitter positions: radii drawn from a triangular distribution (more events at
# large r), uniform azimuth, uniform depth down to -3 km.
rmin = 50. * units.m
rmax = 3. * units.km
zmin = 0. * units.m
zmax = -3. * units.km
rr = np.random.triangular(rmin, rmax, rmax, n_events)
phiphi = np.random.uniform(0, 2 * np.pi, n_events)
xx = rr * np.cos(phiphi)
yy = rr * np.sin(phiphi)
zz = np.random.uniform(zmin, zmax, n_events)
points = np.array([xx, yy, zz]).T
x_receiver = np.array([0., 0., -5.])
# Up to 2 ray-tracing solutions per event; C0 parameter and attenuation spectrum.
results_C0s_cpp = np.zeros((n_events, 2))
n_freqs = 256//2 + 1
# n_freqs = 5
results_A_cpp = np.zeros((n_events, 2, n_freqs))
t_start = time.time()
ff = np.linspace(0, 500*units.MHz, n_freqs)
# tt = 0
# First pass: default backend (C++).
for iX, x in enumerate(points):
    # t_start2 = time.time()
    r = ray.ray_tracing(ice)
    r.set_start_and_end_point(x, x_receiver, )
    # tt += (time.time() - t_start2)
    r.find_solutions()
    if(r.has_solution()):
        for iS in range(r.get_number_of_solutions()):
            results_C0s_cpp[iX, iS] = r.get_results()[iS]['C0']
            results_A_cpp[iX, iS] = r.get_attenuation(iS, ff)
t_cpp = time.time() - t_start
print("CPP time = {:.1f} seconds = {:.2f}ms/event".format(t_cpp, 1000. * t_cpp / n_events))
# print("CPP time = {:.1f} seconds = {:.2f}ms/event".format(tt, 1000. * tt / n_events))
# Second pass: pure-Python implementation on the same geometries.
results_C0s_python = np.zeros((n_events, 2))
results_A_python = np.zeros((n_events, 2, n_freqs))
t_start = time.time()
for iX, x in enumerate(points):
    r = ray.ray_tracing(ice, use_cpp=False)
    r.set_start_and_end_point(x, x_receiver)
    r.find_solutions()
    if(r.has_solution()):
        for iS in range(r.get_number_of_solutions()):
            results_C0s_python[iX, iS] = r.get_results()[iS]['C0']
            results_A_python[iX, iS] = r.get_attenuation(iS, ff)
t_python = time.time() - t_start
print("Python time = {:.1f} seconds = {:.2f}ms/event".format(t_python, 1000. * t_python / n_events))
# Both backends must agree: tight tolerance on C0, looser on attenuation.
testing.assert_allclose(results_C0s_cpp, results_C0s_python, atol=1e-08, rtol=1e-05)
testing.assert_allclose(results_A_cpp, results_A_python, rtol=1e-2, atol=1e-3)
print('T01test_python_vs_cpp passed without issues')
|
nu-radioREPO_NAMENuRadioMCPATH_START.@NuRadioMC_extracted@NuRadioMC-master@NuRadioMC@test@SignalProp@T01test_python_vs_cpp.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "gammapy/gammapy",
"repo_path": "gammapy_extracted/gammapy-main/gammapy/maps/region/__init__.py",
"type": "Python"
}
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Region-based map classes (`RegionGeom`, `RegionNDMap`)."""
from .geom import RegionGeom
from .ndmap import RegionNDMap

__all__ = [
    "RegionGeom",
    "RegionNDMap",
]
|
gammapyREPO_NAMEgammapyPATH_START.@gammapy_extracted@gammapy-main@gammapy@maps@region@__init__.py@.PATH_END.py
|
{
"filename": "anatomy.py",
"repo_name": "matplotlib/matplotlib",
"repo_path": "matplotlib_extracted/matplotlib-main/galleries/examples/showcase/anatomy.py",
"type": "Python"
}
|
"""
===================
Anatomy of a figure
===================
This figure shows the name of several matplotlib elements composing a figure
"""
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.patches import Circle
from matplotlib.patheffects import withStroke
from matplotlib.ticker import AutoMinorLocator, MultipleLocator
# RGB used for all annotation circles and labels.
royal_blue = [0, 20/256, 82/256]

# make the figure
np.random.seed(19680801)
# Two smooth curves plus scattered samples bounded by them.
X = np.linspace(0.5, 3.5, 100)
Y1 = 3+np.cos(X)
Y2 = 1+np.cos(1+X/0.75)/2
Y3 = np.random.uniform(Y1, Y2, len(X))

fig = plt.figure(figsize=(7.5, 7.5))
ax = fig.add_axes([0.2, 0.17, 0.68, 0.7], aspect=1)

# Major ticks every 1.0, four minor subdivisions; minor ticks get labels too.
ax.xaxis.set_major_locator(MultipleLocator(1.000))
ax.xaxis.set_minor_locator(AutoMinorLocator(4))
ax.yaxis.set_major_locator(MultipleLocator(1.000))
ax.yaxis.set_minor_locator(AutoMinorLocator(4))
ax.xaxis.set_minor_formatter("{x:.2f}")

ax.set_xlim(0, 4)
ax.set_ylim(0, 4)

ax.tick_params(which='major', width=1.0, length=10, labelsize=14)
ax.tick_params(which='minor', width=1.0, length=5, labelsize=10,
               labelcolor='0.25')

ax.grid(linestyle="--", linewidth=0.5, color='.25', zorder=-10)

ax.plot(X, Y1, c='C0', lw=2.5, label="Blue signal", zorder=10)
ax.plot(X, Y2, c='C1', lw=2.5, label="Orange signal")
# Every third sample as an open square marker.
ax.plot(X[::3], Y3[::3], linewidth=0, markersize=9,
        marker='s', markerfacecolor='none', markeredgecolor='C4',
        markeredgewidth=2.5)

ax.set_title("Anatomy of a figure", fontsize=20, verticalalignment='bottom')
ax.set_xlabel("x Axis label", fontsize=14)
ax.set_ylabel("y Axis label", fontsize=14)
ax.legend(loc="upper right", fontsize=14)
# Annotate the figure
def annotate(x, y, text, code):
    """Highlight the point (x, y) with a circle and two stacked labels."""
    # Circle marker around the annotated feature, haloed in white.
    marker = Circle((x, y), radius=0.15, clip_on=False, zorder=10, linewidth=2.5,
                    edgecolor=royal_blue + [0.6], facecolor='none',
                    path_effects=[withStroke(linewidth=7, foreground='white')])
    ax.add_artist(marker)

    # Each label is drawn twice: a white-stroked background pass first, then a
    # plain coloured pass, so the halo of one text never clips another.
    for effects in ([withStroke(linewidth=7, foreground='white')], []):
        name_color = 'white' if effects else royal_blue
        ax.text(x, y-0.2, text, zorder=100,
                ha='center', va='top', weight='bold', color=name_color,
                style='italic', fontfamily='monospace',
                path_effects=effects)
        code_color = 'white' if effects else 'black'
        ax.text(x, y-0.33, code, zorder=100,
                ha='center', va='top', weight='normal', color=code_color,
                fontfamily='monospace', fontsize='medium',
                path_effects=effects)
# Label every named element of the figure: (x, y, display name, API call).
annotate(3.5, -0.13, "Minor tick label", "ax.xaxis.set_minor_formatter")
annotate(-0.03, 1.0, "Major tick", "ax.yaxis.set_major_locator")
annotate(0.00, 3.75, "Minor tick", "ax.yaxis.set_minor_locator")
annotate(-0.15, 3.00, "Major tick label", "ax.yaxis.set_major_formatter")
annotate(1.68, -0.39, "xlabel", "ax.set_xlabel")
annotate(-0.38, 1.67, "ylabel", "ax.set_ylabel")
annotate(1.52, 4.15, "Title", "ax.set_title")
annotate(1.75, 2.80, "Line", "ax.plot")
annotate(2.25, 1.54, "Markers", "ax.scatter")
annotate(3.00, 3.00, "Grid", "ax.grid")
annotate(3.60, 3.58, "Legend", "ax.legend")
annotate(2.5, 0.55, "Axes", "fig.subplots")
annotate(4, 4.5, "Figure", "plt.figure")
annotate(0.65, 0.01, "x Axis", "ax.xaxis")
annotate(0, 0.36, "y Axis", "ax.yaxis")
annotate(4.0, 0.7, "Spine", "ax.spines")

# frame around figure
fig.patch.set(linewidth=4, edgecolor='0.5')
plt.show()
# %%
#
# .. admonition:: References
#
# The use of the following functions, methods, classes and modules is shown
# in this example:
#
# - `matplotlib.pyplot.figure`
# - `matplotlib.axes.Axes.text`
# - `matplotlib.axis.Axis.set_minor_formatter`
# - `matplotlib.axis.Axis.set_major_locator`
# - `matplotlib.axis.Axis.set_minor_locator`
# - `matplotlib.patches.Circle`
# - `matplotlib.patheffects.withStroke`
# - `matplotlib.ticker.FuncFormatter`
|
matplotlibREPO_NAMEmatplotlibPATH_START.@matplotlib_extracted@matplotlib-main@galleries@examples@showcase@anatomy.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "maayane/catsHTM",
"repo_path": "catsHTM_extracted/catsHTM-master/build/lib/catsHTM/__init__.py",
"type": "Python"
}
|
# Package version string.
__version__='0.1.16'
from .script import * #import all functions from the __all__ in script.py, the moment you do import catsHTM
|
maayaneREPO_NAMEcatsHTMPATH_START.@catsHTM_extracted@catsHTM-master@build@lib@catsHTM@__init__.py@.PATH_END.py
|
{
"filename": "errors.py",
"repo_name": "plazar/coast_guard",
"repo_path": "coast_guard_extracted/coast_guard-master/errors.py",
"type": "Python"
}
|
"""
This file contains custom errors and warnings
for the CoastGuard timing pipeline.
Patrick Lazarus, Nov. 10, 2011
"""
import colour
import log
class CoastGuardError(Exception):
    """Base class for recoverable CoastGuard pipeline errors.

    The message is written to the pipeline log on construction (unless
    suppressed) and rendered with error colouring when stringified.
    """

    def __init__(self, msg, logit=True):
        # Record the message in the pipeline log unless explicitly suppressed.
        if logit:
            log.log(msg, 'error')
        super(CoastGuardError, self).__init__(msg)

    def __str__(self):
        plain = super(CoastGuardError, self).__str__()
        return colour.cstring(plain, 'error')

    def get_message(self):
        # Uncoloured message text, for callers that format output themselves.
        return super(CoastGuardError, self).__str__()
# Specific recoverable error categories. Each inherits CoastGuardError's
# logging and colourised formatting, adding only a distinct type for callers
# to catch selectively.
class SystemCallError(CoastGuardError):
    pass

class StandardProfileError(CoastGuardError):
    pass

class ToaError(CoastGuardError):
    pass

class DataReductionFailed(CoastGuardError):
    pass

class BadFile(CoastGuardError):
    pass

class CleanError(CoastGuardError):
    pass

class ConfigurationError(CoastGuardError):
    pass

class BadPulsarNameError(CoastGuardError):
    pass

class HeaderCorrectionError(CoastGuardError):
    pass

class DiagnosticError(CoastGuardError):
    pass

class InputError(CoastGuardError):
    pass

class FitError(CoastGuardError):
    pass

class FormatError(CoastGuardError):
    pass

class DatabaseError(CoastGuardError):
    pass

class BadStatusError(CoastGuardError):
    pass

class UnrecognizedValueError(CoastGuardError):
    pass

class TemplateGenerationError(CoastGuardError):
    pass

class CalibrationError(CoastGuardError):
    pass
# Fatal class of errors. These should not be caught.
class FatalCoastGuardError(Exception):
    """Unrecoverable error: logged at 'critical' level and meant to propagate."""

    def __init__(self, msg):
        log.log(msg, 'critical')
        super(FatalCoastGuardError, self).__init__(msg)

    def __str__(self):
        return colour.cstring(super(FatalCoastGuardError, self).__str__(), 'error')

    def get_message(self):
        # Uncoloured message text, for callers that format output themselves.
        return super(FatalCoastGuardError, self).__str__()

class BadColumnNameError(FatalCoastGuardError):
    pass
# Custom Warnings
class CoastGuardWarning(Warning):
    """Base warning class; rendered with warning colouring when stringified."""

    def __str__(self):
        return colour.cstring(super(CoastGuardWarning, self).__str__(), 'warning')
class LoggedCoastGuardWarning(CoastGuardWarning):
    """Warning that is also written to the CoastGuard log when created."""

    def __init__(self, msg):
        log.log(msg, 'warning')
        # Bug fix: super() was previously invoked as
        # super(CoastGuardWarning, self), which starts the MRO lookup *after*
        # CoastGuardWarning instead of after this class. Name the class itself
        # so the full inheritance chain is initialised.
        super(LoggedCoastGuardWarning, self).__init__(msg)
|
plazarREPO_NAMEcoast_guardPATH_START.@coast_guard_extracted@coast_guard-master@errors.py@.PATH_END.py
|
{
"filename": "__main__.py",
"repo_name": "NuSpaceSim/nupyprop",
"repo_path": "nupyprop_extracted/nupyprop-main/src/nupyprop/__main__.py",
"type": "Python"
}
|
import sys
if __name__ == "__main__":
from nupyprop.scripts.run import main as _main
sys.exit(_main())
|
NuSpaceSimREPO_NAMEnupypropPATH_START.@nupyprop_extracted@nupyprop-main@src@nupyprop@__main__.py@.PATH_END.py
|
{
"filename": "_font.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/ohlc/legendgrouptitle/_font.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class FontValidator(_plotly_utils.basevalidators.CompoundValidator):
    """Compound validator for the ``ohlc.legendgrouptitle.font`` property."""

    def __init__(
        self, plotly_name="font", parent_name="ohlc.legendgrouptitle", **kwargs
    ):
        super(FontValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            data_class_str=kwargs.pop("data_class_str", "Font"),
            # data_docs is a runtime string consumed by plotly's doc machinery.
            data_docs=kwargs.pop(
                "data_docs",
                """
            color

            family
                HTML font family - the typeface that will be
                applied by the web browser. The web browser
                will only be able to apply a font if it is
                available on the system which it operates.
                Provide multiple font families, separated by
                commas, to indicate the preference in which to
                apply fonts if they aren't available on the
                system. The Chart Studio Cloud (at
                https://chart-studio.plotly.com or on-premise)
                generates images on a server, where only a
                select number of fonts are installed and
                supported. These include "Arial", "Balto",
                "Courier New", "Droid Sans", "Droid Serif",
                "Droid Sans Mono", "Gravitas One", "Old
                Standard TT", "Open Sans", "Overpass", "PT Sans
                Narrow", "Raleway", "Times New Roman".
            lineposition
                Sets the kind of decoration line(s) with text,
                such as an "under", "over" or "through" as well
                as combinations e.g. "under+over", etc.
            shadow
                Sets the shape and color of the shadow behind
                text. "auto" places minimal shadow and applies
                contrast text font color. See
                https://developer.mozilla.org/en-
                US/docs/Web/CSS/text-shadow for additional
                options.
            size

            style
                Sets whether a font should be styled with a
                normal or italic face from its family.
            textcase
                Sets capitalization of text. It can be used to
                make text appear in all-uppercase or all-
                lowercase, or with each word capitalized.
            variant
                Sets the variant of the font.
            weight
                Sets the weight (or boldness) of the font.
""",
            ),
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@ohlc@legendgrouptitle@_font.py@.PATH_END.py
|
{
"filename": "test_orbit_astrometry_triple.py",
"repo_name": "iancze/PSOAP",
"repo_path": "PSOAP_extracted/PSOAP-master/tests/test_orbit_astrometry_triple.py",
"type": "Python"
}
|
import pytest
import os
import pkg_resources
import numpy as np
from psoap import orbit_astrometry
from psoap import constants as C
import matplotlib.pyplot as plt
import matplotlib
# Create plots of all of the orbits.
# If it doesn't already exist, create the output directory for this test module.
outdir = "tests/plots/triple/"
if not os.path.exists(outdir):
    print("Creating ", outdir)
    os.makedirs(outdir)
dpc = 388 # pc  -- distance to the system; dividing AU offsets by this gives angular offsets
# Orbital elements for GW Ori
# Alternative (trial) element set, kept for reference:
# a_in = 1.2 # [AU]
# e_in = 0.2
# i_in = 135.0 # [deg]
# omega_in = 45.0 # omega_1
# Omega_in = 30. # [deg]
# T0_in = 2450000.0 # [Julian Date]
#
# a_out = 8.0 # [AU]
# e_out = 0.2
# i_out = 135.0 # [deg]
# omega_out = 45.0 # omega_1
# Omega_out = 30. # [deg]
# T0_out = 2450000.0 # [Julian Date]
#
# M_1 = 3.0
# M_2 = 1.50 # [M_sun]
# M_3 = 1.0 # M_sun
#
# gamma = 27.0 # [km/s]
# Inner (A-B) orbit elements.
a_in = 10**(0.127) # [AU]
e_in = 0.074
i_in = 152.35 # [deg]
omega_in = 200.8 # [deg]
Omega_in = 275.8 # [deg]
T0_in = 2451853.6 # [Julian Date]
# Outer ((AB)-C) orbit elements.
a_out = 10**(0.974) # [AU]
e_out = 0.19 #
i_out = 149.0 # [deg]
omega_out = 305.8 # [deg]
Omega_out = 282.0 # [deg]
T0_out = 2453855 # [Julian Date]
# Component masses and systemic velocity.
M_1 = 3.65 # [M_sun]
M_2 = 1.844 # [M_sun]
M_3 = 0.84 # [M_sun]
gamma = 26.29 # [km/s]
# Periods via Kepler's third law: P^2 = 4 pi^2 a^3 / (G M_total); inner orbit
# uses M_1 + M_2, outer orbit uses the full system mass.
P_in = np.sqrt(4 * np.pi**2 / (C.G * (M_1 + M_2) * C.M_sun) * (a_in * C.AU)**3) / (24 * 3600) # [day]
P_out = np.sqrt(4 * np.pi**2 / (C.G * (M_1 + M_2 + M_3) * C.M_sun) * (a_out * C.AU)**3) / (24 * 3600) # [day]
# Pick a span of dates for one period
dates_in = np.linspace(T0_in, T0_in + P_in, num=600)
dates_out = np.linspace(T0_out, T0_out + P_out, num=600)
# Initialize the orbit
orb = orbit_astrometry.Triple(a_in, e_in, i_in, omega_in, Omega_in, T0_in, a_out, e_out, i_out, omega_out, Omega_out, T0_out, M_1, M_2, M_3, gamma, obs_dates=dates_out)
# Get the quantities to plot the outer orbit over one period first
full_dict = orb.get_full_orbit()
vAs, vBs, vCs, XYZ_ABs, XYZ_Cs, xy_ABs, xy_Cs = [full_dict[key] for key in ("vAs", "vBs", "vCs", "XYZ_ABs", "XYZ_Cs", "xy_ABs", "xy_Cs")]
# Convert to sky coordinates, using distance
alpha_dec_ABs = XYZ_ABs/dpc # [arcsec] -- NOTE(review): axis labels below say "mas"; confirm units
alpha_dec_Cs = XYZ_Cs/dpc # [arcsec]
# peri_A = orb.get_periastron_A()/dpc
# peri_B = orb.get_periastron_B()/dpc
# peri_BA = orb.get_periastron_BA()/dpc
#
# asc_A = orb.get_node_A()/dpc
# asc_B = orb.get_node_B()/dpc
# asc_BA = orb.get_node_BA()/dpc
#
# Since we are plotting vs one date, we need to plot the dots using a color scale so we can figure them out along the orbit.
# Set a colorscale for the lnprobs
cmap_primary = matplotlib.cm.get_cmap("Blues")
cmap_secondary = matplotlib.cm.get_cmap("Oranges")
# Maps a Julian date within the outer-period span onto [0, 1] for the colormaps.
norm = matplotlib.colors.Normalize(vmin=np.min(dates_out), vmax=np.max(dates_out))
# Determine colors based on the ending lnprob of each walker
def plot_points(ax, dates, xs, ys, primary):
    """Scatter (x, y) samples on *ax*, colour-coded by date.

    The module-level ``norm`` maps each date onto [0, 1]; primary points use
    the Blues colormap, secondary points use Oranges, so a point's colour
    encodes where along the orbit (in time) it falls.
    """
    cmap = cmap_primary if primary else cmap_secondary
    for when, px, py in zip(dates, xs, ys):
        ax.plot(px, py, "o", color=cmap(norm(when)), mew=0.1, ms=3, mec="k")
# Then, we will make 3D plots of the orbit so that we can square with what we think is happening.
# The final crowning grace will be a 3D matplotlib plot of the orbital path.
# Make a series of astrometric plots from different angles.
def test_AB_C_Z():
    """Sky-plane (view along Z) plot of the AB photocenter and C, outer period."""
    fig, axis = plt.subplots(nrows=1, figsize=(5, 5))
    axis.plot(0, 0, "ok", ms=2)
    plot_points(axis, dates_out, alpha_dec_ABs[:, 0], alpha_dec_ABs[:, 1], True)
    plot_points(axis, dates_out, alpha_dec_Cs[:, 0], alpha_dec_Cs[:, 1], False)
    axis.set_xlabel(r"$\Delta \delta$ mas")
    axis.set_ylabel(r"$\Delta \alpha \cos \delta$ mas")
    axis.set_aspect("equal", "datalim")
    fig.subplots_adjust(left=0.15, right=0.85, bottom=0.15, top=0.85)
    # Viewed from the observer (along -Z axis).
    fig.savefig(outdir + "orbit_AB_C_Z.png")
def test_AB_C_X():
    """View along the X axis: Y (North) horizontal, Z (towards observer) vertical."""
    fig, axis = plt.subplots(nrows=1, figsize=(5, 5))
    axis.plot(0, 0, "ok", ms=2)
    plot_points(axis, dates_out, alpha_dec_ABs[:, 1], alpha_dec_ABs[:, 2], True)
    plot_points(axis, dates_out, alpha_dec_Cs[:, 1], alpha_dec_Cs[:, 2], False)
    axis.set_xlabel(r"$\Delta \alpha \cos \delta$ mas")
    axis.set_ylabel(r"$\Delta Z$ mas (towards observer)")
    axis.axhline(0, ls=":", color="k")
    axis.set_aspect("equal", "datalim")
    fig.savefig(outdir + "orbit_AB_C_X.png")
def test_AB_C_Y():
    """View along the Y axis: Z (towards observer) horizontal, X (East) vertical."""
    fig, axis = plt.subplots(nrows=1, figsize=(5, 5))
    axis.plot(0, 0, "ok", ms=2)
    plot_points(axis, dates_out, alpha_dec_ABs[:, 2], alpha_dec_ABs[:, 0], True)
    plot_points(axis, dates_out, alpha_dec_Cs[:, 2], alpha_dec_Cs[:, 0], False)
    axis.axvline(0, ls=":", color="k")
    axis.set_xlabel(r"$\Delta Z$ mas (towards observer)")
    axis.set_ylabel(r"$\Delta \delta$ mas")
    axis.set_aspect("equal", "datalim")
    fig.savefig(outdir + "orbit_AB_C_Y.png")
def test_out_vel():
    """Radial velocities of A, B and C over one outer period, stacked panels."""
    fig, panels = plt.subplots(nrows=3, sharex=True, figsize=(8, 8))
    labels = (r"$v_A$ km/s", r"$v_B$ km/s", r"$v_C$ km/s")
    for panel, velocities, label in zip(panels, (vAs, vBs, vCs), labels):
        panel.plot(dates_out, velocities)
        panel.set_ylabel(label)
    panels[-1].set_xlabel("date")
    fig.savefig(outdir + "orbit_out_vel.png", dpi=400)
def test_AB_C_plane():
    """Outer orbits projected onto the orbital plane itself, in AU."""
    fig, axis = plt.subplots(nrows=1, figsize=(5, 5))
    plot_points(axis, dates_out, xy_ABs[:, 0], xy_ABs[:, 1], True)
    plot_points(axis, dates_out, xy_Cs[:, 0], xy_Cs[:, 1], False)
    axis.plot(0, 0, "ko", ms=10)
    axis.set_xlabel(r"$X$ [AU]")
    axis.set_ylabel(r"$Y$ [AU]")
    axis.set_aspect("equal", "datalim")
    fig.savefig(outdir + "orbit_AB_C_plane.png")
# Make the same plots for the inner orbit
full_dict = orb.get_full_orbit(dates_in)
# NOTE(review): these keys differ from the outer-orbit call above ("XYZ_As" vs
# "XYZ_A_locs") -- confirm against orbit_astrometry.Triple.get_full_orbit.
XYZ_As, XYZ_Bs, xy_As, xy_Bs = [full_dict[key] for key in ("XYZ_A_locs", "XYZ_B_locs", "xy_A_locs", "xy_B_locs")]
# Convert to sky coordinates, using distance
alpha_dec_As = XYZ_As/dpc # [arcsec]
alpha_dec_Bs = XYZ_Bs/dpc # [arcsec]
# Rebind the colour normalisation to the inner-period date span; the test
# functions below pick up this module-level name.
norm = matplotlib.colors.Normalize(vmin=np.min(dates_in), vmax=np.max(dates_in))
def test_A_B_Z():
    """Sky-plane (view along Z) plot of A and B over one inner period."""
    fig, axis = plt.subplots(nrows=1, figsize=(5, 5))
    axis.plot(0, 0, "ok", ms=2)
    plot_points(axis, dates_in, alpha_dec_As[:, 0], alpha_dec_As[:, 1], True)
    plot_points(axis, dates_in, alpha_dec_Bs[:, 0], alpha_dec_Bs[:, 1], False)
    axis.set_xlabel(r"$\Delta \delta$ mas")
    axis.set_ylabel(r"$\Delta \alpha \cos \delta$ mas")
    axis.set_aspect("equal", "datalim")
    fig.subplots_adjust(left=0.2, right=0.8, bottom=0.2, top=0.8)
    # Viewed from the observer (along -Z axis).
    fig.savefig(outdir + "orbit_A_B_Z.png")
def test_A_B_X():
    """Inner orbit viewed along X: Y (North) horizontal, Z (observer) vertical."""
    fig, axis = plt.subplots(nrows=1, figsize=(5, 5))
    axis.plot(0, 0, "ok", ms=2)
    plot_points(axis, dates_in, alpha_dec_As[:, 1], alpha_dec_As[:, 2], True)
    plot_points(axis, dates_in, alpha_dec_Bs[:, 1], alpha_dec_Bs[:, 2], False)
    axis.set_xlabel(r"$\Delta \alpha \cos \delta$ mas")
    axis.set_ylabel(r"$\Delta Z$ mas (towards observer)")
    axis.axhline(0, ls=":", color="k")
    axis.set_aspect("equal", "datalim")
    fig.subplots_adjust(left=0.2, right=0.8, bottom=0.2, top=0.8)
    fig.savefig(outdir + "orbit_A_B_X.png")
def test_A_B_Y():
    """Inner orbit viewed along Y: Z (observer) horizontal, X (East) vertical."""
    fig, axis = plt.subplots(nrows=1, figsize=(5, 5))
    axis.plot(0, 0, "ok", ms=2)
    plot_points(axis, dates_in, alpha_dec_As[:, 2], alpha_dec_As[:, 0], True)
    plot_points(axis, dates_in, alpha_dec_Bs[:, 2], alpha_dec_Bs[:, 0], False)
    axis.axvline(0, ls=":", color="k")
    axis.set_xlabel(r"$\Delta Z$ mas (towards observer)")
    axis.set_ylabel(r"$\Delta \delta$ mas")
    axis.set_aspect("equal", "datalim")
    fig.subplots_adjust(left=0.2, right=0.8, bottom=0.2, top=0.8)
    fig.savefig(outdir + "orbit_A_B_Y.png")
# Now plot the full 3D orbit over the long period.
# Rebind the colour normalisation back to the outer-period date span.
norm = matplotlib.colors.Normalize(vmin=np.min(dates_out), vmax=np.max(dates_out))
full_dict = orb.get_full_orbit(dates_out)
XYZ_As, XYZ_Bs, XYZ_Cs = [full_dict[key] for key in ("XYZ_As", "XYZ_Bs", "XYZ_Cs")]
# Convert to sky coordinates, using distance
alpha_dec_As = XYZ_As/dpc # [arcsec]
alpha_dec_Bs = XYZ_Bs/dpc # [arcsec]
alpha_dec_Cs = XYZ_Cs/dpc # [arcsec]
def test_A_B_C_Z():
    """All three bodies viewed along Z over the full outer period."""
    fig, axis = plt.subplots(nrows=1, figsize=(5, 5))
    axis.plot(0, 0, "ok", ms=2)
    plot_points(axis, dates_out, alpha_dec_Bs[:, 0], alpha_dec_Bs[:, 1], False)
    plot_points(axis, dates_out, alpha_dec_As[:, 0], alpha_dec_As[:, 1], True)
    plot_points(axis, dates_out, alpha_dec_Cs[:, 0], alpha_dec_Cs[:, 1], False)
    axis.set_xlabel(r"$\Delta \delta$ mas")
    axis.set_ylabel(r"$\Delta \alpha \cos \delta$ mas")
    axis.set_aspect("equal", "datalim")
    fig.subplots_adjust(left=0.2, right=0.8, bottom=0.2, top=0.8)
    fig.savefig(outdir + "orbit_A_B_C_Z.png")
def test_A_B_C_X():
    """All three bodies viewed along X: Y (North) horizontal, Z (observer) vertical."""
    fig, axis = plt.subplots(nrows=1, figsize=(5, 5))
    axis.plot(0, 0, "ok", ms=2)
    plot_points(axis, dates_out, alpha_dec_Bs[:, 1], alpha_dec_Bs[:, 2], False)
    plot_points(axis, dates_out, alpha_dec_As[:, 1], alpha_dec_As[:, 2], True)
    plot_points(axis, dates_out, alpha_dec_Cs[:, 1], alpha_dec_Cs[:, 2], False)
    axis.set_xlabel(r"$\Delta \alpha \cos \delta$ mas")
    axis.set_ylabel(r"$\Delta Z$ mas (towards observer)")
    axis.axhline(0, ls=":", color="k")
    axis.set_aspect("equal", "datalim")
    fig.subplots_adjust(left=0.2, right=0.8, bottom=0.2, top=0.8)
    fig.savefig(outdir + "orbit_A_B_C_X.png")
def test_A_B_C_Y():
    """All three bodies viewed along Y: Z (observer) horizontal, X (East) vertical."""
    fig, axis = plt.subplots(nrows=1, figsize=(5, 5))
    axis.plot(0, 0, "ok", ms=2)
    plot_points(axis, dates_out, alpha_dec_Bs[:, 2], alpha_dec_Bs[:, 0], False)
    plot_points(axis, dates_out, alpha_dec_As[:, 2], alpha_dec_As[:, 0], True)
    plot_points(axis, dates_out, alpha_dec_Cs[:, 2], alpha_dec_Cs[:, 0], False)
    axis.axvline(0, ls=":", color="k")
    axis.set_xlabel(r"$\Delta Z$ mas (towards observer)")
    axis.set_ylabel(r"$\Delta \delta$ mas")
    axis.set_aspect("equal", "datalim")
    fig.subplots_adjust(left=0.2, right=0.8, bottom=0.2, top=0.8)
    fig.savefig(outdir + "orbit_A_B_C_Y.png")
# Free all figure memory accumulated by the tests above.
plt.close('all')
|
ianczeREPO_NAMEPSOAPPATH_START.@PSOAP_extracted@PSOAP-master@tests@test_orbit_astrometry_triple.py@.PATH_END.py
|
{
"filename": "event.py",
"repo_name": "nu-radio/NuRadioMC",
"repo_path": "NuRadioMC_extracted/NuRadioMC-master/NuRadioReco/framework/event.py",
"type": "Python"
}
|
from __future__ import absolute_import, division, print_function
import pickle
import NuRadioReco.framework.station
import NuRadioReco.framework.radio_shower
import NuRadioReco.framework.hybrid_information
import NuRadioReco.framework.particle
import NuRadioReco.framework.parameters as parameters
import NuRadioReco.utilities.version
from six import itervalues
import collections
import logging
logger = logging.getLogger('Event')
class Event:
    """Top-level container for one event.

    Holds the event's stations, reconstructed radio showers, simulated
    showers and simulated MC particles, plus bookkeeping of which modules
    were executed on it (event level and per-station level).
    """

    def __init__(self, run_number, event_id):
        self._parameters = {}
        self.__run_number = run_number
        self._id = event_id
        self.__stations = collections.OrderedDict()
        self.__radio_showers = collections.OrderedDict()
        self.__sim_showers = collections.OrderedDict()
        self.__event_time = 0
        self.__particles = collections.OrderedDict()  # simulated MC particles, keyed by unique particle id
        self._generator_info = {}  # event-generation attributes copied over from the input file
        self.__hybrid_information = NuRadioReco.framework.hybrid_information.HybridInformation()
        self.__modules_event = []  # modules executed on event level, with their arguments
        self.__modules_station = {}  # modules executed on station level, keyed by station id

    def register_module_event(self, instance, name, kwargs):
        """
        registers modules applied to this event

        Parameters
        ----------
        instance: module instance
            the instance of the module that should be registered
        name: module name
            the name of the module
        kwargs:
            the key word arguments of the run method
        """
        self.__modules_event.append([name, instance, kwargs])

    def register_module_station(self, station_id, instance, name, kwargs):
        """
        registers modules applied to this event

        Parameters
        ----------
        station_id: int
            the station id
        instance: module instance
            the instance of the module that should be registered
        name: module name
            the name of the module
        kwargs:
            the key word arguments of the run method
        """
        if station_id not in self.__modules_station:
            self.__modules_station[station_id] = []
        # remember how many event-level modules had run when this station-level
        # module was registered, so iter_modules can interleave them in order
        iE = len(self.__modules_event)
        self.__modules_station[station_id].append([iE, name, instance, kwargs])

    def iter_modules(self, station_id=None):
        """
        returns an interator that loops over all modules. If a station id is provided it loops
        over all modules that are applied on event or station level (on this particular station). If no
        station_id is provided, the loop is only over the event modules.
        The order follows the sequence these modules were applied
        """
        iE = 0
        iS = 0
        while True:
            # station-level modules registered after iE event-level modules come first
            if(station_id in self.__modules_station and (len(self.__modules_station[station_id]) > iS) and self.__modules_station[station_id][iS][0] == iE):
                iS += 1
                yield self.__modules_station[station_id][iS - 1][1:]
            else:
                if(len(self.__modules_event) == iE):
                    break
                iE += 1
                yield self.__modules_event[iE - 1]

    def get_parameter(self, key):
        """Return the event parameter stored under *key* (an eventParameters enum)."""
        if not isinstance(key, parameters.eventParameters):
            logger.error("parameter key needs to be of type NuRadioReco.framework.parameters.eventParameters")
            raise ValueError("parameter key needs to be of type NuRadioReco.framework.parameters.eventParameters")
        return self._parameters[key]

    def set_parameter(self, key, value):
        """Store *value* as an event parameter under *key* (an eventParameters enum)."""
        if not isinstance(key, parameters.eventParameters):
            logger.error("parameter key needs to be of type NuRadioReco.framework.parameters.eventParameters")
            raise ValueError("parameter key needs to be of type NuRadioReco.framework.parameters.eventParameters")
        self._parameters[key] = value

    def has_parameter(self, key):
        """Return True if an event parameter is stored under *key*."""
        if not isinstance(key, parameters.eventParameters):
            logger.error("parameter key needs to be of type NuRadioReco.framework.parameters.eventParameters")
            raise ValueError("parameter key needs to be of type NuRadioReco.framework.parameters.eventParameters")
        return key in self._parameters

    def get_generator_info(self, key):
        """Return the generator attribute stored under *key* (a generatorAttributes enum)."""
        if not isinstance(key, parameters.generatorAttributes):
            logger.error("generator information key needs to be of type NuRadioReco.framework.parameters.generatorAttributes")
            raise ValueError("generator information key needs to be of type NuRadioReco.framework.parameters.generatorAttributes")
        return self._generator_info[key]

    def set_generator_info(self, key, value):
        """Store *value* as generator information under *key* (a generatorAttributes enum)."""
        if not isinstance(key, parameters.generatorAttributes):
            logger.error("generator information key needs to be of type NuRadioReco.framework.parameters.generatorAttributes")
            raise ValueError("generator information key needs to be of type NuRadioReco.framework.parameters.generatorAttributes")
        self._generator_info[key] = value

    def has_generator_info(self, key):
        """Return True if generator information is stored under *key*."""
        if not isinstance(key, parameters.generatorAttributes):
            logger.error("generator information key needs to be of type NuRadioReco.framework.parameters.generatorAttributes")
            raise ValueError("generator information key needs to be of type NuRadioReco.framework.parameters.generatorAttributes")
        return key in self._generator_info

    def get_id(self):
        """Return the event id."""
        return self._id

    def set_id(self, evt_id):
        """Set the event id."""
        self._id = evt_id

    def get_run_number(self):
        """Return the run number this event belongs to."""
        return self.__run_number

    def get_station(self, station_id=None):
        """
        Returns the station for a given station id.

        Parameters
        ----------
        station_id: int
            Id of the station you want to get. If None and event has only one station
            return it, otherwise raise error. (Default: None)

        Returns
        -------
        station: NuRadioReco.framework.station
        """
        if station_id is None:
            if len(self.get_station_ids()) == 1:
                return self.__stations[self.get_station_ids()[0]]
            else:
                err = "Event has more than one station, you have to specify \"station_id\""
                logger.error(err)
                raise ValueError(err)
        return self.__stations[station_id]

    def get_stations(self):
        """Yield all stations of this event."""
        for station in self.__stations.values():
            yield station

    def get_station_ids(self):
        """Return the list of station ids present in this event."""
        return list(self.__stations.keys())

    def set_station(self, station):
        """Add *station* to the event (replacing any station with the same id)."""
        self.__stations[station.get_id()] = station

    def has_triggered(self, trigger_name=None):
        """
        Returns true if any station has been triggered.

        Parameters
        ----------
        trigger_name: string or None (default None)
            * if None: The function returns False if not trigger was set. If one or multiple triggers were set,
              it returns True if any of those triggers triggered
            * if trigger name is set: return if the trigger with name 'trigger_name' has a trigger

        Returns
        -------
        has_triggered : bool
        """
        for station in self.get_stations():
            if station.has_triggered(trigger_name):
                return True
        # if it reaches this point, no station has a trigger
        return False

    def add_particle(self, particle):
        """
        Adds a MC particle to the event

        Parameters
        ----------
        particle : NuRadioReco.framework.particle.Particle
            The MC particle to be added to the event
        """
        if not isinstance(particle, NuRadioReco.framework.particle.Particle):
            # bug fix: these messages were missing the f-string prefix
            msg = f"Requested to add non-Particle item to the list of particles. {particle} needs to be an instance of Particle."
            logger.error(msg)
            raise TypeError(msg)

        if particle.get_id() in self.__particles:
            msg = f"MC particle with id {particle.get_id()} already exists. Simulated particle id needs to be unique per event"
            logger.error(msg)
            raise AttributeError(msg)

        self.__particles[particle.get_id()] = particle

    def get_particles(self):
        """
        Returns an iterator over the MC particles stored in the event
        """
        for particle in self.__particles.values():
            yield particle

    def get_particle(self, particle_id):
        """
        returns a specific MC particle identified by its unique id
        """
        if particle_id not in self.__particles:
            raise AttributeError(f"MC particle with id {particle_id} not present")
        return self.__particles[particle_id]

    def get_primary(self):
        """
        returns a first MC particle
        """
        if len(self.__particles) == 0:
            return None
        # assumes the primary particle carries id 0 -- TODO confirm against the generators
        return self.get_particle(0)

    def get_parent(self, particle_or_shower):
        """
        returns the parent of a particle or a shower
        """
        if isinstance(particle_or_shower, NuRadioReco.framework.base_shower.BaseShower):
            par_id = particle_or_shower[parameters.showerParameters.parent_id]
        elif isinstance(particle_or_shower, NuRadioReco.framework.particle.Particle):
            par_id = particle_or_shower[parameters.particleParameters.parent_id]
        else:
            raise ValueError("particle_or_shower needs to be an instance of NuRadioReco.framework.base_shower.BaseShower or NuRadioReco.framework.particle.Particle")
        if par_id is None:
            # bug fix: message was missing the f-string prefix
            logger.info(f"did not find parent for {particle_or_shower}")
            return None
        return self.get_particle(par_id)

    def has_particle(self, particle_id=None):
        """
        Returns true if at least one MC particle is stored in the event

        If particle_id is given, it checks if this particular MC particle exists
        """
        if particle_id is None:
            return len(self.__particles) > 0

        return particle_id in self.__particles

    def get_interaction_products(self, parent_particle, showers=True, particles=True):
        """
        Return all the daughter particles and showers generated in the interaction of the <parent_particle>

        Parameters
        ----------
        showers: bool
            Include simulated showers in the list
        particles: bool
            Include simulated particles in the list
        """
        parent_id = parent_particle.get_id()
        # iterate over sim_showers to look for parent id
        if showers is True:
            for shower in self.get_showers():
                if shower[parameters.showerParameters.parent_id] == parent_id:
                    yield shower
        # iterate over secondary particles to look for parent id
        if particles is True:
            for particle in self.get_particles():
                if particle[parameters.particleParameters.parent_id] == parent_id:
                    yield particle

    def add_shower(self, shower):
        """
        Adds a radio shower to the event

        Parameters
        ----------
        shower: RadioShower object
            The shower to be added to the event
        """
        if shower.get_id() in self.__radio_showers:
            # bug fix: message was missing the f-string prefix
            msg = f"shower with id {shower.get_id()} already exists. Shower id needs to be unique per event"
            logger.error(msg)
            raise AttributeError(msg)
        self.__radio_showers[shower.get_id()] = shower

    def get_showers(self, ids=None):
        """
        Returns an iterator over the showers stored in the event

        Parameters
        ----------
        ids: list of integers
            A list of station IDs. Only showers that are associated with
            all stations in the list are returned
        """
        for shower in self.__radio_showers.values():
            if ids is None:
                yield shower
            elif shower.has_station_ids(ids):
                yield shower

    def get_shower(self, shower_id):
        """
        returns a specific shower identified by its unique id
        """
        if shower_id not in self.__radio_showers:
            raise AttributeError(f"shower with id {shower_id} not present")
        return self.__radio_showers[shower_id]

    def has_shower(self, shower_id=None):
        """
        Returns true if at least one shower is stored in the event

        If shower_id is given, it checks if this particular shower exists
        """
        if shower_id is None:
            return len(self.__radio_showers) > 0
        return shower_id in self.__radio_showers

    def get_first_shower(self, ids=None):
        """
        Returns only the first shower stored in the event. Useful in cases
        when there is only one shower in the event.

        Parameters
        ----------
        ids: list of integers
            A list of station IDs. The first shower that is associated with
            all stations in the list is returned
        """
        if len(self.__radio_showers) == 0:
            return None
        if ids is None:
            shower_ids = list(self.__radio_showers.keys())
            return self.__radio_showers[shower_ids[0]]
        # bug fix: iterate the shower objects, not the dict keys
        for shower in self.__radio_showers.values():
            if shower.has_station_ids(ids):
                return shower
        return None

    def add_sim_shower(self, sim_shower):
        """
        Add a simulated shower to the event

        Parameters
        ----------
        sim_shower: RadioShower object
            The shower to be added to the event
        """
        if not isinstance(sim_shower, NuRadioReco.framework.radio_shower.RadioShower):
            raise AttributeError("sim_shower needs to be of type NuRadioReco.framework.radio_shower.RadioShower")
        if sim_shower.get_id() in self.__sim_showers:
            logger.error(f"sim shower with id {sim_shower.get_id()} already exists. Shower id needs to be unique per event")
            raise AttributeError(f"sim shower with id {sim_shower.get_id()} already exists. Shower id needs to be unique per event")
        self.__sim_showers[sim_shower.get_id()] = sim_shower

    def get_sim_showers(self):
        """
        Get an iterator over all simulated showers in the event
        """
        for shower in self.__sim_showers.values():
            yield shower

    def get_sim_shower(self, shower_id):
        """
        returns a specific shower identified by its unique id
        """
        if shower_id not in self.__sim_showers:
            raise AttributeError(f"sim shower with id {shower_id} not present")
        return self.__sim_showers[shower_id]

    def get_first_sim_shower(self, ids=None):
        """
        Returns only the first sim shower stored in the event. Useful in cases
        when there is only one shower in the event.

        Parameters
        ----------
        ids: list of integers
            A list of station IDs. The first shower that is associated with
            all stations in the list is returned
        """
        if len(self.__sim_showers) == 0:
            return None
        if ids is None:
            shower_ids = list(self.__sim_showers.keys())
            return self.__sim_showers[shower_ids[0]]
        # bug fix: iterate the shower objects, not the dict keys
        for shower in self.__sim_showers.values():
            if shower.has_station_ids(ids):
                return shower
        return None

    def has_sim_shower(self, shower_id=None):
        """
        Returns true if at least one simulated shower is stored in the event

        If shower_id is given, it checks if this particular shower exists
        """
        # bug fix: the two branches were swapped, so has_sim_shower() returned
        # "None in keys" and has_sim_shower(id) ignored the id entirely
        if shower_id is None:
            return len(self.__sim_showers) > 0
        return shower_id in self.__sim_showers

    def get_hybrid_information(self):
        """
        Get information about hybrid detector data stored in the event.
        """
        return self.__hybrid_information

    def serialize(self, mode):
        """Serialize the full event (stations, showers, particles, module log) to a pickle byte string."""
        stations_pkl = []
        try:
            commit_hash = NuRadioReco.utilities.version.get_NuRadioMC_commit_hash()
            self.set_parameter(parameters.eventParameters.hash_NuRadioMC, commit_hash)
        except Exception:  # bug fix: never use a bare except
            logger.warning("Event is serialized without commit hash!")
            self.set_parameter(parameters.eventParameters.hash_NuRadioMC, None)

        for station in self.get_stations():
            stations_pkl.append(station.serialize(mode))

        showers_pkl = [shower.serialize() for shower in self.get_showers()]
        sim_showers_pkl = [shower.serialize() for shower in self.get_sim_showers()]
        particles_pkl = [particle.serialize() for particle in self.get_particles()]

        hybrid_info = self.__hybrid_information.serialize()

        modules_out_event = []
        for value in self.__modules_event:  # remove module instances (this will just blow up the file size)
            modules_out_event.append([value[0], None, value[2]])
            invalid_keys = [key for key, val in value[2].items() if isinstance(val, BaseException)]
            if len(invalid_keys):
                logger.warning(f"The following arguments to module {value[0]} could not be serialized and will not be stored: {invalid_keys}")

        modules_out_station = {}
        for key in self.__modules_station:  # remove module instances (this will just blow up the file size)
            modules_out_station[key] = []
            for value in self.__modules_station[key]:
                modules_out_station[key].append([value[0], value[1], None, value[3]])
                invalid_keys = [key for key, val in value[3].items() if isinstance(val, BaseException)]
                if len(invalid_keys):
                    logger.warning(f"The following arguments to module {value[0]} could not be serialized and will not be stored: {invalid_keys}")

        data = {'_parameters': self._parameters,
                '__run_number': self.__run_number,
                '_id': self._id,
                '__event_time': self.__event_time,
                'stations': stations_pkl,
                'showers': showers_pkl,
                'sim_showers': sim_showers_pkl,
                'particles': particles_pkl,
                'hybrid_info': hybrid_info,
                'generator_info': self._generator_info,
                '__modules_event': modules_out_event,
                '__modules_station': modules_out_station
                }
        return pickle.dumps(data, protocol=4)

    def deserialize(self, data_pkl):
        """Restore an event from a byte string produced by :meth:`serialize`."""
        data = pickle.loads(data_pkl)

        for station_pkl in data['stations']:
            station = NuRadioReco.framework.station.Station(0)
            station.deserialize(station_pkl)
            self.set_station(station)

        if 'showers' in data.keys():
            for shower_pkl in data['showers']:
                shower = NuRadioReco.framework.radio_shower.RadioShower(None)
                shower.deserialize(shower_pkl)
                self.add_shower(shower)

        if 'sim_showers' in data.keys():
            for shower_pkl in data['sim_showers']:
                shower = NuRadioReco.framework.radio_shower.RadioShower(None)
                shower.deserialize(shower_pkl)
                self.add_sim_shower(shower)

        if 'particles' in data.keys():
            for particle_pkl in data['particles']:
                particle = NuRadioReco.framework.particle.Particle(None)
                particle.deserialize(particle_pkl)
                self.add_particle(particle)

        self.__hybrid_information = NuRadioReco.framework.hybrid_information.HybridInformation()
        if 'hybrid_info' in data.keys():
            self.__hybrid_information.deserialize(data['hybrid_info'])

        self._parameters = data['_parameters']
        self.__run_number = data['__run_number']
        self._id = data['_id']
        self.__event_time = data['__event_time']
        if 'generator_info' in data.keys():
            self._generator_info = data['generator_info']

        if "__modules_event" in data:
            self.__modules_event = data['__modules_event']
        if "__modules_station" in data:
            self.__modules_station = data['__modules_station']
|
nu-radioREPO_NAMENuRadioMCPATH_START.@NuRadioMC_extracted@NuRadioMC-master@NuRadioReco@framework@event.py@.PATH_END.py
|
{
"filename": "sphinx_util.py",
"repo_name": "dmlc/xgboost",
"repo_path": "xgboost_extracted/xgboost-master/doc/sphinx_util.py",
"type": "Python"
}
|
# -*- coding: utf-8 -*-
"""Helper utility function for customization."""
import os
import subprocess
import sys
# True when the docs are being built on Read the Docs (the service sets this env var).
READTHEDOCS_BUILD = os.environ.get('READTHEDOCS', None) is not None

if not os.path.exists('web-data'):
    # Fresh checkout: clear any stale partial directory, then clone.
    # Use argument lists instead of shell=True strings so no shell parsing is involved.
    subprocess.call(['rm', '-rf', 'web-data'])
    subprocess.call(['git', 'clone', 'https://github.com/dmlc/web-data'])
else:
    # Existing checkout: just fast-forward it.
    subprocess.call(['git', 'pull'], cwd='web-data')

sys.stderr.write('READTHEDOCS=%s\n' % (READTHEDOCS_BUILD))
|
dmlcREPO_NAMExgboostPATH_START.@xgboost_extracted@xgboost-master@doc@sphinx_util.py@.PATH_END.py
|
{
"filename": "NestedGridRedistributeNodesInst.cc.py",
"repo_name": "LLNL/spheral",
"repo_path": "spheral_extracted/spheral-main/src/Distributed/NestedGridRedistributeNodesInst.cc.py",
"type": "Python"
}
|
# C++ source template for the explicit instantiation of
# NestedGridRedistributeNodes; the "%(ndim)s" placeholder is filled in with
# the spatial dimension by the code-generation machinery that consumes this
# module.
text = """
//------------------------------------------------------------------------------
// Explicit instantiation.
//------------------------------------------------------------------------------
#include "Geometry/Dimension.hh"
#include "Distributed/NestedGridRedistributeNodes.cc"
namespace Spheral {
template class NestedGridRedistributeNodes< Dim< %(ndim)s > >;
}
"""
|
LLNLREPO_NAMEspheralPATH_START.@spheral_extracted@spheral-main@src@Distributed@NestedGridRedistributeNodesInst.cc.py@.PATH_END.py
|
{
"filename": "README.md",
"repo_name": "javicarron/pynkowski",
"repo_path": "pynkowski_extracted/pynkowski-main/README.md",
"type": "Markdown"
}
|
<img src="logo.png" width="100">
# Pynkowski
A Python package to compute Minkowski Functionals and other higher order statistics of input fields, as well as their expected values for different kinds of fields.
The **statistics** currently supported by this package are:
- Minkowski functionals.
- Maxima and minima distributions.
The formats currently supported for **input data** are the following:
- Scalar HEALPix maps, as the ones used by [healpy](https://healpy.readthedocs.io/), such as $T, \kappa, P^2$ (see [paper 1](https://arxiv.org/abs/2211.07562)).
- Polarisation HEALPix maps in the $SO(3)$ formalism (see [paper 2](https://arxiv.org/abs/2301.13191)).
- 2D and 3D numpy arrays (coming soon).
The theoretical expectation of some statistics is currently supported for the following **theoretical fields**:
- Gaussian fields (such as CMB $T$ or the initial density field, see [paper 1](https://arxiv.org/abs/2211.07562)).
- $\chi^2$ fields (such as CMB $P^2$, see [paper 1](https://arxiv.org/abs/2211.07562)).
- Spin 2 maps in the $SO(3)$ formalism (see [paper 2](https://arxiv.org/abs/2301.13191)).
We are actively working on the implementation of more statistics, data formats, and theoretical fields. If you want to contribute, we welcome and appreciate pull requests.
If you have any comments or suggestions, please feel free to contact us by email ([1](mailto:javier.carron@csic.es) and [2](mailto:alessandro.carones@roma2.infn.it )) or by opening a discussion thread or issue.
The repository can be found on [https://github.com/javicarron/pynkowski](https://github.com/javicarron/pynkowski).
# Installation
This package can be installed with:
```
pip install pynkowski
```
The dependencies are:
- [numpy](https://numpy.org/)
- [scipy](https://scipy.org/)
- [healpy](https://healpy.readthedocs.io/)
- [tqdm](https://github.com/tqdm/tqdm) (optional)
# Documentation
The documentation can be found on [https://javicarron.github.io/pynkowski](https://javicarron.github.io/pynkowski).
This package is divided into three modules: `stats`, `data`, and `theory`. Each module has a submodule for each kind of object, plus a general utilities submodule and a base submodule for the definition of the base class. In this way, extending the code to a new use case is reduced to creating a new submodule. The structure is the following:
- [`stats`](https://javicarron.github.io/pynkowski/pynkowski/stats.html)
- [`minkowski`](https://javicarron.github.io/pynkowski/pynkowski/stats/minkowski.html)
- [`extrema`](https://javicarron.github.io/pynkowski/pynkowski/stats/extrema.html)
- [`utils_st`](https://javicarron.github.io/pynkowski/pynkowski/stats/utils_st.html)
- [`data`](https://javicarron.github.io/pynkowski/pynkowski/data.html)
- [`base_da`](https://javicarron.github.io/pynkowski/pynkowski/data/base_da.html)
- [`array`](https://javicarron.github.io/pynkowski/pynkowski/data/array.html)
- [`healpix`](https://javicarron.github.io/pynkowski/pynkowski/data/healpix.html)
- [`utils_da`](https://javicarron.github.io/pynkowski/pynkowski/data/utils_da.html)
- [`theory`](https://javicarron.github.io/pynkowski/pynkowski/theory.html)
- [`base_th`](https://javicarron.github.io/pynkowski/pynkowski/theory/base_th.html)
- [`gaussian`](https://javicarron.github.io/pynkowski/pynkowski/theory/gaussian.html)
- [`spingaussian`](https://javicarron.github.io/pynkowski/pynkowski/theory/spingaussian.html)
- [`chi2`](https://javicarron.github.io/pynkowski/pynkowski/theory/chi2.html)
- [`utils_th`](https://javicarron.github.io/pynkowski/pynkowski/theory/utils_th.html)
The documentation for each submodule can be found by clicking on the links above or navigating the menu on the left.
# Example notebooks
- [Minkowski Functionals of a CMB temperature map and comparison with theory](https://github.com/javicarron/pynkowski/blob/main/examples/Temperature.ipynb).
- [Minkowski Functionals of a CMB polarization P² map and comparison with theory](https://github.com/javicarron/pynkowski/blob/main/examples/P2.ipynb).
# Authors
This package has been developed by [Javier Carrón Duque](https://www.javiercarron.com) and Alessandro Carones.
|
javicarronREPO_NAMEpynkowskiPATH_START.@pynkowski_extracted@pynkowski-main@README.md@.PATH_END.py
|
{
"filename": "_size.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/bar/textfont/_size.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class SizeValidator(_plotly_utils.basevalidators.NumberValidator):
    """Validator for the ``bar.textfont.size`` property."""

    def __init__(self, plotly_name="size", parent_name="bar.textfont", **kwargs):
        # Pop the defaults out of kwargs first so callers may override them;
        # whatever remains is forwarded verbatim to the base validator.
        array_ok = kwargs.pop("array_ok", True)
        edit_type = kwargs.pop("edit_type", "calc")
        min_value = kwargs.pop("min", 1)
        super(SizeValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            array_ok=array_ok,
            edit_type=edit_type,
            min=min_value,
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@bar@textfont@_size.py@.PATH_END.py
|
{
"filename": "targetpixelfile.py",
"repo_name": "KeplerGO/lightkurve",
"repo_path": "lightkurve_extracted/lightkurve-main/src/lightkurve/targetpixelfile.py",
"type": "Python"
}
|
"""Defines TargetPixelFile, KeplerTargetPixelFile, and TessTargetPixelFile."""
from __future__ import division
import datetime
import os
import warnings
import logging
import collections
from astropy.io import fits
from astropy.io.fits import Undefined, BinTableHDU
from astropy.nddata import Cutout2D
from astropy.table import Table
from astropy.wcs import WCS
from astropy.utils.exceptions import AstropyWarning
from astropy.coordinates import SkyCoord
from astropy.stats.funcs import median_absolute_deviation as MAD
from astropy.utils.decorators import deprecated
from astropy.time import Time
from astropy.units import Quantity
import astropy.units as u
import matplotlib
from matplotlib import animation
from matplotlib import patches
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import numpy as np
from scipy.ndimage import label
from tqdm import tqdm
from copy import deepcopy
from . import PACKAGEDIR, MPLSTYLE
from .lightcurve import LightCurve, KeplerLightCurve, TessLightCurve
from .prf import KeplerPRF
from .utils import (
KeplerQualityFlags,
TessQualityFlags,
plot_image,
LightkurveWarning,
LightkurveDeprecationWarning,
validate_method,
centroid_quadratic,
_query_solar_system_objects,
finalize_notebook_url
)
from .io import detect_filetype
__all__ = ["KeplerTargetPixelFile", "TessTargetPixelFile"]
log = logging.getLogger(__name__)
# OPEN: consider to move to utils and
# consolidate with the helper in lightcurve.py (for time label)
_TIME_LABEL_DICT_BRIEF = {"": "Phase", "bkjd": "[BKJD days]", "btjd": "[BTJD days]"}
def _time_label_brief(time):
format = getattr(time, "format", "")
return _TIME_LABEL_DICT_BRIEF.get(format, format.upper())
class HduToMetaMapping(collections.abc.Mapping):
    """Read-only, dict-like view of an HDU header.

    Mirrors the `astropy.timeseries.TimeSeries.meta` interface so that TPF
    metadata can be accessed the same way as ``LightCurve.meta``.
    """

    def __init__(self, hdu):
        # An OrderedDict (rather than a plain dict) is used because:
        # 1. it has a friendlier __repr__ / __str__;
        # 2. LightCurve.meta is an OrderedDict, so this keeps TPF and LC
        #    behavior consistent.
        self._dict = collections.OrderedDict()
        self._dict.update(hdu.header)

    def __getitem__(self, key):
        return self._dict[key]

    def __iter__(self):
        return iter(self._dict)

    def __len__(self):
        return len(self._dict)

    def __repr__(self):
        return repr(self._dict)

    def __str__(self):
        return str(self._dict)
class TargetPixelFile(object):
"""Abstract class representing FITS files which contain time series imaging data.
You should probably not be using this abstract class directly;
see `KeplerTargetPixelFile` and `TessTargetPixelFile` instead.
"""
    def __init__(self, path, quality_bitmask="default", targetid=None, **kwargs):
        """Open a Target Pixel File.

        Parameters
        ----------
        path : str or `~astropy.io.fits.HDUList`
            Path or URL of a FITS file, an already-open ``HDUList``,
            or an S3 cloud URI (``s3://...``).
        quality_bitmask : str or int
            Bitmask specifying which cadences count as good quality.
        targetid : object, optional
            Identifier of the target; stored as ``self.targetid``.
        **kwargs : dict
            Extra keyword arguments passed through to `astropy.io.fits.open`.
        """
        self.path = path
        if isinstance(path, fits.HDUList):
            self.hdu = path
        elif (isinstance(path, str) and path.startswith('s3://')):
            # Filename is an S3 cloud URI; anonymous access is assumed.
            self.hdu = fits.open(path, use_fsspec=True, fsspec_kwargs={"anon": True}, **kwargs)
        else:
            self.hdu = fits.open(self.path, **kwargs)
        try:
            self.quality_bitmask = quality_bitmask
            self.targetid = targetid
            # For consistency with `LightCurve`, provide a `meta` dictionary
            self.meta = HduToMetaMapping(self.hdu[0])
        except Exception as e:
            # Cannot instantiate TargetPixelFile, close the HDU to release the file handle
            self.hdu.close()
            raise e
    def __getitem__(self, key):
        """Implements indexing and slicing.

        Returns a new object of the same type containing only the selected
        good-quality cadences.

        Note: the implementation below cannot be simplified using
        `copy[1].data = copy[1].data[self.quality_mask][key]`
        due to the complicated behavior of AstroPy's `FITS_rec`.
        """
        # Step 1: determine the indexes of the data to return.
        # We start by determining the indexes of the good-quality cadences.
        quality_idx = np.where(self.quality_mask)[0]
        # Then we apply the index or slice to the good-quality indexes.
        if isinstance(key, int):
            # Ensure we always have a range; this is necessary to ensure
            # that we always get a `FITS_rec` instead of a `FITS_record` below.
            if key == -1:
                selected_idx = quality_idx[key:]
            else:
                selected_idx = quality_idx[key : key + 1]
        else:
            selected_idx = quality_idx[key]
        # Step 2: use the indexes to create a new copy of the data.
        with warnings.catch_warnings():
            # Ignore warnings about empty fields
            warnings.simplefilter("ignore", UserWarning)
            # AstroPy added `HDUList.copy()` in v3.1, allowing us to avoid manually
            # copying the HDUs, which brought along unexpected memory leaks.
            copy = self.hdu.copy()
            copy[1] = BinTableHDU(
                data=self.hdu[1].data[selected_idx], header=self.hdu[1].header
            )
        return self.__class__(
            copy, quality_bitmask=self.quality_bitmask, targetid=self.targetid
        )
    def __len__(self):
        # Number of good-quality cadences in the file.
        return len(self.time)
def __add__(self, other):
if isinstance(other, Quantity):
other = other.value
hdu = deepcopy(self.hdu)
hdu[1].data["FLUX"][self.quality_mask] += other
return type(self)(hdu, quality_bitmask=self.quality_bitmask)
def __mul__(self, other):
if isinstance(other, Quantity):
other = other.value
hdu = deepcopy(self.hdu)
hdu[1].data["FLUX"][self.quality_mask] *= other
hdu[1].data["FLUX_ERR"][self.quality_mask] *= other
return type(self)(hdu, quality_bitmask=self.quality_bitmask)
    def __rtruediv__(self, other):
        # NOTE(review): this is the *reflected* division hook (invoked for
        # ``other / tpf``), yet it divides the TPF's flux BY ``other`` —
        # the same result as ``tpf / other``.  Confirm whether this is
        # intentional before relying on ``scalar / tpf``.
        if isinstance(other, Quantity):
            other = other.value
        hdu = deepcopy(self.hdu)
        hdu[1].data["FLUX"][self.quality_mask] /= other
        hdu[1].data["FLUX_ERR"][self.quality_mask] /= other
        return type(self)(hdu, quality_bitmask=self.quality_bitmask)
    def __radd__(self, other):
        # Addition is commutative: delegate to __add__.
        return self.__add__(other)

    def __sub__(self, other):
        # Subtraction implemented as addition of the negated operand.
        return self.__add__(-1 * other)

    def __rsub__(self, other):
        # ``other - tpf``: negate the flux, then add ``other``.
        return (-1 * self).__add__(other)

    def __rmul__(self, other):
        # Multiplication is commutative: delegate to __mul__.
        return self.__mul__(other)

    def __truediv__(self, other):
        # ``tpf / other`` implemented as multiplication by the reciprocal.
        return self.__mul__(1.0 / other)

    def __div__(self, other):
        # Python 2 division hook; delegates to __truediv__.
        return self.__truediv__(other)

    def __rdiv__(self, other):
        # Python 2 reflected-division hook; delegates to __rtruediv__.
        return self.__rtruediv__(other)
    @property
    @deprecated("2.0", alternative="time", warning_type=LightkurveDeprecationWarning)
    def astropy_time(self):
        """Returns an AstroPy Time object for all good-quality cadences."""
        # Deprecated alias kept for backward compatibility; the decorator
        # emits a deprecation warning on access.
        return self.time
    @property
    def hdu(self):
        """The underlying `~astropy.io.fits.HDUList` backing this object."""
        return self._hdu
@hdu.setter
def hdu(self, value, keys=("FLUX", "QUALITY")):
"""Verify the file format when setting the value of `self.hdu`.
Raises a ValueError if `value` does not appear to be a Target Pixel File.
"""
for key in keys:
if ~(
np.any(
[
value[1].header[ttype] == key
for ttype in value[1].header["TTYPE*"]
]
)
):
raise ValueError(
"File {} does not have a {} column, "
"is this a target pixel file?".format(self.path, key)
)
self._hdu = value
def get_keyword(self, keyword, hdu=0, default=None):
"""Returns a header keyword value.
If the keyword is Undefined or does not exist,
then return ``default`` instead.
"""
return self.hdu[hdu].header.get(keyword, default)
    @property
    @deprecated(
        "2.0", alternative="get_header()", warning_type=LightkurveDeprecationWarning
    )
    def header(self):
        """DEPRECATED. Please use ``get_header()`` instead."""
        # Deprecated alias for the primary-extension header; the decorator
        # emits a deprecation warning on access.
        return self.hdu[0].header
    def get_header(self, ext=0):
        """Returns the metadata embedded in the file.

        Target Pixel Files contain embedded metadata headers spread across three
        different FITS extensions:

        1. The "PRIMARY" extension (``ext=0``) provides a metadata header
           providing details on the target and its CCD position.
        2. The "PIXELS" extension (``ext=1``) provides details on the
           data column and their coordinate system (WCS).
        3. The "APERTURE" extension (``ext=2``) provides details on the
           aperture pixel mask and the expected coordinate system (WCS).

        Parameters
        ----------
        ext : int or str
            FITS extension name or number (e.g. ``0`` or ``"PRIMARY"``).

        Returns
        -------
        header : `~astropy.io.fits.header.Header`
            Header object containing metadata keywords.
        """
        # Delegate to astropy's HDUList indexing, which accepts both
        # extension numbers and extension names.
        return self.hdu[ext].header
    @property
    def ra(self):
        """Right Ascension of target ('RA_OBJ' header keyword)."""
        return self.get_keyword("RA_OBJ")

    @property
    def dec(self):
        """Declination of target ('DEC_OBJ' header keyword)."""
        return self.get_keyword("DEC_OBJ")

    @property
    def column(self):
        """CCD pixel column number ('1CRV5P' header keyword)."""
        # Defaults to 0 when the keyword is absent (e.g. cutout products).
        return self.get_keyword("1CRV5P", hdu=1, default=0)

    @property
    def row(self):
        """CCD pixel row number ('2CRV5P' header keyword)."""
        # Defaults to 0 when the keyword is absent (e.g. cutout products).
        return self.get_keyword("2CRV5P", hdu=1, default=0)

    @property
    def pos_corr1(self):
        """Returns the column position correction."""
        # Per-cadence values from the POS_CORR1 data column, restricted to
        # good-quality cadences (presumably in pixels — confirm against
        # mission documentation).
        return self.hdu[1].data["POS_CORR1"][self.quality_mask]

    @property
    def pos_corr2(self):
        """Returns the row position correction."""
        # Per-cadence values from the POS_CORR2 data column, restricted to
        # good-quality cadences (presumably in pixels — confirm against
        # mission documentation).
        return self.hdu[1].data["POS_CORR2"][self.quality_mask]
@property
def pipeline_mask(self):
"""Returns the optimal aperture mask used by the pipeline.
If the aperture extension is missing from the file, a mask
composed of all `True` values will be returned.
"""
# Both Kepler and TESS flag the pixels in the optimal aperture using
# bit number 2 in the aperture mask extension, e.g. see Section 6 of
# the TESS Data Products documentation (EXP-TESS-ARC-ICD-TM-0014.pdf).
try:
return self.hdu[2].data & 2 > 0
except (IndexError, TypeError):
# `IndexError` may be raised if the aperture extension (#2) is missing
# `TypeError` may be raised because early versions of TESScut returned floats in HDU 2
return np.ones(self.hdu[1].data["FLUX"][0].shape, dtype=bool)
    @property
    def shape(self):
        """Return the cube dimension shape."""
        # Shape of the FLUX data cube: (n_cadences, n_rows, n_columns).
        return self.flux.shape
@property
def time(self) -> Time:
"""Returns the time for all good-quality cadences."""
time_values = self.hdu[1].data["TIME"][self.quality_mask]
# Some data products have missing time values;
# we need to set these to zero or `Time` cannot be instantiated.
time_values[~np.isfinite(time_values)] = 0
bjdrefi = self.hdu[1].header.get("BJDREFI")
if bjdrefi == 2454833:
time_format = "bkjd"
elif bjdrefi == 2457000:
time_format = "btjd"
else:
time_format = "jd"
return Time(
time_values,
scale=self.hdu[1].header.get("TIMESYS", "tdb").lower(),
format=time_format,
)
@property
def cadenceno(self):
"""Return the cadence number for all good-quality cadences."""
cadenceno = self.hdu[1].data["CADENCENO"][self.quality_mask]
# The TESScut service returns an array of zeros as CADENCENO.
# If this is the case, return frame numbers from 0 instead.
if cadenceno[0] == 0:
return np.arange(0, len(cadenceno), 1, dtype=int)
return cadenceno
    @property
    def nan_time_mask(self):
        """Returns a boolean mask flagging cadences whose time is `nan`.

        Note: the `time` property substitutes 0 for non-finite time values,
        so this mask flags entries whose stored time equals zero.
        """
        return self.time.value == 0
@property
def flux(self) -> Quantity:
"""Returns the flux for all good-quality cadences."""
unit = None
if self.get_header(1).get("TUNIT5") == "e-/s":
unit = "electron/s"
return Quantity(self.hdu[1].data["FLUX"][self.quality_mask], unit=unit)
@property
def flux_err(self) -> Quantity:
"""Returns the flux uncertainty for all good-quality cadences."""
unit = None
if self.get_header(1).get("TUNIT6") == "e-/s":
unit = "electron/s"
return Quantity(self.hdu[1].data["FLUX_ERR"][self.quality_mask], unit=unit)
    @property
    def flux_bkg(self) -> Quantity:
        """Returns the background flux for all good-quality cadences."""
        # NOTE(review): the unit is hard-coded as electron/s here (no TUNIT
        # check, unlike `flux`) — assumed correct for pipeline products.
        return Quantity(
            self.hdu[1].data["FLUX_BKG"][self.quality_mask], unit="electron/s"
        )

    @property
    def flux_bkg_err(self) -> Quantity:
        """Returns the background flux uncertainty for all good-quality cadences."""
        return Quantity(
            self.hdu[1].data["FLUX_BKG_ERR"][self.quality_mask], unit="electron/s"
        )
    @property
    def quality(self):
        """Returns the quality flag integer of every good cadence."""
        # Raw mission QUALITY bitmask values, restricted to cadences that
        # pass the current `quality_mask`.
        return self.hdu[1].data["QUALITY"][self.quality_mask]
@property
def wcs(self) -> WCS:
"""Returns an `astropy.wcs.WCS` object with the World Coordinate System
solution for the target pixel file.
Returns
-------
w : `astropy.wcs.WCS` object
WCS solution
"""
if "MAST" in self.hdu[0].header["ORIGIN"]: # Is it a TessCut TPF?
# TPF's generated using the TESSCut service in early 2019 only appear
# to contain a valid WCS in the second extension (the aperture
# extension), so we treat such files as a special case.
return WCS(self.hdu[2])
else:
# For standard (Ames-pipeline-produced) TPF files, we use the WCS
# keywords provided in the first extension (the data table extension).
# Specifically, we use the WCS keywords for the 5th data column (FLUX).
wcs_keywords = {
"1CTYP5": "CTYPE1",
"2CTYP5": "CTYPE2",
"1CRPX5": "CRPIX1",
"2CRPX5": "CRPIX2",
"1CRVL5": "CRVAL1",
"2CRVL5": "CRVAL2",
"1CUNI5": "CUNIT1",
"2CUNI5": "CUNIT2",
"1CDLT5": "CDELT1",
"2CDLT5": "CDELT2",
"11PC5": "PC1_1",
"12PC5": "PC1_2",
"21PC5": "PC2_1",
"22PC5": "PC2_2",
"NAXIS1": "NAXIS1",
"NAXIS2": "NAXIS2",
}
mywcs = {}
for oldkey, newkey in wcs_keywords.items():
if self.hdu[1].header.get(oldkey, None) is not None:
mywcs[newkey] = self.hdu[1].header[oldkey]
return WCS(mywcs)
    def get_coordinates(self, cadence="all"):
        """Returns two 3D arrays of RA and Dec values in decimal degrees.

        If cadence number is given, returns 2D arrays for that cadence. If
        cadence is 'all' returns one RA, Dec value for each pixel in every cadence.
        Uses the WCS solution and the POS_CORR data from TPF header.

        Parameters
        ----------
        cadence : 'all' or int
            Which cadences to return the RA Dec coordinates for.

        Returns
        -------
        ra : numpy array, same shape as tpf.flux[cadence]
            Array containing RA values for every pixel, for every cadence.
        dec : numpy array, same shape as tpf.flux[cadence]
            Array containing Dec values for every pixel, for every cadence.
        """
        w = self.wcs
        # Pixel grid for one frame; X indexes columns, Y indexes rows.
        X, Y = np.meshgrid(np.arange(self.shape[2]), np.arange(self.shape[1]))
        pos_corr1_pix = np.copy(self.hdu[1].data["POS_CORR1"])
        pos_corr2_pix = np.copy(self.hdu[1].data["POS_CORR2"])
        # We zero POS_CORR* when the values are NaN or make no sense (>50px)
        with warnings.catch_warnings():  # Comparing NaNs to numbers is OK here
            warnings.simplefilter("ignore", RuntimeWarning)
            bad = np.any(
                [
                    ~np.isfinite(pos_corr1_pix),
                    ~np.isfinite(pos_corr2_pix),
                    np.abs(pos_corr1_pix - np.nanmedian(pos_corr1_pix)) > 50,
                    np.abs(pos_corr2_pix - np.nanmedian(pos_corr2_pix)) > 50,
                ],
                axis=0,
            )
        pos_corr1_pix[bad], pos_corr2_pix[bad] = 0, 0
        # Add in POSCORRs: broadcast the per-cadence corrections over the
        # static pixel grid, giving (n_cadences, n_rows, n_cols) arrays.
        X = np.atleast_3d(X).transpose([2, 0, 1]) + np.atleast_3d(
            pos_corr1_pix
        ).transpose([1, 2, 0])
        Y = np.atleast_3d(Y).transpose([2, 0, 1]) + np.atleast_3d(
            pos_corr2_pix
        ).transpose([1, 2, 0])
        # Pass through WCS (origin=0, i.e. zero-based pixel coordinates)
        ra, dec = w.wcs_pix2world(X.ravel(), Y.ravel(), 0)
        ra = ra.reshape((pos_corr1_pix.shape[0], self.shape[1], self.shape[2]))
        dec = dec.reshape((pos_corr2_pix.shape[0], self.shape[1], self.shape[2]))
        ra, dec = ra[self.quality_mask], dec[self.quality_mask]
        if cadence != "all":
            return ra[cadence], dec[cadence]
        return ra, dec
    def show_properties(self):
        """Prints a description of all non-callable attributes.

        Prints in order of type (ints, strings, lists, arrays, others).
        """
        attrs = {}
        # Collect every public, non-callable attribute together with a short
        # printable description and a coarse type tag used for ordering below.
        for attr in dir(self):
            if not attr.startswith("_") and attr != "header" and attr != "astropy_time":
                res = getattr(self, attr)
                if callable(res):
                    continue
                if attr == "hdu":
                    # Summarize the HDUList as the comma-joined EXTNAMEs of
                    # its extensions.
                    attrs[attr] = {"res": res, "type": "list"}
                    for idx, r in enumerate(res):
                        if idx == 0:
                            attrs[attr]["print"] = "{}".format(r.header["EXTNAME"])
                        else:
                            attrs[attr]["print"] = "{}, {}".format(
                                attrs[attr]["print"], "{}".format(r.header["EXTNAME"])
                            )
                    continue
                else:
                    attrs[attr] = {"res": res}
                    if isinstance(res, int):
                        attrs[attr]["print"] = "{}".format(res)
                        attrs[attr]["type"] = "int"
                    elif isinstance(res, np.ndarray):
                        attrs[attr]["print"] = "array {}".format(res.shape)
                        attrs[attr]["type"] = "array"
                    elif isinstance(res, list):
                        attrs[attr]["print"] = "list length {}".format(len(res))
                        attrs[attr]["type"] = "list"
                    elif isinstance(res, str):
                        if res == "":
                            attrs[attr]["print"] = "{}".format("None")
                        else:
                            attrs[attr]["print"] = "{}".format(res)
                        attrs[attr]["type"] = "str"
                    elif attr == "wcs":
                        attrs[attr]["print"] = "astropy.wcs.wcs.WCS"
                        attrs[attr]["type"] = "other"
                    else:
                        attrs[attr]["print"] = "{}".format(type(res))
                        attrs[attr]["type"] = "other"
        output = Table(names=["Attribute", "Description"], dtype=[object, object])
        idx = 0
        # Emit rows grouped by type tag, in a fixed display order.
        types = ["int", "str", "list", "array", "other"]
        for typ in types:
            for attr, dic in attrs.items():
                if dic["type"] == typ:
                    output.add_row([attr, dic["print"]])
                    idx += 1
        output.pprint(max_lines=-1, max_width=-1)
def to_lightcurve(self, method="sap", corrector=None, **kwargs):
"""Performs photometry on the pixel data and returns a LightCurve object.
The valid keyword arguments depends on the method chosen:
- 'sap' or 'aperture': see the docstring of `extract_aperture_photometry()`
- 'prf': see the docstring of `extract_prf_photometry()`
- 'pld': see the docstring of `to_corrector()`
For methods 'sff' and 'cbv', they are syntactic shortcuts of:
- creating a lightcurve using 'sap' method,
- corrects the created lightcurve using `LightCurve.to_corrector()`
of the respective method.
Parameters
----------
method : 'aperture', 'prf', 'sap', 'sff', 'cbv', 'pld'.
Photometry method to use. 'aperture' is an alias of 'sap'.
**kwargs : dict
Extra arguments to be passed to the `extract_aperture_photometry()`, the
`extract_prf_photometry()`, or the `to_corrector()` method of this class.
Returns
-------
lc : LightCurve object
Object containing the resulting lightcurve.
"""
method = validate_method(method, supported_methods=["aperture", "prf", "sap", "sff", "cbv", "pld"])
if method in ["aperture", "sap"]:
return self.extract_aperture_photometry(**kwargs)
elif method == "prf":
return self.prf_lightcurve(**kwargs)
elif method in ["sff", "cbv"]:
lc = self.extract_aperture_photometry(**kwargs)
return lc.to_corrector(method).correct()
elif method == "pld":
return self.to_corrector("pld", **kwargs).correct()
def _resolve_default_aperture_mask(self, aperture_mask):
if isinstance(aperture_mask, str):
if (aperture_mask == "default"):
# returns 'pipeline', unless it is missing. Falls back to 'threshold'
return "pipeline" if np.any(self.pipeline_mask) else "threshold"
else:
return aperture_mask
else:
return aperture_mask
    def _parse_aperture_mask(self, aperture_mask):
        """Parse the `aperture_mask` parameter as given by a user.

        The `aperture_mask` parameter is accepted by a number of methods.
        This method ensures that the parameter is always parsed in the same way.

        Parameters
        ----------
        aperture_mask : array-like, 'pipeline', 'all', 'threshold', 'default',
                        'background', or None
            A boolean array describing the aperture such that `True` means
            that the pixel will be used.
            If None or 'all' are passed, all pixels will be used.
            If 'pipeline' is passed, the mask suggested by the official pipeline
            will be returned.
            If 'threshold' is passed, all pixels brighter than 3-sigma above
            the median flux will be used.
            If 'default' is passed, 'pipeline' mask will be used when available,
            with 'threshold' as the fallback.
            If 'background' is passed, all pixels fainter than the median flux
            will be used.
            If 'empty' is passed, no pixels will be used.

        Returns
        -------
        aperture_mask : ndarray
            2D boolean numpy array containing `True` for selected pixels.
        """
        aperture_mask = self._resolve_default_aperture_mask(aperture_mask)
        # If 'pipeline' mask is requested but missing, fall back to 'threshold'
        # To Do: Should pipeline mask always be True?
        if isinstance(aperture_mask, str):
            # NOTE(review): `~` on np.any's np.bool_ result acts as logical
            # negation here, equivalent to `not`.
            if (aperture_mask == "pipeline") and ~np.any(self.pipeline_mask):
                raise ValueError(
                    "_parse_aperture_mask: 'pipeline' is requested, but it is missing or empty."
                )
        # Input validation: a user-supplied array must match one frame's shape.
        if hasattr(aperture_mask, "shape"):
            if (aperture_mask.shape != self.shape[1:]):
                raise ValueError(
                    "`aperture_mask` has shape {}, "
                    "but the flux data has shape {}"
                    "".format(aperture_mask.shape, self.shape[1:])
                )
        if aperture_mask is None:
            aperture_mask = np.ones((self.shape[1], self.shape[2]), dtype=bool)
        elif isinstance(aperture_mask, str):
            if aperture_mask.lower() == "all":
                aperture_mask = np.ones((self.shape[1], self.shape[2]), dtype=bool)
            elif aperture_mask.lower() == "pipeline":
                aperture_mask = self.pipeline_mask
            elif aperture_mask.lower() == "threshold":
                aperture_mask = self.create_threshold_mask()
            elif aperture_mask.lower() == "background":
                # 'background' = complement of the zero-threshold mask,
                # i.e. pixels fainter than the median flux.
                aperture_mask = ~self.create_threshold_mask(
                    threshold=0, reference_pixel=None
                )
            elif aperture_mask.lower() == "empty":
                aperture_mask = np.zeros((self.shape[1], self.shape[2]), dtype=bool)
        elif isinstance(aperture_mask, np.ndarray):
            # Kepler and TESS pipeline style integer flags
            if np.issubdtype(aperture_mask.dtype, np.dtype('>i4')):
                aperture_mask = (aperture_mask & 2) == 2
            elif np.issubdtype(aperture_mask.dtype, int):
                if ((aperture_mask & 2) == 2).any():
                    # Kepler and TESS pipeline style integer flags
                    aperture_mask = (aperture_mask & 2) == 2
                else:
                    aperture_mask = aperture_mask.astype(bool)
            elif np.issubdtype(aperture_mask.dtype, float):
                aperture_mask = aperture_mask.astype(bool)
        # Remember the most recently used mask (e.g. for plotting overlays).
        self._last_aperture_mask = aperture_mask
        return aperture_mask
    def create_threshold_mask(self, threshold=3, reference_pixel="center"):
        """Returns an aperture mask creating using the thresholding method.

        This method will identify the pixels in the TargetPixelFile which show
        a median flux that is brighter than `threshold` times the standard
        deviation above the overall median. The standard deviation is estimated
        in a robust way by multiplying the Median Absolute Deviation (MAD)
        with 1.4826.

        If the thresholding method yields multiple contiguous regions, then
        only the region closest to the (col, row) coordinate specified by
        `reference_pixel` is returned. For example, `reference_pixel=(0, 0)`
        will pick the region closest to the bottom left corner.
        By default, the region closest to the center of the mask will be
        returned. If `reference_pixel=None` then all regions will be returned.

        Parameters
        ----------
        threshold : float
            A value for the number of sigma by which a pixel needs to be
            brighter than the median flux to be included in the aperture mask.
        reference_pixel: (int, int) tuple, 'center', or None
            (col, row) pixel coordinate closest to the desired region.
            For example, use `reference_pixel=(0,0)` to select the region
            closest to the bottom left corner of the target pixel file.
            If 'center' (default) then the region closest to the center pixel
            will be selected. If `None` then all regions will be selected.

        Returns
        -------
        aperture_mask : ndarray
            2D boolean numpy array containing `True` for pixels above the
            threshold.
        """
        if reference_pixel == "center":
            reference_pixel = (self.shape[2] / 2, self.shape[1] / 2)
        # Calculate the median image
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            median_image = np.nanmedian(self.flux, axis=0)
        vals = median_image[np.isfinite(median_image)].flatten()
        # Calculate the threshold value in flux units
        # (1.4826 converts MAD to an estimate of the standard deviation).
        mad_cut = (1.4826 * MAD(vals) * threshold) + np.nanmedian(median_image)
        # Create a mask containing the pixels above the threshold flux
        threshold_mask = np.nan_to_num(median_image) >= mad_cut
        if (reference_pixel is None) or (not threshold_mask.any()):
            # return all regions above threshold
            return threshold_mask
        else:
            # Return only the contiguous region closest to `reference_pixel`.
            # First, label all the regions:
            labels = label(threshold_mask)[0]
            # For all pixels above threshold, compute distance to reference pixel:
            label_args = np.argwhere(labels > 0)
            distances = [
                np.hypot(crd[0], crd[1])
                for crd in label_args
                - np.array([reference_pixel[1], reference_pixel[0]])
            ]
            # Which label corresponds to the closest pixel?
            closest_arg = label_args[np.argmin(distances)]
            closest_label = labels[closest_arg[0], closest_arg[1]]
            return labels == closest_label
def estimate_background(self, aperture_mask="background"):
"""Returns an estimate of the median background level in the FLUX column.
In the case of official Kepler and TESS Target Pixel Files, the
background estimates should be close to zero because these products
have already been background-subtracted by the pipeline (i.e. the values
in the `FLUX_BKG` column have been subtracted from the values in `FLUX`).
Background subtraction is often imperfect however, and this method aims
to allow users to estimate residual background signals using different
methods.
Target Pixel Files created by the MAST TESSCut service have
not been background-subtracted. For such products, or other community-
generated pixel files, this method provides a first-order estimate of
the background levels.
This method estimates the per-pixel background flux over time by
computing the median pixel value across the `aperture mask`.
Parameters
----------
aperture_mask : 'background', 'all', or array-like
Which pixels should be used to estimate the background?
If None or 'all' are passed, all pixels in the pixel file will be
used. If 'background' is passed, all pixels fainter than the
median flux will be used. Alternatively, users can pass a boolean
array describing the aperture mask such that `True` means that the
pixel will be used.
Returns
-------
lc : `LightCurve` object
Median background flux in units electron/second/pixel.
"""
mask = self._parse_aperture_mask(aperture_mask)
# For each cadence, compute the median pixel flux across the background
simple_bkg = np.nanmedian(self.flux[:, mask], axis=1) / u.pixel
return LightCurve(time=self.time, flux=simple_bkg)
def estimate_centroids(self, aperture_mask="default", method="moments"):
"""Returns the flux center of an object inside ``aperture_mask``.
Telescopes tend to smear out the light from a point-like star over
multiple pixels. For this reason, it is common to estimate the position
of a star by computing the *geometric center* of its image.
Astronomers refer to this position as the *centroid* of the object,
i.e. the term *centroid* is often used as a generic synonym to refer
to the measured position of an object in a telescope exposure.
This function provides two methods to estimate the position of a star:
* `method='moments'` will compute the "center of mass" of the light
based on the 2D image moments of the pixels inside ``aperture_mask``.
* `method='quadratic'` will fit a two-dimensional, second-order
polynomial to the 3x3 patch of pixels centered on the brightest pixel
inside the ``aperture_mask``, and return the peak of that polynomial.
Following Vakili & Hogg 2016 (ArXiv:1610.05873, Section 3.2).
Parameters
----------
aperture_mask : 'pipeline', 'threshold', 'all', 'default', or array-like
Which pixels contain the object to be measured, i.e. which pixels
should be used in the estimation? If None or 'all' are passed,
all pixels in the pixel file will be used.
If 'pipeline' is passed, the mask suggested by the official pipeline
will be returned.
If 'threshold' is passed, all pixels brighter than 3-sigma above
the median flux will be used.
If 'default' is passed, 'pipeline' mask will be used when available,
with 'threshold' as the fallback.
Alternatively, users can pass a boolean array describing the
aperture mask such that `True` means that the pixel will be used.
method : 'moments' or 'quadratic'
Defines which method to use to estimate the centroids. 'moments'
computes the centroid based on the sample moments of the data.
'quadratic' fits a 2D polynomial to the data and returns the
coordinate of the peak of that polynomial.
Returns
-------
columns, rows : `~astropy.units.Quantity`, `~astropy.units.Quantity`
Arrays containing the column and row positions for the centroid
for each cadence, or NaN for cadences where the estimation failed.
"""
method = validate_method(method, ["moments", "quadratic"])
if method == "moments":
return self._estimate_centroids_via_moments(aperture_mask=aperture_mask)
elif method == "quadratic":
return self._estimate_centroids_via_quadratic(aperture_mask=aperture_mask)
def _estimate_centroids_via_moments(self, aperture_mask):
    """Compute the flux-weighted "center of mass" of the light using the
    2D image moments of the pixels inside the mask; this is a helper
    method for `estimate_centroids()`.

    Returns
    -------
    columns, rows : `~astropy.units.Quantity`, `~astropy.units.Quantity`
        Column and row centroid positions (in pixels) for each cadence.
    """
    mask = self._parse_aperture_mask(aperture_mask)
    # Absolute (row, column) coordinates of every pixel in the stamp.
    row_coords, col_coords = np.indices(self.shape[1:])
    row_coords = row_coords + self.row
    col_coords = col_coords + self.column
    # Total in-mask flux per cadence: the denominator of the moments.
    norm = np.nansum(self.flux[:, mask], axis=1)
    with warnings.catch_warnings():
        # A zero total flux yields a divide-by-zero RuntimeWarning;
        # NaN is the intended result in that case, so silence it.
        warnings.simplefilter("ignore", RuntimeWarning)
        col_centr = np.nansum(col_coords * mask * self.flux, axis=(1, 2)) / norm
        row_centr = np.nansum(row_coords * mask * self.flux, axis=(1, 2)) / norm
    return col_centr * u.pixel, row_centr * u.pixel
def _estimate_centroids_via_quadratic(self, aperture_mask):
    """Estimate centroids by fitting a 2D quadratic to the brightest pixels;
    this is a helper method for `estimate_centroids()`.

    Returns
    -------
    columns, rows : `~astropy.units.Quantity`, `~astropy.units.Quantity`
        Column and row centroid positions (in pixels) for each cadence;
        NaN for cadences where the fit failed.
    """
    aperture_mask = self._parse_aperture_mask(aperture_mask)
    col_centr, row_centr = [], []
    # Fit each cadence independently; `centroid_quadratic` returns
    # (col, row) relative to the stamp's lower-left corner.
    for idx in range(len(self.time)):
        col, row = centroid_quadratic(self.flux[idx], mask=aperture_mask)
        col_centr.append(col)
        row_centr.append(row)
    # BUGFIX: `np.asfarray` was deprecated and removed in NumPy 2.0;
    # use `np.asarray` with an explicit float dtype instead.
    col_centr = np.asarray(col_centr, dtype=float) + self.column
    row_centr = np.asarray(row_centr, dtype=float) + self.row
    col_centr = Quantity(col_centr, unit="pixel")
    row_centr = Quantity(row_centr, unit="pixel")
    return col_centr, row_centr
def _aperture_photometry(
    self, aperture_mask, flux_method="sum", centroid_method="moments"
):
    """Helper method for ``extract_aperture_photometry``.

    Combines the pixel values inside ``aperture_mask`` at each cadence
    using ``flux_method``, propagates the per-pixel errors in quadrature,
    and estimates centroids via ``centroid_method``.

    Parameters
    ----------
    aperture_mask : array-like or str
        Aperture specification; parsed by `_parse_aperture_mask`.
    flux_method : str
        One of 'sum', 'median', or 'mean'; how in-mask pixels are combined.
    centroid_method : str
        Passed to `estimate_centroids` ('moments' or 'quadratic').

    Returns
    -------
    flux, flux_err, centroid_col, centroid_row
    """
    # Validate the aperture mask
    apmask = self._parse_aperture_mask(aperture_mask)
    if apmask.sum() == 0:
        log.warning("Warning: aperture mask contains zero pixels.")
    # Estimate centroids
    centroid_col, centroid_row = self.estimate_centroids(
        apmask, method=centroid_method
    )
    # Estimate flux
    if flux_method == "sum":
        flux = np.nansum(self.flux[:, apmask], axis=1)
    elif flux_method == "median":
        flux = np.nanmedian(self.flux[:, apmask], axis=1)
    elif flux_method == "mean":
        flux = np.nanmean(self.flux[:, apmask], axis=1)
    else:
        raise ValueError("`flux_method` must be one of 'sum', 'median', or 'mean'.")
    # In the future we may wish to add a user specified function
    # We use ``np.nansum`` above to be robust against a subset of pixels
    # being NaN, however if *all* pixels are NaN, we propagate a NaN.
    is_allnan = ~np.any(np.isfinite(self.flux[:, apmask]), axis=1)
    flux[is_allnan] = np.nan
    # Similarly, if *all* pixel values across the TPF are exactly zero,
    # we propagate NaN (cf. #873 for an example of this happening)
    is_allzero = np.all(self.flux == 0, axis=(1, 2))
    flux[is_allzero] = np.nan
    # Estimate flux_err: per-pixel errors are combined in quadrature,
    # mirroring the combination rule chosen for the flux itself.
    with warnings.catch_warnings():
        # Ignore warnings due to negative errors
        warnings.simplefilter("ignore", RuntimeWarning)
        if flux_method == "sum":
            flux_err = np.nansum(self.flux_err[:, apmask] ** 2, axis=1) ** 0.5
        elif flux_method == "median":
            flux_err = np.nanmedian(self.flux_err[:, apmask] ** 2, axis=1) ** 0.5
        elif flux_method == "mean":
            flux_err = np.nanmean(self.flux_err[:, apmask] ** 2, axis=1) ** 0.5
        is_allnan = ~np.any(np.isfinite(self.flux_err[:, apmask]), axis=1)
        flux_err[is_allnan] = np.nan
    # Attach physical units when the FITS header declares them;
    # TUNIT5/TUNIT6 are read here as the units of the flux and flux-error
    # columns — presumably matching the Kepler/TESS TPF column layout.
    if self.get_header(1).get("TUNIT5") == "e-/s":
        flux = Quantity(flux, unit="electron/s")
    if self.get_header(1).get("TUNIT6") == "e-/s":
        flux_err = Quantity(flux_err, unit="electron/s")
    return flux, flux_err, centroid_col, centroid_row
def query_solar_system_objects(
    self,
    cadence_mask="outliers",
    radius=None,
    sigma=3,
    cache=True,
    return_mask=False,
    show_progress=True
):
    """Returns a list of asteroids or comets which affected the target pixel files.

    Light curves of stars or galaxies are frequently affected by solar
    system bodies (e.g. asteroids, comets, planets). These objects can move
    across a target's photometric aperture mask on time scales of hours to
    days. When they pass through a mask, they tend to cause a brief spike
    in the brightness of the target. They can also cause dips by moving
    through a local background aperture mask (if any is used).

    The artifical spikes and dips introduced by asteroids are frequently
    confused with stellar flares, planet transits, etc. This method helps
    to identify false signals injects by asteroids by providing a list of
    the solar system objects (name, brightness, time) that passed in the
    vicinity of the target during the span of the light curve.

    This method queries the `SkyBot API <http://vo.imcce.fr/webservices/skybot/>`_,
    which returns a list of asteroids/comets/planets given a location, time,
    and search cone.

    Notes
    -----
    * This method will use the `ra` and `dec` properties of the `LightCurve`
      object to determine the position of the search cone.
    * The size of the search cone is 5 spacecraft pixels + TPF dimension by default. You
      can change this by passing the `radius` parameter (unit: degrees).
    * By default, this method will only search points in time during which the light
      curve showed 3-sigma outliers in flux. You can override this behavior
      and search for specific times by passing `cadence_mask`. See examples for details.

    Parameters
    ----------
    cadence_mask : str, or boolean array with length of self.time
        mask in time to select which frames or points should be searched for SSOs.
        Default "outliers" will search for SSOs at points that are `sigma` from the mean.
        "all" will search all cadences. Alternatively, pass a boolean array with values of "True"
        for times to search for SSOs.
    radius : optional, float
        Radius to search for bodies. If None, will search for SSOs within 5 pixels of
        all pixels in the TPF.
    sigma : optional, float
        If `cadence_mask` is set to `"outlier"`, `sigma` will be used to identify
        outliers.
    cache : optional, bool
        If True will cache the search result in the astropy cache. Set to False
        to request the search again.
    return_mask: optional, bool
        If True will return a boolean mask in time alongside the result
    show_progress: optional, bool
        If True will display a progress bar during the download

    Returns
    -------
    result : pandas.DataFrame
        DataFrame containing the list objects in frames that were identified to contain
        SSOs.

    Examples
    --------
    Find if there are SSOs affecting the target pixel file for the given time frame:

    >>> df_sso = tpf.query_solar_system_objects(cadence_mask=(tpf.time.value >= 2014.1) & (tpf.time.value <= 2014.9)) # doctest: +SKIP

    Find if there are SSOs affecting the target pixel file for all times, but it will be much slower:

    >>> df_sso = tpf.query_solar_system_objects(cadence_mask='all') # doctest: +SKIP
    """
    for attr in ["mission", "ra", "dec"]:
        if not hasattr(self, "{}".format(attr)):
            raise ValueError("Input does not have a `{}` attribute.".format(attr))
    location = self.mission.lower()

    # Resolve the `cadence_mask` argument into a boolean numpy array.
    if isinstance(cadence_mask, str):
        if cadence_mask == "outliers":
            # Fall back on all pixels when the pipeline aperture is empty.
            aper = self.pipeline_mask
            if aper.sum() == 0:
                aper = "all"
            lc = self.to_lightcurve(aperture_mask=aper)
            cadence_mask = lc.remove_outliers(sigma=sigma, return_mask=True)[1]
            # Avoid searching times with NaN flux; this is necessary because e.g.
            # `remove_outliers` includes NaNs in its mask.
            cadence_mask &= ~np.isnan(lc.flux)
        elif cadence_mask == "all":
            cadence_mask = np.ones(len(self.time)).astype(bool)
        else:
            raise ValueError("invalid `cadence_mask` string argument")
    elif isinstance(cadence_mask, collections.abc.Sequence):
        cadence_mask = np.array(cadence_mask)
    elif isinstance(cadence_mask, bool):
        # for boundary case of a single element tuple, e.g., (True)
        cadence_mask = np.array([cadence_mask])
    elif not isinstance(cadence_mask, np.ndarray):
        raise ValueError("Pass a cadence_mask method or a cadence_mask")

    # Pixel scale (arcseconds/pixel), used to convert the default search
    # radius into degrees.
    if location in ("kepler", "k2"):
        pixel_scale = 4
    elif location == "tess":
        pixel_scale = 27
    else:
        pixel_scale = None
    # BUGFIX: compare against None with `is`, and raise a clear error for
    # unknown missions (previously this crashed with a NameError on
    # `pixel_scale` when `radius` was left as None).
    if radius is None:
        if pixel_scale is None:
            raise ValueError(
                "Unknown mission '{}'; please provide an explicit `radius`.".format(location)
            )
        radius = (
            2 ** 0.5 * (pixel_scale * (np.max(self.shape[1:]) + 5))
        ) * u.arcsecond.to(u.deg)

    res = _query_solar_system_objects(
        ra=self.ra,
        dec=self.dec,
        times=self.time.jd[cadence_mask],
        location=location,
        radius=radius,
        cache=cache,
        show_progress=show_progress,
    )
    if return_mask:
        # `np.isin` replaces the deprecated `np.in1d`.
        return res, np.isin(self.time.jd, res.epoch)
    return res
def plot(
    self,
    ax=None,
    frame=0,
    cadenceno=None,
    bkg=False,
    column="FLUX",
    aperture_mask=None,
    show_colorbar=True,
    mask_color="red",
    title=None,
    style="lightkurve",
    **kwargs,
):
    """Plot the pixel data for a single frame (i.e. at a single time).

    The time can be specified by frame index number (`frame=0` will show the
    first frame) or absolute cadence number (`cadenceno`).

    Parameters
    ----------
    ax : `~matplotlib.axes.Axes`
        A matplotlib axes object to plot into. If no axes is provided,
        a new one will be generated.
    frame : int
        Frame number. The default is 0, i.e. the first frame.
    cadenceno : int, optional
        Alternatively, a cadence number can be provided.
        This argument has priority over frame number.
    bkg : bool
        If True and `column="FLUX"`, background will be added to the pixel values.
    column : str
        Choose the FITS data column to be plotted. May be one of ('FLUX',
        'FLUX_ERR','FLUX_BKG','FLUX_BKG_ERR','COSMIC_RAYS','RAW_CNTS').
    aperture_mask : ndarray or str
        Highlight pixels selected by aperture_mask.
    show_colorbar : bool
        Whether or not to show the colorbar
    mask_color : str
        Color to show the aperture mask
    title : str, optional
        Plot title. By default, a title including the target ID and
        cadence number will be generated.
    style : str
        Path or URL to a matplotlib style file, or name of one of
        matplotlib's built-in stylesheets (e.g. 'ggplot').
        Lightkurve's custom stylesheet is used by default.
    kwargs : dict
        Keywords arguments passed to `~lightkurve.utils.plot_image`.

    Returns
    -------
    ax : `~matplotlib.axes.Axes`
        The matplotlib axes object.
    """
    if style == "lightkurve" or style is None:
        style = MPLSTYLE
    # `cadenceno` takes priority over `frame`: translate it into an index.
    if cadenceno is not None:
        try:
            frame = np.argwhere(cadenceno == self.cadenceno)[0][0]
        except IndexError:
            raise ValueError(
                "cadenceno {} is out of bounds, "
                "must be in the range {}-{}.".format(
                    cadenceno, self.cadenceno[0], self.cadenceno[-1]
                )
            )
    try:
        if column == "FLUX":
            # Only add the background back in if it contains finite values.
            if bkg and np.any(np.isfinite(self.flux_bkg[frame])):
                data_to_plot = self.flux[frame] + self.flux_bkg[frame]
            else:
                data_to_plot = self.flux[frame]
        else:
            data_to_plot = self.hdu[1].data[column][self.quality_mask][frame]
    except KeyError:
        raise ValueError(
            "column must be one of the following: ('FLUX','FLUX_ERR',"
            "'FLUX_BKG','FLUX_BKG_ERR','COSMIC_RAYS','RAW_CNTS')"
        )
    except IndexError:
        raise ValueError(
            "frame {} is out of bounds, must be in the range "
            "0-{}.".format(frame, self.shape[0])
        )
    # Make list of preset colour labels
    clabels = {
        "FLUX": "Flux ($e^{-}s^{-1}$)",
        "FLUX_ERR": "Flux Err. ($e^{-}s^{-1}$)",
        "FLUX_BKG": "Background Flux ($e^{-}s^{-1}$)",
        "FLUX_BKG_ERR": "Background Flux Err. ($e^{-}s^{-1}$)",
        "COSMIC_RAYS": "Cosmic Ray Flux ($e^{-}s^{-1}$)",
        "RAW_CNTS": "Raw Counts",
    }
    with plt.style.context(style):
        if title is None:
            title = "Target ID: {}, Cadence: {}".format(
                self.targetid, self.cadenceno[frame]
            )
        # We subtract -0.5 because pixel coordinates refer to the middle of
        # a pixel, e.g. (col, row) = (10.0, 20.0) is a pixel center.
        img_extent = (
            self.column - 0.5,
            self.column + self.shape[2] - 0.5,
            self.row - 0.5,
            self.row + self.shape[1] - 0.5,
        )
        # If an axes is passed that used WCS projection, don't use img_extent
        # This addresses lk issue #1095, where the tpf coordinates were incorrectly plotted
        # BUGFIX: compare against None with `is not` rather than `!=`.
        if ax is not None and hasattr(ax, "wcs"):
            img_extent = None
        ax = plot_image(
            data_to_plot,
            ax=ax,
            title=title,
            extent=img_extent,
            show_colorbar=show_colorbar,
            clabel=clabels.get(column, column),
            **kwargs,
        )
        ax.grid(False)
    # Overlay the aperture mask if given
    if aperture_mask is not None:
        aperture_mask = self._parse_aperture_mask(aperture_mask)
        in_aperture = np.where(aperture_mask)
        # WCS axes plot in pixel coordinates; otherwise offset by the
        # TPF's corner position on the CCD.
        if hasattr(ax, "wcs"):
            ap_row = in_aperture[0] - 0.5
            ap_col = in_aperture[1] - 0.5
        else:
            ap_row = in_aperture[0] + self.row - 0.5
            ap_col = in_aperture[1] + self.column - 0.5
        for ii in range(len(ap_row)):
            rect = patches.Rectangle(
                (ap_col[ii], ap_row[ii]),
                1,
                1,
                fill=False,
                hatch="//",
                color=mask_color,
            )
            ax.add_patch(rect)
    return ax
def _to_matplotlib_animation(
    self, step: int = None, interval: int = 200, **plot_args
) -> "matplotlib.animation.FuncAnimation":
    """Build a `matplotlib.animation.FuncAnimation` of the flux over time.

    Each animation frame is produced by updating the image created by a
    single call to `tpf.plot()`.

    Parameters
    ----------
    step : int
        Spacing between frames. By default, the spacing will be determined such that
        50 frames are shown, i.e. `step = len(tpf) // 50`. Showing more than 50 frames
        will be slow on many systems.
    interval : int
        Delay between frames in milliseconds.
    **plot_args : dict
        Optional parameters passed to tpf.plot().
    """
    if step is None:
        step = len(self) // 50
    # Guard against a zero/negative step (e.g. fewer than 50 cadences).
    step = max(step, 1)
    data_column = plot_args.get("column", "FLUX")
    ax = self.plot(**plot_args)

    def init_frame():
        return ax.images

    def update_frame(i):
        cadence = i * step
        ax.images[0].set_data(
            self.hdu[1].data[data_column][self.quality_mask][cadence]
        )
        ax.set_title(f"Frame {cadence}")
        return ax.images

    # Prevent the figure from showing up in interactive mode.
    plt.close(ax.figure)
    # `blit=True` means only re-draw the parts that have changed.
    return matplotlib.animation.FuncAnimation(
        ax.figure,
        update_frame,
        init_func=init_frame,
        frames=len(self) // step,
        interval=interval,
        blit=True,
    )
def animate(self, step: int = None, interval: int = 200, **plot_args):
    """Displays an interactive HTML matplotlib animation.

    This feature requires a Jupyter notebook environment to display correctly.

    Parameters
    ----------
    step : int
        Spacing between frames. By default, the spacing will be determined such that
        50 frames are shown, i.e. `step = len(tpf) // 50`. Showing more than 50 frames
        will be slow on many systems.
    interval : int
        Delay between frames in milliseconds.
    **plot_args : dict
        Optional parameters passed to tpf.plot().
    """
    try:
        # IPython is an optional dependency of Lightkurve; it is assumed
        # to be present whenever notebook-specific features are invoked.
        from IPython.display import HTML

        anim = self._to_matplotlib_animation(
            step=step, interval=interval, **plot_args
        )
        return HTML(anim.to_jshtml())
    except ModuleNotFoundError:
        log.error("ipython needs to be installed for animate() to work (e.g., `pip install ipython`)")
def to_fits(self, output_fn=None, overwrite=False):
    """Write the TPF to a FITS file on disk.

    Parameters
    ----------
    output_fn : str, optional
        Path of the file to write; defaults to "<targetid>-targ.fits".
    overwrite : bool
        If `True`, overwrite an existing file of the same name.
    """
    # Fall back on a default filename derived from the target identifier.
    if output_fn is None:
        output_fn = f"{self.targetid}-targ.fits"
    # checksum=True embeds CHECKSUM/DATASUM keywords for integrity checks.
    self.hdu.writeto(output_fn, overwrite=overwrite, checksum=True)
def interact(
    self,
    notebook_url=None,
    max_cadences=200000,
    aperture_mask="default",
    exported_filename=None,
    transform_func=None,
    ylim_func=None,
    **kwargs,
):
    """Display an interactive Jupyter Notebook widget to inspect the pixel data.

    The widget will show both the lightcurve and pixel data. By default,
    the lightcurve shown is obtained by calling the `to_lightcurve()` method,
    unless the user supplies a custom `LightCurve` object.
    This feature requires an optional dependency, bokeh (v0.12.15 or later).
    This dependency can be installed using e.g. `conda install bokeh`.

    At this time, this feature only works inside an active Jupyter
    Notebook, and tends to be too slow when more than ~30,000 cadences
    are contained in the TPF (e.g. short cadence data).

    Parameters
    ----------
    notebook_url : str
        Location of the Jupyter notebook page (default: "localhost:8888")
        When showing Bokeh applications, the Bokeh server must be
        explicitly configured to allow connections originating from
        different URLs. This parameter defaults to the standard notebook
        host and port. If you are running on a different location, you
        will need to supply this value for the application to display
        properly. If no protocol is supplied in the URL, e.g. if it is
        of the form "localhost:8888", then "http" will be used.
        For use with JupyterHub, set the environment variable LK_JUPYTERHUB_EXTERNAL_URL
        to the public hostname of your JupyterHub and notebook_url will
        be defined appropriately automatically.
    max_cadences : int
        Print an error message if the number of cadences shown is larger than
        this value. This limit helps keep browsers from becoming unresponsive.
    aperture_mask : array-like, 'pipeline', 'threshold', 'default', or 'all'
        A boolean array describing the aperture such that `True` means
        that the pixel will be used.
        If None or 'all' are passed, all pixels will be used.
        If 'pipeline' is passed, the mask suggested by the official pipeline
        will be returned.
        If 'threshold' is passed, all pixels brighter than 3-sigma above
        the median flux will be used.
        If 'default' is passed, 'pipeline' mask will be used when available,
        with 'threshold' as the fallback.
    exported_filename: str
        An optional filename to assign to exported fits files containing
        the custom aperture mask generated by clicking on pixels in interact.
        The default adds a suffix '-custom-aperture-mask.fits' to the
        TargetPixelFile basename.
    transform_func: function
        A function that transforms the lightcurve. The function takes in a
        LightCurve object as input and returns a LightCurve object as output.
        The function can be complex, such as detrending the lightcurve. In this
        way, the interactive selection of aperture mask can be evaluated after
        inspection of the transformed lightcurve. The transform_func is applied
        before saving a fits file. Default: None (no transform is applied).
    ylim_func: function
        A function that returns ylimits (low, high) given a LightCurve object.
        The default is to return a window approximately around 5 sigma-clipped
        lightcurve flux values.

    Examples
    --------
    To select an aperture mask for V827 Tau::

        >>> import lightkurve as lk
        >>> tpf = lk.search_targetpixelfile("V827 Tau", mission="K2").download() # doctest: +SKIP
        >>> tpf.interact() # doctest: +SKIP

    To see the full y-axis dynamic range of your lightcurve and normalize
    the lightcurve after each pixel selection::

        >>> ylim_func = lambda lc: (0.0, lc.flux.max()) # doctest: +SKIP
        >>> transform_func = lambda lc: lc.normalize() # doctest: +SKIP
        >>> tpf.interact(ylim_func=ylim_func, transform_func=transform_func) # doctest: +SKIP
    """
    # Imported lazily because bokeh is an optional dependency.
    from .interact import show_interact_widget

    # Resolve JupyterHub/localhost defaults before handing off to bokeh.
    notebook_url = finalize_notebook_url(notebook_url)

    # This method is a thin wrapper; all widget logic lives in `interact.py`.
    return show_interact_widget(
        self,
        notebook_url=notebook_url,
        max_cadences=max_cadences,
        aperture_mask=aperture_mask,
        exported_filename=exported_filename,
        transform_func=transform_func,
        ylim_func=ylim_func,
        **kwargs,
    )
def interact_sky(self, notebook_url=None, aperture_mask="empty", magnitude_limit=18):
    """Display a Jupyter Notebook widget showing Gaia DR2 positions on top of the pixels.

    Parameters
    ----------
    notebook_url : str
        Location of the Jupyter notebook page (default: "localhost:8888")
        When showing Bokeh applications, the Bokeh server must be
        explicitly configured to allow connections originating from
        different URLs. This parameter defaults to the standard notebook
        host and port. If you are running on a different location, you
        will need to supply this value for the application to display
        properly. If no protocol is supplied in the URL, e.g. if it is
        of the form "localhost:8888", then "http" will be used.
        For use with JupyterHub, set the environment variable LK_JUPYTERHUB_EXTERNAL_URL
        to the public hostname of your JupyterHub and notebook_url will
        be defined appropriately automatically.
    aperture_mask : array-like, 'pipeline', 'threshold', 'default', 'background', or 'empty'
        Highlight pixels selected by aperture_mask.
        Default is 'empty': no pixel is highlighted.
    magnitude_limit : float
        A value to limit the results in based on Gaia Gmag. Default, 18.
    """
    # Imported lazily because bokeh is an optional dependency.
    from .interact import show_skyview_widget

    # Resolve JupyterHub/localhost defaults before handing off to bokeh.
    notebook_url = finalize_notebook_url(notebook_url)

    # This method is a thin wrapper; all widget logic lives in `interact.py`.
    return show_skyview_widget(
        self, notebook_url=notebook_url, aperture_mask=aperture_mask, magnitude_limit=magnitude_limit
    )
def to_corrector(self, method="pld", **kwargs):
    """Returns a `~correctors.corrector.Corrector` instance to remove systematics.

    Parameters
    ----------
    methods : string
        Currently, only "pld" is supported. This will return a
        `~correctors.PLDCorrector` class instance.
    **kwargs : dict
        Extra keyword arguments to be passed on to the corrector class.

    Returns
    -------
    correcter : `~correctors.corrector.Corrector`
        Instance of a Corrector class, which typically provides
        `~correctors.PLDCorrector.correct()` and
        `~correctors.PLDCorrector.diagnose()` methods.
    """
    # 'sff' operates on light curves, not pixel files; point the user at
    # `to_lightcurve()` instead.
    if method == "sff":
        raise ValueError(
            "The 'sff' method requires a `LightCurve` instead "
            "of a `TargetPixelFile` object. Use `to_lightcurve()` "
            "to obtain a `LightCurve` first."
        )
    allowed_methods = ["pld"]
    if method not in allowed_methods:
        raise ValueError(
            ("Unrecognized method '{0}'\n" "allowed methods are: {1}").format(
                method, allowed_methods
            )
        )
    # At this point `method` can only be "pld".
    from .correctors import PLDCorrector

    return PLDCorrector(self, **kwargs)
def cutout(self, center=None, size=5):
    """Cut a rectangle out of the Target Pixel File.

    This methods returns a new `TargetPixelFile` object containing a
    rectangle of a given ``size`` cut out around a given ``center``.

    Parameters
    ----------
    center : (int, int) tuple or `astropy.SkyCoord`
        Center of the cutout. If an (int, int) tuple is passed, it will be
        interpreted as the (column, row) coordinates relative to
        the bottom-left corner of the TPF. If an `astropy.SkyCoord` is
        passed then the sky coordinate will be used instead.
        If `None` (default) then the center of the TPF will be used.
    size : int or (int, int) tuple
        Number of pixels to cut out. If a single integer is passed then
        a square of that size will be cut. If a tuple is passed then a
        rectangle with dimensions (column_size, row_size) will be cut.

    Returns
    -------
    tpf : `lightkurve.TargetPixelFile` object
        New and smaller Target Pixel File object containing only the data
        cut out.

    Raises
    ------
    ValueError
        If ``center`` or ``size`` has an unsupported type.
    """
    imshape = self.flux.shape[1:]
    # Parse the user input (``center``) into an (x, y) coordinate
    if center is None:
        x, y = imshape[0] // 2, imshape[1] // 2
    elif isinstance(center, SkyCoord):
        x, y = self.wcs.world_to_pixel(center)
    elif isinstance(center, (tuple, list, np.ndarray)):
        x, y = center
    else:
        # BUGFIX: unsupported types previously fell through and caused a
        # confusing NameError further down.
        raise ValueError(
            "`center` must be None, a SkyCoord, or a (col, row) tuple."
        )
    col = int(x)
    row = int(y)
    # Parse the user input (``size``) into half-sizes (columns, rows)
    if isinstance(size, int):
        s = (size / 2, size / 2)
    elif isinstance(size, (tuple, list, np.ndarray)):
        s = (size[0] / 2, size[1] / 2)
    else:
        raise ValueError("`size` must be an int or a (col_size, row_size) tuple.")
    # Find the TPF edges, clipped to the image bounds
    col_edges = np.asarray(
        [np.max([0, col - s[0]]), np.min([col + s[0], imshape[1]])], dtype=int
    )
    row_edges = np.asarray(
        [np.max([0, row - s[1]]), np.min([row + s[1], imshape[0]])], dtype=int
    )
    # Make a copy of the data extension
    hdu = self.hdu[0].copy()
    # Find the new object coordinates by averaging the per-pixel sky
    # coordinates of the cut-out region at the middle cadence
    r, d = self.get_coordinates(cadence=len(self.flux) // 2)
    hdu.header["RA_OBJ"] = np.nanmean(
        r[row_edges[0] : row_edges[1], col_edges[0] : col_edges[1]]
    )
    hdu.header["DEC_OBJ"] = np.nanmean(
        d[row_edges[0] : row_edges[1], col_edges[0] : col_edges[1]]
    )
    # Remove any KIC labels, which no longer apply to the cut-out target
    labels = [
        "*MAG",
        "PM*",
        "GL*",
        "PARALLAX",
        "*COLOR",
        "TEFF",
        "LOGG",
        "FEH",
        "EBMINUSV",
        "AV",
        "RADIUS",
        "TMINDEX",
    ]
    for label in labels:
        if label in hdu.header:
            hdu.header[label] = fits.card.Undefined()
    # HDUList
    hdus = [hdu]
    # Copy the header
    hdr = deepcopy(self.hdu[1].header)
    # Trim any columns that have the shape of the image, to be the new shape
    data_columns = []
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        for idx, datacol in enumerate(self.hdu[1].columns):
            # We exclude Kepler's obscure "RB_LEVEL" column from cutouts
            # for now because it has an awkward shape
            if datacol.name == "RB_LEVEL":
                continue
            # If the column is 3D
            if len(self.hdu[1].data[datacol.name].shape) == 3:
                # Make a copy, trim it and change the format
                datacol = deepcopy(datacol)
                datacol.array = datacol.array[
                    :, row_edges[0] : row_edges[1], col_edges[0] : col_edges[1]
                ]
                datacol._dim = "{}".format(datacol.array.shape[1:]).replace(" ", "")
                datacol._dims = datacol.array.shape[1:]
                # BUGFIX: `np.product` was removed in NumPy 2.0; use `np.prod`.
                datacol._format = fits.column._ColumnFormat(
                    "{}{}".format(
                        np.prod(datacol.array.shape[1:]), datacol._format[-1]
                    )
                )
                data_columns.append(datacol)
                hdr["TDIM{}".format(idx)] = "{}".format(
                    datacol.array.shape[1:]
                ).replace(" ", "")
                hdr["TDIM9"] = "{}".format(datacol.array.shape[1:]).replace(" ", "")
            else:
                data_columns.append(datacol)
    # Get those coordinates sorted for the corner of the TPF and the WCS
    hdr["1CRV*P"] = hdr["1CRV4P"] + col_edges[0]
    hdr["2CRV*P"] = hdr["2CRV4P"] + row_edges[0]
    hdr["1CRPX*"] = hdr["1CRPX4"] - col_edges[0]
    hdr["2CRPX*"] = hdr["2CRPX4"] - row_edges[0]
    # Make a table for the data
    data_columns[-1]._dim = "{}".format(
        (0, int(data_columns[5]._dim.split(",")[1][:-1]))
    ).replace(" ", "")
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        btbl = fits.BinTableHDU.from_columns(data_columns, header=hdr)
    # Append it to the hdulist
    hdus.append(btbl)
    # Correct the aperture mask
    hdu = self.hdu[2].copy()
    ar = hdu.data
    ar = ar[row_edges[0] : row_edges[1], col_edges[0] : col_edges[1]]
    # NOTE(review): NAXIS1 is conventionally the number of columns
    # (ar.shape[1]) and NAXIS2 the number of rows (ar.shape[0]); these
    # assignments look swapped, but assigning `hdu.data` below causes
    # astropy to recompute them on write — confirm before changing.
    hdu.header["NAXIS1"] = ar.shape[0]
    hdu.header["NAXIS2"] = ar.shape[1]
    hdu.data = ar
    hdus.append(hdu)
    # Make a new tpf
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        newfits = fits.HDUList(hdus)
    return self.__class__(newfits, quality_bitmask=self.quality_bitmask)
@staticmethod
def from_fits_images(
    images_flux,
    position,
    images_raw_cnts=None,
    images_flux_err=None,
    images_flux_bkg=None,
    images_flux_bkg_err=None,
    images_cosmic_rays=None,
    size=(11, 11),
    extension=1,
    target_id="unnamed-target",
    hdu0_keywords=None,
    **kwargs,
):
    """Creates a new Target Pixel File from a set of images.

    This method is intended to make it easy to cut out targets from
    Kepler/K2 "superstamp" regions or TESS FFI images.

    Parameters
    ----------
    images_flux : list of str, or list of fits.ImageHDU objects
        Sorted list of FITS filename paths or ImageHDU objects to get
        the flux data from.
    position : astropy.SkyCoord
        Position around which to cut out pixels.
    images_raw_cnts : list of str, or list of fits.ImageHDU objects
        Sorted list of FITS filename paths or ImageHDU objects to get
        the raw counts data from.
    images_flux_err : list of str, or list of fits.ImageHDU objects
        Sorted list of FITS filename paths or ImageHDU objects to get
        the flux error data from.
    images_flux_bkg : list of str, or list of fits.ImageHDU objects
        Sorted list of FITS filename paths or ImageHDU objects to get
        the background data from.
    images_flux_bkg_err : list of str, or list of fits.ImageHDU objects
        Sorted list of FITS filename paths or ImageHDU objects to get
        the background error data from.
    images_cosmic_rays : list of str, or list of fits.ImageHDU objects
        Sorted list of FITS filename paths or ImageHDU objects to get
        the cosmic rays data from.
    size : (int, int)
        Dimensions (cols, rows) to cut out around `position`.
    extension : int or str
        If `images` is a list of filenames, provide the extension number
        or name to use. This should be the same for all flux inputs
        provided. Default: 1.
    target_id : int or str
        Unique identifier of the target to be recorded in the TPF.
    hdu0_keywords : dict
        Additional keywords to add to the first header file.
    **kwargs : dict
        Extra arguments to be passed to the `TargetPixelFile` constructor.

    Returns
    -------
    tpf : TargetPixelFile
        A new Target Pixel File assembled from the images.
    """
    len_images = len(images_flux)
    if len_images == 0:
        raise ValueError("One or more images must be passed.")
    if not isinstance(position, SkyCoord):
        raise ValueError("Position must be an astropy.coordinates.SkyCoord.")
    if hdu0_keywords is None:
        hdu0_keywords = {}
    # Header keywords carried over from the input images into the new TPF.
    basic_keywords = [
        "MISSION",
        "TELESCOP",
        "INSTRUME",
        "QUARTER",
        "CAMPAIGN",
        "CHANNEL",
        "MODULE",
        "OUTPUT",
        "CAMERA",
        "CCD",
        "SECTOR",
    ]
    carry_keywords = {}

    # Define a helper function to accept images in a flexible way
    def _open_image(img, extension):
        if isinstance(img, fits.ImageHDU):
            hdu = img
        elif isinstance(img, fits.HDUList):
            hdu = img[extension]
        else:
            with fits.open(img) as hdulist:
                hdu = hdulist[extension].copy()
        return hdu

    # Define a helper function to cutout images if not None
    def _cutout_image(hdu, position, wcs_ref, size):
        if hdu is None:
            cutout_data = None
            cutout_wcs = None
        elif position is None:
            cutout_data = hdu.data
            cutout_wcs = hdu.wcs
        else:
            cutout = Cutout2D(
                hdu.data, position, wcs=wcs_ref, size=size, mode="partial"
            )
            cutout_data = cutout.data
            cutout_wcs = cutout.wcs
        return cutout_data, cutout_wcs

    # Set the default extension if unspecified
    # NOTE(review): `extension` defaults to 1 in the signature, so this
    # branch only triggers when the caller explicitly passes None.
    if extension is None:
        extension = 0
        if isinstance(images_flux[0], str) and images_flux[0].endswith("ffic.fits"):
            extension = 1  # TESS FFIs have the image data in extension #1
    # If no position is given, ensure the cut-out size matches the image size
    # NOTE(review): `position` is validated as a SkyCoord above, so this
    # branch appears unreachable; kept for safety — confirm before removing.
    if position is None:
        size = _open_image(images_flux[0], extension).data.shape
    # Find middle image to use as a WCS reference.
    # BUGFIX: removed a no-op ``try: ... except Exception as e: raise e``
    # wrapper which re-raised the exception unchanged.
    mid_hdu = _open_image(images_flux[int(len_images / 2) - 1], extension)
    wcs_ref = WCS(mid_hdu)
    column, row = wcs_ref.all_world2pix(
        np.asarray([[position.ra.deg], [position.dec.deg]]).T, 0
    )[0]
    # Create a factory and set default keyword values based on the middle image
    factory = TargetPixelFileFactory(
        n_cadences=len_images, n_rows=size[0], n_cols=size[1], target_id=target_id
    )
    # Get some basic keywords
    for kw in basic_keywords:
        if kw in mid_hdu.header:
            if not isinstance(mid_hdu.header[kw], Undefined):
                carry_keywords[kw] = mid_hdu.header[kw]
    if ("MISSION" not in carry_keywords) and ("TELESCOP" in carry_keywords):
        carry_keywords["MISSION"] = carry_keywords["TELESCOP"]
    allkeys = hdu0_keywords.copy()
    allkeys.update(carry_keywords)
    # Input lists ordered to match the factory's per-cadence data columns.
    img_list = [
        images_raw_cnts,
        images_flux,
        images_flux_err,
        images_flux_bkg,
        images_flux_bkg_err,
        images_cosmic_rays,
    ]
    # BUGFIX: the loop previously unpacked an unused `img` variable from
    # `enumerate(images_flux)`; iterate over the indices directly instead.
    for idx in tqdm(range(len_images), total=len_images):
        # Open images if provided and get HDUs
        hdu_list = [
            _open_image(i[idx], extension) if i is not None else None
            for i in img_list
        ]
        # Use the header in the flux image for each frame
        hdu_idx = hdu_list[1].header
        if idx == 0:  # Get default keyword values from the first flux image
            factory.keywords = hdu_idx
        # Get positional shift of the image compared to the reference WCS
        wcs_current = WCS(hdu_idx)
        column_current, row_current = wcs_current.all_world2pix(
            np.asarray([[position.ra.deg], [position.dec.deg]]).T, 0
        )[0]
        column_ref, row_ref = wcs_ref.all_world2pix(
            np.asarray([[position.ra.deg], [position.dec.deg]]).T, 0
        )[0]
        with warnings.catch_warnings():
            # Using `POS_CORR1` as a header keyword violates the FITS
            # standard for being too long, but we use it for consistency
            # with the TPF column name. Hence we ignore the warning.
            warnings.simplefilter("ignore", AstropyWarning)
            hdu_idx["POS_CORR1"] = column_current - column_ref
            hdu_idx["POS_CORR2"] = row_current - row_ref
        # Cutout (if necessary) and get data
        cutout_list = [
            _cutout_image(hdu, position, wcs_ref, size) for hdu in hdu_list
        ]
        # Flatten output list of (data, wcs) pairs
        cutout_list = [item for sublist in cutout_list for item in sublist]
        (
            raw_cnts,
            _,
            flux,
            wcs,
            flux_err,
            _,
            flux_bkg,
            _,
            flux_bkg_err,
            _,
            cosmic_rays,
            _,
        ) = cutout_list
        factory.add_cadence(
            frameno=idx,
            raw_cnts=raw_cnts,
            flux=flux,
            flux_err=flux_err,
            flux_bkg=flux_bkg,
            flux_bkg_err=flux_bkg_err,
            cosmic_rays=cosmic_rays,
            header=hdu_idx,
        )
    ext_info = {}
    ext_info["TFORM4"] = "{}J".format(size[0] * size[1])
    ext_info["TDIM4"] = "({},{})".format(size[0], size[1])
    # `wcs` refers to the cut-out WCS of the final cadence processed above.
    ext_info.update(wcs.to_header(relax=True))
    # Compute the distance from the star to the TPF lower left corner.
    # That is approximately half the TPF size, with an adjustment factor
    # if the star's pixel position gets rounded up or not.
    # The first int is there so that even sizes always round to one less
    # than half of their value. (Hoisted out of the loop below: the values
    # do not depend on the loop variable.)
    half_tpfsize_col = int((size[0] - 1) / 2.0) + (
        int(round(column)) - int(column)
    ) * ((size[0] + 1) % 2)
    half_tpfsize_row = int((size[1] - 1) / 2.0) + (
        int(round(row)) - int(row)
    ) * ((size[1] + 1) % 2)
    # TPF contains multiple data columns that require WCS
    for m in [4, 5, 6, 7, 8, 9]:
        if m > 4:
            ext_info["TFORM{}".format(m)] = "{}E".format(size[0] * size[1])
            ext_info["TDIM{}".format(m)] = "({},{})".format(size[0], size[1])
        ext_info["1CRV{}P".format(m)] = (
            int(round(column)) - half_tpfsize_col + factory.keywords["CRVAL1P"] - 1
        )
        ext_info["2CRV{}P".format(m)] = (
            int(round(row)) - half_tpfsize_row + factory.keywords["CRVAL2P"] - 1
        )
    return factory.get_tpf(hdu0_keywords=allkeys, ext_info=ext_info, **kwargs)
def plot_pixels(
self,
ax=None,
periodogram=False,
aperture_mask=None,
show_flux=False,
corrector_func=None,
style="lightkurve",
title=None,
markersize=0.5,
**kwargs,
):
"""Show the light curve of each pixel in a single plot.
Note that all values are autoscaled and axis labels are not provided.
This utility is designed for by-eye inspection of signal morphology.
Parameters
----------
ax : `~matplotlib.axes.Axes`
A matplotlib axes object to plot into. If no axes is provided,
a new one will be generated.
periodogram : bool
Default: False; if True, periodograms will be plotted, using normalized light curves.
Note that this keyword overrides normalized.
aperture_mask : ndarray or str
Highlight pixels selected by aperture_mask.
Only `pipeline`, `threshold`, or custom masks will be plotted.
`all` and None masks will be ignored.
show_flux : bool
Default: False; if True, shade pixels with frame 0 flux colour
Inspired by https://github.com/noraeisner/LATTE
corrector_func : function
Function that accepts and returns a `~lightkurve.lightcurve.LightCurve`.
This function is applied to each pixel's light curve.
The default is to return a 5 sigma-clipped light curve.
style : str
Path or URL to a matplotlib style file, or name of one of
matplotlib's built-in stylesheets (e.g. 'ggplot').
Lightkurve's custom stylesheet is used by default.
markersize : float
Size of the markers in the lightcurve plot. For periodogram plot, it is used as the line width.
Default: 0.5
kwargs : dict
e.g. extra parameters to be passed to `~lightkurve.LightCurve.to_periodogram`.
Examples
--------
Inspect the lightcurve around a possible transit at per-pixel level::
>>> import lightkurve as lk
>>> # A possible transit around time BTJD 2277.0. Inspect the lightcurve around that time
>>> tpf = tpf[(tpf.time.value >= 2276.5) & (tpf.time.value <= 2277.5)] # doctest: +SKIP
>>> tpf.plot_pixels(aperture_mask='pipeline') # doctest: +SKIP
>>>
>>> # Variation: shade the pixel based on the flux at frame 0
>>> # increase markersize so that it is more legible for pixels with yellow background (the brightest pixels)
>>> tpf.plot_pixels(aperture_mask='pipeline', show_flux=True, markersize=1.5) # doctest: +SKIP
>>>
>>> # Variation: Customize the plot's size so that each pixel is about 1 inch by 1 inch
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure(figsize=(tpf.flux[0].shape[1] * 1.0, tpf.flux[0].shape[0] * 1.0)) # doctest: +SKIP
>>> tpf.plot_pixels(ax=fig.gca(), aperture_mask='pipeline') # doctest: +SKIP
"""
if style == "lightkurve" or style is None:
style = MPLSTYLE
if title is None:
title = "Target ID: {0}, {1:.2f} - {2:.2f} {3}".format(
self.targetid,
self.time[0].value,
self.time[-1].value,
_time_label_brief(self.time),
)
if corrector_func is None:
corrector_func = lambda x: x.remove_outliers()
if show_flux:
cmap = plt.get_cmap()
norm = plt.Normalize(
vmin=np.nanmin(self.flux[0].value), vmax=np.nanmax(self.flux[0].value)
)
mask = self._parse_aperture_mask(aperture_mask)
with warnings.catch_warnings():
warnings.simplefilter(
"ignore", category=(RuntimeWarning, LightkurveWarning)
)
# get an aperture mask for each pixel
masks = np.zeros(
(self.shape[1] * self.shape[2], self.shape[1], self.shape[2]),
dtype="bool",
)
for i in range(self.shape[1] * self.shape[2]):
masks[i][np.unravel_index(i, (self.shape[1], self.shape[2]))] = True
pixel_list = []
for j in range(self.shape[1] * self.shape[2]):
lc = self.to_lightcurve(aperture_mask=masks[j])
lc = corrector_func(lc)
if periodogram:
try:
pixel_list.append(lc.to_periodogram(**kwargs))
except IndexError:
pixel_list.append(None)
else:
if len(lc.remove_nans().flux) == 0:
pixel_list.append(None)
else:
pixel_list.append(lc)
with plt.style.context(style):
if ax is None:
fig = plt.figure()
ax = plt.gca()
set_size = True
else:
fig = ax.get_figure()
set_size = False
ax.get_xaxis().set_ticks([])
ax.get_yaxis().set_ticks([])
if periodogram:
ax.set(
title=title,
xlabel="Frequency / Column (pixel)",
ylabel="Power / Row (pixel)",
)
else:
ax.set(
title=title,
xlabel="Time / Column (pixel)",
ylabel="Flux / Row (pixel)",
)
gs = gridspec.GridSpec(
self.shape[1], self.shape[2], wspace=0.01, hspace=0.01
)
for k in range(self.shape[1] * self.shape[2]):
if pixel_list[k]:
x, y = np.unravel_index(k, (self.shape[1], self.shape[2]))
# Highlight aperture mask in red
if aperture_mask is not None and mask[x, y]:
rc = {"axes.linewidth": 2, "axes.edgecolor": "red"}
else:
rc = {"axes.linewidth": 1}
with plt.rc_context(rc=rc):
gax = fig.add_subplot(gs[self.shape[1] - x - 1, y])
# Determine background and foreground color
if show_flux:
gax.set_facecolor(cmap(norm(self.flux.value[0, x, y])))
markercolor = "white"
else:
markercolor = "black"
# Plot flux or periodogram
if periodogram:
gax.plot(
pixel_list[k].frequency.value,
pixel_list[k].power.value,
marker="None",
color=markercolor,
lw=markersize,
)
else:
gax.plot(
pixel_list[k].time.value,
pixel_list[k].flux.value,
marker=".",
color=markercolor,
ms=markersize,
lw=0,
)
gax.margins(y=0.1, x=0)
gax.set_xticklabels("")
gax.set_yticklabels("")
gax.set_xticks([])
gax.set_yticks([])
# add row/column numbers to start / end
if x == 0 and y == 0:
gax.set_xlabel(f"{self.column}")
gax.set_ylabel(f"{self.row}")
if x == 0 and y == self.shape[2] - 1: # lower right
gax.set_xlabel(f"{self.column + self.shape[2] - 1}")
if x == self.shape[1] - 1 and y == 0: # upper left
gax.set_ylabel(f"{self.row + self.shape[1] - 1}")
if set_size: # use default size when caller does not supply ax
fig.set_size_inches((y * 1.5, x * 1.5))
return ax
class KeplerTargetPixelFile(TargetPixelFile):
    """Class to read and interact with the pixel data products
    ("Target Pixel Files") created by NASA's Kepler pipeline.

    This class offers a user-friendly way to open a Kepler Target Pixel File
    (TPF), access its meta data, visualize its contents, extract light curves
    with custom aperture masks, estimate centroid positions, and more.

    Please consult the `TargetPixelFile tutorial
    <https://docs.lightkurve.org/tutorials/01-target-pixel-files.html>`_
    in the online documentation for examples on using this class.

    Parameters
    ----------
    path : str or `~astropy.io.fits.HDUList`
        Path to a Kepler Target Pixel file. Alternatively, you can pass a
        `.HDUList` object, which is the AstroPy object returned by
        the `astropy.io.fits.open` function.
    quality_bitmask : "none", "default", "hard", "hardest", or int
        Bitmask that should be used to ignore bad-quality cadences.
        If a string is passed, it has the following meaning:

        * "none": no cadences will be ignored (equivalent to
          ``quality_bitmask=0``).
        * "default": cadences with severe quality issues will be ignored
          (equivalent to ``quality_bitmask=1130799``).
        * "hard": more conservative choice of flags to ignore
          (equivalent to ``quality_bitmask=1664431``).
          This is known to remove good data.
        * "hardest": remove all cadences that have one or more flags raised
          (equivalent to ``quality_bitmask=2096639``). This mask is not
          recommended because some quality flags can safely be ignored.

        If an integer is passed, it will be used as a bitmask, i.e. it will
        have the effect of removing cadences where
        ``(tpf.hdu[1].data['QUALITY'] & quality_bitmask) > 0``.
        See the :class:`KeplerQualityFlags` class for details on the bitmasks.
    **kwargs : dict
        Optional keyword arguments passed on to `astropy.io.fits.open`.

    References
    ----------
    .. [1] Kepler: A Search for Terrestrial Planets. Kepler Archive Manual.
        http://archive.stsci.edu/kepler/manuals/archive_manual.pdf
    """
    def __init__(self, path, quality_bitmask="default", **kwargs):
        super(KeplerTargetPixelFile, self).__init__(
            path, quality_bitmask=quality_bitmask, **kwargs
        )
        try:
            # Translate the requested bitmask into a boolean cadence mask.
            self.quality_mask = KeplerQualityFlags.create_quality_mask(
                quality_array=self.hdu[1].data["QUALITY"], bitmask=quality_bitmask
            )
            # check to make sure the correct filetype has been provided
            filetype = detect_filetype(self.hdu)
            if filetype == "TessTargetPixelFile":
                warnings.warn(
                    "A TESS data product is being opened using the "
                    "`KeplerTargetPixelFile` class. "
                    "Please use `TessTargetPixelFile` instead.",
                    LightkurveWarning,
                )
            elif filetype is None:
                warnings.warn(
                    "File header not recognized as Kepler or TESS " "observation.",
                    LightkurveWarning,
                )
            # Use the KEPLERID keyword as the default targetid
            if self.targetid is None:
                self.targetid = self.get_header().get("KEPLERID")
        except Exception as e:
            # Cannot instantiate TargetPixelFile, close the HDU to release the file handle
            self.hdu.close()
            raise e
    def __repr__(self):
        # Human-readable summary used by the interactive interpreter.
        return "KeplerTargetPixelFile Object (ID: {})".format(self.targetid)
    def get_prf_model(self):
        """Returns an object of KeplerPRF initialized using the
        necessary metadata in the tpf object.

        Returns
        -------
        prf : instance of KeplerPRF
        """
        return KeplerPRF(
            channel=self.channel, shape=self.shape[1:], column=self.column, row=self.row
        )
    @property
    def obsmode(self):
        """'short cadence' or 'long cadence'. ('OBSMODE' header keyword)"""
        return self.get_keyword("OBSMODE")
    @property
    def module(self):
        """Kepler CCD module number. ('MODULE' header keyword)"""
        return self.get_keyword("MODULE")
    @property
    def output(self):
        """Kepler CCD module output number. ('OUTPUT' header keyword)"""
        return self.get_keyword("OUTPUT")
    @property
    def channel(self):
        """Kepler CCD channel number. ('CHANNEL' header keyword)"""
        return self.get_keyword("CHANNEL")
    @property
    def quarter(self):
        """Kepler quarter number. ('QUARTER' header keyword)"""
        return self.get_keyword("QUARTER")
    @property
    def campaign(self):
        """K2 Campaign number. ('CAMPAIGN' header keyword)"""
        return self.get_keyword("CAMPAIGN")
    @property
    def mission(self):
        """'Kepler' or 'K2'. ('MISSION' header keyword)"""
        return self.get_keyword("MISSION")
    def extract_aperture_photometry(
        self, aperture_mask="default", flux_method="sum", centroid_method="moments"
    ):
        """Returns a LightCurve obtained using aperture photometry.

        Parameters
        ----------
        aperture_mask : array-like, 'pipeline', 'threshold', 'default', or 'all'
            A boolean array describing the aperture such that `True` means
            that the pixel will be used.
            If None or 'all' are passed, all pixels will be used.
            If 'pipeline' is passed, the mask suggested by the official pipeline
            will be returned.
            If 'threshold' is passed, all pixels brighter than 3-sigma above
            the median flux will be used.
            If 'default' is passed, 'pipeline' mask will be used when available,
            with 'threshold' as the fallback.
        flux_method: 'sum', 'median', or 'mean'
            Determines how the pixel values within the aperture mask are combined
            at each cadence. Defaults to 'sum'.
        centroid_method : str, 'moments' or 'quadratic'
            For the details on this arguments, please refer to the documentation
            for `estimate_centroids()`.

        Returns
        -------
        lc : KeplerLightCurve object
            Array containing the summed flux within the aperture for each
            cadence.
        """
        # explicitly resolve default, so that the aperture_mask set in meta
        # later will be the resolved one
        aperture_mask = self._resolve_default_aperture_mask(aperture_mask)
        flux, flux_err, centroid_col, centroid_row = self._aperture_photometry(
            aperture_mask=aperture_mask,
            flux_method=flux_method,
            centroid_method=centroid_method,
        )
        # Carry the Kepler-specific metadata over into the light curve.
        keys = {
            "centroid_col": centroid_col,
            "centroid_row": centroid_row,
            "quality": self.quality,
            "channel": self.channel,
            "campaign": self.campaign,
            "quarter": self.quarter,
            "mission": self.mission,
            "cadenceno": self.cadenceno,
            "ra": self.ra,
            "dec": self.dec,
            "label": self.get_keyword("OBJECT", default=self.targetid),
            "targetid": self.targetid,
        }
        meta = {"APERTURE_MASK": aperture_mask}
        return KeplerLightCurve(
            time=self.time, flux=flux, flux_err=flux_err, **keys, meta=meta
        )
    def get_bkg_lightcurve(self, aperture_mask=None):
        """Returns a `KeplerLightCurve` of the background flux.

        The flux is the sum of ``self.flux_bkg`` over ``aperture_mask``;
        the uncertainty is the quadrature sum of ``self.flux_bkg_err``
        over the same pixels.
        """
        aperture_mask = self._parse_aperture_mask(aperture_mask)
        # Ignore warnings related to zero or negative errors
        with warnings.catch_warnings():
            warnings.simplefilter("ignore", RuntimeWarning)
            flux_bkg_err = (
                np.nansum(self.flux_bkg_err[:, aperture_mask] ** 2, axis=1) ** 0.5
            )
        keys = {
            "quality": self.quality,
            "channel": self.channel,
            "campaign": self.campaign,
            "quarter": self.quarter,
            "mission": self.mission,
            "cadenceno": self.cadenceno,
            "ra": self.ra,
            "dec": self.dec,
            "label": self.get_keyword("OBJECT", default=self.targetid),
            "targetid": self.targetid,
        }
        return KeplerLightCurve(
            time=self.time,
            flux=np.nansum(self.flux_bkg[:, aperture_mask], axis=1),
            flux_err=flux_bkg_err,
            **keys,
        )
    def get_model(self, star_priors=None, **kwargs):
        """Returns a default `TPFModel` object for PRF fitting.

        The default model only includes one star and only allows its flux
        and position to change. A different set of stars can be added using
        the `star_priors` parameter.

        Parameters
        ----------
        **kwargs : dict
            Arguments to be passed to the `TPFModel` constructor, e.g.
            `star_priors`.

        Returns
        -------
        model : TPFModel object
            Model with appropriate defaults for this Target Pixel File.
        """
        from .prf import TPFModel, StarPrior, BackgroundPrior
        from .prf import UniformPrior, GaussianPrior
        # Set up the model
        if "star_priors" not in kwargs:
            # Default single-star prior centred on the measured centroids.
            centr_col, centr_row = self.estimate_centroids()
            star_priors = [
                StarPrior(
                    col=GaussianPrior(
                        mean=np.nanmedian(centr_col.value),
                        var=np.nanstd(centr_col.value) ** 2,
                    ),
                    row=GaussianPrior(
                        mean=np.nanmedian(centr_row.value),
                        var=np.nanstd(centr_row.value) ** 2,
                    ),
                    flux=UniformPrior(
                        lb=0.5 * np.nanmax(self.flux[0].value),
                        ub=2 * np.nansum(self.flux[0].value) + 1e-10,
                    ),
                    targetid=self.targetid,
                )
            ]
            kwargs["star_priors"] = star_priors
        if "prfmodel" not in kwargs:
            kwargs["prfmodel"] = self.get_prf_model()
        if "background_prior" not in kwargs:
            if np.all(
                np.isnan(self.flux_bkg)
            ):  # If TargetPixelFile has no background flux data
                # Use the median of the lower half of flux as an estimate for flux_bkg
                clipped_flux = np.ma.masked_where(
                    self.flux.value > np.percentile(self.flux.value, 50),
                    self.flux.value,
                )
                flux_prior = GaussianPrior(
                    mean=np.ma.median(clipped_flux), var=np.ma.std(clipped_flux) ** 2
                )
            else:
                flux_prior = GaussianPrior(
                    mean=np.nanmedian(self.flux_bkg.value),
                    var=np.nanstd(self.flux_bkg.value) ** 2,
                )
            kwargs["background_prior"] = BackgroundPrior(flux=flux_prior)
        return TPFModel(**kwargs)
    def extract_prf_photometry(self, cadences=None, parallel=True, **kwargs):
        """Returns the results of PRF photometry applied to the pixel file.

        Parameters
        ----------
        cadences : list of int
            Cadences to fit. If `None` (default) then all cadences will be fit.
        parallel : bool
            If `True`, fitting cadences will be distributed across multiple
            cores using Python's `multiprocessing` module.
        **kwargs : dict
            Keywords to be passed to `get_model()` to create the
            `~prf.TPFModel` object that will be fit.

        Returns
        -------
        results : PRFPhotometry object
            Object that provides access to PRF-fitting photometry results and
            various diagnostics.
        """
        from .prf import PRFPhotometry
        log.warning(
            "Warning: PRF-fitting photometry is experimental "
            "in this version of lightkurve."
        )
        prfphot = PRFPhotometry(model=self.get_model(**kwargs))
        # Fit against the background-inclusive flux.
        prfphot.run(
            self.flux + self.flux_bkg,
            cadences=cadences,
            parallel=parallel,
            pos_corr1=self.pos_corr1,
            pos_corr2=self.pos_corr2,
        )
        return prfphot
    def prf_lightcurve(self, **kwargs):
        """Returns a `KeplerLightCurve` obtained via PRF-fitting photometry.

        Keyword arguments are passed through to `extract_prf_photometry`;
        the first fitted light curve is wrapped with this TPF's metadata.
        """
        lc = self.extract_prf_photometry(**kwargs).lightcurves[0]
        keys = {
            "quality": self.quality,
            "channel": self.channel,
            "campaign": self.campaign,
            "quarter": self.quarter,
            "mission": self.mission,
            "cadenceno": self.cadenceno,
            "ra": self.ra,
            "dec": self.dec,
            "targetid": self.targetid,
        }
        return KeplerLightCurve(time=self.time, flux=lc.flux, **keys)
class FactoryError(Exception):
    """Error raised when a TargetPixelFileFactory cannot build a valid TPF."""
class TargetPixelFileFactory(object):
    """Class to create a TargetPixelFile.

    Data are accumulated frame-by-frame via `add_cadence` and assembled
    into a FITS-backed `TargetPixelFile` object by `get_tpf`.
    """
    def __init__(
        self, n_cadences, n_rows, n_cols, target_id="unnamed-target", keywords=None
    ):
        """Pre-allocate data arrays for ``n_cadences`` frames of
        ``n_rows`` x ``n_cols`` pixels each.

        ``keywords`` provides header keywords (and, presumably, their
        comments) to copy into the output extensions — see the NOTE in
        `_make_target_extension`.
        """
        self.n_cadences = n_cadences
        self.n_rows = n_rows
        self.n_cols = n_cols
        self.target_id = target_id
        if keywords is None:
            self.keywords = {}
        else:
            self.keywords = keywords
        # Initialize the 3D data structures
        self.raw_cnts = np.empty((n_cadences, n_rows, n_cols), dtype="int")
        self.flux = np.empty((n_cadences, n_rows, n_cols), dtype="float32")
        self.flux_err = np.empty((n_cadences, n_rows, n_cols), dtype="float32")
        self.flux_bkg = np.empty((n_cadences, n_rows, n_cols), dtype="float32")
        self.flux_bkg_err = np.empty((n_cadences, n_rows, n_cols), dtype="float32")
        self.cosmic_rays = np.empty((n_cadences, n_rows, n_cols), dtype="float32")
        # Set 3D data defaults: -1 for counts, NaN for float quantities,
        # so unset cadences are recognizable.
        self.raw_cnts[:, :, :] = -1
        self.flux[:, :, :] = np.nan
        self.flux_err[:, :, :] = np.nan
        self.flux_bkg[:, :, :] = np.nan
        self.flux_bkg_err[:, :, :] = np.nan
        self.cosmic_rays[:, :, :] = np.nan
        # Initialize the 1D data structures
        self.mjd = np.zeros(n_cadences, dtype="float64")
        self.time = np.zeros(n_cadences, dtype="float64")
        self.timecorr = np.zeros(n_cadences, dtype="float32")
        self.cadenceno = np.zeros(n_cadences, dtype="int")
        self.quality = np.zeros(n_cadences, dtype="int")
        self.pos_corr1 = np.zeros(n_cadences, dtype="float32")
        self.pos_corr2 = np.zeros(n_cadences, dtype="float32")
    def add_cadence(
        self,
        frameno,
        raw_cnts=None,
        flux=None,
        flux_err=None,
        flux_bkg=None,
        flux_bkg_err=None,
        cosmic_rays=None,
        header=None,
    ):
        """Populate the data for a single cadence.

        Raises
        ------
        FactoryError
            If ``frameno`` is out of range or an image argument does not
            match the factory's (n_rows, n_cols) shape.
        """
        if frameno >= self.n_cadences:
            raise FactoryError(
                "Can not add cadence {}, n_cadences set to {}".format(
                    frameno, self.n_cadences
                )
            )
        if header is None:
            header = {}
        # 2D-data
        # NOTE: ``locals()[col]`` looks up the function argument whose name
        # matches the column string, and ``vars(self)[col]`` the attribute
        # of the same name.
        for col in [
            "raw_cnts",
            "flux",
            "flux_err",
            "flux_bkg",
            "flux_bkg_err",
            "cosmic_rays",
        ]:
            if locals()[col] is not None:
                if locals()[col].shape != (self.n_rows, self.n_cols):
                    raise FactoryError(
                        "Can not add cadence with a different shape ({} x {})".format(
                            self.n_rows, self.n_cols
                        )
                    )
                vars(self)[col][frameno] = locals()[col]
        # 1D-data: copy supported keywords from the cadence header.
        if "TSTART" in header and "TSTOP" in header:
            # Mid-exposure time.
            self.time[frameno] = (header["TSTART"] + header["TSTOP"]) / 2.0
        if "TIMECORR" in header:
            self.timecorr[frameno] = header["TIMECORR"]
        if "CADENCEN" in header:
            self.cadenceno[frameno] = header["CADENCEN"]
        if "QUALITY" in header:
            self.quality[frameno] = header["QUALITY"]
        if "POS_CORR1" in header:
            self.pos_corr1[frameno] = header["POS_CORR1"]
        if "POS_CORR2" in header:
            self.pos_corr2[frameno] = header["POS_CORR2"]
    def _check_data(self):
        """Check the data before writing to a TPF for any obvious errors."""
        if len(self.time) != len(np.unique(self.time)):
            warnings.warn(
                "The factory-created TPF contains cadences with "
                "identical TIME values.",
                LightkurveWarning,
            )
        # ``np.all`` returns a NumPy bool, for which ``~`` is a valid
        # logical negation (unlike the Python builtin ``bool``).
        if ~np.all(self.time == np.sort(self.time)):
            warnings.warn(
                "Cadences in the factory-created TPF do not appear "
                "to be sorted in chronological order.",
                LightkurveWarning,
            )
        if np.nansum(self.flux) == 0:
            warnings.warn(
                "The factory-created TPF does not appear to contain "
                "non-zero flux values.",
                LightkurveWarning,
            )
    def get_tpf(self, hdu0_keywords=None, ext_info=None, **kwargs):
        """Returns a TargetPixelFile object.

        The concrete class (`KeplerTargetPixelFile`, `TessTargetPixelFile`,
        or generic `TargetPixelFile`) is chosen by `detect_filetype` on the
        assembled HDU list; ``**kwargs`` are passed to its constructor.
        """
        if hdu0_keywords is None:
            hdu0_keywords = {}
        if ext_info is None:
            ext_info = {}
        self._check_data()
        # Detect filetype
        hdulist = self._hdulist(hdu0_keywords=hdu0_keywords, ext_info=ext_info)
        filetype = detect_filetype(hdulist)
        if filetype == "TessTargetPixelFile":
            tpf = TessTargetPixelFile(hdulist, **kwargs)
        elif filetype == "KeplerTargetPixelFile":
            tpf = KeplerTargetPixelFile(hdulist, **kwargs)
        else:
            warnings.warn(
                "Could not detect filetype as TESSTargetPixelFile or KeplerTargetPixelFile, "
                "returning generic TargetPixelFile instead.",
                LightkurveWarning,
            )
            tpf = TargetPixelFile(hdulist, **kwargs)
        return tpf
    def _hdulist(self, hdu0_keywords, ext_info):
        """Returns an astropy.io.fits.HDUList object."""
        return fits.HDUList(
            [
                self._make_primary_hdu(hdu0_keywords=hdu0_keywords),
                self._make_target_extension(ext_info=ext_info),
                self._make_aperture_extension(),
            ]
        )
    def _header_template(self, extension):
        """Returns a template `fits.Header` object for a given extension."""
        template_fn = os.path.join(
            PACKAGEDIR, "data", "tpf-ext{}-header.txt".format(extension)
        )
        return fits.Header.fromtextfile(template_fn)
    def _make_primary_hdu(self, hdu0_keywords):
        """Returns the primary extension (#0)."""
        hdu = fits.PrimaryHDU()
        # Copy the default keywords from a template file from the MAST archive
        tmpl = self._header_template(0)
        for kw in tmpl:
            hdu.header[kw] = (tmpl[kw], tmpl.comments[kw])
        # Override the defaults where necessary
        hdu.header["ORIGIN"] = "Unofficial data product"
        hdu.header["DATE"] = datetime.datetime.now().strftime("%Y-%m-%d")
        hdu.header["CREATOR"] = "lightkurve.TargetPixelFileFactory"
        hdu.header["OBJECT"] = self.target_id
        if hdu.header["TELESCOP"] is not None and hdu.header["TELESCOP"] == "Kepler":
            hdu.header["KEPLERID"] = self.target_id
        # Empty a bunch of keywords rather than having incorrect info
        for kw in [
            "PROCVER",
            "FILEVER",
            "CHANNEL",
            "MODULE",
            "OUTPUT",
            "TIMVERSN",
            "CAMPAIGN",
            "DATA_REL",
            "TTABLEID",
            "RA_OBJ",
            "DEC_OBJ",
        ]:
            hdu.header[kw] = ""
        # Some keywords just shouldn't be passed to the new header.
        bad_keys = [
            "ORIGIN",
            "DATE",
            "OBJECT",
            "SIMPLE",
            "BITPIX",
            "NAXIS",
            "EXTEND",
            "NEXTEND",
            "EXTNAME",
            "NAXIS1",
            "NAXIS2",
            "QUALITY",
        ]
        # Copy the caller-supplied keywords, skipping structural ones.
        for kw, val in hdu0_keywords.items():
            if kw in bad_keys:
                continue
            if kw in hdu.header:
                hdu.header[kw] = val
            else:
                hdu.header.append((kw, val))
        return hdu
    def _make_target_extension(self, ext_info):
        """Create the 'TARGETTABLES' extension (i.e. extension #1)."""
        # Turn the data arrays into fits columns and initialize the HDU
        coldim = "({},{})".format(self.n_cols, self.n_rows)
        eformat = "{}E".format(self.n_rows * self.n_cols)
        jformat = "{}J".format(self.n_rows * self.n_cols)
        cols = []
        cols.append(
            fits.Column(name="TIME", format="D", unit="BJD - 2454833", array=self.time)
        )
        cols.append(
            fits.Column(name="TIMECORR", format="E", unit="D", array=self.timecorr)
        )
        cols.append(fits.Column(name="CADENCENO", format="J", array=self.cadenceno))
        cols.append(
            fits.Column(
                name="RAW_CNTS",
                format=jformat,
                unit="count",
                dim=coldim,
                array=self.raw_cnts,
            )
        )
        cols.append(
            fits.Column(
                name="FLUX", format=eformat, unit="e-/s", dim=coldim, array=self.flux
            )
        )
        cols.append(
            fits.Column(
                name="FLUX_ERR",
                format=eformat,
                unit="e-/s",
                dim=coldim,
                array=self.flux_err,
            )
        )
        cols.append(
            fits.Column(
                name="FLUX_BKG",
                format=eformat,
                unit="e-/s",
                dim=coldim,
                array=self.flux_bkg,
            )
        )
        cols.append(
            fits.Column(
                name="FLUX_BKG_ERR",
                format=eformat,
                unit="e-/s",
                dim=coldim,
                array=self.flux_bkg_err,
            )
        )
        cols.append(
            fits.Column(
                name="COSMIC_RAYS",
                format=eformat,
                unit="e-/s",
                dim=coldim,
                array=self.cosmic_rays,
            )
        )
        cols.append(fits.Column(name="QUALITY", format="J", array=self.quality))
        cols.append(
            fits.Column(
                name="POS_CORR1", format="E", unit="pixels", array=self.pos_corr1
            )
        )
        cols.append(
            fits.Column(
                name="POS_CORR2", format="E", unit="pixels", array=self.pos_corr2
            )
        )
        coldefs = fits.ColDefs(cols)
        hdu = fits.BinTableHDU.from_columns(coldefs)
        # Set the header with defaults
        template = self._header_template(1)
        for kw in template:
            if kw not in ["XTENSION", "NAXIS1", "NAXIS2", "CHECKSUM", "BITPIX"]:
                try:
                    # NOTE(review): assumes ``self.keywords`` is a fits.Header
                    # (provides ``.comments``) whenever the key exists; a plain
                    # dict would raise AttributeError here — TODO confirm.
                    hdu.header[kw] = (self.keywords[kw], self.keywords.comments[kw])
                except KeyError:
                    hdu.header[kw] = (template[kw], template.comments[kw])
        # Map a standard WCS keyword to its per-column (table WCS) variant.
        wcs_keywords = {
            "CTYPE1": "1CTYP{}",
            "CTYPE2": "2CTYP{}",
            "CRPIX1": "1CRPX{}",
            "CRPIX2": "2CRPX{}",
            "CRVAL1": "1CRVL{}",
            "CRVAL2": "2CRVL{}",
            "CUNIT1": "1CUNI{}",
            "CUNIT2": "2CUNI{}",
            "CDELT1": "1CDLT{}",
            "CDELT2": "2CDLT{}",
            "PC1_1": "11PC{}",
            "PC1_2": "12PC{}",
            "PC2_1": "21PC{}",
            "PC2_2": "22PC{}",
        }
        # Override defaults using data calculated in from_fits_images
        for kw in ext_info.keys():
            if kw in wcs_keywords.keys():
                # Replicate the WCS keyword for each image column (4-9).
                for x in [4, 5, 6, 7, 8, 9]:
                    hdu.header[wcs_keywords[kw].format(x)] = ext_info[kw]
            else:
                hdu.header[kw] = ext_info[kw]
        return hdu
    def _make_aperture_extension(self):
        """Create the aperture mask extension (i.e. extension #2)."""
        # Value 3 marks every pixel as collected and part of the aperture.
        mask = 3 * np.ones((self.n_rows, self.n_cols), dtype="int32")
        hdu = fits.ImageHDU(mask)
        # Set the header from the template TPF again
        template = self._header_template(2)
        for kw in template:
            if kw not in ["XTENSION", "NAXIS1", "NAXIS2", "CHECKSUM", "BITPIX"]:
                try:
                    # NOTE(review): same fits.Header assumption as in
                    # `_make_target_extension` — TODO confirm.
                    hdu.header[kw] = (self.keywords[kw], self.keywords.comments[kw])
                except KeyError:
                    hdu.header[kw] = (template[kw], template.comments[kw])
        # Override the defaults where necessary
        for keyword in [
            "CTYPE1",
            "CTYPE2",
            "CRPIX1",
            "CRPIX2",
            "CRVAL1",
            "CRVAL2",
            "CUNIT1",
            "CUNIT2",
            "CDELT1",
            "CDELT2",
            "PC1_1",
            "PC1_2",
            "PC2_1",
            "PC2_2",
        ]:
            hdu.header[keyword] = ""  # override wcs keywords
        hdu.header["EXTNAME"] = "APERTURE"
        return hdu
class TessTargetPixelFile(TargetPixelFile):
    """Represents pixel data products created by NASA's TESS pipeline.

    This class enables extraction of custom light curves and centroid positions.

    Parameters
    ----------
    path : str
        Path to a TESS Target Pixel (FITS) File.
    quality_bitmask : "none", "default", "hard", "hardest", or int
        Bitmask that should be used to ignore bad-quality cadences.
        If a string is passed, it has the following meaning:

        * "none": no cadences will be ignored (`quality_bitmask=0`).
        * "default": cadences with severe quality issues will be ignored
          (`quality_bitmask=175`).
        * "hard": more conservative choice of flags to ignore
          (`quality_bitmask=7407`). This is known to remove good data.
        * "hardest": removes all data that has been flagged
          (`quality_bitmask=8191`). This mask is not recommended.

        If an integer is passed, it will be used as a bitmask, i.e. it will
        have the effect of removing cadences where
        ``(tpf.hdu[1].data['QUALITY'] & quality_bitmask) > 0``.
        See the :class:`KeplerQualityFlags` class for details on the bitmasks.
    kwargs : dict
        Keyword arguments passed to `astropy.io.fits.open()`.
    """
    def __init__(self, path, quality_bitmask="default", **kwargs):
        super(TessTargetPixelFile, self).__init__(
            path, quality_bitmask=quality_bitmask, **kwargs
        )
        try:
            # Translate the requested bitmask into a boolean cadence mask.
            self.quality_mask = TessQualityFlags.create_quality_mask(
                quality_array=self.hdu[1].data["QUALITY"], bitmask=quality_bitmask
            )
            # Early TESS releases had cadences with time=NaN (i.e. missing data)
            # which were not flagged by a QUALITY flag yet; the line below prevents
            # these cadences from being used. They would break most methods!
            if (quality_bitmask != 0) and (quality_bitmask != "none"):
                self.quality_mask &= np.isfinite(self.hdu[1].data["TIME"])
            # check to make sure the correct filetype has been provided
            filetype = detect_filetype(self.hdu)
            if filetype == "KeplerTargetPixelFile":
                warnings.warn(
                    "A Kepler data product is being opened using the "
                    "`TessTargetPixelFile` class. "
                    "Please use `KeplerTargetPixelFile` instead.",
                    LightkurveWarning,
                )
            elif filetype is None:
                warnings.warn(
                    "File header not recognized as Kepler or TESS " "observation.",
                    LightkurveWarning,
                )
            # Use the TICID keyword as the default targetid
            if self.targetid is None:
                self.targetid = self.get_header().get("TICID")
        except Exception as e:
            # Cannot instantiate TargetPixelFile, close the HDU to release the file handle
            self.hdu.close()
            raise e
    def __repr__(self):
        # Human-readable summary used by the interactive interpreter.
        return "TessTargetPixelFile(TICID: {})".format(self.targetid)
    @property
    def background_mask(self):
        """Returns the background mask used by the TESS pipeline."""
        # The TESS pipeline flags the pixels in the background aperture using
        # bit number 4, cf. Section 6 of the TESS Data Products documentation
        # (EXP-TESS-ARC-ICD-TM-0014.pdf).
        try:
            return self.hdu[2].data & 4 > 0
        except (IndexError, TypeError):
            # `IndexError` may be raised if the aperture extension (#2) is missing
            # `TypeError` may be raised because early versions of TESScut returned floats in HDU 2
            return np.zeros(self.hdu[1].data["FLUX"][0].shape, dtype=bool)
    @property
    def sector(self):
        """TESS Sector number ('SECTOR' header keyword)."""
        return self.get_keyword("SECTOR")
    @property
    def camera(self):
        """TESS Camera number ('CAMERA' header keyword)."""
        return self.get_keyword("CAMERA")
    @property
    def ccd(self):
        """TESS CCD number ('CCD' header keyword)."""
        return self.get_keyword("CCD")
    @property
    def mission(self):
        """Mission name; always the constant string 'TESS' for this class."""
        return "TESS"
    def extract_aperture_photometry(
        self, aperture_mask="default", flux_method="sum", centroid_method="moments"
    ):
        """Returns a LightCurve obtained using aperture photometry.

        Parameters
        ----------
        aperture_mask : array-like, 'pipeline', 'threshold', 'default', or 'all'
            A boolean array describing the aperture such that `True` means
            that the pixel will be used.
            If None or 'all' are passed, all pixels will be used.
            If 'pipeline' is passed, the mask suggested by the official pipeline
            will be returned.
            If 'threshold' is passed, all pixels brighter than 3-sigma above
            the median flux will be used.
            If 'default' is passed, 'pipeline' mask will be used when available,
            with 'threshold' as the fallback.
        flux_method: 'sum', 'median', or 'mean'
            Determines how the pixel values within the aperture mask are combined
            at each cadence. Defaults to 'sum'.
        centroid_method : str, 'moments' or 'quadratic'
            For the details on this arguments, please refer to the documentation
            for `estimate_centroids()`.

        Returns
        -------
        lc : TessLightCurve object
            Contains the summed flux within the aperture for each cadence.
        """
        # explicitly resolve default, so that the aperture_mask set in meta
        # later will be the resolved one
        aperture_mask = self._resolve_default_aperture_mask(aperture_mask)
        flux, flux_err, centroid_col, centroid_row = self._aperture_photometry(
            aperture_mask=aperture_mask,
            flux_method=flux_method,
            centroid_method=centroid_method,
        )
        # Carry the TESS-specific metadata over into the light curve.
        keys = {
            "centroid_col": centroid_col,
            "centroid_row": centroid_row,
            "quality": self.quality,
            "sector": self.sector,
            "camera": self.camera,
            "ccd": self.ccd,
            "mission": self.mission,
            "cadenceno": self.cadenceno,
            "ra": self.ra,
            "dec": self.dec,
            "label": self.get_keyword("OBJECT", default=self.targetid),
            "targetid": self.targetid,
        }
        meta = {"APERTURE_MASK": aperture_mask}
        return TessLightCurve(
            time=self.time, flux=flux, flux_err=flux_err, **keys, meta=meta
        )
    def get_bkg_lightcurve(self, aperture_mask=None):
        """Returns a `TessLightCurve` of the background flux.

        The flux is the sum of ``self.flux_bkg`` over ``aperture_mask``;
        the uncertainty is the quadrature sum of ``self.flux_bkg_err``
        over the same pixels.
        """
        aperture_mask = self._parse_aperture_mask(aperture_mask)
        # Ignore warnings related to zero or negative errors
        with warnings.catch_warnings():
            warnings.simplefilter("ignore", RuntimeWarning)
            flux_bkg_err = (
                np.nansum(self.flux_bkg_err[:, aperture_mask] ** 2, axis=1) ** 0.5
            )
        keys = {
            "quality": self.quality,
            "sector": self.sector,
            "camera": self.camera,
            "ccd": self.ccd,
            "cadenceno": self.cadenceno,
            "ra": self.ra,
            "dec": self.dec,
            "label": self.get_keyword("OBJECT", default=self.targetid),
            "targetid": self.targetid,
        }
        return TessLightCurve(
            time=self.time,
            flux=np.nansum(self.flux_bkg[:, aperture_mask], axis=1),
            flux_err=flux_bkg_err,
            **keys,
        )
|
KeplerGOREPO_NAMElightkurvePATH_START.@lightkurve_extracted@lightkurve-main@src@lightkurve@targetpixelfile.py@.PATH_END.py
|
{
"filename": "_variantsrc.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/scattersmith/textfont/_variantsrc.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class VariantsrcValidator(_plotly_utils.basevalidators.SrcValidator):
    """Validator for the ``variantsrc`` property of ``scattersmith.textfont``."""

    def __init__(
        self, plotly_name="variantsrc", parent_name="scattersmith.textfont", **kwargs
    ):
        # Pop the edit type first so it is not forwarded twice via **kwargs.
        edit_type = kwargs.pop("edit_type", "none")
        super(VariantsrcValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@scattersmith@textfont@_variantsrc.py@.PATH_END.py
|
{
"filename": "points.md",
"repo_name": "RozanskiT/HANDY",
"repo_path": "HANDY_extracted/HANDY-master/docs/points.md",
"type": "Markdown"
}
|
# HANDY - (Special) Points in normalization process
## Table of Contents
* [Home](index.md)
* [Install](install.md)
* [Basics](basics.md)
* [Regions and ranges](regions.md)
* [Points](points.md)
* [Radial velocity correction](radialVelocity.md)
* [Grids](grids.md)
## What are Special Points?
Special Points are points manually defined by the user, which will be used to fit the continuum line. Each point is defined by its **wavelength and ratio**: its _y-coordinate_ divided by the _median of a small range of flux_. Using a ratio instead of the y-coordinate makes Points portable between the different spectra we need to normalize. They are needed in places where the true continuum is not available in the star's spectrum (e.g. near the Balmer jump). They need to be used with caution, because it is easy to bias the spectrum by manually adjusting it to the expected result. **You should not use them when that is not necessary.**
## The use of Special Points
* **Button _Add special point_** - when this button is active, the next left mouse click on the top plot will add a special Point
* **Auto fit special points** - (_Be careful with that option!_) when you click this button and a theoretical spectrum is available, all special Points will be adjusted so that the normalized spectrum fits the theoretical one in a narrow surrounding of each special Point
## Saving of Special Points
Special points are included in the so-called _continuum file_, which can be saved via the menu option _Save->Save continuum file_. Those files also contain all ranges/regions definitions (see more: [Regions and ranges](regions.md)).
## Example

|
RozanskiTREPO_NAMEHANDYPATH_START.@HANDY_extracted@HANDY-master@docs@points.md@.PATH_END.py
|
{
"filename": "_textposition.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/pie/_textposition.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class TextpositionValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    """Enumerated validator for the ``pie.textposition`` attribute.

    Accepts one of ``"inside"``, ``"outside"``, ``"auto"`` or ``"none"``
    (arrays of these values are allowed as well).
    """

    def __init__(self, plotly_name="textposition", parent_name="pie", **kwargs):
        # Pull overridable defaults out of **kwargs before delegating, so a
        # caller-supplied value always wins over the hard-coded default.
        array_ok = kwargs.pop("array_ok", True)
        edit_type = kwargs.pop("edit_type", "plot")
        values = kwargs.pop("values", ["inside", "outside", "auto", "none"])
        super(TextpositionValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            array_ok=array_ok,
            edit_type=edit_type,
            values=values,
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@pie@_textposition.py@.PATH_END.py
|
{
"filename": "graph_store.py",
"repo_name": "langchain-ai/langchain",
"repo_path": "langchain_extracted/langchain-master/libs/community/langchain_community/graphs/graph_store.py",
"type": "Python"
}
|
from abc import abstractmethod
from typing import Any, Dict, List, Optional

from langchain_community.graphs.graph_document import GraphDocument
class GraphStore:
    """Abstract interface for graph-database operations.

    Concrete backends (e.g. Neo4j, NebulaGraph) implement this interface so
    that callers can query and populate a graph without depending on a
    specific database driver.
    """

    @property
    @abstractmethod
    def get_schema(self) -> str:
        """Return the schema of the graph database as a human-readable string."""
        pass

    @property
    @abstractmethod
    def get_structured_schema(self) -> Dict[str, Any]:
        """Return the schema of the graph database as a structured mapping."""
        pass

    @abstractmethod
    def query(self, query: str, params: Optional[dict] = None) -> List[Dict[str, Any]]:
        """Run ``query`` against the graph and return the result rows.

        Args:
            query: Query string in the backend's query language.
            params: Optional mapping of query parameters; ``None`` means no
                parameters. (``None`` replaces the previous mutable ``{}``
                default, which is shared between calls in Python.)

        Returns:
            A list of result records, each a name-to-value mapping.
        """
        pass

    @abstractmethod
    def refresh_schema(self) -> None:
        """Re-introspect the database and refresh cached schema information."""
        pass

    @abstractmethod
    def add_graph_documents(
        self, graph_documents: List["GraphDocument"], include_source: bool = False
    ) -> None:
        """Construct a graph from the given graph documents.

        Args:
            graph_documents: Documents describing the nodes and relationships
                to add to the graph.
            include_source: When True, also relate the added nodes to their
                source document.
        """
        pass
|
langchain-aiREPO_NAMElangchainPATH_START.@langchain_extracted@langchain-master@libs@community@langchain_community@graphs@graph_store.py@.PATH_END.py
|
{
"filename": "_mathtext_data.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/matplotlib/py3/matplotlib/_mathtext_data.py",
"type": "Python"
}
|
"""
font data tables for truetype and afm computer modern fonts
"""
from __future__ import annotations
latex_to_bakoma = {
'\\__sqrt__' : ('cmex10', 0x70),
'\\bigcap' : ('cmex10', 0x5c),
'\\bigcup' : ('cmex10', 0x5b),
'\\bigodot' : ('cmex10', 0x4b),
'\\bigoplus' : ('cmex10', 0x4d),
'\\bigotimes' : ('cmex10', 0x4f),
'\\biguplus' : ('cmex10', 0x5d),
'\\bigvee' : ('cmex10', 0x5f),
'\\bigwedge' : ('cmex10', 0x5e),
'\\coprod' : ('cmex10', 0x61),
'\\int' : ('cmex10', 0x5a),
'\\langle' : ('cmex10', 0xad),
'\\leftangle' : ('cmex10', 0xad),
'\\leftbrace' : ('cmex10', 0xa9),
'\\oint' : ('cmex10', 0x49),
'\\prod' : ('cmex10', 0x59),
'\\rangle' : ('cmex10', 0xae),
'\\rightangle' : ('cmex10', 0xae),
'\\rightbrace' : ('cmex10', 0xaa),
'\\sum' : ('cmex10', 0x58),
'\\widehat' : ('cmex10', 0x62),
'\\widetilde' : ('cmex10', 0x65),
'\\{' : ('cmex10', 0xa9),
'\\}' : ('cmex10', 0xaa),
'{' : ('cmex10', 0xa9),
'}' : ('cmex10', 0xaa),
',' : ('cmmi10', 0x3b),
'.' : ('cmmi10', 0x3a),
'/' : ('cmmi10', 0x3d),
'<' : ('cmmi10', 0x3c),
'>' : ('cmmi10', 0x3e),
'\\alpha' : ('cmmi10', 0xae),
'\\beta' : ('cmmi10', 0xaf),
'\\chi' : ('cmmi10', 0xc2),
'\\combiningrightarrowabove' : ('cmmi10', 0x7e),
'\\delta' : ('cmmi10', 0xb1),
'\\ell' : ('cmmi10', 0x60),
'\\epsilon' : ('cmmi10', 0xb2),
'\\eta' : ('cmmi10', 0xb4),
'\\flat' : ('cmmi10', 0x5b),
'\\frown' : ('cmmi10', 0x5f),
'\\gamma' : ('cmmi10', 0xb0),
'\\imath' : ('cmmi10', 0x7b),
'\\iota' : ('cmmi10', 0xb6),
'\\jmath' : ('cmmi10', 0x7c),
'\\kappa' : ('cmmi10', 0x2219),
'\\lambda' : ('cmmi10', 0xb8),
'\\leftharpoondown' : ('cmmi10', 0x29),
'\\leftharpoonup' : ('cmmi10', 0x28),
'\\mu' : ('cmmi10', 0xb9),
'\\natural' : ('cmmi10', 0x5c),
'\\nu' : ('cmmi10', 0xba),
'\\omega' : ('cmmi10', 0x21),
'\\phi' : ('cmmi10', 0xc1),
'\\pi' : ('cmmi10', 0xbc),
'\\psi' : ('cmmi10', 0xc3),
'\\rho' : ('cmmi10', 0xbd),
'\\rightharpoondown' : ('cmmi10', 0x2b),
'\\rightharpoonup' : ('cmmi10', 0x2a),
'\\sharp' : ('cmmi10', 0x5d),
'\\sigma' : ('cmmi10', 0xbe),
'\\smile' : ('cmmi10', 0x5e),
'\\tau' : ('cmmi10', 0xbf),
'\\theta' : ('cmmi10', 0xb5),
'\\triangleleft' : ('cmmi10', 0x2f),
'\\triangleright' : ('cmmi10', 0x2e),
'\\upsilon' : ('cmmi10', 0xc0),
'\\varepsilon' : ('cmmi10', 0x22),
'\\varphi' : ('cmmi10', 0x27),
'\\varrho' : ('cmmi10', 0x25),
'\\varsigma' : ('cmmi10', 0x26),
'\\vartheta' : ('cmmi10', 0x23),
'\\wp' : ('cmmi10', 0x7d),
'\\xi' : ('cmmi10', 0xbb),
'\\zeta' : ('cmmi10', 0xb3),
'!' : ('cmr10', 0x21),
'%' : ('cmr10', 0x25),
'&' : ('cmr10', 0x26),
'(' : ('cmr10', 0x28),
')' : ('cmr10', 0x29),
'+' : ('cmr10', 0x2b),
'0' : ('cmr10', 0x30),
'1' : ('cmr10', 0x31),
'2' : ('cmr10', 0x32),
'3' : ('cmr10', 0x33),
'4' : ('cmr10', 0x34),
'5' : ('cmr10', 0x35),
'6' : ('cmr10', 0x36),
'7' : ('cmr10', 0x37),
'8' : ('cmr10', 0x38),
'9' : ('cmr10', 0x39),
':' : ('cmr10', 0x3a),
';' : ('cmr10', 0x3b),
'=' : ('cmr10', 0x3d),
'?' : ('cmr10', 0x3f),
'@' : ('cmr10', 0x40),
'[' : ('cmr10', 0x5b),
'\\#' : ('cmr10', 0x23),
'\\$' : ('cmr10', 0x24),
'\\%' : ('cmr10', 0x25),
'\\Delta' : ('cmr10', 0xa2),
'\\Gamma' : ('cmr10', 0xa1),
'\\Lambda' : ('cmr10', 0xa4),
'\\Omega' : ('cmr10', 0xad),
'\\Phi' : ('cmr10', 0xa9),
'\\Pi' : ('cmr10', 0xa6),
'\\Psi' : ('cmr10', 0xaa),
'\\Sigma' : ('cmr10', 0xa7),
'\\Theta' : ('cmr10', 0xa3),
'\\Upsilon' : ('cmr10', 0xa8),
'\\Xi' : ('cmr10', 0xa5),
'\\circumflexaccent' : ('cmr10', 0x5e),
'\\combiningacuteaccent' : ('cmr10', 0xb6),
'\\combiningbreve' : ('cmr10', 0xb8),
'\\combiningdiaeresis' : ('cmr10', 0xc4),
'\\combiningdotabove' : ('cmr10', 0x5f),
'\\combininggraveaccent' : ('cmr10', 0xb5),
'\\combiningoverline' : ('cmr10', 0xb9),
'\\combiningtilde' : ('cmr10', 0x7e),
'\\leftbracket' : ('cmr10', 0x5b),
'\\leftparen' : ('cmr10', 0x28),
'\\rightbracket' : ('cmr10', 0x5d),
'\\rightparen' : ('cmr10', 0x29),
'\\widebar' : ('cmr10', 0xb9),
']' : ('cmr10', 0x5d),
'*' : ('cmsy10', 0xa4),
'\N{MINUS SIGN}' : ('cmsy10', 0xa1),
'\\Downarrow' : ('cmsy10', 0x2b),
'\\Im' : ('cmsy10', 0x3d),
'\\Leftarrow' : ('cmsy10', 0x28),
'\\Leftrightarrow' : ('cmsy10', 0x2c),
'\\P' : ('cmsy10', 0x7b),
'\\Re' : ('cmsy10', 0x3c),
'\\Rightarrow' : ('cmsy10', 0x29),
'\\S' : ('cmsy10', 0x78),
'\\Uparrow' : ('cmsy10', 0x2a),
'\\Updownarrow' : ('cmsy10', 0x6d),
'\\Vert' : ('cmsy10', 0x6b),
'\\aleph' : ('cmsy10', 0x40),
'\\approx' : ('cmsy10', 0xbc),
'\\ast' : ('cmsy10', 0xa4),
'\\asymp' : ('cmsy10', 0xb3),
'\\backslash' : ('cmsy10', 0x6e),
'\\bigcirc' : ('cmsy10', 0xb0),
'\\bigtriangledown' : ('cmsy10', 0x35),
'\\bigtriangleup' : ('cmsy10', 0x34),
'\\bot' : ('cmsy10', 0x3f),
'\\bullet' : ('cmsy10', 0xb2),
'\\cap' : ('cmsy10', 0x5c),
'\\cdot' : ('cmsy10', 0xa2),
'\\circ' : ('cmsy10', 0xb1),
'\\clubsuit' : ('cmsy10', 0x7c),
'\\cup' : ('cmsy10', 0x5b),
'\\dag' : ('cmsy10', 0x79),
'\\dashv' : ('cmsy10', 0x61),
'\\ddag' : ('cmsy10', 0x7a),
'\\diamond' : ('cmsy10', 0xa6),
'\\diamondsuit' : ('cmsy10', 0x7d),
'\\div' : ('cmsy10', 0xa5),
'\\downarrow' : ('cmsy10', 0x23),
'\\emptyset' : ('cmsy10', 0x3b),
'\\equiv' : ('cmsy10', 0xb4),
'\\exists' : ('cmsy10', 0x39),
'\\forall' : ('cmsy10', 0x38),
'\\geq' : ('cmsy10', 0xb8),
'\\gg' : ('cmsy10', 0xc0),
'\\heartsuit' : ('cmsy10', 0x7e),
'\\in' : ('cmsy10', 0x32),
'\\infty' : ('cmsy10', 0x31),
'\\lbrace' : ('cmsy10', 0x66),
'\\lceil' : ('cmsy10', 0x64),
'\\leftarrow' : ('cmsy10', 0xc3),
'\\leftrightarrow' : ('cmsy10', 0x24),
'\\leq' : ('cmsy10', 0x2219),
'\\lfloor' : ('cmsy10', 0x62),
'\\ll' : ('cmsy10', 0xbf),
'\\mid' : ('cmsy10', 0x6a),
'\\mp' : ('cmsy10', 0xa8),
'\\nabla' : ('cmsy10', 0x72),
'\\nearrow' : ('cmsy10', 0x25),
'\\neg' : ('cmsy10', 0x3a),
'\\ni' : ('cmsy10', 0x33),
'\\nwarrow' : ('cmsy10', 0x2d),
'\\odot' : ('cmsy10', 0xaf),
'\\ominus' : ('cmsy10', 0xaa),
'\\oplus' : ('cmsy10', 0xa9),
'\\oslash' : ('cmsy10', 0xae),
'\\otimes' : ('cmsy10', 0xad),
'\\pm' : ('cmsy10', 0xa7),
'\\prec' : ('cmsy10', 0xc1),
'\\preceq' : ('cmsy10', 0xb9),
'\\prime' : ('cmsy10', 0x30),
'\\propto' : ('cmsy10', 0x2f),
'\\rbrace' : ('cmsy10', 0x67),
'\\rceil' : ('cmsy10', 0x65),
'\\rfloor' : ('cmsy10', 0x63),
'\\rightarrow' : ('cmsy10', 0x21),
'\\searrow' : ('cmsy10', 0x26),
'\\sim' : ('cmsy10', 0xbb),
'\\simeq' : ('cmsy10', 0x27),
'\\slash' : ('cmsy10', 0x36),
'\\spadesuit' : ('cmsy10', 0xc4),
'\\sqcap' : ('cmsy10', 0x75),
'\\sqcup' : ('cmsy10', 0x74),
'\\sqsubseteq' : ('cmsy10', 0x76),
'\\sqsupseteq' : ('cmsy10', 0x77),
'\\subset' : ('cmsy10', 0xbd),
'\\subseteq' : ('cmsy10', 0xb5),
'\\succ' : ('cmsy10', 0xc2),
'\\succeq' : ('cmsy10', 0xba),
'\\supset' : ('cmsy10', 0xbe),
'\\supseteq' : ('cmsy10', 0xb6),
'\\swarrow' : ('cmsy10', 0x2e),
'\\times' : ('cmsy10', 0xa3),
'\\to' : ('cmsy10', 0x21),
'\\top' : ('cmsy10', 0x3e),
'\\uparrow' : ('cmsy10', 0x22),
'\\updownarrow' : ('cmsy10', 0x6c),
'\\uplus' : ('cmsy10', 0x5d),
'\\vdash' : ('cmsy10', 0x60),
'\\vee' : ('cmsy10', 0x5f),
'\\vert' : ('cmsy10', 0x6a),
'\\wedge' : ('cmsy10', 0x5e),
'\\wr' : ('cmsy10', 0x6f),
'\\|' : ('cmsy10', 0x6b),
'|' : ('cmsy10', 0x6a),
'\\_' : ('cmtt10', 0x5f)
}
# Automatically generated.
type12uni = {
'aring' : 229,
'quotedblright' : 8221,
'V' : 86,
'dollar' : 36,
'four' : 52,
'Yacute' : 221,
'P' : 80,
'underscore' : 95,
'p' : 112,
'Otilde' : 213,
'perthousand' : 8240,
'zero' : 48,
'dotlessi' : 305,
'Scaron' : 352,
'zcaron' : 382,
'egrave' : 232,
'section' : 167,
'Icircumflex' : 206,
'ntilde' : 241,
'ampersand' : 38,
'dotaccent' : 729,
'degree' : 176,
'K' : 75,
'acircumflex' : 226,
'Aring' : 197,
'k' : 107,
'smalltilde' : 732,
'Agrave' : 192,
'divide' : 247,
'ocircumflex' : 244,
'asciitilde' : 126,
'two' : 50,
'E' : 69,
'scaron' : 353,
'F' : 70,
'bracketleft' : 91,
'asciicircum' : 94,
'f' : 102,
'ordmasculine' : 186,
'mu' : 181,
'paragraph' : 182,
'nine' : 57,
'v' : 118,
'guilsinglleft' : 8249,
'backslash' : 92,
'six' : 54,
'A' : 65,
'icircumflex' : 238,
'a' : 97,
'ogonek' : 731,
'q' : 113,
'oacute' : 243,
'ograve' : 242,
'edieresis' : 235,
'comma' : 44,
'otilde' : 245,
'guillemotright' : 187,
'ecircumflex' : 234,
'greater' : 62,
'uacute' : 250,
'L' : 76,
'bullet' : 8226,
'cedilla' : 184,
'ydieresis' : 255,
'l' : 108,
'logicalnot' : 172,
'exclamdown' : 161,
'endash' : 8211,
'agrave' : 224,
'Adieresis' : 196,
'germandbls' : 223,
'Odieresis' : 214,
'space' : 32,
'quoteright' : 8217,
'ucircumflex' : 251,
'G' : 71,
'quoteleft' : 8216,
'W' : 87,
'Q' : 81,
'g' : 103,
'w' : 119,
'question' : 63,
'one' : 49,
'ring' : 730,
'figuredash' : 8210,
'B' : 66,
'iacute' : 237,
'Ydieresis' : 376,
'R' : 82,
'b' : 98,
'r' : 114,
'Ccedilla' : 199,
'minus' : 8722,
'Lslash' : 321,
'Uacute' : 218,
'yacute' : 253,
'Ucircumflex' : 219,
'quotedbl' : 34,
'onehalf' : 189,
'Thorn' : 222,
'M' : 77,
'eight' : 56,
'multiply' : 215,
'grave' : 96,
'Ocircumflex' : 212,
'm' : 109,
'Ugrave' : 217,
'guilsinglright' : 8250,
'Ntilde' : 209,
'questiondown' : 191,
'Atilde' : 195,
'ccedilla' : 231,
'Z' : 90,
'copyright' : 169,
'yen' : 165,
'Eacute' : 201,
'H' : 72,
'X' : 88,
'Idieresis' : 207,
'bar' : 124,
'h' : 104,
'x' : 120,
'udieresis' : 252,
'ordfeminine' : 170,
'braceleft' : 123,
'macron' : 175,
'atilde' : 227,
'Acircumflex' : 194,
'Oslash' : 216,
'C' : 67,
'quotedblleft' : 8220,
'S' : 83,
'exclam' : 33,
'Zcaron' : 381,
'equal' : 61,
's' : 115,
'eth' : 240,
'Egrave' : 200,
'hyphen' : 45,
'period' : 46,
'igrave' : 236,
'colon' : 58,
'Ecircumflex' : 202,
'trademark' : 8482,
'Aacute' : 193,
'cent' : 162,
'lslash' : 322,
'c' : 99,
'N' : 78,
'breve' : 728,
'Oacute' : 211,
'guillemotleft' : 171,
'n' : 110,
'idieresis' : 239,
'braceright' : 125,
'seven' : 55,
'brokenbar' : 166,
'ugrave' : 249,
'periodcentered' : 183,
'sterling' : 163,
'I' : 73,
'Y' : 89,
'Eth' : 208,
'emdash' : 8212,
'i' : 105,
'daggerdbl' : 8225,
'y' : 121,
'plusminus' : 177,
'less' : 60,
'Udieresis' : 220,
'D' : 68,
'five' : 53,
'T' : 84,
'oslash' : 248,
'acute' : 180,
'd' : 100,
'OE' : 338,
'Igrave' : 204,
't' : 116,
'parenright' : 41,
'adieresis' : 228,
'quotesingle' : 39,
'twodotenleader' : 8229,
'slash' : 47,
'ellipsis' : 8230,
'numbersign' : 35,
'odieresis' : 246,
'O' : 79,
'oe' : 339,
'o' : 111,
'Edieresis' : 203,
'plus' : 43,
'dagger' : 8224,
'three' : 51,
'hungarumlaut' : 733,
'parenleft' : 40,
'fraction' : 8260,
'registered' : 174,
'J' : 74,
'dieresis' : 168,
'Ograve' : 210,
'j' : 106,
'z' : 122,
'ae' : 230,
'semicolon' : 59,
'at' : 64,
'Iacute' : 205,
'percent' : 37,
'bracketright' : 93,
'AE' : 198,
'asterisk' : 42,
'aacute' : 225,
'U' : 85,
'eacute' : 233,
'e' : 101,
'thorn' : 254,
'u' : 117,
}
uni2type1 = {v: k for k, v in type12uni.items()}
# The script below is to sort and format the tex2uni dict
## For decimal values: int(hex(v), 16)
# newtex = {k: hex(v) for k, v in tex2uni.items()}
# sd = dict(sorted(newtex.items(), key=lambda item: item[0]))
#
## For formatting the sorted dictionary with proper spacing
## the value '24' comes from finding the longest string in
## the newtex keys with len(max(newtex, key=len))
# for key in sd:
# print("{0:24} : {1: <s},".format("'" + key + "'", sd[key]))
tex2uni = {
'#' : 0x23,
'$' : 0x24,
'%' : 0x25,
'AA' : 0xc5,
'AE' : 0xc6,
'BbbC' : 0x2102,
'BbbN' : 0x2115,
'BbbP' : 0x2119,
'BbbQ' : 0x211a,
'BbbR' : 0x211d,
'BbbZ' : 0x2124,
'Bumpeq' : 0x224e,
'Cap' : 0x22d2,
'Colon' : 0x2237,
'Cup' : 0x22d3,
'DH' : 0xd0,
'Delta' : 0x394,
'Doteq' : 0x2251,
'Downarrow' : 0x21d3,
'Equiv' : 0x2263,
'Finv' : 0x2132,
'Game' : 0x2141,
'Gamma' : 0x393,
'H' : 0x30b,
'Im' : 0x2111,
'Join' : 0x2a1d,
'L' : 0x141,
'Lambda' : 0x39b,
'Ldsh' : 0x21b2,
'Leftarrow' : 0x21d0,
'Leftrightarrow' : 0x21d4,
'Lleftarrow' : 0x21da,
'Longleftarrow' : 0x27f8,
'Longleftrightarrow' : 0x27fa,
'Longrightarrow' : 0x27f9,
'Lsh' : 0x21b0,
'Nearrow' : 0x21d7,
'Nwarrow' : 0x21d6,
'O' : 0xd8,
'OE' : 0x152,
'Omega' : 0x3a9,
'P' : 0xb6,
'Phi' : 0x3a6,
'Pi' : 0x3a0,
'Psi' : 0x3a8,
'QED' : 0x220e,
'Rdsh' : 0x21b3,
'Re' : 0x211c,
'Rightarrow' : 0x21d2,
'Rrightarrow' : 0x21db,
'Rsh' : 0x21b1,
'S' : 0xa7,
'Searrow' : 0x21d8,
'Sigma' : 0x3a3,
'Subset' : 0x22d0,
'Supset' : 0x22d1,
'Swarrow' : 0x21d9,
'Theta' : 0x398,
'Thorn' : 0xde,
'Uparrow' : 0x21d1,
'Updownarrow' : 0x21d5,
'Upsilon' : 0x3a5,
'Vdash' : 0x22a9,
'Vert' : 0x2016,
'Vvdash' : 0x22aa,
'Xi' : 0x39e,
'_' : 0x5f,
'__sqrt__' : 0x221a,
'aa' : 0xe5,
'ac' : 0x223e,
'acute' : 0x301,
'acwopencirclearrow' : 0x21ba,
'adots' : 0x22f0,
'ae' : 0xe6,
'aleph' : 0x2135,
'alpha' : 0x3b1,
'amalg' : 0x2a3f,
'angle' : 0x2220,
'approx' : 0x2248,
'approxeq' : 0x224a,
'approxident' : 0x224b,
'arceq' : 0x2258,
'ast' : 0x2217,
'asterisk' : 0x2a,
'asymp' : 0x224d,
'backcong' : 0x224c,
'backepsilon' : 0x3f6,
'backprime' : 0x2035,
'backsim' : 0x223d,
'backsimeq' : 0x22cd,
'backslash' : 0x5c,
'bagmember' : 0x22ff,
'bar' : 0x304,
'barleftarrow' : 0x21e4,
'barvee' : 0x22bd,
'barwedge' : 0x22bc,
'because' : 0x2235,
'beta' : 0x3b2,
'beth' : 0x2136,
'between' : 0x226c,
'bigcap' : 0x22c2,
'bigcirc' : 0x25cb,
'bigcup' : 0x22c3,
'bigodot' : 0x2a00,
'bigoplus' : 0x2a01,
'bigotimes' : 0x2a02,
'bigsqcup' : 0x2a06,
'bigstar' : 0x2605,
'bigtriangledown' : 0x25bd,
'bigtriangleup' : 0x25b3,
'biguplus' : 0x2a04,
'bigvee' : 0x22c1,
'bigwedge' : 0x22c0,
'blacksquare' : 0x25a0,
'blacktriangle' : 0x25b4,
'blacktriangledown' : 0x25be,
'blacktriangleleft' : 0x25c0,
'blacktriangleright' : 0x25b6,
'bot' : 0x22a5,
'bowtie' : 0x22c8,
'boxbar' : 0x25eb,
'boxdot' : 0x22a1,
'boxminus' : 0x229f,
'boxplus' : 0x229e,
'boxtimes' : 0x22a0,
'breve' : 0x306,
'bullet' : 0x2219,
'bumpeq' : 0x224f,
'c' : 0x327,
'candra' : 0x310,
'cap' : 0x2229,
'carriagereturn' : 0x21b5,
'cdot' : 0x22c5,
'cdotp' : 0xb7,
'cdots' : 0x22ef,
'cent' : 0xa2,
'check' : 0x30c,
'checkmark' : 0x2713,
'chi' : 0x3c7,
'circ' : 0x2218,
'circeq' : 0x2257,
'circlearrowleft' : 0x21ba,
'circlearrowright' : 0x21bb,
'circledR' : 0xae,
'circledS' : 0x24c8,
'circledast' : 0x229b,
'circledcirc' : 0x229a,
'circleddash' : 0x229d,
'circumflexaccent' : 0x302,
'clubsuit' : 0x2663,
'clubsuitopen' : 0x2667,
'colon' : 0x3a,
'coloneq' : 0x2254,
'combiningacuteaccent' : 0x301,
'combiningbreve' : 0x306,
'combiningdiaeresis' : 0x308,
'combiningdotabove' : 0x307,
'combiningfourdotsabove' : 0x20dc,
'combininggraveaccent' : 0x300,
'combiningoverline' : 0x304,
'combiningrightarrowabove' : 0x20d7,
'combiningthreedotsabove' : 0x20db,
'combiningtilde' : 0x303,
'complement' : 0x2201,
'cong' : 0x2245,
'coprod' : 0x2210,
'copyright' : 0xa9,
'cup' : 0x222a,
'cupdot' : 0x228d,
'cupleftarrow' : 0x228c,
'curlyeqprec' : 0x22de,
'curlyeqsucc' : 0x22df,
'curlyvee' : 0x22ce,
'curlywedge' : 0x22cf,
'curvearrowleft' : 0x21b6,
'curvearrowright' : 0x21b7,
'cwopencirclearrow' : 0x21bb,
'd' : 0x323,
'dag' : 0x2020,
'dagger' : 0x2020,
'daleth' : 0x2138,
'danger' : 0x2621,
'dashleftarrow' : 0x290e,
'dashrightarrow' : 0x290f,
'dashv' : 0x22a3,
'ddag' : 0x2021,
'ddagger' : 0x2021,
'ddddot' : 0x20dc,
'dddot' : 0x20db,
'ddot' : 0x308,
'ddots' : 0x22f1,
'degree' : 0xb0,
'delta' : 0x3b4,
'dh' : 0xf0,
'diamond' : 0x22c4,
'diamondsuit' : 0x2662,
'digamma' : 0x3dd,
'disin' : 0x22f2,
'div' : 0xf7,
'divideontimes' : 0x22c7,
'dot' : 0x307,
'doteq' : 0x2250,
'doteqdot' : 0x2251,
'dotminus' : 0x2238,
'dotplus' : 0x2214,
'dots' : 0x2026,
'dotsminusdots' : 0x223a,
'doublebarwedge' : 0x2306,
'downarrow' : 0x2193,
'downdownarrows' : 0x21ca,
'downharpoonleft' : 0x21c3,
'downharpoonright' : 0x21c2,
'downzigzagarrow' : 0x21af,
'ell' : 0x2113,
'emdash' : 0x2014,
'emptyset' : 0x2205,
'endash' : 0x2013,
'epsilon' : 0x3b5,
'eqcirc' : 0x2256,
'eqcolon' : 0x2255,
'eqdef' : 0x225d,
'eqgtr' : 0x22dd,
'eqless' : 0x22dc,
'eqsim' : 0x2242,
'eqslantgtr' : 0x2a96,
'eqslantless' : 0x2a95,
'equal' : 0x3d,
'equalparallel' : 0x22d5,
'equiv' : 0x2261,
'eta' : 0x3b7,
'eth' : 0xf0,
'exists' : 0x2203,
'fallingdotseq' : 0x2252,
'flat' : 0x266d,
'forall' : 0x2200,
'frakC' : 0x212d,
'frakZ' : 0x2128,
'frown' : 0x2322,
'gamma' : 0x3b3,
'geq' : 0x2265,
'geqq' : 0x2267,
'geqslant' : 0x2a7e,
'gg' : 0x226b,
'ggg' : 0x22d9,
'gimel' : 0x2137,
'gnapprox' : 0x2a8a,
'gneqq' : 0x2269,
'gnsim' : 0x22e7,
'grave' : 0x300,
'greater' : 0x3e,
'gtrapprox' : 0x2a86,
'gtrdot' : 0x22d7,
'gtreqless' : 0x22db,
'gtreqqless' : 0x2a8c,
'gtrless' : 0x2277,
'gtrsim' : 0x2273,
'guillemotleft' : 0xab,
'guillemotright' : 0xbb,
'guilsinglleft' : 0x2039,
'guilsinglright' : 0x203a,
'hat' : 0x302,
'hbar' : 0x127,
'heartsuit' : 0x2661,
'hermitmatrix' : 0x22b9,
'hookleftarrow' : 0x21a9,
'hookrightarrow' : 0x21aa,
'hslash' : 0x210f,
'i' : 0x131,
'iiiint' : 0x2a0c,
'iiint' : 0x222d,
'iint' : 0x222c,
'imageof' : 0x22b7,
'imath' : 0x131,
'in' : 0x2208,
'increment' : 0x2206,
'infty' : 0x221e,
'int' : 0x222b,
'intercal' : 0x22ba,
'invnot' : 0x2310,
'iota' : 0x3b9,
'isinE' : 0x22f9,
'isindot' : 0x22f5,
'isinobar' : 0x22f7,
'isins' : 0x22f4,
'isinvb' : 0x22f8,
'jmath' : 0x237,
'k' : 0x328,
'kappa' : 0x3ba,
'kernelcontraction' : 0x223b,
'l' : 0x142,
'lambda' : 0x3bb,
'lambdabar' : 0x19b,
'langle' : 0x27e8,
'lasp' : 0x2bd,
'lbrace' : 0x7b,
'lbrack' : 0x5b,
'lceil' : 0x2308,
'ldots' : 0x2026,
'leadsto' : 0x21dd,
'leftarrow' : 0x2190,
'leftarrowtail' : 0x21a2,
'leftbrace' : 0x7b,
'leftharpoonaccent' : 0x20d0,
'leftharpoondown' : 0x21bd,
'leftharpoonup' : 0x21bc,
'leftleftarrows' : 0x21c7,
'leftparen' : 0x28,
'leftrightarrow' : 0x2194,
'leftrightarrows' : 0x21c6,
'leftrightharpoons' : 0x21cb,
'leftrightsquigarrow' : 0x21ad,
'leftsquigarrow' : 0x219c,
'leftthreetimes' : 0x22cb,
'leq' : 0x2264,
'leqq' : 0x2266,
'leqslant' : 0x2a7d,
'less' : 0x3c,
'lessapprox' : 0x2a85,
'lessdot' : 0x22d6,
'lesseqgtr' : 0x22da,
'lesseqqgtr' : 0x2a8b,
'lessgtr' : 0x2276,
'lesssim' : 0x2272,
'lfloor' : 0x230a,
'lgroup' : 0x27ee,
'lhd' : 0x25c1,
'll' : 0x226a,
'llcorner' : 0x231e,
'lll' : 0x22d8,
'lnapprox' : 0x2a89,
'lneqq' : 0x2268,
'lnsim' : 0x22e6,
'longleftarrow' : 0x27f5,
'longleftrightarrow' : 0x27f7,
'longmapsto' : 0x27fc,
'longrightarrow' : 0x27f6,
'looparrowleft' : 0x21ab,
'looparrowright' : 0x21ac,
'lq' : 0x2018,
'lrcorner' : 0x231f,
'ltimes' : 0x22c9,
'macron' : 0xaf,
'maltese' : 0x2720,
'mapsdown' : 0x21a7,
'mapsfrom' : 0x21a4,
'mapsto' : 0x21a6,
'mapsup' : 0x21a5,
'measeq' : 0x225e,
'measuredangle' : 0x2221,
'measuredrightangle' : 0x22be,
'merge' : 0x2a55,
'mho' : 0x2127,
'mid' : 0x2223,
'minus' : 0x2212,
'minuscolon' : 0x2239,
'models' : 0x22a7,
'mp' : 0x2213,
'mu' : 0x3bc,
'multimap' : 0x22b8,
'nLeftarrow' : 0x21cd,
'nLeftrightarrow' : 0x21ce,
'nRightarrow' : 0x21cf,
'nVDash' : 0x22af,
'nVdash' : 0x22ae,
'nabla' : 0x2207,
'napprox' : 0x2249,
'natural' : 0x266e,
'ncong' : 0x2247,
'ne' : 0x2260,
'nearrow' : 0x2197,
'neg' : 0xac,
'neq' : 0x2260,
'nequiv' : 0x2262,
'nexists' : 0x2204,
'ngeq' : 0x2271,
'ngtr' : 0x226f,
'ngtrless' : 0x2279,
'ngtrsim' : 0x2275,
'ni' : 0x220b,
'niobar' : 0x22fe,
'nis' : 0x22fc,
'nisd' : 0x22fa,
'nleftarrow' : 0x219a,
'nleftrightarrow' : 0x21ae,
'nleq' : 0x2270,
'nless' : 0x226e,
'nlessgtr' : 0x2278,
'nlesssim' : 0x2274,
'nmid' : 0x2224,
'not' : 0x338,
'notin' : 0x2209,
'notsmallowns' : 0x220c,
'nparallel' : 0x2226,
'nprec' : 0x2280,
'npreccurlyeq' : 0x22e0,
'nrightarrow' : 0x219b,
'nsim' : 0x2241,
'nsimeq' : 0x2244,
'nsqsubseteq' : 0x22e2,
'nsqsupseteq' : 0x22e3,
'nsubset' : 0x2284,
'nsubseteq' : 0x2288,
'nsucc' : 0x2281,
'nsucccurlyeq' : 0x22e1,
'nsupset' : 0x2285,
'nsupseteq' : 0x2289,
'ntriangleleft' : 0x22ea,
'ntrianglelefteq' : 0x22ec,
'ntriangleright' : 0x22eb,
'ntrianglerighteq' : 0x22ed,
'nu' : 0x3bd,
'nvDash' : 0x22ad,
'nvdash' : 0x22ac,
'nwarrow' : 0x2196,
'o' : 0xf8,
'obar' : 0x233d,
'ocirc' : 0x30a,
'odot' : 0x2299,
'oe' : 0x153,
'oequal' : 0x229c,
'oiiint' : 0x2230,
'oiint' : 0x222f,
'oint' : 0x222e,
'omega' : 0x3c9,
'ominus' : 0x2296,
'oplus' : 0x2295,
'origof' : 0x22b6,
'oslash' : 0x2298,
'otimes' : 0x2297,
'overarc' : 0x311,
'overleftarrow' : 0x20d6,
'overleftrightarrow' : 0x20e1,
'parallel' : 0x2225,
'partial' : 0x2202,
'perp' : 0x27c2,
'perthousand' : 0x2030,
'phi' : 0x3d5,
'pi' : 0x3c0,
'pitchfork' : 0x22d4,
'plus' : 0x2b,
'pm' : 0xb1,
'prec' : 0x227a,
'precapprox' : 0x2ab7,
'preccurlyeq' : 0x227c,
'preceq' : 0x227c,
'precnapprox' : 0x2ab9,
'precnsim' : 0x22e8,
'precsim' : 0x227e,
'prime' : 0x2032,
'prod' : 0x220f,
'propto' : 0x221d,
'prurel' : 0x22b0,
'psi' : 0x3c8,
'quad' : 0x2003,
'questeq' : 0x225f,
'rangle' : 0x27e9,
'rasp' : 0x2bc,
'ratio' : 0x2236,
'rbrace' : 0x7d,
'rbrack' : 0x5d,
'rceil' : 0x2309,
'rfloor' : 0x230b,
'rgroup' : 0x27ef,
'rhd' : 0x25b7,
'rho' : 0x3c1,
'rightModels' : 0x22ab,
'rightangle' : 0x221f,
'rightarrow' : 0x2192,
'rightarrowbar' : 0x21e5,
'rightarrowtail' : 0x21a3,
'rightassert' : 0x22a6,
'rightbrace' : 0x7d,
'rightharpoonaccent' : 0x20d1,
'rightharpoondown' : 0x21c1,
'rightharpoonup' : 0x21c0,
'rightleftarrows' : 0x21c4,
'rightleftharpoons' : 0x21cc,
'rightparen' : 0x29,
'rightrightarrows' : 0x21c9,
'rightsquigarrow' : 0x219d,
'rightthreetimes' : 0x22cc,
'rightzigzagarrow' : 0x21dd,
'ring' : 0x2da,
'risingdotseq' : 0x2253,
'rq' : 0x2019,
'rtimes' : 0x22ca,
'scrB' : 0x212c,
'scrE' : 0x2130,
'scrF' : 0x2131,
'scrH' : 0x210b,
'scrI' : 0x2110,
'scrL' : 0x2112,
'scrM' : 0x2133,
'scrR' : 0x211b,
'scre' : 0x212f,
'scrg' : 0x210a,
'scro' : 0x2134,
'scurel' : 0x22b1,
'searrow' : 0x2198,
'setminus' : 0x2216,
'sharp' : 0x266f,
'sigma' : 0x3c3,
'sim' : 0x223c,
'simeq' : 0x2243,
'simneqq' : 0x2246,
'sinewave' : 0x223f,
'slash' : 0x2215,
'smallin' : 0x220a,
'smallintclockwise' : 0x2231,
'smallointctrcclockwise' : 0x2233,
'smallowns' : 0x220d,
'smallsetminus' : 0x2216,
'smallvarointclockwise' : 0x2232,
'smile' : 0x2323,
'solbar' : 0x233f,
'spadesuit' : 0x2660,
'spadesuitopen' : 0x2664,
'sphericalangle' : 0x2222,
'sqcap' : 0x2293,
'sqcup' : 0x2294,
'sqsubset' : 0x228f,
'sqsubseteq' : 0x2291,
'sqsubsetneq' : 0x22e4,
'sqsupset' : 0x2290,
'sqsupseteq' : 0x2292,
'sqsupsetneq' : 0x22e5,
'ss' : 0xdf,
'star' : 0x22c6,
'stareq' : 0x225b,
'sterling' : 0xa3,
'subset' : 0x2282,
'subseteq' : 0x2286,
'subseteqq' : 0x2ac5,
'subsetneq' : 0x228a,
'subsetneqq' : 0x2acb,
'succ' : 0x227b,
'succapprox' : 0x2ab8,
'succcurlyeq' : 0x227d,
'succeq' : 0x227d,
'succnapprox' : 0x2aba,
'succnsim' : 0x22e9,
'succsim' : 0x227f,
'sum' : 0x2211,
'supset' : 0x2283,
'supseteq' : 0x2287,
'supseteqq' : 0x2ac6,
'supsetneq' : 0x228b,
'supsetneqq' : 0x2acc,
'swarrow' : 0x2199,
't' : 0x361,
'tau' : 0x3c4,
'textasciiacute' : 0xb4,
'textasciicircum' : 0x5e,
'textasciigrave' : 0x60,
'textasciitilde' : 0x7e,
'textexclamdown' : 0xa1,
'textquestiondown' : 0xbf,
'textquotedblleft' : 0x201c,
'textquotedblright' : 0x201d,
'therefore' : 0x2234,
'theta' : 0x3b8,
'thickspace' : 0x2005,
'thorn' : 0xfe,
'tilde' : 0x303,
'times' : 0xd7,
'to' : 0x2192,
'top' : 0x22a4,
'triangle' : 0x25b3,
'triangledown' : 0x25bf,
'triangleeq' : 0x225c,
'triangleleft' : 0x25c1,
'trianglelefteq' : 0x22b4,
'triangleq' : 0x225c,
'triangleright' : 0x25b7,
'trianglerighteq' : 0x22b5,
'turnednot' : 0x2319,
'twoheaddownarrow' : 0x21a1,
'twoheadleftarrow' : 0x219e,
'twoheadrightarrow' : 0x21a0,
'twoheaduparrow' : 0x219f,
'ulcorner' : 0x231c,
'underbar' : 0x331,
'unlhd' : 0x22b4,
'unrhd' : 0x22b5,
'uparrow' : 0x2191,
'updownarrow' : 0x2195,
'updownarrowbar' : 0x21a8,
'updownarrows' : 0x21c5,
'upharpoonleft' : 0x21bf,
'upharpoonright' : 0x21be,
'uplus' : 0x228e,
'upsilon' : 0x3c5,
'upuparrows' : 0x21c8,
'urcorner' : 0x231d,
'vDash' : 0x22a8,
'varepsilon' : 0x3b5,
'varisinobar' : 0x22f6,
'varisins' : 0x22f3,
'varkappa' : 0x3f0,
'varlrtriangle' : 0x22bf,
'varniobar' : 0x22fd,
'varnis' : 0x22fb,
'varnothing' : 0x2205,
'varphi' : 0x3c6,
'varpi' : 0x3d6,
'varpropto' : 0x221d,
'varrho' : 0x3f1,
'varsigma' : 0x3c2,
'vartheta' : 0x3d1,
'vartriangle' : 0x25b5,
'vartriangleleft' : 0x22b2,
'vartriangleright' : 0x22b3,
'vdash' : 0x22a2,
'vdots' : 0x22ee,
'vec' : 0x20d7,
'vee' : 0x2228,
'veebar' : 0x22bb,
'veeeq' : 0x225a,
'vert' : 0x7c,
'wedge' : 0x2227,
'wedgeq' : 0x2259,
'widebar' : 0x305,
'widehat' : 0x302,
'widetilde' : 0x303,
'wp' : 0x2118,
'wr' : 0x2240,
'xi' : 0x3be,
'yen' : 0xa5,
'zeta' : 0x3b6,
'{' : 0x7b,
'|' : 0x2016,
'}' : 0x7d,
}
# Each element is a 4-tuple of the form:
# src_start, src_end, dst_font, dst_start
#
stix_virtual_fonts: dict[str, dict[str, list[tuple[int, int, str, int]]] |
list[tuple[int, int, str, int]]] = {
'bb':
{
'rm':
[
(0x0030, 0x0039, 'rm', 0x1d7d8), # 0-9
(0x0041, 0x0042, 'rm', 0x1d538), # A-B
(0x0043, 0x0043, 'rm', 0x2102), # C
(0x0044, 0x0047, 'rm', 0x1d53b), # D-G
(0x0048, 0x0048, 'rm', 0x210d), # H
(0x0049, 0x004d, 'rm', 0x1d540), # I-M
(0x004e, 0x004e, 'rm', 0x2115), # N
(0x004f, 0x004f, 'rm', 0x1d546), # O
(0x0050, 0x0051, 'rm', 0x2119), # P-Q
(0x0052, 0x0052, 'rm', 0x211d), # R
(0x0053, 0x0059, 'rm', 0x1d54a), # S-Y
(0x005a, 0x005a, 'rm', 0x2124), # Z
(0x0061, 0x007a, 'rm', 0x1d552), # a-z
(0x0393, 0x0393, 'rm', 0x213e), # \Gamma
(0x03a0, 0x03a0, 'rm', 0x213f), # \Pi
(0x03a3, 0x03a3, 'rm', 0x2140), # \Sigma
(0x03b3, 0x03b3, 'rm', 0x213d), # \gamma
(0x03c0, 0x03c0, 'rm', 0x213c), # \pi
],
'it':
[
(0x0030, 0x0039, 'rm', 0x1d7d8), # 0-9
(0x0041, 0x0042, 'it', 0xe154), # A-B
(0x0043, 0x0043, 'it', 0x2102), # C
(0x0044, 0x0044, 'it', 0x2145), # D
(0x0045, 0x0047, 'it', 0xe156), # E-G
(0x0048, 0x0048, 'it', 0x210d), # H
(0x0049, 0x004d, 'it', 0xe159), # I-M
(0x004e, 0x004e, 'it', 0x2115), # N
(0x004f, 0x004f, 'it', 0xe15e), # O
(0x0050, 0x0051, 'it', 0x2119), # P-Q
(0x0052, 0x0052, 'it', 0x211d), # R
(0x0053, 0x0059, 'it', 0xe15f), # S-Y
(0x005a, 0x005a, 'it', 0x2124), # Z
(0x0061, 0x0063, 'it', 0xe166), # a-c
(0x0064, 0x0065, 'it', 0x2146), # d-e
(0x0066, 0x0068, 'it', 0xe169), # f-h
(0x0069, 0x006a, 'it', 0x2148), # i-j
(0x006b, 0x007a, 'it', 0xe16c), # k-z
(0x0393, 0x0393, 'it', 0x213e), # \Gamma (not in beta STIX fonts)
(0x03a0, 0x03a0, 'it', 0x213f), # \Pi
(0x03a3, 0x03a3, 'it', 0x2140), # \Sigma (not in beta STIX fonts)
(0x03b3, 0x03b3, 'it', 0x213d), # \gamma (not in beta STIX fonts)
(0x03c0, 0x03c0, 'it', 0x213c), # \pi
],
'bf':
[
(0x0030, 0x0039, 'rm', 0x1d7d8), # 0-9
(0x0041, 0x0042, 'bf', 0xe38a), # A-B
(0x0043, 0x0043, 'bf', 0x2102), # C
(0x0044, 0x0044, 'bf', 0x2145), # D
(0x0045, 0x0047, 'bf', 0xe38d), # E-G
(0x0048, 0x0048, 'bf', 0x210d), # H
(0x0049, 0x004d, 'bf', 0xe390), # I-M
(0x004e, 0x004e, 'bf', 0x2115), # N
(0x004f, 0x004f, 'bf', 0xe395), # O
(0x0050, 0x0051, 'bf', 0x2119), # P-Q
(0x0052, 0x0052, 'bf', 0x211d), # R
(0x0053, 0x0059, 'bf', 0xe396), # S-Y
(0x005a, 0x005a, 'bf', 0x2124), # Z
(0x0061, 0x0063, 'bf', 0xe39d), # a-c
(0x0064, 0x0065, 'bf', 0x2146), # d-e
(0x0066, 0x0068, 'bf', 0xe3a2), # f-h
(0x0069, 0x006a, 'bf', 0x2148), # i-j
(0x006b, 0x007a, 'bf', 0xe3a7), # k-z
(0x0393, 0x0393, 'bf', 0x213e), # \Gamma
(0x03a0, 0x03a0, 'bf', 0x213f), # \Pi
(0x03a3, 0x03a3, 'bf', 0x2140), # \Sigma
(0x03b3, 0x03b3, 'bf', 0x213d), # \gamma
(0x03c0, 0x03c0, 'bf', 0x213c), # \pi
],
},
'cal':
[
(0x0041, 0x005a, 'it', 0xe22d), # A-Z
],
'frak':
{
'rm':
[
(0x0041, 0x0042, 'rm', 0x1d504), # A-B
(0x0043, 0x0043, 'rm', 0x212d), # C
(0x0044, 0x0047, 'rm', 0x1d507), # D-G
(0x0048, 0x0048, 'rm', 0x210c), # H
(0x0049, 0x0049, 'rm', 0x2111), # I
(0x004a, 0x0051, 'rm', 0x1d50d), # J-Q
(0x0052, 0x0052, 'rm', 0x211c), # R
(0x0053, 0x0059, 'rm', 0x1d516), # S-Y
(0x005a, 0x005a, 'rm', 0x2128), # Z
(0x0061, 0x007a, 'rm', 0x1d51e), # a-z
],
'bf':
[
(0x0041, 0x005a, 'bf', 0x1d56c), # A-Z
(0x0061, 0x007a, 'bf', 0x1d586), # a-z
],
},
'scr':
[
(0x0041, 0x0041, 'it', 0x1d49c), # A
(0x0042, 0x0042, 'it', 0x212c), # B
(0x0043, 0x0044, 'it', 0x1d49e), # C-D
(0x0045, 0x0046, 'it', 0x2130), # E-F
(0x0047, 0x0047, 'it', 0x1d4a2), # G
(0x0048, 0x0048, 'it', 0x210b), # H
(0x0049, 0x0049, 'it', 0x2110), # I
(0x004a, 0x004b, 'it', 0x1d4a5), # J-K
(0x004c, 0x004c, 'it', 0x2112), # L
(0x004d, 0x004d, 'it', 0x2133), # M
(0x004e, 0x0051, 'it', 0x1d4a9), # N-Q
(0x0052, 0x0052, 'it', 0x211b), # R
(0x0053, 0x005a, 'it', 0x1d4ae), # S-Z
(0x0061, 0x0064, 'it', 0x1d4b6), # a-d
(0x0065, 0x0065, 'it', 0x212f), # e
(0x0066, 0x0066, 'it', 0x1d4bb), # f
(0x0067, 0x0067, 'it', 0x210a), # g
(0x0068, 0x006e, 'it', 0x1d4bd), # h-n
(0x006f, 0x006f, 'it', 0x2134), # o
(0x0070, 0x007a, 'it', 0x1d4c5), # p-z
],
'sf':
{
'rm':
[
(0x0030, 0x0039, 'rm', 0x1d7e2), # 0-9
(0x0041, 0x005a, 'rm', 0x1d5a0), # A-Z
(0x0061, 0x007a, 'rm', 0x1d5ba), # a-z
(0x0391, 0x03a9, 'rm', 0xe17d), # \Alpha-\Omega
(0x03b1, 0x03c9, 'rm', 0xe196), # \alpha-\omega
(0x03d1, 0x03d1, 'rm', 0xe1b0), # theta variant
(0x03d5, 0x03d5, 'rm', 0xe1b1), # phi variant
(0x03d6, 0x03d6, 'rm', 0xe1b3), # pi variant
(0x03f1, 0x03f1, 'rm', 0xe1b2), # rho variant
(0x03f5, 0x03f5, 'rm', 0xe1af), # lunate epsilon
(0x2202, 0x2202, 'rm', 0xe17c), # partial differential
],
'it':
[
# These numerals are actually upright. We don't actually
# want italic numerals ever.
(0x0030, 0x0039, 'rm', 0x1d7e2), # 0-9
(0x0041, 0x005a, 'it', 0x1d608), # A-Z
(0x0061, 0x007a, 'it', 0x1d622), # a-z
(0x0391, 0x03a9, 'rm', 0xe17d), # \Alpha-\Omega
(0x03b1, 0x03c9, 'it', 0xe1d8), # \alpha-\omega
(0x03d1, 0x03d1, 'it', 0xe1f2), # theta variant
(0x03d5, 0x03d5, 'it', 0xe1f3), # phi variant
(0x03d6, 0x03d6, 'it', 0xe1f5), # pi variant
(0x03f1, 0x03f1, 'it', 0xe1f4), # rho variant
(0x03f5, 0x03f5, 'it', 0xe1f1), # lunate epsilon
],
'bf':
[
(0x0030, 0x0039, 'bf', 0x1d7ec), # 0-9
(0x0041, 0x005a, 'bf', 0x1d5d4), # A-Z
(0x0061, 0x007a, 'bf', 0x1d5ee), # a-z
(0x0391, 0x03a9, 'bf', 0x1d756), # \Alpha-\Omega
(0x03b1, 0x03c9, 'bf', 0x1d770), # \alpha-\omega
(0x03d1, 0x03d1, 'bf', 0x1d78b), # theta variant
(0x03d5, 0x03d5, 'bf', 0x1d78d), # phi variant
(0x03d6, 0x03d6, 'bf', 0x1d78f), # pi variant
(0x03f0, 0x03f0, 'bf', 0x1d78c), # kappa variant
(0x03f1, 0x03f1, 'bf', 0x1d78e), # rho variant
(0x03f5, 0x03f5, 'bf', 0x1d78a), # lunate epsilon
(0x2202, 0x2202, 'bf', 0x1d789), # partial differential
(0x2207, 0x2207, 'bf', 0x1d76f), # \Nabla
],
'bfit':
[
(0x0041, 0x005a, 'bfit', 0x1d468), # A-Z
(0x0061, 0x007a, 'bfit', 0x1d482), # a-z
(0x0393, 0x03a9, 'bfit', 0x1d71e), # \Gamma-\Omega
(0x03b1, 0x03c9, 'bfit', 0x1d736), # \alpha-\omega
],
},
'tt':
[
(0x0030, 0x0039, 'rm', 0x1d7f6), # 0-9
(0x0041, 0x005a, 'rm', 0x1d670), # A-Z
(0x0061, 0x007a, 'rm', 0x1d68a) # a-z
],
}
# Fix some incorrect glyphs: remap code points whose glyphs are wrong in the
# shipped STIX fonts.
stix_glyph_fixes = {
    # Cap and Cup glyphs are swapped, so map each code point to the other.
    0x22d2: 0x22d3,  # \Cap
    0x22d3: 0x22d2,  # \Cup
}
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@matplotlib@py3@matplotlib@_mathtext_data.py@.PATH_END.py
|
{
"filename": "__positron_spectra.py",
"repo_name": "LoganAMorrison/Hazma",
"repo_path": "Hazma_extracted/Hazma-master/hazma/__positron_spectra.py",
"type": "Python"
}
|
"""
Module for computing positron spectra.
@author: Logan Morrison and Adam Coogan
"""
from typing import overload, Union, Callable, List, Optional
import warnings
from hazma.spectra import (
dnde_positron_charged_pion as positron_charged_pion,
dnde_positron_muon as positron_muon,
)
from hazma._positron.positron_decay import positron
from hazma.utils import RealArray, RealOrRealArray
# Warn once at import time: this module is a deprecated shim kept for
# backwards compatibility.
warnings.warn(
    "'hazma.positron_spectra' is deprecated. Use 'hazma.spectra' instead.",
    DeprecationWarning,
    stacklevel=2,
)
# A squared matrix element: maps an array of final-state four-momenta
# (shape (num_fsp, 4)) to a real weight.
SquaredMatrixElement = Callable[[RealArray], float]
def __flat_squared_matrix_element(_: RealArray) -> float:
    """Default squared matrix element: flat (constant 1) over phase space."""
    return 1.0
@overload
def muon(positron_energies: float, muon_energy: float) -> float:
    ...
@overload
def muon(positron_energies: RealArray, muon_energy: float) -> RealArray:
    ...
def muon(positron_energies: RealOrRealArray, muon_energy: float) -> RealOrRealArray:
    """
    Returns the positron spectrum from muon decay.
    Parameters
    ----------
    positron_energies : float or numpy.array
        Energy(ies) of the positron/electron.
    muon_energy : float or array-like
        Energy of the muon.
    Returns
    -------
    dnde : float or numpy.array
        The value of the spectrum given a positron energy(ies)
        ``positron_energies`` and muon energy ``muon_energy``.
    """
    # NOTE(review): ``positron_muon`` is imported as an alias of
    # ``hazma.spectra.dnde_positron_muon``; the attribute access below only
    # works if that object actually exposes ``muon_positron_spectrum`` --
    # confirm against the current hazma.spectra API.
    return positron_muon.muon_positron_spectrum(positron_energies, muon_energy)
@overload
def charged_pion(positron_energies: float, pion_energy: float) -> float:
    ...
@overload
def charged_pion(positron_energies: RealArray, pion_energy: float) -> RealArray:
    ...
def charged_pion(
    positron_energies: RealOrRealArray, pion_energy: float
) -> RealOrRealArray:
    """
    Returns the positron spectrum from the decay of a charged pion.
    Parameters
    ----------
    positron_energies : float or numpy.array
        Energy(ies) of the positron/electron.
    pion_energy : float or numpy.array
        Energy of the charged pion.
    Returns
    -------
    dnde : float or numpy.array
        The value of the spectrum given a positron energy(ies)
        ``positron_energies`` and charged pion energy ``pion_energy``.
    """
    # NOTE(review): ``positron_charged_pion`` aliases
    # ``hazma.spectra.dnde_positron_charged_pion``; confirm it exposes
    # ``charged_pion_positron_spectrum`` before relying on this shim.
    return positron_charged_pion.charged_pion_positron_spectrum(
        positron_energies, pion_energy
    )
def positron_decay(
    particles: List[str],
    cme: float,
    positron_energies: Union[List[float], RealArray],
    mat_elem_sqrd: Optional[SquaredMatrixElement] = None,
    num_ps_pts: int = 1000,
    num_bins: int = 25,
) -> RealArray:
    r"""Returns total positron spectrum from a set of particles.
    Parameters
    ----------
    particles : array_like
        List of particle names. Available particles are 'muon', and
        'charged_pion'.
    cme : double
        Center of mass energy of the final state in MeV.
    positron_energies : np.ndarray[double, ndim=1]
        List of positron energies in MeV to evaluate spectra at.
    mat_elem_sqrd : double(\*func)(np.ndarray)
        Function for the matrix element squared of the process. Must be a
        function taking in a list of four momenta of size (num_fsp, 4).
        Default value is a flat matrix element returning 1.
    num_ps_pts : int {1000}, optional
        Number of phase space points to use.
    num_bins : int {25}, optional
        Number of bins to use.
    Returns
    -------
    spec : np.ndarray
        Total positron spectrum from all final state particles.
    Notes
    -----
    The total spectrum is computed using
    .. math::
        \frac{dN}{dE}(E_{e^{\pm}}) = \sum_{i,j}P_{i}(E_{j})
        \frac{dN_i}{dE}(E_{e^{\pm}}, E_{j})
    where :math:`i` runs over the final state particles, :math:`j` runs over
    energies sampled from probability distributions. :math:`P_{i}(E_{j})` is
    the probability that particle :math:`i` has energy :math:`E_{j}`. The
    probabilities are computed using ``hazma.phase_space_generator.rambo``. The
    total number of energies used is ``num_bins``.
    Examples
    --------
    Generate spectrum from a muon, and two charged pions
    with total energy of 5 GeV::
        from hazma.positron_spectra import positron_decay
        from hazma.parameters import electron_mass as me
        import numpy as np
        particles = np.array(['muon', 'charged_pion', 'charged_pion'])
        cme = 5000.
        positron_energies = np.logspace(np.log10(me), np.log10(cme),
                                        num=200, dtype=np.float64)
        positron_decay(particles, cme, positron_energies)
    """
    # Fall back to a flat matrix element when the caller supplies none.
    if mat_elem_sqrd is None:
        msqrd = __flat_squared_matrix_element
    else:
        msqrd = mat_elem_sqrd
    # Tolerate a bare particle name despite the List[str] annotation.
    if isinstance(particles, str):
        particles = [particles]
    return positron(
        particles,
        cme,
        positron_energies,
        mat_elem_sqrd=msqrd,
        num_ps_pts=num_ps_pts,
        num_bins=num_bins,
    )
|
LoganAMorrisonREPO_NAMEHazmaPATH_START.@Hazma_extracted@Hazma-master@hazma@__positron_spectra.py@.PATH_END.py
|
{
"filename": "test_argparse.py",
"repo_name": "numpy/numpy",
"repo_path": "numpy_extracted/numpy-main/numpy/_core/tests/test_argparse.py",
"type": "Python"
}
|
"""
Tests for the private NumPy argument parsing functionality.
They mainly exists to ensure good test coverage without having to try the
weirder cases on actual numpy functions but test them in one place.
The test function is defined in C to be equivalent to (errors may not always
match exactly, and could be adjusted):
def func(arg1, /, arg2, *, arg3):
i = integer(arg1) # reproducing the 'i' parsing in Python.
return None
"""
import threading
import pytest
import numpy as np
from numpy._core._multiarray_tests import (
argparse_example_function as func,
threaded_argparse_example_function as thread_func,
)
from numpy.testing import IS_WASM
@pytest.mark.skipif(IS_WASM, reason="wasm doesn't have support for threads")
def test_thread_safe_argparse_cache():
    """Concurrently exercise the C argument-parsing cache from 8 threads.

    The barrier maximises the chance that all threads hit the (lazily
    initialised) parser cache at the same time, which would expose
    thread-safety bugs as crashes or exceptions.
    """
    b = threading.Barrier(8)
    def call_thread_func():
        # Wait until every worker is ready so the first parse is contended.
        b.wait()
        thread_func(arg1=3, arg2=None)
    tasks = [threading.Thread(target=call_thread_func) for _ in range(8)]
    # Plain loops instead of side-effect list comprehensions (idiom fix).
    for t in tasks:
        t.start()
    for t in tasks:
        t.join()
def test_invalid_integers():
    # The 'i' converter must reject floats outright (no silent truncation).
    with pytest.raises(TypeError,
            match="integer argument expected, got float"):
        func(1.)
    # Values that do not fit in a C long must overflow, not wrap.
    with pytest.raises(OverflowError):
        func(2**100)
def test_missing_arguments():
    # Both required arguments absent: the first missing positional is reported.
    with pytest.raises(TypeError,
            match="missing required positional argument 0"):
        func()
    # arg1 is positional-only, so passing arg2/arg3 by name does not help.
    with pytest.raises(TypeError,
            match="missing required positional argument 0"):
        func(arg2=1, arg3=4)
    # arg2 may be passed by name, so the error names it when only it is missing.
    with pytest.raises(TypeError,
            match=r"missing required argument \'arg2\' \(pos 1\)"):
        func(1, arg3=5)
def test_too_many_positional():
    # the second argument is positional but can be passed as keyword.
    # arg3 is keyword-only, so at most 3 positionals are accepted.
    with pytest.raises(TypeError,
            match="takes from 2 to 3 positional arguments but 4 were given"):
        func(1, 2, 3, 4)
def test_multiple_values():
    # Supplying arg2 both positionally and by keyword must be rejected.
    with pytest.raises(TypeError,
            match=r"given by name \('arg2'\) and position \(position 1\)"):
        func(1, 2, arg2=3)
def test_string_fallbacks():
    # We can (currently?) use numpy strings to test the "slow" fallbacks
    # that should normally not be taken due to string interning.
    # np.str_ instances are equal to but not identical with interned str
    # keywords, forcing the equality-based (slow-path) keyword lookup.
    arg2 = np.str_("arg2")
    missing_arg = np.str_("missing_arg")
    func(1, **{arg2: 3})
    with pytest.raises(TypeError,
            match="got an unexpected keyword argument 'missing_arg'"):
        func(2, **{missing_arg: 3})
def test_too_many_arguments_method_forwarding():
    # Not directly related to the standard argument parsing, but we sometimes
    # forward methods to Python: arr.mean() calls np._core._methods._mean()
    # This adds code coverage for this `npy_forward_method`.
    arr = np.arange(3)
    args = range(1000)
    # 1000 positional args vastly exceeds the signature; forwarding must
    # surface the TypeError rather than crash.
    with pytest.raises(TypeError):
        arr.mean(*args)
|
numpyREPO_NAMEnumpyPATH_START.@numpy_extracted@numpy-main@numpy@_core@tests@test_argparse.py@.PATH_END.py
|
{
"filename": "_separatethousands.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/choroplethmapbox/colorbar/_separatethousands.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class SeparatethousandsValidator(_plotly_utils.basevalidators.BooleanValidator):
    """Boolean validator for ``choroplethmapbox.colorbar.separatethousands``.

    Appears to be generated from the plotly schema -- changes should
    presumably go into the code generator, not this file (verify).
    """
    def __init__(
        self,
        plotly_name="separatethousands",
        parent_name="choroplethmapbox.colorbar",
        **kwargs,
    ):
        super(SeparatethousandsValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            # Changing this flag only requires re-rendering colorbars.
            edit_type=kwargs.pop("edit_type", "colorbars"),
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@choroplethmapbox@colorbar@_separatethousands.py@.PATH_END.py
|
{
"filename": "test_adaptation.py",
"repo_name": "blackjax-devs/blackjax",
"repo_path": "blackjax_extracted/blackjax-main/tests/adaptation/test_adaptation.py",
"type": "Python"
}
|
import jax
import jax.numpy as jnp
import numpy as np
import optax
import pytest
import blackjax
from blackjax.adaptation import window_adaptation
from blackjax.adaptation.base import get_filter_adapt_info_fn, return_all_adapt_info
from blackjax.util import run_inference_algorithm
@pytest.mark.parametrize(
    "num_steps, expected_schedule",
    [
        (19, [(0, False)] * 19),  # no mass matrix adaptation
        (
            100,
            [(0, False)] * 15 + [(1, False)] * 74 + [(1, True)] + [(0, False)] * 10,
        ),  # windows are resized
        (
            200,
            [(0, False)] * 75
            + [(1, False)] * 24
            + [(1, True)]
            + [(1, False)] * 49
            + [(1, True)]
            + [(0, False)] * 50,
        ),
    ],
)
def test_adaptation_schedule(num_steps, expected_schedule):
    # Each schedule entry is a pair; from the expected values the first item
    # looks like a window/stage index and the second an end-of-window flag --
    # confirm against window_adaptation.build_schedule.
    adaptation_schedule = window_adaptation.build_schedule(num_steps)
    assert num_steps == len(adaptation_schedule)
    assert np.array_equal(adaptation_schedule, expected_schedule)
@pytest.mark.parametrize(
    "adaptation_filters",
    [
        {
            "filter_fn": return_all_adapt_info,
            "return_sets": None,
        },
        {
            "filter_fn": get_filter_adapt_info_fn(),
            "return_sets": (set(), set(), set()),
        },
        {
            "filter_fn": get_filter_adapt_info_fn(
                {"logdensity"},
                {"proposal"},
                {"random_generator_arg", "step", "da_state"},
            ),
            "return_sets": (
                {"logdensity"},
                {"proposal"},
                {"random_generator_arg", "step", "da_state"},
            ),
        },
    ],
)
def test_chees_adaptation(adaptation_filters):
    """End-to-end ChEES adaptation on an anisotropic Gaussian target.

    Checks (a) that the adaptation info filter keeps exactly the requested
    fields, and (b) that the tuned sampler hits the target acceptance rate,
    step size, and trajectory length.
    """
    logprob_fn = lambda x: jax.scipy.stats.norm.logpdf(
        x, loc=0.0, scale=jnp.array([1.0, 10.0])
    ).sum()
    num_burnin_steps = 1000
    num_results = 500
    num_chains = 16
    step_size = 0.1
    init_key, warmup_key, inference_key = jax.random.split(jax.random.key(346), 3)
    warmup = blackjax.chees_adaptation(
        logprob_fn,
        num_chains=num_chains,
        target_acceptance_rate=0.75,
        adaptation_info_fn=adaptation_filters["filter_fn"],
    )
    initial_positions = jax.random.normal(init_key, (num_chains, 2))
    (last_states, parameters), warmup_info = warmup.run(
        warmup_key,
        initial_positions,
        step_size=step_size,
        optim=optax.adamw(learning_rate=0.5),
        num_steps=num_burnin_steps,
    )
    algorithm = blackjax.dynamic_hmc(logprob_fn, **parameters)
    chain_keys = jax.random.split(inference_key, num_chains)
    _, (_, infos) = jax.vmap(
        lambda key, state: run_inference_algorithm(
            rng_key=key,
            initial_state=state,
            inference_algorithm=algorithm,
            num_steps=num_results,
        )
    )(chain_keys, last_states)
    harmonic_mean = 1.0 / jnp.mean(1.0 / infos.acceptance_rate)
    def check_attrs(attribute, keyset):
        # Fields named in `keyset` must be retained by the filter; all
        # others must have been dropped (set to None).
        for name, param in getattr(warmup_info, attribute)._asdict().items():
            if name in keyset:
                assert param is not None
            else:
                assert param is None
    keysets = adaptation_filters["return_sets"]
    if keysets is None:
        # return_all_adapt_info keeps every field of every component.
        keysets = (
            warmup_info.state._fields,
            warmup_info.info._fields,
            warmup_info.adaptation_state._fields,
        )
    for i, attribute in enumerate(["state", "info", "adaptation_state"]):
        check_attrs(attribute, keysets[i])
    np.testing.assert_allclose(harmonic_mean, 0.75, atol=1e-1)
    np.testing.assert_allclose(parameters["step_size"], 1.5, rtol=2e-1)
    np.testing.assert_array_less(infos.num_integration_steps.mean(), 15.0)
|
blackjax-devsREPO_NAMEblackjaxPATH_START.@blackjax_extracted@blackjax-main@tests@adaptation@test_adaptation.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "simonsobs/nextline-rdb",
"repo_path": "nextline-rdb_extracted/nextline-rdb-main/src/nextline_rdb/alembic/models/rev_4dc6a93dfed8/__init__.py",
"type": "Python"
}
|
# Names re-exported as the public API of this models package; the imports
# below bring each of them into this namespace.
__all__ = [
    'NAMING_CONVENTION',
    'Model',
    'ReprMixin',
    'repr_val',
    'Hello',
    'Run',
    'Trace',
    'Prompt',
    'CurrentScript',
    'Script',
    'Stdout',
]
from .base import NAMING_CONVENTION, Model, ReprMixin, repr_val
from .model_hello import Hello
from .model_prompt import Prompt
from .model_run import Run
from .model_script import CurrentScript, Script
from .model_stdout import Stdout
from .model_trace import Trace
|
simonsobsREPO_NAMEnextline-rdbPATH_START.@nextline-rdb_extracted@nextline-rdb-main@src@nextline_rdb@alembic@models@rev_4dc6a93dfed8@__init__.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "macrocosme/shwirl",
"repo_path": "shwirl_extracted/shwirl-master/shwirl/extern/vispy/util/fonts/__init__.py",
"type": "Python"
}
|
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright (c) 2015, Vispy Development Team. All Rights Reserved.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
# -----------------------------------------------------------------------------
"""
The fonts module implements some helpful functions for dealing with system
fonts.
"""
# Public API: only the font-listing helper is exported.
__all__ = ['list_fonts']
from ._triage import _load_glyph, list_fonts # noqa, analysis:ignore
from ._vispy_fonts import _vispy_fonts # noqa, analysis:ignore
|
macrocosmeREPO_NAMEshwirlPATH_START.@shwirl_extracted@shwirl-master@shwirl@extern@vispy@util@fonts@__init__.py@.PATH_END.py
|
{
"filename": "opt_DM.py",
"repo_name": "askap-craco/CELEBI",
"repo_path": "CELEBI_extracted/CELEBI-main/beamform/opt_DM.py",
"type": "Python"
}
|
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser
from dedisperse import dedisperse
import numpy as np
import matplotlib.pyplot as plt
def _main():
    """Grid-search trial DMs and report/plot the one maximising peak intensity."""
    args = get_args()
    # Complex voltage time series for the two polarisations.
    X = np.load(args.x)
    Y = np.load(args.y)
    # Trial DM offsets relative to the baseline DM0; endpoint included.
    DMs = np.arange(args.minDM, args.maxDM+args.DMstep, args.DMstep)
    peaks = []
    for DM in DMs:
        peaks.append(do_DM(X, Y, DM, args.dt, args.f0, args.bw))
    # Print the best absolute DM on stdout -- presumably captured by the
    # calling pipeline stage (verify).
    print(DMs[np.argmax(peaks)]+args.DM0)
    plt.plot(DMs+args.DM0, peaks)
    plt.axvline(DMs[np.argmax(peaks)]+args.DM0)
    plt.xlabel("DM (pc/cm3)")
    plt.ylabel("max(I)")
    plt.tight_layout()
    plt.savefig("opt_DM.png")
def get_args():
    """Build and parse the command-line options for the DM optimiser."""
    ap = ArgumentParser(
        "Optimise DM for S/N",
        formatter_class=ArgumentDefaultsHelpFormatter,
    )
    ap.add_argument("-x", type=str, help="X complex time series")
    ap.add_argument("-y", type=str, help="Y complex time series")
    ap.add_argument("--DM0", type=float, help="Baseline DM")
    ap.add_argument("-d", "--minDM", type=float, default=0,
                    help="DM range start")
    ap.add_argument("-D", "--maxDM", type=float, default=10,
                    help="DM range end")
    ap.add_argument("-s", "--DMstep", type=float, default=0.01,
                    help="DM range step")
    ap.add_argument("--dt", type=int, default=50,
                    help="Time resolution to average to in us")
    ap.add_argument("--f0", type=float, help="Central frequency in MHz")
    ap.add_argument("--bw", type=float, default=336,
                    help="Bandwidth in MHz")
    return ap.parse_args()
def running_mean(x, N):
    """Length-N sliding-window mean of 1-D array x (output length len(x)-N+1).

    Based on https://stackoverflow.com/a/27681394
    """
    # Prepend a zero so that differences of cumulative sums N apart give
    # exact window sums.
    csum = np.cumsum(np.insert(x, 0, 0))
    window_sums = csum[N:] - csum[:-N]
    return window_sums / float(N)
def do_DM(X, Y, DM, dt, f0, bw):
    """Dedisperse both polarisations at trial DM; return peak averaged intensity."""
    # do fft here because dedispersion operates in-place
    X_f = np.fft.fft(X)
    Y_f = np.fft.fft(Y)
    X_f_dd = dedisperse(X_f, DM, f0, bw)
    Y_f_dd = dedisperse(Y_f, DM, f0, bw)
    X_dd = np.fft.ifft(X_f_dd)
    Y_dd = np.fft.ifft(Y_f_dd)
    # Total intensity (Stokes I) from the two polarisations.
    I = np.abs(X_dd)**2 + np.abs(Y_dd)**2
    # Boxcar-average down to the requested time resolution; the 336 factor
    # suggests 336 native samples per microsecond -- TODO confirm.
    I_red = running_mean(I, 336*dt)
    return np.max(I_red)
# Script entry point.
if __name__ == "__main__":
    _main()
|
askap-cracoREPO_NAMECELEBIPATH_START.@CELEBI_extracted@CELEBI-main@beamform@opt_DM.py@.PATH_END.py
|
{
"filename": "test_rbfinterp.py",
"repo_name": "scipy/scipy",
"repo_path": "scipy_extracted/scipy-main/scipy/interpolate/tests/test_rbfinterp.py",
"type": "Python"
}
|
import pickle
import pytest
import numpy as np
from numpy.linalg import LinAlgError
from scipy._lib._array_api import xp_assert_close
from scipy.stats.qmc import Halton
from scipy.spatial import cKDTree # type: ignore[attr-defined]
from scipy.interpolate._rbfinterp import (
_AVAILABLE, _SCALE_INVARIANT, _NAME_TO_MIN_DEGREE, _monomial_powers,
RBFInterpolator
)
from scipy.interpolate import _rbfinterp_pythran
from scipy._lib._testutils import _run_concurrent_barrier
def _vandermonde(x, degree):
    """Matrix of monomials spanning polynomials of the given degree, at x."""
    # Returns a matrix of monomials that span polynomials with the specified
    # degree evaluated at x.
    powers = _monomial_powers(x.shape[1], degree)
    return _rbfinterp_pythran._polynomial_matrix(x, powers)
def _1d_test_function(x):
# Test function used in Wahba's "Spline Models for Observational Data".
# domain ~= (0, 3), range ~= (-1.0, 0.2)
x = x[:, 0]
y = 4.26*(np.exp(-x) - 4*np.exp(-2*x) + 3*np.exp(-3*x))
return y
def _2d_test_function(x):
# Franke's test function.
# domain ~= (0, 1) X (0, 1), range ~= (0.0, 1.2)
x1, x2 = x[:, 0], x[:, 1]
term1 = 0.75 * np.exp(-(9*x1-2)**2/4 - (9*x2-2)**2/4)
term2 = 0.75 * np.exp(-(9*x1+1)**2/49 - (9*x2+1)/10)
term3 = 0.5 * np.exp(-(9*x1-7)**2/4 - (9*x2-3)**2/4)
term4 = -0.2 * np.exp(-(9*x1-4)**2 - (9*x2-7)**2)
y = term1 + term2 + term3 + term4
return y
def _is_conditionally_positive_definite(kernel, m):
    """Empirically test whether `kernel` is c.p.d. of order `m` (1-5 dims)."""
    # Tests whether the kernel is conditionally positive definite of order m.
    # See chapter 7 of Fasshauer's "Meshfree Approximation Methods with
    # MATLAB".
    nx = 10
    ntests = 100
    for ndim in [1, 2, 3, 4, 5]:
        # Generate sample points with a Halton sequence to avoid samples that
        # are too close to each other, which can make the matrix singular.
        seq = Halton(ndim, scramble=False, seed=np.random.RandomState())
        for _ in range(ntests):
            x = 2*seq.random(nx) - 1
            A = _rbfinterp_pythran._kernel_matrix(x, kernel)
            P = _vandermonde(x, m - 1)
            Q, R = np.linalg.qr(P, mode='complete')
            # Q2 forms a basis spanning the space where P.T.dot(x) = 0. Project
            # A onto this space, and then see if it is positive definite using
            # the Cholesky decomposition. If not, then the kernel is not c.p.d.
            # of order m.
            Q2 = Q[:, P.shape[1]:]
            B = Q2.T.dot(A).dot(Q2)
            try:
                np.linalg.cholesky(B)
            except np.linalg.LinAlgError:
                # A single failed factorisation is a counterexample.
                return False
    return True
# Sorting the parametrize arguments is necessary to avoid a parallelization
# issue described here: https://github.com/pytest-dev/pytest-xdist/issues/432.
@pytest.mark.parametrize('kernel', sorted(_AVAILABLE))
def test_conditionally_positive_definite(kernel):
    # Test if each kernel in _AVAILABLE is conditionally positive definite of
    # order m, where m comes from _NAME_TO_MIN_DEGREE. This is a necessary
    # condition for the smoothed RBF interpolant to be well-posed in general.
    # Kernels absent from _NAME_TO_MIN_DEGREE default to order 0 (-1 + 1).
    m = _NAME_TO_MIN_DEGREE.get(kernel, -1) + 1
    assert _is_conditionally_positive_definite(kernel, m)
class _TestRBFInterpolator:
    """Shared RBFInterpolator test suite.

    Subclasses implement ``build`` to construct the interpolator under a
    specific ``neighbors`` mode; every test here runs for each subclass.
    """
    @pytest.mark.parametrize('kernel', sorted(_SCALE_INVARIANT))
    def test_scale_invariance_1d(self, kernel):
        # Verify that the functions in _SCALE_INVARIANT are insensitive to the
        # shape parameter (when smoothing == 0) in 1d.
        seq = Halton(1, scramble=False, seed=np.random.RandomState())
        x = 3*seq.random(50)
        y = _1d_test_function(x)
        xitp = 3*seq.random(50)
        yitp1 = self.build(x, y, epsilon=1.0, kernel=kernel)(xitp)
        yitp2 = self.build(x, y, epsilon=2.0, kernel=kernel)(xitp)
        xp_assert_close(yitp1, yitp2, atol=1e-8)
    @pytest.mark.parametrize('kernel', sorted(_SCALE_INVARIANT))
    def test_scale_invariance_2d(self, kernel):
        # Verify that the functions in _SCALE_INVARIANT are insensitive to the
        # shape parameter (when smoothing == 0) in 2d.
        seq = Halton(2, scramble=False, seed=np.random.RandomState())
        x = seq.random(100)
        y = _2d_test_function(x)
        xitp = seq.random(100)
        yitp1 = self.build(x, y, epsilon=1.0, kernel=kernel)(xitp)
        yitp2 = self.build(x, y, epsilon=2.0, kernel=kernel)(xitp)
        xp_assert_close(yitp1, yitp2, atol=1e-8)
    @pytest.mark.parametrize('kernel', sorted(_AVAILABLE))
    def test_extreme_domains(self, kernel):
        # Make sure the interpolant remains numerically stable for very
        # large/small domains.
        seq = Halton(2, scramble=False, seed=np.random.RandomState())
        scale = 1e50
        shift = 1e55
        x = seq.random(100)
        y = _2d_test_function(x)
        xitp = seq.random(100)
        if kernel in _SCALE_INVARIANT:
            yitp1 = self.build(x, y, kernel=kernel)(xitp)
            yitp2 = self.build(
                x*scale + shift, y,
                kernel=kernel
                )(xitp*scale + shift)
        else:
            yitp1 = self.build(x, y, epsilon=5.0, kernel=kernel)(xitp)
            yitp2 = self.build(
                x*scale + shift, y,
                epsilon=5.0/scale,
                kernel=kernel
                )(xitp*scale + shift)
        xp_assert_close(yitp1, yitp2, atol=1e-8)
    def test_polynomial_reproduction(self):
        # If the observed data comes from a polynomial, then the interpolant
        # should be able to reproduce the polynomial exactly, provided that
        # `degree` is sufficiently high.
        rng = np.random.RandomState(0)
        seq = Halton(2, scramble=False, seed=rng)
        degree = 3
        x = seq.random(50)
        xitp = seq.random(50)
        P = _vandermonde(x, degree)
        Pitp = _vandermonde(xitp, degree)
        poly_coeffs = rng.normal(0.0, 1.0, P.shape[1])
        y = P.dot(poly_coeffs)
        yitp1 = Pitp.dot(poly_coeffs)
        yitp2 = self.build(x, y, degree=degree)(xitp)
        xp_assert_close(yitp1, yitp2, atol=1e-8)
    @pytest.mark.slow
    def test_chunking(self, monkeypatch):
        # If the observed data comes from a polynomial, then the interpolant
        # should be able to reproduce the polynomial exactly, provided that
        # `degree` is sufficiently high.
        rng = np.random.RandomState(0)
        seq = Halton(2, scramble=False, seed=rng)
        degree = 3
        largeN = 1000 + 33
        # this is large to check that chunking of the RBFInterpolator is tested
        x = seq.random(50)
        xitp = seq.random(largeN)
        P = _vandermonde(x, degree)
        Pitp = _vandermonde(xitp, degree)
        poly_coeffs = rng.normal(0.0, 1.0, P.shape[1])
        y = P.dot(poly_coeffs)
        yitp1 = Pitp.dot(poly_coeffs)
        interp = self.build(x, y, degree=degree)
        ce_real = interp._chunk_evaluator
        def _chunk_evaluator(*args, **kwargs):
            # Force a tiny memory budget so the chunked code path runs.
            kwargs.update(memory_budget=100)
            return ce_real(*args, **kwargs)
        monkeypatch.setattr(interp, '_chunk_evaluator', _chunk_evaluator)
        yitp2 = interp(xitp)
        xp_assert_close(yitp1, yitp2, atol=1e-8)
    def test_vector_data(self):
        # Make sure interpolating a vector field is the same as interpolating
        # each component separately.
        seq = Halton(2, scramble=False, seed=np.random.RandomState())
        x = seq.random(100)
        xitp = seq.random(100)
        y = np.array([_2d_test_function(x),
                      _2d_test_function(x[:, ::-1])]).T
        yitp1 = self.build(x, y)(xitp)
        yitp2 = self.build(x, y[:, 0])(xitp)
        yitp3 = self.build(x, y[:, 1])(xitp)
        xp_assert_close(yitp1[:, 0], yitp2)
        xp_assert_close(yitp1[:, 1], yitp3)
    def test_complex_data(self):
        # Interpolating complex input should be the same as interpolating the
        # real and complex components.
        seq = Halton(2, scramble=False, seed=np.random.RandomState())
        x = seq.random(100)
        xitp = seq.random(100)
        y = _2d_test_function(x) + 1j*_2d_test_function(x[:, ::-1])
        yitp1 = self.build(x, y)(xitp)
        yitp2 = self.build(x, y.real)(xitp)
        yitp3 = self.build(x, y.imag)(xitp)
        xp_assert_close(yitp1.real, yitp2)
        xp_assert_close(yitp1.imag, yitp3)
    @pytest.mark.parametrize('kernel', sorted(_AVAILABLE))
    def test_interpolation_misfit_1d(self, kernel):
        # Make sure that each kernel, with its default `degree` and an
        # appropriate `epsilon`, does a good job at interpolation in 1d.
        seq = Halton(1, scramble=False, seed=np.random.RandomState())
        x = 3*seq.random(50)
        xitp = 3*seq.random(50)
        y = _1d_test_function(x)
        ytrue = _1d_test_function(xitp)
        yitp = self.build(x, y, epsilon=5.0, kernel=kernel)(xitp)
        mse = np.mean((yitp - ytrue)**2)
        assert mse < 1.0e-4
    @pytest.mark.parametrize('kernel', sorted(_AVAILABLE))
    def test_interpolation_misfit_2d(self, kernel):
        # Make sure that each kernel, with its default `degree` and an
        # appropriate `epsilon`, does a good job at interpolation in 2d.
        seq = Halton(2, scramble=False, seed=np.random.RandomState())
        x = seq.random(100)
        xitp = seq.random(100)
        y = _2d_test_function(x)
        ytrue = _2d_test_function(xitp)
        yitp = self.build(x, y, epsilon=5.0, kernel=kernel)(xitp)
        mse = np.mean((yitp - ytrue)**2)
        assert mse < 2.0e-4
    @pytest.mark.parametrize('kernel', sorted(_AVAILABLE))
    def test_smoothing_misfit(self, kernel):
        # Make sure we can find a smoothing parameter for each kernel that
        # removes a sufficient amount of noise.
        rng = np.random.RandomState(0)
        seq = Halton(1, scramble=False, seed=rng)
        noise = 0.2
        rmse_tol = 0.1
        smoothing_range = 10**np.linspace(-4, 1, 20)
        x = 3*seq.random(100)
        y = _1d_test_function(x) + rng.normal(0.0, noise, (100,))
        ytrue = _1d_test_function(x)
        rmse_within_tol = False
        for smoothing in smoothing_range:
            ysmooth = self.build(
                x, y,
                epsilon=1.0,
                smoothing=smoothing,
                kernel=kernel)(x)
            rmse = np.sqrt(np.mean((ysmooth - ytrue)**2))
            if rmse < rmse_tol:
                rmse_within_tol = True
                break
        assert rmse_within_tol
    def test_array_smoothing(self):
        # Test using an array for `smoothing` to give less weight to a known
        # outlier.
        rng = np.random.RandomState(0)
        seq = Halton(1, scramble=False, seed=rng)
        degree = 2
        x = seq.random(50)
        P = _vandermonde(x, degree)
        poly_coeffs = rng.normal(0.0, 1.0, P.shape[1])
        y = P.dot(poly_coeffs)
        y_with_outlier = np.copy(y)
        y_with_outlier[10] += 1.0
        smoothing = np.zeros((50,))
        smoothing[10] = 1000.0
        yitp = self.build(x, y_with_outlier, smoothing=smoothing)(x)
        # Should be able to reproduce the uncorrupted data almost exactly.
        xp_assert_close(yitp, y, atol=1e-4)
    def test_inconsistent_x_dimensions_error(self):
        # ValueError should be raised if the observation points and evaluation
        # points have a different number of dimensions.
        y = Halton(2, scramble=False, seed=np.random.RandomState()).random(10)
        d = _2d_test_function(y)
        x = Halton(1, scramble=False, seed=np.random.RandomState()).random(10)
        match = 'Expected the second axis of `x`'
        with pytest.raises(ValueError, match=match):
            self.build(y, d)(x)
    def test_inconsistent_d_length_error(self):
        y = np.linspace(0, 1, 5)[:, None]
        d = np.zeros(1)
        match = 'Expected the first axis of `d`'
        with pytest.raises(ValueError, match=match):
            self.build(y, d)
    def test_y_not_2d_error(self):
        y = np.linspace(0, 1, 5)
        d = np.zeros(5)
        match = '`y` must be a 2-dimensional array.'
        with pytest.raises(ValueError, match=match):
            self.build(y, d)
    def test_inconsistent_smoothing_length_error(self):
        y = np.linspace(0, 1, 5)[:, None]
        d = np.zeros(5)
        smoothing = np.ones(1)
        match = 'Expected `smoothing` to be'
        with pytest.raises(ValueError, match=match):
            self.build(y, d, smoothing=smoothing)
    def test_invalid_kernel_name_error(self):
        y = np.linspace(0, 1, 5)[:, None]
        d = np.zeros(5)
        match = '`kernel` must be one of'
        with pytest.raises(ValueError, match=match):
            self.build(y, d, kernel='test')
    def test_epsilon_not_specified_error(self):
        y = np.linspace(0, 1, 5)[:, None]
        d = np.zeros(5)
        for kernel in _AVAILABLE:
            if kernel in _SCALE_INVARIANT:
                continue
            match = '`epsilon` must be specified'
            with pytest.raises(ValueError, match=match):
                self.build(y, d, kernel=kernel)
    def test_x_not_2d_error(self):
        y = np.linspace(0, 1, 5)[:, None]
        x = np.linspace(0, 1, 5)
        d = np.zeros(5)
        match = '`x` must be a 2-dimensional array.'
        with pytest.raises(ValueError, match=match):
            self.build(y, d)(x)
    def test_not_enough_observations_error(self):
        y = np.linspace(0, 1, 1)[:, None]
        d = np.zeros(1)
        match = 'At least 2 data points are required'
        with pytest.raises(ValueError, match=match):
            self.build(y, d, kernel='thin_plate_spline')
    @pytest.mark.thread_unsafe
    def test_degree_warning(self):
        y = np.linspace(0, 1, 5)[:, None]
        d = np.zeros(5)
        for kernel, deg in _NAME_TO_MIN_DEGREE.items():
            # Only test for kernels that its minimum degree is not 0.
            if deg >= 1:
                match = f'`degree` should not be below {deg}'
                with pytest.warns(Warning, match=match):
                    self.build(y, d, epsilon=1.0, kernel=kernel, degree=deg-1)
    def test_minus_one_degree(self):
        # Make sure a degree of -1 is accepted without any warning.
        y = np.linspace(0, 1, 5)[:, None]
        d = np.zeros(5)
        for kernel, _ in _NAME_TO_MIN_DEGREE.items():
            self.build(y, d, epsilon=1.0, kernel=kernel, degree=-1)
    def test_rank_error(self):
        # An error should be raised when `kernel` is "thin_plate_spline" and
        # observations are 2-D and collinear.
        y = np.array([[2.0, 0.0], [1.0, 0.0], [0.0, 0.0]])
        d = np.array([0.0, 0.0, 0.0])
        match = 'does not have full column rank'
        with pytest.raises(LinAlgError, match=match):
            self.build(y, d, kernel='thin_plate_spline')(y)
    def test_single_point(self):
        # Make sure interpolation still works with only one point (in 1, 2, and
        # 3 dimensions).
        for dim in [1, 2, 3]:
            y = np.zeros((1, dim))
            d = np.ones((1,))
            f = self.build(y, d, kernel='linear')(y)
            xp_assert_close(d, f)
    def test_pickleable(self):
        # Make sure we can pickle and unpickle the interpolant without any
        # changes in the behavior.
        seq = Halton(1, scramble=False, seed=np.random.RandomState(2305982309))
        x = 3*seq.random(50)
        xitp = 3*seq.random(50)
        y = _1d_test_function(x)
        interp = self.build(x, y)
        yitp1 = interp(xitp)
        yitp2 = pickle.loads(pickle.dumps(interp))(xitp)
        xp_assert_close(yitp1, yitp2, atol=1e-16)
class TestRBFInterpolatorNeighborsNone(_TestRBFInterpolator):
    """Run the shared suite with the exact (global) interpolator."""
    def build(self, *args, **kwargs):
        return RBFInterpolator(*args, **kwargs)
    def test_smoothing_limit_1d(self):
        # For large smoothing parameters, the interpolant should approach a
        # least squares fit of a polynomial with the specified degree.
        seq = Halton(1, scramble=False, seed=np.random.RandomState())
        degree = 3
        smoothing = 1e8
        x = 3*seq.random(50)
        xitp = 3*seq.random(50)
        y = _1d_test_function(x)
        yitp1 = self.build(
            x, y,
            degree=degree,
            smoothing=smoothing
            )(xitp)
        P = _vandermonde(x, degree)
        Pitp = _vandermonde(xitp, degree)
        yitp2 = Pitp.dot(np.linalg.lstsq(P, y, rcond=None)[0])
        xp_assert_close(yitp1, yitp2, atol=1e-8)
    def test_smoothing_limit_2d(self):
        # For large smoothing parameters, the interpolant should approach a
        # least squares fit of a polynomial with the specified degree.
        seq = Halton(2, scramble=False, seed=np.random.RandomState())
        degree = 3
        smoothing = 1e8
        x = seq.random(100)
        xitp = seq.random(100)
        y = _2d_test_function(x)
        yitp1 = self.build(
            x, y,
            degree=degree,
            smoothing=smoothing
            )(xitp)
        P = _vandermonde(x, degree)
        Pitp = _vandermonde(xitp, degree)
        yitp2 = Pitp.dot(np.linalg.lstsq(P, y, rcond=None)[0])
        xp_assert_close(yitp1, yitp2, atol=1e-8)
class TestRBFInterpolatorNeighbors20(_TestRBFInterpolator):
    # RBFInterpolator using 20 nearest neighbors.
    def build(self, *args, **kwargs):
        return RBFInterpolator(*args, **kwargs, neighbors=20)
    def test_equivalent_to_rbf_interpolator(self):
        # The neighbors=20 result must match building a separate exact
        # interpolant from each query point's 20 nearest observations.
        seq = Halton(2, scramble=False, seed=np.random.RandomState())
        x = seq.random(100)
        xitp = seq.random(100)
        y = _2d_test_function(x)
        yitp1 = self.build(x, y)(xitp)
        yitp2 = []
        tree = cKDTree(x)
        for xi in xitp:
            _, nbr = tree.query(xi, 20)
            yitp2.append(RBFInterpolator(x[nbr], y[nbr])(xi[None])[0])
        xp_assert_close(yitp1, yitp2, atol=1e-8)
    def test_concurrency(self):
        # Check that no segfaults appear with concurrent access to
        # RbfInterpolator
        seq = Halton(2, scramble=False, seed=np.random.RandomState(0))
        x = seq.random(100)
        xitp = seq.random(100)
        y = _2d_test_function(x)
        interp = self.build(x, y)
        def worker_fn(_, interp, xp):
            interp(xp)
        _run_concurrent_barrier(10, worker_fn, interp, xitp)
class TestRBFInterpolatorNeighborsInf(TestRBFInterpolatorNeighborsNone):
    # RBFInterpolator using neighbors=np.inf. This should give exactly the same
    # results as neighbors=None, but it will be slower.
    def build(self, *args, **kwargs):
        return RBFInterpolator(*args, **kwargs, neighbors=np.inf)
    def test_equivalent_to_rbf_interpolator(self):
        # neighbors=np.inf must agree with the exact (neighbors=None) solver.
        seq = Halton(1, scramble=False, seed=np.random.RandomState())
        x = 3*seq.random(50)
        xitp = 3*seq.random(50)
        y = _1d_test_function(x)
        yitp1 = self.build(x, y)(xitp)
        yitp2 = RBFInterpolator(x, y)(xitp)
        xp_assert_close(yitp1, yitp2, atol=1e-8)
|
scipyREPO_NAMEscipyPATH_START.@scipy_extracted@scipy-main@scipy@interpolate@tests@test_rbfinterp.py@.PATH_END.py
|
{
"filename": "lg4_envs.py",
"repo_name": "mit-ll/spacegym-kspdg",
"repo_path": "spacegym-kspdg_extracted/spacegym-kspdg-main/src/kspdg/private_src/python3_9/Darwin_arm64/kspdg_envs/lbg1/lg4_envs.py",
"type": "Python"
}
|
# Pyarmor 8.5.11 (trial), 000000, non-profits, 2024-12-09T10:19:40.569956
from kspdg.private_src.python3_9.Darwin_arm64.pyarmor_runtime_000000 import __pyarmor__
__pyarmor__(__name__, __file__, b'PY000000\x00\x03\t\x00a\r\r\n\x80\x00\x01\x00\x08\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00d%\x00\x00\x12\t\x04\x00\xad,Ii}P\xbd3?\xb0\xd7~\xa0[sC\x00\x00\x00\x00\x00\x00\x00\x00\xc6\xb3 vS)0\xca\xd6\xa7\x08:Ka|:\xad\xbd\xbc\xad\xfd\x8f\x9b6b\xa9W\xb0\xda\xbf\x9b9\xf6\t\xc5]\xf7\xbe\x81\xd7\xfc\xea\xbcs\x7f\xb7(h\x8c\x83\x07\x83q\xa1\x81Q\xcb\x94\xccQ\xde\xcc\x9b\x00cE\x15\xa9\x86\xd62]\xebN\xc0O\xb7\xa9\xdc\x12\xd0\xfac\x127\xcaQ\x97g\xc9f\xcf\xfb\xf3h\xb7> \xe3&\xc2g\xad3\xc3k\x92\xf2\xfc\x14\xa4\x9a\xd4^\x08\xecCE\x11\xcf\xc0;\x9d\xb7I\xdd\xe6+\xde\xcf,\x1bxK\x9dt]s\x97\x8dO\xfe\x1a\xb4\xfc\x066\xfcHx\xfe\xd3\x197\x1d\x10\xb3\xfa\x98+\x92\x8e\xa3\xc7j\xd8`\x16^so\x82\x14{\xa6n\x04\xb0?\xaf\xc2{\x11IQ*\xef\x02\xe8\xaa\x9e\x17m\xb0\xbcW/T\xa8\xc1]\\p`\x11\x0e\x92\xa9t\xdf\xc2\xe1\x867\x04G$)\x8b"\xfc{P\xb0\x1d\x99\xded\x125R*\x83\xd6\x11\xb3\x089\xa0\xa5\xec\xf1x\x02/\x08b\xa4\xfa}\x18\x04dt=\x08\x15\xa2\xc7\xa7N;q.\x9f_I\x10\xd7\x84\xda\xcdF&kD\xd0!l\xdd,\xf4\xb9d\xd0\xee\x94a\xf7\xd2T;\x10q\xb9y2\xf1\xce\xc2\x9d\xdc\xfe\x8eMg\x9d\x15,\xb6X8\x19\xa7^Y\xf6\xce\x80\xf2^\x83t\xa3a{?5>\x85\x85\xe5\x8a\x10\xb4\xf7DYp=\xdf\x98\xdd\x0b\xdc\xe1\xa4\xc1\xa6\xb8\x9a\\\xa0\x06\xc6HRC\xfa\x99\xa7\xbc}\xfd\x89\x11\x1e\xcd\x0b\xaez\xdd]6z6\x9fM\x1a\xc3\x95-+\xf3\xd5\x07\x88\x9d\x18\xf4\n\x8em\xd2\x9e\xe8\x81\x90\xa2\xd3\x97\x86\x80\xd8t\xf6\xa0\xba\x90\x1a\x19d\xfc\x07\xe2m\x0e\xb0c\x96\x15\xdem\x95\xa2\xfa\x91\xb7\x00H\x7f\xea\xfa]z:z\x18\xb8Ke\x0eh\x01\x13\xae4K\x92\x9dF-\xe4\x9f\x8co\xab\xdd(\x9a\x14\xbed\xfew\xbe-s\x0f\x11\xbc\x00p\xfe\x14\xf3\xed5\xfe\x11[\xeb\x84\xee\x94\x19\x8f\xca\t\x10\xd9\xfbx"\xd8\xcb\xddO\xc1@\xd9QT\xd8$\xed\xd99\xc2\xe7\xc2\xb2\xf5\xc8\xb6\x9bC:\xa7JQ$\xc4\xe7&\x19\x8d\x06\x8c\xea\x0e\x91\xc0I\xfe:N"\xf1\n\xb1\x82\xf1\n\x06\xab2\xb5X\x88\xff{\x0e\xa9\x8d\xf3\xebI\x12\x1f\x81\x00\xc1\xa2\r\xf7\xf9f\xe5\xe9\xfa\xd9\x9bp\xf6\xaa\xf8\xdf]\xa3U\x1b\x18\x84\xee\xca[\x9eI\xed\xbd\x87\\\xb5\xe4\xe7\x9a\xed*\xf9\xc2\xb0\xea\
xf01\xae\xdc\x06/\xcdCf\xab\x00\xe5\xfdcR\x98\xb4\x9d\xa9\xe8\xfb\x08IM\xb6Kp\x8a\x08\xca\xae!\xdb\xbcv\x85\xef\xe5z\n\x8f\xc9\xb4G\xd6\xd0\x144S\x1d\xb5t1p\xd5\xb2f\xeb\xde\x1d)|\xb9X\xa5{\xb4F\xeb\x06;d\xd5C\xe2\xcd\xe8\xf2m\x0e/\xe1\x16\xff\xae\xfd\x11\xbfp\xd1\xe2T\xbe\x11\xd2\xb8\xd5\xe2J\x0f\xb2I\x93HM\xcc\xe8k\xf2\x10T\x06\xacC\xa5\x13\xf5\x15z\xb1X\x14%p\x87z\xe1T$x\x8c|_\x8c+\x83\xbc\xc8l\xbc\x1bwus\xc9x\xd1\xa1w,o\x9dj\xbd\xcf\xa3R\xa3\xdf\xb6\xd1J4\x08mR\xddz\xb5^\xdd\x1eC\xfdw")\x01nk\xc7\xe4\xa6\xd1\xc9ZL\xab~\xd2\x8c\xec6\x08x\x97\xff\xf6\xdc\xf2\xddx\xeb\xc7\x12\xab\x13\xe0\x01\x84g\xebWQ\x18\x1d>\xb6\xd1A\xc8vPl\xaa\x82\xfe\xb6\x96\xdf\x85h\x18\xa3m\x82\xcd5\x7f\xba\x9d@z\x06\xf3\x839\xe4\xcc\xf7-\xa5\xdf@B\xa6\x9cr\xech\x8a\xe9\xdf\x1fL\xbeAW]r\xc9&o\xd4O\x82\x1a8_\\\x00j\xb6qf\xef\xcb\xb9\xc3:r\xff\xc7\x0eJ\x18\xc5\xfa\xb0\x9a\xd5-<\x07\x7f\xd7a\x04\xe1\x16h\x0b\xaf\xea\xadwj\xf1\x03\xf3\x93\xd6\n\x10Y)\x9c%V\xe2\x00\xb3\xb9\xe5\xce\xe2\xe3\x8alQ\xe2\xd4B56\xbfk\xb6\xf16\x86UP\xe4\xda\x86Ti\x00\xdd\x08\xd4\x94\xd7\x0f\x8e\xa3\xf7+}3l@\x9f\xc8\x1e>\xbb\x02j\xec\xe8#\xeb\xfa\xba\x01\xe9\xf5nI6\xd57\x0c\xf7\xddv\x93\xcb4\xc6\xa2\x15\x04\x9503\x9f\x84?x\xeb\xabH5>\xd9\x8c?\xb3G\xf0\xb2\x9av\x96dpy7\x87\xfe_\x97\xd5\x90H)\x13>p(4(j\xec\x90\xfd\xa2\xf8\x84\xfe\\\xd1\xc8\xdc\xc8\xf1\xcb/?0\x98\xe2\x8c!\xcd\xc4k\xfc\xe9\x97\x0cmm\xdc\x8e\xb4\x7fp`)\x18w\xc5t\xd5\xfa\x06\xcd\x93\xe6\xe8\x9a\x97\xe6\xc3\x06\\\x8a\xf0\x8e\x1b\x93\x8bR\xe7M\xedM\xbf\xa6\xb2\xa9\xbb*S\xa0\xd8\xab\xc5\x1e\xedG\xf1\x07p9\x00\xe2Q\x1f&\x8d\xe9\x17_\xado\xa1Q\x92\xa3\x19\x99\x17t\xac\x84qo\x0f\xb80\x04tu\xf9\x02\x8f\x9c\xb0\xac\x85c)\x8e\xa7\x0f\'\xc59\xca\xb3A\x90\xf0Ga\x034E\x1e\x820\xf4)\x9a\xf7\xbfC\x9c\xa0\xae\x98\xa1\xa9<\x10&\x9cq\x01^\x9e3\xa8X\xed\\\xab\xc3\x17O\x88\xb9\t\xd2\xc5\xa85\x0f\xd2l\x18b\xd3\xee;\x98\x02\xd5\xfa\xb8\t\xb7r\xae\x1e\r\x92\xa1\x9a*,F\xe5\xd0\x01OJ\xbd\xef\xf77o,>\xdf\xaf\xf2\xe7@\x16\xe8\xf8\x9d-\xdbj\xa6\xecZs\xf5Np\xdbY\x8c\xd9n\x0e8\x08\x8b\x94\
xfe\xfa\x10\xa8\x96u\xaa\xe9\x9f\x85\xe9J\x1d|zlP\xf6\x83\xa0\xb5\xe0ui\xd6\x81\x7f\x95\x88\x9e\xa5\x94\x07S\xed5\x17\xa4\x91X\x94\x1dw\x0c\xbb\xc9g\x8bcc\x9b\x93A\xb5\x07\x89:\x95\x1d**\xce\x7f@S\x17\x01\xc5!9\xd1\xee.q\xa7|\xaa\xbb\xc7\xd9\x11\xe3\xb3\xb6)\xd1\x90A\xe71\x89\xf5\xf8|\xa71\xaf\x12\xb7_p\x07\xb3\x06\x11D\xb5$\'\xff\xc6\x00\x0b}_JF\xcf\xce\xba\xfd\xd2,\xcfj\xb2\t\x9c\xfe\xbf\xc8\xbc\xab#;\x9d\x02\xeez9\x07Q0C\xfc\xc0rp\xbb[\xe9%S\x16j/\xbd\x1e\xb0\x05\xff>\xfci\xb3\xbca\xbf\x11$}\xc0V\xc59ibo\x813\xc7c\x9e\xdcT\xb2\xa0\xda\x02\x9c\xfe\xf9\x9f\xdd\x96\xd4Q\x93\x1eN\xe1\xc0g\xceJ\xf61L\xd1\x1f\xacLq`~/\xe4\xe5\xea\x98u\xa0F\xaa\xd0i\xbe\x10\xb3\xc2\xcb\x1fa\xf0\xf7\xe3@f\x8a\x89g\x9e\x91\x06d\xfb,\xc7#\xb4\xf4\xed\xd7\xe7\x9b8\n\xf6\x0f}\xdc\xe8l6\xdf\xc6"\t+\xe5\x83"\xf7\xae\xf7\xfb\x1a[\xcd\xe1\x87\'8\x10B\xa2\x93k\xb9<k\xb3\x10\xe3zn6y\x13/{1\xe4Q\xabKIL\x0b\x9d\x9c\xec`\x03\x88c`\x14\xf6\xa1\xd4\xb1\x93\x9a&\xfa<Y\xf1\x81\x1c\x11xpD\\a\xe1"_\xbfue\xc1\xa0\\\x1a\xd3\x9d\xd6\xa4~\xd0\x17\xa7\xaa\xffm\xd5?\xce\xda\xb0\xeb\x94\xa91\xa6^\xf5&\x98\xde\xad\x84N\xa5\'M\xb3`x!`n\x1cKO\xa8\xd4\xd8z\x8d&R\xc6\xec\x8d\x8c\xa7)y\xbc\x1c)\x9f\xf6\xd6\xaa\x9a\x9e\x89\xb8\x14\xc6\x1b\x86\xffd\x90\xe1\xd87\xd5\xb8\x94,*\xe8\x08\xf0\xe2tt\xf6\x1a=\xe4\xd5\xddH\xf0\x8d\xe6\xd8\x0e<\\\xb4`1\xfc\x16@\x8c\x1c\xb3\xca\xb8\xb3\x1bb[))\x02\xcf\x15\xe3\xa7\xdd|\xc9z\xe2\x13\x99\x04\xb2\xb7N\xc7\xdf\x1d]0\x88Ut-\x17\xeb\xf51\x18\xea\xcb\xc2\xb4\xa17U\x94\xaa\xab\xe0\x1c\xf5\xb0l=\xe1\xb9\x82\xac\xe0\x1c_\xe43\x12\xd9W/\x1cn\xb6\xe7\xf52\xd0E\xae\x90;\'U\x9dO\n77P\x1b\xb3\xdbr\x8c\xe1\xbf\x9e,y\xbcm(\xb2\xe5O\x1b\\\xf0\xb4\x05\x96\x87\xe8u\xd4=\xad\xaa\xf8f\xabk\x0e\x9cP\n\xd0l\x06\xc7f\x03n\xa7\xa7\xbd\xf5>\x133\xe3\x1f 
\x17\xcc\xd2+veJ\xeb\xd9q\x9fZ\x7f\x0c\xdf\x14\x7fq\xf0\x13UU\xfcv\xa6\xe2\xee\xca\tU\x81\xb4\x1a\xde\xb6\xb8\xf5\xf6\x9f\xd3\xae\x17\x18\xb6\r\xb7\x0c\x1d\x12\r\xc6\x9a6\xdb\xc59"\xe6\xa6E\x8f\xaa\x1d\xf4\xc9\xd6T$\xb2\xbbBw\xa75d\xf6\x9cb\xa2L(B\xf5;\xfb.\xc3\xcc\x83E\xd8\xbf\x16|\xa1\xee\x1e\xed\x16\xe0\xfa\x10T\xf5\x99\x0f\n_\x89t\x04\x85O\xae\xfen\xf0\xb0\x80\t\x1f\x98u\x0c\x9f\xee\x8f\xf2:\xe2b\xadlC\xe8D#5\x84\x99L8\xb46X*l\x9b\xf2\xbdlZX\xad\x7f\xd0^R\xd2\x034\xdc\x9b\x9bK\xde\t(\x8d\x8e\x1d\x89\xfc\xac\xe4sK\xfd\xe2d\x17m\xfaV\xc0P;6\x1d\x844\x83\xebx\t\x86M\x18\x1fXS\x16a?u;\x0cy>4U\x97\x0e\xc9\x90\xdc\x0c\xf2\x92\x87|\xc3i\x8dXD\xa0\xd8\x1c\xd7So\xd5&\x87J\xa3\xb3D\xadx4\xfd\x8d"\x1d\xa5\xf8G\x95K\x97Z+NBQ\xfa\x7f{\xb5\x96\x0f\xdd\xd6 \x9bUl\tES\xdc\x9f*\xdb!JRP\'\xa9\x9b\xf0\xc6\x97\x16o$L[\x00\xaf\xf9\x11\x94t\x9b\xe4\xa6\xf9;\xa0\x9dZ\xe1\x92\np68\xe4\x05\x95d\xa6\xcc\x8f\x1c\xc5\r\x1fi\x8d\x84\x0c\x8a1\xb2\xc5|G\xb8:\xde\x1bz<\xf7D\xc1\xf9\xf0\xf0G\xb1\xea\x89\xde\x918n\x8e\x0e,\x87\x0c;\xa7\xbe/\r\xebq#{\xc6\xb2\xdc\xf6\xfc\x17\xc9a,B\xfa\xf9dT\x81\xc6\xca\xdba\xd9`\xda\x9e\xf7\xf8\x88\x00V\x11\x96\x9e\\-i\x16{#\x93\x88\xff&\xe3\xd6\x94FH\x03\x1b\xcbf\xea\x1bK"\x86\xeb\xe3\xe7\x96\xa8%m!*\x1f\xbczI\xdd\x0bRy\x8e\xb6\xe9\xdcU\x07Sc\xb1Ck\x05\x1dH\xb7}\x88/`\xd4\xba\xf6C\x89\xfb\xf1\xbfy\xf7X\xff\xfc\xaa\x18P\xda\x1b!\x895\xfe\x8c\xd5yJT\xd5\x1b=\x9d\xe3[\xb6\x0e\xb9[\xf1_\xfd9\x8f\xa8)\x91@\x82\x8f\xc1\x86a,[UCIv\x83\xf4b*\xcc\'\xfd\x13\xc84\xf6\xc5Up\x0b6bT:\x94\xed0\xedp\xe5\x95\xfb\xb2\x8a\x99\xf3$\xab+\x02\xc3\xb7\xfe\xdf\xc7 
\xc1\xab0[_\xb8\x1e\xc4\x87\xd3D9I\x08\xe1\xaf8\x0c\x1a-\xec\x8b\x0f\xbcc\x8b\x18J\x7fE\xaf\xe0hZ\xbdy\xf3\xd6/\x1f\xeb2\x1a\xf7\xae\x95\xa8\xe8\x8a\x18=\xd7\',\xf29\xd6\xfe+\x10w.\xdd\xeb\xe4\xc8\xfa\xd6\x87H\x0c\x7f\xe94\xb9\x9f\xf8\x91\x03]tc\nE\xe1N\x80\xfe\xc6\xd3/\xb8\x04h\x97\xcc\x8c:\xa4a\xe26\x0c\xdcu\xc9\x9d\xfa[j\x96\x19\xe9\x1b\x88\xc7^\xc5\xfc<\x00\x92\xa9A\x9aoR\xbc\x06\xae\xe3^\x8aC\xff~D\xf6w\xf6\xd7\x18\'\x0c\xb6\x05\xe3\x1b\x0c\xd0\x8d\xa2\xfe&\x04\x00\xf6\x1f\xbb~e\xbb\x80\xafN\x94\xe5sP\xc0\x06F\x95)\xc8\xce\x99#@\x01\x1f\x0f\xe7\xb1\x8f\x80\x88S\xd7 \xdfS\xa7\xc0X\x92\x97\xad\x8d\xa6\x94\x07\x18d\xabBG2\xda\xe0E\xe21rl\xd6V\xc5\xec\x05\x91g\xc5}xP\xf5vH\xd1\x8cL\xe8h\x8a\x16\x0b\x84A\xba\x8e\xe9Q\xe3\x1f\xb4\xe6\\\xb3\x14\x13\xceyz\xf5\\\xc7\x81J\xc3\xcc\'2\xe9\x17\x13\xde\xcb\xe5\xce\x9d\xe2\xb3J1\x9e|\xfe\x0e_\xf4\xef\n\x1c\x1a\xad\x11x?\xacF\xfeD\xa0\xfb\xea\x1d\x92\xd9\r}\xc8\x10\x98IR\xb6\xfb\xe9\x0c\xc7m\xf4*\x01\xdf\xfb-\x06z\xd9`\xd9\xbe\xe6_\xca\xf8\xc9\xbd\xd7\xd6g\xd1\xfa\xbd\x11,\xeff\x92\x00\xa8\xc1\x8b;\x05=F\xf8\xbaG\x9c#\x8c\x0e\t\xf9\x08\xd5m\xe8\x17wQ\x83\xe2\xf1,\xd3\xf7\x8c(\xcd\xca\xde\xac?\xda\x93\x13\x99\x1cD\xd00\xe1\x90\xa6\xd8\x7f\x10\x91\x10:\xe7\x1fcU\xe9\xe7E-\xf8\xc0\x91E\x0e\x98\xec6\xf1\n\xd0\x8e\x9b\x0e d\xd0\x8a\x1f\xa7\x10a\xbd\xefA\xb5e\xef/\xd4\xd1W\x92\xf1rm\xef7\xbb@\x99\xa8-\x9fC\x93\x91\x81q\xb3\x01@\x84\x1a\x0b(\x9d\xa3\xf1%(\x04<0M}\x89\xe7\x01\xc21\xb3\xa0\xa7\xc1+\xd2\x1c9\xa0\xfc\n\xbe\x81\x8eV\t\xcfj-\xe9\x84\x04\x9dtr\nf\x08\x0bY\x9c\x9a\xc5L\xf3\xeb\x9ad+\xde\xf7\x13n\x97\x8dx\xa3\x0c\xca\x81\xae\xf8\xe4\x16f\xcd\xdaR\xa2\xfda\xf88_\x83\xa1\tNN\xea\x9bU\x03\\\xaf\xb5\x1f\xc3\xcd\xcf\x11E\xb6\xf8N\xe0 
\x8a\x89\x90w\xf9#I\x0eW\xbe\xc8u\x17\x1c_\x9f\x16\xb8\xbb\x8fuf\x91d\x17;\x12P\x13?\xfc\x0e&\xe3\xe8\xd6\xe4@\xdc\x04\xfe\xd3\x1b\xce\nA\xb8\xbex\xbeY"`\xfay\xe7x\xed\xb4[\xefs\xd5\x9bC\xf80\xee\\\x10\xf6s\xc6\x8b\xa6\x90[\x8a\xb07|\x0b\xd3\'\xb8w\x05\x95\xa0\t\xcc\n\x86\xe0\xad-\xf1\xcd\x88M\x07\x1a\xd5\xbfAz^\x03\xed\xc5\x1b\xcf!\xb7\xff\x87\xd8&H\x90\x03M\x00\xdf`r\xb3\xdf_\xb3g\x0b/\xee\x9d\xaef\xdc\xebT|\x0c\xf1\x80\x059\xe9\x90\xaf\xea\x90\x87\x08\x19\xc9\xd1\xee\xe0\xebOC\x81\xe7\xcaoe\xcen\xb7D\xc9\xadz\x92K\r\x88\x95\t\x99k\xd8\xb8\x99\x14\x86\x93\x05Cc\x1d\x0b\xa3\x96\x82\x05\x1a\x9b\x99\x11q\xceg?1\xc4\xd6\x1e3\xd3\xbf\xf1`\xda\x04\xdf~)08q\x93l\x08,\xd7\\\xbc7^\x0c\x88\x98^\xd7U\xb4<2\x88\xd0_s\xf3J\x9a\x90WH\x9bY\xcf\x1aL\xa9\x98\x99\x86m\xa2\xda\xa2\xb0\xb6u\xf2%@9z\xdc\xe8\x0e\r5>\x00K\xc2\x1d\xde\xd3*}LC;b]Y\xa0\xa4\xed\xc6k{\xe1\xe6!\xf6<\xa7\xd4\x98\xcbI\x86\xd9\xc5\xfc"\xfa@|St/\x1b\x89g*\xf2\x1a3\xe8\xcd\x01G\xd8\xbbE\xb4p.\xa9\x07\x008D\x18\x87\x81\xc0x)`e\xf8\x12p\xab,\xf86\x9a2z\x97W\xa3\xfc\xfa:\x87\xfa\x97,v\x1c8W\xb7\x19\nU5O\x8dq\xcaH\xf8\'"\x87\xb4\xf1\xdb\x03\x7f(\xa5VJ\x89d\xfdNj\x0e7\x8e 
\x81t\xecl?0\x1a\xc2\x9bU\x7f\x0c\xf6\x83\xfc\x96mR\n\xfad\xb4\xad\x82L\xf7\xa7\xa7\r+\xb9V\x8c@\xab4\x94\xebO\xb6\xa4v\xd0\xbda\x1b\x9b+R\xac\xbb\x15O\xff\xdf\x94\x1d\xee\xd1\x8f\x82\xfbk\xb5\x14lR\xee\xd3\xdb\x19\xaf\xa7\xe5?\xbf\xc7\x9f\xa9\x07\x84"\xeb<\x9e\xc9N\x11\x84;\xba+x\xd4*\xc0Q\x87*\xc5\xeeG\xed\xa0\n;\xc5\x95\xce\x94{\xb7\x8d\xc4!\x08\xa4Eu\\\xfb\x7fo^\x95\x93pc`\x85My\xf1\xf9\x1aQ1\x9b@\x88\xa3\xdc\x81V\xce\x8f\x8b\x92X\xc0\xdcO(\xedZg\xa5e\xd2\x8e\xf83\x02\x80\xcd\xd9e\x15\x9dn\xd2C\xad;\xc2\xf2\xa2\xa4\x14\x17\x01\xebJ`\xe6{c\xb4I\xa0\x0b_5\xcc\x91\x15\x1a\x1c\x9cM\x08\x98\x8er\x84\xb1\xb5\xda\x87\xec\x10\x95\x86\xb1\xea\xa6\x8e\t\x97!n\x98\xa5]_\xab\xc4m\xe7\x0e\xb2y\x91c\x0c\x13\x9cY{\x03\xed\x0c\xb7*\xd1*d0J\xe1\xdf\xaa\xe7e\r\xb7B\xed\xb5\x10\xe0\xed\xc9\xbemZ1\xee~\xa4\xdf\xf8\xbfZ\xb3\x08\xca5\xcf\xea\x8fxS)\xc7;\xd0f\x1a\xf8\xda\xff\xc2H\x0c\x18\x0f\xf9\x13}\xe8c+\xc0B\xc8\xf3!\xc8\x1c1)\xb21\xb4\xa4\x92\xc6k\x8e\x81\xed\xd8X\xf4O\x84[\x1f\xc2\xe0S\xd6\xe9X(\xcb{\xfd\xbcx\xca\x95\xba\xa2\xfe\xa4\xcd\x8e\xcf\xe6\xb2\xbfI\x0beM\xfa\xa6\xec\x12\xc4\xd0Q\xde\xa4\xaa\xb6s\xff#\x88\xaa\xa7X\x82\xeb2>{i\xee\x12\xee\xe2\xb0\xac6\xb4\x9a\xadq\xb8$E\xf3\x9ew\x91h\xf8\x9f\xea\xe4\xc5\x11^\xdc\x00\xe2"\xc0Ie\xe9\xb2\x86\xed\x08\xefv\xc2\xb0 
\xc4\x05\x12|\xf7\x04\x10\xdd\x03\xb8\x1f9\x14\x11fn\x01\xd5\xc5!\xdf\xf1g\xef\xee\xec\xe8#r\xf8\x90\x8e\x94\xadn\xb8\xc6Y\xfb\xa5\xe8\xc8\xf9P\xa2!\x19\x9e\xc5+\xdc\xf0\x12e]\x90,i\xec\xf6s\xbb,Tt\x93\x1f\x88\xa35\x91\xd3\x0f9\x0b\xbeD\x87\x96\xbdL\x8d\xe3i\xc8J\xec\xd0\xf4\xd6h\xe6\x9b\xe1\xcd\x8b\\\x8dxHQ{\xffq\x84\x08\x90\xf6\xc6>\xab\xc4vb=\x13\x83\x8f\xa9\xd9h#\x0fqF6\x98\xdc\x07mjU\x81dYiNY.&y\xb22\x98\xf4\xfflw\xbb\xb0\x8e\xf6\xcf\x1eA\xfc=1\xe4\x11\xa3#cQ&*\x91p\xa4otd\x89i\x97F\xca:W\xda:\xf6\xd0\re\xdc\xf0\x18\x97\x84u\xe5\x06\x1b\xfb?\xb3\xd7\xfdv\xf9\x8a\xf6Ok\x17\x97\xf3\'9\xce\xb0\x1a7l\xab\xb1y\xa0\xc0\xbe\x9d\x8d\xa6TH\x9dw\xea\x8a+\xfc\xf7Y\xb9\xc1&\xeb\xca\xdf6\x87\x90\xc3\xd7\x9a\xbc\xb6:4\x84\x00\x8e\xe9\xa5\xd8&P\x7f\x13\x97\x9d++1E\x1c\x08\x18\xd4\t\xc4\x05ym\xdd\xec\xb2\x1cPK\xe8&\xbd\xed\x85(,&\xecd\xc4>\xde\xe7\x87\x91\x0e^MM@ \xf6\xc9g\xff\xbee\x08\xc7U-\xde\x08Z\xa33\x11\x93\xda\x11Jg\xed\xc5d\x90\xf5SA\xb1~\xfbE\x8b\xf4\x03DP\xf2C\xb2\x06\xdey\xc8\xbd\xa8\x80~\x06\xb1\xd8\x0e\xa7n\x90\x0b:\xc0\xe1\xea\xbb\xda\x87\xf8\xdc\xc8\xb6\xf7\xde*\xd4\xb0\x8c\x92\xfe\x90\xd5\xd7\x949}\xc6\xd9\xf3\x08\x0f\x9b}h2\x98\xd1Y\xa6\xed5\x89U\x9b\xc0\xb3q0a\x10$\x98\xa3v$}\xb4\xf2\x86\xcb\xfeQVx\xc1\xb7\x03O\x9e\x94*\xd5\xf9\xc1V\x19_\x02\x96/\xf2\x9dw.\xc6\x80,F\xd6y8,\x7f)Ou\xae\x9a_?Z\x80\x0b\xdfXUn\xc5\xb2 
\x99\x9aFK\xcf\xff\xd1\x98P\x11\x8e\xfd\xee\x0ec\xf2m-\xbbT\x03\xfc\xc5/\x0b\x1d]&\'\xfbsE\xee\x11\x17#\x97\xba\xd5\xc83\xcb`\xd0\xc7B\x10\xd5CI}\xc5\xffu=l\xec|\x8a\xa5\xfa\xde\x97sI\x86pi\x15\x04\x8f\xae)\xd0PqE\xd8I9\r\x12L\x86<;caG\xaf\xb7z\xac\xb1g\x8c^\xe1\x17\r\xe6\x0eZ(\xb4q\x91\xac\xd4\xad\xaco\xf9\xb5S\x02\xb0\xd0\xacw\x88\xe4\xfa\xef\x88\x10)TT\xd4\x11o<\xa3\x82I\x00\x01P\xc1x\x87\xe9\xbc\x1cL\xd9\x832\'\xce\x8a\xda\xd0\x8dPJ\x10\xc1?)\x17\x9dS\xe5(\x15:\xe3\x86\xfd}\xa4~R\x98h\xc4/u8\x97F2S\xee\\E\x96\x8b\xcb6\x8d\xff\xb9}\rj~e>,#K\x16N\x81.\xc4m\x84~\xc3\x91\xa4\xc4B\x122D\xec\xa9n\xf6\xef\xae\xdcB\xe1\xbd+.\x1fX\xd3\x9c\xa7\x0f\xdeB\xe9(\xa3KJ\x98\x16\xb6Hk\x81a\xa7\x01]bXz%\xd6\x0bM\xb5CE\xb8\xbf\xc4\x13\xe22\xbf\xbd\xff\xa2Y\x85(jz;c&z>\xee\x1d\xae\xae\xbeR*Yt\xa7\xe0\x8d\xa7\xaa.\xc12S\x9f$\xb6\xf9\x84\x98\x8f*\xf7\x17\x82\xee!\x13%\xa4\xe0\xd6\xc1\xb5\x1cT\x1bF\xcb\xfe\xbf\'\x99\x95\xdc\xf5;\xbe\x0f"\x00\x1d9\x9b$\xa5%~3\x03\xa6\xd8\xa4=\xd9\x8d\x8b\\9Z\x87\xdaN\xd1v:\xe1\x17`\xc6Mz"\x0b\x08\xa8\x9d\xf8\x93(D{\xb8\x01^\x1a\xc8S`\x85\xce\xc09\xa3&f\xdd\x159A\x8c\xc2_\xf4\xc0\xa9\xcd?\xbd(l\x8b\'a!\x97\xbd\x80\xcbCW,{nF%\x19\xa7\x07\xe2\xaf}A\n\xfb2;by\x05D.\x06\xc2X/\x85<\xb5\xc9UA\x06\xfc\x91A\x87;\xf5\xdbG\x00\x98?M\x11\xa0p\t-J\x15+HV\xe1\xc7\xf3\xe7\xdb\x88\xe1C\xf9\xd8l\xbc/\x01\x1c `2\xee\xed\x0fY\xd3\xb9\xc1W\xd2g\xba\xcb\xd6\xcd\x89\x12\x19\xff\xa6\xc8:\xed\xfc\x97\x05\xff8L\xc8/\xcb\xe5#\x907B)\xa1\xfe\xa9B\x97\x99\xef\x14\x08\x18\n\xf8 |\xbc\x9f}\xef_\xe1\x11\x10O\xf0\xbc4\xbf\xe12\xbb\xb7+y\xab\xe2\xe9\xdc\xf2s?\x1bNb\xa5-\x0cu\xb0\x94(z\xc3\x92\xa0\x87!\xc0b\r4\x01\x91\x1f\xf0\xce\x01)\xee*\x9e\xc17\x80\xf7\xf4\xa7_\x9e\xff\xe9\x8c\xff\x9f V\xa8\xda4F\xd3kIP\xcaL\x8a]\xe8{@\xdd_\x07\xe9\xba4 
\x8d\x7f\xc1\xf5\xee\x0b\x94\xd4Y\x05Z\x8a\x9c\xc0\x1b\x99\xcdQ\xd3\x07\xf8\xd6\xdf\xd1\xec\xf6\xae*k"2\x9cG\xf6\x08g\x85\xf4\xb2\xd2z<\xf3\xd3e\xa9EcU\x1e\x8a\xf3\xb3\xc8H\xa7\xbb\xa0\x04\x98\xa7Y\n\xdf\xb8o\x9a\xc9\xed\xd2\xfd\xa2\xb3\x15\xbe\xfc.S\x85Hq\xdc\xcbF\xb2\x7fI9D9\xe0\xae5\xfb\xa4J$m\xc6\x95"#\xcc0\xdeWb\x82\x91\xd8\xc7[Z\xe4\x9al\xdb\x81L\xe8hp\xe7SZ\x99S\r\xc2\x94Bc\xf7*\xf9X\xff\x11\xe6\xdb\x0fB\xf43\xaf\xe9{\xd3T\x0cx(i\'\x83~\t\x17k-o\xc9\x1f\x07$\x9a\x8a\xaa?\x9bR\xbe\xf5\x9bU\x03\xb6\xcf\xfc%\xc1>+\x8bk\x96\xc4?\xc3\x1d\x8cr:\x06XN\x90\xbd\xc8\x1c\xc3<\xe3bz\n2>\xe3\xae[U\xa1\x11\xa5T\xc2(z\'v\xb80=Zd)\xd5\x99*\xcb\xa9\x80\x11\xe7w\x80l\xed\n\xc5k\xc6mI\xe3\x88m\xb7\x0c\xdd\xed\xa0\xd9\x11\x91\xec\xf1\x9bt\xd7D\xeeB\xc4[\x9c`\xc5\xf5\x99\x0b\xd9\xafc\xc4o\x82\xc5&v\x1c\x7f\xda\xc7\x15\xd3\x8c-\x8a\xc0J\xc4\x83\x9d\x9b\x04\xfe\xff\xe3L\xb2\xeb\x7f\xaf,4d\xbb[8\x02\xd4Q\xff4^\x1b*\x84y\x8b!ty\xe5\x88w\xe8\x04\xa2\xdd\xf4\x04\xa4O\xf5/\x8a\x13ql)f\x13\xdd\xb2V\xb1WLI\xc4\xd5\x1e\xd5j\xd6\xf4}z$E\xd5 
\xffX\xaf\x0f\xd1\x10w\xf7\xa6\xfe\xc6||\xd6\x0b\x0eG\xc7\x1a\x12\xe8\xcdPq\xbe\xaa\x00\x8d\xc1\x03\xb0\x10\x8b7m7y+\xdb\x11\x9c\x86\x10\xa7\xb7\xc8\xbaF\x92K)\xdfq"\x1b\x9c\x8fN!\x1b\xa6;\xe3\x01G\x87\xb4\xef\x0f\xb1\xae\xc7d\xce\xc4\x1du\x04\xed\x05\xe1x_\r\x15\xde.\xb0E\xef#\x87%\xfd\xa6\x02\xdam)\xabL]g\x91\xe7\x95\xf0\x0f7\xdb\xe4\xaaY\xd5\xf6\x16\xdfq\x95&>.\xc7\x04\x89hx\x95\x00\xb2f\xa4~\x96\xbci\xe2\xfe\xff*\x19I\x94j\xa1\xe8\xf3\x95\x05\xf1\xc7Y\xbd\x140\x83\x1b\xb6\xf6\x1c\x0c[\xa9\x8a\x1f\x96\x9fZ\xe4\x0b\xc4v\x87G\xc4\x83\xa9s\xa4\xf5\xca,0\x08!\xa1\xfcb\xb0\x11:\x01\xf4\xbf"\xdemg\xbc\xe4\xf0zjp\xa0\x06\xa8\x8bj\x94\xdcg\xbd\x12BxQ\x96(\xa3\x97\xf1\x85F\x8b\xf4\x08\x8a\x0f\xa6\xbaA\xb5\x88\xea\xb3eG\xa4\x0cb6\x02\x05*?\xe2&\xd8\x08rqjDt\xe81\xe8\xb8/\xd7\xd8\x8c>\xf9\x16\x17\x14\xf1E\xb4a\xa8\x15K}&\xb6\x970\x0e\xa2\xbb\xe8\x12G\xa0F\xb5V\x9a\xdf\x17\xc6\x82\xda\x00\xee\x01\x13hY\x8e\xf5\xd2@\x11\x01\x8b\x99\xb4\x88\xf3~\xa2De\x19`\xeb%\xcdL\xfe5\x015\xe7\xa1L\x1e\x05\xacQ\xc3\xe1R\xb0\x86NHB\x8a\xc1;+\xfbot\xc5\xcf<\xe3>\xdf\n\xaf\xdf\'I!\x05\xc4\xb0\xd5\xc3R8\x0e\x93\x99\x9e:S\xf3\x83O\xe8\x92\xd2I!ds\xdf6\xc3[\xc83\xa0\xff\xd6\xb2;s\xadyY@\xee\xe6\x17!J\xd9q\x1b\xb7\x0eIKT\xb0\xb4\xcc\xc7Y<k\xb8Y\xd1z3\x18\x14\xa7\xc3\x9eh^\x16\x08\xf9\xc8\xe5C\x1c\xc3\xe5_\xbb\xae\x97\xfd|}\x1fO\xf6\x10\xff\x81Q\xa9c@\x1bk:6\xc8>\xf8O\xc8[\x11~ie\x00\xc2\xcf,\x14\x906T\xbf$\xd5D\xe6\x02\xfbQ\xc2\x02O\x96\x89%\xc8\xd9\xe6Q\x1aZ\x87\xae&f+\xd6\xbb`v\x8a\xadE\xff\x18r\xa4]N\xe4\xee\x8e\x93\xd0I\x1a\xbb\x9baB\x96,x\xd8[\xe0\\\xaf\xde\xae1T\x107\x85j\xc7\xd7\xcc\xe3\xcd*\x00\x1c\xe9\x85\xa25\xb3\xcf\x80\xb4\xc7Y9=\tQm\xb9d\xef\xc6,\xef\x01\n\x81\x83\xa2\x15\xc5.e&m\xda1\x0b\xb0\xd7\xa34\x87#\xb0\x85\xd1\xf1?\n/\xd8\xd6\xfc\x11\xe5D\xaffpAE\x1d"{\xe6\x07\xd1R\t\x10\x1e\xe4$&\x1eg\xc8^\x95"\x13\xfd\xbb\x16M\x08\xae\x96CU\x8c\xe2\\\xf1X\xea\x10\xd4\x90\xdd{\x7f\xb3\x80Z\xcf\x8f\x99\x11\x98\xe64\xc3"\xb7Ep!;\x08L\xe7~N\x96\xf9\xe8)}\\\x06\x83I\x0ey"\xe8\xba:\x8cd\x90@\x1b4q
\xd1*\x9eh\x0f$\xd3`\r\xf2b\xf5\xf6\xa0\xae\x03I\xc7\x0e-\x1c@qu\x0c\xf0)\xa0\x91X\xa5aJ\xfd\xa2\x0e\xde\x9cj\xd3ZU\xee\xbfu[j\xf5t\x9dh$\x8f\xba\xa9\xf6M\xa0\xf1\xeb.\xd8B\xd9\x837\xac\xd0\x9e\xd3\xe3\x17\xab"uqQ"\x893\xfa4\xd0\xfd\xa0}\xdb\xae\xf7\x1f0#\x1f\x93\x9d\xcd\xdfyW\x01\x12rW\x1bi\xb2\xc9\xf7\xbb\xcc\x05h\xa7\xae\xd4\x02s\x19\\\x93G\xacT}DzD\x05\xb1.\x13"\xc6\xd0\xeb\xf9\x01g?\xdb\x8f\x9a\xa2\x7fjs[\x11+X\x89m$\xe2n\x0b6(\xc2\xb2mH\x84|\xed\xf5L\x19\xd3\x83\x8b5kbF\xfd\xd54\xd84\x88\xecS\xab\x15"\xc9\xaa\xb2\xff@VSu\xdeU\xc2\xc9WK\xe0\xb2\nCaXf\xa8\xf3\xf3\xd1\xea\xac-\xd8\x99A\x9c\xcd#\xf23\xf0\x85&S\xab5\xb7\xfa\x0cF\x12\xc0\xcb\x81\x0e\xcf\xcbt\xef\xa6\xee\x00g\xd5\xa3\xd8A(\xee\xe3\x10\xf3\x82\x0e\x14\x8d\xc2\r_\xec(%\x06)a\x85\xd2\x8f\x7f(w\r\xe0t\xe9\xcc\x11\xbb\xcaQ\x88}\x9c\xcb\xcc\x04;\xd4H\xbf\x1c\xb3\xd4\x8356\xd9K\xcd\xe2\x12#\xac\x94\xe3\x140-\xc3\xa4Ht,\x0eh\xf8\x11\x8f\'M\x98\xdf\xf7k\xb7\xdb\xcf$_|a\x8b\xb8jr\x80\xed\xd5\xff8\xbd36eUf\xf0\xae\xe9\xf3\xd6Y\xddNy\xe7\xb2\xcc\x9f2\x8a\xf0UU\x15\x88\xe6E=H\xcf\x07O_\xbe"\xf6\xb4\x80)\xb3V\xc4f\x1e\xe6v\xdb\x9c\x08\xedUr\x82\xe3\xbb=\xa6\x1e\x1e\xf9\xa4/\xd5T\xe1\xc9\xcb\x00H\x8c\xc6\xdb\x94\xf2\xc8\xd7s\xf7\xe5 
D\xa7\xbdh\xb9\x1a\xc8}\x7fh\x14\xc75\x17\x13J\xc9\xecn\xfdy\xc3\xaf\xd7\x1d\x072$\xf6\xdd\xd1\x17\xecw\x01\'H\'\xb6\x82u\x15\xba\xc3{\x84\x80\xd7\xc5D\xaf\x96\xd9\xb5\x04\xa0(\xef\x14\xe7\x8a\xa6\xbchiLf\x83cY\x17+\x0e\x9e\x02\x9c\xe0k\x12c7u\xbd\x88\xe9\xc6\xd5B\xbd\xc9\xd6\xdb\'\xd9*\xd8\xd8\xc4\xbeg\x99\x9cl>\x0c\xc6\xea\x8b\x04\xdfT\x99\'\xc1\x94B\xf0\t\x01\x05\xcbdk\xba,4E\x87\x98\xc1$\x05\xbb\x96\xef\xbcR\x0e\xd6_\xfaNIn\xa2\xb4r\n\xd8\xb1j$\x93%\xf5p\xa3\xedd\xd8H\xeb\x87\xe5[$\xb2\x97u\x11\x14\x11\xfa\tI\xe2\xf1\xed\x18\x90\xe2\x94\xfe\x80\x1dqA$\x7f\x12\xa2;\x83\xd2\xca\xda\x1b\x88\x82\xce\xd0\x82\xd3\x9dN>\xffp&r1\xc2\xd5\x84\xddYt\xc6s\x1dG\xe6Z)+&\xb4S\xcb\x8f\xc9\xb9k\x02\x8c\x87i\x03\xd7h\xb7WA\xe2\x1c\xcd\x11\x0f\x83\x8dU\xeb\x1c9@\x1dS\xcc\xfe\x0c\x83\x84#g\x98\xa2\x07P\xb4P\xf1t\xc4\xbc*l\x8f\xb8\x10\xf6I\xd6e\xd5L`#\xa9\xf9\xf7T\xbf\xd7\x13l%P\x89\xc5\xd7\xae\\\xecb\xb4\xbb\x9bDe\xfaZ\x80\x1dgm|\xc0\x10\xc8\x1e\xe0\xf6TP\xd8\x98>=\xdf\xd9\xa6\xbf\x93\x0e\xf8\xd6\xa5`p\x01}\x8c\x8c\xd4#\xc5\x93\x1f^-\x1e0"\x89\xbc\xad\x15&\x9c\x14+\xee\x0e&\xfd\xe8B\x17\x11\xc6\xd0\xa0V\x0e\x86e\xc7\xb8\xb5\xba\xc2\xa4\x9c\xf0K\x92\xa8~\x00z\xae\xe9\xb87\x1f\x8e\xcaj\xe2E_ 
\x82\x96\xad\x1b\xe8B\xae\xff[>\x83\xa9\xb6\xe2;\x12\\$\xc41\xb0\x8a\xd6"\x96\xa5\x11\\\x9d\xb3\x0c\x92f!E\x07\xaa\x0fd\xcdq\xf4\xbb_JA\xea*\xacL\x81xE\x92]S\xc7,\xd4<\xbc\xf6\x82\xca}\'\x92\x08\x8a\x8e6\x18\x13\xc6\xe2\xe4w\x82\xca\x04i*W\xad\xda\xa3\x8d\xb6\xf1|\xee\x08\xba5\x14\x8d\x98t\xac8\x7f\xd2\x14\xf7\x87\xea<\xff\xa8\xd1a\xa6\xaa\xfe\xfdu=\xd8\x07\x16\x84\x92\xa4\xa4;-\x99\x11<Y\xb3\xbaof\xc9\xec\x01!\xff\x10\xacE\xc6UZ\x0b\x92\xb7A~7O\xda\x1cG\x80\x90+\x15=lah{L|3+\xc9\xa7H!\xdcw.\xdc\xc2H\xd1)/\xe3l\x84f2_78\x11%\\\x15?\xa8^\xfcZ-\xbb\xa7c\x1e\xeb\x9e\xb82Ev<\x1e|\xc4\xf5I\xd93\x84\x89PQ\xea\x8a&*\x19\xfeB\x0f\x81C\xe9,O\xd4\xd8\xf1\x12\xbaJ\xa3\x95\x95\xe0\xf6R\xa1"\xa9\xb4\x06\xa8\x9c\xe6\x01\x0b"b\xa0\xa26\xd47v\xd1\xcd`^f\xa1G\xda\x1f\xf5\x9a=\x0b\xb0b\xd6lvH\n\n\xf6?\x06\xdf\x8b\xb8K\xfbyzL\xaa\x80\xf0\x1c\x84\xe1\xe8\xb5\xd4\x8b\x89\xf6%\xb7k\xb7Q\xec\xadd\x83\xccN|\x15\xfa@b{l\x9f\xb9\xf3\x10\x1b0}\xd6\xa70\x00\xfc\xa8\xd6Y\x94s\xa3\xda\xb1\xd4\xbe\xf8G\x10\xae\xff\xcc\xc84\xcbn\x814\x07\x05\x08e\xcfw\x9bL\x81(\xe6\x0b\x9c\xc8\x81\xa0\xc3M\x16\xb0\x01\x87\x1f\xac\x9c\xb3!\x00\t\x96\xcdq?\x95\x99\xb4I\xf9\x0e{\xc9\xb8X|\xa8Dt\x90\xf2`kTY\xa1S\x1e\x0f\x0c\x13h{c\xd36\x0b%\xd0\xf1\xb4\x958M18\xfb\x07\x88r\xda\x9d\x16H\xee\x8e\xd2\xdfEc<\x0e\xe4\xe1uPfU5;\xf21^#\x1d\x13ly\xb9\x1a\x1b\x15\xac\x12k9\xf4WI\xc5^\xb8a\xc5l\xd4\x97\x15v\xb8\xa2\x16A\xbc\xf0)\xf3\xa7\xd8\xee\xa3\x92\xd6\xb2u\x1f1\xdc\x9d#p\x867^C\xc4\xde=\x8aa\x0e\xb5\x83\xa7\x94&V\xd2\xa0\xf8\xa0\x03\x87/\xe4.\x9cZ%\xee<\x06&\xce\xc3\x86 
\x98\x18\xc4\xa4\x1b\xe6\x8a\xfd\xad\n\xa7\x14%&N\xcb.;\xb0\x7f\x16\xd5\x05\x18\x8c\xcb>\x90\xdb\x17l\x83T\xc6\xd4\xf8\x198\xf6\x9d\xccq\xd9\xa3>\x7f\x06Uk\xc8\xe2x\xf9\xfaG\xfa\xbe=\xfeE<\xcf]z\x1b\x9d/\xca\xe2\xa8\xcd\xd8\xe8;uK#\xdc.\xc0\x8as\x8bby\x90\xb1\x96^|\x9e\x02!R\xae;{n\xa4\xc0`\xd7\x11\xa1\xf1\x93\x1el\x7f\x93\x9e_\xd1|\x97\xa0\xa81\xdf\x11\x8dt\xf6*\x01(\x04\xa6\x10\x90\x9f+W\x94\xf3\xecI\x1b\xbd\xdb#\x11I,\xf2\xd74\xc7\xa3\xf9fSf8\xbbK7\x1c<.\xf9~\x15\t\xafV\xdc\x9d/\xf3o\x90"\x81\xc8\xbe\xa3\x1e\x97I\xdcD\xd7(\x84/\xec\xef^\xba\xb4\x85\xb5\xa4CR\xc0Y\x90\xafX\xc1]\xaa\x8d\x13\xa1\xd2l\xfc\xa2J\xa2\x92\xa4\xb6\xfa\xf9\xfan\x1ev\x8e],\x88\xeb\xb9\xab\xb2r\xc5\x05\x99\xbd\xe9c,\x82P\x88\xb0Y\xb6\xe5/\xcf1\xffVi\xef\xd3\xcc\xb8\x91\xc0R\x0b\x1d\xe8\x0e\x16\x8a<\xc2\xe1\x05\xc4\xbc\xa4\xf7\x11G\xc6$\x08\xbf\xfa\x19\x84\x95nk\x12\xb5@\xea\x99\xf1\xe95v\xb3\x16\xd7\xccm\xd7N\xce\x88v\x90eH_\xc8y\x1e\xfdhA\x98l\x11\xf5\xe2\x9f`\\\x84\x06\x1c\xfd\xacU\xc1\x14-gFn]q\xab\x1a\xfdi\xcc(\xfdT\x0e\xbb\xd8B\xf5\xdee\xfe\x02\x05GZ\xc6\xcc\xa6\x8e-\xe6$E\x9c\x0b\x07%r[\x875\xf9\x82`_\xc1b\xfa\x82\xdc\xa1,\xb5P\xfed}\xe8M\x1e\x97\xe1\x9bz\xa4\xb9\xf8\xbat\x16\xa9\xa8\xfc5\xec\xde\xd4\xfc\x86\xdd]\x82w\x19\xe7\x94\x1b"7K\xe1\x04z\x85]H<\xf754/A\x97\x8dr!y\x1c\xbbq\xfa_*6\x1d\xd2\x86\x8cy\r>\xfa\xff\xf0\xbej_\xc4Zu\xca\xb7\x87\xe7W\x12\xbal\xad$S\xc8v\x99v\x03\x11%R\xf0\xd9\xff\x18\x10\xb7\xccQ\xa6\x08\xdaQ\x15@\xea\xe5\x13X\x87\x83;\xea\x97\xf7}CRJ\nU \xf7\xbbe4\xb5\xf8\x0c)}\xbe\x8c\x9dd\xca\xf3\x9f\x9b`>\x1a\xda\x89\xf5\xecx^\xc2xeca@\xd22S\xc9-8\x8cH\xdfL\xed\x10U\xdc\xde\xa5\xb9K\xc3"\xf8\xcc\x1b\xc6}\xd8|\x8b=E\x19\xb1a(\x05\x8f\xc8,\x19M)\r\x95\x15\xbdq\x13\x1c\x0f\xf1\xed\xa4\x1d\xa8\xe3T3\x8a\x8f\xb5\xf1g\x0c\x05\x81\xff*;\xbc\x98\xdd\x9a\xec\xef\xcf\xd6a1\xe7\xb5\x0b\xaf\xa5\x98\xe1X\xee@o\x92\x0c\x12\xf8 
\xf1\x8fv\x9e^\xa1\xc0\x14\xf4\xc7ZB\xa5\xfd\xa9\xb13\xa0\x85\xbdA\xe7\x89\x1a\x1b\xe8\x92\x9b\xbfKy\xbe\xe7~@\x81\x83\x18&\xba\xad\x88\xbf\x9e1!\xfd\xf9N\xbeEp\xab\xaae\xf0$~\t\xa5\xfex\xd0\xb3w\xc5\x10\xddE\x9d\xbfzH<\x80\xaeq\xa6);`\x1d\xeb\xbd\xf2\xbaZ0\xc6\xdd?\xfdn7h\xb2?&\xe2ch\x11vOY\xcb\xd1\xa9\x878\x860\xaf\xf8 Z:{3\xcb\xc0\x92}\xb60(+}3T\xd4\xfaWs\x12\xd5\xeb\xc8\x94Ah\x04\xed=\xda\xdb\xb6\xe9M\xff\x16$\x0bZ\x8b\xcd\x84\x05\xe9\xf0\xf6\x15\xa2FaW\xdaC\x9b\x84\x17\xff\xc2\xc3\x07\xbf{\xf1\xe0 \xf2WS\x0eJ5A4~\xee\xabc\x06\xef:6\xd6\x1e\xd7\xd9\x80\xa1\xc8\xb4\xfc \x9c\x19\xb0\x17\xdfbb\xd2\xd3\\F\xa8\x93\xc3#\x06r\xd7\x9a\x9e\t\x12?Tp\xf5z\xe2\xf0A\x1e\x86\x06\xfd\xb5\xb7\xc9\xeeAm\x1a\xb2\x8f\xcb_hZ\xea\xf4\xc5n\x1cnx\xe8\xbd\xb3\xa0K=\xf0z1\xfd\xf3\x89Q\xa0k$\xf3\xda\xaf\xccA\xe0\xfe\xc7\x0b!\'\xf7\xd1\xcd\x86B \xdd\x17\xf6t\xdc\xca\xca\xb1o3\xf2!\x11\xa8*\xab\xc1u+})\xaa\x99h\xa6U\x04m\xa5\xcb\xb5\xfaO\x8d\xb8g\xfe]J\x0b\xa9\xfb\xf4\xaa\x9d\xaf\x87\xb8\xfa;\xe8*\xb3\xe3\xf9\xce)|\x0b%\xe2\x9d\x1fIL\xe3~#iM\x9b6tO\xa7\x05\xee\xf8\xe91\x8f\xfd\x9d\xa7\xc7\x04\x8cQ\x1c\x80>E=\xa18\x98\xe0\xaaJ\x96\xed\xc5,\xa1\xb6\xc1\x16\xe1\xa4J\xef\xc6E q\xae\xc4\xa9l\xd3\xb3\xb3\xa8\xef\'\x8cSNE\xcd\x8f\xef\xeaJT\xa6s,|\xcb\xeb\xc2\x8c\xa5\x98\xca@\xbd\xe7\xfb\xbdpi\xa6\xb1\x07O"\xc6&\xac\x90X\x953\xad\xd4\xfc\xa4ML0)gM\xdd\xee8\xbb\xb2V\x80\xafP0\xe7|rq\x84\n\x80\xecu,WD\xf4^\xd6\xcc\xab\xf7`,\xa7\xd61\xee3\xe6x\xb7Ys>7\xe4l\xf5\xfb\xeaK-\xf9AV\xb4V\xa2\x1d\x89/mDK\xe6t\x8f\xdd\xaac\xc7A\xe0XD,\xea%\x16v\x86\xf4\xd8\x9bc\x06^\x8a\xc7c\t\xe6\xc6JN\\\x8fG\xd7\xe54O<b!\x9f\xb2\xf1pxbI\x81\x8e\x90\x1e\xdd\xc8\xda\x85>#\xf8\x1cH\x84;\xfa\xb8W\xef\xc3\xf4a`/\x84\xa0\xff_L\x03\xf2\xac\t_\x13f\x8d4\\\xb0K\xd7\xa9\xb6X\x80\xcb\xa4C\xc4\xb1"O[#y\xd21a>5\xe6\x9f\xcfR\xc8c\xba\xf6\xddnD%\xc5\xf8h\x94\xb64\x00\x8adEi\t~\x97nF\xdc\xc0Q\x94D\x08%\'C\x12\x9c`\xb8\\\x00x\xb0w\x9bD\xce@\xe2\xc3\x19\xe9\x7f\xbep\xc3\xf4\xaf\xf3\x08x 
I\xd4\x06\xd3\xb5\xe4\x85\xaaa\xc2\xe7\xd3\x7f\xcf)@qj\xc4mb8b\x7f\x04\xec\x1f)\xa0\xe2\xdd*\xcd\x8e\x05\x9c\x1a\xf9\xb7\x1d\xf4r^\x0f\x1fl\x8e\x8f?\x82\xab\x0c\x86\xaa\x8a_\xc4\xa0\xfe\xc9\xd9\x95\x10\x04a\x0c\x93U\x92\xd1\xbd\xff\x8cj\x9f\xc7\x9a?\xb1\xa7j\x8d\x90oS\x9c!\xb2\xf4{\xbd\xebWN\xab7\x8d\xb7\xde\xd8\xf31J\x98\x97\xffht\xfc\x03\xb7\x16\xda\x17E}\xe8\x85\xb8\x8b]]f\xa6\x84\x00=g2\xb1\xb8\x1b-9\xaa\x9d,G\x9e\x86\x0c\xae\x13R\xbe9\xd2\x01\xeb\x85\x03\xc8y\xdf\x862O\xb0\xf6q<p\xe2D\x1d1\xf4\xda\xf6\xfb:2\xc9\xf8\x06\x97qA\xfd\xffG\xbb\x7f\xb1N\x9e\xa7\xf83+^\xf6\xbd\x94\x82\xd7\xdb\xd8\xfe\xfc\xbf\x9b\x1e\xe4v\x98\xfd\x16\x1b\xdf\xf7<O\x15&\xb3\xbb\xd9T\x8c\x02{M#\x00\xfa\xd7\x81\x9bC\xec\xbev\x9b\x04\x18\xde\xf9k3U1\xde>\'S\xa2\xf0-\x04\xa1\x10\x98\x99{\x9b\x10r\x12\x13;^\xe8n\xd1\xa9\x8fj\xb2!,h\x91%\xcc\xe8\x04\xf4\xd5\x91\xc5\x1aL\x9b\x02\x1d\xba\x1c\x08\xd7\x12\xbb8O\x1e\x12\x18a\x87\xc3gA\x16\x9c\xa1\x13/|\x04\x07\xa9\x10\xef\xf2\xf9\x12\xfa\x9f\xe7\x0b+"\xe4\xe3v\xa53Uw\x10\xfb\x93\'\xd7/\\O\xb1\xc7Z8\x15}\xd6\x83\xdf\x15\xe9ENBk\xfd\xfb\xf1\x90\xc3":\t\xee\xd2l\xc8\x04\xfa\x05\'\xe06\x18\x8a\xab&\xa00\x86\xd8^\xca\xa6}o\x80\x05{\xc9\xe7 \xe5k\xf6\x8d$h\xb72\xcb\xede\xfa%\xe2L/.k\xa7ey\xc8\xf4\xd5N%\x02R\xf7\xbch\xaf\xec\xad\xea\x9bI\xed\xd7s\xf7\x18o"l\xfa0\xa1/\xdca\xf9j\x05\x879\xd0\x7f\xb0y81\xb16I\xfd\x98\x9a\n\xf02I\x9c\xec\xa1\xbaN\x97\xc71\xd7:\xa1\xf0M\xc6\x9alf\x89\xf5\xdc<\\\xb2\xa2\x19\x1d\xfd\x16\xfb\x8c\x05\x99\xe7\xa1\xdd/\xbdc\xf8\x86\xbd)r=\x1d]6\xf2{c.\x16\x98\x1d\xdf\xabk\n\xb8\xa4c\xd5\xcf\x00\xa8\xf8\x83\x8e\xd8\x97\xbd\x03x\xe0\x00\xc6\xaa\xd6\xbc4r\xb9\xaaB\xfaX\xc9\xedYm\xa2\xeb\x179xr,\x13\x0c\xc1-<#*<u\x7f\x87\xc12\x8e\x02S\x0f{\x8e=P6\x08\xa3\x82h16\xf84\xc8(\x05`D\xbcF\xa54\x9aE+,xe\xd1\xe0\x0f\x800Y\xb4Y(B\'\xb4\xb1\xd5\x19r\x9a\xc3\x00G\x05\x16\xdf\xc4\x04\xfb\xdb\xdd\xa0 
\xed\x9c`\x90\x08\x8f\x8a\xa2\xd3\x1c\x9a+x\x0b\xd2\xd5\xcf\xb2KxA*\xfb\xfa\x94\xee#\x90\x94\x1ek\xc7\x18\xc4\xd1\x95U\x00\x0b\x9c\xad\x15\xe4\xb2b\xd0\x0c\x8e=g\xab\x9bY.\xd5[\xce\xef,\x9f\xc6e?\x80}*\x9f\xc3\xae\r\xe9\xd3\x8d\xd3\xb6\x9bk.\xaf\x85\xe7iH\x92\x90\xdf\xd2m\x13\xb4\x8f\xca\x01\x8c\xea\x18\x14y3[\xd2\x81]C\xcc\xffY^\x14\xfb\x9c[\x1bV:\xa9\x05h\xe1\xd2\x9cc\xb9ph\xf6^v\xd1\xfb\x1d\x1e;.\x1d\xe0\x96\xab$\xfa\xfaC,#\x82*\x86\x19\x13&--\xc7\xa2\xac\x14\xfe\xbf\\\x8c\\\xa2\xfc$\x95\x97\xf2\xaa\xda\x7f\xca\xd6\x0c\xc9\x1f\xd9\xa0u\xb1\xe6n\x8c\x80.\xd2\xff\x10\xdeg\x133\x9b\xe5\xc8\x02$\xa6\xaf\x888\x08\xf8\xfb5\x0e~\xcc\x80\xb9\x8c\xab\xc7\xb31\';\xd4\xe0\x07j\x14hm\x9e\n\x00s\x1b\xd6\xea\xf2\x12\xd3z')
|
mit-llREPO_NAMEspacegym-kspdgPATH_START.@spacegym-kspdg_extracted@spacegym-kspdg-main@src@kspdg@private_src@python3_9@Darwin_arm64@kspdg_envs@lbg1@lg4_envs.py@.PATH_END.py
|
{
"filename": "test_preprocess.py",
"repo_name": "GeminiDRSoftware/DRAGONS",
"repo_path": "DRAGONS_extracted/DRAGONS-master/geminidr/core/tests/test_preprocess.py",
"type": "Python"
}
|
# import os
# from copy import deepcopy
from itertools import count
import os
import astrodata
from astrodata.testing import ad_compare
import gemini_instruments
import numpy as np
import pytest
from astrodata.testing import ad_compare, download_from_archive
from geminidr.core.primitives_preprocess import Preprocess
from geminidr.gemini.lookups import DQ_definitions as DQ
# from geminidr.gmos.primitives_gmos_image import GMOSImage
from geminidr.gsaoi.primitives_gsaoi_image import GSAOIImage
from geminidr.niri.primitives_niri_image import NIRIImage
from gempy.library.astrotools import cartesian_regions_to_slices
from numpy.testing import (assert_almost_equal, assert_array_almost_equal,
assert_array_equal)
from recipe_system.cal_service.userdb import UserDB
from recipe_system.cal_service.caldb import CalReturn
# Enable extra debugging output when the DEBUG environment variable is set.
# NOTE: bool() of any non-empty string is True, so DEBUG=0 or DEBUG=false in
# the environment still enables this flag; only an unset/empty DEBUG disables it.
DEBUG = bool(os.getenv('DEBUG', False))

# (input file, expected output file) pairs for the nonlinearity-correction
# regression tests: one pair with data still in ADU, one already in electrons.
nonlinearity_datasets = (
    ('S20190115S0073_dqAdded.fits', 'S20190115S0073_nonlinearityCorrected_adu.fits'),
    ('S20190115S0073_varAdded.fits', 'S20190115S0073_nonlinearityCorrected_electron.fits'),
)
# ---- Fixtures ----------------------------------------
@pytest.fixture
def niri_images(niri_image):
    """Create two NIRI images, one all 1s, the other all 2s"""
    def _constant_image(value):
        # Fake frames start at 0; add the constant to fill the frame.
        ad = niri_image()
        ad[0].data += value
        return ad

    return NIRIImage([_constant_image(value) for value in (1, 2)])
@pytest.fixture
def niriprim(monkeypatch):
    """NIRIImage primitive set built from N20190120S0287, with the DQ plane
    added using a locally-shipped BPM (the caldb lookup is monkeypatched so
    no calibration service is contacted)."""
    ad = astrodata.open(download_from_archive("N20190120S0287.fits"))
    primitives = NIRIImage([ad])

    # Resolve the bundled NIRI bad-pixel mask relative to this test module.
    bpm_path = os.path.join(os.path.dirname(__file__),
                            '../../niri/lookups/BPM/NIRI_bpm.fits')

    def _fake_get_processed_bpm(*args, **kwargs):
        return CalReturn([bpm_path], [None])

    monkeypatch.setattr(primitives.caldb, "get_processed_bpm",
                        _fake_get_processed_bpm)
    primitives.addDQ()
    return primitives
@pytest.fixture
def niriprim2():
    """NIRIImage primitive set with a two-extension copy of N20190120S0287
    (the single extension is duplicated) and the DQ plane added."""
    ad = astrodata.open(download_from_archive("N20190120S0287.fits"))
    # Duplicate the first extension so the file has two identical ones.
    ad.append(ad[0])
    primitives = NIRIImage([ad])
    primitives.addDQ()
    return primitives
@pytest.fixture
def niri_image(astrofaker):
    """Create a fake NIRI image.

    Optional
    --------
    keywords : dict
        A dictionary with keys equal to FITS header keywords, whose values
        will be propagated to the new image.
    """
    def _niri_image(filename='N20010101S0001.fits', keywords=None):
        # Use a None sentinel instead of a mutable default ({}): a shared
        # default dict would persist across calls to this factory, so any
        # mutation (by a caller or by astrofaker) would leak between tests.
        if keywords is None:
            keywords = {}
        ad = astrofaker.create('NIRI', 'IMAGE',
                               extra_keywords=keywords,
                               filename=filename)
        ad.init_default_extensions()
        return ad

    return _niri_image
@pytest.fixture
def niri_sequence(niri_image):
    """Create a sequence of fake NIRI images.

    Parameters
    ----------
    marker : str, Options: ('object', 'sky1', 'sky2', 'sky3')
        Can be called in a test like `niri_sequence('object')` as long as it's
        passed as an argument.
    """
    # Use an infinite iterator to ensure fake files get unique filenames.
    filenum = count(1)

    # marker -> (RA offsets, dec offsets, guided?)
    _OFFSETS = {
        'object': ([-6, -6, 0], [0, -6, -6], True),
        'sky1': ([-180, -240, -240], [180, 180, 120], False),
        'sky2': ([330, 330, 270], [-280, -210, -280], False),
        'sky3': ([470, 430, 420], [420, 420, 370], False),
    }

    def _niri_sequence(marker):
        if marker not in _OFFSETS:
            raise ValueError(f'"{marker}" not recognized as input')
        ra_offsets, dec_offsets, guided = _OFFSETS[marker]

        adoutputs = []
        for raoff, decoff in zip(ra_offsets, dec_offsets):
            ad = niri_image(filename=f'N20010101S{next(filenum):04d}.fits')
            ad.sky_offset(raoff, decoff)
            if not guided:
                # Need to add unguided keywords to automatically detect as sky
                for keyword in ('PWFS1_ST', 'PWFS2_ST', 'OIWFS_ST'):
                    ad.phu[keyword] = 'True'
            adoutputs.append(ad)
        return adoutputs

    return _niri_sequence
# ---- Tests ---------------------------------------------
def test_adu_to_electrons(niri_image):
    """ADUToElectrons scales the data, gain, and (non-)linearity levels."""
    ad = niri_image()
    ad[0].data += 1
    gain = ad.gain()[0]
    p = NIRIImage([ad])
    orig_sat = ad.saturation_level()[0]
    orig_nonlin = ad.non_linear_level()[0]
    p.prepare()
    # prepare() must not change the saturation/non-linearity levels
    assert ad.saturation_level()[0] == orig_sat
    assert ad.non_linear_level()[0] == orig_nonlin
    p.ADUToElectrons()
    # After conversion the gain is unity and the levels scale by the old gain
    assert ad.gain() == [1.0]
    assert ad.saturation_level()[0] == orig_sat * gain
    assert ad.non_linear_level()[0] == orig_nonlin * gain
    assert_array_almost_equal(ad[0].data, gain)
@pytest.mark.dragons_remote_data
def test_apply_dq_plane_default(niriprim):
    """Default params: replace masked pixels by median of the image."""
    ad = niriprim.applyDQPlane()[0]
    # [527:533, 430:435] is a bad-pixel region flagged by the BPM in addDQ
    assert_array_equal(ad[0].data[527:533, 430:435], 35)
@pytest.mark.dragons_remote_data
def test_apply_dq_plane_fixed_value(niriprim):
    """Masked pixels are replaced by a user-supplied fixed value."""
    ad = niriprim.applyDQPlane(replace_value=0)[0]
    assert_array_equal(ad[0].data[527:533, 430:435], 0)
@pytest.mark.dragons_remote_data
def test_apply_dq_plane_mean(niriprim):
    """Masked pixels are replaced by the mean of the image."""
    ad = niriprim.applyDQPlane(replace_value="mean")[0]
    assert_array_almost_equal(ad[0].data[527:533, 430:435], 38.56323,
                              decimal=5)
@pytest.mark.dragons_remote_data
def test_apply_dq_plane_replace_flags(niriprim):
    """Only pixels whose DQ matches replace_flags are replaced."""
    # First check with no_data, which is not present in the mask so pixels
    # should not be changed
    data_orig = niriprim.streams['main'][0][0].data.copy()
    ad = niriprim.applyDQPlane(replace_flags=DQ.no_data, replace_value=0)[0]
    assert_array_equal(ad[0].data[527:533, 430:435],
                       data_orig[527:533, 430:435])
    # Now with bad_pixel, so we should get 0 for this region
    ad = niriprim.applyDQPlane(replace_flags=DQ.bad_pixel, replace_value=0)[0]
    assert_array_equal(ad[0].data[527:533, 430:435], 0)
@pytest.mark.dragons_remote_data
def test_apply_dq_plane_ring_median(niriprim):
    """Masked pixels are replaced by the median of an annulus around each
    pixel (inner radius 3, outer radius 5)."""
    ad = niriprim.applyDQPlane(replace_value='median', inner=3, outer=5)[0]
    assert_array_equal(ad[0].data[527:533, 430:435],
                       [[26., 27., 27., 27., 29.],
                        [25., 27., 27., 26., 26.],
                        [26., 27., 26., 25., 26.],
                        [31., 31., 29., 25., 29.],
                        [31., 30., 27., 26., 27.],
                        [31., 30., 27., 26., 28.]])
@pytest.mark.dragons_remote_data
def test_apply_dq_plane_ring_mean(niriprim):
    """Masked pixels are replaced by the mean of an annulus around each
    pixel (inner radius 3, outer radius 5)."""
    ad = niriprim.applyDQPlane(replace_value='mean', inner=3, outer=5)[0]
    assert_array_almost_equal(
        ad[0].data[527:533, 430:435],
        [[28.428572, 28.82353, 44.878788, 43.6, 43.314285],
         [27.6, 28.32353, 45.14706, 45.45714, 31.17647],
         [27.710526, 28.846153, 42.71795, 42.75, 30.868422],
         [41.871796, 43.825, 44.1, 42.325, 30.710526],
         [44., 45.675674, 48.166668, 32.555557, 30.694445],
         [31.68421, 32.432434, 33.7027, 32.675674, 30.552631]],
        decimal=5
    )
@pytest.mark.dragons_remote_data
def test_fixpixels(niriprim):
    """fixPixels interpolates over the given regions and flags them as
    no_data in the DQ plane."""
    regions = [
        '430:437,513:533',  # vertical region
        '450,521',  # single pixel
        '429:439,136:140',  # horizontal region
    ]
    ad = niriprim.fixPixels(regions=';'.join(regions), debug=DEBUG)[0]
    # All fixed pixels must carry the no_data flag
    for region in regions:
        sy, sx = cartesian_regions_to_slices(region)
        assert_array_equal(ad[0].mask[sy, sx] & DQ.no_data, DQ.no_data)
    sy, sx = cartesian_regions_to_slices(regions[0])
    assert_almost_equal(ad[0].data[sy, sx].min(), 18.555, decimal=2)
    assert_almost_equal(ad[0].data[sy, sx].max(), 42.888, decimal=2)
    sy, sx = cartesian_regions_to_slices(regions[1])
    assert_almost_equal(ad[0].data[sy, sx].min(), 24.5, decimal=2)
    assert_almost_equal(ad[0].data[sy, sx].max(), 24.5, decimal=2)
    sy, sx = cartesian_regions_to_slices(regions[2])
    assert_almost_equal(ad[0].data[sy, sx].min(), 37.166, decimal=2)
    assert_almost_equal(ad[0].data[sy, sx].max(), 60.333, decimal=2)
@pytest.mark.dragons_remote_data
def test_fixpixels_errors(niriprim):
    """Invalid regions raise informative ValueErrors."""
    with pytest.raises(ValueError, match="region .* out of bound"):
        niriprim.fixPixels(regions='4300,*')[0]
    with pytest.raises(ValueError,
                       match="no good data left for the interpolation"):
        niriprim.fixPixels(regions='*,*')[0]
    with pytest.raises(ValueError, match="no good data left for the "
                       "interpolation along the chosen axis"):
        niriprim.fixPixels(regions='430,*', axis=2)[0]
@pytest.mark.dragons_remote_data
def test_fixpixels_median(niriprim):
    """With use_local_median=True a single pixel is replaced by the local
    median."""
    regions = [
        '450,521',  # single pixel
    ]
    ad = niriprim.fixPixels(regions=';'.join(regions),
                            use_local_median=True, debug=DEBUG)[0]
    sy, sx = cartesian_regions_to_slices(regions[0])
    assert_array_equal(ad[0].mask[sy, sx] & DQ.no_data, DQ.no_data)
    assert_almost_equal(ad[0].data[sy, sx].min(), 28, decimal=2)
    assert_almost_equal(ad[0].data[sy, sx].max(), 28, decimal=2)
@pytest.mark.dragons_remote_data
def test_fixpixels_column(niriprim):
    """A whole column ('433,*') is interpolated from its neighbours."""
    regions = ['433,*']
    ad = niriprim.fixPixels(regions=';'.join(regions),
                            use_local_median=True, debug=DEBUG)[0]
    assert_almost_equal(ad[0].data[500:527, 432].min(), 18.5, decimal=2)
    assert_almost_equal(ad[0].data[500:527, 432].max(), 43, decimal=2)
@pytest.mark.dragons_remote_data
def test_fixpixels_line(niriprim):
    """A whole row ('*, 533') is interpolated from its neighbours."""
    regions = ['*, 533']
    ad = niriprim.fixPixels(regions=';'.join(regions),
                            use_local_median=True, debug=DEBUG)[0]
    assert_almost_equal(ad[0].data[532, 430:435].min(), 22, decimal=2)
    assert_almost_equal(ad[0].data[532, 430:435].max(), 38.5, decimal=2)
@pytest.mark.dragons_remote_data
def test_fixpixels_specify_axis(niriprim):
    """The interpolation axis can be forced; out-of-range axes raise."""
    regions = [
        '430:437,513:533',  # vertical region
    ]
    # axis=0 and axis=3 are rejected for 2D data; axis=2 is accepted
    with pytest.raises(ValueError):
        ad = niriprim.fixPixels(regions=';'.join(regions), axis=0)[0]
    with pytest.raises(ValueError):
        ad = niriprim.fixPixels(regions=';'.join(regions), axis=3)[0]
    ad = niriprim.fixPixels(regions=';'.join(regions), axis=2, debug=DEBUG)[0]
    sy, sx = cartesian_regions_to_slices(regions[0])
    assert_array_equal(ad[0].mask[sy, sx] & DQ.no_data, DQ.no_data)
    assert_almost_equal(ad[0].data[sy, sx].min(), 17.636, decimal=2)
    assert_almost_equal(ad[0].data[sy, sx].max(), 38.863, decimal=2)
@pytest.mark.dragons_remote_data
def test_fixpixels_with_file(niriprim, tmp_path):
    """Regions from the `regions` parameter and from a regions file are
    combined."""
    regions = [
        '450,521',  # single pixel
        '429:439,136:140',  # horizontal region
    ]
    regions_file = str(tmp_path / 'regions.txt')
    with open(regions_file, mode='w') as f:
        f.write('\n'.join(regions))
    ad = niriprim.fixPixels(regions='430:437,513:533',  # vertical region
                            regions_file=regions_file,
                            debug=DEBUG)[0]
    # Region given directly on the command line
    sy, sx = cartesian_regions_to_slices('430:437,513:533')
    assert_almost_equal(ad[0].data[sy, sx].min(), 18.555, decimal=2)
    assert_almost_equal(ad[0].data[sy, sx].max(), 42.888, decimal=2)
    # Regions read from the file
    sy, sx = cartesian_regions_to_slices(regions[0])
    assert_almost_equal(ad[0].data[sy, sx].min(), 24.5, decimal=2)
    assert_almost_equal(ad[0].data[sy, sx].max(), 24.5, decimal=2)
    sy, sx = cartesian_regions_to_slices(regions[1])
    assert_almost_equal(ad[0].data[sy, sx].min(), 37.166, decimal=2)
    assert_almost_equal(ad[0].data[sy, sx].max(), 60.333, decimal=2)
@pytest.mark.dragons_remote_data
def test_fixpixels_3D(astrofaker):
    """3D regions are interpolated along the default axis and restore the
    original (reference) values exactly."""
    np.random.seed(42)
    arr = np.arange(4 * 5 * 6, dtype=float).reshape(4, 5, 6)
    # Shuffle the values to be sure the interpolation is done on the good axis
    # (when checking the data below)
    i, k = np.arange(4), np.arange(6)
    np.random.shuffle(i)
    np.random.shuffle(k)
    arr = arr[i, :, :]
    arr = arr[:, :, k]
    refarr = arr.copy()
    # Set to 0 the region to be fixed
    arr[1:3, 2:4, 1:5] = 0
    ad = astrofaker.create('NIRI', 'IMAGE')
    ad.append(arr)
    p = Preprocess([ad])
    regions = ['2:5,3:4,2:3']
    ad = p.fixPixels(regions=';'.join(regions), debug=DEBUG)[0]
    assert_array_equal(refarr, ad[0].data)
@pytest.mark.dragons_remote_data
def test_fixpixels_3D_axis(astrofaker):
    """3D regions are interpolated along an explicitly chosen axis."""
    np.random.seed(42)
    arr = np.arange(4 * 5 * 6, dtype=float).reshape(4, 5, 6)
    # Shuffle the values to be sure the interpolation is done on the good axis
    # (when checking the data below)
    # NOTE(review): j = np.arange(4) indexes axis 1, which has length 5, so
    # arr shrinks to shape (4, 4, 6); possibly np.arange(5) was intended —
    # confirm before changing, the test passes either way.
    j, k = np.arange(4), np.arange(6)
    np.random.shuffle(j)
    np.random.shuffle(k)
    arr = arr[:, j, :]
    arr = arr[:, :, k]
    refarr = arr.copy()
    # Set to 0 the region to be fixed
    arr[1:3, 2:4, 1:5] = 0
    ad = astrofaker.create('NIRI', 'IMAGE')
    ad.append(arr)
    p = Preprocess([ad])
    regions = ['2:5,3:4,2:3']
    ad = p.fixPixels(regions=';'.join(regions), debug=DEBUG, axis=3)[0]
    assert_array_equal(refarr, ad[0].data)
@pytest.mark.dragons_remote_data
def test_fixpixels_multiple_ext(niriprim2):
    """Regions prefixed with 'n/' apply only to extension n; unprefixed
    regions apply to all extensions."""
    regions = [
        '430:437, 513:533',  # vertical region, all extensions
        '1 / 450,521',  # single pixel, extension 1 only
        '2/429:439, 136:140',  # horizontal region, extension 2 only
    ]
    ad = niriprim2.fixPixels(regions=';'.join(regions), debug=DEBUG)[0]
    # for all extensions
    sy, sx = cartesian_regions_to_slices(regions[0])
    assert_almost_equal(ad[0].data[sy, sx].min(), 18.555, decimal=2)
    assert_almost_equal(ad[0].data[sy, sx].max(), 42.888, decimal=2)
    assert_almost_equal(ad[1].data[sy, sx].min(), 18.555, decimal=2)
    assert_almost_equal(ad[1].data[sy, sx].max(), 42.888, decimal=2)
    # only ext 1 fixed; ext 2 keeps its raw values there
    sy, sx = cartesian_regions_to_slices(regions[1][3:])
    assert_almost_equal(ad[0].data[sy, sx].min(), 24.5, decimal=2)
    assert_almost_equal(ad[0].data[sy, sx].max(), 24.5, decimal=2)
    assert_almost_equal(ad[1].data[sy, sx].min(), 2733, decimal=2)
    assert_almost_equal(ad[1].data[sy, sx].max(), 2733, decimal=2)
    # only ext 2 fixed; ext 1 keeps its raw values there
    sy, sx = cartesian_regions_to_slices(regions[2][2:])
    assert_almost_equal(ad[0].data[sy, sx].min(), -125, decimal=2)
    assert_almost_equal(ad[0].data[sy, sx].max(), 21293, decimal=2)
    assert_almost_equal(ad[1].data[sy, sx].min(), 37.166, decimal=2)
    assert_almost_equal(ad[1].data[sy, sx].max(), 60.333, decimal=2)
@pytest.mark.regression
@pytest.mark.preprocessed_data
@pytest.mark.parametrize('dataset', nonlinearity_datasets)
def test_nonlinearity_correct(path_to_inputs, path_to_refs, dataset):
    """Regression test: only GSAOI uses the core primitive with real
    nonlinearity coefficients, so compare its output against a reference.

    Fix: removed a stale commented-out duplicate of the function signature
    that was left above the def line.
    """
    ad = astrodata.open(os.path.join(path_to_inputs, dataset[0]))
    p = GSAOIImage([ad])
    ad_out = p.nonlinearityCorrect().pop()
    ad_ref = astrodata.open(os.path.join(path_to_refs, dataset[1]))
    # Filenames differ by processing suffix, so exclude them
    assert ad_compare(ad_out, ad_ref, ignore=['filename'])
# TODO @bquint: clean up these tests
def test_scale_by_exposure_time(niri_images):
    """scaleByExposureTime rescales the data to a common exposure time.

    Fix: removed a leftover debug print() from the test body.
    """
    ad1, ad2 = niri_images.streams['main']
    ad2.phu[ad2._keyword_for('exposure_time')] *= 0.5
    ad2_orig_value = ad2[0].data.mean()
    ad1, ad2 = niri_images.scaleByExposureTime(time=None)
    # Check that ad2 had its data doubled
    assert abs(ad2[0].data.mean() - ad2_orig_value * 2) < 0.001
    ad1, ad2 = niri_images.scaleByExposureTime(time=1)
    # Check that ad2 has been rescaled to 1-second
    assert abs(ad2[0].data.mean() - ad2_orig_value / ad2.phu["ORIGTEXP"]) < 0.001
# @pytest.mark.xfail(reason="Test needs revision", run=False)
# def test_add_object_mask_to_dq(astrofaker):
# ad_orig = astrofaker.create('F2', 'IMAGE')
# # astrodata.open(os.path.join(TESTDATAPATH, 'GMOS', 'N20150624S0106_refcatAdded.fits'))
# p = GMOSImage([deepcopy(ad_orig)])
# ad = p.addObjectMaskToDQ()[0]
# for ext, ext_orig in zip(ad, ad_orig):
# assert all(ext.mask[ext.OBJMASK == 0] == ext_orig.mask[ext.OBJMASK == 0])
# assert all(ext.mask[ext.OBJMASK == 1] == ext_orig.mask[ext.OBJMASK == 1] | 1)
# @pytest.mark.xfail(reason="Test needs revision", run=False)
# def test_adu_to_electrons(astrofaker):
# ad = astrofaker.create("NIRI", "IMAGE")
# # astrodata.open(os.path.join(TESTDATAPATH, 'NIRI', 'N20070819S0104_dqAdded.fits'))
# p = NIRIImage([ad])
# ad = p.ADUToElectrons()[0]
# assert ad_compare(ad, os.path.join(TESTDATAPATH, 'NIRI',
# 'N20070819S0104_ADUToElectrons.fits'))
def test_associate_sky(niri_sequence):
    """Each science frame gets every other frame as an associated sky.

    Fix: the expected count was computed as ``len(niri_sequence('object'))``
    *inside* the assertion loop, which generated three new fake frames (and
    advanced the shared filename counter) on every iteration.  Count the
    inputs once instead.
    """
    adinputs = niri_sequence('object')
    nframes = len(adinputs)
    p = NIRIImage(adinputs)
    p.separateSky()  # Difficult to construct this by hand
    p.associateSky()
    filename_set = {ad.phu['ORIGNAME'] for ad in adinputs}
    # Test here is that each science frame has all other frames as skies
    for ad in p.showList():
        skies = [a[0].replace('_skyAssociated', '') for a in ad.SKYTABLE]
        assert len(ad.SKYTABLE) == nframes - 1
        assert set([ad.phu['ORIGNAME']] + skies) == filename_set
def test_associate_sky_pass_skies(niri_sequence):
    """Explicitly passed sky frames are used verbatim, order preserved."""
    obj_inputs = niri_sequence('object')
    sky_inputs = niri_sequence('sky1')
    in_sky_names = [ad.filename for ad in sky_inputs]
    p = NIRIImage(obj_inputs)
    # Don't run separateSky, this is to simulate starting and resuming work
    # with pre-known sky frames.
    p.associateSky(sky=sky_inputs)
    out_sky_names = [ad.phu['ORIGNAME'] for ad in p.streams['sky']]
    assert in_sky_names == out_sky_names
def test_associate_sky_use_all(niri_sequence):
    """With use_all=True every sky beyond `distance` is associated."""
    objects = niri_sequence('object')
    skies1 = niri_sequence('sky1')
    skies2 = niri_sequence('sky2')
    expected_skies = set([ad.filename for ad in skies2])
    p = NIRIImage(objects + skies1 + skies2)
    p.separateSky()
    # Check that 'use_all' sets all skies beyond the minimum distance as sky.
    # Skies from "sky1" should be within the minimum distance, so all frames
    # in the 'main' stream should have all skies from "sky2" in their SKYTABLE.
    p.associateSky(distance=305, use_all=True)
    for ad in p.streams['main']:
        skies = set([row[0].replace('_skyAssociated', '')
                     for row in ad.SKYTABLE])
        assert skies == expected_skies - set([ad.phu['ORIGNAME']])
def test_associate_sky_exclude_all(niri_sequence):
    """If every sky lies within `distance`, no frame gets a SKYTABLE."""
    objects = niri_sequence('object')
    skies1 = niri_sequence('sky1')
    p = NIRIImage(objects + skies1)
    p.separateSky()
    p.associateSky(distance=1000)
    # assert len(p.streams['no_skies']) == len(objects)
    for ad in p.showList():
        # No sky was associated, so the SKYTABLE attribute must not exist
        with pytest.raises(AttributeError):
            ad.SKYTABLE
def test_associate_sky_exclude_some(niri_image, niri_sequence):
    """Object frames closer than `distance` to all skies end up in the
    'no_skies' stream."""
    objects = niri_sequence('object')
    # One distant frame that *does* have valid skies
    extra_frame = [niri_image(filename='N20010101S0099.fits')]
    extra_frame[0].sky_offset(600, 600)
    skies1 = niri_sequence('sky1')
    object_names = set([ad.filename for ad in objects])
    p = NIRIImage(objects + extra_frame + skies1)
    p.separateSky()
    p.associateSky(distance=500)
    no_skies = set([ad.phu['ORIGNAME'] for ad in p.streams['no_skies']])
    # Check that frames in 'objects' have been put in the 'no_skies' stream
    # since they're closer to the skies than the minimum distance
    assert object_names == no_skies
# def test_correctBackgroundToReference(self):
# pass
# def test_darkCorrect(self):
# ad = astrodata.open(os.path.join(TESTDATAPATH, 'NIRI',
# 'N20070819S0104_nonlinearityCorrected.fits'))
# p = NIRIImage([ad])
# ad = p.darkCorrect()[0]
# assert ad_compare(ad, os.path.join(TESTDATAPATH, 'NIRI',
# 'N20070819S0104_darkCorrected.fits'))
@pytest.mark.xfail(reason="Test needs revision", run=False)
def test_darkCorrect_with_af(astrofaker):
    """darkCorrect with astrofaker frames — known broken, kept for revision."""
    science = astrofaker.create('NIRI', 'IMAGE')
    dark = astrofaker.create('NIRI', 'IMAGE')
    p = NIRIImage([science])
    p.darkCorrect([science], dark=dark)
    science.subtract(dark)
    science.filename = 'N20010101S0001.fits'
    # NOTE(review): comparing the doubly-subtracted science against the dark
    # looks wrong — revisit when un-xfailing this test.
    assert ad_compare(science, dark)
# af.init_default_extensions()
# af[0].mask = np.zeros_like(af[0].data, dtype=np.uint16)
# def test_flatCorrect(self):
# ad = astrodata.open(os.path.join(TESTDATAPATH, 'NIRI',
# 'N20070819S0104_darkCorrected.fits'))
# p = NIRIImage([ad])
# ad = p.flatCorrect()[0]
# assert ad_compare(ad, os.path.join(TESTDATAPATH, 'NIRI',
# 'N20070819S0104_flatCorrected.fits'))
#
# def test_makeSky(self):
# pass
#
# def test_normalizeFlat(self):
# flat_file = os.path.join(TESTDATAPATH, 'NIRI',
# 'N20070913S0220_flat.fits')
# ad = astrodata.open(flat_file)
# ad.multiply(10.0)
# del ad.phu['NORMLIZE'] # Delete timestamp of previous processing
# p = NIRIImage([ad])
# ad = p.normalizeFlat(suffix='_flat', strip=True)[0]
# assert ad_compare(ad, flat_file)
#
def test_separate_sky_offset(niri_sequence):
    """Frames are split into object/sky streams automatically."""
    object_frames = niri_sequence('object')
    sky_frames = niri_sequence('sky1')
    adinputs = object_frames + sky_frames
    target_filenames = set([ad.filename for ad in object_frames])
    sky_filenames = set([ad.filename for ad in sky_frames])
    p = NIRIImage(adinputs)
    p.separateSky()
    target_names = set([ad.phu['ORIGNAME'] for ad in p.streams['main']])
    sky_names = set([ad.phu['ORIGNAME'] for ad in p.streams['sky']])
    assert len(p.streams['main']) == len(object_frames)
    assert len(p.streams['sky']) == len(sky_frames)
    assert target_filenames == target_names
    assert sky_filenames == sky_names
@pytest.mark.parametrize('target', ['object', 'sky1'])
def test_separate_sky_all_one_type(target, niri_sequence):
    """With only one group present, every frame goes into both streams."""
    frames = niri_sequence(target)
    in_names = set([ad.filename for ad in frames])
    p = NIRIImage(frames)
    p.separateSky()
    out_obj_names = set(ad.phu['ORIGNAME'] for ad in p.streams['main'])
    out_sky_names = set(ad.phu['ORIGNAME'] for ad in p.streams['sky'])
    # Change to testing filenames
    assert out_obj_names == out_sky_names
    assert out_obj_names == in_names
    assert out_sky_names == in_names
@pytest.mark.parametrize('frac_FOV', [0.9, 0.5])
def test_separate_sky_frac_FOV(frac_FOV, niri_sequence):
    """Shrinking the effective FOV still separates the two dither groups."""
    object_frames = niri_sequence('object')
    sky_frames = niri_sequence('sky2')
    adinputs = object_frames + sky_frames
    p = NIRIImage(adinputs)
    p.separateSky(frac_FOV=frac_FOV)
    # Check filenames just in case we ever change things such that astrodata
    # objects aren't held in memory anymore.
    out_obj_names = set(ad.phu['ORIGNAME'] for ad in p.streams['main'])
    out_sky_names = set(ad.phu['ORIGNAME'] for ad in p.streams['sky'])
    assert out_obj_names != out_sky_names
def test_separate_sky_cross_assign_frames(niri_sequence):
    """ref_obj/ref_sky override the automatic classification entirely."""
    niri_objects = niri_sequence('object')
    niri_skies = niri_sequence('sky1')
    obj_filenames = ','.join([ad.filename for ad in niri_objects])
    sky_filenames = ','.join([ad.filename for ad in niri_skies])
    adinputs = niri_objects
    adinputs.extend(niri_skies)
    p = NIRIImage(adinputs)
    # Deliberately swap the two groups via the user-override parameters
    p.separateSky(ref_obj=sky_filenames, ref_sky=obj_filenames)
    obj_names = ','.join([ad.phu['ORIGNAME'] for ad in p.streams['main']])
    sky_names = ','.join([ad.phu['ORIGNAME'] for ad in p.streams['sky']])
    assert obj_filenames == sky_names
    assert sky_filenames == obj_names
@pytest.mark.parametrize('frames', [0, -1])
def test_separate_sky_cross_assign_single_frames(frames, niri_sequence):
    """Test user assigning frames as sky or object.

    Assigning a single frame from each group is expected to flip the
    classification of the whole group.
    """
    niri_objects = niri_sequence('object')
    niri_skies = niri_sequence('sky1')
    manual_obj = niri_skies[frames].filename
    manual_sky = niri_objects[frames].filename
    obj_filenames = ','.join([ad.filename for ad in niri_objects])
    sky_filenames = ','.join([ad.filename for ad in niri_skies])
    adinputs = niri_objects
    adinputs.extend(niri_skies)
    p = NIRIImage(adinputs)
    p.separateSky(ref_obj=manual_obj,
                  ref_sky=manual_sky)
    obj_names = ','.join([ad.phu['ORIGNAME'] for ad in p.streams['main']])
    sky_names = ','.join([ad.phu['ORIGNAME'] for ad in p.streams['sky']])
    assert obj_filenames == sky_names
    assert sky_filenames == obj_names
@pytest.mark.parametrize('marker', ('object', 'sky'))
def test_separate_sky_assign_one_group(marker, niri_sequence):
    """Manually assigning only one group still classifies the other."""
    niri_objects = niri_sequence('object')
    niri_skies = niri_sequence('sky1')
    adinputs = niri_objects + niri_skies
    # Normalize the guide-state keywords on all frames so guiding cannot be
    # used to discriminate — TODO confirm this marks every frame as guided
    for ad in adinputs:
        for keyword in ('PWFS1_ST', 'PWFS2_ST', 'OIWFS_ST'):
            try:
                del ad.phu[keyword]
            except KeyError:
                pass
        ad.phu["OIWFS_ST"] = True
    filenames = {}
    filenames['object'] = ','.join([ad.filename for ad in niri_objects])
    filenames['sky'] = ','.join([ad.filename for ad in niri_skies])
    p = NIRIImage(adinputs)
    if marker == 'object':
        p.separateSky(ref_obj=filenames['object'])
    elif marker == 'sky':
        p.separateSky(ref_sky=filenames['sky'])
    obj_names = ','.join([ad.phu['ORIGNAME'] for ad in p.streams['main']])
    sky_names = ','.join([ad.phu['ORIGNAME'] for ad in p.streams['sky']])
    assert obj_names == filenames['object']
    assert sky_names == filenames['sky']
def test_separate_sky_assigned_header_keywords(niri_sequence):
    """Pre-set OBJFRAME/SKYFRAME keywords combined with conflicting
    ref_obj/ref_sky apparently place every frame in both streams."""
    obj_frames = niri_sequence('object')
    sky_frames = niri_sequence('sky1')
    for ad in obj_frames:
        ad.phu['OBJFRAME'] = 'True'
    for ad in sky_frames:
        ad.phu['SKYFRAME'] = 'True'
    obj = obj_frames[0].filename
    sky = sky_frames[0].filename
    p = NIRIImage(obj_frames + sky_frames)
    # Manual assignment contradicts the header keywords on purpose
    p.separateSky(ref_obj=sky, ref_sky=obj)
    assert len(p.streams['main']) == len(p.streams['sky'])
    assert len(p.streams['main']) == len(obj_frames) + len(sky_frames)
@pytest.mark.parametrize('marker', ['object', 'sky1'])
def test_separate_sky_missing(marker, niri_sequence):
    """Nonexistent ref_obj/ref_sky filenames are handled gracefully."""
    input_length = len(niri_sequence(marker))
    p = NIRIImage(niri_sequence(marker))
    # Pass a non-existent filename to check handling of bad input
    p.separateSky(ref_sky='S20110101S0001.fits',
                  ref_obj='S20110101S0002.fits')
    assert len(p.streams['main']) == len(p.streams['sky'])
    assert len(p.streams['main']) == input_length
@pytest.mark.parametrize('groups', [('object', 'sky1'),
                                    ('object', 'sky1', 'sky2', 'sky3')])
def test_separate_sky_proximity(groups, niri_sequence):
    """When guiding cannot discriminate, groups are split by proximity."""
    adinputs = []
    for marker in groups:
        adinputs.extend(niri_sequence(marker))
    # Set all inputs to be "guided" data to remove that as a means of
    # determining skies to force the use of proximity of groups.
    for ad in adinputs:
        for keyword in ('PWFS1_ST', 'PWFS2_ST', 'OIWFS_ST'):
            try:
                del ad.phu[keyword]
            except KeyError:
                pass
        ad.phu["OIWFS_ST"] = True
    p = NIRIImage(adinputs)
    p.separateSky()
    # In both cases the proximity assignment should put half the groups as
    # 'object' and half as 'sky'.
    assert len(p.streams['main']) * 2 == len(adinputs)
    assert len(p.streams['sky']) * 2 == len(adinputs)
# @pytest.mark.parametrize('frame', ['object', 'sky'])#, 'mixed'])
# def test_list_ra_dec(frame, niri_sequence):
# adinputs = niri_sequence(frame)
# for ad in adinputs:
# # print(f'{ad.filename}')
# print(ad.phu['RA'], ad.phu['RAOFFSET'], sep=', ')
# for ad in adinputs:
# print(ad.phu['DEC'], ad.phu['DECOFFSE'], sep=',')
#
# def test_skyCorrect(self):
# pass
#
# def test_subtractSky(self):
# pass
#
# def test_subtractSkyBackground(self):
# ad = astrodata.open(os.path.join(TESTDATAPATH, 'NIRI',
# 'N20070819S0104_flatCorrected.fits'))
# ad.hdr['SKYLEVEL'] = 1000.0
# orig_data = ad[0].data.copy()
# p = NIRIImage([ad])
# ad = p.subtractSkyBackground()[0]
# assert (orig_data - ad[0].data).min() > 999.99
# assert (orig_data - ad[0].data).max() < 1000.01
#
# def test_thresholdFlatfield(self):
# ad = astrodata.open(os.path.join(TESTDATAPATH, 'NIRI',
# 'N20070913S0220_flat.fits'))
# del ad.phu['TRHFLAT'] # Delete timestamp of previous processing
# ad[0].data[100, 100] = 20.0
# p = NIRIImage([ad])
# ad = p.thresholdFlatfield()[0]
# assert ad[0].mask[100, 100] == 64
|
GeminiDRSoftwareREPO_NAMEDRAGONSPATH_START.@DRAGONS_extracted@DRAGONS-master@geminidr@core@tests@test_preprocess.py@.PATH_END.py
|
{
"filename": "README.md",
"repo_name": "geoffryan/afterglowpy",
"repo_path": "afterglowpy_extracted/afterglowpy-master/README.md",
"type": "Markdown"
}
|
# Numeric GRB Afterglow models
A Python 3 module to calculate GRB afterglow light curves and spectra. Details of the methods can be found in [Ryan et al 2020](https://ui.adsabs.harvard.edu/abs/2020ApJ...896..166R/abstract). Builds on [van Eerten & MacFadyen 2010](https://arxiv.org/abs/1006.5125) and [van Eerten 2018](https://arxiv.org/abs/1801.01848). This code is under active development.
Documentation available at <https://afterglowpy.readthedocs.io/>
## Attribution
If you use this code in a publication, please refer to the package by name and cite "Ryan, G., van Eerten, H., Piro, L. and Troja, E., 2020, Astrophysical Journal *896*, 166 (2020)" [arXiv link](https://arxiv.org/abs/1909.11691).
## Acknowledgements
This work is funded in part by the European Union’s Horizon 2020 Programme under the AHEAD2020 project (grant agreement n. 871158).
## Features
_afterglowpy_ computes synchrotron emission from the forward shock of a relativistic blast wave. It includes:
- Fully trans-relativistic shock evolution through a constant density medium.
- On-the-fly integration over the equal-observer-time slices of the shock surface.
- Approximate prescription for jet spreading.
- Arbitrary viewing angles.
- Angularly structured jets, ie. E(θ)
- Spherical velocity-stratified outflows, ie. E(u)
- Counter-jet emission.
- Deep Newtonian emission.
- Image moments suitable for astrometry: centroid position and image size.
It has limited support (these should be considered experimental) for:
- Initial energy injection
- Inverse Compton spectra
- Early coasting phase
It does not include (yet):
- External wind medium, ie. n ∝ r<sup>-2</sup>
- Synchrotron self-absorption
- Reverse shock emission
_afterglowpy_ has been calibrated to the BoxFit code ([van Eerten, van der Horst, & Macfadyen 2011](https://arxiv.org/abs/1110.5089), available at the [Afterglow Library](https://cosmo.nyu.edu/afterglowlibrary/boxfit2011.html)) and produces similar light curves for top hat jets (within 50% when same parameters are used) both on- and off-axis.  Its jet models by default do not include an initial coasting phase, which may affect predictions for early observations.
## Installation/Building
_afterglowpy_ is available via `pip`:
```bash
$ pip install afterglowpy
```
If you are working on a local copy of this repo and would like to install from source, you can run the following from the top level directory of the project.
```bash
$ pip install -e .
```
## Using
In your python code, import the library with `import afterglowpy as grb`.
The main function of interest is`grb.fluxDensity(t, nu, **kwargs)`. See `examples/plotLightCurve.py` for a simple example.
For jet-like afterglows there are up to 13 required keyword arguments:
- `jetType` an integer code setting the jet structure. It can be `grb.jet.TopHat`, `grb.jet.Gaussian`, `grb.jet.PowerLawCore`, `grb.jet.GaussianCore`, `grb.jet.Spherical`, or `grb.jet.PowerLaw`.
- `specType` an integer code specifying flags for the emissivity function and spectrum. Can be `grb.jet.SimpleSpec` (basic spectrum with ν<sub>m</sub> and ν<sub>c</sub>), `grb.jet.DeepNewtonian`, `grb.jet.ICCooling` (simple inverse Compton effects on the cooling frequency, experimental).
- `thetaObs` viewing angle in radians
- `E0` on-axis isotropic equivalent energy in erg
- `thetaCore` half-width of the jet core in radians (jetType specific)
- `thetaWing` "wing" truncation angle of the jet, in radians
- `b` power for power-law structure, θ<sup>-b</sup>
- `n0` Number density of ISM, in cm<sup>-3</sup>
- `p` Electron distribution power-law index (p>2)
- `epsilon_e` Thermal energy fraction in electrons
- `epsilon_B` Thermal energy fraction in magnetic field
- `xi_N` Fraction of electrons that get accelerated
- `d_L` Luminosity distance in cm
Optional keyword arguments for all models are:
- `z` redshift (defaults to 0)
- `spread` boolean (defaults to True), whether to allow the jet to spread.
- `counterjet` boolean (defaults to False), whether to include the counterjet
- `moment` array (integer dtype, same shape as t and nu) which sky moment to compute.
- `L0` Fiducial luminosity for energy injection, in erg/s, default 0.0.
- `q` Temporal power-law index for energy injection, default 0.0.
- `ts` Fiducial time-scale for energy injection, in seconds, default 0.
- `tRes` time resolution of shock-evolution scheme, number of sample points per decade in time
- `latRes` latitudinal resolution for structured jets, number of shells per `thetaC`
- `rtol` target relative tolerance of flux integration
|
geoffryanREPO_NAMEafterglowpyPATH_START.@afterglowpy_extracted@afterglowpy-master@README.md@.PATH_END.py
|
{
"filename": "multithreading.py",
"repo_name": "vaexio/vaex",
"repo_path": "vaex_extracted/vaex-master/packages/vaex-core/vaex/multithreading.py",
"type": "Python"
}
|
# -*- coding: utf-8 -*-
from __future__ import division, print_function
import asyncio
import threading
import multiprocessing
from warnings import warn
import vaex.utils
import logging
import concurrent.futures
import time
import vaex.settings
from .itertools import buffer
logger = logging.getLogger("vaex.multithreading")
main_pool = None
main_io_pool = None
thread_pools = {}
def get_thread_pool(nthreads):
    """Return the pool with `nthreads` workers, creating it on first use."""
    try:
        return thread_pools[nthreads]
    except KeyError:
        pool = thread_pools[nthreads] = ThreadPoolIndex(nthreads)
        return pool
def get_main_pool():
    """Return the process-wide default compute pool, creating it lazily."""
    global main_pool
    if main_pool is not None:
        return main_pool
    main_pool = ThreadPoolIndex()
    # Also register it by size so get_thread_pool() can find it
    thread_pools[main_pool.nthreads] = main_pool
    return main_pool
def get_main_io_pool():
    """Return the process-wide I/O thread pool, creating it lazily."""
    global main_io_pool
    if main_io_pool is not None:
        return main_io_pool
    max_workers = vaex.settings.main.thread_count_io
    main_io_pool = concurrent.futures.ThreadPoolExecutor(max_workers=max_workers)
    return main_io_pool
class ThreadPoolIndex(concurrent.futures.ThreadPoolExecutor):
    """Thread pool that adds a thread index as first argument to the callback passed to map.

    This is useful if you keep a piece of per-thread memory (like ndgrid):
    the index tells the callback which buffer it may use.
    """
    def __init__(self, max_workers=None, *args, **kwargs):
        # Default worker count comes from the vaex settings
        if max_workers is None:
            max_workers = vaex.settings.main.thread_count
        super(ThreadPoolIndex, self).__init__(max_workers, *args, **kwargs)
        self.lock = threading.Lock()  # guards thread-index assignment
        self.thread_indices = iter(range(1000000))  # enough threads until 2100?
        self.local = threading.local()  # per-worker storage for the index
        self.nthreads = self._max_workers
        self._debug_sleep = 0  # artificial per-task delay, for testing only
    def map(self, callable, iterator, count, on_error=None, progress=None, cancel=None, unpack=False, use_async=False, **kwargs_extra):
        """Generator mapping ``callable(thread_index, chunk, **kwargs_extra)``
        over ``iterator``, yielding each result (futures if use_async=True).

        Non-None results are summed and divided by ``count`` to form a
        progress fraction reported through ``progress`` at most ~10x/s;
        if ``progress`` returns False the mapping is cancelled and
        ``cancel`` is invoked.  ``on_error`` is currently unused.
        """
        progress = progress or (lambda x: True)
        cancelled = False
        def wrapped(*args, **kwargs):
            # Skip the work entirely once cancelled (returns None)
            if not cancelled:
                if self.nthreads == 1:
                    self.local.index = 0
                # Lazily hand each worker thread a unique, stable index
                with self.lock:
                    if not hasattr(self.local, 'index'):
                        self.local.index = next(self.thread_indices)
                if unpack:
                    args = args[0]  # it's passed as a tuple.. not sure why
                if self._debug_sleep:
                    # print("SLEEP", self._debug_sleep)
                    time.sleep(self._debug_sleep)
                return callable(self.local.index, *args, **kwargs, **kwargs_extra)
        time_last = time.time() - 100
        min_delta_t = 1. / 10  # max 10 per second
        # we don't want to keep consuming the chunk iterator when we cancel
        chunk_iterator = iterator
        def cancellable_iter():
            for value in chunk_iterator:
                yield value
                if cancelled:
                    break
        if self.nthreads == 1:  # when using 1 thread, it makes debugging easier (better stacktrace)
            if use_async:
                iterator = self._map_async(wrapped, cancellable_iter())
            else:
                iterator = self._map(wrapped, cancellable_iter())
        else:
            if use_async:
                loop = asyncio.get_event_loop()
                iterator = (loop.run_in_executor(self, lambda value=value: wrapped(value)) for value in cancellable_iter())
            else:
                iterator = super(ThreadPoolIndex, self).map(wrapped, cancellable_iter())
        total = 0
        # buffer() keeps a few tasks in flight ahead of consumption
        iterator = iter(buffer(iterator, self._max_workers + 3))
        try:
            for value in iterator:
                if use_async:
                    value = yield value
                else:
                    yield value
                if value != None:
                    total += value
                progress_value = (total) / count
                time_now = time.time()
                # Throttle progress callbacks, but always report completion
                if progress_value == 1 or (time_now - time_last) > min_delta_t:
                    time_last = time_now
                    if progress(progress_value) is False:
                        cancelled = True
                        # NOTE(review): `cancel` defaults to None yet is called
                        # unconditionally here — confirm callers always pass it
                        cancel()
                        break
        finally:
            if not cancelled:
                cancelled = True
                # consume the rest of the iterators and await them to avoid un-awaited exceptions, which trigger a
                # 'Future exception was never retrieved' printout
                # TODO: since we don't use async any more, I think we can get rid of this
                # NOTE(review): the try/except wraps only `pass`, so it cannot
                # catch anything raised by the iteration itself — confirm intent
                for value in iterator:
                    try:
                        pass
                    except:
                        pass
    def _map(self, callable, iterator):
        # Synchronous single-thread fallback for easier debugging
        for i in iterator:
            yield callable(i)
    def _map_async(self, callable, iterator):
        # Single-thread async fallback: yield already-resolved futures
        for i in iterator:
            future = asyncio.Future()
            future.set_result(callable(i))
            yield future
|
vaexioREPO_NAMEvaexPATH_START.@vaex_extracted@vaex-master@packages@vaex-core@vaex@multithreading.py@.PATH_END.py
|
{
"filename": "models.py",
"repo_name": "shaoshanglqy/shap-shapley",
"repo_path": "shap-shapley_extracted/shap-shapley-master/shap/benchmark/models.py",
"type": "Python"
}
|
import sklearn
import sklearn.ensemble
import gc
from sklearn.preprocessing import StandardScaler
class KerasWrap(object):
    """ A wrapper that allows us to set parameters in the constructor and do a reset before fitting.

    The wrapped Keras model is reset to its first-seen weights on every
    fit() call, and features are standardized with a StandardScaler that
    is refit on each training set.
    """
    def __init__(self, model, epochs, flatten_output=False):
        self.model = model
        self.epochs = epochs
        self.flatten_output = flatten_output
        self.init_weights = None  # captured lazily on the first fit()
        self.scaler = StandardScaler()
    def fit(self, X, y, verbose=0):
        # Reset the network so successive fits start from identical weights
        if self.init_weights is None:
            self.init_weights = self.model.get_weights()
        else:
            self.model.set_weights(self.init_weights)
        self.scaler.fit(X)
        # Bug fix: train on the *scaled* features.  Previously the model was
        # fit on raw X while predict() transformed its input, so training
        # and inference saw differently-scaled data.
        X = self.scaler.transform(X)
        return self.model.fit(X, y, epochs=self.epochs, verbose=verbose)
    def predict(self, X):
        X = self.scaler.transform(X)
        if self.flatten_output:
            return self.model.predict(X).flatten()
        else:
            return self.model.predict(X)
# This models are all tuned for the corrgroups60 dataset
def corrgroups60__lasso():
    """ Lasso Regression (alpha tuned for the corrgroups60 dataset).
    """
    # NOTE(review): relies on sklearn.linear_model being reachable from the
    # bare `import sklearn` at module level — confirm on current sklearn
    return sklearn.linear_model.Lasso(alpha=0.1)
def corrgroups60__ridge():
    """ Ridge Regression
    """
    # L2-regularized linear model; alpha tuned for the corrgroups60 dataset
    model = sklearn.linear_model.Ridge(alpha=1.0)
    return model
def corrgroups60__decision_tree():
    """ Decision Tree
    """
    # fixed seed so benchmark runs are reproducible
    model = sklearn.tree.DecisionTreeRegressor(random_state=0)
    return model
def corrgroups60__random_forest():
    """ Random Forest
    """
    # fixed seed so benchmark runs are reproducible
    model = sklearn.ensemble.RandomForestRegressor(random_state=0)
    return model
def corrgroups60__gbm():
    """ Gradient Boosted Trees
    """
    import xgboost

    # max_depth and learning_rate were fixed then n_estimators was chosen using a train/test split
    model = xgboost.XGBRegressor(
        max_depth=6, n_estimators=50, learning_rate=0.1, n_jobs=8, random_state=0,
    )
    return model
def corrgroups60__ffnn():
    """ 4-Layer Neural Network
    """
    from keras.models import Sequential
    from keras.layers import Dense

    # 60 inputs -> 32 -> 20 -> 20 -> single regression output
    network = Sequential([
        Dense(32, activation='relu', input_dim=60),
        Dense(20, activation='relu'),
        Dense(20, activation='relu'),
        Dense(1),
    ])
    network.compile(optimizer='adam',
                    loss='mean_squared_error',
                    metrics=['mean_squared_error'])
    return KerasWrap(network, 30, flatten_output=True)
def independentlinear60__lasso():
    """ Lasso Regression
    """
    # L1-regularized linear model; alpha tuned for the independentlinear60 dataset
    model = sklearn.linear_model.Lasso(alpha=0.1)
    return model
def independentlinear60__ridge():
    """ Ridge Regression
    """
    # L2-regularized linear model; alpha tuned for the independentlinear60 dataset
    model = sklearn.linear_model.Ridge(alpha=1.0)
    return model
def independentlinear60__decision_tree():
    """ Decision Tree
    """
    # fixed seed so benchmark runs are reproducible
    model = sklearn.tree.DecisionTreeRegressor(random_state=0)
    return model
def independentlinear60__random_forest():
    """ Random Forest
    """
    # fixed seed so benchmark runs are reproducible
    model = sklearn.ensemble.RandomForestRegressor(random_state=0)
    return model
def independentlinear60__gbm():
    """ Gradient Boosted Trees
    """
    import xgboost

    # max_depth and learning_rate were fixed then n_estimators was chosen using a train/test split
    model = xgboost.XGBRegressor(
        max_depth=6, n_estimators=100, learning_rate=0.1, n_jobs=8, random_state=0,
    )
    return model
def independentlinear60__ffnn():
    """ 4-Layer Neural Network
    """
    from keras.models import Sequential
    from keras.layers import Dense

    # 60 inputs -> 32 -> 20 -> 20 -> single regression output
    network = Sequential([
        Dense(32, activation='relu', input_dim=60),
        Dense(20, activation='relu'),
        Dense(20, activation='relu'),
        Dense(1),
    ])
    network.compile(optimizer='adam',
                    loss='mean_squared_error',
                    metrics=['mean_squared_error'])
    return KerasWrap(network, 30, flatten_output=True)
def cric__lasso():
    """ Lasso Regression

    L1-penalized logistic regression whose output is the positive-class probability.
    """
    # BUG FIX: scikit-learn's default solver ('lbfgs' since 0.22) does not support
    # penalty="l1" and raises ValueError; liblinear is the documented L1-capable solver.
    model = sklearn.linear_model.LogisticRegression(penalty="l1", C=0.002, solver="liblinear")
    # we want to explain the raw probability outputs of the model
    model.predict = lambda X: model.predict_proba(X)[:,1]
    return model
def cric__ridge():
    """ Ridge Regression
    """
    # logistic regression with the default L2 penalty
    model = sklearn.linear_model.LogisticRegression(penalty="l2")
    # expose the raw positive-class probability as the model output
    model.predict = lambda X: model.predict_proba(X)[:, 1]
    return model
def cric__decision_tree():
    """ Decision Tree
    """
    model = sklearn.tree.DecisionTreeClassifier(random_state=0)
    # expose the raw positive-class probability as the model output
    model.predict = lambda X: model.predict_proba(X)[:, 1]
    return model
def cric__random_forest():
    """ Random Forest
    """
    model = sklearn.ensemble.RandomForestClassifier(100, random_state=0)
    # expose the raw positive-class probability as the model output
    model.predict = lambda X: model.predict_proba(X)[:, 1]
    return model
def cric__gbm():
    """ Gradient Boosted Trees
    """
    import xgboost

    # max_depth and subsample match the params used for the full cric data in the paper;
    # learning_rate was set a bit higher to allow for faster runtimes and
    # n_estimators was chosen based on a train/test split of the data
    model = xgboost.XGBClassifier(
        max_depth=5, n_estimators=400, learning_rate=0.01,
        subsample=0.2, n_jobs=8, random_state=0,
    )
    # we want to explain the margin, not the transformed probability outputs
    model.__orig_predict = model.predict
    model.predict = lambda X: model.__orig_predict(X, output_margin=True)  # pylint: disable=E1123
    return model
def cric__ffnn():
    """ 4-Layer Neural Network
    """
    from keras.models import Sequential
    from keras.layers import Dense, Dropout

    # 336 inputs -> 10 -> 10 -> sigmoid output, with dropout between hidden layers
    network = Sequential([
        Dense(10, activation='relu', input_dim=336),
        Dropout(0.5),
        Dense(10, activation='relu'),
        Dropout(0.5),
        Dense(1, activation='sigmoid'),
    ])
    network.compile(optimizer='adam',
                    loss='binary_crossentropy',
                    metrics=['accuracy'])
    return KerasWrap(network, 30, flatten_output=True)
|
shaoshanglqyREPO_NAMEshap-shapleyPATH_START.@shap-shapley_extracted@shap-shapley-master@shap@benchmark@models.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/graph_objs/layout/coloraxis/colorbar/title/__init__.py",
"type": "Python"
}
|
import sys

# On Python 3.7+ expose Font lazily via PEP 562 module __getattr__;
# older interpreters fall back to an eager import.
if sys.version_info >= (3, 7):
    from _plotly_utils.importers import relative_import

    __all__, __getattr__, __dir__ = relative_import(__name__, [], ["._font.Font"])
else:
    from ._font import Font
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@graph_objs@layout@coloraxis@colorbar@title@__init__.py@.PATH_END.py
|
{
"filename": "SolverEWDialog.py",
"repo_name": "marblestation/iSpec",
"repo_path": "iSpec_extracted/iSpec-master/ispec/gui/dialogs/SolverEWDialog.py",
"type": "Python"
}
|
#
# This file is part of iSpec.
# Copyright Sergi Blanco-Cuaresma - http://www.blancocuaresma.com/s/
#
# iSpec is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# iSpec is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with iSpec. If not, see <http://www.gnu.org/licenses/>.
#
import numpy as np
import matplotlib.pyplot as plt
from .CustomDialog import *
class SolverEWDialog(CustomDialog):
    """Dialog for the equivalent-width (EW) based atmospheric parameter solver.

    Collects the solver configuration (code, model atmosphere, abundances,
    Teff/logg/[M/H]/vmic and which of them are free) and, once results are
    registered, shows abundance-vs-excitation-potential and
    abundance-vs-reduced-EW plots for Fe 1 / Fe 2 plus a statistics listing.
    """

    def plot(self, axes, component):
        """Draw the two diagnostic panels.

        axes[0]: [X/H] vs lower-state excitation potential (eV);
        axes[1]: [X/H] vs reduced equivalent width. Each panel shows the
        Fe 1 / Fe 2 line abundances and the corresponding fitted linear trend
        (slope/intercept taken from ``__fitted_lines_params``).
        """
        for i, ax in enumerate(axes):
            ## Draw
            for j, element in enumerate(["Fe 1", "Fe 2"]):
                element_linemasks = self.__linemasks[self.__linemasks['element'] == element]
                # abundances of the lines selected for this element
                element_abundances = self.__x_over_h[self.__selected_x_over_h[j]]
                if len(element_abundances) == 0:
                    continue
                if i == 0:
                    # first panel: excitation potential; params [0]=slope, [1]=intercept
                    m = self.__fitted_lines_params[0]
                    c = self.__fitted_lines_params[1]
                    x = element_linemasks['lower_state_eV']
                else:
                    # second panel: reduced EW; params [2]=slope, [3]=intercept
                    m = self.__fitted_lines_params[2]
                    c = self.__fitted_lines_params[3]
                    x = element_linemasks['ewr']
                ax.plot(x, element_abundances, linestyle='', marker='o', markersize=5, zorder=1, label=element)
                ax.plot(x, m*x + c, color="red")
            leg = ax.legend(loc='upper right', shadow=False, numpoints=1)
            ltext = leg.get_texts()
            plt.setp(ltext, fontsize='8')
            ax.grid(True, which="both")
            if i == 0:
                ax.set_xlabel("lower state (eV)", fontsize="10")
            else:
                ax.set_xlabel("reduced equivalent width", fontsize="10")
            ax.set_ylabel("[X/H]", fontsize="10")
            ax.tick_params(axis='y', labelsize=8)
        # `ax` is the last axis from the loop; adjust the shared figure once
        fig = ax.get_figure()
        #fig.set_tight_layout(True)
        fig.subplots_adjust(hspace = 0.5, bottom=0.2)

    def register(self, linemasks, params, x_over_h, selected_x_over_h, fitted_lines_params):
        """Store solver results and rebuild the statistics shown in the dialog.

        Parameters
        ----------
        linemasks : record array with at least 'element', 'lower_state_eV' and 'ewr' fields
        params : dict with 'teff', 'logg', 'MH' and 'vmic' entries
        x_over_h : per-line [X/H] abundances
        selected_x_over_h : pair of boolean/index selections (Fe 1, Fe 2) into x_over_h
        fitted_lines_params : (exc. slope, exc. intercept, red. EW slope, red. EW intercept)
        """
        self.__linemasks = linemasks
        self.__params = params
        self.__x_over_h = x_over_h
        self.__selected_x_over_h = selected_x_over_h
        self.__fitted_lines_params = fitted_lines_params
        # We have data, we can assign the plotting function
        self.__components[0]["function"] = self.plot
        teff = params['teff']
        logg = params['logg']
        feh = params['MH']
        vmic = params['vmic']
        ## Stats
        # empty the list in place (it is shared with the Listbox component)
        for i in range(len(self.__stats)):
            self.__stats.pop()
        # NOTE(review): "(k)" below presumably means Kelvin ("(K)") — label typo kept as-is
        self.__stats.append("%-50s: %10.2f" % ("Effective temperature (k)", np.round(teff, 1)))
        self.__stats.append("%-50s: %10.2f" % ("Surface gravity (log g)", np.round(logg, 2)))
        self.__stats.append("%-50s: %10.2f" % ("Metallicity [Fe/H]", np.round(feh, 2)))
        self.__stats.append("%-50s: %10.2f" % ("Microturbulence velocity (km/s)", np.round(vmic, 2)))
        self.__stats.append("%-50s: %10.4f" % ("Excitation potential slope", np.round(self.__fitted_lines_params[0], 2)))
        self.__stats.append("%-50s: %10.4f" % ("Reduced equivalent width slope", np.round(self.__fitted_lines_params[2], 2)))
        # ionization balance diagnostic: median Fe 1 minus median Fe 2 abundance
        diff = np.nanmedian(self.__x_over_h[self.__selected_x_over_h[0]]) - np.nanmedian(self.__x_over_h[self.__selected_x_over_h[1]])
        self.__stats.append("%-50s: %10.2f" % ("Fe I - Fe II abundance difference", np.round(diff, 2)))
        self.__stats.append("%-50s: %10.0f" % ("Total number of lines", np.round(len(x_over_h), 2)))
        for i, element in enumerate(["Fe 1", "Fe 2"]):
            element_abundances_over_h = x_over_h[selected_x_over_h[i]]
            if len(element_abundances_over_h) == 0:
                continue
            self.__stats.append("%-50s: %10.2f" % ( element + " median abundance in [X/H] (dex)", np.round(np.nanmedian(element_abundances_over_h), 2)))
            self.__stats.append("%-50s: %10.2f" % ( element + " mean abundance in [X/H] (dex)", np.round(np.nanmean(element_abundances_over_h), 2)))
            self.__stats.append("%-50s: %10.2f" % ( element + " standard deviation in [X/H] (dex)", np.round(np.nanstd(element_abundances_over_h), 2)))
            self.__stats.append("%-50s: %10.0f" % ( element + " lines number", np.round(len(element_abundances_over_h), 0)))

    def __init__(self, parent, title, teff, logg, feh, alpha, vmic, lists, default_lists):
        """Build the component descriptions for the dialog.

        The plot component's "function" stays None until :meth:`register` is
        called with solver results; ``__stats`` is shared by reference with the
        Listbox component so later in-place updates are reflected in the UI.
        """
        self.__parent = parent
        self.__title = title
        self.__plot = None
        self.__params = None
        self.__x_over_h = None
        self.__selected_x_over_h = None
        self.__fitted_lines_params = None
        self.__stats = []
        self.__components = []
        component = {}
        component["type"] = "Plot"
        component["function"] = self.__plot
        component["axes"] = 2
        self.__components.append(component)
        component = {}
        component["type"] = "Listbox"
        component["options"] = self.__stats
        self.__components.append(component)
        component = {}
        component["type"] = "OptionMenu"
        component["text"] = "Code"
        component["options"] = lists['ew_code']
        component["default"] = component["options"][default_lists['ew_code']]
        self.__components.append(component)
        component = {}
        component["type"] = "OptionMenu"
        component["text"] = "Model atmosphere"
        component["options"] = lists['atmospheres']['name']
        component["default"] = component["options"][default_lists['atmospheres']]
        self.__components.append(component)
        component = {}
        component["type"] = "OptionMenu"
        component["text"] = "Solar abundances"
        component["options"] = lists['abundances']['name']
        component["default"] = component["options"][default_lists['abundances']]
        self.__components.append(component)
        component = {}
        component["type"] = "Entry"
        component["text"] = "Effective temperature (K)"
        component["text-type"] = "float" # float, int or str
        component["default"] = teff
        component["minvalue"] = 400
        component["maxvalue"] = 55000
        self.__components.append(component)
        component = {}
        component["type"] = "Checkbutton"
        component["text"] = "Free Teff"
        component["default"] = True
        self.__components.append(component)
        component = {}
        component["type"] = "Entry"
        component["text"] = "Surface gravity (log g)"
        component["text-type"] = "float" # float, int or str
        component["default"] = logg
        component["minvalue"] = -0.5
        component["maxvalue"] = 5.5
        self.__components.append(component)
        component = {}
        component["type"] = "Checkbutton"
        component["text"] = "Free Log(g)"
        component["default"] = True
        self.__components.append(component)
        component = {}
        component["type"] = "Entry"
        component["text"] = "Metallicity [M/H]"
        component["text-type"] = "float" # float, int or str
        component["default"] = feh
        component["minvalue"] = -5
        component["maxvalue"] = 1
        self.__components.append(component)
        component = {}
        component["type"] = "Entry"
        component["text"] = "Alpha enhancement [alpha/Fe]"
        component["text-type"] = "float" # float, int or str
        component["default"] = alpha
        component["minvalue"] = -2
        component["maxvalue"] = 2
        self.__components.append(component)
        component = {}
        component["type"] = "Checkbutton"
        component["text"] = "Automatic alpha enhancement [alpha/Fe]"
        component["default"] = True
        self.__components.append(component)
        component = {}
        component["type"] = "Entry"
        component["text"] = "Microturbulence velocity (km/s)"
        component["text-type"] = "float" # float, int or str
        component["default"] = vmic
        component["minvalue"] = 0
        component["maxvalue"] = np.inf
        self.__components.append(component)
        component = {}
        component["type"] = "Checkbutton"
        component["text"] = "Free Vmic"
        component["default"] = True
        self.__components.append(component)
        component = {}
        component["type"] = "Entry"
        component["text"] = "Maximum number of iterations"
        component["text-type"] = "int" # float, int or str
        component["default"] = "10"
        component["minvalue"] = 0
        component["maxvalue"] = np.inf
        self.__components.append(component)

    def show(self):
        """Reset previous results and display the dialog."""
        self.results = None
        CustomDialog.__init__(self, self.__parent, self.__title, self.__components)
|
marblestationREPO_NAMEiSpecPATH_START.@iSpec_extracted@iSpec-master@ispec@gui@dialogs@SolverEWDialog.py@.PATH_END.py
|
{
"filename": "add_rfi.py",
"repo_name": "cosmo-ethz/hide",
"repo_path": "hide_extracted/hide-master/hide/plugins/add_rfi.py",
"type": "Python"
}
|
# HIDE is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HIDE is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HIDE. If not, see <http://www.gnu.org/licenses/>.
'''
Created on Dec 8, 2014
author: jakeret
'''
from __future__ import print_function, division, absolute_import, unicode_literals
import numpy as np
from ivy.plugin.base_plugin import BasePlugin
from scipy import stats
class Plugin(BasePlugin):
    """
    Adds RFI to the time ordered data.

    Two kinds of RFI are injected into ``ctx.tod_vx``:
    a random number of 2-D Gaussian bursts (amplitude drawn from a lognormal,
    widths derived from the amplitude via the polynomial fits in the params),
    and constant broadband interference around each frequency in
    ``params.rfi_freqs``.
    """

    def __call__(self):
        params = self.ctx.params
        # polynomials mapping burst amplitude -> width in freq/time direction
        fit_sigma_freq = np.poly1d(params.coeff_freq)
        fit_sigma_time = np.poly1d(params.coeff_time)

        max_rfi_count = (self.ctx.tod_vx.shape[1] * self.ctx.params.strategy_step_size) / 3600 * params.max_rfi_count
        # BUG FIX: `np.int` was removed in NumPy 1.24; the builtin `int` is the replacement
        rfi_count = int(np.floor(np.random.uniform(1, max_rfi_count)))

        for rfi_idx in range(rfi_count):
            amp = np.fabs(stats.lognorm.rvs(1, loc=params.amp_loc, scale=params.amp_scale, size=1))
            sigma_freq = fit_sigma_freq(amp)
            sigma_time = fit_sigma_time(amp)

            # evaluation grid covering +/- sigma_range standard deviations
            grid_x = np.arange(-params.sigma_range * sigma_time, params.sigma_range * sigma_time)
            grid_y = np.arange(-params.sigma_range * sigma_freq, params.sigma_range * sigma_freq)
            X, Y = np.meshgrid(grid_x, grid_y)

            time_offset = np.random.uniform(-params.sigma_range, params.sigma_range)
            Z = gaussian(amp, time_offset * sigma_time, 0, sigma_time, sigma_freq)(X, Y)

            # BUG FIX: slice bounds must be ints — np.floor returns floats, and
            # float indices raise an error on modern NumPy/Python
            pos_time = int(np.floor(np.random.uniform(0, self.ctx.tod_vx.shape[1] - 2 * params.sigma_range * sigma_time)))
            pos_freq = int(np.floor(np.random.uniform(0, self.ctx.tod_vx.shape[0] - 2 * params.sigma_range * sigma_freq)))
            if pos_time >= 0 and pos_freq >= 0:
                self.ctx.tod_vx[pos_freq: pos_freq + Z.shape[0], pos_time: pos_time + Z.shape[1]] += Z

        # constant (broadband) RFI around each configured frequency channel
        for rfi_freq in params.rfi_freqs:
            amp = np.random.uniform(params.min_amp, params.max_amp)
            # exponentially decaying amplitude profile on both sides of the channel
            scales1 = amp * np.exp(-np.arange(params.rfi_width-1, 0, -1) * 1.0)
            scales2 = amp * np.exp(-np.arange(0, params.rfi_width, 1) * 0.8)
            scales = np.append(scales1, scales2)
            for i, rfi_pos in enumerate(np.arange(params.rfi_width-1, -params.rfi_width, -1)):
                scale = scales[i]
                rfi = np.random.normal(scale, scale, self.ctx.tod_vx.shape[1])
                self.ctx.tod_vx[rfi_freq - rfi_pos, : ] += rfi

    def __str__(self):
        return "Add RFI"
def gaussian(height, center_x, center_y, width_x, width_y):
    """Returns a gaussian function with the given parameters"""
    wx = float(width_x)
    wy = float(width_y)

    def _evaluate(x, y):
        dx = (center_x - x) / wx
        dy = (center_y - y) / wy
        return height * np.exp(-0.5 * (dx * dx + dy * dy))

    return _evaluate
|
cosmo-ethzREPO_NAMEhidePATH_START.@hide_extracted@hide-master@hide@plugins@add_rfi.py@.PATH_END.py
|
{
"filename": "tfstep.py",
"repo_name": "hpparvi/opents",
"repo_path": "opents_extracted/opents-master/src/tfstep.py",
"type": "Python"
}
|
# OpenTS: Open exoplanet transit search pipeline.
# Copyright (C) 2015-2020 Hannu Parviainen
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
from collections import namedtuple
from logging import getLogger
from pathlib import Path
from typing import Optional
from astropy.io.fits import HDUList, Card
from numba import njit
import pandas as pd
from astropy.table import Table
from matplotlib.pyplot import setp
from numpy.random import uniform
from numpy import ones, unique, argsort, atleast_2d, ndarray, squeeze, inf, isfinite, exp, concatenate, sqrt, clip
from numpy.core._multiarray_umath import floor, zeros, log, pi, array, sin
from pytransit.lpf.lpf import map_ldc
from pytransit.lpf.tesslpf import downsample_time
from pytransit.orbits import epoch, as_from_rhop, i_from_ba, i_from_baew, d_from_pkaiews
from pytransit.param import LParameter, UniformPrior as UP, NormalPrior as NP, PParameter, GParameter
from pytransit.utils.misc import fold
from scipy.interpolate import interp1d
from pytransit import QuadraticModel, BaseLPF
try:
from pytransit import QuadraticModelCL
except:
pass
from .otsstep import OTSStep
from .plots import bplot
logger = getLogger("transit-fit-step")
@njit(fastmath=True)
def sine_model(time, period, phase, amplitudes):
    """Sum-of-harmonics sine baseline evaluated for each parameter vector.

    Returns an (npv, npt) array whose row i is the sum over harmonics j of
    amplitudes[i, j] * sin(2*pi*(time - phase[i]*period[i]) / (period[i]/(j+1))).
    """
    n_pv = period.size
    n_pt = time.size
    n_harmonics = amplitudes.shape[1]
    baseline = zeros((n_pv, n_pt))
    for ipv in range(n_pv):
        t0 = phase[ipv] * period[ipv]
        for ih in range(n_harmonics):
            harmonic_period = period[ipv] / (ih + 1)
            baseline[ipv, :] += amplitudes[ipv, ih] * sin(2 * pi * (time - t0) / harmonic_period)
    return baseline
def delta_bic(dll, d1, d2, n):
    """Delta-BIC: log-likelihood difference plus the dimensionality penalty
    0.5*(d1 - d2)*ln(n) for models with d1 and d2 free parameters and n points."""
    penalty = 0.5 * (d1 - d2) * log(n)
    return dll + penalty
class SineBaseline:
    """Additive sinusoidal baseline registered on an LPF's parametrization.

    Adds one phase parameter and *n* harmonic amplitude parameters to the LPF
    and, when called, evaluates :func:`sine_model` on the LPF's time array.
    """

    def __init__(self, lpf, name: str = 'sinbl', n: int = 1, lcids=None):
        self.name = name
        self.lpf = lpf
        self.n = n  # number of sine harmonics

        if lpf.lcids is None:
            # NOTE(review): message mentions LinearModelBaseline — looks copy-pasted; verify
            raise ValueError('The LPF data needs to be initialised before initialising LinearModelBaseline.')
        self.init_data(lcids)
        self.init_parameters()

    def init_data(self, lcids=None):
        # time axis relative to the LPF reference epoch
        self.time = self.lpf.timea - self.lpf._tref

    def init_parameters(self):
        """Baseline parameter initialisation.

        Registers a phase parameter (uniform in [0, 1]) and n amplitude
        parameters (uniform in [0, flux peak-to-peak]) as a global block and
        records the resulting parameter-vector slice.
        """
        fptp = self.lpf.ofluxa.ptp()
        bls = []
        bls.append(LParameter(f'c_sin', f'sin phase', '', UP(0.0, 1.0), bounds=(0, 1)))
        for i in range(self.n):
            bls.append(LParameter(f'a_sin_{i}', f'sin {i} amplitude', '', UP(0, fptp), bounds=(0, inf)))
        self.lpf.ps.thaw()
        self.lpf.ps.add_global_block(self.name, bls)
        self.lpf.ps.freeze()
        self.pv_slice = self.lpf.ps.blocks[-1].slice
        self.pv_start = self.lpf.ps.blocks[-1].start
        setattr(self.lpf, f"_sl_{self.name}", self.pv_slice)
        setattr(self.lpf, f"_start_{self.name}", self.pv_start)

    def __call__(self, pvp, bl: Optional[ndarray] = None):
        """Add the sine baseline for each parameter vector in *pvp* to *bl*.

        Starts from an all-ones baseline when *bl* is not given; pvp[:, 1] is
        used as the period (the LPF's orbital-period column).
        """
        pvp = atleast_2d(pvp)
        if bl is None:
            bl = ones((pvp.shape[0], self.time.size))
        else:
            bl = atleast_2d(bl)
        bl += sine_model(self.time,
                         period=pvp[:, 1],
                         phase=pvp[:, self.pv_start],
                         amplitudes=pvp[:, self.pv_start + 1:])
        return squeeze(bl)
class EvenOddBaseline:
    """Per-parity baseline: independent constant flux offsets for points
    belonging to even and to odd transit epochs, registered on an LPF."""

    def __init__(self, lpf, name: str = 'eobl', lcids=None):
        self.name = name
        self.lpf = lpf

        if lpf.lcids is None:
            raise ValueError('The LPF data needs to be initialised before initialising EvenOddBaseline.')
        self.init_data(lcids)
        self.init_parameters()

    def init_data(self, lcids=None):
        # parity flag per data point: 0 for even epochs, 1 for odd epochs
        self.aeo = (self.lpf.epochs % 2).astype(int)

    def init_parameters(self):
        """Register the even/odd baseline-level parameters as a global block."""
        bls = []
        bls.append(LParameter(f'ble', f'Even baseline level', '', NP(0.0, 0.01), bounds=(-inf, inf)))
        bls.append(LParameter(f'blo', f'Odd baseline level', '', NP(0.0, 0.01), bounds=(-inf, inf)))
        self.lpf.ps.thaw()
        self.lpf.ps.add_global_block(self.name, bls)
        self.lpf.ps.freeze()
        self.pv_slice = self.lpf.ps.blocks[-1].slice
        self.pv_start = self.lpf.ps.blocks[-1].start
        setattr(self.lpf, f"_sl_{self.name}", self.pv_slice)
        setattr(self.lpf, f"_start_{self.name}", self.pv_start)

    def __call__(self, pvp, bl: Optional[ndarray] = None):
        """Add the parity-dependent offset for each parameter vector to *bl*."""
        pvp = atleast_2d(pvp)
        if bl is None:
            # BUG FIX: this class never defines self.time (unlike SineBaseline),
            # so the original `self.time.size` raised AttributeError; the parity
            # array has one entry per data point and gives the same length.
            bl = ones((pvp.shape[0], self.aeo.size))
        else:
            bl = atleast_2d(bl)
        # select the even or odd level per point via the parity index
        bl += pvp[:, self.pv_slice][:, self.aeo]
        return squeeze(bl)
class SearchLPF(BaseLPF):
    """Log posterior function used by the transit fit step.

    Customizes PyTransit's BaseLPF with an even/odd constant baseline,
    a (q1, q2) quadratic limb-darkening parametrization, and an orbit
    parametrized by (tc, p, rho, grazing parameter g).
    """

    def __init__(self, times, fluxes, epochs, tm, nsamples, exptimes, tref):
        # per-point epoch indices, consumed by EvenOddBaseline.init_data
        self.epochs = epochs
        super().__init__('transit_fit', [''], times=times, fluxes=fluxes, tm=tm,
                         nsamples=nsamples, exptimes=exptimes, tref=tref)

    # def _init_lnlikelihood(self):
    #     self._add_lnlikelihood_model(CeleriteLogLikelihood(self))

    def _init_baseline(self):
        # separate constant baseline levels for even and odd transits
        self._add_baseline_model(EvenOddBaseline(self))

    def _init_p_limb_darkening(self):
        """Register a (q1, q2) quadratic limb-darkening pair per passband."""
        pld = concatenate([
            [PParameter(f'q1', 'q1 coefficient', '', UP(0, 1), bounds=(0, 1)),
             PParameter(f'q2', 'q2 coefficient', '', UP(0, 1), bounds=(0, 1))]
            for i, pb in enumerate(self.passbands)])
        self.ps.add_passband_block('ldc', 2, self.npb, pld)
        self._sl_ld = self.ps.blocks[-1].slice
        self._start_ld = self.ps.blocks[-1].start

    def _init_p_orbit(self):
        """Orbit parameter initialisation.
        """
        porbit = [
            GParameter('tc', 'zero epoch', 'd', NP(0.0, 0.1), (-inf, inf)),
            GParameter('p', 'period', 'd', NP(1.0, 1e-5), (0, inf)),
            GParameter('rho', 'stellar density', 'g/cm^3', UP(0.1, 25.0), (0, inf)),
            GParameter('g', 'grazing parameter', 'R_s', UP(0.0, 1.0), (0, 1))]
        self.ps.add_global_block('orbit', porbit)

    def create_pv_population(self, npop=50):
        """Draw the initial optimizer/sampler population from the priors."""
        return self.ps.sample_from_prior(npop)

    def transit_model(self, pv, copy=True):
        """Evaluate the transit model for parameter vector(s) *pv*.

        pv columns: 0 tc, 1 p, 2 rho, 3 grazing parameter g, 4 k2 (area ratio);
        the impact parameter is b = g * (1 + k) so g in [0, 1] spans up to a
        fully grazing geometry.
        """
        pv = atleast_2d(pv)
        ldc = map_ldc(pv[:, self._sl_ld])
        zero_epoch = pv[:, 0] - self._tref
        period = pv[:, 1]
        radius_ratio = sqrt(pv[:, 4:5])
        smaxis = as_from_rhop(pv[:, 2], period)
        impact_parameter = pv[:, 3] * (1 + radius_ratio)[:,0]
        inclination = i_from_ba(impact_parameter, smaxis)
        return self.tm.evaluate(radius_ratio, ldc, zero_epoch, period, smaxis, inclination)

    def posterior_samples(self, burn: int = 0, thin: int = 1, derived_parameters: bool = True):
        """Return posterior samples, optionally adding derived columns
        (k, a, b, inc, t14, t23) computed from the sampled parameters."""
        df = super().posterior_samples(burn=burn, thin=thin, derived_parameters=False)
        if derived_parameters:
            df['k'] = sqrt(df.k2)
            df['a'] = as_from_rhop(df.rho.values, df.p.values)
            df['b'] = df.g * (1 + df.k)
            df['inc'] = i_from_ba(df.b.values, df.a.values)
            df['t14'] = d_from_pkaiews(df.p.values, df.k.values, df.a.values, df.inc.values, 0., 0., 1, kind=14)
            df['t23'] = d_from_pkaiews(df.p.values, df.k.values, df.a.values, df.inc.values, 0., 0., 1, kind=23)
        return df
class TransitFitStep(OTSStep):
    """Pipeline step that fits a transit model to all, even-only, or odd-only
    transits of the current candidate, stores the posterior summary, and can
    write its results into a FITS header and a diagnostic plot."""
    name = "tf"

    def __init__(self, ts, mode: str, title: str, nsamples: int = 1, exptime: float = 1, use_opencl: bool = False, use_tqdm: bool = True):
        # mode selects which transits are fitted; even/odd fits reuse the
        # priors from a previous 'all' fit (see __call__)
        assert mode in ('all', 'even', 'odd')
        super().__init__(ts)
        self.mode = mode
        self.title = title
        self.nsamples = nsamples  # per-point supersampling for the transit model
        self.exptime = exptime    # exposure time passed to the LPF

        self.use_opencl = use_opencl
        self.use_tqdm = use_tqdm

        # results populated by __call__
        self.lpf = None
        self.result = None
        self.mask = None
        self.time = None
        self.phase = None
        self.fobs = None
        self.fmod = None
        self.ftra = None
        self.dll_epochs = None
        self.dll_values = None
        self.parameters = None

        self.period = None  # Best-fit period
        self.zero_epoch = None  # Best-fit zero epoch
        self.duration = None  # Best-fit duration
        self.depth = None  # Best-fit depth

    def __call__(self, npop: int = 40, de_niter: int = 1000, mcmc_niter: int = 200, mcmc_repeats: int = 3, initialize_only: bool = False):
        """Run the fit: select points, build the LPF with priors, optimize with
        differential evolution, sample with MCMC, and store summary statistics.

        With initialize_only=True the LPF is set up but no optimization runs.
        """
        self.logger = getLogger(f"{self.name}:{self.ts.name.lower().replace('_','-')}")
        self.logger.info(f"Fitting {self.mode} transits")
        self.ts.transit_fits[self.mode] = self

        # select the points belonging to the requested transit parity
        epochs = epoch(self.ts.time, self.ts.zero_epoch, self.ts.period)
        if self.mode == 'all':
            mask = ones(self.ts.time.size, bool)
        elif self.mode == 'even':
            mask = epochs % 2 == 0
        elif self.mode == 'odd':
            mask = epochs % 2 == 1
        else:
            raise NotImplementedError

        # keep only points within 4 transit durations of the transit centre
        mask &= abs(self.ts.phase - 0.5*self.ts.period) < 4 * 0.5 * self.ts.duration

        self.ts.transit_fit_masks[self.mode] = self.mask = mask

        self.epochs = epochs = epochs[mask]
        self.time = self.ts.time[mask]
        self.fobs = self.ts.flux[mask]

        tref = floor(self.time.min())
        tm = QuadraticModelCL(klims=(0.01, 0.60)) if self.use_opencl else QuadraticModel(interpolate=False)
        self.lpf = lpf = SearchLPF(times=self.time, fluxes=self.fobs, epochs=epochs, tm=tm,
                                   nsamples=self.nsamples, exptimes=self.exptime, tref=tref)

        # TODO: V-shaped transits are not always modelled well. Need to set smarter priors (or starting population)
        # for the impact parameter and stellar density.
        lpf.set_prior('rho', 'UP', 0.01, 25)
        if self.mode == 'all':
            # priors centred on the ephemeris found by the search
            d = min(self.ts.depth, 0.75)
            lpf.set_prior('tc', 'NP', self.ts.zero_epoch, 0.01)
            lpf.set_prior('p', 'NP', self.ts.period, 0.001)
            lpf.set_prior('k2', 'UP', max(0.01**2, 0.5*d), min(max(0.08**2, 4*d), 0.75**2))
        else:
            # even/odd fits reuse the posterior of the 'all' fit as priors
            pr = self.ts.tf_all.parameters
            lpf.set_prior('tc', 'NP', pr.tc.med, 5*pr.tc.err)
            lpf.set_prior('p', 'NP', pr.p.med, pr.p.err)
            lpf.set_prior('k2', 'UP', max(0.01**2, 0.5 * pr.k2.med), max(0.08**2, min(0.6**2, 2 * pr.k2.med)))
            lpf.set_prior('q1', 'NP', pr.q1.med, pr.q1.err)
            lpf.set_prior('q2', 'NP', pr.q2.med, pr.q2.err)

        # TODO: The limb darkening table has been computed for TESS. Needs to be made flexible.
        if self.ts.teff is not None:
            # interpolate tabulated limb-darkening coefficients at the stellar Teff
            ldcs = Table.read(Path(__file__).parent / "data/ldc_table.fits").to_pandas()
            ip = interp1d(ldcs.teff, ldcs[['q1', 'q2']].T)
            q1, q2 = ip(clip(self.ts.teff, 2000., 12000.))
            lpf.set_prior('q1', 'NP', q1, 1e-5)
            lpf.set_prior('q2', 'NP', q2, 1e-5)

        if initialize_only:
            return
        else:
            lpf.optimize_global(niter=de_niter, npop=npop, use_tqdm=self.use_tqdm, plot_convergence=False)
            lpf.sample_mcmc(mcmc_niter, repeats=mcmc_repeats, use_tqdm=self.use_tqdm, leave=False)
            df = lpf.posterior_samples(derived_parameters=True)
            # collapse the posterior to per-parameter median and std rows
            df = pd.DataFrame((df.median(), df.std()), index='med err'.split())
            pv = lpf.posterior_samples(derived_parameters=False).median().values
            # phase in days, centred on the transit
            self.phase = fold(self.time, pv[1], pv[0], 0.5) * pv[1] - 0.5 * pv[1]
            self.fmod = lpf.flux_model(pv)
            self.ftra = lpf.transit_model(pv)
            self.fbase = lpf.baseline(pv)

            # Calculate the per-orbit log likelihood differences
            # --------------------------------------------------
            ues = unique(epochs)
            lnl = zeros(ues.size)
            err = 10 ** pv[7]  # white-noise std sampled as log10

            def lnlike_normal(o, m, e):
                # Gaussian log likelihood of observations o given model m and std e
                npt = o.size
                return -npt * log(e) - 0.5 * npt * log(2. * pi) - 0.5 * sum((o - m) ** 2 / e ** 2)

            for i, e in enumerate(ues):
                m = epochs == e
                # transit model vs. flat (no-transit) model for this orbit
                lnl[i] = lnlike_normal(self.fobs[m], self.fmod[m], err) - lnlike_normal(self.fobs[m], 1.0, err)
            self.parameters = df
            self.dll_epochs = ues
            self.dll_values = lnl

            self.zero_epoch = df.tc.med
            self.period = df.p.med
            self.duration = df.t14.med
            self.depth = df.k2.med

            if self.mode == 'all':
                # 9 = number of free parameters of the transit model vs. the flat model
                self.delta_bic = self.ts.dbic = delta_bic(lnl.sum(), 0, 9, self.time.size)
                self.ts.update_ephemeris(self.zero_epoch, self.period, self.duration, self.depth)

    def add_to_fits(self, hdul: HDUList):
        """Append the fit results as header cards to the primary HDU of *hdul*.

        Card names are prefixed TFA/TFE/TFO for the all/even/odd fit modes.
        Does nothing when no fit has been run (self.lpf is None).
        """
        def fn(v):
            # FITS cards cannot store NaN/inf; flag missing values as -1
            return v if isfinite(v) else -1

        if self.lpf is not None:
            p = self.parameters
            c = self.mode[0]
            h = hdul[0].header
            h.append(Card('COMMENT', '======================'))
            h.append(Card('COMMENT', self.title))
            h.append(Card('COMMENT', '======================'))
            h.append(Card(f'TF{c}_T0', p.tc.med, 'Transit centre [BJD]'), bottom=True)
            h.append(Card(f'TF{c}_T0E', p.tc.err, 'Transit centre uncertainty [d]'), bottom=True)
            h.append(Card(f'TF{c}_PR', p.p.med, 'Orbital period [d]'), bottom=True)
            h.append(Card(f'TF{c}_PRE', p.p.err, 'Orbital period uncertainty [d]'), bottom=True)
            h.append(Card(f'TF{c}_RHO', p.rho.med, 'Stellar density [g/cm^3]'), bottom=True)
            h.append(Card(f'TF{c}_RHOE', p.rho.err, 'Stellar density uncertainty [g/cm^3]'), bottom=True)
            h.append(Card(f'TF{c}_B', p.b.med, 'Impact parameter'), bottom=True)
            h.append(Card(f'TF{c}_BE', p.b.err, 'Impact parameter uncertainty'), bottom=True)
            h.append(Card(f'TF{c}_AR', p.k2.med, 'Area ratio'), bottom=True)
            h.append(Card(f'TF{c}_ARE', p.k2.err, 'Area ratio uncertainty'), bottom=True)
            #h.append(Card(f'TF{c}_SC', p.c_sin.med, 'Sine phase'), bottom=True)
            #h.append(Card(f'TF{c}_SCE', p.c_sin.err, 'Sine phase uncertainty'), bottom=True)
            #h.append(Card(f'TF{c}_SA', p.a_sin_0.med, 'Sine amplitude'), bottom=True)
            #h.append(Card(f'TF{c}_SAE', p.a_sin_0.err, 'Sine amplitude uncertainty'), bottom=True)
            h.append(Card(f'TF{c}_RR', p.k.med, 'Radius ratio'), bottom=True)
            h.append(Card(f'TF{c}_RRE', p.k.err, 'Radius ratio uncertainty'), bottom=True)
            h.append(Card(f'TF{c}_A', p.a.med, 'Semi-major axis'), bottom=True)
            h.append(Card(f'TF{c}_AE', p.a.err, 'Semi-major axis uncertainty'), bottom=True)
            h.append(Card(f'TF{c}_T14', fn(p.t14.med), 'Transit duration T14 [d]'), bottom=True)
            h.append(Card(f'TF{c}_T14E', fn(p.t14.err), 'Transit duration T14 uncertainty [d]'), bottom=True)
            h.append(Card(f'TF{c}_T23', fn(p.t23.med), 'Transit duration T23 [d]'), bottom=True)
            h.append(Card(f'TF{c}_T23E', fn(p.t23.err), 'Transit duration T23 uncertainty [d]'), bottom=True)
            # T23/T14 ratio is undefined for grazing transits (t23 is NaN)
            if isfinite(p.t23.med) and isfinite(p.t23.err):
                h.append(Card(f'TF{c}_TDR', p.t23.med / p.t14.med, 'T23 to T14 ratio'), bottom=True)
            else:
                h.append(Card(f'TF{c}_TDR', 0, 'T23 to T14 ratio'), bottom=True)
            h.append(Card(f'TF{c}_WN', 10 ** p.wn_loge_0.med, 'White noise std'), bottom=True)
            h.append(Card(f'TF{c}_GRAZ', p.b.med + p.k.med > 1., 'Is the transit grazing'), bottom=True)

            # per-orbit delta log likelihoods, summarized via log-mean-exp
            # (lm is subtracted/added back for numerical stability)
            ep = self.dll_epochs
            ll = self.dll_values
            lm = ll.max()
            h.append(Card(f'TF{c}_DLLA', log(exp(ll - lm).mean()) + lm, 'Mean per-orbit delta log likelihood'), bottom=True)
            if self.mode == 'all':
                m = ep % 2 == 0
                lm = ll[m].max()
                h.append(Card(f'TFA_DLLO', log(exp(ll[m] - lm).mean()) + lm, 'Mean per-orbit delta log likelihood (odd)'),
                         bottom=True)
                m = ep % 2 != 0
                lm = ll[m].max()
                h.append(Card(f'TFA_DLLE', log(exp(ll[m] - lm).mean()) + lm, 'Mean per-orbit delta log likelihood (even)'),
                         bottom=True)

    @bplot
    def plot_transit_fit(self, ax=None, full_phase: bool = False, mode='all', nbins: int = 20, alpha=0.2):
        """Plot the phase-folded data, binned points, and the fitted model.

        For the 'all' fit the baseline is divided out and the pure transit
        model is drawn; even/odd fits show the full flux model instead.
        """
        zero_epoch, period, duration = self.parameters[['tc', 'p', 't14']].iloc[0].copy()
        hdur = duration * array([-0.5, 0.5])

        phase = self.phase
        sids = argsort(phase)
        phase = phase[sids]
        # restrict to +/- 1.5 durations around the transit unless full_phase is set
        pmask = ones(phase.size, bool) if full_phase else abs(phase) < 1.5 * duration

        if pmask.sum() < 100:
            alpha = 1

        if self.mode == 'all':
            fmod = self.ftra[sids]
            fobs = self.fobs[sids] / self.fbase[sids]
        else:
            fmod = self.fmod[sids]
            fobs = self.fobs[sids]

        ax.plot(24 * phase[pmask], fobs[pmask], '.', alpha=alpha)
        # model drawn twice: wide translucent white halo under a thin black line
        ax.plot(24 * phase[pmask], fmod[pmask], 'w', lw=5, alpha=0.5, zorder=99)
        ax.plot(24 * phase[pmask], fmod[pmask], 'k', zorder=100)

        # if duration > 1 / 24:
        pb, fb, eb = downsample_time(phase[pmask], fobs[pmask], phase[pmask].ptp() / nbins)
        mask = isfinite(pb)
        pb, fb, eb = pb[mask], fb[mask], eb[mask]
        ax.errorbar(24 * pb, fb, eb, fmt='k.')
        ylim = fb.min() - 2 * eb.max(), fb.max() + 2 * eb.max()
        # else:
        #    ylim = fobs[pmask].min(), fobs[pmask].max()

        ax.get_yaxis().get_major_formatter().set_useOffset(False)
        ax.axvline(0, alpha=0.25, ls='--', lw=1)
        # mark the fitted transit ingress/egress times
        [ax.axvline(24 * hd, alpha=0.25, ls='-', lw=1) for hd in hdur]
        ax.autoscale(axis='x', tight='true')
        setp(ax, ylim=ylim, xlabel='Phase [h]', ylabel='Normalised flux')
|
hpparviREPO_NAMEopentsPATH_START.@opents_extracted@opents-master@src@tfstep.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "geodynamics/burnman",
"repo_path": "burnman_extracted/burnman-main/contrib/CHRU2014/__init__.py",
"type": "Python"
}
|
# intentionally left empty
|
geodynamicsREPO_NAMEburnmanPATH_START.@burnman_extracted@burnman-main@contrib@CHRU2014@__init__.py@.PATH_END.py
|
{
"filename": "brewster_G395H_template.py",
"repo_name": "substellar/brewster",
"repo_path": "brewster_extracted/brewster-master/brewster_G395H_template.py",
"type": "Python"
}
|
#!/usr/bin/env python
"""This is Brewster: the golden retriever of smelly atmospheres"""
from __future__ import print_function
import multiprocessing
import time
import numpy as np
import scipy as sp
import emcee
import testkit
import ciamod
import TPmod
import settings
import os
import gc
import sys
import pickle
from scipy import interpolate
from scipy.interpolate import interp1d
from scipy.interpolate import InterpolatedUnivariateSpline
from schwimmbad import MPIPool
__author__ = "Ben Burningham"
__copyright__ = "Copyright 2015 - Ben Burningham"
__credits__ = ["Ben Burningham"]
__license__ = "GPL"
__version__ = "0.1"
__maintainer__ = "Ben Burningham"
__email__ = "burninghamster@gmail.com"
__status__ = "Development"
# This module set up the model arguments the drop these into
# theta(state vector) or runargs
# This version of the brewing file is for Python 3.7
# It requires emcee 3.0rc2 or later
# ---------------------------------------------------------------------------
# Run configuration: target data, wavelength range, cloud/gas setup, T-P
# profile choice, and emcee bookkeeping. Edit this section per retrieval.
# ---------------------------------------------------------------------------
# First get data and parameters for object
# Give the run name
runname = "WISE1935_NC_vsini"
# get the observed spectrum
# Rows after loading: presumably wavelength, flux, flux error — the last two
# get the same unit conversion below. TODO confirm against the data file.
obspec = np.asfortranarray(np.loadtxt("CWISE_J1935_G395H.dat",dtype='d',unpack=True,skiprows=3))
# convert to W/m2/um from erg/s/cm2/A
obspec[1] = obspec[1] * 10.
obspec[2] = obspec[2] * 10.
# Now the wavelength range
# ALLOW for a bit extra beyond data at each end to allow for Vrad
w1 = 2.85
w2 = 5.2
# FWHM of data in microns (WE DON'T USE THIS FOR SPEX DATA.)
# >0 = some value of FWHM for convolving the data
# 0.0 = just spex
# -1 = spex + AKARI + spitzer
# -2 = spex + spitzer
# -3 = spex + L band + spitzer
# check code for full set of options
# 3.0 = JWST G395H, with Vsini included as parameter after vrad in theta
fwhm = 3
# DISTANCE (in parsecs)
dist = 14.43
# How many patches & clouds do we want??
# Must be at least 1 of each, but can turn off cloud below
npatches = 1
nclouds = 1
# set up array for setting patchy cloud answers
do_clouds = np.zeros([npatches], dtype='i')
# Which patches are cloudy
# NOTE: clouds are disabled here (all zeros) even though a cloud is
# described below; the cloud arrays are still required by the runargs.
do_clouds[:] = 0
# set up cloud detail arrays
cloudnum = np.zeros([npatches, nclouds], dtype='i')
cloudtype = np.zeros([npatches, nclouds], dtype='i')
# Now fill cloud details. What kind of clouds and shape are they?
# Cloud types
# 1: slab cloud
# 2: deep thick cloud , we only see the top
# 3: slab with fixed thickness log dP = 0.005 (~1% height)
# 4: deep thick cloud with fixed height log dP = 0.005
# In both cases the cloud properties are density, rg, rsig for real clouds
# and dtau, w0, and power law for cloudnum = 89 or 99 for grey
# See cloudlist.dat for other cloudnum
cloudnum[:,0] = 5
cloudtype[:,0] = 1
# Are we assuming chemical equilibrium, or similarly precomputed gas abundances?
# Or are we retrieving VMRs (0)
chemeq = 0
# Are we doing H- bound-free, free-free continuum opacities?
# (Is the profile going above 3000K in the photosphere?)
do_bff = 0
# Set the profile type. If we're using a fixed one, give the file name.
# This code takes in parameters for T-P profile and spits out the profile
# Type 1 is the knots for a spline
# Type 2 is a Madhusudhan & Seager 2009 parameterised profile, no inversion
# i.e. T1,P1 == T2,P2
# Type 3 is Madhusudhan & Seager 2009 with an inversion
proftype = 1
pfile = "t1700g1000f3.dat"
# set up pressure grids in log(bar) cos its intuitive
logcoarsePress = np.arange(-4.0, 2.5, 0.53)
logfinePress = np.arange(-4.0, 2.4, 0.1)
# but forward model wants pressure in bar
coarsePress = pow(10,logcoarsePress)
press = pow(10,logfinePress)
# Where are the cross sections?
# give the full path
xpath = "/beegfs/car/bb/Linelists/"
xlist = 'gaslistR30K.dat' #The gaslistR10k better. Rox is sampled at 10k (rather than interpolated to 10k), but they don't fit the data as well
# now the cross sections
# Now list the gases.
# If Na is after K, at the end of the list, alkalis will be tied
# together at Asplund solar ratio. See Line at al (2015)
# Else if K is after Na, they'll be separate
gaslist = ['h2o','ch4','co','co2','nh3','h2s','ph3']
ngas = len(gaslist)
# some switches for alternative cross sections
# Use Mike's (Burrows) Alkalis?
# Use Allard (=0), Burrow's (=1), and new Allard (=2)
malk = 0
# now set up the EMCEE stuff
# How many dimensions??? Count them up in the p0 declaration. Carefully
ndim = 26
# How many walkers we running?
nwalkers = ndim * 16
# How many burn runs do we want before reset?
nburn = 10000
# How many iterations are we running?
niter = 30000
# Is this a test or restart?
runtest = 1
# Are we writing the arguments to a pickle?
# Set = 0 for no and run; = 1 for write and exit (no run); = 2 for write and continue
# option 2 may cause a memory issue and crash a production run
make_arg_pickle = 2
# Where is the output going?
outdir = "/beegfs/car/bb/"
# Are we using DISORT for radiative transfer?
# (HINT: Not in this century)
use_disort = 0
# use the fudge factor / tolerance parameter?
do_fudge = 1
# Names for the final output files:
# full final sampler with likelihoods, chain, bells and whistles
finalout = runname+".pk1"
# periodic dumps/snapshots
# just the chain
chaindump = runname+"_last_nc.pic"
# The whole thing w/ probs
picdump = runname+"_snapshot.pic"
# Names for status file runtimes
statfile = "status_ball"+runname+".txt"
rfile = "runtimes_"+runname+".dat"
# scale factor r2d2 from distance, for a 1 Rj radius
# (Rj in metres)^2 / (distance in metres)^2
r2d2 = (71492e3)**2. / (dist * 3.086e+16)**2.
# If we want fresh guess set to 0, total inherit the previous set 1
# inherit plus randomise the VMRs. 2. See below to enter this filename
fresh = 0
# ---------------------------------------------------------------------------
# Initial walker positions.
# theta layout: gas VMRs (7), logg, R2D2, vrad, vsini, tolerance,
#               gamma, 13 T-P knot temperatures  -> ndim = 26
# ---------------------------------------------------------------------------
p0 = np.empty([nwalkers, ndim])
if (fresh == 0):
    # ----- "Gas" parameters (gases, gravity, scale factor, vrad, vsini,
    # and tolerance parameter) — for non-chemical-equilibrium retrieval.
    p0[:, 0] = (0.5*np.random.randn(nwalkers).reshape(nwalkers)) - 3.5  # H2O
    p0[:, 1] = (0.5*np.random.randn(nwalkers).reshape(nwalkers)) - 4.0  # CH4
    p0[:, 2] = (0.5*np.random.randn(nwalkers).reshape(nwalkers)) - 4.0  # CO
    p0[:, 3] = (0.5*np.random.randn(nwalkers).reshape(nwalkers)) - 4.0  # CO2
    p0[:, 4] = (0.5*np.random.randn(nwalkers).reshape(nwalkers)) - 4.0  # NH3
    p0[:, 5] = (0.5*np.random.randn(nwalkers).reshape(nwalkers)) - 4.0  # H2S
    p0[:, 6] = (1.0*np.random.randn(nwalkers).reshape(nwalkers)) - 4.0  # PH3
    p0[:, 7] = 0.1*np.random.randn(nwalkers).reshape(nwalkers) + 4.5    # gravity (logg)
    p0[:, 8] = r2d2 + (np.random.randn(nwalkers).reshape(nwalkers) * (0.1*r2d2))  # R^2/D^2
    p0[:, 9] = np.random.randn(nwalkers).reshape(nwalkers) * 10.        # vrad
    p0[:, 10] = 20. + (np.random.randn(nwalkers).reshape(nwalkers)*5.)  # vsini, only for fwhm == 3
    # tolerance parameter 1
    p0[:, 11] = np.log10((np.random.rand(nwalkers).reshape(nwalkers) * (max(obspec[2, :]**2)*(0.1 - 0.01))) + (0.01*min(obspec[2, 10::3]**2)))
    # For profile type 1
    p0[:, ndim-14] = 50. + (np.random.randn(nwalkers).reshape(nwalkers))  # gamma - removes wiggles unless necessary to profile
    BTprof = np.loadtxt("BTtemp800_45_13.dat")  # seed temperatures for the 13 knots
    for i in range(0, 13):  # 13 layer points
        p0[:, ndim-13 + i] = (BTprof[i] / 2) + (50. * np.random.randn(nwalkers).reshape(nwalkers))
    # Validate each walker's T-P profile and re-draw it until physical.
    # BUG FIX: the original inner loop reused the loop variable `i`,
    # clobbering the walker index (validation then continued on walker 12)
    # and re-randomised *all* walkers — including already-validated ones —
    # on every failure. Now only the offending walker is re-drawn.
    for iwalk in range(0, nwalkers):
        while True:
            Tcheck = TPmod.set_prof(proftype, coarsePress, press, p0[iwalk, ndim-13:])
            if min(Tcheck) > 1.0:
                break
            p0[iwalk, ndim-13:] = (BTprof / 2) + (50. * np.random.randn(13))
# If we're doing profile type 1, we need to replace the last TP entries with
# this stuff..... (kept from the original template for reference)
#p0[:, ndim-14] = 50. + (np.random.randn(nwalkers).reshape(nwalkers))
# gamma - removes wiggles unless necessary to profile
#BTprof = np.loadtxt("BTtemp800_45_13.dat")
#for i in range(0, 13): # 13 layer points
#    p0[:,ndim-13 + i] = (BTprof[i] - 200.) + (150. * np.random.randn(nwalkers).reshape(nwalkers))
if (fresh != 0):
    # Restart: inherit walker positions from the last chain snapshot.
    fname = chaindump
    with open(fname, 'rb') as fh:  # was left open before; now closed properly
        p0 = pickle.load(fh)
    if (fresh == 2):
        # ...and jitter the first nine parameters (VMRs, logg, R2D2).
        for i in range(0, 9):
            p0[:, i] = (np.random.rand(nwalkers).reshape(nwalkers)*0.5) + p0[:, i]
# Fixed-profile placeholder; only filled in when proftype == 9.
prof = np.full(13, 100.)
if proftype == 9:
    # Read a precomputed T-P profile and interpolate onto the coarse grid.
    modP, modT = np.loadtxt(pfile, skiprows=1, usecols=(1, 2), unpack=True)
    tfit = InterpolatedUnivariateSpline(np.log10(modP), modT, k=1)
    prof = tfit(logcoarsePress)
# ---------------------------------------------------------------------------
# Build the forward-model inputs, set up MPI, and run the emcee sampler.
# ---------------------------------------------------------------------------
# Now we'll get the opacity files into an array
inlinetemps, inwavenum, linelist, gasnum, nwave = testkit.get_opacities(gaslist, w1, w2, press, xpath, xlist, malk)
# Get the cia bits (collision-induced absorption tables)
tmpcia, ciatemps = ciamod.read_cia("CIA_DS_aug_2015.dat", inwavenum)
cia = np.asfortranarray(np.empty((4, ciatemps.size, nwave)), dtype='float32')
cia[:, :, :] = tmpcia[:, :, :nwave]
ciatemps = np.asfortranarray(ciatemps, dtype='float32')
# grab BFF and Chemical grids
bff_raw, ceTgrid, metscale, coscale, gases_myP = testkit.sort_bff_and_CE(chemeq, "chem_eq_tables_P3K.pic", press, gaslist)
# Stash everything the likelihood needs in the shared settings module.
settings.init()
settings.runargs = gases_myP,chemeq,dist, cloudtype,do_clouds,gasnum,cloudnum,inlinetemps,coarsePress,press,inwavenum,linelist,cia,ciatemps,use_disort,fwhm,obspec,proftype,do_fudge, prof,do_bff,bff_raw,ceTgrid,metscale,coscale
# Now we set up the MPI bits: worker ranks block in wait() and exit when done.
pool = MPIPool()
if not pool.is_master():
    pool.wait()
    sys.exit()
# Write the arguments to a pickle if needed
if (make_arg_pickle > 0):
    # use a context manager so the file handle is always closed
    with open(outdir + runname + "_runargs.pic", "wb") as argfile:
        pickle.dump(settings.runargs, argfile)
    if (make_arg_pickle == 1):
        sys.exit()
sampler = emcee.EnsembleSampler(nwalkers, ndim, testkit.lnprob, pool=pool)
# run the sampler
print("running the sampler")
clock = np.empty(80000)  # per-iteration wall-clock timestamps
k = 0
# truncate any previous runtimes file
with open(rfile, "w"):
    pass
# Optional burn-in: run, reset the chain, and restart from final positions.
if runtest == 0 and fresh == 0:
    pos, prob, state = sampler.run_mcmc(p0, nburn)
    sampler.reset()
    p0 = pos
# Iterations at which a snapshot of the chain is written to disk.
SNAPSHOT_STEPS = {10, 1000, 1500, 2000, 2500, 3000, 3500, 4000, 4500, 5000,
                  6000, 7000, 8000, 9000, 10000, 11000, 12000, 15000, 18000,
                  19000, 20000, 21000, 22000, 23000, 24000, 25000, 26000,
                  27000, 28000, 29000, 30000, 35000, 40000, 45000, 50000,
                  55000, 60000, 65000}
for result in sampler.sample(p0, iterations=niter):
    clock[k] = time.perf_counter()
    if k > 1:
        # Log the wall-clock duration of the previous cycle.
        tcycle = clock[k] - clock[k-1]
        with open(rfile, "a") as times:
            times.write("*****TIME FOR CYCLE*****\n")
            times.write(str(tcycle))
    k += 1
    # Overwrite the status file with the current state of the run.
    with open(statfile, "w") as f:
        f.write("****Iteration*****")
        f.write(str(k))
        f.write("****Reduced Chi2*****")
        f.write(str(result.log_prob * -2.0/(obspec.shape[1] / 3.0)))
        f.write("****Accept Fraction*****")
        f.write(str(sampler.acceptance_fraction))
        f.write("*****Values****")
        f.write(str(result.coords))
    if k in SNAPSHOT_STEPS:
        # Periodic snapshot: full chain + log-probabilities, plus the last
        # walker positions on their own for cheap restarts.
        chain = sampler.chain
        lnprob = sampler.lnprobability
        output = [chain, lnprob]
        with open(outdir + picdump, "wb") as snapfile:
            pickle.dump(output, snapfile)
        with open(chaindump, 'wb') as chainfile:
            pickle.dump(chain[:, k-1, :], chainfile)
# get rid of problematic bit of sampler object (the MPI pool can't be pickled)
del sampler.__dict__['pool']
def save_object(obj, filename):
    """Pickle *obj* to *filename* using the highest protocol."""
    with open(filename, "wb") as output:
        pickle.dump(obj, output, pickle.HIGHEST_PROTOCOL)
pool.close()
save_object(sampler, outdir + finalout)
|
substellarREPO_NAMEbrewsterPATH_START.@brewster_extracted@brewster-master@brewster_G395H_template.py@.PATH_END.py
|
{
"filename": "test_scale.py",
"repo_name": "statsmodels/statsmodels",
"repo_path": "statsmodels_extracted/statsmodels-main/statsmodels/robust/tests/test_scale.py",
"type": "Python"
}
|
"""
Test functions for models.robust.scale
"""
import os
import numpy as np
from numpy.random import standard_normal
from numpy.testing import assert_almost_equal, assert_equal, assert_allclose
import pytest
import pandas as pd
from scipy.stats import norm as Gaussian
from scipy import stats
import statsmodels.api as sm
import statsmodels.robust.scale as scale
from statsmodels.robust.scale import mad, scale_tau
import statsmodels.robust.norms as rnorms
# Directory of this test module; used to locate the CSV fixtures.
cur_dir = os.path.abspath(os.path.dirname(__file__))
file_name = 'hbk.csv'
file_path = os.path.join(cur_dir, 'results', file_name)
# Hawkins-Bradu-Kass data, shared by the tau-scale and M-scale tests below.
dta_hbk = pd.read_csv(file_path)
# Example from Section 5.5, Venables & Ripley (2002)
DECIMAL = 4  # decimal places used by assert_almost_equal throughout
# TODO: Can replicate these tests using stackloss data and R if this
# data is a problem
class TestChem:
    """Robust scale/location estimators on the V&R copper (`chem`) data."""

    @classmethod
    def setup_class(cls):
        # Copper content data, Venables & Ripley (2002), Section 5.5.
        cls.chem = np.array([
            2.20, 2.20, 2.4, 2.4, 2.5, 2.7, 2.8, 2.9, 3.03, 3.03, 3.10, 3.37,
            3.4, 3.4, 3.4, 3.5, 3.6, 3.7, 3.7, 3.7, 3.7, 3.77, 5.28, 28.95,
        ])

    def test_mean(self):
        assert_almost_equal(np.mean(self.chem), 4.2804, DECIMAL)

    def test_median(self):
        assert_almost_equal(np.median(self.chem), 3.385, DECIMAL)

    def test_mad(self):
        assert_almost_equal(scale.mad(self.chem), 0.52632, DECIMAL)

    def test_iqr(self):
        assert_almost_equal(scale.iqr(self.chem), 0.68570, DECIMAL)

    def test_qn(self):
        assert_almost_equal(scale.qn_scale(self.chem), 0.73231, DECIMAL)

    def test_huber_scale(self):
        assert_almost_equal(scale.huber(self.chem)[0], 3.20549, DECIMAL)

    def test_huber_location(self):
        assert_almost_equal(scale.huber(self.chem)[1], 0.67365, DECIMAL)

    def test_huber_huberT(self):
        # A Huber estimator built from a HuberT norm with t=1.5 must match
        # the convenience function scale.huber.
        norm = scale.norms.HuberT()
        norm.t = 1.5
        estimator = scale.Huber(norm=norm)
        expected = scale.huber(self.chem)
        got = estimator(self.chem)
        assert_almost_equal(expected[0], got[0], DECIMAL)
        assert_almost_equal(expected[1], got[1], DECIMAL)

    def test_huber_Hampel(self):
        hampel_est = scale.Huber(norm=scale.norms.Hampel())
        location, spread = hampel_est(self.chem)
        assert_almost_equal(location, 3.17434, DECIMAL)
        assert_almost_equal(spread, 0.66782, DECIMAL)
class TestMad:
    """MAD on a random matrix: output shape, empty input, and `center`."""

    @classmethod
    def setup_class(cls):
        np.random.seed(54321)
        cls.X = standard_normal((40, 10))

    def test_mad(self):
        assert_equal(scale.mad(self.X).shape, (10,))

    def test_mad_empty(self):
        # Fully empty 1-d input yields NaN; empty axes reduce to empty output.
        assert np.isnan(scale.mad(np.empty(0)))
        assert_equal(scale.mad(np.empty((10, 100, 0)), axis=1),
                     np.empty((10, 0)))
        assert_equal(scale.mad(np.empty((100, 100, 0, 0)), axis=-1),
                     np.empty((100, 100, 0)))

    def test_mad_center(self):
        zero_centered = scale.mad(self.X, center=0)
        assert_equal(zero_centered.shape, (10,))
        # A non-callable, non-numeric center is rejected.
        with pytest.raises(TypeError):
            scale.mad(self.X, center=None)
        expected = np.median(np.abs(self.X - 1), axis=0) / Gaussian.ppf(3 / 4.0)
        assert_almost_equal(scale.mad(self.X, center=1), expected, DECIMAL)
class TestMadAxes:
    """MAD over each axis of a 3-d array returns the reduced shape."""

    @classmethod
    def setup_class(cls):
        np.random.seed(54321)
        cls.X = standard_normal((40, 10, 30))

    def test_axis0(self):
        assert_equal(scale.mad(self.X, axis=0).shape, (10, 30))

    def test_axis1(self):
        assert_equal(scale.mad(self.X, axis=1).shape, (40, 30))

    def test_axis2(self):
        assert_equal(scale.mad(self.X, axis=2).shape, (40, 10))

    def test_axisneg1(self):
        assert_equal(scale.mad(self.X, axis=-1).shape, (40, 10))
class TestIqr:
    """IQR scale estimator: output shape and degenerate-input behaviour."""

    @classmethod
    def setup_class(cls):
        np.random.seed(54321)
        cls.X = standard_normal((40, 10))

    def test_iqr(self):
        assert_equal(scale.iqr(self.X).shape, (10,))

    def test_iqr_empty(self):
        # Empty 1-d input yields NaN; empty axes reduce to empty output;
        # a 0-d array is rejected outright.
        assert np.isnan(scale.iqr(np.empty(0)))
        assert_equal(scale.iqr(np.empty((10, 100, 0)), axis=1),
                     np.empty((10, 0)))
        assert_equal(scale.iqr(np.empty((100, 100, 0, 0)), axis=-1),
                     np.empty((100, 100, 0)))
        with pytest.raises(ValueError):
            scale.iqr(np.empty(shape=()))
class TestIqrAxes:
    """IQR over each axis of a 3-d array returns the reduced shape."""

    @classmethod
    def setup_class(cls):
        np.random.seed(54321)
        cls.X = standard_normal((40, 10, 30))

    def test_axis0(self):
        assert_equal(scale.iqr(self.X, axis=0).shape, (10, 30))

    def test_axis1(self):
        assert_equal(scale.iqr(self.X, axis=1).shape, (40, 30))

    def test_axis2(self):
        assert_equal(scale.iqr(self.X, axis=2).shape, (40, 10))

    def test_axisneg1(self):
        assert_equal(scale.iqr(self.X, axis=-1).shape, (40, 10))
class TestQn:
    """Tests for the Qn robust scale estimator (Rousseeuw & Croux)."""

    @classmethod
    def setup_class(cls):
        np.random.seed(54321)
        cls.normal = standard_normal(size=40)
        cls.range = np.arange(0, 40)
        cls.exponential = np.random.exponential(size=40)
        cls.stackloss = sm.datasets.stackloss.load_pandas().data
        cls.sunspot = sm.datasets.sunspots.load_pandas().data.SUNACTIVITY

    def test_qn_naive(self):
        # The fast implementation must agree with the naive O(n^2) reference.
        assert_almost_equal(
            scale.qn_scale(self.normal), scale._qn_naive(self.normal), DECIMAL
        )
        assert_almost_equal(
            scale.qn_scale(self.range), scale._qn_naive(self.range), DECIMAL
        )
        assert_almost_equal(
            scale.qn_scale(self.exponential),
            scale._qn_naive(self.exponential),
            DECIMAL,
        )

    def test_qn_robustbase(self):
        # from R's robustbase with finite.corr = FALSE
        assert_almost_equal(scale.qn_scale(self.range), 13.3148, DECIMAL)
        assert_almost_equal(
            scale.qn_scale(self.stackloss),
            np.array([8.87656, 8.87656, 2.21914, 4.43828]),
            DECIMAL,
        )
        # sunspot.year from datasets in R only goes up to 289
        assert_almost_equal(
            scale.qn_scale(self.sunspot[0:289]), 33.50901, DECIMAL
        )

    def test_qn_empty(self):
        empty = np.empty(0)
        assert np.isnan(scale.qn_scale(empty))
        empty = np.empty((10, 100, 0))
        assert_equal(scale.qn_scale(empty, axis=1), np.empty((10, 0)))
        empty = np.empty((100, 100, 0, 0))
        assert_equal(scale.qn_scale(empty, axis=-1), np.empty((100, 100, 0)))
        empty = np.empty(shape=())
        with pytest.raises(ValueError):
            # BUG FIX: this previously called scale.iqr here (copy-paste from
            # TestIqr); a 0-d input to qn_scale is what this test is about.
            scale.qn_scale(empty)
class TestQnAxes:
    """Qn scale over each axis of a 3-d array returns the reduced shape."""

    @classmethod
    def setup_class(cls):
        np.random.seed(54321)
        cls.X = standard_normal((40, 10, 30))

    def test_axis0(self):
        assert_equal(scale.qn_scale(self.X, axis=0).shape, (10, 30))

    def test_axis1(self):
        assert_equal(scale.qn_scale(self.X, axis=1).shape, (40, 30))

    def test_axis2(self):
        assert_equal(scale.qn_scale(self.X, axis=2).shape, (40, 10))

    def test_axisneg1(self):
        assert_equal(scale.qn_scale(self.X, axis=-1).shape, (40, 10))
class TestHuber:
    """Joint Huber location/scale estimation on a random matrix."""

    @classmethod
    def setup_class(cls):
        np.random.seed(54321)
        cls.X = standard_normal((40, 10))

    def test_huber_result_shape(self):
        location, spread = scale.Huber(maxiter=100)(self.X)
        assert_equal(location.shape, (10,))
class TestHuberAxes:
    """Huber estimation along each axis of a 3-d array."""

    @classmethod
    def setup_class(cls):
        np.random.seed(54321)
        cls.X = standard_normal((40, 10, 30))
        cls.h = scale.Huber(maxiter=100, tol=1.0e-05)

    def test_default(self):
        location, _ = self.h(self.X, axis=0)
        assert_equal(location.shape, (10, 30))

    def test_axis1(self):
        location, _ = self.h(self.X, axis=1)
        assert_equal(location.shape, (40, 30))

    def test_axis2(self):
        location, _ = self.h(self.X, axis=2)
        assert_equal(location.shape, (40, 10))

    def test_axisneg1(self):
        location, _ = self.h(self.X, axis=-1)
        assert_equal(location.shape, (40, 10))
def test_mad_axis_none():
    # GH 7027: axis=None must behave like flattening the input first.
    data = np.array([[0, 1, 2], [2, 3, 2]])
    whole = mad(a=data, axis=None)
    with_callable_center = mad(a=data, axis=None, center=lambda x: np.median(x))
    flattened = mad(a=data.ravel(), axis=0)
    np.testing.assert_allclose(whole, with_callable_center)
    np.testing.assert_allclose(whole, flattened)
def test_tau_scale1():
    data = [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 1000.0]
    # Reference values from R robustbase:
    # > scaleTau2(x, mu.too = TRUE, consistency = FALSE)
    expected = [4.09988889476747, 2.82997006475080]
    assert_allclose(scale_tau(data, normalize=False, ddof=0), expected,
                    rtol=1e-13)
    # > scaleTau2(x, mu.too = TRUE)
    expected = [4.09988889476747, 2.94291554004125]
    assert_allclose(scale_tau(data, ddof=0), expected, rtol=1e-13)
def test_tau_scale2():
    """Tau-scale of the Hawkins-Bradu-Kass data against R robustbase."""
    # Use the module-level `dta_hbk` fixture (loaded at the top of this
    # file, and already used by TestMScale); the local `import pandas` and
    # re-read of hbk.csv here were redundant.
    # from R robustbase
    # > scaleTau2(hbk[,1], mu.too = TRUE, consistency = FALSE)
    # [1] 1.55545438650723 1.93522607240954
    # > scaleTau2(hbk[,2], mu.too = TRUE, consistency = FALSE)
    # [1] 1.87924505206092 1.72121373687210
    # > scaleTau2(hbk[,3], mu.too = TRUE, consistency = FALSE)
    # [1] 1.74163126730520 1.81045973143159
    # > scaleTau2(hbk[,4], mu.too = TRUE, consistency = FALSE)
    # [1] -0.0443521228044396 0.8343974588144727
    res2 = np.array([
        [1.55545438650723, 1.93522607240954],
        [1.87924505206092, 1.72121373687210],
        [1.74163126730520, 1.81045973143159],
        [-0.0443521228044396, 0.8343974588144727]
    ])
    res1 = scale_tau(dta_hbk, normalize=False, ddof=0)
    assert_allclose(np.asarray(res1).T, res2, rtol=1e-13)
    # > scaleTau2(hbk[,1], mu.too = TRUE, consistency = TRUE)
    # [1] 1.55545438650723 2.01246188181448
    # > scaleTau2(hbk[,2], mu.too = TRUE, consistency = TRUE)
    # [1] 1.87924505206092 1.78990821036102
    # > scaleTau2(hbk[,3], mu.too = TRUE, consistency = TRUE)
    # [1] 1.74163126730520 1.88271605576794
    # > scaleTau2(hbk[,4], mu.too = TRUE, consistency = TRUE)
    # [1] -0.0443521228044396 0.8676986653327993
    res2 = np.array([
        [1.55545438650723, 2.01246188181448],
        [1.87924505206092, 1.78990821036102],
        [1.74163126730520, 1.88271605576794],
        [-0.0443521228044396, 0.8676986653327993]
    ])
    res1 = scale_tau(dta_hbk, ddof=0)
    assert_allclose(np.asarray(res1).T, res2, rtol=1e-13)
def test_scale_iter():
    """Iterative M-scale: regression values and MScale equivalence."""
    # regression test, and approximately correct
    np.random.seed(54321)
    v = np.array([1, 0.5, 0.4])
    x = standard_normal((40, 3)) * np.sqrt(v)
    x[:2] = [2, 2, 2]  # a couple of outlying rows
    x = x[:, 0]  # 1d only ?
    v = v[0]

    # IMPROVED: a proper def instead of a lambda assignment (PEP 8 / E731),
    # which also drops the need for the `noqa` marker.
    def meef_scale(x):
        return rnorms.TukeyBiweight().rho(x)

    scale_bias = 0.43684963023076195
    s = scale._scale_iter(x, meef_scale=meef_scale, scale_bias=scale_bias)
    assert_allclose(s, v, rtol=1e-1)
    assert_allclose(s, 1.0683298, rtol=1e-6)  # regression test number

    # The MScale class with the same chi function must agree exactly.
    chi = rnorms.TukeyBiweight()
    scale_bias = 0.43684963023076195
    mscale_biw = scale.MScale(chi, scale_bias)
    s_biw = mscale_biw(x)
    assert_allclose(s_biw, s, rtol=1e-10)

    # regression test with 50% breakdown tuning
    chi = rnorms.TukeyBiweight(c=1.547)
    scale_bias = 0.1995
    mscale_biw = scale.MScale(chi, scale_bias)
    s_biw = mscale_biw(x)
    assert_allclose(s_biw, 1.0326176662, rtol=1e-9)  # regression test number
class TestMScale():
    # M-estimators of scale: equivalence and regression checks.

    def test_huber_equivalence(self):
        """HuberScale and a TrimmedMean M-scale must agree on normal data."""
        np.random.seed(54321)
        nobs = 50
        x = 1.5 * standard_normal(nobs)
        # test equivalence of HuberScale and TrimmedMean M-scale
        chi_tm = rnorms.TrimmedMean(c=2.5)
        scale_bias_tm = 0.4887799917273257  # scale bias constant for c=2.5
        mscale_tm = scale.MScale(chi_tm, scale_bias_tm)
        s_tm = mscale_tm(x)
        mscale_hub = scale.HuberScale()
        s_hub = mscale_hub(nobs, nobs, x)  # HuberScale signature: (df_resid, nobs, resid)
        assert_allclose(s_tm, s_hub, rtol=1e-6)

    def test_biweight(self):
        """Tukey biweight M-scale reproduces R RobStatTM's scaleM."""
        y = dta_hbk["Y"].to_numpy()
        ry = y - np.median(y)  # residuals centered at the median
        chi = rnorms.TukeyBiweight(c=1.54764)
        scale_bias = 0.19959963130721095  # 50% breakdown tuning
        mscale_biw = scale.MScale(chi, scale_bias)
        scale0 = mscale_biw(ry)
        scale1 = 0.817260483784376  # from R RobStatTM scaleM
        assert_allclose(scale0, scale1, rtol=1e-6)
def test_scale_trimmed_approx():
    """Trimmed scale estimate recovers the true sigma despite outliers."""
    trimmed = scale.scale_trimmed  # shorthand
    nobs = 500
    np.random.seed(965578)
    x = 2 * np.random.randn(nobs)
    x[:10] = 60  # inject gross outliers
    alpha = 0.2
    assert_allclose(trimmed(x, alpha).scale, 2, rtol=1e-1)
    # Column-wise over a 2-d input, and along axis=1 of its transpose.
    two_col = np.column_stack((x, 2 * x))
    assert_allclose(trimmed(two_col, alpha).scale, [2, 4], rtol=1e-1)
    assert_allclose(trimmed(two_col.T, alpha, axis=1).scale, [2, 4], rtol=1e-1)
    # axis=None flattens, matching an explicit ravel.
    same_col = np.column_stack((x, x))
    s_none = trimmed(same_col, alpha, axis=None).scale
    assert_allclose(s_none, [2], rtol=1e-1)
    s_flat = trimmed(same_col.ravel(), alpha).scale
    assert_allclose(s_flat, [2], rtol=1e-1)
    assert_allclose(s_flat, s_none, rtol=1e-1)
    # A near-normal t distribution gives the same answer.
    s_t = trimmed(x, alpha, distr=stats.t, distargs=(100,)).scale
    assert_allclose(s_t, [2], rtol=1e-1)
|
statsmodelsREPO_NAMEstatsmodelsPATH_START.@statsmodels_extracted@statsmodels-main@statsmodels@robust@tests@test_scale.py@.PATH_END.py
|
{
"filename": "test_ldp_extended.py",
"repo_name": "lwa-project/lsl",
"repo_path": "lsl_extracted/lsl-main/tests/test_ldp_extended.py",
"type": "Python"
}
|
"""
Extended unit test for the lsl.reader.ldp module.
"""
import os
import unittest
import tempfile
import shutil
import subprocess
from lsl.reader import ldp
from lsl.reader import errors
from lsl.reader.utils import SplitFileWrapper
from lsl.common.data_access import download_file
__version__ = "0.1"
__author__ = "Jayce Dowell"
# Only run these tests when explicitly enabled — they download large data
# files.  GitHub Actions CI sets this environment variable.
run_extended_tests = False
if os.getenv('GITHUB_ACTIONS', None) is not None:
    run_extended_tests = True
# Remote raw-data files exercised by the tests below.
_TBN_URL = 'https://lda10g.alliance.unm.edu/tutorial/Meteors/056761_000099453'
_DRX_URL = 'https://lda10g.alliance.unm.edu/tutorial/UnknownPulsar/056227_000024985_DRX.dat'
_SPC_URL = 'https://lda10g.alliance.unm.edu/tutorial/B0329+54/056770_000044687'
# Local cache locations for the downloaded files.
tbnFile = os.path.join(os.path.dirname(__file__), 'data', 'tbn-extended.dat')
drxFile = os.path.join(os.path.dirname(__file__), 'data', 'drx-extended.dat')
drspecFile = os.path.join(os.path.dirname(__file__), 'data', 'drspec-extended.dat')
@unittest.skipUnless(run_extended_tests, "requires appropriate environment variable to be set")
class extended_ldp_tests(unittest.TestCase):
    """An extended unittest.TestCase collection of unit tests for the lsl.reader.ldp
    module."""

    def setUp(self):
        """Download the files (first 250 MB of each, only if not cached)."""
        for filename, url in zip((tbnFile, drxFile, drspecFile), (_TBN_URL, _DRX_URL, _SPC_URL)):
            if not os.path.exists(filename):
                download_file(url, filename, byte_range=[0, 250*1024*1024])

    def tearDown(self):
        """Cleanup: close any LDP file handlers left open by a test."""
        # BUG FIX: this method was previously named `testDown`, so unittest
        # discovered and ran it as a *test* (first, by sort order) instead of
        # invoking it after each test as the tearDown fixture hook.
        for handler in list(ldp._open_ldp_files.handlers):
            handler.close()

    def test_tbn_estimate(self):
        """Test estimating power levels in a TBN file."""
        idf = ldp.TBNFile(tbnFile)
        offset = idf.offset(0.1)  # This file needs a skip at the beginning
        levels = idf.estimate_levels()
        self.assertEqual(len(levels), 520)

    def test_tbn_read(self):
        """Test more involved reading from a TBN file."""
        idf = ldp.TBNFile(tbnFile)
        offset = idf.offset(0.1)  # This file needs a skip at the beginning
        # Read some
        for i in range(21):
            tInt, tStart, data = idf.read(0.1)
        idf.close()

    def test_tbn_offset(self):
        """Test offsetting inside a TBN file."""
        idf = ldp.TBNFile(tbnFile)
        offset = idf.offset(0.1)  # This file needs a skip at the beginning
        # Jump forwards
        fileStart = idf.start_time
        offset = idf.offset(0.1)
        # Read and verify the new position (ns-level agreement)
        tInt, tStart, data = idf.read(0.11)
        self.assertAlmostEqual(tStart, fileStart+offset, 9)
        # Jump forwards
        fileStart = tStart + tInt
        offset = idf.offset(0.1)
        # Read
        tInt, tStart, data = idf.read(0.12)
        self.assertAlmostEqual(tStart, fileStart+offset, 9)
        # Jump backwards
        fileStart = tStart + tInt
        offset = idf.offset(-0.15)
        # Read
        tInt, tStart, data = idf.read(0.1)
        self.assertAlmostEqual(tStart, fileStart+offset, 9)
        idf.close()

    def test_drx_estimate(self):
        """Test estimating power levels in a DRX file."""
        idf = ldp.DRXFile(drxFile)
        levels = idf.estimate_levels()
        self.assertEqual(len(levels), 4)

    def test_drx_read(self):
        """Test more involved reading from a DRX file."""
        idf = ldp.DRXFile(drxFile)
        # Read some
        for i in range(21):
            tInt, tStart, data = idf.read(0.1)
        idf.close()

    def test_drx_offset(self):
        """Test offsetting inside a DRX file."""
        idf = ldp.DRXFile(drxFile)
        # Jump forwards
        fileStart = idf.start_time
        offset = idf.offset(0.1)
        # Read
        tInt, tStart, data = idf.read(0.09)
        self.assertAlmostEqual(tStart, fileStart+offset, 9)
        # Jump forwards
        fileStart = tStart + tInt
        offset = idf.offset(0.1)
        # Read
        tInt, tStart, data = idf.read(0.11)
        self.assertAlmostEqual(tStart, fileStart+offset, 9)
        # Jump backwards
        fileStart = tStart + tInt
        offset = idf.offset(-0.15)
        # Read
        tInt, tStart, data = idf.read(0.12)
        self.assertAlmostEqual(tStart, fileStart+offset, 9)
        idf.close()

    def test_drspec_read(self):
        """Test more involved reading from a DR Spectrometer file."""
        idf = ldp.DRSpecFile(drspecFile)
        # Read some
        for i in range(21):
            tInt, tStart, data = idf.read(0.1)
        idf.close()

    def test_drspec_offset(self):
        """Test offsetting inside a DR Spectrometer file."""
        idf = ldp.DRSpecFile(drspecFile)
        # Jump forwards
        fileStart = idf.start_time
        offset = idf.offset(0.1)
        # Read
        tInt, tStart, data = idf.read(0.1)
        self.assertAlmostEqual(tStart, fileStart+offset, 9)
        # Jump forwards
        fileStart = tStart + tInt
        offset = idf.offset(0.1)
        # Read
        tInt, tStart, data = idf.read(0.1)
        self.assertAlmostEqual(tStart, fileStart+offset, 9)
        # Jump backwards
        fileStart = tStart + tInt
        offset = idf.offset(-0.15)
        # Read
        tInt, tStart, data = idf.read(0.1)
        self.assertAlmostEqual(tStart, fileStart+offset, 9)
        idf.close()
class extended_ldp_test_suite(unittest.TestSuite):
    """A unittest.TestSuite bundling all extended lsl.reader.ldp unit tests."""

    def __init__(self):
        super().__init__()
        loader = unittest.TestLoader()
        self.addTests(loader.loadTestsFromTestCase(extended_ldp_tests))
# Allow running this test module directly with `python test_ldp_extended.py`.
if __name__ == '__main__':
    unittest.main()
|
lwa-projectREPO_NAMElslPATH_START.@lsl_extracted@lsl-main@tests@test_ldp_extended.py@.PATH_END.py
|
{
"filename": "codespaces.md",
"repo_name": "matplotlib/matplotlib",
"repo_path": "matplotlib_extracted/matplotlib-main/doc/devel/codespaces.md",
"type": "Markdown"
}
|
# Contributing to Matplotlib using GitHub codespaces
* For a general overview of contributing to Matplotlib, see https://matplotlib.org/devdocs/devel/index.html
* For instructions on how to submit Pull Requests using GitHub codespaces, see https://matplotlib.org/devdocs/devel/contribute.html#contributing-code
* For instructions on running tests to verify your changes, see https://matplotlib.org/devdocs/devel/testing.html
* For instructions on building the Matplotlib documentation, see https://matplotlib.org/devdocs/devel/document.html#documenting-matplotlib
|
matplotlibREPO_NAMEmatplotlibPATH_START.@matplotlib_extracted@matplotlib-main@doc@devel@codespaces.md@.PATH_END.py
|
{
"filename": "python-reference_catboostregressor_grid_search.md",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/catboost/docs/en/concepts/python-reference_catboostregressor_grid_search.md",
"type": "Markdown"
}
|
# grid_search
{% include [grid_search-grid-search__div__desc](../_includes/work_src/reusage-python/grid-search__div__desc.md) %}
## {{ dl--invoke-format }} {#method-call-format}
```python
grid_search(param_grid,
X,
y=None,
cv=3,
partition_random_seed=0,
calc_cv_statistics=True,
search_by_train_test_split=True,
refit=True,
shuffle=True,
stratified=None,
train_size=0.8,
verbose=True,
plot=False,
log_cout=sys.stdout,
log_cerr=sys.stderr)
```
## {{ dl--parameters }} {#parameters}
### param_grid
#### Description
Dictionary with parameters names ({{ python-type--string }}) as keys and lists of parameter settings to try as values, or a list of such dictionaries, in which case the grids spanned by each dictionary in the list are explored.
This enables searching over any sequence of parameter settings.
**Possible types**
- {{ python-type--dict }}
- {{ python-type--list }}
**Default value**
{{ python--required }}
### X
#### Description
The description is different for each group of possible types.
**Possible types**
{% cut "{{ python-type--pool }}" %}
The input training dataset.
{% note info %}
If a nontrivial value of the `cat_features` parameter is specified in the constructor of this class, {{ product }} checks the equivalence of categorical features indices specification from the constructor parameters and in this Pool class.
{% endnote %}
{% endcut %}
{% cut "{{ python-type--numpy-ndarray }}, {{ python-type--pandasDataFrame }}" %}
The input training dataset in the form of a two-dimensional feature matrix.
{% endcut %}
{% cut "{{ python_type__pandas-SparseDataFrame }}, {{ python_type__scipy-sparse-spmatrix }} (all subclasses except dia_matrix)" %}
{% include [libsvm-libsvm__desc](../_includes/work_src/reusage-formats/libsvm__desc.md) %}
{% endcut %}
**Possible types**
{% include [libsvm-libsvm__desc](../_includes/work_src/reusage-formats/libsvm__desc.md) %}
**Default value**
{{ python--required }}
### y
#### Description
{% include [methods-param-desc-label--short-desc-training](../_includes/work_src/reusage/label--short-desc-training.md) %}
{% include [methods-param-desc-label--detailed-desc-regression](../_includes/work_src/reusage/label--detailed-desc-regression.md) %}
{% note info %}
Do not use this parameter if the input training dataset (specified in the `X` parameter) type is {{ python-type--pool }}.
{% endnote %}
{% include [methods-param-desc-label--possible-types-default-supported-processing-units](../_includes/work_src/reusage/label--possible-types-default-supported-processing-units.md) %}
### cv
#### Description
The cross-validation splitting strategy.
The interpretation of this parameter depends on the input data type:
- None — Use the default three-fold cross-validation.
- {{ python-type--int }} — The number of folds in a (Stratified)KFold
- {% include [reusage-python-object-scikitlearn](../_includes/work_src/reusage-python/object-scikitlearn.md) %}
- An iterable yielding train and test splits as arrays of indices.
**Possible types**
- {{ python-type--int }}
- scikit-learn splitter {{ python-type__object }}
- cross-validation generator
- iterable
**Default value**
None
### partition_random_seed
#### Description
{% include [reusage-cv-rand__desc_intro](../_includes/work_src/reusage/cv-rand__desc_intro.md) %}
{% include [reusage-cv-rand__permutation-is-performed](../_includes/work_src/reusage/cv-rand__permutation-is-performed.md) %}
{% include [reusage-cv-rand__unique-data-splits](../_includes/work_src/reusage/cv-rand__unique-data-splits.md) %}
**Possible types**
{{ python-type--int }}
**Default value**
`0`
### calc_cv_statistics
#### Description
Estimate the quality by using cross-validation with the best of the found parameters. The model is fitted using these parameters.
This option can be enabled if the `search_by_train_test_split` parameter is set to True.
**Possible types**
{{ python-type--bool }}
**Default value**
True
### search_by_train_test_split
#### Description
Split the source dataset into train and test parts. Models are trained on the train part, while parameters are compared by the loss function score on the test dataset.
It is recommended to enable this option for large datasets and disable it for the small ones.
**Possible types**
{{ python-type--bool }}
**Default value**
True
### refit
#### Description
Refit an estimator using the best-found parameters on the whole dataset.
**Possible types**
{{ python-type--bool }}
**Default value**
`True`
### shuffle
#### Description
Shuffle the dataset objects before splitting into folds.
**Possible types**
{{ python-type--bool }}
**Default value**
`True`
### stratified
#### Description
Perform stratified sampling. True for classification and False otherwise.
**Possible types**
{{ python-type--bool }}
**Default value**
{{ python-type--none }}
### train_size
#### Description
The proportion of the dataset to include in the train split.
Possible values are in the range [0;1].
**Possible types**
{{ python-type--float }}
**Default value**
0.8
### verbose
#### Description
{% include [sections-with-methods-desc-python__feature-importances__verbose__short-description__list-intro](../_includes/work_src/reusage/python__feature-importances__verbose__short-description__list-intro.md) %}
- {{ python-type--int }} — The frequency of iterations to print the information to stdout.
- {{ python-type--bool }} — Print the information to stdout on every iteration (if set to <q>True</q>) or disable any logging (if set to <q>False</q>).
**Possible types**
- {{ python-type--bool }}
- {{ python-type--int }}
**Default value**
True
### plot
#### Description
Draw train and evaluation metrics for every set of parameters in a [Jupyter Notebook](../features/visualization_jupyter-notebook.md).
**Possible types**
{{ python-type--bool }}
**Default value**
False
{% include [python__log-params](../_includes/work_src/reusage-python/python__log-params.md) %}
## {{ dl__return-value }} {#output-format}
Dict with two fields:
- `params` — `dict` of best-found parameters.
- `cv_results` — `dict` or {{ python-type--pandascoreframeDataFrame }} with cross-validation results. Columns are: `test-error-mean`, `test-error-std`, `train-error-mean`, `train-error-std`.
## {{ dl--example }} {#example}
```python
from catboost import CatBoostRegressor
import numpy as np
train_data = np.random.randint(1, 100, size=(100, 10))
train_labels = np.random.randint(2, size=(100))
model = CatBoostRegressor()
grid = {'learning_rate': [0.03, 0.1],
'depth': [4, 6, 10],
'l2_leaf_reg': [1, 3, 5, 7, 9]}
grid_search_result = model.grid_search(grid,
X=train_data,
y=train_labels,
plot=True)
```
{% include [reusage-code-examples-graph-plotted-with-jupyter-notebook](../_includes/work_src/reusage-code-examples/graph-plotted-with-jupyter-notebook.md) %}

|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@catboost@docs@en@concepts@python-reference_catboostregressor_grid_search.md@.PATH_END.py
|
{
"filename": "celerite_rotation.py",
"repo_name": "LucaMalavolta/PyORBIT",
"repo_path": "PyORBIT_extracted/PyORBIT-main/pyorbit/models/celerite_rotation.py",
"type": "Python"
}
|
from pyorbit.subroutines.common import np, OrderedSet
from pyorbit.models.abstract_model import AbstractModel
from pyorbit.keywords_definitions import *
try:
from pyorbit.models.celerite_term import celerite, SHOTerm
except (ModuleNotFoundError,ImportError):
pass
class Celerite_Rotation(AbstractModel):
    r"""A mixture of two SHO terms that can be used to model stellar rotation.

    This term has two modes in Fourier space: one at ``period`` and one at
    ``0.5 * period``. This can be a good descriptive model for a wide range of
    stochastic variability in stellar time series from rotation to pulsations.

    From Foreman-Mackey+2017 and exoplanet, but keeping the notation of
    the semi-periodic george kernel used in PyORBIT.

    Differently from the example provided in the paper, here the terms are
    passed in the linear space already. It will be the job of the sampler to
    convert from logarithmic to linear space for those parameters that the
    user has decided to explore in logarithmic space.

    Args:
        amp: The amplitude of the variability.
        period: The primary period of variability.
        Q0: The quality factor (or really the quality factor
            minus one half) for the secondary oscillation.
        deltaQ: The difference between the quality factors of the first
            and the second modes. This parameterization (if ``deltaQ > 0``)
            ensures that the primary mode always has higher quality.
        mix: The fractional amplitude of the secondary mode compared to the
            primary. This should probably always be ``0 < mix < 1``.
    """

    default_common = 'activity'

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        # Presence check only: fail fast if celerite is not installed.
        try:
            from pyorbit.models.celerite_term import celerite, SHOTerm
        except (ModuleNotFoundError, ImportError):
            print("ERROR: celerite not installed, this will not work")
            quit()

        self.model_class = 'celerite_rotation'
        self.internal_likelihood = True

        self.list_pams_common = OrderedSet([
            'Prot',  # Rotational period of the star
            'Q0',  # quality factor (minus one half) of the secondary mode
            'deltaQ',  # quality-factor difference between the two modes
            'mix'  # fractional amplitude of the secondary mode
        ])

        self.list_pams_dataset = OrderedSet([
            'amp',  # per-dataset amplitude of the variability
        ])

        self.n_pams = 6  # two SHO terms x (S0, Q, w0)
        self.gp = {}
        # BUGFIX: default to False so that initialize_model() cannot raise
        # AttributeError when no 'activity' common model is attached (the
        # flag was previously only set inside the loop below).
        self.use_stellar_rotation_period = False

    def convert_val2gp(self, input_pams):
        """
        :param input_pams: dictionary with the 'physically meaningful'
            parameters of the GP kernel
        :return: array with the parameters to be fed to 'celerite'

        WARNING: this subroutine is HIGHLY specific of your choice of the
        kernel! I recommend to create a new Class with different
        transformations if you are planning of using a different kernel
        """

        output_pams = np.zeros(self.n_pams, dtype=np.double)

        """ You must check _celerite_ documentation (and possibily do a lot of
        testing) to know how to convert physical values to the parameter
        vector accepted by celerite.set_parameter_vector() function. Note:
        these values may be different from ones accepted by the kernel
        """
        # Primary mode at Prot: S0, Q, w0 = output_pams[:3]
        output_pams[1] = 0.5 + input_pams['Q0'] + input_pams['deltaQ']
        output_pams[2] = 4 * np.pi * output_pams[1] \
            / (input_pams['Prot'] * np.sqrt(4 * output_pams[1] ** 2 - 1))
        output_pams[0] = input_pams['amp'] \
            / (output_pams[2] * output_pams[1])

        # Secondary mode at half the period: S0, Q, w0 = output_pams[3:]
        output_pams[4] = 0.5 + input_pams['Q0']
        output_pams[5] = 8 * np.pi * output_pams[4] \
            / (input_pams['Prot'] * np.sqrt(4 * output_pams[4] ** 2 - 1))
        output_pams[3] = input_pams['mix'] * input_pams['amp'] \
            / (output_pams[5] * output_pams[4])

        return output_pams

    def initialize_model(self, mc, **kwargs):
        # Inherit the rotation-period flag from the associated 'activity'
        # common model, if one is present.
        for common_ref in self.common_ref:
            if mc.common_models[common_ref].model_class == 'activity':
                self.use_stellar_rotation_period = getattr(mc.common_models[common_ref], 'use_stellar_rotation_period', False)
                break

        # Model-level keywords override the common-model setting.
        for keyword in keywords_stellar_rotation:
            self.use_stellar_rotation_period = kwargs.get(keyword, self.use_stellar_rotation_period)

        if self.use_stellar_rotation_period:
            self.list_pams_common.update(['rotation_period'])
            self.list_pams_common.discard('Prot')

    def initialize_model_dataset(self, mc, dataset, **kwargs):
        self.define_kernel(dataset)
        return

    def define_kernel(self, dataset):
        """Build the two-SHO celerite GP for this dataset.

        The kernel is created with placeholder hyper-parameters; the real
        values are injected at every likelihood/prediction call via
        ``set_parameter_vector``.
        """
        input_pams = {
            'Prot': 10.0,
            'Q0': 1.0,
            'deltaQ': 0.5,
            'mix': 0.5,
            'amp': 10.0
        }
        gp_pams = self.convert_val2gp(input_pams)
        kernel = SHOTerm(S0=gp_pams[0], Q=gp_pams[1], w0=gp_pams[2]) \
            + SHOTerm(S0=gp_pams[3], Q=gp_pams[4], w0=gp_pams[5])
        self.gp[dataset.name_ref] = celerite.GP(kernel)

        """ I've decided to add the jitter in quadrature instead of using a constant kernel to allow the use of
        different / selective jitter within the dataset
        """
        env = np.sqrt(dataset.e ** 2.0 + dataset.jitter ** 2.0)
        self.gp[dataset.name_ref].compute(dataset.x0, env)
        return

    def _update_gp(self, parameter_values, dataset):
        """Refresh the GP hyper-parameters and re-factorize for this dataset.

        2 steps:
        1) theta parameters must be converted in physical units (e.g. from
           logarithmic to linear spaces)
        2) physical values must be converted to celerite input parameters
        """
        if self.use_stellar_rotation_period:
            parameter_values['Prot'] = parameter_values['rotation_period']

        gp_pams = self.convert_val2gp(parameter_values)
        # jitter added in quadrature to the nominal uncertainties
        env = np.sqrt(dataset.e ** 2.0 + dataset.jitter ** 2.0)
        self.gp[dataset.name_ref].set_parameter_vector(gp_pams)
        self.gp[dataset.name_ref].compute(dataset.x0, env)

    def lnlk_compute(self, parameter_values, dataset):
        """Return the GP log-likelihood of the dataset residuals."""
        self._update_gp(parameter_values, dataset)
        return self.gp[dataset.name_ref].log_likelihood(dataset.residuals)

    def sample_predict(self, parameter_values, dataset, x0_input=None, return_covariance=False, return_variance=False):
        """Return the GP predictive mean (and optionally covariance/variance)."""
        self._update_gp(parameter_values, dataset)

        if x0_input is None:
            return self.gp[dataset.name_ref].predict(dataset.residuals, dataset.x0, return_cov=return_covariance, return_var=return_variance)
        else:
            return self.gp[dataset.name_ref].predict(dataset.residuals, x0_input, return_cov=return_covariance, return_var=return_variance)

    def sample_conditional(self, parameter_values, dataset, x0_input=None):
        """Draw a sample from the GP conditioned on the dataset residuals."""
        self._update_gp(parameter_values, dataset)

        if x0_input is None:
            return self.gp[dataset.name_ref].sample_conditional(dataset.residuals, dataset.x0)
        else:
            return self.gp[dataset.name_ref].sample_conditional(dataset.residuals, x0_input)
|
LucaMalavoltaREPO_NAMEPyORBITPATH_START.@PyORBIT_extracted@PyORBIT-main@pyorbit@models@celerite_rotation.py@.PATH_END.py
|
{
"filename": "test_martini.py",
"repo_name": "kyleaoman/martini",
"repo_path": "martini_extracted/martini-main/tests/test_martini.py",
"type": "Python"
}
|
import os
import pytest
import numpy as np
from martini.martini import Martini, GlobalProfile, _BaseMartini
from martini.datacube import DataCube, HIfreq
from martini.beams import GaussianBeam
from test_sph_kernels import simple_kernels
from martini.sph_kernels import _CubicSplineKernel, _GaussianKernel, DiracDeltaKernel
from martini.spectral_models import DiracDeltaSpectrum, GaussianSpectrum
from astropy import units as U
from astropy.io import fits
from astropy import wcs
from astropy.coordinates import FK5, ICRS
from scipy.signal import fftconvolve
def _hi_mass(distance, flux, channel_width):
    """Total HI mass implied by per-channel fluxes.

    Uses MHI / Msun = 2.36e5 * (D / Mpc)^2 * sum(F / Jy * dv / (km/s)).
    """
    return np.sum(
        2.36e5
        * U.Msun
        * distance.to_value(U.Mpc) ** 2
        * flux.to_value(U.Jy)
        * channel_width.to_value(U.km / U.s)
    ).to(U.Msun)


def check_mass_accuracy(m, out_mode):
    """Verify the HI mass recovered from the cube matches the source mass.

    Mass conservation is demanded to within 1% at three stages: after source
    insertion, after beam convolution, and in the written-out file
    (``out_mode`` selects "fits" or "hdf5" output).
    """
    if out_mode == "hdf5":
        h5py = pytest.importorskip(
            "h5py", reason="h5py (optional dependency) not available"
        )
    # flux in channels
    F = (m.datacube._array * m.datacube.px_size**2).sum((0, 1)).squeeze()  # Jy
    # distance
    D = m.source.distance
    # channel width
    dv = np.abs(np.diff(m.datacube.velocity_channel_edges))
    # HI mass; demand accuracy within 1% after source insertion
    MHI = _hi_mass(D, F, dv)
    assert U.isclose(MHI, m.source.mHI_g.sum(), rtol=1e-2)
    m.convolve_beam()
    # radiant intensity
    Irad = m.datacube._array.sum((0, 1)).squeeze()  # Jy / beam
    # beam area, for an equivalent Gaussian beam
    A = np.pi * m.beam.bmaj * m.beam.bmin / 4 / np.log(2) / U.beam
    # distance
    D = m.source.distance
    # channel width
    dv = np.abs(np.diff(m.datacube.velocity_channel_edges))
    # flux
    F = (Irad / A).to(U.Jy / U.arcsec**2) * m.datacube.px_size**2
    # HI mass; demand accuracy within 1% after beam convolution
    MHI = _hi_mass(D, F, dv)
    assert U.isclose(MHI, m.source.mHI_g.sum(), rtol=1e-2)
    if out_mode == "fits":
        filename = "cube.fits"
        try:
            m.write_fits(filename)
            with fits.open(filename) as f:
                # distance
                D = m.source.distance
                # radiant intensity
                fits_wcs = wcs.WCS(f[0].header)
                Irad = U.Quantity(
                    f[0].data.T.sum((0, 1)).squeeze(), unit=f[0].header["BUNIT"]
                )
                # beam area, for an equivalent Gaussian beam
                A = (
                    np.pi
                    * (f[0].header["BMAJ"] * U.deg)
                    * (f[0].header["BMIN"] * U.deg)
                    / 4
                    / np.log(2)
                    / U.beam
                )
                px_area = U.Quantity(
                    np.abs(f[0].header["CDELT1"]), unit=f[0].header["CUNIT1"]
                ) * U.Quantity(
                    np.abs(f[0].header["CDELT2"]), unit=f[0].header["CUNIT2"]
                )
                # flux
                F = (Irad / A).to(U.Jy / U.arcsec**2) * px_area
                # channel edges recovered from the file's own spectral WCS
                channel_edges = fits_wcs.sub(("spectral",)).all_pix2world(
                    np.arange(fits_wcs.sub(("spectral",)).pixel_shape[0] + 1) - 0.5,
                    0,
                ) * U.Unit(fits_wcs.wcs.cunit[fits_wcs.wcs.spec], format="fits")
                dv = np.abs(
                    np.diff(
                        channel_edges.squeeze().to(
                            U.km / U.s, equivalencies=U.doppler_radio(HIfreq)
                        )
                    )
                )
                # HI mass; demand accuracy within 1% in output fits file
                MHI = _hi_mass(D, F, dv)
                assert U.isclose(MHI, m.source.mHI_g.sum(), rtol=1e-2)
        finally:
            if os.path.exists(filename):
                os.remove(filename)
    if out_mode == "hdf5":
        filename = "cube.hdf5"
        try:
            m.write_hdf5(filename)
            with h5py.File(filename, "r") as f:
                # distance
                D = m.source.distance
                # radiant intensity
                Irad = U.Quantity(
                    f["FluxCube"][()].sum((0, 1)).squeeze(),
                    unit=f["FluxCube"].attrs["FluxCubeUnit"],
                )
                # beam area, for an equivalent Gaussian beam
                A = (
                    np.pi
                    * (f["FluxCube"].attrs["BeamMajor_in_deg"] * U.deg)
                    * (f["FluxCube"].attrs["BeamMinor_in_deg"] * U.deg)
                    / 4
                    / np.log(2)
                    / U.beam
                )
                dv = np.abs(
                    np.diff(
                        f["velocity_channel_edges"]
                        * U.Unit(f["velocity_channel_edges"].attrs["Unit"])
                    )
                )
                px_area = U.Quantity(
                    np.abs(f["FluxCube"].attrs["deltaRA_in_RAUnit"]),
                    unit=f["FluxCube"].attrs["RAUnit"],
                ) * U.Quantity(
                    np.abs(f["FluxCube"].attrs["deltaDec_in_DecUnit"]),
                    unit=f["FluxCube"].attrs["DecUnit"],
                )
                # flux
                F = (Irad / A).to(U.Jy / U.arcsec**2) * px_area
                # HI mass; demand accuracy within 1% in output hdf5 file
                MHI = _hi_mass(D, F, dv)
                assert U.isclose(MHI, m.source.mHI_g.sum(), rtol=1e-2)
        finally:
            if os.path.exists(filename):
                os.remove(filename)
class TestMartini:
    """End-to-end tests of the Martini mock-observation pipeline."""

    @pytest.mark.parametrize("sph_kernel", simple_kernels)
    @pytest.mark.parametrize("spectral_model", (DiracDeltaSpectrum, GaussianSpectrum))
    @pytest.mark.parametrize("out_mode", ("fits", "hdf5"))
    def test_mass_accuracy(
        self, dc_zeros, sph_kernel, spectral_model, single_particle_source, out_mode
    ):
        """
        Check that the input mass in the particles gives the correct total mass in the
        datacube, by checking the conversion back to total mass. Covers testing
        Martini.insert_source_in_cube.
        """
        hsm_g = (
            0.1 * U.kpc if sph_kernel.__name__ == "DiracDeltaKernel" else 1.0 * U.kpc
        )
        source = single_particle_source(hsm_g=hsm_g)
        # single_particle_source has a mass of 1E4Msun, temperature of 1E4K
        m = Martini(
            source=source,
            datacube=dc_zeros,
            beam=GaussianBeam(),
            noise=None,
            spectral_model=spectral_model(),
            sph_kernel=sph_kernel(),
        )
        m.insert_source_in_cube(progressbar=False)
        check_mass_accuracy(m, out_mode)

    def test_convolve_beam(self, single_particle_source):
        """
        Check that beam convolution gives result matching manual calculation.
        """
        source = single_particle_source()
        datacube = DataCube(
            n_px_x=16,
            n_px_y=16,
            n_channels=16,
            spectral_centre=source.distance * source.h * 100 * U.km / U.s / U.Mpc,
        )
        beam = GaussianBeam()
        noise = None
        sph_kernel = _GaussianKernel()
        spectral_model = GaussianSpectrum()
        m = Martini(
            source=source,
            datacube=datacube,
            beam=beam,
            noise=noise,
            sph_kernel=sph_kernel,
            spectral_model=spectral_model,
        )
        m.insert_source_in_cube()
        unconvolved_cube = m.datacube._array.copy()
        unit = unconvolved_cube.unit
        s = np.s_[..., 0] if m.datacube.stokes_axis else np.s_[...]
        for spatial_slice in iter(unconvolved_cube[s].transpose((2, 0, 1))):
            spatial_slice[...] = (
                fftconvolve(spatial_slice, m.beam.kernel, mode="same") * unit
            )
        # BUGFIX: the second axis must be trimmed by pady, not padx. The two
        # happen to be equal for the square Gaussian kernel used here, which
        # is why the original `-m.datacube.padx` typo went unnoticed.
        convolved_cube = unconvolved_cube[
            m.datacube.padx : -m.datacube.padx, m.datacube.pady : -m.datacube.pady
        ]
        convolved_cube = convolved_cube.to(
            U.Jy * U.beam**-1,
            equivalencies=U.beam_angular_area(m.beam.area),
        )
        m.convolve_beam()
        assert U.allclose(m.datacube._array, convolved_cube)

    def test_add_noise(self, m_init):
        """
        Check that noise provided goes into the datacube when we call add_noise.
        """
        assert (m_init.datacube._array.sum() == 0).all()
        assert m_init.noise.seed is not None
        expected_noise = m_init.noise.generate(m_init.datacube, m_init.beam)
        # reset the rng so that add_noise regenerates the same realisation
        m_init.noise.reset_rng()
        m_init.add_noise()
        assert U.allclose(
            m_init.datacube._array,
            expected_noise.to(
                U.Jy * U.arcsec**-2,
                equivalencies=U.beam_angular_area(m_init.beam.area),
            ).to(
                m_init.datacube._array.unit,
                equivalencies=[m_init.datacube.arcsec2_to_pix],
            ),
        )

    @pytest.mark.parametrize(
        ("ra_off", "ra_in"),
        (
            (0 * U.arcsec, True),
            (3 * U.arcsec, True),
            (9 * U.arcsec, False),
            (-3 * U.arcsec, True),
            (-9 * U.arcsec, False),
        ),
    )
    @pytest.mark.parametrize(
        ("dec_off", "dec_in"),
        (
            (0 * U.arcsec, True),
            (3 * U.arcsec, True),
            (9 * U.arcsec, False),
            (-3 * U.arcsec, True),
            (-9 * U.arcsec, False),
        ),
    )
    @pytest.mark.parametrize(
        ("v_off", "v_in"),
        (
            (0 * U.km / U.s, True),
            (3 * U.km / U.s, True),
            (7 * U.km / U.s, False),
            (-3 * U.km / U.s, True),
            (-7 * U.km / U.s, False),
        ),
    )
    @pytest.mark.parametrize(("mass_off", "mass_in"), ((0, False), (1, True)))
    @pytest.mark.parametrize("spatial", (True, False))
    @pytest.mark.parametrize("spectral", (True, False))
    @pytest.mark.parametrize("mass", (True, False))
    def test_prune_particles(
        self,
        ra_off,
        ra_in,
        dec_off,
        dec_in,
        v_off,
        v_in,
        mass_off,
        mass_in,
        single_particle_source,
        spatial,
        spectral,
        mass,
    ):
        """
        Check that a particle offset by a specific set of (RA, Dec, v) is inside/outside
        the cube as expected.
        """
        # expected survival depends on which pruning criteria are enabled
        if spatial and spectral and mass:
            expect_particle = all((ra_in, dec_in, v_in, mass_in))
        elif spatial and spectral and not mass:
            expect_particle = all((ra_in, dec_in, v_in))
        elif spatial and not spectral and mass:
            expect_particle = all((ra_in, dec_in, mass_in))
        elif spatial and not spectral and not mass:
            expect_particle = all((ra_in, dec_in))
        elif not spatial and spectral and mass:
            expect_particle = all((v_in, mass_in))
        elif not spatial and spectral and not mass:
            expect_particle = v_in
        elif not spectral and not spatial and mass:
            expect_particle = mass_in
        elif not spectral and not spatial and not mass:
            expect_particle = True
        # set distance so that 1kpc = 1arcsec
        distance = (1 * U.kpc / 1 / U.arcsec).to(U.Mpc, U.dimensionless_angles())
        source = single_particle_source(
            distance=distance,
            ra=ra_off,
            dec=dec_off,
            vpeculiar=v_off,
            mHI_g=mass_off * np.ones(1) * 1.0e4 * U.Msun,
        )
        datacube = DataCube(
            n_px_x=2,
            n_px_y=2,
            n_channels=2,
            spectral_centre=source.distance * source.h * 100 * U.km / U.s / U.Mpc,
            px_size=1 * U.arcsec,
            channel_width=1 * U.km / U.s,
            ra=0 * U.deg,
            dec=0 * U.deg,
        )
        # pad size will be 5, so datacube is 12x12 pixels
        beam = GaussianBeam(bmaj=1 * U.arcsec, bmin=1 * U.arcsec, truncate=4)
        sph_kernel = _CubicSplineKernel()
        spectral_model = GaussianSpectrum(sigma=1 * U.km / U.s)
        # need to use _BaseMartini below to manipulate _prune_kwargs
        kwargs = dict(
            source=source,
            datacube=datacube,
            beam=beam,
            noise=None,
            sph_kernel=sph_kernel,
            spectral_model=spectral_model,
            _prune_kwargs=dict(spatial=spatial, spectral=spectral, mass=mass),
        )
        # if more than 1px (datacube) + 5px (pad) + 2px (sm_range) then expect to prune
        # if more than 1px (datacube) + 4px (4*spectrum_half_width) then expect to prune
        if not expect_particle:
            with pytest.raises(
                RuntimeError,
                match="No non-zero mHI source particles in target region.",
            ):
                _BaseMartini(**kwargs)
        else:
            assert _BaseMartini(**kwargs).source.npart == 1

    def test_reset(self, m_nn):
        """
        Check that resetting martini instance zeros out datacube.
        """
        cube_array = m_nn.datacube._array
        assert m_nn.datacube._array.sum() > 0
        m_nn.reset()
        assert m_nn.datacube._array.sum() == 0
        # check that can start over and get the same result w/o errors
        m_nn.insert_source_in_cube(progressbar=False)
        m_nn.convolve_beam()
        assert U.allclose(cube_array, m_nn.datacube._array)
        # check that can reset after doing nothing
        m_nn.reset()
        m_nn.reset()

    def test_reset_preserves_shape(self, single_particle_source, dc_zeros):
        """
        Check that resetting does not change the shape of the datacube array.
        """
        m = Martini(
            source=single_particle_source(),
            datacube=dc_zeros,
            beam=GaussianBeam(),
            noise=None,
            spectral_model=DiracDeltaSpectrum(),
            sph_kernel=DiracDeltaKernel(),
        )
        expected_shape = m.datacube._array.shape
        m.reset()
        assert m.datacube._array.shape == expected_shape

    def test_preview(self, m_init):
        """
        Simply check that the preview visualisation runs without error.
        """
        pytest.importorskip(
            "matplotlib", reason="matplotlib (optional dependency) not available."
        )
        # with default arguments
        with pytest.warns(UserWarning, match="singular"):
            # warning: single-particle source is used, so axis limits try to be equal
            m_init.preview()
        # with non-default arguments
        m_init.preview(
            max_points=1000,
            fig=2,
            lim="datacube",
            vlim="datacube",
            point_scaling="fixed",
            title="test",
        )

    def test_source_to_datacube_coord_transformation(self, single_particle_source):
        """
        Check that transformation is applied if source and datacube have different
        coordinate frames.
        """
        source = single_particle_source(hsm_g=0.01 * U.kpc)
        assert source.coordinate_frame.name == "icrs"
        datacube_icrs = DataCube(
            n_px_x=16,
            n_px_y=16,
            n_channels=16,
            channel_width=4 * U.km / U.s,
            px_size=10 * U.arcsec,
            spectral_centre=source.vsys,
            ra=source.ra,
            dec=source.dec,
            coordinate_frame=ICRS(),
        )
        m_icrs = Martini(
            source=source,
            datacube=datacube_icrs,
            beam=GaussianBeam(),
            noise=None,
            sph_kernel=DiracDeltaKernel(),
            spectral_model=DiracDeltaSpectrum(),
        )

        def centre_pixels_slice(m):
            # central 2x2 spatial pixels (offset into the padded array)
            datacube = m.datacube
            return m.datacube._array[
                datacube.n_px_x // 2
                - 1
                + datacube.padx : datacube.n_px_x // 2
                + 1
                + datacube.padx,
                datacube.n_px_y // 2
                - 1
                + datacube.pady : datacube.n_px_y // 2
                + 1
                + datacube.pady,
            ]

        assert np.sum(centre_pixels_slice(m_icrs).sum()) == 0
        m_icrs.insert_source_in_cube(progressbar=False)
        assert np.sum(centre_pixels_slice(m_icrs).sum()) > 0
        # ICRS is ~J2000 equinox. J1950 equinox is about a degree off,
        # so we should completely miss the cube (16 pix of 10 arcsec).
        datacube_fk5_J1950 = DataCube(
            n_px_x=16,
            n_px_y=16,
            n_channels=16,
            channel_width=4 * U.km / U.s,
            px_size=10 * U.arcsec,
            spectral_centre=source.vsys,
            ra=source.ra,
            dec=source.dec,
            coordinate_frame=FK5(equinox="J1950"),
        )
        with pytest.raises(
            RuntimeError,
            match="No non-zero mHI source particles in target region.",
        ):
            Martini(
                source=source,
                datacube=datacube_fk5_J1950,
                beam=GaussianBeam(),
                noise=None,
                sph_kernel=DiracDeltaKernel(),
                spectral_model=DiracDeltaSpectrum(),
            )

    def test_source_to_datacube_specsys_transformation(self, single_particle_source):
        """
        Check that spectral reference transformation is applied if source and datacube
        have different specsys.
        """
        source = single_particle_source(hsm_g=0.01 * U.kpc)
        datacube_icrs = DataCube(
            n_px_x=16,
            n_px_y=16,
            n_channels=16,
            channel_width=4 * U.km / U.s,
            px_size=10 * U.arcsec,
            spectral_centre=source.vsys,
            ra=source.ra,
            dec=source.dec,
            coordinate_frame=ICRS(),
            specsys="icrs",
        )
        m_icrs = Martini(
            source=source,
            datacube=datacube_icrs,
            beam=GaussianBeam(),
            noise=None,
            sph_kernel=DiracDeltaKernel(),
            spectral_model=DiracDeltaSpectrum(),
        )

        def centre_channels_slice(m):
            # central 2 spectral channels
            datacube = m.datacube
            return m.datacube._array[
                :, :, datacube.n_channels // 2 - 1 : datacube.n_channels // 2 + 1
            ]

        assert np.sum(centre_channels_slice(m_icrs).sum()) == 0
        m_icrs.insert_source_in_cube(progressbar=False)
        assert np.sum(centre_channels_slice(m_icrs).sum()) > 0
        # ICRS and Galactocentric are offset by many km/s depending on direction
        # so with 4 channels of 1 km/s we should completely miss the cube
        datacube_galactocentric = DataCube(
            n_px_x=16,
            n_px_y=16,
            n_channels=4,
            channel_width=1 * U.km / U.s,
            px_size=10 * U.arcsec,
            spectral_centre=source.vsys,
            ra=source.ra,
            dec=source.dec,
            coordinate_frame=ICRS(),
            specsys="galactocentric",
        )
        assert datacube_galactocentric.wcs.wcs.specsys == "galactocentric"
        with pytest.raises(
            RuntimeError,
            match="No non-zero mHI source particles in target region.",
        ):
            Martini(
                source=source,
                datacube=datacube_galactocentric,
                beam=GaussianBeam(),
                noise=None,
                sph_kernel=DiracDeltaKernel(),
                spectral_model=DiracDeltaSpectrum(),
            )

    def test_hdf5_grids(self, m):
        """
        Check that the coordinate grids in hdf5 output invert the datacube WCS.
        """
        h5py = pytest.importorskip(
            "h5py", reason="h5py (optional dependency) not available."
        )
        origin = 0  # numpy-like, not fits-like, indexing
        filename = "cube.hdf5"
        try:
            m.write_hdf5(filename)
            with h5py.File(filename, "r") as f:
                # voxel centres written to the file, in the WCS's native units
                voxel_coords = np.vstack(
                    (
                        (
                            f["RA"][()].flatten()
                            * U.Unit(f["RA"].attrs["Unit"], format="fits")
                        ).to_value(m.datacube.wcs.wcs.cunit[0]),
                        (
                            f["Dec"][()].flatten()
                            * U.Unit(f["Dec"].attrs["Unit"], format="fits")
                        ).to_value(m.datacube.wcs.wcs.cunit[1]),
                        (
                            f["channel_mids"][()].flatten()
                            * U.Unit(f["channel_mids"].attrs["Unit"], format="fits")
                        ).to_value(m.datacube.wcs.wcs.cunit[2]),
                    )
                ).T
                ra_idx, dec_idx, spec_idx = m.datacube.wcs.all_world2pix(
                    voxel_coords, origin
                ).T
                ra_idx = ra_idx.reshape((m.datacube._array.shape))
                dec_idx = dec_idx.reshape((m.datacube._array.shape))
                spec_idx = spec_idx.reshape((m.datacube._array.shape))
                expected_idx = np.meshgrid(
                    np.arange(m.datacube.n_px_x),
                    np.arange(m.datacube.n_px_y),
                    np.arange(m.datacube.n_channels),
                    indexing="ij",
                )
                assert np.allclose(ra_idx, expected_idx[0])
                assert np.allclose(dec_idx, expected_idx[1])
                assert np.allclose(spec_idx, expected_idx[2])
                # voxel vertices: one more sample per axis, offset half a pixel
                vertex_coords = np.vstack(
                    (
                        (
                            f["RA_vertices"][()].flatten()
                            * U.Unit(f["RA_vertices"].attrs["Unit"], format="fits")
                        ).to_value(m.datacube.wcs.wcs.cunit[0]),
                        (
                            f["Dec_vertices"][()].flatten()
                            * U.Unit(f["Dec_vertices"].attrs["Unit"], format="fits")
                        ).to_value(m.datacube.wcs.wcs.cunit[1]),
                        (
                            f["channel_vertices"][()].flatten()
                            * U.Unit(f["channel_vertices"].attrs["Unit"], format="fits")
                        ).to_value(m.datacube.wcs.wcs.cunit[2]),
                    )
                ).T
                ra_vx_idx, dec_vx_idx, spec_vx_idx = m.datacube.wcs.all_world2pix(
                    vertex_coords, origin
                ).T
                shape = [s + 1 for s in m.datacube._array.shape]
                ra_vx_idx = ra_vx_idx.reshape(shape)
                dec_vx_idx = dec_vx_idx.reshape(shape)
                spec_vx_idx = spec_vx_idx.reshape(shape)
                expected_vx_idx = np.meshgrid(
                    np.arange(m.datacube.n_px_x + 1) - 0.5,
                    np.arange(m.datacube.n_px_y + 1) - 0.5,
                    np.arange(m.datacube.n_channels + 1) - 0.5,
                    indexing="ij",
                )
                assert np.allclose(ra_vx_idx, expected_vx_idx[0])
                assert np.allclose(dec_vx_idx, expected_vx_idx[1])
                assert np.allclose(spec_vx_idx, expected_vx_idx[2])
        finally:
            if os.path.exists(filename):
                os.remove(filename)
class TestParallel:
    """Tests of the parallelised source-insertion loop."""

    def test_parallel_consistent_with_serial(self, many_particle_source, dc_zeros):
        """
        Check that running the source insertion loop in parallel gives the same result
        as running in serial.
        """
        pytest.importorskip(
            "multiprocess", reason="multiprocess (optional dependency) not available"
        )
        m = Martini(
            source=many_particle_source(),
            datacube=dc_zeros,
            beam=GaussianBeam(),
            noise=None,
            sph_kernel=_GaussianKernel(),
            spectral_model=GaussianSpectrum(),
        )
        # serial reference run
        m.insert_source_in_cube(ncpu=1, progressbar=False)
        serial_cube = m.datacube._array
        # guard against trivially passing on an empty cube
        assert serial_cube.sum() > 0
        m.reset()
        # confirm the reset actually zeroed the working array
        assert np.allclose(
            m.datacube._array.to_value(m.datacube._array.unit),
            0.0,
        )
        # the parallel run must reproduce the serial cube exactly
        m.insert_source_in_cube(ncpu=2, progressbar=False)
        assert U.allclose(m.datacube._array, serial_cube)
class TestGlobalProfile:
    """Tests of the GlobalProfile (spatially-integrated spectrum) interface."""

    @pytest.mark.parametrize("spectral_model", (DiracDeltaSpectrum, GaussianSpectrum))
    @pytest.mark.parametrize("ra", (0 * U.deg, 180 * U.deg))
    def test_mass_accuracy(self, spectral_model, single_particle_source, ra):
        """
        Check that the input mass in the particles gives the correct total mass in the
        spectrum, by checking the conversion back to total mass. Covers testing
        GlobalProfile.insert_source_in_spectrum.
        """
        # single_particle_source has a mass of 1E4Msun, temperature of 1E4K
        # we test both ra=0deg and ra=180deg to make sure all particles included
        source = single_particle_source(ra=ra)
        m = GlobalProfile(
            source=source,
            spectral_model=spectral_model(),
            n_channels=32,
            channel_width=10 * U.km * U.s**-1,
            spectral_centre=source.vsys,
        )
        m.insert_source_in_spectrum()
        # flux
        F = m.spectrum.sum()  # Jy
        # distance
        D = m.source.distance
        # channel width
        dv = m.channel_width
        # HI mass via MHI/Msun = 2.36e5 * (D/Mpc)^2 * F/Jy * dv/(km/s)
        MHI = (
            2.36e5
            * U.Msun
            * D.to_value(U.Mpc) ** 2
            * F.to_value(U.Jy)
            * dv.to_value(U.km / U.s)
        ).to(U.Msun)
        # demand accuracy within 1% after source insertion
        assert U.isclose(MHI, m.source.mHI_g.sum(), rtol=1e-2)

    @pytest.mark.parametrize(
        ("ra_off", "ra_in"),
        (
            (0 * U.arcsec, True),
            (3 * U.arcsec, True),
            (5 * U.deg, False),  # global profile uses 1 deg pixel
            (-3 * U.arcsec, True),
            (-5 * U.deg, False),  # global profile uses 1 deg pixel
        ),
    )
    @pytest.mark.parametrize(
        ("dec_off", "dec_in"),
        (
            (0 * U.arcsec, True),
            (3 * U.arcsec, True),
            (5 * U.deg, False),  # global profile uses 1 deg pixel
            (-3 * U.arcsec, True),
            (-5 * U.deg, False),  # global profile uses 1 deg pixel
        ),
    )
    @pytest.mark.parametrize(
        ("v_off", "v_in"),
        (
            (0 * U.km / U.s, True),
            (3 * U.km / U.s, True),
            (7 * U.km / U.s, False),
            (-3 * U.km / U.s, True),
            (-7 * U.km / U.s, False),
        ),
    )
    def test_prune_particles(
        self, ra_off, ra_in, dec_off, dec_in, v_off, v_in, single_particle_source
    ):
        """
        Check that a particle offset by a specific set of (RA, Dec, v) is inside/outside
        the cube as expected. GlobalProfile should ignore RA, Dec when pruning.
        """
        # GlobalProfile should ignore RA, Dec when pruning:
        expect_particle = v_in
        # set distance so that 1kpc = 1arcsec
        distance = (1 * U.kpc / 1 / U.arcsec).to(U.Mpc, U.dimensionless_angles())
        source = single_particle_source(
            distance=distance, ra=ra_off, dec=dec_off, vpeculiar=v_off
        )
        spectral_model = GaussianSpectrum(sigma=1 * U.km / U.s)
        kwargs = dict(
            source=source,
            spectral_model=spectral_model,
            n_channels=2,
            channel_width=1 * U.km / U.s,
            spectral_centre=source.distance * source.h * 100 * U.km / U.s / U.Mpc,
        )
        # if more than 1px (datacube) + 4px (4*spectrum_half_width) then expect to prune
        if not expect_particle:
            with pytest.raises(
                RuntimeError,
                match="No non-zero mHI source particles in target region.",
            ):
                GlobalProfile(**kwargs)
        else:
            assert GlobalProfile(**kwargs).source.npart == 1

    def test_reset(self, gp):
        """
        Check that resetting global profile instance zeros out datacube and spectrum.
        """
        cube_array = gp._datacube._array
        assert gp._datacube._array.sum() > 0
        spectrum = gp.spectrum
        assert spectrum.sum() > 0
        gp.reset()
        assert gp._datacube._array.sum() == 0
        # the cached spectrum must be dropped by reset
        assert not hasattr(gp, "_spectrum")
        # check that can start over and get the same result w/o errors
        gp.insert_source_in_spectrum()
        assert U.allclose(cube_array, gp._datacube._array)
        assert U.allclose(spectrum, gp.spectrum)
        # check that can reset after doing nothing
        gp.reset()
        gp.reset()

    def test_preview(self, gp):
        """
        Simply check that the preview visualisation runs without error.
        """
        pytest.importorskip(
            "matplotlib", reason="matplotlib (optional dependency) not available."
        )
        # with default arguments
        with pytest.warns(UserWarning, match="singular"):
            # warning: single-particle source is used, so axis limits try to be equal
            gp.preview()
        # with non-default arguments
        gp.preview(
            max_points=1000,
            fig=2,
            lim="datacube",
            vlim="datacube",
            point_scaling="fixed",
            title="test",
        )

    def test_channel_modes(self, single_particle_source):
        """
        Check that channels have expected units in both modes (frequency, velocity).
        """
        source = single_particle_source()
        channel_width = 10 * U.km * U.s**-1
        m = GlobalProfile(
            source=source,
            spectral_model=GaussianSpectrum(sigma="thermal"),
            n_channels=32,
            channel_width=channel_width,
            spectral_centre=source.vsys,
        )
        expected_units = channel_width.unit
        # these will raise if there's a problem:
        m.channel_edges.to(expected_units)
        m.channel_mids.to(expected_units)

    def test_view_spectrum(self, gp):
        """
        Simply check that plotting spectrum runs without error.
        """
        pytest.importorskip(
            "matplotlib", reason="matplotlib (optional dependency) not available."
        )
        # with default arguments
        gp.plot_spectrum()
        # with non-default arguments
        gp.plot_spectrum(fig=2, title="test", show_vsys=False)
class TestMartiniWithDataCubeFromWCS:
    """Check source insertion into a DataCube initialised from a WCS."""

    @pytest.mark.parametrize("out_mode", ("fits", "hdf5"))
    def test_source_insertion(self, dc_wcs, single_particle_source, out_mode):
        """Insert a source into a WCS-derived cube and verify mass recovery."""
        cube = dc_wcs
        # Hubble-flow distance implied by the cube's spectral centre (H0 = 70).
        v_centre = cube.spectral_centre.to(
            U.km / U.s, equivalencies=U.doppler_radio(HIfreq)
        )
        d = (v_centre / (70 * U.km / U.s / U.Mpc)).to(U.Mpc)
        # smoothing length spanning ~3 pixels at that distance
        hsm = (3 * cube.px_size * d).to(
            U.kpc, equivalencies=U.dimensionless_angles()
        )
        src = single_particle_source(
            ra=cube.ra,
            dec=cube.dec,
            distance=d,
            hsm_g=hsm,
        )
        m = Martini(
            source=src,
            datacube=cube,
            beam=GaussianBeam(bmaj=3 * cube.px_size, bmin=3 * cube.px_size),
            noise=None,
            spectral_model=GaussianSpectrum(sigma="thermal"),
            sph_kernel=_CubicSplineKernel(),
        )
        m.insert_source_in_cube(progressbar=False)
        check_mass_accuracy(m, out_mode)
|
kyleaomanREPO_NAMEmartiniPATH_START.@martini_extracted@martini-main@tests@test_martini.py@.PATH_END.py
|
{
"filename": "_style.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/pie/hoverlabel/font/_style.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class StyleValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    """Enumerated validator for the ``style`` property of ``pie.hoverlabel.font``.

    Accepts "normal" or "italic"; array-valued input is allowed. The
    edit_type defaults to "none".
    """

    def __init__(
        self, plotly_name="style", parent_name="pie.hoverlabel.font", **kwargs
    ):
        # Defaults that callers may override via kwargs.
        defaults = {
            "array_ok": True,
            "edit_type": "none",
            "values": ["normal", "italic"],
        }
        resolved = {key: kwargs.pop(key, value) for key, value in defaults.items()}
        super(StyleValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            **resolved,
            **kwargs,
        )
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@pie@hoverlabel@font@_style.py@.PATH_END.py
|
{
"filename": "image_dataset_utils_test.py",
"repo_name": "keras-team/keras",
"repo_path": "keras_extracted/keras-master/keras/src/utils/image_dataset_utils_test.py",
"type": "Python"
}
|
import os
import numpy as np
from keras.src import backend
from keras.src import testing
from keras.src.utils import image_dataset_utils
from keras.src.utils import image_utils
from keras.src.utils.module_utils import tensorflow as tf
class ImageDatasetFromDirectoryTest(testing.TestCase):
    """Integration tests for `image_dataset_utils.image_dataset_from_directory`.

    Each test writes small synthetic images into a temporary directory tree
    (via the two private helpers below) and then checks shapes, dtypes,
    labels, class names and sample counts of the resulting dataset for the
    various argument combinations.
    """

    def _get_images(self, count=16, color_mode="rgb"):
        """Return ``count`` random 24x24 PIL images in ``color_mode``.

        Channel count follows the color mode (1 for grayscale, 4 for rgba,
        3 otherwise); arrays are transposed when the backend uses
        channels-first layout before conversion to PIL images.
        """
        width = height = 24
        imgs = []
        for _ in range(count):
            if color_mode == "grayscale":
                img = np.random.randint(0, 256, size=(height, width, 1))
            elif color_mode == "rgba":
                img = np.random.randint(0, 256, size=(height, width, 4))
            else:
                img = np.random.randint(0, 256, size=(height, width, 3))
            if backend.config.image_data_format() == "channels_first":
                img = np.transpose(img, (2, 0, 1))
            img = image_utils.array_to_img(img)
            imgs.append(img)
        return imgs

    def _prepare_directory(
        self,
        num_classes=2,
        nested_dirs=False,
        color_mode="rgb",
        count=16,
    ):
        """Create a temp dir with ``num_classes`` class subdirectories.

        ``count`` images are saved round-robin over the (optionally nested)
        subdirectories; rgb images are saved as jpg, everything else as png.
        Returns the temp directory path.
        """
        # Generate paths to class subdirectories
        temp_dir = self.get_temp_dir()
        paths = []
        for class_index in range(num_classes):
            class_directory = f"class_{class_index}"
            if nested_dirs:
                class_paths = [
                    class_directory,
                    os.path.join(class_directory, "subfolder_1"),
                    os.path.join(class_directory, "subfolder_2"),
                    os.path.join(
                        class_directory, "subfolder_1", "sub-subfolder"
                    ),
                ]
            else:
                class_paths = [class_directory]
            for path in class_paths:
                os.mkdir(os.path.join(temp_dir, path))
            paths += class_paths
        # Save images to the paths
        i = 0
        for img in self._get_images(color_mode=color_mode, count=count):
            # Round-robin assignment of images to subdirectories.
            path = paths[i % len(paths)]
            if color_mode == "rgb":
                ext = "jpg"
            else:
                ext = "png"
            filename = os.path.join(path, f"image_{i}.{ext}")
            img.save(os.path.join(temp_dir, filename))
            i += 1
        return temp_dir

    def test_image_dataset_from_directory_no_labels(self):
        # Test retrieving images without labels from a directory and its
        # subdirs.
        # Save a few extra images in the parent directory.
        directory = self._prepare_directory(count=7, num_classes=2)
        for i, img in enumerate(self._get_images(3)):
            filename = f"image_{i}.jpg"
            img.save(os.path.join(directory, filename))
        dataset = image_dataset_utils.image_dataset_from_directory(
            directory, batch_size=5, image_size=(18, 18), labels=None
        )
        if backend.config.image_data_format() == "channels_last":
            output_shape = [5, 18, 18, 3]
        else:
            output_shape = [5, 3, 18, 18]
        self.assertEqual(dataset.class_names, None)
        batch = next(iter(dataset))
        # We return plain images
        self.assertEqual(batch.shape, output_shape)
        self.assertEqual(batch.dtype.name, "float32")
        # Count samples
        batch_count = 0
        sample_count = 0
        for batch in dataset:
            batch_count += 1
            sample_count += batch.shape[0]
        # 7 class images + 3 parent-dir images = 10 samples in 2 batches.
        self.assertEqual(batch_count, 2)
        self.assertEqual(sample_count, 10)

    def test_image_dataset_from_directory_binary(self):
        """Check `label_mode` int/binary/categorical on a 2-class tree."""
        directory = self._prepare_directory(num_classes=2)
        dataset = image_dataset_utils.image_dataset_from_directory(
            directory, batch_size=8, image_size=(18, 18), label_mode="int"
        )
        if backend.config.image_data_format() == "channels_last":
            output_shape = [8, 18, 18, 3]
        else:
            output_shape = [8, 3, 18, 18]
        batch = next(iter(dataset))
        self.assertLen(batch, 2)
        self.assertEqual(batch[0].shape, output_shape)
        self.assertEqual(batch[0].dtype.name, "float32")
        self.assertEqual(batch[1].shape, (8,))
        self.assertEqual(batch[1].dtype.name, "int32")
        dataset = image_dataset_utils.image_dataset_from_directory(
            directory, batch_size=8, image_size=(18, 18), label_mode="binary"
        )
        batch = next(iter(dataset))
        self.assertLen(batch, 2)
        self.assertEqual(batch[0].shape, output_shape)
        self.assertEqual(batch[0].dtype.name, "float32")
        # Binary labels come back as a float column vector.
        self.assertEqual(batch[1].shape, (8, 1))
        self.assertEqual(batch[1].dtype.name, "float32")
        dataset = image_dataset_utils.image_dataset_from_directory(
            directory,
            batch_size=8,
            image_size=(18, 18),
            label_mode="categorical",
        )
        batch = next(iter(dataset))
        self.assertLen(batch, 2)
        self.assertEqual(batch[0].shape, output_shape)
        self.assertEqual(batch[0].dtype.name, "float32")
        # One-hot labels: one column per class.
        self.assertEqual(batch[1].shape, (8, 2))
        self.assertEqual(batch[1].dtype.name, "float32")

    def test_static_shape_in_graph(self):
        """The image dims must be statically known inside a tf.function."""
        directory = self._prepare_directory(num_classes=2)
        dataset = image_dataset_utils.image_dataset_from_directory(
            directory, batch_size=8, image_size=(18, 18), label_mode="int"
        )
        test_case = self
        if backend.config.image_data_format() == "channels_last":
            output_shape = [None, 18, 18, 3]
        else:
            output_shape = [None, 3, 18, 18]

        @tf.function
        def symbolic_fn(ds):
            for x, _ in ds.take(1):
                test_case.assertListEqual(x.shape.as_list(), output_shape)

        symbolic_fn(dataset)

    def test_sample_count(self):
        """All 15 images are yielded exactly once across batches."""
        directory = self._prepare_directory(num_classes=4, count=15)
        dataset = image_dataset_utils.image_dataset_from_directory(
            directory, batch_size=8, image_size=(18, 18), label_mode=None
        )
        sample_count = 0
        for batch in dataset:
            sample_count += batch.shape[0]
        self.assertEqual(sample_count, 15)

    def test_image_dataset_from_directory_multiclass(self):
        """Check label modes and re-iteration on a 4-class tree."""
        directory = self._prepare_directory(num_classes=4, count=15)
        dataset = image_dataset_utils.image_dataset_from_directory(
            directory, batch_size=8, image_size=(18, 18), label_mode=None
        )
        if backend.config.image_data_format() == "channels_last":
            output_shape = [8, 18, 18, 3]
        else:
            output_shape = [8, 3, 18, 18]
        batch = next(iter(dataset))
        self.assertEqual(batch.shape, output_shape)
        dataset = image_dataset_utils.image_dataset_from_directory(
            directory, batch_size=8, image_size=(18, 18), label_mode=None
        )
        sample_count = 0
        # Drive a second, independent iterator in lockstep with the
        # dataset loop; both traversals must cover the same 15 samples.
        iterator = iter(dataset)
        for batch in dataset:
            sample_count += next(iterator).shape[0]
        self.assertEqual(sample_count, 15)
        dataset = image_dataset_utils.image_dataset_from_directory(
            directory, batch_size=8, image_size=(18, 18), label_mode="int"
        )
        batch = next(iter(dataset))
        self.assertLen(batch, 2)
        self.assertEqual(batch[0].shape, output_shape)
        self.assertEqual(batch[0].dtype.name, "float32")
        self.assertEqual(batch[1].shape, (8,))
        self.assertEqual(batch[1].dtype.name, "int32")
        dataset = image_dataset_utils.image_dataset_from_directory(
            directory,
            batch_size=8,
            image_size=(18, 18),
            label_mode="categorical",
        )
        batch = next(iter(dataset))
        self.assertLen(batch, 2)
        self.assertEqual(batch[0].shape, (output_shape))
        self.assertEqual(batch[0].dtype.name, "float32")
        self.assertEqual(batch[1].shape, (8, 4))
        self.assertEqual(batch[1].dtype.name, "float32")

    def test_image_dataset_from_directory_color_modes(self):
        """rgba yields 4 channels, grayscale yields 1 channel."""
        directory = self._prepare_directory(num_classes=4, color_mode="rgba")
        dataset = image_dataset_utils.image_dataset_from_directory(
            directory, batch_size=8, image_size=(18, 18), color_mode="rgba"
        )
        if backend.config.image_data_format() == "channels_last":
            output_shape = [8, 18, 18, 4]
        else:
            output_shape = [8, 4, 18, 18]
        batch = next(iter(dataset))
        self.assertLen(batch, 2)
        self.assertEqual(batch[0].shape, output_shape)
        self.assertEqual(batch[0].dtype.name, "float32")
        directory = self._prepare_directory(
            num_classes=4, color_mode="grayscale"
        )
        dataset = image_dataset_utils.image_dataset_from_directory(
            directory, batch_size=8, image_size=(18, 18), color_mode="grayscale"
        )
        if backend.config.image_data_format() == "channels_last":
            output_shape = [8, 18, 18, 1]
        else:
            output_shape = [8, 1, 18, 18]
        batch = next(iter(dataset))
        self.assertLen(batch, 2)
        self.assertEqual(batch[0].shape, output_shape)
        self.assertEqual(batch[0].dtype.name, "float32")

    def test_image_dataset_from_directory_validation_split(self):
        """An 80/20 split of 10 samples gives 8 train / 2 val images."""
        directory = self._prepare_directory(num_classes=2, count=10)
        dataset = image_dataset_utils.image_dataset_from_directory(
            directory,
            batch_size=10,
            image_size=(18, 18),
            validation_split=0.2,
            subset="training",
            seed=1337,
        )
        batch = next(iter(dataset))
        self.assertLen(batch, 2)
        if backend.config.image_data_format() == "channels_last":
            train_output_shape = [8, 18, 18, 3]
            val_output_shape = [2, 18, 18, 3]
        else:
            train_output_shape = [8, 3, 18, 18]
            val_output_shape = [2, 3, 18, 18]
        self.assertEqual(batch[0].shape, train_output_shape)
        dataset = image_dataset_utils.image_dataset_from_directory(
            directory,
            batch_size=10,
            image_size=(18, 18),
            validation_split=0.2,
            subset="validation",
            seed=1337,
        )
        batch = next(iter(dataset))
        self.assertLen(batch, 2)
        self.assertEqual(batch[0].shape, val_output_shape)
        # subset="both" returns the (train, val) pair in one call.
        (
            train_dataset,
            val_dataset,
        ) = image_dataset_utils.image_dataset_from_directory(
            directory,
            batch_size=10,
            image_size=(18, 18),
            validation_split=0.2,
            subset="both",
            seed=1337,
        )
        batch = next(iter(train_dataset))
        self.assertLen(batch, 2)
        self.assertEqual(batch[0].shape, train_output_shape)
        batch = next(iter(val_dataset))
        self.assertLen(batch, 2)
        self.assertEqual(batch[0].shape, val_output_shape)

    def test_image_dataset_from_directory_manual_labels(self):
        """Explicit `labels=` lists override directory-based inference."""
        # Case: wrong number of labels
        directory = self._prepare_directory(num_classes=1, count=4)
        with self.assertRaisesRegex(ValueError, "match the number of files"):
            image_dataset_utils.image_dataset_from_directory(
                directory,
                batch_size=8,
                image_size=(18, 18),
                labels=[0, 1, 0],
                shuffle=False,
            )
        # Case: single directory
        directory = self._prepare_directory(num_classes=1, count=4)
        dataset = image_dataset_utils.image_dataset_from_directory(
            directory,
            batch_size=8,
            image_size=(18, 18),
            labels=[0, 1, 0, 1],
            shuffle=False,
        )
        if backend.config.image_data_format() == "channels_last":
            output_shape = [18, 18, 3]
        else:
            output_shape = [3, 18, 18]
        # Class names come from the distinct manual labels, stringified.
        self.assertEqual(dataset.class_names, ["0", "1"])
        batch = next(iter(dataset))
        self.assertLen(batch, 2)
        self.assertEqual(batch[0].shape, [4] + output_shape)
        self.assertAllClose(batch[1], [0, 1, 0, 1])
        # Case: multiple directories
        directory = self._prepare_directory(num_classes=3, count=6)
        dataset = image_dataset_utils.image_dataset_from_directory(
            directory,
            batch_size=8,
            image_size=(18, 18),
            labels=[0, 1, 0, 1, 1, 1],
            shuffle=False,
        )
        self.assertEqual(dataset.class_names, ["0", "1"])
        batch = next(iter(dataset))
        self.assertLen(batch, 2)
        self.assertEqual(batch[0].shape, [6] + output_shape)
        self.assertAllClose(batch[1], [0, 1, 0, 1, 1, 1])

    def test_image_dataset_from_directory_follow_links(self):
        """With follow_links=True all nested images are collected."""
        directory = self._prepare_directory(
            num_classes=2, count=25, nested_dirs=True
        )
        dataset = image_dataset_utils.image_dataset_from_directory(
            directory,
            batch_size=8,
            image_size=(18, 18),
            label_mode=None,
            follow_links=True,
        )
        sample_count = 0
        for batch in dataset:
            sample_count += batch.shape[0]
        self.assertEqual(sample_count, 25)

    def test_image_dataset_from_directory_no_images(self):
        """An empty directory tree raises a clear error."""
        directory = self._prepare_directory(num_classes=2, count=0)
        with self.assertRaisesRegex(ValueError, "No images found."):
            _ = image_dataset_utils.image_dataset_from_directory(directory)

    def test_image_dataset_from_directory_crop_to_aspect_ratio(self):
        """crop_to_aspect_ratio still yields the requested image size."""
        directory = self._prepare_directory(num_classes=2, count=5)
        dataset = image_dataset_utils.image_dataset_from_directory(
            directory,
            batch_size=5,
            image_size=(18, 18),
            crop_to_aspect_ratio=True,
        )
        if backend.config.image_data_format() == "channels_last":
            output_shape = [5, 18, 18, 3]
        else:
            output_shape = [5, 3, 18, 18]
        batch = next(iter(dataset))
        self.assertLen(batch, 2)
        self.assertEqual(batch[0].shape, output_shape)

    def test_image_dataset_from_directory_pad_to_aspect_ratio(self):
        """pad_to_aspect_ratio still yields the requested image size."""
        directory = self._prepare_directory(num_classes=2, count=5)
        dataset = image_dataset_utils.image_dataset_from_directory(
            directory,
            batch_size=5,
            image_size=(18, 18),
            pad_to_aspect_ratio=True,
        )
        if backend.config.image_data_format() == "channels_last":
            output_shape = [5, 18, 18, 3]
        else:
            output_shape = [5, 3, 18, 18]
        batch = next(iter(dataset))
        self.assertLen(batch, 2)
        self.assertEqual(batch[0].shape, output_shape)

    def test_image_dataset_from_directory_errors(self):
        """Invalid argument combinations raise descriptive ValueErrors."""
        directory = self._prepare_directory(num_classes=3, count=5)
        with self.assertRaisesRegex(ValueError, "`labels` argument should be"):
            _ = image_dataset_utils.image_dataset_from_directory(
                directory, labels="other"
            )
        with self.assertRaisesRegex(
            ValueError, "`label_mode` argument must be"
        ):
            _ = image_dataset_utils.image_dataset_from_directory(
                directory, label_mode="other"
            )
        with self.assertRaisesRegex(ValueError, "`color_mode` must be one of"):
            _ = image_dataset_utils.image_dataset_from_directory(
                directory, color_mode="other"
            )
        with self.assertRaisesRegex(
            ValueError, 'only pass `class_names` if `labels="inferred"`'
        ):
            _ = image_dataset_utils.image_dataset_from_directory(
                directory,
                labels=[0, 0, 1, 1, 1],
                class_names=["class_0", "class_1", "class_2"],
            )
        with self.assertRaisesRegex(
            ValueError,
            "Expected the lengths of `labels` to match the number of files",
        ):
            _ = image_dataset_utils.image_dataset_from_directory(
                directory, labels=[0, 0, 1, 1]
            )
        with self.assertRaisesRegex(
            ValueError, "`class_names` passed did not match"
        ):
            _ = image_dataset_utils.image_dataset_from_directory(
                directory, class_names=["class_0", "wrong_class"]
            )
        with self.assertRaisesRegex(ValueError, "there must be exactly 2"):
            _ = image_dataset_utils.image_dataset_from_directory(
                directory, label_mode="binary"
            )
        with self.assertRaisesRegex(
            ValueError, "`validation_split` must be between 0 and 1"
        ):
            _ = image_dataset_utils.image_dataset_from_directory(
                directory, validation_split=2
            )
        with self.assertRaisesRegex(
            ValueError,
            '`subset` must be either "training", "validation" or "both"',
        ):
            _ = image_dataset_utils.image_dataset_from_directory(
                directory, validation_split=0.2, subset="other"
            )
        with self.assertRaisesRegex(
            ValueError, "`validation_split` must be set"
        ):
            _ = image_dataset_utils.image_dataset_from_directory(
                directory, validation_split=0.0, subset="training"
            )
        with self.assertRaisesRegex(ValueError, "must provide a `seed`"):
            _ = image_dataset_utils.image_dataset_from_directory(
                directory, validation_split=0.2, subset="training"
            )

    def test_image_dataset_from_directory_not_batched(self):
        """batch_size=None yields single unbatched images (rank 3)."""
        directory = self._prepare_directory(num_classes=2, count=2)
        dataset = image_dataset_utils.image_dataset_from_directory(
            directory,
            batch_size=None,
            image_size=(18, 18),
            label_mode=None,
            shuffle=False,
        )
        sample = next(iter(dataset))
        self.assertEqual(len(sample.shape), 3)

    def test_image_dataset_from_directory_shuffle(self):
        """shuffle=False is stable across epochs; shuffle=True reshuffles
        per epoch but is reproducible for a fixed seed."""
        # TODO: add same test for train/val
        directory = self._prepare_directory(
            num_classes=2, count=25, nested_dirs=True
        )
        dataset = image_dataset_utils.image_dataset_from_directory(
            directory,
            batch_size=8,
            image_size=(18, 18),
            label_mode=None,
            follow_links=True,
            shuffle=False,
        )
        batches_1 = []
        batches_2 = []
        for b in dataset:
            batches_1.append(b)
        batches_1 = np.concatenate(batches_1, axis=0)
        for b in dataset:
            batches_2.append(b)
        batches_2 = np.concatenate(batches_2, axis=0)
        # Unshuffled: two passes produce identical ordering.
        self.assertAllClose(batches_1, batches_2, atol=1e-6)
        dataset = image_dataset_utils.image_dataset_from_directory(
            directory,
            batch_size=8,
            image_size=(18, 18),
            label_mode=None,
            follow_links=True,
            shuffle=True,
            seed=1337,
        )
        batches_1 = []
        batches_2 = []
        for b in dataset:
            batches_1.append(b)
        batches_1 = np.concatenate(batches_1, axis=0)
        for b in dataset:
            batches_2.append(b)
        batches_2 = np.concatenate(batches_2, axis=0)
        # Shuffled: consecutive epochs differ.
        self.assertNotAllClose(batches_1, batches_2, atol=1e-6)
        # Test random seed determinism
        dataset = image_dataset_utils.image_dataset_from_directory(
            directory,
            batch_size=8,
            image_size=(18, 18),
            label_mode=None,
            follow_links=True,
            shuffle=True,
            seed=1337,
        )
        batches_1_alt = []
        for b in dataset:
            batches_1_alt.append(b)
        batches_1_alt = np.concatenate(batches_1_alt, axis=0)
        self.assertAllClose(batches_1, batches_1_alt, atol=1e-6)
|
keras-teamREPO_NAMEkerasPATH_START.@keras_extracted@keras-master@keras@src@utils@image_dataset_utils_test.py@.PATH_END.py
|
{
"filename": "calculate_splus_filters.py",
"repo_name": "splus-collab/splus_filters",
"repo_path": "splus_filters_extracted/splus_filters-master/calculate_splus_filters.py",
"type": "Python"
}
|
#!/usr/bin/env python3
import numpy as np
from scipy.interpolate import interp1d
import matplotlib.pyplot as plt
import os
import sys
import argparse
import pandas as pd
from astropy.io import fits, ascii
import glob
import colorlog
import logging
from dust_laws import CCM
def get_logger(
    name: str,
    loglevel: str = 'INFO'
):
    """Return a logger with a default ColoredFormatter.

    Parameters
    ----------
    name : str
        Logger name (callers pass ``__name__``).
    loglevel : str
        Logging level name, e.g. ``'INFO'`` or ``'DEBUG'``.

    Returns
    -------
    logging.Logger
        The (singleton) logger registered under ``name``.
    """
    logger = logging.getLogger(name)
    logger.setLevel(loglevel)
    # logging.getLogger returns the same object for a given name and this
    # helper is called from nearly every function in this module, so an
    # unconditional addHandler would stack handlers and duplicate every
    # log line. Only attach the handler the first time.
    if not logger.handlers:
        handler = logging.StreamHandler()
        handler.setFormatter(colorlog.ColoredFormatter(
            "%(asctime)s [%(log_color)s%(levelname)s%(reset)s] @%(module)s.%(funcName)s() %(message)s",
            datefmt="%Y-%m-%d %H:%M:%S",
            log_colors={
                'DEBUG': 'cyan',
                'INFO': 'green',
                'WARNING': 'yellow',
                'ERROR': 'red',
                'CRITICAL': 'bold_yellow,bg_red',
            },
        ))
        logger.addHandler(handler)
    return logger
def get_args():
    """Parse and return the command line arguments.

    Returns
    -------
    argparse.Namespace
        Parsed options controlling output products (plots, CSVs, LaTeX)
        and verbosity.
    """
    # Typo fixes in the user-facing description only ("or a given filtre"
    # -> "of a given filter"); option names are kept as-is (including the
    # historical misspelling of --save_central_wavelentghs) so existing
    # scripts and the attribute accesses in main() keep working.
    parser = argparse.ArgumentParser(description=" ".join([
        'Calculate the transmission curve of a given filter.',
        'Estimate the central lambda from the FWHM of that filter.']))
    parser.add_argument('--work_dir', type=str, help='Working directory. Default: current directory.',
                        default=os.getcwd())
    parser.add_argument('--save_plots', action='store_true',
                        help='Save the plot of the filter.')
    parser.add_argument('--save_csv_filters', action='store_true',
                        help='Save the transmission curve of the filter.')
    parser.add_argument('--save_central_wavelentghs', action='store_true',
                        help='Save the central wavelengths of the filters in a csv file.')
    parser.add_argument('--show_individual_filters', action='store_true',
                        help='Show the individual filters. Only activate when --save_csv_filters is used.')
    parser.add_argument('--show_plots', action='store_true',
                        help='Show the main plots.')
    parser.add_argument('--prepare_latex', action='store_true',
                        help='Prepare the latex table with the central wavelengths.')
    parser.add_argument('--loglevel', type=str, help='Log level.',
                        default='INFO')
    parser.add_argument('--debug', action='store_true',
                        help='Activate debug mode.')
    args = parser.parse_args()
    return args
def main(
    args: argparse.Namespace,
):
    """Main function. Run all steps of the code in the sequence required.

    Builds the lab curves, convolves them with atmosphere/mirror/CCD,
    draws the diagnostic figure for each central-wavelength method and
    finally writes the optional CSV/HTML/LaTeX products.

    Parameters
    ----------
    args : argparse.Namespace
    """
    # Mapping: lab measurement file prefix -> filter display name, plot
    # color, and label position (x, dx, dy offsets) for the final figure.
    fnames2filters = {
        '20150918C080uJAVA02': {'fname': 'uJAVA', 'color': 'indigo', 'pos': (3563, -200, -4)},
        '20150429C080F037802': {'fname': 'J0378', 'color': 'darkviolet', 'pos': (3770, -350, 1)},
        '20150922C080F039502': {'fname': 'J0395', 'color': 'navy', 'pos': (3940, -400, 1)},
        '20150923C080F041002': {'fname': 'J0410', 'color': 'b', 'pos': (4094, -350, -0.1)},
        '20150514C080F043002': {'fname': 'J0430', 'color': 'dodgerblue', 'pos': (4292, -400, 1.3)},
        '20150924C080gSDSS02': {'fname': 'gSDSS', 'color': 'turquoise', 'pos': (4751, -200, 0.)},
        '20140606C080F051502': {'fname': 'J0515', 'color': 'lime', 'pos': (5133, -100, 1.5)},
        '20140604C080F062502': {'fname': 'rSDSS', 'color': 'limegreen', 'pos': (6258, -200, -2)},
        '20140609C080F066002': {'fname': 'J0660', 'color': 'y', 'pos': (6614, -300, 1)},
        '20150506C080iSDSS02': {'fname': 'iSDSS', 'color': 'darkorange', 'pos': (7690, -100, 1)},
        '20150922C080F086102': {'fname': 'J0861', 'color': 'orangered', 'pos': (8611, -250, 1)},
        '20150504C080zSDSS02': {'fname': 'zSDSS', 'color': 'darkred', 'pos': (8831, 300, -12)}}
    logger = get_logger(__name__, loglevel=args.loglevel)
    logger.info('Calculating the lab transmission curves of the filters.')
    # Raw lab curves, then the convolution with atmosphere/mirror/CCD.
    lab_filters = get_lab_curves(args)
    plot_lab_curves(lab_filters, fnames2filters, args,
                    outname='lab_curves.png', figlevel='lab')
    allcurves = calc_trasm_curve(lab_filters, fnames2filters, args)
    plot_lab_curves(allcurves, fnames2filters, args,
                    outname='convoluted_curves.png', figlevel='convoluted')
    plot_all_curves(allcurves, args)
    make_final_plot(allcurves, fnames2filters, args)
    # One diagnostic figure per central-wavelength estimation method.
    allcurves = calculate_central_lambda(allcurves, fnames2filters, args)
    plot_lab_curves(allcurves, fnames2filters, args,
                    outname='convoluted_curves_central.png', figlevel='central')
    plot_lab_curves(allcurves, fnames2filters, args,
                    outname='convoluted_curves_trapz.png', figlevel='trapz')
    plot_lab_curves(allcurves, fnames2filters, args,
                    outname='convoluted_curves_mean.png', figlevel='mean')
    plot_lab_curves(allcurves, fnames2filters, args,
                    outname='convoluted_curves_mean_1.png', figlevel='mean_1')
    plot_lab_curves(allcurves, fnames2filters, args,
                    outname='convoluted_curves_pivot.png', figlevel='pivot')
    calculate_alambda(allcurves, fnames2filters, args)
    make_html(allcurves, fnames2filters, args)
    if args.save_central_wavelentghs:
        make_csv_of_central_lambdas(allcurves, fnames2filters, args)
    if args.prepare_latex:
        prepare_latex_table(allcurves, fnames2filters, args)
def get_lab_curves(
    args: argparse.Namespace
):
    """
    Read the lab files and return a dictionary with the transmission curves.
    The files containing the lab measures were sent to the S-PLUS team by
    the J-PLUS team. The files were read and the transmission curves were
    calculated as the average of the measures. The files for each filter
    contain the wavelength, the lab transmission (before convolution with
    atmosphere and instrument), and the standard deviation of the measures.
    Parameters
    ----------
    args : argparse.Namespace
    Returns
    -------
    lab_filters : dict
        Keyed by lab file basename (without extension); each value holds
        'wave', 'transm' (mean transmission) and 'std' Series.
    """
    logger = get_logger(__name__, loglevel=args.loglevel)
    work_dir = args.work_dir
    data_dir = 'data-from-lab'
    list_of_filter_files = glob.glob(os.path.join(work_dir, data_dir, '*.txt'))
    lab_filters = {}
    for filter_file in list_of_filter_files:
        logger.debug('Reading {}'.format(filter_file))
        # Scan the header to count how many lines to skip before the
        # tabular data starts; files beginning with 'J-PLUS #07' carry two
        # extra header lines.
        n = 1
        with open(filter_file, 'r') as f:
            for line in f.readlines():
                if line.startswith('Wavelength nm'):
                    break
                if line.startswith('J-PLUS #07'):
                    n += 2
                    break
                n += 1
            f.close()  # NOTE(review): redundant — the with-block already closes the file.
        logger.debug('Skipping {} lines'.format(n))
        try:
            # Lab files are tab-separated with comma decimal marks.
            df = pd.read_csv(filter_file, delimiter='\t',
                             decimal=',', skiprows=n, header=None)
        except Exception as e:
            logger.error(" ".join(['Reading the filter file failed.',
                                   'Error: {}'.format(e)]))
            sys.exit(1)
        logger.info(" ".join(['The transmission level is calculated from the',
                              'average of the measures in the lab file.']))
        # Columns 2..N-2 hold the individual measures; the mean is
        # normalised by columns 1 and 102 — presumably the two reference
        # scans. TODO(review): confirm the meaning of columns 1 and 102.
        mid_columns_average = df[df.columns[2:-1]].mean(axis=1)
        mid_columns_std = df[df.columns[2:-1]].std(axis=1)
        transmission_mean = mid_columns_average / df[[1, 102]].mean(axis=1)
        wave = df[0]
        logger.debug('Test if filters were read correctly.')
        # Basic sanity checks: matching sizes, plausible length, not None.
        if wave.size != transmission_mean.size:
            logger.error(" ".join(['Reading the filter file failed.',
                                   'Size of wave and transmission differ.']))
            sys.exit(1)
        if wave.size < 100:
            logger.error(" ".join(['Reading the filter file failed.',
                                   'Size of wave is too small.']))
            sys.exit(1)
        if (wave is None) or (transmission_mean is None):
            logger.error(" ".join(['Reading the filter file failed.',
                                   'Wave or transmission is None.']))
            sys.exit(1)
        logger.debug('Loding filters to memory.')
        lab_filters[filter_file.split('/')[-1].split('.')[0]] = \
            {'wave': wave, 'transm': transmission_mean, 'std': mid_columns_std}
    del logger
    return lab_filters
def plot_lab_curves(
    lab_filters: dict,
    fnames2filters: dict,
    args: argparse.Namespace,
    outname: str = 'fig.png',
    figlevel: str = 'lab'
):
    """
    Plot the lab transmission curves for each filter and configuration.

    Depending on ``figlevel``, each panel is annotated with the result of a
    central-wavelength estimation method ('central', 'trapz', 'mean',
    'mean_1', 'pivot') previously attached to ``lab_filters``.

    Parameters
    ----------
    lab_filters : dict
        Curves per filter (plus method results for the annotated levels).
    fnames2filters : dict
        Mapping of lab file prefix to filter name/color.
    args : argparse.Namespace
        Controls saving/showing the figure.
    outname : str
        Output file name for the figure.
    figlevel : str
        Annotation mode; raises ValueError for unknown values.
    """
    logger = get_logger(__name__, loglevel=args.loglevel)
    fig = plt.figure(figsize=(10, 10))
    for i, filter_name in enumerate(fnames2filters.keys()):
        ax = fig.add_subplot(4, 3, i+1)
        # Wavelengths are stored in nm; convert to Angstrom for plotting.
        w = np.array(lab_filters[filter_name]['wave'] * 10.)
        t = np.array(lab_filters[filter_name]['transm'])
        if t.max() > 1.:
            # Some curves come in percent; normalise to [0, 1].
            t = t / 100.
        logger.debug('Plotting filter: %s' % filter_name)
        ax.plot(w, t, lw=1.5, label=fnames2filters[filter_name]['fname'],
                color=fnames2filters[filter_name]['color'])
        if figlevel == 'lab':
            title = 'Lab transmission curves'
            logger.info('Plotting lab curve %s' % filter_name)
        elif figlevel == 'convoluted':
            title = 'Convoluted transmission curves'
            logger.info('Plotting convoluted curve %s' % filter_name)
        elif figlevel == 'central':
            title = 'Central wavelength'
            logger.debug('Plotting central wavelength %s' % filter_name)
            # Draw the FWHM segment at half maximum and mark the centre.
            central_wave = lab_filters[filter_name]['central']['central_wave']
            min_wave = lab_filters[filter_name]['central']['central_wave'] - \
                lab_filters[filter_name]['central']['delta_wave'] / 2.
            max_wave = lab_filters[filter_name]['central']['central_wave'] + \
                lab_filters[filter_name]['central']['delta_wave'] / 2.
            half_height = lab_filters[filter_name]['transm'].max() / 2.
            ax.plot([min_wave, max_wave], [half_height, half_height],
                    'k-', marker='p', lw=1.5)
            ax.plot([central_wave], [half_height], 'kx', ms=10)
            ax.plot([min_wave, min_wave], [0., half_height], 'k-', lw=1.5)
            ax.plot([max_wave, max_wave], [0., half_height], 'k-', lw=1.5)
            ax.plot([central_wave, central_wave], [
                    0., half_height * 2.], 'k-', lw=1.5)
        elif figlevel == 'trapz':
            title = 'Method: Trapezoidal rule'
            logger.debug('Plotting trapezoidal rule for %s' % filter_name)
            central_wave = lab_filters[filter_name]['trapz']['central_wave']
            min_wave = lab_filters[filter_name]['trapz']['central_wave'] - \
                lab_filters[filter_name]['trapz']['delta_wave'] / 2.
            max_wave = lab_filters[filter_name]['trapz']['central_wave'] + \
                lab_filters[filter_name]['trapz']['delta_wave'] / 2.
            height = lab_filters[filter_name]['transm'].max()
            ax.fill_between([min_wave, max_wave], [height, height], color='brown',
                            alpha=0.7)
            ax.plot([central_wave, central_wave],
                    [0, height], 'k--', lw=1.5)
        elif figlevel == 'mean':
            title = 'Method: Mean'
            centr_wave = lab_filters[filter_name]['mean']['central_wave']
            min_wave = lab_filters[filter_name]['mean']['central_wave'] - \
                lab_filters[filter_name]['mean']['delta_wave'] / 2.
            max_wave = lab_filters[filter_name]['mean']['central_wave'] + \
                lab_filters[filter_name]['mean']['delta_wave'] / 2.
            # Was a bare print() — route the diagnostic through the logger
            # like the rest of the module.
            logger.debug('%s %s %s', min_wave, max_wave,
                         lab_filters[filter_name]['mean']['delta_wave'])
            ax.fill_between([min_wave, max_wave], 0, t.max(), color='brown',
                            alpha=0.7)
            ax.plot([centr_wave, centr_wave], [
                    0, t.max()], '--', color='k', lw=1.5)
        elif figlevel == 'mean_1':
            # '1\%' was an invalid escape sequence that also rendered a
            # literal backslash in the title; plain '%' is intended.
            title = 'Method: Mean with 1% threshold'
            centr_wave = lab_filters[filter_name]['mean_1']['central_wave']
            min_wave = lab_filters[filter_name]['mean_1']['central_wave'] - \
                lab_filters[filter_name]['mean_1']['delta_wave'] / 2.
            max_wave = lab_filters[filter_name]['mean_1']['central_wave'] + \
                lab_filters[filter_name]['mean_1']['delta_wave'] / 2.
            ax.fill_between([min_wave, max_wave], 0, t.max(), color='brown',
                            alpha=0.7)
            ax.plot([centr_wave, centr_wave], [
                    0, t.max()], '--', color='k', lw=1.5)
        elif figlevel == 'pivot':
            title = 'Method: Pivot lambda'
            centr_wave = lab_filters[filter_name]['pivot']['central_wave']
            min_wave = lab_filters[filter_name]['pivot']['central_wave'] - \
                lab_filters[filter_name]['pivot']['delta_wave'] / 2.
            max_wave = lab_filters[filter_name]['pivot']['central_wave'] + \
                lab_filters[filter_name]['pivot']['delta_wave'] / 2.
            ax.plot([centr_wave, centr_wave], [
                    0, t.max()], '--', color='k', lw=1.5)
        else:
            logger.critical('Unknown figlevel: %s' % figlevel)
            raise ValueError('Unknown figlevel: %s' % figlevel)
        plt.legend()
        ax.set_ylim(0, 1.)
        # Title and axis labels are attached to specific panels only.
        if i == 1:
            ax.set_title(title)
        if i == 10:
            ax.set_xlabel('Wavelength [A]')
        if i == 3:
            ax.set_ylabel('Transmission')
        plt.grid()
    plt.tight_layout()
    if args.save_plots:
        logger.info('Saving fig to %s' % os.path.join(args.work_dir, outname))
        plt.savefig(os.path.join(args.work_dir, outname), dpi=300)
    if args.show_plots:
        logger.debug('Showing plot.')
        plt.show()
    else:
        logger.debug('Closing plot.')
        plt.close()
    del logger
    return
def calc_trasm_curve(
    lab_filters: dict,
    fnames2filters: dict,
    args: argparse.Namespace
):
    """
    Calculate the transmission curve of the filters.

    Each lab curve is multiplied by the atmospheric transmission, the
    mirror reflectance and the CCD efficiency (all interpolated onto a
    common 1 nm grid) to yield the effective instrument response.

    Parameters
    ----------
    lab_filters : dict
        Lab curves per filter, as returned by ``get_lab_curves``.
    fnames2filters : dict
        Mapping of lab file prefix to filter name and plot color.
    args : argparse.Namespace
    Returns
    -------
    allcurves : dict
        Effective curve per filter plus the atmosphere/mirror/CCD
        component curves.
    """
    logger = get_logger(__name__, loglevel=args.loglevel)
    logger.info('Calculating transmission curves.')
    work_dir = args.work_dir
    data_dir = 'data-from-lab'
    allcurves = {}
    logger.debug('Calculating atmospheric transmission.')
    atm_transm_file = os.path.join(work_dir, data_dir, 'sky_trans.ascii')
    atmosph_transmitance = pd.read_csv(atm_transm_file, delimiter=' ')
    atm_wave = atmosph_transmitance['wave']
    atm_transm = atmosph_transmitance['transm']
    atm_ius = interp1d(atm_wave, atm_transm)
    allcurves['atm'] = {'wave': atm_wave, 'transm': atm_transm,
                        'fname': 'atm', 'color': 'k'}
    logger.debug('Calculating mirror reflectance.')
    mirror_reflectance_file = os.path.join(
        work_dir, data_dir, 'mirror_reflectance.fits')
    mirror_reflect = fits.open(mirror_reflectance_file)[1].data
    mirror_wave = np.array([float(a) for a in mirror_reflect.col1])
    mirror_reflect = np.array([float(a) for a in mirror_reflect.col2]) / 100.
    mr_ius = interp1d(mirror_wave, mirror_reflect)
    allcurves['mirror'] = {'wave': mirror_wave, 'transm': mirror_reflect,
                           'fname': 'mirror', 'color': 'grey'}
    if args.debug:
        # the reflectance bellow was obtained from:
        # https://laserbeamproducts.wordpress.com/2014/06/19/reflectivity-of-aluminium-uv-visible-and-infrared/
        logger.debug(" ".join(['The following values are not measured.',
                               'They were taken as a reference to check',
                               'the extrapolation of the measured values.',
                               'This was necessary because the measured',
                               'values do not cover the whole wavelength',
                               'range to the red. After debating on using',
                               'a linear extrapolation or the values bellow,',
                               'we reached that the conclusion that any of',
                               'the methods would involve fabricating data.',
                               'We decided to use the extrapolation because',
                               'it gives as good as invented values as any',
                               'other similar method. The values bellow',
                               'are kept here are reference only.']))
        mirror_measured_wave = np.array([300., 350., 420., 470., 530., 650., 880.,
                                         950., 1000., 1100])
        mirror_measured_flux = np.array([.9126, .9126, .9126, .9126,
                                         .911, .8725, .7971, .82, .84, .85])
        allcurves['aluminum_reflect'] = {'wave': mirror_measured_wave,
                                         'transm': mirror_measured_flux,
                                         'fname': 'mirror_measured',
                                         'color': 'g'}
    logger.debug('Calculating CCD efficiency from lab.')
    ccd_efficiency_file = os.path.join(work_dir, data_dir, 'ccd_curve.fits')
    ccd_curve = fits.open(ccd_efficiency_file)[1].data
    ccd_wave = np.array([float(a) for a in ccd_curve.col1])
    ccd_eff = np.array([float(a) for a in ccd_curve.col2]) / 100.
    # np.float_ was deprecated and removed in NumPy 2.0; both arrays are
    # already float64, so interpolate them directly.
    ccd_ius = interp1d(ccd_wave, ccd_eff)
    allcurves['ccd'] = {'wave': ccd_wave, 'transm': ccd_eff,
                        'fname': 'ccd', 'color': 'b'}
    logger.debug('Calculating CCD efficiency from Tololo.')
    ccd_measured_wave = np.array([300., 350., 400., 450., 500., 550., 600.,
                                  650., 725., 800., 850., 900, 970.])
    ccd_measured_flux = np.array([.2, .45, .90, .93, .88, .88, .91, .92, .95,
                                  .88, .8, .6, .3])
    allcurves['ccd_measured'] = {'wave': ccd_measured_wave,
                                 'transm': ccd_measured_flux,
                                 'fname': 'ccd_measured', 'color': None}
    for lab_curve in lab_filters:
        lab_wave = lab_filters[lab_curve]['wave']
        lab_transm = lab_filters[lab_curve]['transm']
        lab_std = lab_filters[lab_curve]['std']
        lab_ius = interp1d(lab_wave, lab_transm)
        std_ius = interp1d(lab_wave, lab_std)
        # Restrict the common grid to where every component is defined so
        # none of the interpolators is extrapolated.
        xmin = np.array([min(atm_wave / 10.), min(mirror_wave),
                         min(ccd_wave), min(lab_wave)])
        wave_range = np.arange(max(xmin), max(lab_wave), 1.)
        new_transm = lab_ius(wave_range)
        new_atm_transm = atm_ius(wave_range)
        new_mirror_reflect = mr_ius(wave_range)
        # NOTE: a stale, unused reassignment of mirror_measured_wave/flux
        # (with a truncated 88. entry) used to live here; it had no effect
        # on the result and was removed.
        new_ccd_eff = ccd_ius(wave_range)
        new_filter_trans = (new_transm * new_atm_transm * new_mirror_reflect *
                            new_ccd_eff)
        if args.save_csv_filters:
            new_std = std_ius(wave_range)
            dat = wave_range, new_filter_trans, new_std
            outputname = "".join([fnames2filters[lab_curve]['fname'], '.csv'])
            logger.info('Writing file: %s.', outputname)
            np.savetxt(outputname, np.transpose(dat), delimiter=',',
                       header='wavelength,transmittance,std')
            if args.show_individual_filters:
                logger.info('Plotting filter: %s.', lab_curve)
                plt.plot(wave_range, new_filter_trans, fnames2filters[lab_curve]['color'],
                         label=fnames2filters[lab_curve]['fname'])
                plt.legend()
                plt.show()
            else:
                plt.close()
        allcurves[lab_curve] = {'wave': wave_range,
                                'transm': new_filter_trans,
                                'fname': fnames2filters[lab_curve]['fname'],
                                'color': fnames2filters[lab_curve]['color']}
    del logger
    return allcurves
def plot_all_curves(
    allcurves: dict,
    args: argparse.Namespace
):
    """
    Overlay every curve (filters plus atmosphere/mirror/CCD components)
    on a single set of axes and optionally save/show the figure.

    Parameters
    ----------
    allcurves : dict
    args : argparse.Namespace
    """
    logger = get_logger(__name__, loglevel=args.loglevel)
    plt.figure(figsize=(10, 6))
    for curve_data in allcurves.values():
        wave = np.array(curve_data['wave'])
        transm = np.array(curve_data['transm'])
        plt.plot(wave, transm,
                 color=curve_data['color'],
                 label=curve_data['fname'])
    plt.xlim(300, 1100)
    plt.ylim(0, 1)
    plt.xlabel('Wavelength (nm)')
    plt.ylabel('Transmittance')
    # Legend goes outside the axes on the right.
    plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
    plt.grid()
    plt.tight_layout()
    if args.save_plots:
        logger.debug('Saving plot to file.')
        plt.savefig(os.path.join(args.work_dir, 'allcurves.png'), dpi=300)
    if args.show_plots:
        plt.show()
    plt.close()
def make_final_plot(
    allcurves: dict,
    fnames2filters: dict,
    args: argparse.Namespace
):
    """
    Make the presentation figure of the transmission curves, plotted in
    Angstrom on the x-axis and percent transmittance on the y-axis.

    Parameters
    ----------
    allcurves : dict
    fnames2filters : dict
    args : argparse.Namespace
    """
    logger = get_logger(__name__, loglevel=args.loglevel)
    logger.info('Making presentation plot.')
    work_dir = args.work_dir
    # Short labels used for the broad-band (SDSS-like) filters.
    short_names = {'uJAVA': 'u', 'gSDSS': 'g', 'rSDSS': 'r',
                   'iSDSS': 'i', 'zSDSS': 'z'}
    plt.figure(figsize=(8, 4))
    for key, meta in fnames2filters.items():
        wave_angstrom = allcurves[key]['wave'] * 10.
        transm_percent = allcurves[key]['transm'] * 100.
        plt.plot(wave_angstrom, transm_percent,
                 color=meta['color'], label=meta['fname'], lw=2)
        label = short_names.get(meta['fname'], meta['fname'])
        plt.text(meta['pos'][0] + meta['pos'][1],
                 max(transm_percent) + meta['pos'][2],
                 label, fontsize=12, color=meta['color'])
    plt.xlabel(r'$\lambda\ \mathrm{[\AA]}$', fontsize=16)
    plt.ylabel(r'$R_\lambda\ \mathrm{[\%]}$', fontsize=16)
    plt.xlim(3000, 10000)
    plt.ylim(0.2, 83)
    plt.tight_layout()
    if args.save_plots:
        logger.info('Saving plot to %s.', os.path.join(
            work_dir, 'splus_filters.png'))
        plt.savefig(os.path.join(work_dir, 'splus_filters.png'),
                    format='png', dpi=300)
    if args.show_plots:
        logger.debug('Showing plot.')
        plt.show()
    plt.close()
    del logger
def calculate_central_lambda(
    allcurves: dict,
    fnames2filters: dict,
    args: argparse.Namespace
):
    """
    Calculate the central wavelength of the filters.

    Several estimators are computed per filter and stored back into
    ``allcurves``: half-maximum midpoint ('central'), trapezoidal mean
    ('trapz'), transmission-weighted means with 0% and 1% thresholds
    ('mean', 'mean_1'), and the pivot wavelength ('pivot').

    Parameters
    ----------
    allcurves : dict
        Curves as produced upstream; 'wave' is in nm, 'transm' unitless.
    fnames2filters : dict
        Filters to process (keys select entries of ``allcurves``).
    args : argparse.Namespace

    Returns
    -------
    allcurves : dict
        Same dict, with the per-filter estimator entries added.
    """
    logger = get_logger(__name__, loglevel=args.loglevel)
    logger.info('Calculating central wavelength')
    for curve in fnames2filters.keys():
        # Convert nm -> Angstrom before any wavelength statistics.
        wave = allcurves[curve]['wave'] * 10.
        transm = allcurves[curve]['transm']
        interp = interp1d(wave, transm)
        synt_wave = np.linspace(min(wave), max(wave), 100000)
        # Weight the transmission by wavelength (photon-counting weighting).
        synt_transm = interp(synt_wave) * synt_wave
        # Bug fix: corrected the 'Claculating' typo in the log message.
        logger.debug('Calculating curves via trapezoidal rule approach')
        # Half-maximum crossing points define the FWHM-style estimator.
        half_height = max(synt_transm) / 2.
        left_index = np.where(synt_transm > half_height)[0][0]
        right_index = np.where(synt_transm > half_height)[0][-1]
        min_wave = synt_wave[left_index]
        max_wave = synt_wave[right_index]
        central_wave = (max_wave + min_wave) / 2.
        allcurves[curve]['central'] = {'central_wave': central_wave,
                                       'delta_wave': max_wave - min_wave}
        logger.debug('Calculating trapezoidal rule')
        mid_wave = np.trapz(synt_wave * synt_transm, synt_wave) / np.trapz(
            synt_transm, synt_wave)
        trapz_width = np.trapz(synt_transm, synt_wave) / max(synt_transm)
        allcurves[curve]['trapz'] = {'central_wave': mid_wave,
                                     'delta_wave': trapz_width}
        logger.debug('Calculating mean with 0% threshold')
        # With a 0% threshold the mask keeps every strictly positive sample.
        mask = synt_transm > synt_transm.max() * 0.0
        lambda_mean = np.sum(
            synt_wave[mask] * synt_transm[mask]) / np.sum(synt_transm[mask])
        mean_width = np.trapz(synt_transm[mask], synt_wave[mask]) / max(
            synt_transm[mask])
        allcurves[curve]['mean'] = {'central_wave': lambda_mean,
                                    'delta_wave': mean_width}
        logger.debug('Calculating mean with 1% threshold')
        mask = synt_transm > synt_transm.max() * 0.01
        lambda_mean = np.sum(synt_wave[mask] * synt_transm[mask]) / np.sum(
            synt_transm[mask])
        mean_width = np.trapz(synt_transm[mask], synt_wave[mask]) / max(
            synt_transm[mask])
        allcurves[curve]['mean_1'] = {'central_wave': lambda_mean,
                                      'delta_wave': mean_width}
        logger.debug('Calculating pivot wavelength')
        # NOTE(review): synt_transm already carries one factor of lambda, so
        # this ratio equals sum(T*lam)/sum(T/lam), the discrete pivot formula.
        lambda_pivot = np.sqrt(np.sum(synt_transm) /
                               np.sum(synt_transm / synt_wave**2))
        mean_pivot = max_wave - min_wave
        allcurves[curve]['pivot'] = {'central_wave': lambda_pivot,
                                     'delta_wave': mean_pivot}
    del logger
    return allcurves
def make_html(
    allcurves: dict,
    fnames2filters: dict,
    args: argparse.Namespace
):
    """
    Make a html file with the central wavelengths of the filters.

    Writes ``central_wavelengths.html`` into ``args.work_dir`` with one row
    per filter and twelve columns of derived quantities.

    Parameters
    ----------
    allcurves : dict
    fnames2filters : dict
    args : argparse.Namespace
    """
    logger = get_logger(__name__, loglevel=args.loglevel)
    # Use a context manager so the file is closed even if a write fails.
    with open(os.path.join(args.work_dir, 'central_wavelengths.html'),
              'w') as htmlf:
        htmlf.write('<div class="dlpage">\n')
        htmlf.write('<table class="docutils" style="width:100%" border=1>\n')
        # NOTE(review): rows inside <colgroup> is unusual HTML; kept as-is to
        # preserve the emitted document — confirm against the rendered page.
        htmlf.write('<colgroup>\n')
        htmlf.write('<tr>')
        # Bug fix: the table has 12 header cells (see below); the title cell
        # previously spanned only 11 columns.
        htmlf.write('<th colspan="12"><b>S-PLUS filters summary</b></th>\n')
        htmlf.write('</tr>\n')
        htmlf.write('<tr>')
        htmlf.write('<td>Filter</td>\n')
        htmlf.write('<td>λ<sub>central</sub></td>\n')
        htmlf.write('<td>FWHM</td>\n')
        htmlf.write('<td>λ<sub>trapz</sub></td>\n')
        htmlf.write('<td>W<sub>trapz</sub></td>\n')
        htmlf.write('<td>λ<sub>mean</sub></td>\n')
        htmlf.write('<td>Δλ<sub>mean</sub></td>\n')
        htmlf.write('<td>λ<sub>mean</sub> (>1%)</td>\n')
        htmlf.write('<td>W<sub>mean</sub> (>1%)</td>\n')
        htmlf.write('<td>λ<sub>pivot</sub></td>\n')
        htmlf.write('<td>A<sub>λ</sub>/A<sub>V</sub></td>\n')
        htmlf.write('<td>A<sub>λ</sub>/A<sub>V</sub> (CCM)</td>\n')
        htmlf.write('</tr>\n')
        htmlf.write('</colgroup>\n')
        logger.info('Writing central wavelengths to html file')
        for curve in fnames2filters.keys():
            htmlf.write('<tr>\n')
            htmlf.write('<td>%s</td>\n' % fnames2filters[curve]['fname'])
            htmlf.write('<td>%.0f</td>\n' %
                        allcurves[curve]['central']['central_wave'])
            htmlf.write('<td>%.0f</td>\n' %
                        allcurves[curve]['central']['delta_wave'])
            htmlf.write('<td>%.0f</td>\n' %
                        allcurves[curve]['trapz']['central_wave'])
            htmlf.write('<td>%.0f</td>\n' %
                        allcurves[curve]['trapz']['delta_wave'])
            htmlf.write('<td>%.0f</td>\n' %
                        allcurves[curve]['mean']['central_wave'])
            htmlf.write('<td>%.0f</td>\n' % allcurves[curve]['mean']['delta_wave'])
            htmlf.write('<td>%.0f</td>\n' %
                        allcurves[curve]['mean_1']['central_wave'])
            htmlf.write('<td>%.0f</td>\n' %
                        allcurves[curve]['mean_1']['delta_wave'])
            htmlf.write('<td>%.0f</td>\n' %
                        allcurves[curve]['pivot']['central_wave'])
            htmlf.write('<td>%.3f</td>\n' % allcurves[curve]['a_lambda_a_v'])
            htmlf.write('<td>%.3f</td>\n' % allcurves[curve]['a_lambda_a_v_ccm'])
            htmlf.write('</tr>\n')
        htmlf.write('</table>\n')
        htmlf.write('</div>\n')
    del logger
def make_csv_of_central_lambdas(
    allcurves: dict,
    fnames2filters: dict,
    args: argparse.Namespace
):
    """
    Make a csv file with the central wavelengths of the filters.

    One row per filter; the columns hold the different central-wavelength
    and width estimators plus the two A_lambda/A_V values.

    Parameters
    ----------
    allcurves : dict
    fnames2filters : dict
    args : argparse.Namespace
    """
    logger = get_logger(__name__, loglevel=args.loglevel)
    logger.info('Writing central wavelengths to csv file')
    column_order = ['filter', 'central_wave', 'delta_wave', 'trapz_wave',
                    'trapz_width', 'mean_wave', 'mean_width', 'mean_1_wave',
                    'mean_1_width', 'pivot_wave', 'alambda_av',
                    'alambda_av_ccm']
    records = []
    for curve in fnames2filters.keys():
        logger.debug('Getting params for %s' % fnames2filters[curve]['fname'])
        params = allcurves[curve]
        records.append({'filter': fnames2filters[curve]['fname'],
                        'central_wave': params['central']['central_wave'],
                        'delta_wave': params['central']['delta_wave'],
                        'trapz_wave': params['trapz']['central_wave'],
                        'trapz_width': params['trapz']['delta_wave'],
                        'mean_wave': params['mean']['central_wave'],
                        'mean_width': params['mean']['delta_wave'],
                        'mean_1_wave': params['mean_1']['central_wave'],
                        'mean_1_width': params['mean_1']['delta_wave'],
                        'pivot_wave': params['pivot']['central_wave'],
                        'alambda_av': params['a_lambda_a_v'],
                        'alambda_av_ccm': params['a_lambda_a_v_ccm']})
    # Explicit column list keeps the header stable even with no filters.
    df = pd.DataFrame(records, columns=column_order)
    logger.info('Writing central wavelengths to csv file')
    df.to_csv(os.path.join(args.work_dir, 'central_wavelengths.csv'),
              index=False)
    del logger
def calculate_alambda(
    allcurves: dict,
    fnames2filters: dict,
    args: argparse.Namespace
):
    """
    Calculate the A_lambda/A_V for each filter.

    The opacity file was obtained from:
    http://svo2.cab.inta-csic.es/theory/fps/getextlaw.php
    The value for kv was obtained from:
    http://svo2.cab.inta-csic.es/theory/fps/index.php?id=CTIO/S-PLUS.z&&mode=browse&gname=CTIO&gname2=S-PLUS#filter

    Parameters
    ----------
    allcurves : dict
    fnames2filters : dict
    args : argparse.Namespace

    Returns
    -------
    allcurves : dict
    """
    logger = get_logger(__name__, loglevel=args.loglevel)
    logger.info('Calculating A_lambda')
    data_extra_dir = os.path.join(args.work_dir, 'data-extra')
    if not os.path.exists(data_extra_dir):
        message = ('Directory %s does not exist. Please make sure work_dir '
                   'points to the right place.' % data_extra_dir)
        logger.critical(message)
        raise ValueError(message)
    opacity_tab = ascii.read(os.path.join(
        data_extra_dir, 'ExtLaw_FitzIndeb_3.1.dat'))
    interp_opacity = interp1d(opacity_tab['wave(A)'],
                              opacity_tab['opacity(cm2/g)'])
    # V-band normalisation of the tabulated opacity.
    kv = 211.4
    for curve in fnames2filters:
        lambda_pivot = allcurves[curve]['pivot']['central_wave']
        allcurves[curve]['a_lambda_a_v'] = interp_opacity(lambda_pivot) / kv
        allcurves[curve]['a_lambda_a_v_ccm'] = CCM(np.array([lambda_pivot]))
    del logger
    return allcurves
def prepare_latex_table(
    allcurves: dict,
    fnames2filters: dict,
    args: argparse.Namespace
):
    """
    Prepare a latex table with the central wavelengths of the filters.

    Writes ``central_wavelengths.tex`` into ``args.work_dir``. Each data row
    carries eight fields: filter name, half-maximum central wavelength, FWHM,
    mean wavelength and width, pivot wavelength, and the two A_lambda/A_V
    estimates (opacity-based and CCM).
    """
    logger = get_logger(__name__, loglevel=args.loglevel)
    logger.info('Preparing latex table')
    latex_filename = os.path.join(args.work_dir, 'central_wavelengths.tex')
    with open(latex_filename, 'w') as f:
        f.write('\\begin{table*}\n')
        f.write('\\centering\n')
        f.write('\\caption{Central wavelengths of the S-PLUS filters.}\n')
        f.write('\\label{tab:central_wavelengths}\n')
        # Bug fix: the data rows print 8 fields, but the column spec declared
        # only 7 and the header listed 7 cells — add the CCM column.
        f.write('\\begin{tabular}{cccccccc}\n')
        f.write('\\hline\n')
        f.write('\\hline\n')
        f.write(
            'Filter & $\\lambda_{\\mathrm{central}}$ & FWHM & $\\lambda_{\\mathrm{mean}}$ & $\\Delta\\lambda_{\\mathrm{mean}}$ & $\\lambda_{\\mathrm{pivot}}$ & $A_{\\lambda}/A_{V}$ & $A_{\\lambda}/A_{V}$ (CCM) \\\\\n')
        f.write(" & [\\AA] & [\\AA] & [\\AA] & [\\AA] & [\\AA] & & \\\\\n")
        f.write('\\hline\n')
        for curve in fnames2filters.keys():
            f.write('%s & %.0f & %.0f & %.0f & %.0f & %.0f & %.3f & %.3f\\\\\n' %
                    (fnames2filters[curve]['fname'],
                     allcurves[curve]['central']['central_wave'],
                     allcurves[curve]['central']['delta_wave'],
                     allcurves[curve]['mean']['central_wave'],
                     allcurves[curve]['mean']['delta_wave'],
                     allcurves[curve]['pivot']['central_wave'],
                     allcurves[curve]['a_lambda_a_v'],
                     allcurves[curve]['a_lambda_a_v_ccm']))
        f.write('\\hline\n')
        f.write('\\end{tabular}\n')
        f.write('\\end{table*}\n')
    del logger
    return
if __name__ == '__main__':
    # Script entry point: parse the command line, then run the full pipeline.
    args = get_args()
    main(args)
|
splus-collabREPO_NAMEsplus_filtersPATH_START.@splus_filters_extracted@splus_filters-master@calculate_splus_filters.py@.PATH_END.py
|
{
"filename": "_tickcolor.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/layout/xaxis/_tickcolor.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class TickcolorValidator(_plotly_utils.basevalidators.ColorValidator):
    """Color validator for the ``layout.xaxis.tickcolor`` property."""

    def __init__(self, plotly_name="tickcolor", parent_name="layout.xaxis", **kwargs):
        # Default the edit type unless the caller overrides it.
        kwargs.setdefault("edit_type", "ticks")
        super(TickcolorValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@layout@xaxis@_tickcolor.py@.PATH_END.py
|
{
"filename": "_global_imports.py",
"repo_name": "ThomasEdwardRiley/xpsi-pre-transfer",
"repo_path": "xpsi-pre-transfer_extracted/xpsi-pre-transfer-master/xpsi/PostProcessing/_global_imports.py",
"type": "Python"
}
|
from __future__ import division, print_function
# Names re-exported on `from ... import *`; kept in one place so the public
# surface of the PostProcessing helper module is explicit.
__all__ = ["make_verbose",
           "verbose",
           "fragile",
           "_verbose",
           "_warning",
           "xpsiError",
           "random_seed",
           "fix_random_seed",
           "getdist",
           "nestcheck",
           "OrderedDict",
           "ABCMeta",
           "abstractmethod",
           "_six",
           "_os",
           "_sys",
           "_np",
           "AmbiguityError",
           "rc", "rcParams",
           "_mpl",
           "plt",
           "gridspec",
           "cm",
           "_get_default_locator",
           "_get_default_formatter",
           "MultipleLocator",
           "MaxNLocator",
           "AutoMinorLocator",
           "AutoLocator",
           "ScalarFormatter",
           "LogLocator",
           "NullFormatter"]
from abc import ABCMeta, abstractmethod
from ..global_imports import *
from .. import make_verbose, verbose, fragile, _warning, _verbose
import wrapt
from collections import OrderedDict
from matplotlib import pyplot as plt
from matplotlib import rc, rcParams
from matplotlib.ticker import MultipleLocator, MaxNLocator, AutoMinorLocator,\
AutoLocator, ScalarFormatter, LogLocator,\
NullFormatter
def _get_default_locator(prune):
    """Return the default major-tick locator: at most 5 bins, at least 3 ticks."""
    locator = MaxNLocator(nbins=5, min_n_ticks=3, prune=prune)
    return locator
def _get_default_formatter():
    """Return the default tick formatter: no offset, power limits (-2, 3)."""
    formatter = ScalarFormatter(useOffset=False)
    formatter.set_powerlimits(lims=(-2.0, 3.0))
    return formatter
from matplotlib import gridspec
from matplotlib import cm
# GetDist is optional: the module still imports without it, but related
# plotting functionality will be unavailable (getdist is set to None).
try:
    import getdist
except ImportError:
    _warning('Cannot import GetDist.')
    getdist = None
else:
    if _verbose:
        print('Imported GetDist version: %s' % getdist.__version__)
    # Only this specific release has been validated against this module.
    _expected_version = '0.3.1'
    if getdist.__version__ != _expected_version:
        if _verbose:
            _warning('The PostProcessing module is compatible with a specific '
                     'GetDist commit, with version %s, so this module will '
                     'likely not work as intended.' % _expected_version)
    # the following disables getdist.chains.print_load_details
    getdist.chains.print_load_details = False
# nestcheck is optional as well; mirror the GetDist handling above in shape
# (warn, fall back to None, and pin the validated release).
try:
    import nestcheck
except ImportError:
    _warning('Cannot import nestcheck.')
    nestcheck = None
else:
    if _verbose:
        print('Imported nestcheck version: %s' % nestcheck.__version__)
    # Only this specific release has been validated against this module.
    _expected_version = '0.2.0'
    if nestcheck.__version__ != _expected_version:
        if _verbose:
            _warning('The PostProcessing module is compatible with a specific '
                     'nestcheck commit, with version %s, so this module will '
                     'likely not work as intended.' % _expected_version)
class AmbiguityError(xpsiError):
    """ Thrown if ambiguous IDs are declared for objects.

    Derives from the package-wide :class:`xpsiError` base so callers can
    catch all xpsi errors uniformly.
    """
# Module-level seed applied by fix_random_seed(); None means fresh entropy.
random_seed = None

@wrapt.decorator
def fix_random_seed(func, instance, args, kwargs):
    """ Call *func* with NumPy's global RNG seeded to ``random_seed``.

    The pre-existing RNG state is saved and restored afterwards, so the
    wrapped call is reproducible without perturbing the caller's random
    stream.
    """
    global random_seed
    state = _np.random.get_state()
    _np.random.seed(random_seed)
    try:
        return func(*args, **kwargs)
    finally:
        # Robustness fix: restore the global RNG state even if *func* raises,
        # so an exception no longer leaves the seeded state leaked.
        _np.random.set_state(state)
|
ThomasEdwardRileyREPO_NAMExpsi-pre-transferPATH_START.@xpsi-pre-transfer_extracted@xpsi-pre-transfer-master@xpsi@PostProcessing@_global_imports.py@.PATH_END.py
|
{
"filename": "main.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/library/cpp/string_utils/base64/bench/metrics/main.py",
"type": "Python"
}
|
import yatest.common as yc
def test_export_metrics(metrics):
    """Run the base64 benchmark binary and record its results as test metrics."""
    benchmark_results = yc.execute_benchmark(
        'library/cpp/string_utils/base64/bench/bench')
    metrics.set_benchmark(benchmark_results)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@library@cpp@string_utils@base64@bench@metrics@main.py@.PATH_END.py
|
{
"filename": "parasite_axes.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/matplotlib/py2/mpl_toolkits/axes_grid1/parasite_axes.py",
"type": "Python"
}
|
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from matplotlib import (
artist as martist, collections as mcoll, transforms as mtransforms,
rcParams)
from matplotlib.axes import subplot_class_factory
from matplotlib.transforms import Bbox
from .mpl_axes import Axes
import numpy as np
class ParasiteAxesBase(object):
    """Mixin for axes drawn on top of a parent (host) axes.

    Combined with a concrete Axes class by ``parasite_axes_class_factory``;
    ``_get_base_axes_attr`` is injected by that factory.
    """

    def get_images_artists(self):
        # Partition the visible children into images and all other artists.
        artists = {a for a in self.get_children() if a.get_visible()}
        images = {a for a in self.images if a.get_visible()}
        return list(images), list(artists - images)

    def __init__(self, parent_axes, **kargs):
        self._parent_axes = parent_axes
        # Parasite axes never draw their own frame.
        kargs.update(dict(frameon=False))
        self._get_base_axes_attr("__init__")(self, parent_axes.figure,
                                             parent_axes._position, **kargs)

    def cla(self):
        self._get_base_axes_attr("cla")(self)
        # Hide all children by default; the host decides what gets rendered.
        martist.setp(self.get_children(), visible=False)
        # Share the parent's line-style cycler.
        self._get_lines = self._parent_axes._get_lines
        # In mpl's Axes, zorders of x- and y-axis are originally set
        # within Axes.draw().
        if self._axisbelow:
            self.xaxis.set_zorder(0.5)
            self.yaxis.set_zorder(0.5)
        else:
            self.xaxis.set_zorder(2.5)
            self.yaxis.set_zorder(2.5)
# Cache of generated parasite classes, keyed by the base axes class.
_parasite_axes_classes = {}


def parasite_axes_class_factory(axes_class=None):
    """Return (and cache) a parasite-axes class derived from *axes_class*."""
    if axes_class is None:
        axes_class = Axes
    if axes_class not in _parasite_axes_classes:
        def _get_base_axes_attr(self, attrname):
            return getattr(axes_class, attrname)

        _parasite_axes_classes[axes_class] = type(
            str("%sParasite" % (axes_class.__name__)),
            (ParasiteAxesBase, axes_class),
            {'_get_base_axes_attr': _get_base_axes_attr})
    return _parasite_axes_classes[axes_class]


ParasiteAxes = parasite_axes_class_factory()
# #class ParasiteAxes(ParasiteAxesBase, Axes):
# @classmethod
# def _get_base_axes_attr(cls, attrname):
# return getattr(Axes, attrname)
class ParasiteAxesAuxTransBase(object):
    """Mixin for parasite axes whose data coordinates pass through an
    auxiliary transform (``self.transAux``) before the parent's
    ``transData``.
    """

    def __init__(self, parent_axes, aux_transform, viewlim_mode=None,
                 **kwargs):
        self.transAux = aux_transform
        self.set_viewlim_mode(viewlim_mode)
        self._parasite_axes_class.__init__(self, parent_axes, **kwargs)

    def _set_lim_and_transforms(self):
        self.transAxes = self._parent_axes.transAxes
        # Data -> auxiliary transform -> parent data coordinates.
        self.transData = \
            self.transAux + \
            self._parent_axes.transData
        self._xaxis_transform = mtransforms.blended_transform_factory(
            self.transData, self.transAxes)
        self._yaxis_transform = mtransforms.blended_transform_factory(
            self.transAxes, self.transData)

    def set_viewlim_mode(self, mode):
        # mode: None (leave limits alone), "equal" (copy the parent's), or
        # "transform" (parent's limits mapped through the inverse of transAux).
        if mode not in [None, "equal", "transform"]:
            raise ValueError("Unknown mode : %s" % (mode,))
        else:
            self._viewlim_mode = mode

    def get_viewlim_mode(self):
        return self._viewlim_mode

    def update_viewlim(self):
        viewlim = self._parent_axes.viewLim.frozen()
        mode = self.get_viewlim_mode()
        if mode is None:
            pass
        elif mode == "equal":
            self.axes.viewLim.set(viewlim)
        elif mode == "transform":
            self.axes.viewLim.set(viewlim.transformed(self.transAux.inverted()))
        else:
            raise ValueError("Unknown mode : %s" % (self._viewlim_mode,))

    def _pcolor(self, method_name, *XYC, **kwargs):
        # Shared implementation for pcolor/pcolormesh: build a default grid
        # when only C is supplied, then map the grid through transAux unless
        # the caller passed an explicit transform.
        if len(XYC) == 1:
            C = XYC[0]
            ny, nx = C.shape
            gx = np.arange(-0.5, nx, 1.)
            gy = np.arange(-0.5, ny, 1.)
            X, Y = np.meshgrid(gx, gy)
        else:
            X, Y, C = XYC
        pcolor_routine = self._get_base_axes_attr(method_name)
        if "transform" in kwargs:
            mesh = pcolor_routine(self, X, Y, C, **kwargs)
        else:
            orig_shape = X.shape
            xy = np.vstack([X.flat, Y.flat])
            xyt=xy.transpose()
            wxy = self.transAux.transform(xyt)
            gx, gy = wxy[:,0].reshape(orig_shape), wxy[:,1].reshape(orig_shape)
            mesh = pcolor_routine(self, gx, gy, C, **kwargs)
            # Pre-transformed vertices are in the parent's data space.
            mesh.set_transform(self._parent_axes.transData)
        return mesh

    def pcolormesh(self, *XYC, **kwargs):
        return self._pcolor("pcolormesh", *XYC, **kwargs)

    def pcolor(self, *XYC, **kwargs):
        return self._pcolor("pcolor", *XYC, **kwargs)

    def _contour(self, method_name, *XYCL, **kwargs):
        # Shared implementation for contour/contourf, mirroring _pcolor.
        if len(XYCL) <= 2:
            C = XYCL[0]
            ny, nx = C.shape
            gx = np.arange(0., nx, 1.)
            gy = np.arange(0., ny, 1.)
            X,Y = np.meshgrid(gx, gy)
            CL = XYCL
        else:
            X, Y = XYCL[:2]
            CL = XYCL[2:]
        contour_routine = self._get_base_axes_attr(method_name)
        if "transform" in kwargs:
            cont = contour_routine(self, X, Y, *CL, **kwargs)
        else:
            orig_shape = X.shape
            xy = np.vstack([X.flat, Y.flat])
            xyt=xy.transpose()
            wxy = self.transAux.transform(xyt)
            gx, gy = wxy[:,0].reshape(orig_shape), wxy[:,1].reshape(orig_shape)
            cont = contour_routine(self, gx, gy, *CL, **kwargs)
            for c in cont.collections:
                c.set_transform(self._parent_axes.transData)
        return cont

    def contour(self, *XYCL, **kwargs):
        return self._contour("contour", *XYCL, **kwargs)

    def contourf(self, *XYCL, **kwargs):
        return self._contour("contourf", *XYCL, **kwargs)

    def apply_aspect(self, position=None):
        # Sync view limits from the parent before applying the aspect.
        self.update_viewlim()
        self._get_base_axes_attr("apply_aspect")(self)
        #ParasiteAxes.apply_aspect()
# Cache of generated aux-transform parasite classes, keyed by parasite class.
_parasite_axes_auxtrans_classes = {}


def parasite_axes_auxtrans_class_factory(axes_class=None):
    """Return (and cache) an aux-transform parasite class for *axes_class*."""
    if axes_class is None:
        parasite_axes_class = ParasiteAxes
    elif not issubclass(axes_class, ParasiteAxesBase):
        # Promote a plain axes class to a parasite class first.
        parasite_axes_class = parasite_axes_class_factory(axes_class)
    else:
        parasite_axes_class = axes_class
    if parasite_axes_class not in _parasite_axes_auxtrans_classes:
        _parasite_axes_auxtrans_classes[parasite_axes_class] = type(
            str("%sParasiteAuxTrans" % (parasite_axes_class.__name__)),
            (ParasiteAxesAuxTransBase, parasite_axes_class),
            {'_parasite_axes_class': parasite_axes_class,
             'name': 'parasite_axes'})
    return _parasite_axes_auxtrans_classes[parasite_axes_class]


ParasiteAxesAuxTrans = parasite_axes_auxtrans_class_factory(axes_class=ParasiteAxes)
def _get_handles(ax):
    """Collect legend-worthy artists of *ax*: lines, patches and the
    line/regular-polygon/circle collections, in that order."""
    handles = list(ax.lines)
    handles += list(ax.patches)
    for collection_type in (mcoll.LineCollection,
                            mcoll.RegularPolyCollection,
                            mcoll.CircleCollection):
        handles.extend(c for c in ax.collections
                       if isinstance(c, collection_type))
    return handles
class HostAxesBase(object):
    """Mixin giving a concrete Axes class the ability to host parasite axes.

    ``_get_base_axes_attr`` and ``_get_base_axes`` are injected by
    ``host_axes_class_factory``.
    """

    def __init__(self, *args, **kwargs):
        # Parasite axes registered via twin/twinx/twiny/get_aux_axes.
        self.parasites = []
        self._get_base_axes_attr("__init__")(self, *args, **kwargs)

    def get_aux_axes(self, tr, viewlim_mode="equal", axes_class=None):
        parasite_axes_class = parasite_axes_auxtrans_class_factory(axes_class)
        ax2 = parasite_axes_class(self, tr, viewlim_mode)
        # note that ax2.transData == tr + ax1.transData
        # Anything you draw in ax2 will match the ticks and grids of ax1.
        self.parasites.append(ax2)
        ax2._remove_method = lambda h: self.parasites.remove(h)
        return ax2

    def _get_legend_handles(self, legend_handler_map=None):
        # don't use this!
        Axes_get_legend_handles = self._get_base_axes_attr("_get_legend_handles")
        all_handles = list(Axes_get_legend_handles(self, legend_handler_map))
        # Include the parasites' handles so legends cover everything drawn.
        for ax in self.parasites:
            all_handles.extend(ax._get_legend_handles(legend_handler_map))
        return all_handles

    def draw(self, renderer):
        # Temporarily adopt the parasites' visible artists/images so they are
        # rendered as part of the host; the originals are restored afterwards.
        orig_artists = list(self.artists)
        orig_images = list(self.images)
        if hasattr(self, "get_axes_locator"):
            locator = self.get_axes_locator()
            if locator:
                pos = locator(self, renderer)
                self.set_position(pos, which="active")
                self.apply_aspect(pos)
            else:
                self.apply_aspect()
        else:
            self.apply_aspect()
        rect = self.get_position()
        for ax in self.parasites:
            ax.apply_aspect(rect)
            images, artists = ax.get_images_artists()
            self.images.extend(images)
            self.artists.extend(artists)
        self._get_base_axes_attr("draw")(self, renderer)
        self.artists = orig_artists
        self.images = orig_images

    def cla(self):
        # Clear the parasites first, then the host itself.
        for ax in self.parasites:
            ax.cla()
        self._get_base_axes_attr("cla")(self)
        #super(HostAxes, self).cla()

    def twinx(self, axes_class=None):
        """
        create a twin of Axes for generating a plot with a sharex
        x-axis but independent y axis. The y-axis of self will have
        ticks on left and the returned axes will have ticks on the
        right
        """
        if axes_class is None:
            axes_class = self._get_base_axes()
        parasite_axes_class = parasite_axes_class_factory(axes_class)
        ax2 = parasite_axes_class(self, sharex=self, frameon=False)
        self.parasites.append(ax2)
        self.axis["right"].set_visible(False)
        ax2.axis["right"].set_visible(True)
        ax2.axis["left", "top", "bottom"].set_visible(False)
        # Removing the twin restores the host's right axis (without labels).
        def _remove_method(h):
            self.parasites.remove(h)
            self.axis["right"].set_visible(True)
            self.axis["right"].toggle(ticklabels=False, label=False)
        ax2._remove_method = _remove_method
        return ax2

    def twiny(self, axes_class=None):
        """
        create a twin of Axes for generating a plot with a shared
        y-axis but independent x axis. The x-axis of self will have
        ticks on bottom and the returned axes will have ticks on the
        top
        """
        if axes_class is None:
            axes_class = self._get_base_axes()
        parasite_axes_class = parasite_axes_class_factory(axes_class)
        ax2 = parasite_axes_class(self, sharey=self, frameon=False)
        self.parasites.append(ax2)
        self.axis["top"].set_visible(False)
        ax2.axis["top"].set_visible(True)
        ax2.axis["left", "right", "bottom"].set_visible(False)
        # Removing the twin restores the host's top axis (without labels).
        def _remove_method(h):
            self.parasites.remove(h)
            self.axis["top"].set_visible(True)
            self.axis["top"].toggle(ticklabels=False, label=False)
        ax2._remove_method = _remove_method
        return ax2

    def twin(self, aux_trans=None, axes_class=None):
        """
        create a twin of Axes for generating a plot with a sharex
        x-axis but independent y axis. The y-axis of self will have
        ticks on left and the returned axes will have ticks on the
        right
        """
        if axes_class is None:
            axes_class = self._get_base_axes()
        parasite_axes_auxtrans_class = parasite_axes_auxtrans_class_factory(axes_class)
        # No aux transform given: identity mapping with shared view limits.
        if aux_trans is None:
            ax2 = parasite_axes_auxtrans_class(self, mtransforms.IdentityTransform(),
                                               viewlim_mode="equal",
                                               )
        else:
            ax2 = parasite_axes_auxtrans_class(self, aux_trans,
                                               viewlim_mode="transform",
                                               )
        self.parasites.append(ax2)
        ax2._remove_method = lambda h: self.parasites.remove(h)
        self.axis["top", "right"].set_visible(False)
        ax2.axis["top", "right"].set_visible(True)
        ax2.axis["left", "bottom"].set_visible(False)
        # Removing the twin restores the host's top/right axes (no labels).
        def _remove_method(h):
            self.parasites.remove(h)
            self.axis["top", "right"].set_visible(True)
            self.axis["top", "right"].toggle(ticklabels=False, label=False)
        ax2._remove_method = _remove_method
        return ax2

    def get_tightbbox(self, renderer, call_axes_locator=True):
        # Union of the host's and all parasites' tight boxes (empty ignored).
        bbs = [ax.get_tightbbox(renderer, call_axes_locator)
               for ax in self.parasites]
        get_tightbbox = self._get_base_axes_attr("get_tightbbox")
        bbs.append(get_tightbbox(self, renderer, call_axes_locator))
        _bbox = Bbox.union([b for b in bbs if b.width!=0 or b.height!=0])
        return _bbox
# Cache of generated host classes, keyed by the base axes class.
_host_axes_classes = {}


def host_axes_class_factory(axes_class=None):
    """Return (and cache) a host-axes class derived from *axes_class*."""
    if axes_class is None:
        axes_class = Axes
    cached = _host_axes_classes.get(axes_class)
    if cached is not None:
        return cached

    def _get_base_axes(self):
        return axes_class

    def _get_base_axes_attr(self, attrname):
        return getattr(axes_class, attrname)

    new_class = type(str("%sHostAxes" % (axes_class.__name__)),
                     (HostAxesBase, axes_class),
                     {'_get_base_axes_attr': _get_base_axes_attr,
                      '_get_base_axes': _get_base_axes})
    _host_axes_classes[axes_class] = new_class
    return new_class
def host_subplot_class_factory(axes_class):
    """Return a subplot class able to host parasite axes, based on *axes_class*."""
    host_axes_class = host_axes_class_factory(axes_class=axes_class)
    return subplot_class_factory(host_axes_class)


HostAxes = host_axes_class_factory(axes_class=Axes)
SubplotHost = subplot_class_factory(HostAxes)
def host_axes(*args, **kwargs):
    """
    Create axes that can act as a host to parasitic axes.

    Parameters
    ----------
    figure : `matplotlib.figure.Figure`
        Figure to which the axes will be added. Defaults to the current figure
        `pyplot.gcf()`.
    *args, **kwargs :
        Will be passed on to the underlying ``Axes`` object creation.
    """
    import matplotlib.pyplot as plt
    host_axes_class = host_axes_class_factory(kwargs.pop("axes_class", None))
    # "figure" is looked up but deliberately left in kwargs for the axes.
    fig = kwargs.get("figure", None)
    if fig is None:
        fig = plt.gcf()
    ax = host_axes_class(fig, *args, **kwargs)
    fig.add_axes(ax)
    plt.draw_if_interactive()
    return ax
def host_subplot(*args, **kwargs):
    """
    Create a subplot that can act as a host to parasitic axes.

    Parameters
    ----------
    figure : `matplotlib.figure.Figure`
        Figure to which the subplot will be added. Defaults to the current
        figure `pyplot.gcf()`.
    *args, **kwargs :
        Will be passed on to the underlying ``Axes`` object creation.
    """
    import matplotlib.pyplot as plt
    host_subplot_class = host_subplot_class_factory(
        kwargs.pop("axes_class", None))
    # "figure" is looked up but deliberately left in kwargs for the subplot.
    fig = kwargs.get("figure", None)
    if fig is None:
        fig = plt.gcf()
    ax = host_subplot_class(fig, *args, **kwargs)
    fig.add_subplot(ax)
    plt.draw_if_interactive()
    return ax
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@matplotlib@py2@mpl_toolkits@axes_grid1@parasite_axes.py@.PATH_END.py
|
{
"filename": "random_initializers.py",
"repo_name": "fchollet/keras",
"repo_path": "keras_extracted/keras-master/keras/src/initializers/random_initializers.py",
"type": "Python"
}
|
import math
from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.backend import random
from keras.src.initializers.initializer import Initializer
from keras.src.saving import serialization_lib
class RandomInitializer(Initializer):
    """Base class for seeded random initializers.

    Normalizes the user-supplied ``seed`` (None, int, SeedGenerator or a
    serialized dict) and remembers the raw value for config round-tripping.
    """

    def __init__(self, seed=None):
        # Keep the raw value so get_config() serializes what the user passed.
        self._init_seed = seed
        if seed is None:
            self.seed = random.make_default_seed()
            return
        if isinstance(seed, dict):
            self.seed = serialization_lib.deserialize_keras_object(seed)
            return
        if isinstance(seed, (int, random.SeedGenerator)):
            self.seed = seed
            return
        raise ValueError(
            "`seed` argument should be an instance of "
            "`keras.random.SeedGenerator()` or an integer. "
            f"Received: seed={seed}"
        )

    def get_config(self):
        return {
            "seed": serialization_lib.serialize_keras_object(self._init_seed)
        }
@keras_export(
    [
        "keras.initializers.RandomNormal",
        "keras.initializers.random_normal",
    ]
)
class RandomNormal(RandomInitializer):
    """Random normal initializer.

    Draws samples from a normal distribution for given parameters.

    Examples:

    >>> # Standalone usage:
    >>> initializer = RandomNormal(mean=0.0, stddev=1.0)
    >>> values = initializer(shape=(2, 2))

    >>> # Usage in a Keras layer:
    >>> initializer = RandomNormal(mean=0.0, stddev=1.0)
    >>> layer = Dense(3, kernel_initializer=initializer)

    Args:
        mean: A python scalar or a scalar keras tensor. Mean of the random
            values to generate.
        stddev: A python scalar or a scalar keras tensor. Standard deviation of
            the random values to generate.
        seed: A Python integer or instance of
            `keras.backend.SeedGenerator`.
            Used to make the behavior of the initializer
            deterministic. Note that an initializer seeded with an integer
            or `None` (unseeded) will produce the same random values
            across multiple calls. To get different random values
            across multiple calls, use as seed an instance
            of `keras.backend.SeedGenerator`.
    """

    def __init__(self, mean=0.0, stddev=0.05, seed=None):
        super().__init__(seed=seed)
        self.mean = mean
        self.stddev = stddev

    def __call__(self, shape, dtype=None):
        sampler_kwargs = dict(
            shape=shape,
            mean=self.mean,
            stddev=self.stddev,
            seed=self.seed,
            dtype=dtype,
        )
        return random.normal(**sampler_kwargs)

    def get_config(self):
        config = super().get_config()
        config.update({"mean": self.mean, "stddev": self.stddev})
        return config
@keras_export(
    [
        "keras.initializers.TruncatedNormal",
        "keras.initializers.truncated_normal",
    ]
)
class TruncatedNormal(RandomInitializer):
    """Initializer that generates a truncated normal distribution.

    The values generated are similar to values from a
    `RandomNormal` initializer, except that values more
    than two standard deviations from the mean are
    discarded and re-drawn.

    Examples:

    >>> # Standalone usage:
    >>> initializer = TruncatedNormal(mean=0., stddev=1.)
    >>> values = initializer(shape=(2, 2))

    >>> # Usage in a Keras layer:
    >>> initializer = TruncatedNormal(mean=0., stddev=1.)
    >>> layer = Dense(3, kernel_initializer=initializer)

    Args:
        mean: A python scalar or a scalar keras tensor. Mean of the random
            values to generate.
        stddev: A python scalar or a scalar keras tensor. Standard deviation of
            the random values to generate.
        seed: A Python integer or instance of
            `keras.backend.SeedGenerator`.
            Used to make the behavior of the initializer
            deterministic. Note that an initializer seeded with an integer
            or `None` (unseeded) will produce the same random values
            across multiple calls. To get different random values
            across multiple calls, use as seed an instance
            of `keras.backend.SeedGenerator`.
    """

    def __init__(self, mean=0.0, stddev=0.05, seed=None):
        super().__init__(seed=seed)
        self.mean = mean
        self.stddev = stddev

    def __call__(self, shape, dtype=None):
        sampler_kwargs = dict(
            shape=shape,
            mean=self.mean,
            stddev=self.stddev,
            seed=self.seed,
            dtype=dtype,
        )
        return random.truncated_normal(**sampler_kwargs)

    def get_config(self):
        config = super().get_config()
        config.update({"mean": self.mean, "stddev": self.stddev})
        return config
@keras_export(
    [
        "keras.initializers.RandomUniform",
        "keras.initializers.random_uniform",
    ]
)
class RandomUniform(RandomInitializer):
    """Random uniform initializer.

    Draws samples from a uniform distribution for given parameters.

    Examples:

    >>> # Standalone usage:
    >>> initializer = RandomUniform(minval=0.0, maxval=1.0)
    >>> values = initializer(shape=(2, 2))

    >>> # Usage in a Keras layer:
    >>> initializer = RandomUniform(minval=0.0, maxval=1.0)
    >>> layer = Dense(3, kernel_initializer=initializer)

    Args:
        minval: A python scalar or a scalar keras tensor. Lower bound of the
            range of random values to generate (inclusive).
        maxval: A python scalar or a scalar keras tensor. Upper bound of the
            range of random values to generate (exclusive).
        seed: A Python integer or instance of
            `keras.backend.SeedGenerator`.
            Used to make the behavior of the initializer
            deterministic. Note that an initializer seeded with an integer
            or `None` (unseeded) will produce the same random values
            across multiple calls. To get different random values
            across multiple calls, use as seed an instance
            of `keras.backend.SeedGenerator`.
    """

    def __init__(self, minval=-0.05, maxval=0.05, seed=None):
        super().__init__(seed=seed)
        self.minval = minval
        self.maxval = maxval

    def __call__(self, shape, dtype=None):
        sampler_kwargs = dict(
            shape=shape,
            minval=self.minval,
            maxval=self.maxval,
            seed=self.seed,
            dtype=dtype,
        )
        return random.uniform(**sampler_kwargs)

    def get_config(self):
        config = super().get_config()
        config.update({"minval": self.minval, "maxval": self.maxval})
        return config
@keras_export(
    [
        "keras.initializers.VarianceScaling",
        "keras.initializers.variance_scaling",
    ]
)
class VarianceScaling(RandomInitializer):
    """Initializer that adapts its scale to the shape of its input tensors.

    With `distribution="truncated_normal"` or `"untruncated_normal"`,
    samples are drawn from a (possibly truncated) normal distribution with
    zero mean and standard deviation (after truncation, if used)
    `stddev = sqrt(scale / n)`, where `n` is:

    - number of input units in the weight tensor, if `mode="fan_in"`
    - number of output units, if `mode="fan_out"`
    - average of the numbers of input and output units, if `mode="fan_avg"`

    With `distribution="uniform"`, samples are drawn from a uniform
    distribution within `[-limit, limit]`, where
    `limit = sqrt(3 * scale / n)`.

    Examples:

    >>> # Standalone usage:
    >>> initializer = VarianceScaling(
        scale=0.1, mode='fan_in', distribution='uniform')
    >>> values = initializer(shape=(2, 2))

    >>> # Usage in a Keras layer:
    >>> initializer = VarianceScaling(
        scale=0.1, mode='fan_in', distribution='uniform')
    >>> layer = Dense(3, kernel_initializer=initializer)

    Args:
        scale: Scaling factor (positive float).
        mode: One of `"fan_in"`, `"fan_out"`, `"fan_avg"`.
        distribution: Random distribution to use. One of
            `"truncated_normal"`, `"untruncated_normal"`, or `"uniform"`.
        seed: A Python integer or instance of `keras.backend.SeedGenerator`.
            Makes the initializer deterministic; an integer or `None`
            (unseeded) seed produces the same values on every call, while a
            `keras.backend.SeedGenerator` instance produces different values
            across calls.
    """

    def __init__(
        self,
        scale=1.0,
        mode="fan_in",
        distribution="truncated_normal",
        seed=None,
    ):
        if scale <= 0.0:
            raise ValueError(
                "Argument `scale` must be positive float. "
                f"Received: scale={scale}"
            )
        allowed_modes = {"fan_in", "fan_out", "fan_avg"}
        if mode not in allowed_modes:
            raise ValueError(
                f"Invalid `mode` argument: {mode}. "
                f"Please use one of {allowed_modes}"
            )
        # "normal" is accepted as a legacy alias for "truncated_normal".
        distribution = distribution.lower()
        if distribution == "normal":
            distribution = "truncated_normal"
        allowed_distributions = {
            "uniform",
            "truncated_normal",
            "untruncated_normal",
        }
        if distribution not in allowed_distributions:
            raise ValueError(
                f"Invalid `distribution` argument: {distribution}."
                f"Please use one of {allowed_distributions}"
            )
        self.scale = scale
        self.mode = mode
        self.distribution = distribution
        super().__init__(seed=seed)

    def __call__(self, shape, dtype=None):
        """Sample a tensor of `shape` using the configured scaling rule."""
        fan_in, fan_out = compute_fans(shape)
        # Pick the normalizer according to the mode; guard against zero fans.
        if self.mode == "fan_in":
            denom = max(1.0, fan_in)
        elif self.mode == "fan_out":
            denom = max(1.0, fan_out)
        else:
            denom = max(1.0, (fan_in + fan_out) / 2.0)
        scale = self.scale / denom
        if self.distribution == "truncated_normal":
            # The constant corrects the stddev for the two-sigma truncation.
            stddev = math.sqrt(scale) / 0.87962566103423978
            return random.truncated_normal(
                shape, mean=0.0, stddev=stddev, dtype=dtype, seed=self.seed
            )
        if self.distribution == "untruncated_normal":
            stddev = math.sqrt(scale)
            return random.normal(
                shape, mean=0.0, stddev=stddev, dtype=dtype, seed=self.seed
            )
        limit = math.sqrt(3.0 * scale)
        return random.uniform(
            shape, minval=-limit, maxval=limit, dtype=dtype, seed=self.seed
        )

    def get_config(self):
        """Return the serializable config, merging base-class state in."""
        base = super().get_config()
        return {
            **base,
            "scale": self.scale,
            "mode": self.mode,
            "distribution": self.distribution,
        }
@keras_export(
    [
        "keras.initializers.GlorotUniform",
        "keras.initializers.glorot_uniform",
    ]
)
class GlorotUniform(VarianceScaling):
    """Glorot (Xavier) uniform initializer.

    Samples come from a uniform distribution on `[-limit, limit]` with
    `limit = sqrt(6 / (fan_in + fan_out))`, where `fan_in` / `fan_out` are
    the numbers of input / output units in the weight tensor.

    Examples:

    >>> # Standalone usage:
    >>> initializer = GlorotUniform()
    >>> values = initializer(shape=(2, 2))

    >>> # Usage in a Keras layer:
    >>> initializer = GlorotUniform()
    >>> layer = Dense(3, kernel_initializer=initializer)

    Args:
        seed: A Python integer or instance of `keras.backend.SeedGenerator`.
            Makes the initializer deterministic; an integer or `None`
            (unseeded) seed produces the same values on every call, while a
            `keras.backend.SeedGenerator` instance produces different values
            across calls.

    Reference:

    - [Glorot et al., 2010](http://proceedings.mlr.press/v9/glorot10a.html)
    """

    def __init__(self, seed=None):
        # Equivalent to VarianceScaling with fan-averaged uniform sampling.
        super().__init__(
            scale=1.0,
            mode="fan_avg",
            distribution="uniform",
            seed=seed,
        )

    def get_config(self):
        """Only `seed` is configurable; the scaling setup is fixed."""
        seed = serialization_lib.serialize_keras_object(self._init_seed)
        return {"seed": seed}
@keras_export(
    [
        "keras.initializers.GlorotNormal",
        "keras.initializers.glorot_normal",
    ]
)
class GlorotNormal(VarianceScaling):
    """Glorot (Xavier) normal initializer.

    Samples come from a truncated normal distribution centered on 0 with
    `stddev = sqrt(2 / (fan_in + fan_out))`, where `fan_in` / `fan_out` are
    the numbers of input / output units in the weight tensor.

    Examples:

    >>> # Standalone usage:
    >>> initializer = GlorotNormal()
    >>> values = initializer(shape=(2, 2))

    >>> # Usage in a Keras layer:
    >>> initializer = GlorotNormal()
    >>> layer = Dense(3, kernel_initializer=initializer)

    Args:
        seed: A Python integer or instance of `keras.backend.SeedGenerator`.
            Makes the initializer deterministic; an integer or `None`
            (unseeded) seed produces the same values on every call, while a
            `keras.backend.SeedGenerator` instance produces different values
            across calls.

    Reference:

    - [Glorot et al., 2010](http://proceedings.mlr.press/v9/glorot10a.html)
    """

    def __init__(self, seed=None):
        # Equivalent to VarianceScaling with fan-averaged truncated normal.
        super().__init__(
            scale=1.0,
            mode="fan_avg",
            distribution="truncated_normal",
            seed=seed,
        )

    def get_config(self):
        """Only `seed` is configurable; the scaling setup is fixed."""
        seed = serialization_lib.serialize_keras_object(self._init_seed)
        return {"seed": seed}
@keras_export(
    [
        "keras.initializers.LecunNormal",
        "keras.initializers.lecun_normal",
    ]
)
class LecunNormal(VarianceScaling):
    """LeCun normal initializer.

    Samples come from a truncated normal distribution centered on 0 with
    `stddev = sqrt(1 / fan_in)`, where `fan_in` is the number of input
    units in the weight tensor.

    Examples:

    >>> # Standalone usage:
    >>> initializer = LecunNormal()
    >>> values = initializer(shape=(2, 2))

    >>> # Usage in a Keras layer:
    >>> initializer = LecunNormal()
    >>> layer = Dense(3, kernel_initializer=initializer)

    Args:
        seed: A Python integer or instance of `keras.backend.SeedGenerator`.
            Makes the initializer deterministic; an integer or `None`
            (unseeded) seed produces the same values on every call, while a
            `keras.backend.SeedGenerator` instance produces different values
            across calls.

    Reference:

    - [Klambauer et al., 2017](https://arxiv.org/abs/1706.02515)
    """

    def __init__(self, seed=None):
        # Equivalent to VarianceScaling with fan-in truncated normal.
        super().__init__(
            scale=1.0,
            mode="fan_in",
            distribution="truncated_normal",
            seed=seed,
        )

    def get_config(self):
        """Only `seed` is configurable; the scaling setup is fixed."""
        seed = serialization_lib.serialize_keras_object(self._init_seed)
        return {"seed": seed}
@keras_export(
    [
        "keras.initializers.LecunUniform",
        "keras.initializers.lecun_uniform",
    ]
)
class LecunUniform(VarianceScaling):
    """LeCun uniform initializer.

    Samples come from a uniform distribution on `[-limit, limit]` with
    `limit = sqrt(3 / fan_in)`, where `fan_in` is the number of input
    units in the weight tensor.

    Examples:

    >>> # Standalone usage:
    >>> initializer = LecunUniform()
    >>> values = initializer(shape=(2, 2))

    >>> # Usage in a Keras layer:
    >>> initializer = LecunUniform()
    >>> layer = Dense(3, kernel_initializer=initializer)

    Args:
        seed: A Python integer or instance of `keras.backend.SeedGenerator`.
            Makes the initializer deterministic; an integer or `None`
            (unseeded) seed produces the same values on every call, while a
            `keras.backend.SeedGenerator` instance produces different values
            across calls.

    Reference:

    - [Klambauer et al., 2017](https://arxiv.org/abs/1706.02515)
    """

    def __init__(self, seed=None):
        # Equivalent to VarianceScaling with fan-in uniform sampling.
        super().__init__(
            scale=1.0,
            mode="fan_in",
            distribution="uniform",
            seed=seed,
        )

    def get_config(self):
        """Only `seed` is configurable; the scaling setup is fixed."""
        seed = serialization_lib.serialize_keras_object(self._init_seed)
        return {"seed": seed}
@keras_export(["keras.initializers.HeNormal", "keras.initializers.he_normal"])
class HeNormal(VarianceScaling):
    """He normal initializer.

    Samples come from a truncated normal distribution centered on 0 with
    `stddev = sqrt(2 / fan_in)`, where `fan_in` is the number of input
    units in the weight tensor.

    Examples:

    >>> # Standalone usage:
    >>> initializer = HeNormal()
    >>> values = initializer(shape=(2, 2))

    >>> # Usage in a Keras layer:
    >>> initializer = HeNormal()
    >>> layer = Dense(3, kernel_initializer=initializer)

    Args:
        seed: A Python integer or instance of `keras.backend.SeedGenerator`.
            Makes the initializer deterministic; an integer or `None`
            (unseeded) seed produces the same values on every call, while a
            `keras.backend.SeedGenerator` instance produces different values
            across calls.

    Reference:

    - [He et al., 2015](https://arxiv.org/abs/1502.01852)
    """

    def __init__(self, seed=None):
        # Equivalent to VarianceScaling with scale=2 fan-in truncated normal.
        super().__init__(
            scale=2.0,
            mode="fan_in",
            distribution="truncated_normal",
            seed=seed,
        )

    def get_config(self):
        """Only `seed` is configurable; the scaling setup is fixed."""
        seed = serialization_lib.serialize_keras_object(self._init_seed)
        return {"seed": seed}
@keras_export(["keras.initializers.HeUniform", "keras.initializers.he_uniform"])
class HeUniform(VarianceScaling):
    """He uniform variance scaling initializer.

    Samples come from a uniform distribution on `[-limit, limit]` with
    `limit = sqrt(6 / fan_in)`, where `fan_in` is the number of input
    units in the weight tensor.

    Examples:

    >>> # Standalone usage:
    >>> initializer = HeUniform()
    >>> values = initializer(shape=(2, 2))

    >>> # Usage in a Keras layer:
    >>> initializer = HeUniform()
    >>> layer = Dense(3, kernel_initializer=initializer)

    Args:
        seed: A Python integer or instance of `keras.backend.SeedGenerator`.
            Makes the initializer deterministic; an integer or `None`
            (unseeded) seed produces the same values on every call, while a
            `keras.backend.SeedGenerator` instance produces different values
            across calls.

    Reference:

    - [He et al., 2015](https://arxiv.org/abs/1502.01852)
    """

    def __init__(self, seed=None):
        # Equivalent to VarianceScaling with scale=2 fan-in uniform sampling.
        super().__init__(
            scale=2.0,
            mode="fan_in",
            distribution="uniform",
            seed=seed,
        )

    def get_config(self):
        """Only `seed` is configurable; the scaling setup is fixed."""
        seed = serialization_lib.serialize_keras_object(self._init_seed)
        return {"seed": seed}
def compute_fans(shape):
    """Compute the fan-in and fan-out for a weight-tensor shape.

    Args:
        shape: Integer shape tuple (or any sequence of ints).

    Returns:
        A tuple of integer scalars `(fan_in, fan_out)`.
    """
    shape = tuple(shape)
    if not shape:
        # Scalars/constants: treat both fans as 1 to avoid special cases.
        fan_in = fan_out = 1
    elif len(shape) == 1:
        fan_in = fan_out = shape[0]
    elif len(shape) == 2:
        fan_in, fan_out = shape
    else:
        # Convolution kernels (2D, 3D, or more) with layout
        # (..., input_depth, output_depth): every leading dim contributes
        # to the receptive field.
        receptive_field_size = math.prod(shape[:-2])
        fan_in = shape[-2] * receptive_field_size
        fan_out = shape[-1] * receptive_field_size
    return int(fan_in), int(fan_out)
@keras_export(
    [
        "keras.initializers.Orthogonal",
        "keras.initializers.orthogonal",
        "keras.initializers.OrthogonalInitializer",
    ]
)
class Orthogonal(RandomInitializer):
    """Initializer that generates an orthogonal matrix.

    For a 2-D shape, the result is an orthogonal matrix obtained from the
    QR decomposition of a matrix of random normal numbers; if the matrix
    has fewer rows than columns the rows are orthogonal, otherwise the
    columns are.

    For shapes with more than two dimensions, a matrix of shape
    `(shape[0] * ... * shape[n - 2], shape[n - 1])` is orthogonalized and
    then reshaped to the requested shape (`n` is the shape's length).

    Examples:

    >>> # Standalone usage:
    >>> initializer = keras.initializers.Orthogonal()
    >>> values = initializer(shape=(2, 2))

    >>> # Usage in a Keras layer:
    >>> initializer = keras.initializers.Orthogonal()
    >>> layer = keras.layers.Dense(3, kernel_initializer=initializer)

    Args:
        gain: Multiplicative factor to apply to the orthogonal matrix.
        seed: A Python integer. Used to make the behavior of the
            initializer deterministic.

    Reference:

    - [Saxe et al., 2014](https://openreview.net/forum?id=_wzZwKpTDF_9C)
    """

    def __init__(self, gain=1.0, seed=None):
        self.gain = gain
        super().__init__(seed=seed)

    def __call__(self, shape, dtype=None):
        if len(shape) < 2:
            raise ValueError(
                "The tensor to initialize must be "
                "at least two-dimensional. Received: "
                f"shape={shape} of rank {len(shape)}."
            )
        # Collapse every dim except the last so conv kernels work too.
        num_cols = shape[-1]
        num_rows = math.prod(shape[:-1])
        # Always factorize the "tall" orientation of the flattened matrix.
        flat_shape = (max(num_cols, num_rows), min(num_cols, num_rows))
        a = random.normal(flat_shape, seed=self.seed, dtype=dtype)
        q, r = ops.qr(a)
        # Fix column signs using R's diagonal to make Q unique/uniform.
        q = q * ops.sign(ops.diag(r))
        if num_rows < num_cols:
            q = ops.transpose(q)
        return self.gain * ops.reshape(q, shape)

    def get_config(self):
        """Return the serializable config, merging base-class state in."""
        base = super().get_config()
        return {**base, "gain": self.gain}
|
fcholletREPO_NAMEkerasPATH_START.@keras_extracted@keras-master@keras@src@initializers@random_initializers.py@.PATH_END.py
|
{
"filename": "contour.py",
"repo_name": "enthought/mayavi",
"repo_path": "mayavi_extracted/mayavi-master/mayavi/filters/contour.py",
"type": "Python"
}
|
# Author: Prabhu Ramachandran <prabhu [at] aero . iitb . ac . in>
# Copyright (c) 2008, Prabhu Ramachandran
# License: BSD Style.
# Enthought library imports.
from traits.api import Instance
# Local imports.
from mayavi.components.contour import Contour as ContourComponent
from mayavi.core.pipeline_info import PipelineInfo
from mayavi.filters.wrapper import Wrapper
################################################################################
# `Contour` class.
################################################################################
class Contour(Wrapper):
    """
    A contour filter that wraps around the Contour component to generate
    iso-surfaces on any input dataset.
    """

    # The version of this class. Used for persistence.
    __version__ = 0

    # The contour component this wraps; `record=True` presumably exposes it
    # to the script-recording framework -- TODO confirm against Wrapper.
    filter = Instance(ContourComponent, args=(), record=True)

    # Accepts any dataset type, as long as it carries point attributes.
    input_info = PipelineInfo(datasets=['any'],
                              attribute_types=['point'],
                              attributes=['any'])

    # The contoured output places no restriction on dataset or attributes.
    output_info = PipelineInfo(datasets=['any'],
                               attribute_types=['any'],
                               attributes=['any'])
|
enthoughtREPO_NAMEmayaviPATH_START.@mayavi_extracted@mayavi-master@mayavi@filters@contour.py@.PATH_END.py
|
{
"filename": "_tickmode.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/carpet/aaxis/_tickmode.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class TickmodeValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    """Validator for the enumerated `carpet.aaxis.tickmode` property."""

    def __init__(self, plotly_name="tickmode", parent_name="carpet.aaxis", **kwargs):
        # Callers may override the defaults through **kwargs.
        edit_type = kwargs.pop("edit_type", "calc")
        values = kwargs.pop("values", ["linear", "array"])
        super(TickmodeValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            values=values,
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@carpet@aaxis@_tickmode.py@.PATH_END.py
|
{
"filename": "_visible.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/layout/annotation/_visible.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class VisibleValidator(_plotly_utils.basevalidators.BooleanValidator):
    """Validator for the boolean `layout.annotation.visible` property."""

    def __init__(
        self, plotly_name="visible", parent_name="layout.annotation", **kwargs
    ):
        # Callers may override the defaults through **kwargs.
        edit_type = kwargs.pop("edit_type", "calc+arraydraw")
        role = kwargs.pop("role", "info")
        super(VisibleValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            role=role,
            **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@layout@annotation@_visible.py@.PATH_END.py
|
{
"filename": "intensity_profiles.py",
"repo_name": "delinea/LDCU",
"repo_path": "LDCU_extracted/LDCU-main/intensity_profiles.py",
"type": "Python"
}
|
import os
import pickle
import numpy as np
from uncertainties import ufloat
import tqdm
import matplotlib
from matplotlib import pyplot as plt
from astropy.io import fits
from astropy import table
import get_lds_with_errors_v3 as glds
LINECOLOR = matplotlib.rcParams["ytick.color"]
# Compute and store intensity profile interpolators
def intensity_profile_interpolators(star, RF_list, models=None, main_dir="",
                                    overwrite=False, nsig=4):
    """Compute and cache intensity-profile interpolators for a star.

    Downloads the stellar-model grids within `nsig` sigma of the star's
    parameters, builds profile interpolators for each response function in
    `RF_list`, and pickles the result to disk. Subsequent calls reload the
    pickle unless `overwrite` is True.

    Args:
        star: Mapping with "Teff", "logg", "M_H", "vturb" and "Name"
            entries; parameter values expose `.n`/`.s` (ufloat-like),
            and "vturb" may be None (defaults to 2 +/- 0.5).
        RF_list: Response functions forwarded to
            `glds.get_profile_interpolators`.
        models: Optional model selection forwarded to the glds helpers.
        main_dir: Directory holding the pickle cache.
        overwrite: If True, recompute even when a cache file exists.
        nsig: Half-width (in sigma) of the parameter box to download.

    Returns:
        The interpolator structure from `glds.get_profile_interpolators`.
    """
    Teff = star["Teff"]
    logg = star["logg"]
    M_H = star["M_H"]
    # Default microturbulent velocity of 2 +/- 0.5 when unspecified.
    vturb = ufloat(2, 0.5) if star["vturb"] is None else star["vturb"]

    name = star["Name"].replace(" ", "_")
    fn = os.path.join(main_dir,
                      "{}_intensity_profile_interpolators.pkl".format(name))
    if not os.path.isfile(fn) or overwrite:
        # Parameter box is clipped at zero for Teff and vturb.
        bounds = {"Teff": (max(0, Teff.n-nsig*Teff.s), Teff.n+nsig*Teff.s),
                  "logg": (logg.n-nsig*logg.s, logg.n+nsig*logg.s),
                  "M_H": (M_H.n-nsig*M_H.s, M_H.n+nsig*M_H.s),
                  "vturb": (max(0, vturb.n-nsig*vturb.s),
                            vturb.n+nsig*vturb.s)}
        glds.download_files(**bounds, models=models, force_download=False)
        subgrids = glds.get_subgrids(**bounds, models=models)
        ip_interp = glds.get_profile_interpolators(subgrids, RF_list,
                                                   models=models,
                                                   interpolation_order=1,
                                                   atlas_correction=True,
                                                   photon_correction=True,
                                                   max_bad_RF=0.0,
                                                   overwrite_pck=False,
                                                   atlas_hdu=1)
        with open(fn, "wb") as f:
            pickle.dump(ip_interp, f)
    else:
        # NOTE(review): unpickling trusts the cache file; only load caches
        # written by this program itself.
        with open(fn, "rb") as f:
            ip_interp = pickle.load(f)
    return ip_interp
# Drawing stellar parameters from normal distributions
def get_samples(star, RF_list, nsamples=10000):
    """Draw `nsamples` stellar-parameter samples from normal distributions.

    Columns are (Teff, logg, M_H, vturb), each drawn from a normal
    distribution with the parameter's nominal value and sigma. Negative
    draws for Teff and vturb are re-drawn until non-negative.

    Args:
        star: Mapping with "Teff", "logg", "M_H" and "vturb" entries whose
            values expose `.n`/`.s` (ufloat-like); "vturb" may be None,
            in which case 2 +/- 0.5 is used.
        RF_list: Unused here; kept for signature symmetry with callers.
        nsamples: Number of rows to draw.

    Returns:
        A (nsamples, 4) float array of parameter draws.

    Raises:
        RuntimeError: If a non-negative sample set cannot be obtained
            after ~100 redraw passes.
    """
    Teff = star["Teff"]
    logg = star["logg"]
    M_H = star["M_H"]
    vturb = ufloat(2, 0.5) if star["vturb"] is None else star["vturb"]

    samples = np.full((nsamples, 4), np.nan)
    for col, param in enumerate([Teff, logg, M_H, vturb]):
        samples[:, col] = np.random.normal(param.n, param.s, nsamples)
        if col in (0, 3):  # Teff and vturb must stay non-negative
            redraws = 0
            while np.any(samples[:, col] < 0):
                neg = samples[:, col] < 0
                samples[:, col][neg] = np.random.normal(param.n, param.s,
                                                        neg.sum())
                if redraws >= 100:
                    raise RuntimeError("failed to draw stellar parameters")
                redraws += 1
    return samples
# Interpolating ATLAS and PHOENIX LD curves
def intensity_profiles(star, ip_interp, samples, main_dir="", save=False):
    """Evaluate intensity profiles for every stellar-parameter sample.

    For each response function (RF) and model tag, the interpolators in
    `ip_interp` are evaluated at `samples`; non-finite points are dropped
    and the pooled (mu, I) points are sorted by mu.

    Args:
        star: Star description dict; its entries are written to the FITS
            header and "Name" is used in output file names.
        ip_interp: Mapping RF -> {tag -> (mu interpolator, intensity
            interpolator)} as returned by intensity_profile_interpolators.
        samples: (nsamples, 4) array of (Teff, logg, M_H, vturb) draws.
        main_dir: Output directory for the optional FITS files.
        save: If True, write one gzipped FITS file per response function.

    Returns:
        Dict mapping RF -> {tag -> (mu, I)} of pooled, mu-sorted points.
    """
    intensity_profiles_dict = {}
    RF_list = list(ip_interp.keys())
    pbar = tqdm.tqdm(total=len(RF_list)*(len(ip_interp[RF_list[0]])),
                     desc="Computing intensity profiles", dynamic_ncols=True)
    for RF in RF_list:
        intensity_profiles_RF = {}
        for FT in ip_interp[RF]:
            lndi_mu, lndi_I = ip_interp[RF][FT]
            if lndi_I is None:
                continue
            if FT.startswith("A"):
                # "A" tags: mu grid is a fixed array, tiled once per sample.
                mu = lndi_mu
                I = lndi_I(samples)
                mu = np.tile(mu, (len(I), 1))
            elif FT.startswith("P"):
                # "P" tags: the last sample column (vturb, per get_samples
                # ordering) is dropped before interpolation.
                mu = lndi_mu(samples[:, :-1])
                I = lndi_I(samples[:, :-1])
            # Drop non-finite points (out-of-grid samples), then sort by mu.
            idx = np.isfinite(mu) & np.isfinite(I)
            mu = mu[idx]
            I = I[idx]
            idx = np.argsort(mu)
            mu = mu[idx]
            I = I[idx]
            intensity_profiles_RF[FT] = (mu, I)
            pbar.update()
        intensity_profiles_dict[RF] = intensity_profiles_RF
        if save:
            # saving intensity profiles
            rf_name = os.path.splitext(os.path.basename(RF))[0]
            fn = os.path.join(main_dir,
                              "{}_intensity_profiles__{}.fits"
                              .format(star["Name"].replace(" ", "_"), rf_name))
            hdu = fits.PrimaryHDU()
            # Stellar parameters and the RF name go in the primary header.
            for k, v in star.items():
                hdu.header[k] = str(v)
            hdu.header["RF"] = RF
            hdulist = fits.HDUList([hdu])
            for mn, (mu, I) in intensity_profiles_RF.items():
                tbl = table.Table(data=[mu, I], names=["mu", "Intensity"])
                hdu = fits.table_to_hdu(tbl)
                hdu.name = mn
                hdulist.append(hdu)
            hdulist.writeto(fn, overwrite=True)
            # NOTE(review): shells out to gzip; silently a no-op if gzip
            # is unavailable on the system.
            os.system("gzip '{}'".format(fn))
    pbar.close()
    return intensity_profiles_dict
# plotting and computing log-likelihood
def ld_linear_model(mu, a):
    """Linear limb-darkening law: I(mu)/I0 = 1 - a*(1 - mu)."""
    deficit = 1.0 - mu
    return 1.0 - a * deficit
def ld_square_root_model(mu, s1, s2):
    """Square-root law: I(mu)/I0 = 1 - s1*(1 - mu) - s2*(1 - sqrt(mu))."""
    root_mu = np.sqrt(mu)
    return 1.0 - s1 * (1.0 - mu) - s2 * (1.0 - root_mu)
def ld_quadratic_model(mu, u1, u2):
    """Quadratic law: I(mu)/I0 = 1 - u1*(1 - mu) - u2*(1 - mu)**2."""
    deficit = 1.0 - mu
    return 1.0 - u1 * deficit - u2 * deficit ** 2
def ld_kipping2013_model(mu, q1, q2):
    """Quadratic law in the Kipping (2013) (q1, q2) parameterization."""
    root_q1 = np.sqrt(q1)
    # Map (q1, q2) back to the usual quadratic coefficients (u1, u2).
    u1 = 2 * root_q1 * q2
    u2 = root_q1 * (1 - 2 * q2)
    deficit = 1.0 - mu
    return 1.0 - u1 * deficit - u2 * deficit ** 2
def ld_three_parameter_model(mu, b1, b2, b3):
    """Three-parameter law: drops the sqrt(mu) term of the non-linear law."""
    root_mu = np.sqrt(mu)
    return (1.0 - b1 * (1.0 - mu) - b2 * (1.0 - root_mu ** 3)
            - b3 * (1 - mu ** 2))
def ld_non_linear_model(mu, c1, c2, c3, c4):
    """Four-parameter non-linear law in powers of sqrt(mu)."""
    root_mu = np.sqrt(mu)
    return (1.0 - c1 * (1.0 - root_mu) - c2 * (1.0 - mu)
            - c3 * (1.0 - root_mu ** 3) - c4 * (1 - mu ** 2))
def ld_logarithmic_model(mu, l1, l2):
    """Logarithmic law: I(mu)/I0 = 1 - l1*(1 - mu) - l2*mu*ln(mu)."""
    linear_term = l1 * (1.0 - mu)
    log_term = l2 * mu * np.log(mu)
    return 1.0 - linear_term - log_term
def ld_exponential_model(mu, e1, e2):
    """Exponential law: I(mu)/I0 = 1 - e1*(1 - mu) - e2/(1 - exp(mu))."""
    linear_term = e1 * (1.0 - mu)
    exp_term = e2 / (1.0 - np.exp(mu))
    return 1.0 - linear_term - exp_term
def ld_power2_model(mu, p1, p2):
    """Power-2 law: I(mu)/I0 = 1 - p1*(1 - mu**p2)."""
    deficit = 1.0 - mu ** p2
    return 1.0 - p1 * deficit
def chi2(mu, I, u1, u2, ld_model):
    """Sum of squared residuals between `I` and the evaluated LD law."""
    residuals = I - ld_model(mu, u1, u2)
    return np.sum(residuals ** 2)
def lnlike(mu, I, u1, u2, fit_func, ld_model=ld_quadratic_model):
    """Normalized log-likelihood of LD coefficients (u1, u2) for a profile.

    The chi-square of (u1, u2) is compared against the chi-square of the
    best-fit coefficients returned by `fit_func`, so the best fit has a
    log-likelihood of 0 and worse fits are negative.

    Args:
        mu: Array of mu = cos(theta) values.
        I: Normalized intensities at each mu.
        u1, u2: Candidate limb-darkening coefficients.
        fit_func: Fitting routine returning the best-fit (u1, u2) as
            objects exposing a nominal value `.n` (e.g. ufloats).
        ld_model: Two-coefficient limb-darkening law evaluated inside
            chi2(); defaults to the quadratic law, matching the
            two-coefficient signature assumed for `fit_func`.

    Returns:
        -0.5 * (chi2(u1, u2) / chi2_min - 1).
    """
    # Bug fix: chi2() requires the model function as its fifth argument;
    # the original calls omitted it and always raised a TypeError.
    u1_best, u2_best = fit_func(mu, I)
    chi2_min = chi2(mu, I, u1_best.n, u2_best.n, ld_model)
    chi2_val = chi2(mu, I, u1, u2, ld_model)
    ll = -0.5 * (chi2_val / chi2_min - 1)
    return ll
# Mapping from limb-darkening-law name to (model function, fit routine).
# The model evaluates I(mu)/I0 for given coefficients; the glds fit routine
# returns the best-fit coefficients for a (mu, I) profile.
ld_laws = {"linear": (ld_linear_model, glds.lds.fit_linear),
           "square-root": (ld_square_root_model, glds.lds.fit_square_root),
           "quadratic": (ld_quadratic_model, glds.lds.fit_quadratic),
           "kipping2013": (ld_kipping2013_model, glds.lds.fit_kipping2013),
           "three-parameter": (ld_three_parameter_model,
                               glds.lds.fit_three_parameter),
           "non-linear": (ld_non_linear_model, glds.lds.fit_non_linear),
           "logarithmic": (ld_logarithmic_model, glds.lds.fit_logarithmic),
           "exponential": (ld_exponential_model, glds.lds.fit_exponential),
           "power-2": (ld_power2_model, glds.lds.fit_power2)}
def plot_intensity_profiles(star, intensity_profiles, ld_law="quadratic",
                            main_dir="", FT_idx=2):
    """Plot intensity profiles with fitted limb-darkening curves.

    One figure is produced (and saved as PNG) per response function,
    showing the ATLAS and/or PHOENIX data points, a fitted LD curve per
    model family, and a joint fit when more than one family is present.

    Args:
        star: Star description dict; "Name" is used in titles/file names.
        intensity_profiles: RF -> {tag -> (mu, I)} mapping, as returned
            by intensity_profiles().
        ld_law: Key into `ld_laws` selecting the law and fitting routine.
        main_dir: Directory where the PNG figures are written.
        FT_idx: Data variant to plot: 0 = original points, 1 = points
            with mu <= 0.05 discarded (Sing 2010), 2 = 100-point
            interpolation (Claret & Bloemen 2011).

    Returns:
        The matplotlib figure of the last response function plotted.
    """
    ld_model, fit_func = ld_laws[ld_law]
    FT_names = ["", "(discarded points with $\\mu \\leq 0.05$ $-$ Sing 2010)",
                "(100-point interpolation $-$ Claret & Bloemen 2011)"]
    fn_suffix = ["", "_mu>0.05", "_interp100"]
    kwargs = dict(s=5, alpha=.2, zorder=1)
    RF_list = list(intensity_profiles.keys())
    for RF in RF_list:
        rf_name = os.path.splitext(os.path.basename(RF))[0]
        fn = ("{}_intensity_profiles__{}{}.png"
              .format(star["Name"].replace(" ", "_"), rf_name,
                      fn_suffix[FT_idx]))
        # Human-readable filter description for the plot title.
        if rf_name.lower().startswith("uniform"):
            rf_name = ("top-hat filter from {} A to {} A"
                       .format(*rf_name.split("_")[1:]))
        elif rf_name.lower().startswith("gaussian"):
            rf_name = ("gaussian filter centered on {} A (FWHM = {} A)"
                       .format(*rf_name.split("_")[1:]))
        ttl = ("Intensity profile for {}\nwith {}\n"
               .format(star["Name"], rf_name.replace("_", " ")))
        fig, ax = plt.subplots(figsize=(6, 4.5), gridspec_kw=dict(top=0.83))
        fig.suptitle(ttl, fontsize=13)
        fig.text(0.5, 0.87, FT_names[FT_idx], fontsize=10,
                 ha="center", va="center")
        # Pooled points across model families, used for the joint fit.
        mu_all, I_all = np.array([]), np.array([])
        n = 0
        for FT in intensity_profiles[RF]:
            FT_flags = [FT in ["A17", "P"],  # original data points
                        FT.endswith("S"),  # Sing 2010 (mu ≥ 0.05)
                        FT.endswith("100")]  # Claret & Bloemen 2011 (interp.)
            if not FT_flags[FT_idx]:
                continue
            if "A" in FT:
                mn, c, ls = "ATLAS", "C0", ":"
            elif "P" in FT:
                mn, c, ls = "PHOENIX", "C1", "--"
            else:
                mn, c, ls = "??", "C2", "-."
            mu, I = intensity_profiles[RF][FT]
            mu_all = np.append(mu_all, mu)
            I_all = np.append(I_all, I)
            # Best-fit coefficients (nominal values only) for this family.
            ldc = [ldc_i.n for ldc_i in fit_func(mu, I)]
            ax.scatter(mu, I, c=c, label="{} data".format(mn), **kwargs)
            lbl_pattern = ("{{}} {{}} model: ({0})"
                           .format(", ".join(["{:.3f}" for ldc_i in ldc])))
            lbl = lbl_pattern.format(mn, ld_law, *ldc)
            ax.plot(mu, ld_model(mu, *ldc), c=LINECOLOR, ls=ls, label=lbl,
                    zorder=100)
            n += 1
        if n > 1:
            # Joint fit over all families.
            # NOTE(review): the joint curve is drawn on the last dataset's
            # mu grid, not on mu_all -- confirm this is intended.
            ldc = [ldc_i.n for ldc_i in fit_func(mu_all, I_all)]
            lbl_pattern = ("Joint {{}} model: ({0})"
                           .format(", ".join(["{:.3f}" for ldc_i in ldc])))
            lbl = lbl_pattern.format(ld_law, *ldc)
            ax.plot(mu, ld_model(mu, *ldc), c=LINECOLOR, ls="-", label=lbl,
                    zorder=100)
        i = 1.1  # global font-size scaling factor for the axis labels
        ax.set_ylabel(r"$\frac{\mathcal{I}\left(\mu\right)}{\mathcal{I}_0}$",
                      fontsize=14*i, rotation=0, labelpad=15)
        ax.set_xlabel(r"$\mu=\cos\!\left(\theta\right)$", fontsize=10*i)
        ax.set_ylim(-0.01, 1.01)
        ax.set_xlim(-0.01, 1.01)
        for i, txt in enumerate(["limb", "center"]):
            ax.text(i, -0.11*np.ptp(ax.get_ylim()), "({})".format(txt),
                    ha="center", va="center", fontsize=10, style="italic")
        ax.legend(loc="lower right", fontsize=8)
        fig.savefig(os.path.join(main_dir, fn), dpi=300)
    return fig
|
delineaREPO_NAMELDCUPATH_START.@LDCU_extracted@LDCU-main@intensity_profiles.py@.PATH_END.py
|
{
"filename": "WebSocket-Compliance-Testing.md",
"repo_name": "crossbario/crossbar",
"repo_path": "crossbar_extracted/crossbar-master/docs-old/pages/administration/production/WebSocket-Compliance-Testing.md",
"type": "Markdown"
}
|
title: WebSocket Compliance Testing
toc: [Documentation, Administration, Going to Production, WebSocket Compliance Testing]
# WebSocket Compliance Testing
Crossbar.io has best-in-class compliance to the WebSocket protocol (RFC6455).
> Compliance is verified via the [**Autobahn**Testsuite](http://autobahn.ws/testsuite/), the [industry standard](http://autobahn.ws/testsuite/#users) WebSocket compliance testsuite, which includes more than 500 automated test cases. Crossbar.io passed *all* tests - 100% strict. No matter what WebSocket server you use, we encourage you to run the testsuite against it and compare.
Protocol compliance is very important for two reasons:
* interoperability
* security
You don't want a malicious client to disturb or break your servers, nor do you want to fail to serve clients because of interoperability issues.
## Testing yourself
Install the testsuite:
```
pip install -U autobahntestsuite
```
Create a Crossbar.io node with a node configuration starting a WebSocket testee transport:
```json
{
"workers": [
{
"type": "router",
"transports": [
{
"type": "websocket.testee",
"endpoint": {
"type": "tcp",
"port": 9001,
"backlog": 1024
},
"options": {
"compression": {
"deflate": {
}
}
}
}
]
}
]
}
```
Now create a file `fuzzingclient.json`:
```json
{
"servers": [
{
"agent": "Crossbar.io",
"url": "ws://127.0.0.1:9001"
}
],
"cases": ["*"],
"exclude-cases": [],
"exclude-agent-cases": {}
}
```
This test specification defines which test cases to run against what servers.
Then, start Crossbar.io in a first terminal
```
crossbar start
```
and start the testsuite in a second terminal
```
wstest -m fuzzingclient -s fuzzingclient.json
```
Testing will take some time. It runs over 500 test cases. In the end, it'll generate HTML report files. Open the `reports/servers/index.html` overview page in your browser - click on the green "Pass" links to view the case detail reports.
## Configuration
option | description
---|---
**`id`** | ID of the transport within the running node (default: **`transport<N>`** where `N` is numbered automatically starting from `1`)
**`type`** | Type of transport - must be `"websocket.testee"`.
**`endpoint`** | Listening endpoint for transport. See [Transport Endpoints](Transport Endpoints) for configuration
**`debug`** | Turn on debug logging for this transport instance (default: **`false`**).
**`url`** | The WebSocket server URL to use (default: `null`)
**`options`** | See [WebSocket Options](WebSocket-Options)
|
crossbarioREPO_NAMEcrossbarPATH_START.@crossbar_extracted@crossbar-master@docs-old@pages@administration@production@WebSocket-Compliance-Testing.md@.PATH_END.py
|
{
"filename": "two_dim.py",
"repo_name": "PhaseTracer/PhaseTracer",
"repo_path": "PhaseTracer_extracted/PhaseTracer-master/example/xSM/plot_scripts/two_dim.py",
"type": "Python"
}
|
"""
2d plots for paper
==================
"""
import click
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from plot_fun import fun_diff, fun_gamma_line
from three_dim import add_max_num_legend, max_num_cmap
from style import style
@click.command()
@click.argument("plot_type")
@click.argument("show")
def make_plot(plot_type, show):
    """Render the three-panel 2d-scan comparison figure and save it as a PDF.

    :param plot_type: Which comparison to plot: "xi", "scale", "scheme",
        "daisy" or "max".
    :param show: Which quantity to display: "deltagamma", "deltaT" or
        "max_num".

    Fixes over the previous revision:
      * ``sys`` is now imported (``sys.exit`` was a NameError before);
      * the colorbar handle is assigned (``clb`` was undefined before);
      * the inner panel function no longer shadows this function's name.
    """
    style()

    plot_xi = plot_type == "xi"
    plot_scale = plot_type == "scale"
    plot_scheme = plot_type == "scheme"
    plot_daisy = plot_type == "daisy"
    plot_max = plot_type == "max"

    show_deltagamma = show == "deltagamma"
    show_deltaT = show == "deltaT"
    show_max_num = show == "max_num"

    if plot_xi:
        figure_name = "xi"
    if plot_scale:
        figure_name = "scale"
    if plot_scheme:
        figure_name = "scheme"
    if plot_daisy:
        figure_name = "daisy"
    if plot_max:
        figure_name = "max"
    if show_max_num:
        figure_name = "max_num"
        # "max_num" is a variant of the delta-T comparison.
        show_deltaT = True

    if show_deltaT:
        if plot_xi:
            title = r"$T_c{(\xi=25)}-T_c{(\xi=0)}$ (GeV)"
        if plot_scale:
            title = r"$T_c{(Q=2m_t)}-T_c{(Q=\frac{1}{2}m_t)}$ (GeV)"
        if plot_scheme:
            title = r"$T_c(\overline{\rm MS})-T_c({\text{OS-like}})$ (GeV)"
        if plot_daisy:
            if show_deltagamma:
                title = r"$\left|\gamma_{\rm EW}^{\rm AE}-\gamma_{\rm EW}^{\rm PW}|/\gamma_{\rm EW}^{\rm AE}\right|$"
            else:
                title = r"$\left|T_c^{\rm PW}-T_c^{\rm AE}\right|$ (GeV)"
        if plot_max:
            title = r"$\max |\Delta T_c| $ (GeV)"
        if show_max_num:
            title = r""
        cm = 'rainbow'
    else:
        title = r"$T_c{(\xi=0)}$ (GeV)"
        cm = 'cool'

    labels = [r'$M_s$ (GeV)', r'$\lambda_{S}$', r'$\lambda_{hs}$']

    def _draw_panel(ax, par, cbar=False, legend=False):
        """Draw one scan panel (``par`` selects the scanned parameter pair)."""
        if plot_xi:
            data2 = np.loadtxt("../2d_scan/" + par + "_default.txt")
            data1 = np.loadtxt("../2d_scan/" + par + "_xi25.txt")
            show_data = fun_diff(data1, data2, data1,
                                 show_gamma=show_deltagamma)
            print("max=", max(show_data[:, 4]))
            print("min=", min(show_data[:, 4]))
        if plot_scale:
            data2 = np.loadtxt("../2d_scan/" + par + "_05mt.txt")
            data1 = np.loadtxt("../2d_scan/" + par + "_2mt.txt")
            show_data = fun_diff(data1, data2, data1,
                                 show_gamma=show_deltagamma)
        if plot_scheme:
            data1 = np.loadtxt("../2d_scan/" + par + "_default.txt")
            data2 = np.loadtxt("../2d_scan/" + par + "_OSlike.txt")
            show_data = fun_diff(data1, data2, data1, show_gamma=show_deltagamma,
                                 norm=False, use_abs=False, gamma_min=0, sort=True)
        if plot_daisy:
            data1 = np.loadtxt("../2d_scan/" + par + "_default.txt")
            data2 = np.loadtxt("../2d_scan/" + par + "_Parwani.txt")
            show_data = fun_diff(data1, data2, data1,
                                 show_gamma=show_deltagamma)
        if plot_max:
            data_default = np.loadtxt("../2d_scan/" + par + "_default.txt")
            data_xi3 = np.loadtxt("../2d_scan/" + par + "_xi25.txt")
            data_05mt = np.loadtxt("../2d_scan/" + par + "_05mt.txt")
            data_2mt = np.loadtxt("../2d_scan/" + par + "_2mt.txt")
            data_set = [data_default, data_xi3, data_05mt, data_2mt]
            len_data = len(data_default)
            # All four scans must cover identical parameter points.
            for ii in range(len(data_set)):
                if len(data_set[ii]) != len_data:
                    print("Length of data file " +
                          par + str(ii) + " is wrong.")
                    sys.exit()
            data_diff = []
            for ii in range(len_data):
                ms = data_default[ii][0]
                lambda_s = data_default[ii][1]
                lambda_hs = data_default[ii][2]
                flag_sel = True
                for jj in range(len(data_set)):
                    if abs(data_set[jj][ii][0] - ms) > 0.01:
                        print(data_set[jj][ii][0], ms)
                        print("Content of data file " +
                              par + str(jj) + " is wrong.")
                        sys.exit()
                    vsjj = data_set[jj][ii][8]
                    gammajj = fun_gamma_line(data_set[jj][ii])
                    # Drop points without a first-order transition.
                    if gammajj <= 0 or vsjj < 10:
                        flag_sel = False
                if flag_sel:
                    if show_deltaT:
                        d_xi = abs(data_default[ii][4] - data_xi3[ii][4])
                        d_scale = abs(data_05mt[ii][4] - data_2mt[ii][4])
                    else:
                        d_xi = abs(fun_gamma_line(
                            data_default[ii]) - fun_gamma_line(data_xi3[ii]))
                        d_scale = abs(fun_gamma_line(
                            data_05mt[ii]) - fun_gamma_line(data_2mt[ii]))
                    d_set = [d_scale, d_xi]
                    # Record which comparison dominates and by how much.
                    data_diff.append([ms, lambda_s, lambda_hs,
                                      int(np.argmax(d_set)), max(d_set)])
            show_data = np.array(data_diff)

        # Column indices: 0 = M_s, 1 = lambda_S, 2 = lambda_hs
        if par == "lhs_ls":
            nx = 1
            ny = 2
            label = r"$M_s=65$ GeV"
        if par == "ms_lhs":
            nx = 0
            ny = 2
            label = r"$\lambda_{S}=0.1$"
        if par == "ms_ls":
            nx = 0
            ny = 1
            label = r"$\lambda_{hs}=0.3$"

        xmin = min(show_data[:, nx])
        xmax = max(show_data[:, nx])
        ymin = min(show_data[:, ny])
        ymax = max(show_data[:, ny])

        if show_deltaT:
            if plot_daisy:
                if show_deltagamma:
                    map1 = ax.scatter(show_data[:, nx], show_data[:, ny], c=abs(
                        show_data[:, 4]), cmap=cm, edgecolor='none', s=5, vmin=0, vmax=.2, alpha=1, rasterized=True)
                else:
                    map1 = ax.scatter(show_data[:, nx], show_data[:, ny], c=show_data[:, 4],
                                      cmap=cm, edgecolor='none', s=5, vmin=-0.1, vmax=4, alpha=1, rasterized=True)
            elif plot_scheme:
                map1 = ax.scatter(show_data[:, nx], show_data[:, ny], c=show_data[:, 4],
                                  cmap=cm, edgecolor='none', s=5, vmin=-1, vmax=10, alpha=1, rasterized=True)
            elif plot_max and show_max_num:
                map1 = ax.scatter(show_data[:, nx], show_data[:, ny], c=abs(
                    show_data[:, 3]), cmap=max_num_cmap, edgecolor='none', s=5, alpha=1, rasterized=True)
            elif plot_max and not show_max_num:
                map1 = ax.scatter(show_data[:, nx], show_data[:, ny], c=show_data[:, 4],
                                  cmap=cm, edgecolor='none', s=5, vmin=5, vmax=30, alpha=1, rasterized=True)
            elif plot_scale:
                map1 = ax.scatter(show_data[:, nx], show_data[:, ny], c=show_data[:, 4],
                                  cmap=cm, edgecolor='none', s=5, vmin=8, vmax=16, alpha=1, rasterized=True)
                print("max", max(show_data[:, 4]))
                print("par", show_data[np.where(show_data[:, 4] == np.max(show_data[:, 4]))[0][0]])
            elif plot_xi:
                map1 = ax.scatter(show_data[:, nx], show_data[:, ny], c=show_data[:, 4],
                                  cmap=cm, edgecolor='none', s=5, vmin=-73, vmax=55, alpha=1, rasterized=True)
                # Fixed axis windows for the xi comparison panels.
                if par == "ms_ls":
                    xmin = 10.0
                    xmax = 90.7035
                    ymin = 0.01
                    ymax = 0.3
                if par == "ms_lhs":
                    xmin = 10.0
                    xmax = 115.025
                    ymin = 0.1
                    ymax = 0.5
        else:
            data2 = np.loadtxt("../2d_scan/" + par + "_default.txt")
            data1 = np.loadtxt("../2d_scan/" + par + "_default.txt")
            show_data = fun_diff(data1, data2, data1,
                                 show_gamma=show_deltagamma)
            xmin = min(show_data[:, nx])
            xmax = max(show_data[:, nx])
            ymin = min(show_data[:, ny])
            ymax = max(show_data[:, ny])
            map1 = ax.scatter(show_data[:, nx], show_data[:, ny], c=show_data[:, 5],
                              cmap=cm, s=2, vmax=150, alpha=1, rasterized=True)

        print("xmin", xmin, "xmax", xmax)
        print("ymin", ymin, "ymax", ymax)

        ax.set_xlabel(labels[nx])
        ax.set_ylabel(labels[ny])
        ax.set_xlim(xmin, xmax)
        ax.set_ylim(ymin, ymax)
        ax.set_title(title, fontsize=14)
        ax.text(xmin + 0.1 * (xmax - xmin), ymax - 0.1 * (ymax - ymin), label)

        if plot_max and show_max_num and legend:
            add_max_num_legend(ax, loc="lower right")

        if cbar:
            fig = plt.gcf()
            fig.subplots_adjust(right=0.9, wspace=0.3, bottom=0.125)
            if not show_max_num:
                cbar_ax = fig.add_axes([0.915, 0.15, 0.02, 0.7])
                # Keep the Colorbar handle so its ticks can be customized.
                clb = fig.colorbar(map1, cax=cbar_ax)
                if plot_daisy and show_deltagamma:
                    clb.set_ticks([0, .05, .1, .15, .2])
                    clb.set_ticklabels(['0%', '5%', '10%', '15%', '20%'])

    fig, axs = plt.subplots(1, 3, figsize=(15, 5))
    _draw_panel(axs[0], par="lhs_ls", legend=True)
    _draw_panel(axs[1], par="ms_ls")
    _draw_panel(axs[2], par="ms_lhs", cbar=True)

    figname = '2d_scan_' + figure_name
    if show_deltaT:
        if plot_daisy and show_deltagamma:
            figname += '_deltagamma'
        else:
            figname += '_deltaT'
    else:
        figname += '_T'
    plt.savefig(figname + '.pdf')


if __name__ == "__main__":
    make_plot()
|
PhaseTracerREPO_NAMEPhaseTracerPATH_START.@PhaseTracer_extracted@PhaseTracer-master@example@xSM@plot_scripts@two_dim.py@.PATH_END.py
|
{
"filename": "_symmetric.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/histogram/error_y/_symmetric.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class SymmetricValidator(_plotly_utils.basevalidators.BooleanValidator):
    """Boolean validator for the ``symmetric`` flag of ``histogram.error_y``."""

    def __init__(
        self, plotly_name="symmetric", parent_name="histogram.error_y", **kwargs
    ):
        # Default the edit type to "calc" unless the caller supplied one.
        edit_type = kwargs.pop("edit_type", "calc")
        super(SymmetricValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@histogram@error_y@_symmetric.py@.PATH_END.py
|
{
"filename": "ImageDraw.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/Pillow/py2/PIL/ImageDraw.py",
"type": "Python"
}
|
#
# The Python Imaging Library
# $Id$
#
# drawing interface operations
#
# History:
# 1996-04-13 fl Created (experimental)
# 1996-08-07 fl Filled polygons, ellipses.
# 1996-08-13 fl Added text support
# 1998-06-28 fl Handle I and F images
# 1998-12-29 fl Added arc; use arc primitive to draw ellipses
# 1999-01-10 fl Added shape stuff (experimental)
# 1999-02-06 fl Added bitmap support
# 1999-02-11 fl Changed all primitives to take options
# 1999-02-20 fl Fixed backwards compatibility
# 2000-10-12 fl Copy on write, when necessary
# 2001-02-18 fl Use default ink for bitmap/text also in fill mode
# 2002-10-24 fl Added support for CSS-style color strings
# 2002-12-10 fl Added experimental support for RGBA-on-RGB drawing
# 2002-12-11 fl Refactored low-level drawing API (work in progress)
# 2004-08-26 fl Made Draw() a factory function, added getdraw() support
# 2004-09-04 fl Added width support to line primitive
# 2004-09-10 fl Added font mode handling
# 2006-06-19 fl Added font bearing support (getmask2)
#
# Copyright (c) 1997-2006 by Secret Labs AB
# Copyright (c) 1996-2006 by Fredrik Lundh
#
# See the README file for information on usage and redistribution.
#
import math
import numbers
from . import Image, ImageColor
from ._util import isStringType
"""
A simple 2D drawing interface for PIL images.
<p>
Application code should use the <b>Draw</b> factory, instead of
directly.
"""
class ImageDraw(object):
    """2D drawing context bound to a single PIL image.

    Application code should normally obtain an instance via the module-level
    ``Draw`` factory rather than instantiating this class directly.
    """

    def __init__(self, im, mode=None):
        """
        Create a drawing instance.

        :param im: The image to draw in.
        :param mode: Optional mode to use for color values. For RGB
           images, this argument can be RGB or RGBA (to blend the
           drawing into the image). For all other modes, this argument
           must be the same as the image mode. If omitted, the mode
           defaults to the mode of the image.
        """
        im.load()
        if im.readonly:
            im._copy()  # make it writeable
        blend = 0
        if mode is None:
            mode = im.mode
        if mode != im.mode:
            if mode == "RGBA" and im.mode == "RGB":
                # RGBA-on-RGB drawing: blend instead of overwrite.
                blend = 1
            else:
                raise ValueError("mode mismatch")
        if mode == "P":
            self.palette = im.palette
        else:
            self.palette = None
        self.im = im.im
        self.draw = Image.core.draw(self.im, blend)
        self.mode = mode
        if mode in ("I", "F"):
            self.ink = self.draw.draw_ink(1)
        else:
            self.ink = self.draw.draw_ink(-1)
        if mode in ("1", "P", "I", "F"):
            # FIXME: fix Fill2 to properly support matte for I+F images
            self.fontmode = "1"
        else:
            self.fontmode = "L"  # aliasing is okay for other modes
        self.fill = 0
        self.font = None

    def getfont(self):
        """
        Get the current default font, loading it lazily on first use.

        :returns: An image font."""
        if not self.font:
            # FIXME: should add a font repository
            from . import ImageFont

            self.font = ImageFont.load_default()
        return self.font

    def _getink(self, ink, fill=None):
        # Resolve ink/fill color specs (strings, palette indices, numbers)
        # into low-level ink handles; either result may be None.
        if ink is None and fill is None:
            if self.fill:
                fill = self.ink
            else:
                ink = self.ink
        else:
            if ink is not None:
                if isStringType(ink):
                    ink = ImageColor.getcolor(ink, self.mode)
                if self.palette and not isinstance(ink, numbers.Number):
                    ink = self.palette.getcolor(ink)
                ink = self.draw.draw_ink(ink)
            if fill is not None:
                if isStringType(fill):
                    fill = ImageColor.getcolor(fill, self.mode)
                if self.palette and not isinstance(fill, numbers.Number):
                    fill = self.palette.getcolor(fill)
                fill = self.draw.draw_ink(fill)
        return ink, fill

    def arc(self, xy, start, end, fill=None, width=0):
        """Draw an arc."""
        ink, fill = self._getink(fill)
        if ink is not None:
            self.draw.draw_arc(xy, start, end, ink, width)

    def bitmap(self, xy, bitmap, fill=None):
        """Draw a bitmap."""
        bitmap.load()
        ink, fill = self._getink(fill)
        if ink is None:
            ink = fill
        if ink is not None:
            self.draw.draw_bitmap(xy, bitmap.im, ink)

    def chord(self, xy, start, end, fill=None, outline=None, width=0):
        """Draw a chord."""
        ink, fill = self._getink(outline, fill)
        if fill is not None:
            self.draw.draw_chord(xy, start, end, fill, 1)
        if ink is not None and ink != fill:
            self.draw.draw_chord(xy, start, end, ink, 0, width)

    def ellipse(self, xy, fill=None, outline=None, width=0):
        """Draw an ellipse."""
        ink, fill = self._getink(outline, fill)
        if fill is not None:
            self.draw.draw_ellipse(xy, fill, 1)
        if ink is not None and ink != fill:
            self.draw.draw_ellipse(xy, ink, 0, width)

    def line(self, xy, fill=None, width=0, joint=None):
        """Draw a line, or a connected sequence of line segments."""
        ink = self._getink(fill)[0]
        if ink is not None:
            self.draw.draw_lines(xy, ink, width)
            if joint == "curve" and width > 4:
                # Round the interior joints with pieslices so wide polylines
                # don't show notches where segments meet.
                for i in range(1, len(xy) - 1):
                    point = xy[i]
                    angles = [
                        math.degrees(math.atan2(end[0] - start[0], start[1] - end[1]))
                        % 360
                        for start, end in ((xy[i - 1], point), (point, xy[i + 1]))
                    ]
                    if angles[0] == angles[1]:
                        # This is a straight line, so no joint is required
                        continue

                    def coord_at_angle(coord, angle):
                        x, y = coord
                        angle -= 90
                        distance = width / 2 - 1
                        return tuple(
                            [
                                p + (math.floor(p_d) if p_d > 0 else math.ceil(p_d))
                                for p, p_d in (
                                    (x, distance * math.cos(math.radians(angle))),
                                    (y, distance * math.sin(math.radians(angle))),
                                )
                            ]
                        )

                    flipped = (
                        angles[1] > angles[0] and angles[1] - 180 > angles[0]
                    ) or (angles[1] < angles[0] and angles[1] + 180 > angles[0])
                    coords = [
                        (point[0] - width / 2 + 1, point[1] - width / 2 + 1),
                        (point[0] + width / 2 - 1, point[1] + width / 2 - 1),
                    ]
                    if flipped:
                        start, end = (angles[1] + 90, angles[0] + 90)
                    else:
                        start, end = (angles[0] - 90, angles[1] - 90)
                    self.pieslice(coords, start - 90, end - 90, fill)
                    if width > 8:
                        # Cover potential gaps between the line and the joint
                        if flipped:
                            gapCoords = [
                                coord_at_angle(point, angles[0] + 90),
                                point,
                                coord_at_angle(point, angles[1] + 90),
                            ]
                        else:
                            gapCoords = [
                                coord_at_angle(point, angles[0] - 90),
                                point,
                                coord_at_angle(point, angles[1] - 90),
                            ]
                        self.line(gapCoords, fill, width=3)

    def shape(self, shape, fill=None, outline=None):
        """(Experimental) Draw a shape."""
        shape.close()
        ink, fill = self._getink(outline, fill)
        if fill is not None:
            self.draw.draw_outline(shape, fill, 1)
        if ink is not None and ink != fill:
            self.draw.draw_outline(shape, ink, 0)

    def pieslice(self, xy, start, end, fill=None, outline=None, width=0):
        """Draw a pieslice."""
        ink, fill = self._getink(outline, fill)
        if fill is not None:
            self.draw.draw_pieslice(xy, start, end, fill, 1)
        if ink is not None and ink != fill:
            self.draw.draw_pieslice(xy, start, end, ink, 0, width)

    def point(self, xy, fill=None):
        """Draw one or more individual pixels."""
        ink, fill = self._getink(fill)
        if ink is not None:
            self.draw.draw_points(xy, ink)

    def polygon(self, xy, fill=None, outline=None):
        """Draw a polygon."""
        ink, fill = self._getink(outline, fill)
        if fill is not None:
            self.draw.draw_polygon(xy, fill, 1)
        if ink is not None and ink != fill:
            self.draw.draw_polygon(xy, ink, 0)

    def rectangle(self, xy, fill=None, outline=None, width=0):
        """Draw a rectangle."""
        ink, fill = self._getink(outline, fill)
        if fill is not None:
            self.draw.draw_rectangle(xy, fill, 1)
        if ink is not None and ink != fill:
            self.draw.draw_rectangle(xy, ink, 0, width)

    def _multiline_check(self, text):
        """Return True if *text* (str or bytes) spans multiple lines."""
        split_character = "\n" if isinstance(text, str) else b"\n"

        return split_character in text

    def _multiline_split(self, text):
        """Split *text* (str or bytes) into a list of lines."""
        split_character = "\n" if isinstance(text, str) else b"\n"

        return text.split(split_character)

    def text(
        self,
        xy,
        text,
        fill=None,
        font=None,
        anchor=None,
        spacing=4,
        align="left",
        direction=None,
        features=None,
        language=None,
        stroke_width=0,
        stroke_fill=None,
        *args,
        **kwargs
    ):
        """Draw a string at the given position; multiline input is delegated
        to :py:meth:`multiline_text`."""
        if self._multiline_check(text):
            return self.multiline_text(
                xy,
                text,
                fill,
                font,
                anchor,
                spacing,
                align,
                direction,
                features,
                language,
                stroke_width,
                stroke_fill,
            )
        if font is None:
            font = self.getfont()

        def getink(fill):
            ink, fill = self._getink(fill)
            if ink is None:
                return fill
            return ink

        def draw_text(ink, stroke_width=0, stroke_offset=None):
            coord = xy
            try:
                # Prefer getmask2, which also reports the bearing offset.
                mask, offset = font.getmask2(
                    text,
                    self.fontmode,
                    direction=direction,
                    features=features,
                    language=language,
                    stroke_width=stroke_width,
                    *args,
                    **kwargs
                )
                coord = coord[0] + offset[0], coord[1] + offset[1]
            except AttributeError:
                try:
                    mask = font.getmask(
                        text,
                        self.fontmode,
                        direction,
                        features,
                        language,
                        stroke_width,
                        *args,
                        **kwargs
                    )
                except TypeError:
                    # Oldest font API: only the text argument is accepted.
                    mask = font.getmask(text)
            if stroke_offset:
                coord = coord[0] + stroke_offset[0], coord[1] + stroke_offset[1]
            self.draw.draw_bitmap(coord, mask, ink)

        ink = getink(fill)
        if ink is not None:
            stroke_ink = None
            if stroke_width:
                stroke_ink = getink(stroke_fill) if stroke_fill is not None else ink

            if stroke_ink is not None:
                # Draw stroked text
                draw_text(stroke_ink, stroke_width)

                # Draw normal text
                draw_text(ink, 0, (stroke_width, stroke_width))
            else:
                # Only draw normal text
                draw_text(ink)

    def multiline_text(
        self,
        xy,
        text,
        fill=None,
        font=None,
        anchor=None,
        spacing=4,
        align="left",
        direction=None,
        features=None,
        language=None,
        stroke_width=0,
        stroke_fill=None,
    ):
        """Draw a newline-separated string line by line, honoring *align*
        ("left", "center" or "right") and *spacing* between lines."""
        widths = []
        max_width = 0
        lines = self._multiline_split(text)
        # Line advance is based on the height of a reference glyph.
        line_spacing = (
            self.textsize("A", font=font, stroke_width=stroke_width)[1] + spacing
        )
        for line in lines:
            line_width, line_height = self.textsize(
                line,
                font,
                direction=direction,
                features=features,
                language=language,
                stroke_width=stroke_width,
            )
            widths.append(line_width)
            max_width = max(max_width, line_width)
        left, top = xy
        for idx, line in enumerate(lines):
            if align == "left":
                pass  # left = x
            elif align == "center":
                left += (max_width - widths[idx]) / 2.0
            elif align == "right":
                left += max_width - widths[idx]
            else:
                raise ValueError('align must be "left", "center" or "right"')
            self.text(
                (left, top),
                line,
                fill,
                font,
                anchor,
                direction=direction,
                features=features,
                language=language,
                stroke_width=stroke_width,
                stroke_fill=stroke_fill,
            )
            top += line_spacing
            # Reset the horizontal origin for the next line's alignment.
            left = xy[0]

    def textsize(
        self,
        text,
        font=None,
        spacing=4,
        direction=None,
        features=None,
        language=None,
        stroke_width=0,
    ):
        """Get the size of a given string, in pixels."""
        if self._multiline_check(text):
            return self.multiline_textsize(
                text, font, spacing, direction, features, language, stroke_width
            )

        if font is None:
            font = self.getfont()
        return font.getsize(text, direction, features, language, stroke_width)

    def multiline_textsize(
        self,
        text,
        font=None,
        spacing=4,
        direction=None,
        features=None,
        language=None,
        stroke_width=0,
    ):
        """Get the size of a newline-separated string: (widest line, total
        height including inter-line spacing)."""
        max_width = 0
        lines = self._multiline_split(text)
        line_spacing = (
            self.textsize("A", font=font, stroke_width=stroke_width)[1] + spacing
        )
        for line in lines:
            line_width, line_height = self.textsize(
                line, font, spacing, direction, features, language, stroke_width
            )
            max_width = max(max_width, line_width)
        return max_width, len(lines) * line_spacing
def Draw(im, mode=None):
    """
    A simple 2D drawing interface for PIL images.

    :param im: The image to draw in.
    :param mode: Optional mode to use for color values. For RGB
       images, this argument can be RGB or RGBA (to blend the
       drawing into the image). For all other modes, this argument
       must be the same as the image mode. If omitted, the mode
       defaults to the mode of the image.
    """
    # Let the image supply its own drawing context when it knows how;
    # otherwise fall back to the generic ImageDraw implementation.
    try:
        context = im.getdraw(mode)
    except AttributeError:
        context = ImageDraw(im, mode)
    return context
# experimental access to the outline API
# Outline is None when the C core was built without outline support.
try:
    Outline = Image.core.outline
except AttributeError:
    Outline = None
def getdraw(im=None, hints=None):
    """
    (Experimental) A more advanced 2D drawing interface for PIL images,
    based on the WCK interface.

    :param im: The image to draw in.
    :param hints: An optional list of hints.
    :returns: A (drawing context, drawing resource factory) tuple.
    """
    # FIXME: this needs more work!
    # FIXME: come up with a better 'hints' scheme.
    handler = None
    if not hints or "nicest" in hints:
        # Prefer the anti-aliased AGG backend when it is available.
        try:
            from . import _imagingagg as handler
        except ImportError:
            pass
    if handler is None:
        # Fall back to the stock ImageDraw2 wrapper.
        from . import ImageDraw2 as handler
    if im:
        im = handler.Draw(im)
    return im, handler
def floodfill(image, xy, value, border=None, thresh=0):
    """
    (experimental) Fills a bounded region with a given color.

    :param image: Target image.
    :param xy: Seed position (a 2-item coordinate tuple). See
        :ref:`coordinate-system`.
    :param value: Fill color.
    :param border: Optional border value. If given, the region consists of
        pixels with a color different from the border color. If not given,
        the region consists of pixels having the same color as the seed
        pixel.
    :param thresh: Optional threshold value which specifies a maximum
        tolerable difference of a pixel value from the 'background' in
        order for it to be replaced. Useful for filling regions of
        non-homogeneous, but similar, colors.
    """
    # based on an implementation by Eric S. Raymond
    # amended by yo1995 @20180806
    pixel = image.load()
    x, y = xy
    try:
        background = pixel[x, y]
        if _color_diff(value, background) <= thresh:
            return  # seed point already has fill color
        pixel[x, y] = value
    except (ValueError, IndexError):
        return  # seed point outside image
    edge = {(x, y)}
    # use a set to keep record of current and previous edge pixels
    # to reduce memory consumption
    full_edge = set()
    while edge:
        new_edge = set()
        for (x, y) in edge:  # 4 adjacent method
            for (s, t) in ((x + 1, y), (x - 1, y), (x, y + 1), (x, y - 1)):
                # If already processed, or if a coordinate is negative, skip
                # (negative indices would wrap around; out-of-range positive
                # ones raise IndexError below).
                if (s, t) in full_edge or s < 0 or t < 0:
                    continue
                try:
                    p = pixel[s, t]
                except (ValueError, IndexError):
                    pass
                else:
                    full_edge.add((s, t))
                    if border is None:
                        fill = _color_diff(p, background) <= thresh
                    else:
                        fill = p != value and p != border
                    if fill:
                        pixel[s, t] = value
                        new_edge.add((s, t))
        # Only the previous frontier needs to be remembered for dedup.
        full_edge = edge  # discard pixels processed
        edge = new_edge
def _color_diff(color1, color2):
"""
Uses 1-norm distance to calculate difference between two values.
"""
if isinstance(color2, tuple):
return sum([abs(color1[i] - color2[i]) for i in range(0, len(color2))])
else:
return abs(color1 - color2)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@Pillow@py2@PIL@ImageDraw.py@.PATH_END.py
|
{
"filename": "_deprecations.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/graph_objs/_deprecations.py",
"type": "Python"
}
|
import warnings
# Show each distinct graph_objs deprecation warning once per location;
# the regex matches the messages emitted by the shim classes below.
warnings.filterwarnings(
    "default", r"plotly\.graph_objs\.\w+ is deprecated", DeprecationWarning
)
class Data(list):
    """
    plotly.graph_objs.Data is deprecated.
    Please replace it with a list or tuple of instances of the following types
      - plotly.graph_objs.Scatter
      - plotly.graph_objs.Bar
      - plotly.graph_objs.Area
      - plotly.graph_objs.Histogram
      - etc.
    """

    # Backwards-compatibility shim: behaves as a plain list but warns.
    def __init__(self, *args, **kwargs):
        """
        plotly.graph_objs.Data is deprecated.
        Please replace it with a list or tuple of instances of the following types
          - plotly.graph_objs.Scatter
          - plotly.graph_objs.Bar
          - plotly.graph_objs.Area
          - plotly.graph_objs.Histogram
          - etc.
        """
        warnings.warn(
            """plotly.graph_objs.Data is deprecated.
Please replace it with a list or tuple of instances of the following types
  - plotly.graph_objs.Scatter
  - plotly.graph_objs.Bar
  - plotly.graph_objs.Area
  - plotly.graph_objs.Histogram
  - etc.
""",
            DeprecationWarning,
        )
        super(Data, self).__init__(*args, **kwargs)
class Annotations(list):
    """
    plotly.graph_objs.Annotations is deprecated.
    Please replace it with a list or tuple of instances of the following types
      - plotly.graph_objs.layout.Annotation
      - plotly.graph_objs.layout.scene.Annotation
    """

    # Backwards-compatibility shim: behaves as a plain list but warns.
    def __init__(self, *args, **kwargs):
        """
        plotly.graph_objs.Annotations is deprecated.
        Please replace it with a list or tuple of instances of the following types
          - plotly.graph_objs.layout.Annotation
          - plotly.graph_objs.layout.scene.Annotation
        """
        warnings.warn(
            """plotly.graph_objs.Annotations is deprecated.
Please replace it with a list or tuple of instances of the following types
  - plotly.graph_objs.layout.Annotation
  - plotly.graph_objs.layout.scene.Annotation
""",
            DeprecationWarning,
        )
        super(Annotations, self).__init__(*args, **kwargs)
class Frames(list):
    """
    plotly.graph_objs.Frames is deprecated.
    Please replace it with a list or tuple of instances of the following types
      - plotly.graph_objs.Frame
    """

    # Backwards-compatibility shim: behaves as a plain list but warns.
    def __init__(self, *args, **kwargs):
        """
        plotly.graph_objs.Frames is deprecated.
        Please replace it with a list or tuple of instances of the following types
          - plotly.graph_objs.Frame
        """
        warnings.warn(
            """plotly.graph_objs.Frames is deprecated.
Please replace it with a list or tuple of instances of the following types
  - plotly.graph_objs.Frame
""",
            DeprecationWarning,
        )
        super(Frames, self).__init__(*args, **kwargs)
class AngularAxis(dict):
    """
    plotly.graph_objs.AngularAxis is deprecated.
    Please replace it with one of the following more specific types
      - plotly.graph_objs.layout.AngularAxis
      - plotly.graph_objs.layout.polar.AngularAxis
    """

    # Backwards-compatibility shim: behaves as a plain dict but warns.
    def __init__(self, *args, **kwargs):
        """
        plotly.graph_objs.AngularAxis is deprecated.
        Please replace it with one of the following more specific types
          - plotly.graph_objs.layout.AngularAxis
          - plotly.graph_objs.layout.polar.AngularAxis
        """
        warnings.warn(
            """plotly.graph_objs.AngularAxis is deprecated.
Please replace it with one of the following more specific types
  - plotly.graph_objs.layout.AngularAxis
  - plotly.graph_objs.layout.polar.AngularAxis
""",
            DeprecationWarning,
        )
        super(AngularAxis, self).__init__(*args, **kwargs)
class Annotation(dict):
    """
    plotly.graph_objs.Annotation is deprecated.
    Please replace it with one of the following more specific types
      - plotly.graph_objs.layout.Annotation
      - plotly.graph_objs.layout.scene.Annotation
    """

    # Backwards-compatibility shim: behaves as a plain dict but warns.
    def __init__(self, *args, **kwargs):
        """
        plotly.graph_objs.Annotation is deprecated.
        Please replace it with one of the following more specific types
          - plotly.graph_objs.layout.Annotation
          - plotly.graph_objs.layout.scene.Annotation
        """
        warnings.warn(
            """plotly.graph_objs.Annotation is deprecated.
Please replace it with one of the following more specific types
  - plotly.graph_objs.layout.Annotation
  - plotly.graph_objs.layout.scene.Annotation
""",
            DeprecationWarning,
        )
        super(Annotation, self).__init__(*args, **kwargs)
class ColorBar(dict):
    """
    plotly.graph_objs.ColorBar is deprecated.
    Please replace it with one of the following more specific types
      - plotly.graph_objs.scatter.marker.ColorBar
      - plotly.graph_objs.surface.ColorBar
      - etc.
    """

    # Backwards-compatibility shim: behaves as a plain dict but warns.
    def __init__(self, *args, **kwargs):
        """
        plotly.graph_objs.ColorBar is deprecated.
        Please replace it with one of the following more specific types
          - plotly.graph_objs.scatter.marker.ColorBar
          - plotly.graph_objs.surface.ColorBar
          - etc.
        """
        warnings.warn(
            """plotly.graph_objs.ColorBar is deprecated.
Please replace it with one of the following more specific types
  - plotly.graph_objs.scatter.marker.ColorBar
  - plotly.graph_objs.surface.ColorBar
  - etc.
""",
            DeprecationWarning,
        )
        super(ColorBar, self).__init__(*args, **kwargs)
class Contours(dict):
    """
    plotly.graph_objs.Contours is deprecated.
    Please replace it with one of the following more specific types
      - plotly.graph_objs.contour.Contours
      - plotly.graph_objs.surface.Contours
      - etc.
    """

    # Backwards-compatibility shim: behaves as a plain dict but warns.
    def __init__(self, *args, **kwargs):
        """
        plotly.graph_objs.Contours is deprecated.
        Please replace it with one of the following more specific types
          - plotly.graph_objs.contour.Contours
          - plotly.graph_objs.surface.Contours
          - etc.
        """
        warnings.warn(
            """plotly.graph_objs.Contours is deprecated.
Please replace it with one of the following more specific types
  - plotly.graph_objs.contour.Contours
  - plotly.graph_objs.surface.Contours
  - etc.
""",
            DeprecationWarning,
        )
        super(Contours, self).__init__(*args, **kwargs)
class ErrorX(dict):
    """
    plotly.graph_objs.ErrorX is deprecated.
    Please replace it with one of the following more specific types
      - plotly.graph_objs.scatter.ErrorX
      - plotly.graph_objs.histogram.ErrorX
      - etc.
    """

    # Backwards-compatibility shim: behaves as a plain dict but warns.
    def __init__(self, *args, **kwargs):
        """
        plotly.graph_objs.ErrorX is deprecated.
        Please replace it with one of the following more specific types
          - plotly.graph_objs.scatter.ErrorX
          - plotly.graph_objs.histogram.ErrorX
          - etc.
        """
        warnings.warn(
            """plotly.graph_objs.ErrorX is deprecated.
Please replace it with one of the following more specific types
  - plotly.graph_objs.scatter.ErrorX
  - plotly.graph_objs.histogram.ErrorX
  - etc.
""",
            DeprecationWarning,
        )
        super(ErrorX, self).__init__(*args, **kwargs)
class ErrorY(dict):
    """
    plotly.graph_objs.ErrorY is deprecated.
    Please replace it with one of the following more specific types
      - plotly.graph_objs.scatter.ErrorY
      - plotly.graph_objs.histogram.ErrorY
      - etc.
    """

    # Backwards-compatibility shim: behaves as a plain dict but warns.
    def __init__(self, *args, **kwargs):
        """
        plotly.graph_objs.ErrorY is deprecated.
        Please replace it with one of the following more specific types
          - plotly.graph_objs.scatter.ErrorY
          - plotly.graph_objs.histogram.ErrorY
          - etc.
        """
        warnings.warn(
            """plotly.graph_objs.ErrorY is deprecated.
Please replace it with one of the following more specific types
  - plotly.graph_objs.scatter.ErrorY
  - plotly.graph_objs.histogram.ErrorY
  - etc.
""",
            DeprecationWarning,
        )
        super(ErrorY, self).__init__(*args, **kwargs)
class ErrorZ(dict):
    """
    plotly.graph_objs.ErrorZ is deprecated.
    Please replace it with one of the following more specific types
      - plotly.graph_objs.scatter3d.ErrorZ
    """

    # Backwards-compatibility shim: behaves as a plain dict but warns.
    def __init__(self, *args, **kwargs):
        """
        plotly.graph_objs.ErrorZ is deprecated.
        Please replace it with one of the following more specific types
          - plotly.graph_objs.scatter3d.ErrorZ
        """
        warnings.warn(
            """plotly.graph_objs.ErrorZ is deprecated.
Please replace it with one of the following more specific types
  - plotly.graph_objs.scatter3d.ErrorZ
""",
            DeprecationWarning,
        )
        super(ErrorZ, self).__init__(*args, **kwargs)
class Font(dict):
    """
    plotly.graph_objs.Font is deprecated.
    Please replace it with one of the following more specific types
      - plotly.graph_objs.layout.Font
      - plotly.graph_objs.layout.hoverlabel.Font
      - etc.
    """

    # Backwards-compatibility shim: behaves as a plain dict but warns.
    def __init__(self, *args, **kwargs):
        """
        plotly.graph_objs.Font is deprecated.
        Please replace it with one of the following more specific types
          - plotly.graph_objs.layout.Font
          - plotly.graph_objs.layout.hoverlabel.Font
          - etc.
        """
        warnings.warn(
            """plotly.graph_objs.Font is deprecated.
Please replace it with one of the following more specific types
  - plotly.graph_objs.layout.Font
  - plotly.graph_objs.layout.hoverlabel.Font
  - etc.
""",
            DeprecationWarning,
        )
        super(Font, self).__init__(*args, **kwargs)
class Legend(dict):
    """
    plotly.graph_objs.Legend is deprecated.
    Please replace it with one of the following more specific types
      - plotly.graph_objs.layout.Legend
    """

    # Backwards-compatibility shim: behaves as a plain dict but warns.
    def __init__(self, *args, **kwargs):
        """
        plotly.graph_objs.Legend is deprecated.
        Please replace it with one of the following more specific types
          - plotly.graph_objs.layout.Legend
        """
        warnings.warn(
            """plotly.graph_objs.Legend is deprecated.
Please replace it with one of the following more specific types
  - plotly.graph_objs.layout.Legend
""",
            DeprecationWarning,
        )
        super(Legend, self).__init__(*args, **kwargs)
class Line(dict):
    """
    plotly.graph_objs.Line is deprecated.
    Please replace it with one of the following more specific types
      - plotly.graph_objs.scatter.Line
      - plotly.graph_objs.layout.shape.Line
      - etc.
    """

    # Backwards-compatibility shim: behaves as a plain dict but warns.
    def __init__(self, *args, **kwargs):
        """
        plotly.graph_objs.Line is deprecated.
        Please replace it with one of the following more specific types
          - plotly.graph_objs.scatter.Line
          - plotly.graph_objs.layout.shape.Line
          - etc.
        """
        warnings.warn(
            """plotly.graph_objs.Line is deprecated.
Please replace it with one of the following more specific types
  - plotly.graph_objs.scatter.Line
  - plotly.graph_objs.layout.shape.Line
  - etc.
""",
            DeprecationWarning,
        )
        super(Line, self).__init__(*args, **kwargs)
class Margin(dict):
    """
    plotly.graph_objs.Margin is deprecated.
    Please replace it with one of the following more specific types
      - plotly.graph_objs.layout.Margin
    """

    # Backwards-compatibility shim: behaves as a plain dict but warns.
    def __init__(self, *args, **kwargs):
        """
        plotly.graph_objs.Margin is deprecated.
        Please replace it with one of the following more specific types
          - plotly.graph_objs.layout.Margin
        """
        warnings.warn(
            """plotly.graph_objs.Margin is deprecated.
Please replace it with one of the following more specific types
  - plotly.graph_objs.layout.Margin
""",
            DeprecationWarning,
        )
        super(Margin, self).__init__(*args, **kwargs)
class Marker(dict):
    """
    plotly.graph_objs.Marker is deprecated.
    Please replace it with one of the following more specific types
      - plotly.graph_objs.scatter.Marker
      - plotly.graph_objs.histogram.selected.Marker
      - etc.
    """

    # Backwards-compatibility shim: behaves as a plain dict but warns.
    def __init__(self, *args, **kwargs):
        """
        plotly.graph_objs.Marker is deprecated.
        Please replace it with one of the following more specific types
          - plotly.graph_objs.scatter.Marker
          - plotly.graph_objs.histogram.selected.Marker
          - etc.
        """
        warnings.warn(
            """plotly.graph_objs.Marker is deprecated.
Please replace it with one of the following more specific types
  - plotly.graph_objs.scatter.Marker
  - plotly.graph_objs.histogram.selected.Marker
  - etc.
""",
            DeprecationWarning,
        )
        super(Marker, self).__init__(*args, **kwargs)
class RadialAxis(dict):
    """
    plotly.graph_objs.RadialAxis is deprecated.
    Please replace it with one of the following more specific types
      - plotly.graph_objs.layout.RadialAxis
      - plotly.graph_objs.layout.polar.RadialAxis
    """

    # Backwards-compatibility shim: behaves as a plain dict but warns.
    def __init__(self, *args, **kwargs):
        """
        plotly.graph_objs.RadialAxis is deprecated.
        Please replace it with one of the following more specific types
          - plotly.graph_objs.layout.RadialAxis
          - plotly.graph_objs.layout.polar.RadialAxis
        """
        warnings.warn(
            """plotly.graph_objs.RadialAxis is deprecated.
Please replace it with one of the following more specific types
  - plotly.graph_objs.layout.RadialAxis
  - plotly.graph_objs.layout.polar.RadialAxis
""",
            DeprecationWarning,
        )
        super(RadialAxis, self).__init__(*args, **kwargs)
class Scene(dict):
    """
    plotly.graph_objs.Scene is deprecated.
    Please replace it with one of the following more specific types
      - plotly.graph_objs.layout.Scene
    """

    # Backwards-compatibility shim: behaves as a plain dict but warns.
    def __init__(self, *args, **kwargs):
        """
        plotly.graph_objs.Scene is deprecated.
        Please replace it with one of the following more specific types
          - plotly.graph_objs.layout.Scene
        """
        warnings.warn(
            """plotly.graph_objs.Scene is deprecated.
Please replace it with one of the following more specific types
  - plotly.graph_objs.layout.Scene
""",
            DeprecationWarning,
        )
        super(Scene, self).__init__(*args, **kwargs)
class Stream(dict):
    """
    Deprecated dict-backed stand-in for ``plotly.graph_objs.Stream``.

    Use one of the more specific types instead:
      - plotly.graph_objs.scatter.Stream
      - plotly.graph_objs.area.Stream
    """

    def __init__(self, *args, **kwargs):
        """Warn that this class is deprecated, then initialize as a plain dict."""
        # Warning text is kept verbatim so existing warning filters still match.
        _msg = """plotly.graph_objs.Stream is deprecated.
Please replace it with one of the following more specific types
  - plotly.graph_objs.scatter.Stream
  - plotly.graph_objs.area.Stream
"""
        warnings.warn(_msg, category=DeprecationWarning)
        # ``super(Stream, self)`` (not bare ``super()``) preserves Python 2 support.
        super(Stream, self).__init__(*args, **kwargs)
class XAxis(dict):
    """
    Deprecated dict-backed stand-in for ``plotly.graph_objs.XAxis``.

    Use one of the more specific types instead:
      - plotly.graph_objs.layout.XAxis
      - plotly.graph_objs.layout.scene.XAxis
    """

    def __init__(self, *args, **kwargs):
        """Warn that this class is deprecated, then initialize as a plain dict."""
        # Warning text is kept verbatim so existing warning filters still match.
        _msg = """plotly.graph_objs.XAxis is deprecated.
Please replace it with one of the following more specific types
  - plotly.graph_objs.layout.XAxis
  - plotly.graph_objs.layout.scene.XAxis
"""
        warnings.warn(_msg, category=DeprecationWarning)
        # ``super(XAxis, self)`` (not bare ``super()``) preserves Python 2 support.
        super(XAxis, self).__init__(*args, **kwargs)
class YAxis(dict):
    """
    Deprecated dict-backed stand-in for ``plotly.graph_objs.YAxis``.

    Use one of the more specific types instead:
      - plotly.graph_objs.layout.YAxis
      - plotly.graph_objs.layout.scene.YAxis
    """

    def __init__(self, *args, **kwargs):
        """Warn that this class is deprecated, then initialize as a plain dict."""
        # Warning text is kept verbatim so existing warning filters still match.
        _msg = """plotly.graph_objs.YAxis is deprecated.
Please replace it with one of the following more specific types
  - plotly.graph_objs.layout.YAxis
  - plotly.graph_objs.layout.scene.YAxis
"""
        warnings.warn(_msg, category=DeprecationWarning)
        # ``super(YAxis, self)`` (not bare ``super()``) preserves Python 2 support.
        super(YAxis, self).__init__(*args, **kwargs)
class ZAxis(dict):
    """
    Deprecated dict-backed stand-in for ``plotly.graph_objs.ZAxis``.

    Use the more specific type instead:
      - plotly.graph_objs.layout.scene.ZAxis
    """

    def __init__(self, *args, **kwargs):
        """Warn that this class is deprecated, then initialize as a plain dict."""
        # Warning text is kept verbatim so existing warning filters still match.
        _msg = """plotly.graph_objs.ZAxis is deprecated.
Please replace it with one of the following more specific types
  - plotly.graph_objs.layout.scene.ZAxis
"""
        warnings.warn(_msg, category=DeprecationWarning)
        # ``super(ZAxis, self)`` (not bare ``super()``) preserves Python 2 support.
        super(ZAxis, self).__init__(*args, **kwargs)
class XBins(dict):
    """
    Deprecated dict-backed stand-in for ``plotly.graph_objs.XBins``.

    Use one of the more specific types instead:
      - plotly.graph_objs.histogram.XBins
      - plotly.graph_objs.histogram2d.XBins
    """

    def __init__(self, *args, **kwargs):
        """Warn that this class is deprecated, then initialize as a plain dict."""
        # Warning text is kept verbatim so existing warning filters still match.
        _msg = """plotly.graph_objs.XBins is deprecated.
Please replace it with one of the following more specific types
  - plotly.graph_objs.histogram.XBins
  - plotly.graph_objs.histogram2d.XBins
"""
        warnings.warn(_msg, category=DeprecationWarning)
        # ``super(XBins, self)`` (not bare ``super()``) preserves Python 2 support.
        super(XBins, self).__init__(*args, **kwargs)
class YBins(dict):
    """
    Deprecated dict-backed stand-in for ``plotly.graph_objs.YBins``.

    Use one of the more specific types instead:
      - plotly.graph_objs.histogram.YBins
      - plotly.graph_objs.histogram2d.YBins
    """

    def __init__(self, *args, **kwargs):
        """Warn that this class is deprecated, then initialize as a plain dict."""
        # Warning text is kept verbatim so existing warning filters still match.
        _msg = """plotly.graph_objs.YBins is deprecated.
Please replace it with one of the following more specific types
  - plotly.graph_objs.histogram.YBins
  - plotly.graph_objs.histogram2d.YBins
"""
        warnings.warn(_msg, category=DeprecationWarning)
        # ``super(YBins, self)`` (not bare ``super()``) preserves Python 2 support.
        super(YBins, self).__init__(*args, **kwargs)
class Trace(dict):
    """
    Deprecated dict-backed stand-in for ``plotly.graph_objs.Trace``.

    Use one of the more specific trace types instead:
      - plotly.graph_objs.Scatter
      - plotly.graph_objs.Bar
      - plotly.graph_objs.Area
      - plotly.graph_objs.Histogram
      - etc.
    """

    def __init__(self, *args, **kwargs):
        """Warn that this class is deprecated, then initialize as a plain dict."""
        # Warning text is kept verbatim so existing warning filters still match.
        _msg = """plotly.graph_objs.Trace is deprecated.
Please replace it with one of the following more specific types
  - plotly.graph_objs.Scatter
  - plotly.graph_objs.Bar
  - plotly.graph_objs.Area
  - plotly.graph_objs.Histogram
  - etc.
"""
        warnings.warn(_msg, category=DeprecationWarning)
        # ``super(Trace, self)`` (not bare ``super()``) preserves Python 2 support.
        super(Trace, self).__init__(*args, **kwargs)
class Histogram2dcontour(dict):
    """
    Deprecated dict-backed stand-in for ``plotly.graph_objs.Histogram2dcontour``.

    Use the correctly-capitalized type instead:
      - plotly.graph_objs.Histogram2dContour
    """

    def __init__(self, *args, **kwargs):
        """Warn that this class is deprecated, then initialize as a plain dict."""
        # Warning text is kept verbatim so existing warning filters still match.
        _msg = """plotly.graph_objs.Histogram2dcontour is deprecated.
Please replace it with one of the following more specific types
  - plotly.graph_objs.Histogram2dContour
"""
        warnings.warn(_msg, category=DeprecationWarning)
        # ``super(Histogram2dcontour, self)`` (not bare ``super()``) preserves Python 2 support.
        super(Histogram2dcontour, self).__init__(*args, **kwargs)
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@graph_objs@_deprecations.py@.PATH_END.py
|
{
"filename": "_width.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/histogram/marker/line/_width.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class WidthValidator(_plotly_utils.basevalidators.NumberValidator):
    """Number validator for the ``histogram.marker.line.width`` property."""

    def __init__(
        self, plotly_name="width", parent_name="histogram.marker.line", **kwargs
    ):
        # Pop schema defaults out of ``kwargs`` first so callers may override
        # them; whatever remains is forwarded untouched.
        defaults = dict(
            array_ok=kwargs.pop("array_ok", True),
            edit_type=kwargs.pop("edit_type", "style"),
            min=kwargs.pop("min", 0),
        )
        super(WidthValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            **defaults,
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@histogram@marker@line@_width.py@.PATH_END.py
|
{
"filename": "setup.py",
"repo_name": "changhoonhahn/pySpectrum",
"repo_path": "pySpectrum_extracted/pySpectrum-master/setup.py",
"type": "Python"
}
|
import os

import numpy as np
from numpy.distutils.core import setup
from numpy.distutils.core import Extension

__version__ = '0.0'


def _make_ext(library_dirs, include_dirs, extra_f77_compile_args=None):
    """Return the f2py Extension for the Fortran estimator module.

    Parameters
    ----------
    library_dirs : list of str
        Directories searched for the single-precision FFTW3 library (fftw3f).
    include_dirs : list of str
        Directories searched for the FFTW3 headers; NumPy's include directory
        is always prepended.
    extra_f77_compile_args : list of str, optional
        Extra flags forwarded to the Fortran 77 compiler.
    """
    kwargs = {}
    if extra_f77_compile_args is not None:
        kwargs['extra_f77_compile_args'] = extra_f77_compile_args
    return Extension(name='estimator',
                     sources=['pyspectrum/estimator.f'],
                     language='f77',
                     library_dirs=library_dirs,
                     libraries=['fftw3f'],
                     include_dirs=[np.get_include()] + include_dirs,
                     **kwargs)


# Pick the FFTW3 location for the current machine.  As in the original
# control flow, the presence of NERSC_HOST takes precedence over the generic
# ``machine`` variable, and any unrecognised value falls back to /usr/local.
_nersc_host = os.environ.get('NERSC_HOST')
_machine = os.environ.get('machine')
if _nersc_host == 'edison':
    ext = _make_ext(["/opt/cray/pe/fftw/3.3.8.1/x86_64/lib"],
                    ["/opt/cray/pe/fftw/3.3.8.1/x86_64/include"])
elif _nersc_host == 'cori':
    ext = _make_ext(["/opt/cray/pe/fftw/default/x86_64/lib"],
                    ["/opt/cray/pe/fftw/default/x86_64/include"])
elif _nersc_host is None and _machine == 'tiger':
    print('install on princeton Tiger')
    ext = _make_ext(["/usr/local/fftw/intel-16.0/3.3.4/lib64"],
                    ["/usr/local/fftw/intel-16.0/3.3.4/include"])
elif _nersc_host is None and _machine == 'mbp':
    ext = _make_ext(["/usr/local/lib"], ["/usr/local/include"],
                    extra_f77_compile_args=['-fcheck=all', '-fallow-argument-mismatch'])
else:
    # Unknown host/machine: assume a plain /usr/local FFTW3 install.
    ext = _make_ext(["/usr/local/lib"], ["/usr/local/include"],
                    extra_f77_compile_args=['-fcheck=all', '-fallow-argument-mismatch'])

if __name__ == "__main__":
    setup(name='pySpectrum',
          version=__version__,
          description='TBD',
          author='ChangHoon Hahn',
          author_email='hahn.changhoon@gmail.com',
          url='',
          package_data={'pyspectrum': ['dat/fftw3.f', 'dat/*.pyfftw', 'dat/test_box.hdf5']},
          platforms=['*nix'],
          license='GPL',
          requires=['numpy', 'scipy', 'h5py', 'pyfftw', 'pytest'],
          provides=['pyspectrum'],
          packages=['pyspectrum'],
          ext_modules=[ext])
|
changhoonhahnREPO_NAMEpySpectrumPATH_START.@pySpectrum_extracted@pySpectrum-master@setup.py@.PATH_END.py
|
{
"filename": "_meta.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/scattergl/_meta.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class MetaValidator(_plotly_utils.basevalidators.AnyValidator):
    """Validator for the trace-level ``scattergl.meta`` attribute."""

    def __init__(self, plotly_name="meta", parent_name="scattergl", **kwargs):
        # Pop schema defaults out of ``kwargs`` first so callers may override
        # them; whatever remains is forwarded untouched.
        defaults = dict(
            array_ok=kwargs.pop("array_ok", True),
            edit_type=kwargs.pop("edit_type", "plot"),
            role=kwargs.pop("role", "info"),
        )
        super(MetaValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **defaults, **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@scattergl@_meta.py@.PATH_END.py
|
{
"filename": "test_minres.py",
"repo_name": "scipy/scipy",
"repo_path": "scipy_extracted/scipy-main/scipy/sparse/linalg/_isolve/tests/test_minres.py",
"type": "Python"
}
|
import numpy as np
from numpy.linalg import norm
from numpy.testing import assert_equal, assert_allclose, assert_
from scipy.sparse.linalg._isolve import minres
from pytest import raises as assert_raises
def get_sample_problem():
    """Return a seeded random 10x10 symmetric matrix and a length-10 RHS vector."""
    rng = np.random.RandomState(1234)
    raw = rng.rand(10, 10)
    # Symmetrize so the system is valid input for MINRES.
    sym = raw + raw.T
    rhs = rng.rand(10)
    return sym, rhs
def test_singular():
    """MINRES should still report success on a consistent singular system."""
    matrix, rhs = get_sample_problem()
    # Zero the first row and RHS entry to make the system singular but consistent.
    matrix[0, :] = 0
    rhs[0] = 0
    solution, info = minres(matrix, rhs)
    assert_equal(info, 0)
    residual = norm(matrix @ solution - rhs)
    assert residual <= 1e-5 * norm(rhs)
def test_x0_is_used_by():
    """The first iterate must differ when a non-default x0 is supplied."""
    A, b = get_sample_problem()
    rng = np.random.RandomState(12345)
    x0 = rng.rand(10)

    def recorder(history):
        # Build a callback that appends each iterate to ``history``.
        def callback(xk):
            history.append(xk)
        return callback

    iterates_with_x0 = []
    minres(A, b, x0=x0, callback=recorder(iterates_with_x0))
    iterates_default = []
    minres(A, b, callback=recorder(iterates_default))
    assert_(not np.array_equal(iterates_with_x0[0], iterates_default[0]))
def test_shift():
    """minres(A, b, shift=s) must match minres(A - s*I, b)."""
    matrix, rhs = get_sample_problem()
    shift = 0.5
    explicit = matrix - shift * np.eye(10)
    x_shifted, info = minres(matrix, rhs, shift=shift)
    x_explicit, _ = minres(explicit, rhs)
    assert_equal(info, 0)
    assert_allclose(x_shifted, x_explicit, rtol=1e-5)
def test_asymmetric_fail():
    """Asymmetric matrix should raise `ValueError` when check=True"""
    matrix, rhs = get_sample_problem()
    # Break symmetry at a single off-diagonal pair.
    matrix[1, 2] = 1
    matrix[2, 1] = 2
    with assert_raises(ValueError):
        minres(matrix, rhs, check=True)
def test_minres_non_default_x0():
rng = np.random.RandomState(1234)
rtol = 1e-6
a = rng.randn(5, 5)
a = np.dot(a, a.T)
b = rng.randn(5)
c = rng.randn(5)
x = minres(a, b, x0=c, rtol=rtol)[0]
assert norm(a @ x - b) <= rtol * norm(b)
def test_minres_precond_non_default_x0():
rng = np.random.RandomState(12345)
rtol = 1e-6
a = rng.randn(5, 5)
a = np.dot(a, a.T)
b = rng.randn(5)
c = rng.randn(5)
m = rng.randn(5, 5)
m = np.dot(m, m.T)
x = minres(a, b, M=m, x0=c, rtol=rtol)[0]
assert norm(a @ x - b) <= rtol * norm(b)
def test_minres_precond_exact_x0():
rng = np.random.RandomState(1234)
rtol = 1e-6
a = np.eye(10)
b = np.ones(10)
c = np.ones(10)
m = rng.randn(10, 10)
m = np.dot(m, m.T)
x = minres(a, b, M=m, x0=c, rtol=rtol)[0]
assert norm(a @ x - b) <= rtol * norm(b)
|
scipyREPO_NAMEscipyPATH_START.@scipy_extracted@scipy-main@scipy@sparse@linalg@_isolve@tests@test_minres.py@.PATH_END.py
|
{
"filename": "blsstep.py",
"repo_name": "hpparvi/opents",
"repo_path": "opents_extracted/opents-master/src/blsstep.py",
"type": "Python"
}
|
# OpenTS: Open exoplanet transit search pipeline.
# Copyright (C) 2015-2020 Hannu Parviainen
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
from logging import getLogger
from astropy import units as u
from astropy.io.fits import HDUList, Card
from astropy.timeseries import BoxLeastSquares
from matplotlib.pyplot import setp
from numpy import linspace, argmax, array, exp
from pytransit.orbits import epoch
from .otsstep import OTSStep
from .plots import bplot
def maskf(x, c, w):
    """Smooth notch mask: 0 at ``x == c``, approaching 1 far from ``c``.

    ``w`` sets the width of the suppressed region around ``c``; works
    element-wise on arrays via numpy's ``exp``.
    """
    gaussian = exp(-(x - c) ** 2 / w ** 2)
    return (1 - gaussian) ** 4
class BLSStep(OTSStep):
    """Box least squares (BLS) transit-search step.

    Runs astropy's ``BoxLeastSquares`` periodogram over the step's time
    series, notches out masked periods, and stores the best-fit ephemeris
    back onto the time series.
    """
    name = "bls"
    def __init__(self, ts):
        super().__init__(ts)
        self._periods = None  # period grid [d]; filled in __call__
        # Trial transit durations in days (0.25 h ... 4 h, given as hours / 24).
        self._durations = array([0.25, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0, 3.5, 4.0]) / 24
        self.bls = None
        self.result = None
        self.period = None  # Best-fit period
        self.zero_epoch = None  # Best-fit zero epoch
        self.duration = None  # Best-fit duration
        self.depth = None  # Best-fit depth
        self.snr = None  # Best-fit Signal to noise ratio
    def __call__(self, *args, **kwargs):
        """Run the BLS periodogram and update the time series' ephemeris."""
        self.logger = getLogger(f"{self.name}:{self.ts.name.lower().replace('_','-')}")
        self.logger.info("Running BLS periodogram")
        self._periods = linspace(self.ts.pmin, self.ts.pmax, self.ts.nper)
        self.bls = BoxLeastSquares(self.ts.time * u.day, self.ts.flux, self.ts.ferr)
        self.result = self.bls.power(self._periods, self._durations, objective='snr')
        # Suppress known/unwanted periods by notching both spectra with maskf.
        for p in self.ts.masked_periods:
            self.result.depth_snr *= maskf(self._periods, p, .1)
            self.result.log_likelihood *= maskf(self._periods, p, .1)
        # Best candidate = maximum of the (masked) depth-SNR spectrum.
        i = argmax(self.result.depth_snr)
        self.period = self.result.period[i].value
        self.snr = self.result.depth_snr[i]
        self.duration = self.result.duration[i].value
        self.depth = self.result.depth[i]
        t0 = self.result.transit_time[i].value
        # Re-reference the zero epoch relative to the start of the data
        # (NOTE(review): assumes pytransit's epoch() returns the epoch number
        # of t0 with respect to time.min() -- confirm against pytransit docs).
        ep = epoch(self.ts.time.min(), t0, self.period)
        self.zero_epoch = t0 + ep * self.period
        self.ts.update_ephemeris(self.zero_epoch, self.period, self.duration, self.depth)
        self.logger.info(f"BLS SNR {self.snr:.2f} period {self.period:.2f} d, duration {24*self.duration:.2f} h")
    def add_to_fits(self, hdul: HDUList):
        """Append the BLS results to the primary FITS header (no-op before the search has run)."""
        if self.bls is not None:
            h = hdul[0].header
            h.append(Card('COMMENT', '======================'))
            h.append(Card('COMMENT', '    BLS results      '))
            h.append(Card('COMMENT', '======================'))
            h.append(Card('bls_snr', self.snr, 'BLS depth signal to noise ratio'), bottom=True)
            h.append(Card('period', self.period, 'Orbital period [d]'), bottom=True)
            h.append(Card('epoch', self.zero_epoch, 'Zero epoch [BJD]'), bottom=True)
            h.append(Card('duration', self.duration, 'Transit duration [d]'), bottom=True)
            h.append(Card('depth', self.depth, 'Transit depth'), bottom=True)
    @bplot
    def plot_snr(self, ax=None):
        """Plot the depth-SNR spectrum, highlighting the best-fit period."""
        # Log x-scale for sub-day periods, where the grid is relatively dense.
        if self.period < 1.:
            ax.semilogx(self._periods, self.result.depth_snr, drawstyle='steps-mid')
        else:
            ax.plot(self._periods, self.result.depth_snr, drawstyle='steps-mid')
        ax.axvline(self.period, alpha=0.15, c='orangered', ls='-', lw=10, zorder=-100)
        setp(ax, xlabel='Period [d]', ylabel='Depth SNR')
        ax.autoscale(axis='x', tight=True)
|
hpparviREPO_NAMEopentsPATH_START.@opents_extracted@opents-master@src@blsstep.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "jrenaud90/TidalPy",
"repo_path": "TidalPy_extracted/TidalPy-main/TidalPy/Extending/burnman/__init__.py",
"type": "Python"
}
|
from .package import burnman, Material, Mineral, material_property, dictionarize_formula, formula_mass, burnman_installed
from .build import build_burnman_world
from .burnman_world import BurnManWorld
|
jrenaud90REPO_NAMETidalPyPATH_START.@TidalPy_extracted@TidalPy-main@TidalPy@Extending@burnman@__init__.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/graph_objs/layout/coloraxis/colorbar/__init__.py",
"type": "Python"
}
|
import sys
if sys.version_info < (3, 7):
    # Python < 3.7 lacks module-level __getattr__ (PEP 562): import eagerly.
    from ._tickfont import Tickfont
    from ._tickformatstop import Tickformatstop
    from ._title import Title
    from . import title
else:
    # Python >= 3.7: defer submodule/class imports until first attribute
    # access; relative_import wires up the module's __getattr__/__dir__.
    from _plotly_utils.importers import relative_import
    __all__, __getattr__, __dir__ = relative_import(
        __name__,
        [".title"],
        ["._tickfont.Tickfont", "._tickformatstop.Tickformatstop", "._title.Title"],
    )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@graph_objs@layout@coloraxis@colorbar@__init__.py@.PATH_END.py
|
{
"filename": "test_flow_run_notification_policies.py",
"repo_name": "PrefectHQ/prefect",
"repo_path": "prefect_extracted/prefect-main/tests/server/orchestration/api/test_flow_run_notification_policies.py",
"type": "Python"
}
|
from typing import List
from uuid import uuid4
import pytest
from prefect.server import models, schemas
from prefect.server.schemas.core import FlowRunNotificationPolicy
from prefect.utilities.pydantic import parse_obj_as
@pytest.fixture
async def completed_policy(session, notifier_block):
    # Fixture: an active policy that fires on "Completed" states only,
    # with no tag filtering, wired to the shared notifier block.
    policy = (
        await models.flow_run_notification_policies.create_flow_run_notification_policy(
            session=session,
            flow_run_notification_policy=schemas.core.FlowRunNotificationPolicy(
                state_names=["Completed"],
                tags=[],
                block_document_id=notifier_block._block_document_id,
            ),
        )
    )
    await session.commit()
    return policy
@pytest.fixture
async def failed_policy(session, notifier_block):
    # Fixture: a policy that fires on "Failed" states only, with no tag
    # filtering, wired to the shared notifier block.
    policy = (
        await models.flow_run_notification_policies.create_flow_run_notification_policy(
            session=session,
            flow_run_notification_policy=schemas.core.FlowRunNotificationPolicy(
                state_names=["Failed"],
                tags=[],
                block_document_id=notifier_block._block_document_id,
            ),
        )
    )
    await session.commit()
    return policy
class TestCreateFlowRunNotificationPolicy:
    """POST /flow_run_notification_policies/ creates policies."""
    async def test_create_policy(self, client, notifier_block):
        response = await client.post(
            "/flow_run_notification_policies/",
            json=dict(
                schemas.actions.FlowRunNotificationPolicyCreate(
                    state_names=["Completed"],
                    tags=[],
                    block_document_id=notifier_block._block_document_id,
                ).model_dump(mode="json"),
            ),
        )
        # 201 Created, and the returned body round-trips through the schema.
        assert response.status_code == 201
        policy = FlowRunNotificationPolicy.model_validate(response.json())
        assert policy.state_names == ["Completed"]
    async def test_create_policy_with_message(self, client, notifier_block):
        response = await client.post(
            "/flow_run_notification_policies/",
            json=dict(
                schemas.actions.FlowRunNotificationPolicyCreate(
                    state_names=["Completed"],
                    tags=[],
                    block_document_id=notifier_block._block_document_id,
                    message_template="Hello there {flow_run_name}",
                ).model_dump(mode="json"),
            ),
        )
        # The custom message template must survive the create round trip.
        policy = FlowRunNotificationPolicy.model_validate(response.json())
        assert policy.message_template == "Hello there {flow_run_name}"
class TestReadFlowRunNotificationPolicy:
    """GET /flow_run_notification_policies/{id} reads a single policy."""
    async def test_read_policy(self, client, completed_policy):
        response = await client.get(
            f"/flow_run_notification_policies/{completed_policy.id}"
        )
        assert response.status_code == 200
        policy = FlowRunNotificationPolicy.model_validate(response.json())
        assert policy.id == completed_policy.id
    async def test_read_policy_with_invalid_id(self, client):
        # An unknown UUID yields 404, not an empty body.
        response = await client.get(f"/flow_run_notification_policies/{uuid4()}")
        assert response.status_code == 404
class TestReadFlowRunNotificationPolicies:
    """POST /flow_run_notification_policies/filter lists and filters policies."""
    @pytest.fixture(autouse=True)
    async def policies(self, session, completed_policy, failed_policy):
        # set failed policy to inactive
        await models.flow_run_notification_policies.update_flow_run_notification_policy(
            session=session,
            flow_run_notification_policy_id=failed_policy.id,
            flow_run_notification_policy=schemas.actions.FlowRunNotificationPolicyUpdate(
                is_active=False
            ),
        )
        await session.commit()
        return completed_policy, failed_policy
    async def test_read_policies(self, client, policies):
        # No filter: both policies come back.
        response = await client.post("/flow_run_notification_policies/filter")
        assert response.status_code == 200
        result = parse_obj_as(List[FlowRunNotificationPolicy], response.json())
        assert len(result) == 2
        assert {r.id for r in result} == {p.id for p in policies}
    async def test_read_active_policies(self, client, completed_policy):
        # is_active == True matches only the (still active) completed policy.
        response = await client.post(
            "/flow_run_notification_policies/filter",
            json=dict(
                flow_run_notification_policy_filter=dict(is_active=dict(eq_=True))
            ),
        )
        assert response.status_code == 200
        result = parse_obj_as(List[FlowRunNotificationPolicy], response.json())
        assert len(result) == 1
        assert result[0].id == completed_policy.id
    async def test_read_inactive_policies(self, client, failed_policy):
        # is_active == False matches only the failed policy (deactivated above).
        response = await client.post(
            "/flow_run_notification_policies/filter",
            json=dict(
                flow_run_notification_policy_filter=dict(is_active=dict(eq_=False))
            ),
        )
        assert response.status_code == 200
        result = parse_obj_as(List[FlowRunNotificationPolicy], response.json())
        assert len(result) == 1
        assert result[0].id == failed_policy.id
class TestUpdateFlowRunNotificationPolicy:
    """PATCH /flow_run_notification_policies/{id} applies partial updates."""
    async def test_update_policy_states(self, client, session, completed_policy):
        response = await client.patch(
            f"/flow_run_notification_policies/{completed_policy.id}",
            json=schemas.actions.FlowRunNotificationPolicyUpdate(
                state_names=["My State"]
            ).model_dump(mode="json", exclude_unset=True),
        )
        assert response.status_code == 204
        policy_id = completed_policy.id
        # Drop cached ORM state so the re-read reflects the API's update.
        session.expire_all()
        policy = await models.flow_run_notification_policies.read_flow_run_notification_policy(
            session=session, flow_run_notification_policy_id=policy_id
        )
        # Only state_names changed; is_active keeps its prior value.
        assert policy.state_names == ["My State"]
        assert policy.is_active is True
    async def test_update_policy_active(self, session, client, completed_policy):
        response = await client.patch(
            f"/flow_run_notification_policies/{completed_policy.id}",
            json=schemas.actions.FlowRunNotificationPolicyUpdate(
                is_active=False
            ).model_dump(mode="json", exclude_unset=True),
        )
        assert response.status_code == 204
        policy_id = completed_policy.id
        # Drop cached ORM state so the re-read reflects the API's update.
        session.expire_all()
        policy = await models.flow_run_notification_policies.read_flow_run_notification_policy(
            session=session, flow_run_notification_policy_id=policy_id
        )
        # Only is_active changed; state_names keeps its prior value.
        assert policy.state_names == ["Completed"]
        assert policy.is_active is False
    async def test_update_policy_message_template(
        self, session, client, completed_policy
    ):
        response = await client.patch(
            f"/flow_run_notification_policies/{completed_policy.id}",
            json=schemas.actions.FlowRunNotificationPolicyUpdate(
                message_template="Hi there {flow_run_name}"
            ).model_dump(mode="json", exclude_unset=True),
        )
        assert response.status_code == 204
        policy_id = completed_policy.id
        # Drop cached ORM state so the re-read reflects the API's update.
        session.expire_all()
        policy = await models.flow_run_notification_policies.read_flow_run_notification_policy(
            session=session, flow_run_notification_policy_id=policy_id
        )
        assert policy.message_template == "Hi there {flow_run_name}"
    async def test_update_missing_policy(self, client, completed_policy):
        # Updating an unknown UUID yields 404.
        response = await client.patch(
            f"/flow_run_notification_policies/{uuid4()}",
            json=schemas.actions.FlowRunNotificationPolicyUpdate(
                is_active=False
            ).model_dump(mode="json", exclude_unset=True),
        )
        assert response.status_code == 404
class TestDeleteFlowRunNotificationPolicy:
    """DELETE /flow_run_notification_policies/{id} removes policies."""
    async def test_delete_policy(self, client, completed_policy):
        response = await client.delete(
            f"/flow_run_notification_policies/{completed_policy.id}"
        )
        assert response.status_code == 204
    async def test_delete_missing_policy(self, client):
        # Deleting an unknown UUID yields 404.
        response = await client.delete(f"/flow_run_notification_policies/{uuid4()}")
        assert response.status_code == 404
|
PrefectHQREPO_NAMEprefectPATH_START.@prefect_extracted@prefect-main@tests@server@orchestration@api@test_flow_run_notification_policies.py@.PATH_END.py
|
{
"filename": "_domain.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/graph_objs/treemap/_domain.py",
"type": "Python"
}
|
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Domain(_BaseTraceHierarchyType):
    """Layout-grid domain of a treemap trace (grid column/row or x/y extents)."""
    # NOTE(review): this class has the same shape as the other generated
    # trace ``.domain`` classes -- it appears schema-generated; confirm
    # before hand-editing (changes may belong in the code generator).

    # class properties
    # --------------------
    _parent_path_str = "treemap"
    _path_str = "treemap.domain"
    _valid_props = {"column", "row", "x", "y"}

    # column
    # ------
    @property
    def column(self):
        """
        If there is a layout grid, use the domain for this column in
        the grid for this treemap trace .

        The 'column' property is a integer and may be specified as:
          - An int (or float that will be cast to an int)
            in the interval [0, 9223372036854775807]

        Returns
        -------
        int
        """
        return self["column"]

    @column.setter
    def column(self, val):
        self["column"] = val

    # row
    # ---
    @property
    def row(self):
        """
        If there is a layout grid, use the domain for this row in the
        grid for this treemap trace .

        The 'row' property is a integer and may be specified as:
          - An int (or float that will be cast to an int)
            in the interval [0, 9223372036854775807]

        Returns
        -------
        int
        """
        return self["row"]

    @row.setter
    def row(self, val):
        self["row"] = val

    # x
    # -
    @property
    def x(self):
        """
        Sets the horizontal domain of this treemap trace (in plot
        fraction).

        The 'x' property is an info array that may be specified as:

        * a list or tuple of 2 elements where:
    (0) The 'x[0]' property is a number and may be specified as:
          - An int or float in the interval [0, 1]
    (1) The 'x[1]' property is a number and may be specified as:
          - An int or float in the interval [0, 1]

        Returns
        -------
        list
        """
        return self["x"]

    @x.setter
    def x(self, val):
        self["x"] = val

    # y
    # -
    @property
    def y(self):
        """
        Sets the vertical domain of this treemap trace (in plot
        fraction).

        The 'y' property is an info array that may be specified as:

        * a list or tuple of 2 elements where:
    (0) The 'y[0]' property is a number and may be specified as:
          - An int or float in the interval [0, 1]
    (1) The 'y[1]' property is a number and may be specified as:
          - An int or float in the interval [0, 1]

        Returns
        -------
        list
        """
        return self["y"]

    @y.setter
    def y(self, val):
        self["y"] = val

    # Self properties description
    # ---------------------------
    @property
    def _prop_descriptions(self):
        return """\
        column
            If there is a layout grid, use the domain for this
            column in the grid for this treemap trace .
        row
            If there is a layout grid, use the domain for this row
            in the grid for this treemap trace .
        x
            Sets the horizontal domain of this treemap trace (in
            plot fraction).
        y
            Sets the vertical domain of this treemap trace (in plot
            fraction).
        """

    def __init__(self, arg=None, column=None, row=None, x=None, y=None, **kwargs):
        """
        Construct a new Domain object

        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of
            :class:`plotly.graph_objs.treemap.Domain`
        column
            If there is a layout grid, use the domain for this
            column in the grid for this treemap trace .
        row
            If there is a layout grid, use the domain for this row
            in the grid for this treemap trace .
        x
            Sets the horizontal domain of this treemap trace (in
            plot fraction).
        y
            Sets the vertical domain of this treemap trace (in plot
            fraction).

        Returns
        -------
        Domain
        """
        super(Domain, self).__init__("domain")
        # Internal fast path: attach to an existing parent and skip the
        # rest of normal construction/validation.
        if "_parent" in kwargs:
            self._parent = kwargs["_parent"]
            return

        # Validate arg
        # ------------
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            arg = _copy.copy(arg)
        else:
            raise ValueError(
                """\
The first argument to the plotly.graph_objs.treemap.Domain
constructor must be a dict or
an instance of :class:`plotly.graph_objs.treemap.Domain`"""
            )

        # Handle skip_invalid
        # -------------------
        self._skip_invalid = kwargs.pop("skip_invalid", False)
        self._validate = kwargs.pop("_validate", True)

        # Populate data dict with properties
        # ----------------------------------
        # Explicit keyword arguments take precedence over entries in ``arg``.
        _v = arg.pop("column", None)
        _v = column if column is not None else _v
        if _v is not None:
            self["column"] = _v
        _v = arg.pop("row", None)
        _v = row if row is not None else _v
        if _v is not None:
            self["row"] = _v
        _v = arg.pop("x", None)
        _v = x if x is not None else _v
        if _v is not None:
            self["x"] = _v
        _v = arg.pop("y", None)
        _v = y if y is not None else _v
        if _v is not None:
            self["y"] = _v

        # Process unknown kwargs
        # ----------------------
        self._process_kwargs(**dict(arg, **kwargs))

        # Reset skip_invalid
        # ------------------
        self._skip_invalid = False
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@graph_objs@treemap@_domain.py@.PATH_END.py
|
{
"filename": "GSPHHydroBase.py",
"repo_name": "LLNL/spheral",
"repo_path": "spheral_extracted/spheral-main/src/PYB11/GSPH/GSPHHydroBase.py",
"type": "Python"
}
|
#-------------------------------------------------------------------------------
# GSPHHydroBase
#-------------------------------------------------------------------------------
from PYB11Generator import *
from GenericRiemannHydro import *
from RestartMethods import *
@PYB11template("Dimension")
@PYB11module("SpheralGSPH")
@PYB11dynamic_attr
class GSPHHydroBase(GenericRiemannHydro):
PYB11typedefs = """
typedef typename %(Dimension)s::Scalar Scalar;
typedef typename %(Dimension)s::Vector Vector;
typedef typename %(Dimension)s::Tensor Tensor;
typedef typename %(Dimension)s::SymTensor SymTensor;
typedef typename Physics<%(Dimension)s>::TimeStepType TimeStepType;
"""
def pyinit(smoothingScaleMethod = "const SmoothingScaleBase<%(Dimension)s>&",
dataBase = "DataBase<%(Dimension)s>&",
riemannSolver = "RiemannSolverBase<%(Dimension)s>&",
W = "const TableKernel<%(Dimension)s>&",
epsDiffusionCoeff = "const Scalar",
cfl = "const double",
useVelocityMagnitudeForDt = "const bool",
compatibleEnergyEvolution = "const bool",
evolveTotalEnergy = "const bool",
XSPH = "const bool",
correctVelocityGradient = "const bool",
gradType = "const GradientType",
densityUpdate = "const MassDensityType",
HUpdate = "const HEvolutionType",
epsTensile = "const double",
nTensile = "const double",
xmin = "const Vector&",
xmax = "const Vector&"):
"GSPHHydroBase constructor"
#...........................................................................
# Virtual methods
@PYB11virtual
def initializeProblemStartupDependencies(self,
dataBase = "DataBase<%(Dimension)s>&",
state = "State<%(Dimension)s>&",
derivs = "StateDerivatives<%(Dimension)s>&"):
"""A second optional method to be called on startup, after Physics::initializeProblemStartup has
been called.
One use for this hook is to fill in dependendent state using the State object, such as
temperature or pressure."""
return "void"
@PYB11virtual
def registerState(dataBase = "DataBase<%(Dimension)s>&",
state = "State<%(Dimension)s>&"):
"Register the state Hydro expects to use and evolve."
return "void"
@PYB11virtual
def registerDerivatives(dataBase = "DataBase<%(Dimension)s>&",
derivs = "StateDerivatives<%(Dimension)s>&"):
"Register the derivatives/change fields for updating state."
return "void"
@PYB11virtual
def preStepInitialize(self,
dataBase = "const DataBase<%(Dimension)s>&",
state = "State<%(Dimension)s>&",
derivs = "StateDerivatives<%(Dimension)s>&"):
"Optional hook to be called at the beginning of a time step."
return "void"
@PYB11virtual
def initialize(time = "const Scalar",
dt = "const Scalar",
dataBase = "const DataBase<%(Dimension)s>&",
state = "State<%(Dimension)s>&",
derivs = "StateDerivatives<%(Dimension)s>&"):
"Initialize the Hydro before we start a derivative evaluation."
return "void"
# PYB11 binding stub: the main physics kernel; const on the C++ side
# (reads state, writes only into the derivatives object).
@PYB11virtual
@PYB11const
def evaluateDerivatives(time = "const Scalar",
                        dt = "const Scalar",
                        dataBase = "const DataBase<%(Dimension)s>&",
                        state = "const State<%(Dimension)s>&",
                        derivs = "StateDerivatives<%(Dimension)s>&"):
    """Evaluate the derivatives for the principle hydro
    mass density, velocity, and specific thermal energy."""
    return "void"
# PYB11 binding stub: post-processing after evaluateDerivatives (const method).
@PYB11virtual
@PYB11const
def finalizeDerivatives(time = "const Scalar",
                        dt = "const Scalar",
                        dataBase = "const DataBase<%(Dimension)s>&",
                        state = "const State<%(Dimension)s>&",
                        derivs = "StateDerivatives<%(Dimension)s>&"):
    "Finalize the derivatives."
    return "void"
# PYB11 binding stub: push field values into ghost (boundary image) nodes.
@PYB11virtual
def applyGhostBoundaries(state = "State<%(Dimension)s>&",
                         derivs = "StateDerivatives<%(Dimension)s>&"):
    "Apply boundary conditions to the physics specific fields."
    return "void"
# PYB11 binding stub: enforce boundary constraints on interior field values.
@PYB11virtual
def enforceBoundaries(state = "State<%(Dimension)s>&",
                      derivs = "StateDerivatives<%(Dimension)s>&"):
    "Enforce boundary conditions for the physics specific fields."
    return "void"
# Read-only accessor for the mass-density time-derivative FieldList; returned
# by const reference with pybind11's reference_internal return policy.
DmassDensityDt = PYB11property("const FieldList<%(Dimension)s, Scalar>&", "DmassDensityDt", returnpolicy="reference_internal")
#-------------------------------------------------------------------------------
# Inject methods
#-------------------------------------------------------------------------------
# Mix the RestartMethods interface into GSPHHydroBase's generated bindings.
PYB11inject(RestartMethods, GSPHHydroBase)
|
LLNLREPO_NAMEspheralPATH_START.@spheral_extracted@spheral-main@src@PYB11@GSPH@GSPHHydroBase.py@.PATH_END.py
|
{
"filename": "bug_report.md",
"repo_name": "handley-lab/anesthetic",
"repo_path": "anesthetic_extracted/anesthetic-master/.github/ISSUE_TEMPLATE/bug_report.md",
"type": "Markdown"
}
|
---
name: Bug report
about: Create a report to help us improve
labels:
---
**Describe the bug**
A clear and concise description of what the bug is.
**To Reproduce**
Steps to reproduce the behavior.
**Expected behavior**
A clear and concise description of what you expected to happen.
**Screenshots**
If applicable, add screenshots to help explain your problem.
**Additional context**
Add any other context about the problem here.
|
handley-labREPO_NAMEanestheticPATH_START.@anesthetic_extracted@anesthetic-master@.github@ISSUE_TEMPLATE@bug_report.md@.PATH_END.py
|
{
"filename": "_z.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/volume/caps/_z.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class ZValidator(_plotly_utils.basevalidators.CompoundValidator):
    # Auto-generated validator for the `volume.caps.z` compound property.
    # NOTE(review): the `show` entry's text below looks copy-pasted from the
    # `slices` property — this file is codegen output, so verify against the
    # upstream plotly schema before editing the string.
    def __init__(self, plotly_name="z", parent_name="volume.caps", **kwargs):
        super(ZValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            data_class_str=kwargs.pop("data_class_str", "Z"),
            data_docs=kwargs.pop(
                "data_docs",
                """
            fill
                Sets the fill ratio of the `caps`. The default
                fill value of the `caps` is 1 meaning that they
                are entirely shaded. On the other hand Applying
                a `fill` ratio less than one would allow the
                creation of openings parallel to the edges.
            show
                Sets the fill ratio of the `slices`. The
                default fill value of the z `slices` is 1
                meaning that they are entirely shaded. On the
                other hand Applying a `fill` ratio less than
                one would allow the creation of openings
                parallel to the edges.
""",
            ),
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@volume@caps@_z.py@.PATH_END.py
|
{
"filename": "_gantt.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/figure_factory/_gantt.py",
"type": "Python"
}
|
from numbers import Number
import copy
from plotly import exceptions, optional_imports
import plotly.colors as clrs
from plotly.figure_factory import utils
import plotly.graph_objects as go
pd = optional_imports.get_module("pandas")
REQUIRED_GANTT_KEYS = ["Task", "Start", "Finish"]
def _get_corner_points(x0, y0, x1, y1):
    """
    Return the four corners of a scatter rectangle.

    :param x0: x-start
    :param y0: y-lower
    :param x1: x-end
    :param y1: y-upper
    :return: ([x], [y]) lists of corner coordinates, ordered
        lower-left, lower-right, upper-right, upper-left
    """
    corner_xs = [x0, x1, x1, x0]
    corner_ys = [y0, y0, y1, y1]
    return (corner_xs, corner_ys)
def validate_gantt(df):
    """
    Validate the gantt input and normalize it to a list of row dicts.

    Accepts either a pandas DataFrame (which must contain the
    REQUIRED_GANTT_KEYS columns) or a non-empty list of dictionaries.
    Raises PlotlyError on any other input.
    """
    if pd and isinstance(df, pd.core.frame.DataFrame):
        # validate that df has all the required keys
        if any(key not in df for key in REQUIRED_GANTT_KEYS):
            raise exceptions.PlotlyError(
                "The columns in your dataframe must include the "
                "following keys: {0}".format(", ".join(REQUIRED_GANTT_KEYS))
            )
        # flatten the dataframe into one dict per row, keeping every column
        return [
            {key: df.iloc[row][key] for key in df}
            for row in range(len(df.index))
        ]

    # validate if df is a list
    if not isinstance(df, list):
        raise exceptions.PlotlyError(
            "You must input either a dataframe or a list of dictionaries."
        )

    # validate if df is empty
    if len(df) <= 0:
        raise exceptions.PlotlyError(
            "Your list is empty. It must contain at least one dictionary."
        )
    if not isinstance(df[0], dict):
        raise exceptions.PlotlyError("Your list must only include dictionaries.")
    return df
def gantt(
    chart,
    colors,
    title,
    bar_width,
    showgrid_x,
    showgrid_y,
    height,
    width,
    tasks=None,
    task_names=None,
    data=None,
    group_tasks=False,
    show_hover_fill=True,
    show_colorbar=True,
):
    """
    Build the gantt figure for a plain sequence of colors (no index column).

    Refer to create_gantt() for docstring
    """
    # Mutable accumulators default to fresh lists per call.
    if tasks is None:
        tasks = []
    if task_names is None:
        task_names = []
    if data is None:
        data = []

    # One task dict per chart row: the horizontal span [Start, Finish],
    # the row label, and optional hover text.
    for index in range(len(chart)):
        task = dict(
            x0=chart[index]["Start"],
            x1=chart[index]["Finish"],
            name=chart[index]["Task"],
        )
        if "Description" in chart[index]:
            task["description"] = chart[index]["Description"]
        tasks.append(task)

    # create a scatter trace for every task group
    scatter_data_dict = dict()
    marker_data_dict = dict()

    if show_hover_fill:
        hoverinfo = "name"
    else:
        hoverinfo = "skip"

    # Filled rectangles are drawn as "toself"-filled scatter traces.
    scatter_data_template = {
        "x": [],
        "y": [],
        "mode": "none",
        "fill": "toself",
        "hoverinfo": hoverinfo,
    }

    # Invisible markers at both ends of each bar carry the hover text.
    marker_data_template = {
        "x": [],
        "y": [],
        "mode": "markers",
        "text": [],
        "marker": dict(color="", size=1, opacity=0),
        "name": "",
        "showlegend": False,
    }

    # create the list of task names
    for index in range(len(tasks)):
        tn = tasks[index]["name"]
        # Is added to task_names if group_tasks is set to False,
        # or if the option is used (True) it only adds them if the
        # name is not already in the list
        if not group_tasks or tn not in task_names:
            task_names.append(tn)
    # Guarantees that for grouped tasks the tasks that are inserted first
    # are shown at the top
    if group_tasks:
        task_names.reverse()

    color_index = 0
    for index in range(len(tasks)):
        tn = tasks[index]["name"]
        del tasks[index]["name"]

        # If group_tasks is True, all tasks with the same name belong
        # to the same row.
        groupID = index
        if group_tasks:
            groupID = task_names.index(tn)
        tasks[index]["y0"] = groupID - bar_width
        tasks[index]["y1"] = groupID + bar_width

        # check if colors need to be looped
        if color_index >= len(colors):
            color_index = 0
        tasks[index]["fillcolor"] = colors[color_index]
        color_id = tasks[index]["fillcolor"]

        # All bars that share a fill color are merged into one scatter trace.
        if color_id not in scatter_data_dict:
            scatter_data_dict[color_id] = copy.deepcopy(scatter_data_template)

        scatter_data_dict[color_id]["fillcolor"] = color_id
        scatter_data_dict[color_id]["name"] = str(tn)
        scatter_data_dict[color_id]["legendgroup"] = color_id

        # if there are already values append the gap
        if len(scatter_data_dict[color_id]["x"]) > 0:
            # a gap on the scatterplot separates the rectangles from each other
            scatter_data_dict[color_id]["x"].append(
                scatter_data_dict[color_id]["x"][-1]
            )
            scatter_data_dict[color_id]["y"].append(None)

        xs, ys = _get_corner_points(
            tasks[index]["x0"],
            tasks[index]["y0"],
            tasks[index]["x1"],
            tasks[index]["y1"],
        )
        scatter_data_dict[color_id]["x"] += xs
        scatter_data_dict[color_id]["y"] += ys

        # append dummy markers for showing start and end of interval
        if color_id not in marker_data_dict:
            marker_data_dict[color_id] = copy.deepcopy(marker_data_template)
            marker_data_dict[color_id]["marker"]["color"] = color_id
            marker_data_dict[color_id]["legendgroup"] = color_id

        marker_data_dict[color_id]["x"].append(tasks[index]["x0"])
        marker_data_dict[color_id]["x"].append(tasks[index]["x1"])
        marker_data_dict[color_id]["y"].append(groupID)
        marker_data_dict[color_id]["y"].append(groupID)

        # Hover text (if any) is duplicated: one entry per endpoint marker.
        if "description" in tasks[index]:
            marker_data_dict[color_id]["text"].append(tasks[index]["description"])
            marker_data_dict[color_id]["text"].append(tasks[index]["description"])
            del tasks[index]["description"]
        else:
            marker_data_dict[color_id]["text"].append(None)
            marker_data_dict[color_id]["text"].append(None)

        color_index += 1

    showlegend = show_colorbar

    layout = dict(
        title=title,
        showlegend=showlegend,
        height=height,
        width=width,
        shapes=[],
        hovermode="closest",
        yaxis=dict(
            showgrid=showgrid_y,
            ticktext=task_names,
            tickvals=list(range(len(task_names))),
            range=[-1, len(task_names) + 1],
            autorange=False,
            zeroline=False,
        ),
        xaxis=dict(
            showgrid=showgrid_x,
            zeroline=False,
            rangeselector=dict(
                buttons=list(
                    [
                        dict(count=7, label="1w", step="day", stepmode="backward"),
                        dict(count=1, label="1m", step="month", stepmode="backward"),
                        dict(count=6, label="6m", step="month", stepmode="backward"),
                        dict(count=1, label="YTD", step="year", stepmode="todate"),
                        dict(count=1, label="1y", step="year", stepmode="backward"),
                        dict(step="all"),
                    ]
                )
            ),
            type="date",
        ),
    )

    # NOTE(review): `data` is rebound here, so any caller-supplied `data`
    # argument is ignored from this point on.
    data = [scatter_data_dict[k] for k in sorted(scatter_data_dict)]
    data += [marker_data_dict[k] for k in sorted(marker_data_dict)]

    # fig = dict(
    #     data=data, layout=layout
    # )
    fig = go.Figure(data=data, layout=layout)
    return fig
def gantt_colorscale(
    chart,
    colors,
    title,
    index_col,
    show_colorbar,
    bar_width,
    showgrid_x,
    showgrid_y,
    height,
    width,
    tasks=None,
    task_names=None,
    data=None,
    group_tasks=False,
    show_hover_fill=True,
):
    """
    Build the gantt figure when bar colors come from an index column:
    numeric index values are mapped onto a two-color scale (interpreted as
    a 0-100 percentage), string values onto the given color sequence.

    Refer to FigureFactory.create_gantt() for docstring
    """
    # Mutable accumulators default to fresh lists per call.
    if tasks is None:
        tasks = []
    if task_names is None:
        task_names = []
    if data is None:
        data = []
    showlegend = False

    # One task dict per chart row: span [Start, Finish] plus optional hover text.
    for index in range(len(chart)):
        task = dict(
            x0=chart[index]["Start"],
            x1=chart[index]["Finish"],
            name=chart[index]["Task"],
        )
        if "Description" in chart[index]:
            task["description"] = chart[index]["Description"]
        tasks.append(task)

    # create a scatter trace for every task group
    scatter_data_dict = dict()
    # create scatter traces for the start- and endpoints
    marker_data_dict = dict()

    if show_hover_fill:
        hoverinfo = "name"
    else:
        hoverinfo = "skip"

    scatter_data_template = {
        "x": [],
        "y": [],
        "mode": "none",
        "fill": "toself",
        "showlegend": False,
        "hoverinfo": hoverinfo,
        "legendgroup": "",
    }

    marker_data_template = {
        "x": [],
        "y": [],
        "mode": "markers",
        "text": [],
        "marker": dict(color="", size=1, opacity=0),
        "name": "",
        "showlegend": False,
        "legendgroup": "",
    }

    # Collect the distinct index values in sorted order.
    index_vals = []
    for row in range(len(tasks)):
        if chart[row][index_col] not in index_vals:
            index_vals.append(chart[row][index_col])

    index_vals.sort()

    # compute the color for task based on indexing column
    if isinstance(chart[0][index_col], Number):
        # check that colors has at least 2 colors
        if len(colors) < 2:
            raise exceptions.PlotlyError(
                "You must use at least 2 colors in 'colors' if you "
                "are using a colorscale. However only the first two "
                "colors given will be used for the lower and upper "
                "bounds on the colormap."
            )

        # create the list of task names
        for index in range(len(tasks)):
            tn = tasks[index]["name"]
            # Is added to task_names if group_tasks is set to False,
            # or if the option is used (True) it only adds them if the
            # name is not already in the list
            if not group_tasks or tn not in task_names:
                task_names.append(tn)
        # Guarantees that for grouped tasks the tasks that are inserted
        # first are shown at the top
        if group_tasks:
            task_names.reverse()

        for index in range(len(tasks)):
            tn = tasks[index]["name"]
            del tasks[index]["name"]

            # If group_tasks is True, all tasks with the same name belong
            # to the same row.
            groupID = index
            if group_tasks:
                groupID = task_names.index(tn)
            tasks[index]["y0"] = groupID - bar_width
            tasks[index]["y1"] = groupID + bar_width

            # unlabel color
            # NOTE(review): colors are unlabelled here and relabelled below on
            # every iteration — correct but redundant round-tripping.
            colors = clrs.color_parser(colors, clrs.unlabel_rgb)
            lowcolor = colors[0]
            highcolor = colors[1]

            # The numeric index value is treated as a percentage (0-100)
            # along the lowcolor->highcolor scale.
            intermed = (chart[index][index_col]) / 100.0
            intermed_color = clrs.find_intermediate_color(lowcolor, highcolor, intermed)
            intermed_color = clrs.color_parser(intermed_color, clrs.label_rgb)
            tasks[index]["fillcolor"] = intermed_color
            color_id = tasks[index]["fillcolor"]

            if color_id not in scatter_data_dict:
                scatter_data_dict[color_id] = copy.deepcopy(scatter_data_template)

            scatter_data_dict[color_id]["fillcolor"] = color_id
            scatter_data_dict[color_id]["name"] = str(chart[index][index_col])
            scatter_data_dict[color_id]["legendgroup"] = color_id

            # relabel colors with 'rgb'
            colors = clrs.color_parser(colors, clrs.label_rgb)

            # if there are already values append the gap
            if len(scatter_data_dict[color_id]["x"]) > 0:
                # a gap on the scatterplot separates the rectangles from each other
                scatter_data_dict[color_id]["x"].append(
                    scatter_data_dict[color_id]["x"][-1]
                )
                scatter_data_dict[color_id]["y"].append(None)

            xs, ys = _get_corner_points(
                tasks[index]["x0"],
                tasks[index]["y0"],
                tasks[index]["x1"],
                tasks[index]["y1"],
            )
            scatter_data_dict[color_id]["x"] += xs
            scatter_data_dict[color_id]["y"] += ys

            # append dummy markers for showing start and end of interval
            if color_id not in marker_data_dict:
                marker_data_dict[color_id] = copy.deepcopy(marker_data_template)
                marker_data_dict[color_id]["marker"]["color"] = color_id
                marker_data_dict[color_id]["legendgroup"] = color_id

            marker_data_dict[color_id]["x"].append(tasks[index]["x0"])
            marker_data_dict[color_id]["x"].append(tasks[index]["x1"])
            marker_data_dict[color_id]["y"].append(groupID)
            marker_data_dict[color_id]["y"].append(groupID)

            # Hover text (if any) is duplicated: one entry per endpoint marker.
            if "description" in tasks[index]:
                marker_data_dict[color_id]["text"].append(tasks[index]["description"])
                marker_data_dict[color_id]["text"].append(tasks[index]["description"])
                del tasks[index]["description"]
            else:
                marker_data_dict[color_id]["text"].append(None)
                marker_data_dict[color_id]["text"].append(None)

        # add colorbar to one of the traces randomly just for display
        if show_colorbar is True:
            k = list(marker_data_dict.keys())[0]
            marker_data_dict[k]["marker"].update(
                dict(
                    colorscale=[[0, colors[0]], [1, colors[1]]],
                    showscale=True,
                    cmax=100,
                    cmin=0,
                )
            )

    if isinstance(chart[0][index_col], str):
        # Recompute the distinct index values (same result as above).
        index_vals = []
        for row in range(len(tasks)):
            if chart[row][index_col] not in index_vals:
                index_vals.append(chart[row][index_col])

        index_vals.sort()

        if len(colors) < len(index_vals):
            raise exceptions.PlotlyError(
                "Error. The number of colors in 'colors' must be no less "
                "than the number of unique index values in your group "
                "column."
            )

        # make a dictionary assignment to each index value
        index_vals_dict = {}
        # define color index
        c_index = 0
        for key in index_vals:
            if c_index > len(colors) - 1:
                c_index = 0
            index_vals_dict[key] = colors[c_index]
            c_index += 1

        # create the list of task names
        for index in range(len(tasks)):
            tn = tasks[index]["name"]
            # Is added to task_names if group_tasks is set to False,
            # or if the option is used (True) it only adds them if the
            # name is not already in the list
            if not group_tasks or tn not in task_names:
                task_names.append(tn)
        # Guarantees that for grouped tasks the tasks that are inserted
        # first are shown at the top
        if group_tasks:
            task_names.reverse()

        for index in range(len(tasks)):
            tn = tasks[index]["name"]
            del tasks[index]["name"]

            # If group_tasks is True, all tasks with the same name belong
            # to the same row.
            groupID = index
            if group_tasks:
                groupID = task_names.index(tn)
            tasks[index]["y0"] = groupID - bar_width
            tasks[index]["y1"] = groupID + bar_width

            tasks[index]["fillcolor"] = index_vals_dict[chart[index][index_col]]
            color_id = tasks[index]["fillcolor"]

            if color_id not in scatter_data_dict:
                scatter_data_dict[color_id] = copy.deepcopy(scatter_data_template)

            scatter_data_dict[color_id]["fillcolor"] = color_id
            scatter_data_dict[color_id]["legendgroup"] = color_id
            scatter_data_dict[color_id]["name"] = str(chart[index][index_col])

            # relabel colors with 'rgb'
            colors = clrs.color_parser(colors, clrs.label_rgb)

            # if there are already values append the gap
            if len(scatter_data_dict[color_id]["x"]) > 0:
                # a gap on the scatterplot separates the rectangles from each other
                scatter_data_dict[color_id]["x"].append(
                    scatter_data_dict[color_id]["x"][-1]
                )
                scatter_data_dict[color_id]["y"].append(None)

            xs, ys = _get_corner_points(
                tasks[index]["x0"],
                tasks[index]["y0"],
                tasks[index]["x1"],
                tasks[index]["y1"],
            )
            scatter_data_dict[color_id]["x"] += xs
            scatter_data_dict[color_id]["y"] += ys

            # append dummy markers for showing start and end of interval
            if color_id not in marker_data_dict:
                marker_data_dict[color_id] = copy.deepcopy(marker_data_template)
                marker_data_dict[color_id]["marker"]["color"] = color_id
                marker_data_dict[color_id]["legendgroup"] = color_id

            marker_data_dict[color_id]["x"].append(tasks[index]["x0"])
            marker_data_dict[color_id]["x"].append(tasks[index]["x1"])
            marker_data_dict[color_id]["y"].append(groupID)
            marker_data_dict[color_id]["y"].append(groupID)

            if "description" in tasks[index]:
                marker_data_dict[color_id]["text"].append(tasks[index]["description"])
                marker_data_dict[color_id]["text"].append(tasks[index]["description"])
                del tasks[index]["description"]
            else:
                marker_data_dict[color_id]["text"].append(None)
                marker_data_dict[color_id]["text"].append(None)

        if show_colorbar is True:
            showlegend = True

        for k in scatter_data_dict:
            scatter_data_dict[k]["showlegend"] = showlegend

        # add colorbar to one of the traces randomly just for display
        # if show_colorbar is True:
        #     k = list(marker_data_dict.keys())[0]
        #     marker_data_dict[k]["marker"].update(
        #         dict(
        #             colorscale=[[0, colors[0]], [1, colors[1]]],
        #             showscale=True,
        #             cmax=100,
        #             cmin=0,
        #         )
        #     )

    layout = dict(
        title=title,
        showlegend=showlegend,
        height=height,
        width=width,
        shapes=[],
        hovermode="closest",
        yaxis=dict(
            showgrid=showgrid_y,
            ticktext=task_names,
            tickvals=list(range(len(task_names))),
            range=[-1, len(task_names) + 1],
            autorange=False,
            zeroline=False,
        ),
        xaxis=dict(
            showgrid=showgrid_x,
            zeroline=False,
            rangeselector=dict(
                buttons=list(
                    [
                        dict(count=7, label="1w", step="day", stepmode="backward"),
                        dict(count=1, label="1m", step="month", stepmode="backward"),
                        dict(count=6, label="6m", step="month", stepmode="backward"),
                        dict(count=1, label="YTD", step="year", stepmode="todate"),
                        dict(count=1, label="1y", step="year", stepmode="backward"),
                        dict(step="all"),
                    ]
                )
            ),
            type="date",
        ),
    )

    # NOTE(review): `data` is rebound here, so any caller-supplied `data`
    # argument is ignored from this point on.
    data = [scatter_data_dict[k] for k in sorted(scatter_data_dict)]
    data += [marker_data_dict[k] for k in sorted(marker_data_dict)]

    # fig = dict(
    #     data=data, layout=layout
    # )
    fig = go.Figure(data=data, layout=layout)
    return fig
def gantt_dict(
    chart,
    colors,
    title,
    index_col,
    show_colorbar,
    bar_width,
    showgrid_x,
    showgrid_y,
    height,
    width,
    tasks=None,
    task_names=None,
    data=None,
    group_tasks=False,
    show_hover_fill=True,
):
    """
    Build the gantt figure when `colors` is a dict mapping every value of
    the index column to a color.

    Refer to FigureFactory.create_gantt() for docstring
    """
    # Mutable accumulators default to fresh lists per call.
    if tasks is None:
        tasks = []
    if task_names is None:
        task_names = []
    if data is None:
        data = []
    showlegend = False

    # One task dict per chart row: span [Start, Finish] plus optional hover text.
    for index in range(len(chart)):
        task = dict(
            x0=chart[index]["Start"],
            x1=chart[index]["Finish"],
            name=chart[index]["Task"],
        )
        if "Description" in chart[index]:
            task["description"] = chart[index]["Description"]
        tasks.append(task)

    # create a scatter trace for every task group
    scatter_data_dict = dict()
    # create scatter traces for the start- and endpoints
    marker_data_dict = dict()

    if show_hover_fill:
        hoverinfo = "name"
    else:
        hoverinfo = "skip"

    scatter_data_template = {
        "x": [],
        "y": [],
        "mode": "none",
        "fill": "toself",
        "hoverinfo": hoverinfo,
        "legendgroup": "",
    }

    marker_data_template = {
        "x": [],
        "y": [],
        "mode": "markers",
        "text": [],
        "marker": dict(color="", size=1, opacity=0),
        "name": "",
        "showlegend": False,
    }

    # Collect the distinct index values in sorted order.
    index_vals = []
    for row in range(len(tasks)):
        if chart[row][index_col] not in index_vals:
            index_vals.append(chart[row][index_col])

    index_vals.sort()

    # verify each value in index column appears in colors dictionary
    for key in index_vals:
        if key not in colors:
            raise exceptions.PlotlyError(
                "If you are using colors as a dictionary, all of its "
                "keys must be all the values in the index column."
            )

    # create the list of task names
    for index in range(len(tasks)):
        tn = tasks[index]["name"]
        # Is added to task_names if group_tasks is set to False,
        # or if the option is used (True) it only adds them if the
        # name is not already in the list
        if not group_tasks or tn not in task_names:
            task_names.append(tn)
    # Guarantees that for grouped tasks the tasks that are inserted first
    # are shown at the top
    if group_tasks:
        task_names.reverse()

    for index in range(len(tasks)):
        tn = tasks[index]["name"]
        del tasks[index]["name"]

        # If group_tasks is True, all tasks with the same name belong
        # to the same row.
        groupID = index
        if group_tasks:
            groupID = task_names.index(tn)
        tasks[index]["y0"] = groupID - bar_width
        tasks[index]["y1"] = groupID + bar_width

        # The bar color is looked up directly from the user-supplied mapping.
        tasks[index]["fillcolor"] = colors[chart[index][index_col]]
        color_id = tasks[index]["fillcolor"]

        if color_id not in scatter_data_dict:
            scatter_data_dict[color_id] = copy.deepcopy(scatter_data_template)

        scatter_data_dict[color_id]["legendgroup"] = color_id
        scatter_data_dict[color_id]["fillcolor"] = color_id

        # if there are already values append the gap
        if len(scatter_data_dict[color_id]["x"]) > 0:
            # a gap on the scatterplot separates the rectangles from each other
            scatter_data_dict[color_id]["x"].append(
                scatter_data_dict[color_id]["x"][-1]
            )
            scatter_data_dict[color_id]["y"].append(None)

        xs, ys = _get_corner_points(
            tasks[index]["x0"],
            tasks[index]["y0"],
            tasks[index]["x1"],
            tasks[index]["y1"],
        )
        scatter_data_dict[color_id]["x"] += xs
        scatter_data_dict[color_id]["y"] += ys

        # append dummy markers for showing start and end of interval
        if color_id not in marker_data_dict:
            marker_data_dict[color_id] = copy.deepcopy(marker_data_template)
            marker_data_dict[color_id]["marker"]["color"] = color_id
            marker_data_dict[color_id]["legendgroup"] = color_id

        marker_data_dict[color_id]["x"].append(tasks[index]["x0"])
        marker_data_dict[color_id]["x"].append(tasks[index]["x1"])
        marker_data_dict[color_id]["y"].append(groupID)
        marker_data_dict[color_id]["y"].append(groupID)

        # Hover text (if any) is duplicated: one entry per endpoint marker.
        if "description" in tasks[index]:
            marker_data_dict[color_id]["text"].append(tasks[index]["description"])
            marker_data_dict[color_id]["text"].append(tasks[index]["description"])
            del tasks[index]["description"]
        else:
            marker_data_dict[color_id]["text"].append(None)
            marker_data_dict[color_id]["text"].append(None)

    if show_colorbar is True:
        showlegend = True

    # Name each trace after its index value so the legend is meaningful.
    for index_value in index_vals:
        scatter_data_dict[colors[index_value]]["name"] = str(index_value)

    layout = dict(
        title=title,
        showlegend=showlegend,
        height=height,
        width=width,
        shapes=[],
        hovermode="closest",
        yaxis=dict(
            showgrid=showgrid_y,
            ticktext=task_names,
            tickvals=list(range(len(task_names))),
            range=[-1, len(task_names) + 1],
            autorange=False,
            zeroline=False,
        ),
        xaxis=dict(
            showgrid=showgrid_x,
            zeroline=False,
            rangeselector=dict(
                buttons=list(
                    [
                        dict(count=7, label="1w", step="day", stepmode="backward"),
                        dict(count=1, label="1m", step="month", stepmode="backward"),
                        dict(count=6, label="6m", step="month", stepmode="backward"),
                        dict(count=1, label="YTD", step="year", stepmode="todate"),
                        dict(count=1, label="1y", step="year", stepmode="backward"),
                        dict(step="all"),
                    ]
                )
            ),
            type="date",
        ),
    )

    # NOTE(review): `data` is rebound here, so any caller-supplied `data`
    # argument is ignored from this point on.
    data = [scatter_data_dict[k] for k in sorted(scatter_data_dict)]
    data += [marker_data_dict[k] for k in sorted(marker_data_dict)]

    # fig = dict(
    #     data=data, layout=layout
    # )
    fig = go.Figure(data=data, layout=layout)
    return fig
def create_gantt(
    df,
    colors=None,
    index_col=None,
    show_colorbar=False,
    reverse_colors=False,
    title="Gantt Chart",
    bar_width=0.2,
    showgrid_x=False,
    showgrid_y=False,
    height=600,
    width=None,
    tasks=None,
    task_names=None,
    data=None,
    group_tasks=False,
    show_hover_fill=True,
):
    """
    **deprecated**, use instead
    :func:`plotly.express.timeline`.

    Returns figure for a gantt chart

    :param (array|list) df: input data for gantt chart. Must be either a
        a dataframe or a list. If dataframe, the columns must include
        'Task', 'Start' and 'Finish'. Other columns can be included and
        used for indexing. If a list, its elements must be dictionaries
        with the same required column headers: 'Task', 'Start' and
        'Finish'.
    :param (str|list|dict|tuple) colors: either a plotly scale name, an
        rgb or hex color, a color tuple or a list of colors. An rgb color
        is of the form 'rgb(x, y, z)' where x, y, z belong to the interval
        [0, 255] and a color tuple is a tuple of the form (a, b, c) where
        a, b and c belong to [0, 1]. If colors is a list, it must
        contain the valid color types aforementioned as its members.
        If a dictionary, all values of the indexing column must be keys in
        colors.
    :param (str|float) index_col: the column header (if df is a data
        frame) that will function as the indexing column. If df is a list,
        index_col must be one of the keys in all the items of df.
    :param (bool) show_colorbar: determines if colorbar will be visible.
        Only applies if values in the index column are numeric.
    :param (bool) show_hover_fill: enables/disables the hovertext for the
        filled area of the chart.
    :param (bool) reverse_colors: reverses the order of selected colors
        (ignored when colors is a dictionary, where order has no meaning)
    :param (str) title: the title of the chart
    :param (float) bar_width: the width of the horizontal bars in the plot
    :param (bool) showgrid_x: show/hide the x-axis grid
    :param (bool) showgrid_y: show/hide the y-axis grid
    :param (float) height: the height of the chart
    :param (float) width: the width of the chart

    Example 1: Simple Gantt Chart

    >>> from plotly.figure_factory import create_gantt
    >>> # Make data for chart
    >>> df = [dict(Task="Job A", Start='2009-01-01', Finish='2009-02-30'),
    ...       dict(Task="Job B", Start='2009-03-05', Finish='2009-04-15'),
    ...       dict(Task="Job C", Start='2009-02-20', Finish='2009-05-30')]
    >>> # Create a figure
    >>> fig = create_gantt(df)
    >>> fig.show()

    Example 2: Index by Column with Numerical Entries

    >>> from plotly.figure_factory import create_gantt
    >>> # Make data for chart
    >>> df = [dict(Task="Job A", Start='2009-01-01',
    ...            Finish='2009-02-30', Complete=10),
    ...       dict(Task="Job B", Start='2009-03-05',
    ...            Finish='2009-04-15', Complete=60),
    ...       dict(Task="Job C", Start='2009-02-20',
    ...            Finish='2009-05-30', Complete=95)]
    >>> # Create a figure with Plotly colorscale
    >>> fig = create_gantt(df, colors='Blues', index_col='Complete',
    ...                    show_colorbar=True, bar_width=0.5,
    ...                    showgrid_x=True, showgrid_y=True)
    >>> fig.show()

    Example 3: Index by Column with String Entries

    >>> from plotly.figure_factory import create_gantt
    >>> # Make data for chart
    >>> df = [dict(Task="Job A", Start='2009-01-01',
    ...            Finish='2009-02-30', Resource='Apple'),
    ...       dict(Task="Job B", Start='2009-03-05',
    ...            Finish='2009-04-15', Resource='Grape'),
    ...       dict(Task="Job C", Start='2009-02-20',
    ...            Finish='2009-05-30', Resource='Banana')]
    >>> # Create a figure with Plotly colorscale
    >>> fig = create_gantt(df, colors=['rgb(200, 50, 25)', (1, 0, 1), '#6c4774'],
    ...                    index_col='Resource', reverse_colors=True,
    ...                    show_colorbar=True)
    >>> fig.show()

    Example 4: Use a dictionary for colors

    >>> from plotly.figure_factory import create_gantt
    >>> # Make data for chart
    >>> df = [dict(Task="Job A", Start='2009-01-01',
    ...            Finish='2009-02-30', Resource='Apple'),
    ...       dict(Task="Job B", Start='2009-03-05',
    ...            Finish='2009-04-15', Resource='Grape'),
    ...       dict(Task="Job C", Start='2009-02-20',
    ...            Finish='2009-05-30', Resource='Banana')]
    >>> # Make a dictionary of colors
    >>> colors = {'Apple': 'rgb(255, 0, 0)',
    ...           'Grape': 'rgb(170, 14, 200)',
    ...           'Banana': (1, 1, 0.2)}
    >>> # Create a figure with Plotly colorscale
    >>> fig = create_gantt(df, colors=colors, index_col='Resource',
    ...                    show_colorbar=True)
    >>> fig.show()

    Example 5: Use a pandas dataframe

    >>> from plotly.figure_factory import create_gantt
    >>> import pandas as pd
    >>> # Make data as a dataframe
    >>> df = pd.DataFrame([['Run', '2010-01-01', '2011-02-02', 10],
    ...                    ['Fast', '2011-01-01', '2012-06-05', 55],
    ...                    ['Eat', '2012-01-05', '2013-07-05', 94]],
    ...                   columns=['Task', 'Start', 'Finish', 'Complete'])
    >>> # Create a figure with Plotly colorscale
    >>> fig = create_gantt(df, colors='Blues', index_col='Complete',
    ...                    show_colorbar=True, bar_width=0.5,
    ...                    showgrid_x=True, showgrid_y=True)
    >>> fig.show()
    """
    # validate gantt input data; normalizes a DataFrame to a list of row dicts
    chart = validate_gantt(df)

    if index_col:
        if index_col not in chart[0]:
            raise exceptions.PlotlyError(
                "In order to use an indexing column and assign colors to "
                "the values of the index, you must choose an actual "
                "column name in the dataframe or key if a list of "
                "dictionaries is being used."
            )

        # validate gantt index column
        index_list = []
        for dictionary in chart:
            index_list.append(dictionary[index_col])
        utils.validate_index(index_list)

    # Validate colors
    if isinstance(colors, dict):
        colors = clrs.validate_colors_dict(colors, "rgb")
    else:
        colors = clrs.validate_colors(colors, "rgb")

    # Bugfix: dicts have no reverse() method, so reverse_colors=True with a
    # color dictionary used to raise AttributeError here. Reversing only has
    # meaning for an ordered color list/scale, so it is skipped for dicts.
    if reverse_colors is True and not isinstance(colors, dict):
        colors.reverse()

    # Dispatch to the builder matching the (index_col, colors-type) combination.
    # NOTE: the caller-supplied tasks/task_names/data arguments are not
    # forwarded; each builder starts from fresh accumulators.
    if not index_col:
        if isinstance(colors, dict):
            raise exceptions.PlotlyError(
                "Error. You have set colors to a dictionary but have not "
                "picked an index. An index is required if you are "
                "assigning colors to particular values in a dictionary."
            )
        fig = gantt(
            chart,
            colors,
            title,
            bar_width,
            showgrid_x,
            showgrid_y,
            height,
            width,
            tasks=None,
            task_names=None,
            data=None,
            group_tasks=group_tasks,
            show_hover_fill=show_hover_fill,
            show_colorbar=show_colorbar,
        )
        return fig
    else:
        if not isinstance(colors, dict):
            fig = gantt_colorscale(
                chart,
                colors,
                title,
                index_col,
                show_colorbar,
                bar_width,
                showgrid_x,
                showgrid_y,
                height,
                width,
                tasks=None,
                task_names=None,
                data=None,
                group_tasks=group_tasks,
                show_hover_fill=show_hover_fill,
            )
            return fig
        else:
            fig = gantt_dict(
                chart,
                colors,
                title,
                index_col,
                show_colorbar,
                bar_width,
                showgrid_x,
                showgrid_y,
                height,
                width,
                tasks=None,
                task_names=None,
                data=None,
                group_tasks=group_tasks,
                show_hover_fill=show_hover_fill,
            )
            return fig
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@figure_factory@_gantt.py@.PATH_END.py
|
{
"filename": "test_stitching.py",
"repo_name": "itseez/opencv",
"repo_path": "opencv_extracted/opencv-master/modules/stitching/misc/python/test/test_stitching.py",
"type": "Python"
}
|
#!/usr/bin/env python
import cv2 as cv
import numpy as np
from tests_common import NewOpenCVTests
class stitching_test(NewOpenCVTests):

    def test_simple(self):
        """Stitch two overlapping samples and sanity-check the panorama size."""
        images = [self.get_sample('stitching/%s' % name) for name in ('a1.png', 'a2.png')]
        stitcher = cv.Stitcher.create(cv.Stitcher_PANORAMA)
        _result, pano = stitcher.stitch(tuple(images))
        # Stitching is not bit-exact across platforms, so only the rough
        # output size is checked.
        rows, cols = pano.shape[0], pano.shape[1]
        self.assertAlmostEqual(rows, 685, delta=100, msg="rows: %r" % list(pano.shape))
        self.assertAlmostEqual(cols, 1025, delta=100, msg="cols: %r" % list(pano.shape))
class stitching_detail_test(NewOpenCVTests):

    def test_simple(self):
        """Smoke-test construction of the detail-API building blocks exposed to Python."""
        img = self.get_sample('stitching/a1.png')

        # Feature detection / description
        finder = cv.ORB.create()
        imgFea = cv.detail.computeImageFeatures2(finder, img)
        self.assertIsNotNone(imgFea)
        # Added Test for PR #21180
        self.assertIsNotNone(imgFea.keypoints)

        # Feature matchers
        matcher = cv.detail_BestOf2NearestMatcher(False, 0.3)
        self.assertIsNotNone(matcher)
        matcher = cv.detail_AffineBestOf2NearestMatcher(False, False, 0.3)
        self.assertIsNotNone(matcher)
        matcher = cv.detail_BestOf2NearestRangeMatcher(2, False, 0.3)
        self.assertIsNotNone(matcher)

        # Camera parameter estimators
        estimator = cv.detail_AffineBasedEstimator()
        self.assertIsNotNone(estimator)
        estimator = cv.detail_HomographyBasedEstimator()
        self.assertIsNotNone(estimator)

        # Bundle adjusters
        adjuster = cv.detail_BundleAdjusterReproj()
        self.assertIsNotNone(adjuster)
        adjuster = cv.detail_BundleAdjusterRay()
        self.assertIsNotNone(adjuster)
        adjuster = cv.detail_BundleAdjusterAffinePartial()
        self.assertIsNotNone(adjuster)
        adjuster = cv.detail_NoBundleAdjuster()
        self.assertIsNotNone(adjuster)

        # Exposure compensators
        compensator = cv.detail.ExposureCompensator_createDefault(cv.detail.ExposureCompensator_NO)
        self.assertIsNotNone(compensator)
        compensator = cv.detail.ExposureCompensator_createDefault(cv.detail.ExposureCompensator_GAIN)
        self.assertIsNotNone(compensator)
        compensator = cv.detail.ExposureCompensator_createDefault(cv.detail.ExposureCompensator_GAIN_BLOCKS)
        self.assertIsNotNone(compensator)

        # Seam finders (a duplicated SeamFinder_NO check was removed)
        seam_finder = cv.detail.SeamFinder_createDefault(cv.detail.SeamFinder_NO)
        self.assertIsNotNone(seam_finder)
        seam_finder = cv.detail.SeamFinder_createDefault(cv.detail.SeamFinder_VORONOI_SEAM)
        self.assertIsNotNone(seam_finder)
        seam_finder = cv.detail_GraphCutSeamFinder("COST_COLOR")
        self.assertIsNotNone(seam_finder)
        seam_finder = cv.detail_GraphCutSeamFinder("COST_COLOR_GRAD")
        self.assertIsNotNone(seam_finder)
        seam_finder = cv.detail_DpSeamFinder("COLOR")
        self.assertIsNotNone(seam_finder)
        seam_finder = cv.detail_DpSeamFinder("COLOR_GRAD")
        self.assertIsNotNone(seam_finder)

        # Blenders
        blender = cv.detail.Blender_createDefault(cv.detail.Blender_NO)
        self.assertIsNotNone(blender)
        blender = cv.detail.Blender_createDefault(cv.detail.Blender_FEATHER)
        self.assertIsNotNone(blender)
        blender = cv.detail.Blender_createDefault(cv.detail.Blender_MULTI_BAND)
        self.assertIsNotNone(blender)

        # Timelapsers
        timelapser = cv.detail.Timelapser_createDefault(cv.detail.Timelapser_AS_IS)
        self.assertIsNotNone(timelapser)
        timelapser = cv.detail.Timelapser_createDefault(cv.detail.Timelapser_CROP)
        self.assertIsNotNone(timelapser)
class stitching_compose_panorama_test_no_args(NewOpenCVTests):
    """composePanorama() without arguments reuses the images given to estimateTransform()."""

    def test_simple(self):
        img1 = self.get_sample('stitching/a1.png')
        img2 = self.get_sample('stitching/a2.png')
        stitcher = cv.Stitcher.create(cv.Stitcher_PANORAMA)
        stitcher.estimateTransform((img1, img2))
        result, _ = stitcher.composePanorama()
        # Stitcher_OK == 0
        assert result == 0
class stitching_compose_panorama_args(NewOpenCVTests):
    """composePanorama() accepting an explicit image tuple."""

    def test_simple(self):
        img1 = self.get_sample('stitching/a1.png')
        img2 = self.get_sample('stitching/a2.png')
        stitcher = cv.Stitcher.create(cv.Stitcher_PANORAMA)
        stitcher.estimateTransform((img1, img2))
        result, _ = stitcher.composePanorama((img1, img2))
        # Stitcher_OK == 0
        assert result == 0
class stitching_matches_info_test(NewOpenCVTests):
    """detail.BestOf2NearestMatcher.apply returns a populated MatchesInfo."""

    def test_simple(self):
        finder = cv.ORB.create()
        img1 = self.get_sample('stitching/a1.png')
        img2 = self.get_sample('stitching/a2.png')
        img_feat1 = cv.detail.computeImageFeatures2(finder, img1)
        img_feat2 = cv.detail.computeImageFeatures2(finder, img2)
        matcher = cv.detail.BestOf2NearestMatcher_create()
        matches_info = matcher.apply(img_feat1, img_feat2)
        self.assertIsNotNone(matches_info.matches)
        self.assertIsNotNone(matches_info.inliers_mask)
class stitching_range_matcher_test(NewOpenCVTests):
    """BestOf2NearestRangeMatcher only matches images within range_width of each other."""

    def test_simple(self):
        images = [
            self.get_sample('stitching/a1.png'),
            self.get_sample('stitching/a2.png'),
            self.get_sample('stitching/a3.png')
        ]
        orb = cv.ORB_create()
        features = [cv.detail.computeImageFeatures2(orb, img) for img in images]
        matcher = cv.detail_BestOf2NearestRangeMatcher(range_width=1)
        matches = matcher.apply2(features)
        # matches[1] is image 0 and image 1, should have non-zero confidence
        self.assertNotEqual(matches[1].confidence, 0)
        # matches[2] is image 0 and image 2, should have zero confidence due to range_width=1
        self.assertEqual(matches[2].confidence, 0)
class stitching_seam_finder_graph_cuts(NewOpenCVTests):
    """GraphCutSeamFinder.find runs on float32 images and returns warped masks."""

    def test_simple(self):
        images = [
            self.get_sample('stitching/a1.png'),
            self.get_sample('stitching/a2.png'),
            self.get_sample('stitching/a3.png')
        ]
        # shrink inputs so the graph cut stays fast
        images = [cv.resize(img, [100, 100]) for img in images]
        finder = cv.detail_GraphCutSeamFinder('COST_COLOR_GRAD')
        masks = [cv.UMat(255 * np.ones((img.shape[0], img.shape[1]), np.uint8)) for img in images]
        images_f = [img.astype(np.float32) for img in images]
        masks_warped = finder.find(images_f, [(0, 0), (75, 0), (150, 0)], masks)
        self.assertIsNotNone(masks_warped)
if __name__ == '__main__':
    # standard OpenCV python-test entry point
    NewOpenCVTests.bootstrap()
|
itseezREPO_NAMEopencvPATH_START.@opencv_extracted@opencv-master@modules@stitching@misc@python@test@test_stitching.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/layout/annotation/hoverlabel/__init__.py",
"type": "Python"
}
|
import sys
from typing import TYPE_CHECKING

# Auto-generated validator re-exports.  On Python >= 3.7 (when not type
# checking) the submodule imports are deferred via relative_import to keep
# `import plotly` fast; older Pythons (and type checkers) import eagerly.
if sys.version_info < (3, 7) or TYPE_CHECKING:
    from ._font import FontValidator
    from ._bordercolor import BordercolorValidator
    from ._bgcolor import BgcolorValidator
else:
    from _plotly_utils.importers import relative_import

    __all__, __getattr__, __dir__ = relative_import(
        __name__,
        [],
        [
            "._font.FontValidator",
            "._bordercolor.BordercolorValidator",
            "._bgcolor.BgcolorValidator",
        ],
    )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@layout@annotation@hoverlabel@__init__.py@.PATH_END.py
|
{
"filename": "do_llh_inFoV4pc_pix.py",
"repo_name": "Swift-BAT/NITRATES",
"repo_path": "NITRATES_extracted/NITRATES-main/nitrates/archive/do_llh_inFoV4pc_pix.py",
"type": "Python"
}
|
import numpy as np
from astropy.io import fits
from astropy.table import Table
from astropy.wcs import WCS
import os
import argparse
import logging, traceback
import time
import pandas as pd
from ..analysis_seeds.bkg_rate_estimation import rate_obj_from_sqltab
from ..lib.sqlite_funcs import (
get_conn,
write_result,
write_results,
timeID2time_dur,
write_results_fromSigImg,
update_square_stat,
write_square_res_line,
write_square_results,
)
from ..lib.dbread_funcs import (
get_rate_fits_tab,
guess_dbfname,
get_seeds_tab,
get_info_tab,
get_files_tab,
get_square_tab,
get_full_sqlite_table_as_df,
)
from ..config import EBINS0, EBINS1, solid_angle_dpi_fname, fp_dir, rt_dir
from ..models.flux_models import Plaw_Flux, Cutoff_Plaw_Flux
from ..llh_analysis.minimizers import (
NLLH_ScipyMinimize_Wjacob,
imxy_grid_miner,
NLLH_ScipyMinimize,
)
# from ..lib.drm_funcs import DRMs
from ..response.ray_trace_funcs import RayTraces, FootPrints
from ..llh_analysis.LLH import LLH_webins
# from do_InFoV_scan3 import Swift_Mask_Interactions, Source_Model_InFoV, Bkg_Model_wFlatA,\
# CompoundModel, Point_Source_Model_Binned_Rates,\
# theta_phi2imxy, bldmask2batxys, imxy2theta_phi,\
# get_fixture_struct, LLH_webins
from ..models.models import (
CompoundModel,
Point_Source_Model_Binned_Rates,
Bkg_Model_wFlatA,
Source_Model_InFoV,
Source_Model_InOutFoV,
)
from ..lib.coord_conv_funcs import theta_phi2imxy, imxy2theta_phi
from ..lib.gti_funcs import mk_gti_bl, union_gtis
# need to read rate fits from DB
# and read twinds
# and read/get event, dmask, and ebins
# then get bkg_llh_obj and a minimizer
# then loop over all time windows
# minimizing nllh and recording bf params
def cli():
    """Build the argument parser for this script and parse sys.argv.

    Returns:
        argparse.Namespace with the parsed command-line options.
    """
    p = argparse.ArgumentParser()
    # input data files
    p.add_argument("--evfname", type=str, help="Event data file", default=None)
    p.add_argument("--dmask", type=str, help="Detmask fname", default=None)
    # job bookkeeping / work splitting
    p.add_argument("--job_id", type=int, help="ID to tell it what seeds to do", default=-1)
    p.add_argument("--square_id", type=int, help="squareID to do if job_id is < 0", default=-1)
    p.add_argument("--Njobs", type=int, help="Total number of jobs submitted", default=64)
    # auxiliary inputs
    p.add_argument("--dbfname", type=str, help="Name to save the database to", default=None)
    p.add_argument("--rt_dir", type=str, help="Directory with ray traces", default=None)
    p.add_argument("--pcfname", type=str, help="partial coding file name", default="pc_2.img")
    p.add_argument(
        "--job_fname",
        type=str,
        help="File name for table with what imx/y square for each job",
        default="rate_seeds.csv",
    )
    p.add_argument("--rate_fname", type=str, help="Rate results file name", default="rate_seeds.csv")
    p.add_argument(
        "--bkg_fname",
        type=str,
        help="Name of the file with the bkg fits",
        default="bkg_estimation.csv",
    )
    p.add_argument(
        "--pix_fname",
        type=str,
        help="Name of the file with good imx/y coordinates",
        default="good_pix2scan.npy",
    )
    p.add_argument(
        "--log_fname",
        type=str,
        help="Name for the log file",
        default="llh_analysis_from_rate_seeds",
    )
    p.add_argument("--min_pc", type=float, help="Min partical coding fraction to use", default=0.01)
    return p.parse_args()
def im_dist(imx0, imy0, imx1, imy1):
    """Euclidean separation between (imx0, imy0) and (imx1, imy1) in image coords.

    Works element-wise when the inputs are numpy arrays.
    """
    dx = imx0 - imx1
    dy = imy0 - imy1
    return np.hypot(dx, dy)
def parse_bkg_csv(bkg_fname, solid_angle_dpi, ebins0, ebins1, bl_dmask, rt_dir):
    """Read a background-fit CSV and build the background model objects.

    Point sources are identified by CSV columns named '<name>_imx'.

    Args:
        bkg_fname: path to the background-fit CSV (one row per time bin).
        solid_angle_dpi: detector-plane solid-angle image for the flat model.
        ebins0, ebins1: lower/upper energy-bin edges.
        bl_dmask: boolean detector mask.
        rt_dir: ray-trace directory (only read when point sources exist).

    Returns:
        Tuple (bkg_df, bkg_name, PSnames, bkg_mod, ps_mods): the CSV as a
        DataFrame, the name prefix of the diffuse background inside a
        compound model ('' when there are no point sources), the point-source
        names, the diffuse background model, and the point-source models.
    """
    bkg_df = pd.read_csv(bkg_fname)
    col_names = bkg_df.columns
    nebins = len(ebins0)

    # every '<name>_imx' column marks a known point source called '<name>'
    PSnames = []
    for name in col_names:
        if "_imx" in name:
            PSnames.append(name.split("_")[0])
    print(PSnames)
    Nsrcs = len(PSnames)

    # NOTE(review): with point sources present the diffuse component is
    # addressed as 'Background_'; confirm the trailing underscore matches the
    # compound-model parameter naming used downstream (callers append '_').
    if Nsrcs > 0:
        bkg_name = "Background_"
    else:
        bkg_name = ""

    bkg_mod = Bkg_Model_wFlatA(bl_dmask, solid_angle_dpi, nebins, use_deriv=True)

    ps_mods = []
    if Nsrcs > 0:
        rt_obj = RayTraces(rt_dir)
        for i in range(Nsrcs):
            name = PSnames[i]
            # point-source position comes from the first CSV row
            imx = bkg_df[name + "_imx"][0]
            imy = bkg_df[name + "_imy"][0]
            mod = Point_Source_Model_Binned_Rates(
                imx,
                imy,
                0.1,
                [ebins0, ebins1],
                rt_obj,
                bl_dmask,
                use_deriv=True,
                name=name,
            )
            ps_mods.append(mod)

    return bkg_df, bkg_name, PSnames, bkg_mod, ps_mods
def find_peaks2scan(
    res_df,
    max_dv=10.0,
    min_sep=9e-3,
    max_Npeaks=6,
    min_Npeaks=2,
    minTS=6.0,
    nllh_name="nllh",
):
    """Pick well-separated local NLLH minima per time bin to seed the fine scan.

    For each timeID whose max TS >= ``minTS``, rows are walked in order of
    increasing NLLH; a row becomes a peak unless it lies within ``min_sep``
    (image-plane distance) of an already-accepted peak.  Selection stops once
    the NLLH exceeds the minimum by ``max_dv`` (after at least ``min_Npeaks``
    peaks were taken) or ``max_Npeaks`` is reached.

    Args:
        res_df: scan results with columns 'timeID', 'TS', 'time', 'dur',
            'imx', 'imy', 'A', 'Epeak', 'gamma', 'bkg_nllh' and ``nllh_name``.
        max_dv: NLLH range above the per-bin minimum still considered.
        min_sep: minimum image-plane separation between accepted peaks.
        max_Npeaks, min_Npeaks: bounds on the peaks kept per time bin.
        minTS: minimum TS for a time bin to be considered at all.
        nllh_name: column holding the NLLH values to rank by.

    Returns:
        pandas.DataFrame with one row per accepted peak.  NOTE(review):
        pd.concat raises when *no* time bin passes ``minTS`` -- callers gate
        on a TS threshold first.
    """
    tgrps = res_df.groupby("timeID")

    peak_dfs = []

    for timeID, df_ in tgrps:
        # skip time bins with nothing significant
        if np.nanmax(df_["TS"]) < minTS:
            continue

        df = df_.sort_values(nllh_name)
        vals = df[nllh_name]
        # ind_sort = np.argsort(vals)

        min_val = np.nanmin(df[nllh_name])

        peak_dict = {
            "timeID": int(timeID),
            "time": np.nanmean(df["time"]),
            "dur": np.nanmean(df["dur"]),
        }

        # accumulators for the accepted peaks of this time bin
        imxs_ = np.empty(0)
        imys_ = np.empty_like(imxs_)
        As_ = np.empty_like(imxs_)
        Eps_ = np.empty_like(imxs_)
        Gs_ = np.empty_like(imxs_)
        bkg_nllhs = np.empty_like(imxs_)
        nllhs = np.empty_like(imxs_)
        TSs = np.empty_like(imxs_)

        for row_ind, row in df.iterrows():
            if row[nllh_name] > (min_val + max_dv) and len(imxs_) >= min_Npeaks:
                break
            if len(imxs_) >= max_Npeaks:
                break
            if len(imxs_) > 0:
                # reject rows too close to an already-accepted peak
                imdist = np.min(im_dist(row["imx"], row["imy"], imxs_, imys_))
                if imdist <= min_sep:
                    continue

            imxs_ = np.append(imxs_, [row["imx"]])
            imys_ = np.append(imys_, [row["imy"]])
            As_ = np.append(As_, [row["A"]])
            Eps_ = np.append(Eps_, [row["Epeak"]])
            Gs_ = np.append(Gs_, [row["gamma"]])
            bkg_nllhs = np.append(bkg_nllhs, [row["bkg_nllh"]])
            nllhs = np.append(nllhs, [row["nllh"]])
            TSs = np.append(TSs, [row["TS"]])

        peak_dict["imx"] = imxs_
        peak_dict["imy"] = imys_
        peak_dict["A"] = As_
        peak_dict["Epeak"] = Eps_
        peak_dict["gamma"] = Gs_
        peak_dict["nllh"] = nllhs
        peak_dict["bkg_nllh"] = bkg_nllhs
        peak_dict["TS"] = TSs

        peak_dfs.append(pd.DataFrame(peak_dict))

    peaks_df = pd.concat(peak_dfs, ignore_index=True)
    return peaks_df
def do_scan_around_peak(
    peak_row,
    bkg_bf_params,
    bkg_name,
    sig_miner,
    sig_llh_obj,
    sig_mod,
    imstep=2e-3,
    dimx=2e-3,
    dimy=2e-3,
    dgamma=0.2,
    dlog10Ep=0.2,
    gam_steps=3,
    Ep_steps=3,
):
    """Refine the likelihood fit on a fine grid around a candidate peak.

    Scans a small imx/imy grid (half-widths ``dimx``/``dimy``, spacing
    ``imstep``) and a small Epeak/gamma spectral grid centered on the values
    in ``peak_row``, minimizing the NLLH in the signal amplitude at each
    grid point.

    Args:
        peak_row: mapping (e.g. DataFrame row) with keys 'time', 'dur',
            'timeID', 'imx', 'imy', 'Epeak', 'gamma' and 'bkg_nllh'.
        bkg_bf_params: dict of best-fit background parameter values.
        bkg_name: name prefix of the background model inside the compound model.
        sig_miner, sig_llh_obj, sig_mod: pre-configured minimizer, likelihood
            object and signal model (project types).
        imstep, dimx, dimy: image-plane grid spacing and half-widths.
        dgamma, dlog10Ep: half-widths of the spectral grid (Epeak in log10).
        gam_steps, Ep_steps: grid points per spectral axis.

    Returns:
        pandas.DataFrame with one row per (imxy, spectrum) grid point,
        holding the best-fit amplitude 'A', 'nllh' and 'TS' (NaN TS -> 0).
    """
    flux_params = {"A": 1.0, "Epeak": 150.0, "gamma": 0.5}

    # restrict the likelihood to this candidate's time window
    t1 = peak_row["time"] + peak_row["dur"]
    sig_llh_obj.set_time(peak_row["time"], t1)

    # freeze the background at its best-fit values
    parss = {}
    for pname, val in bkg_bf_params.items():
        parss[bkg_name + "_" + pname] = val
    sig_miner.set_fixed_params(list(parss.keys()), values=list(parss.values()))

    # image-plane grid centered on the peak
    imxax = np.arange(-dimx, dimx + (imstep / 2.0), imstep) + peak_row["imx"]
    imyax = np.arange(-dimy, dimy + (imstep / 2.0), imstep) + peak_row["imy"]
    imxg, imyg = np.meshgrid(imxax, imyax)
    imxs = imxg.ravel()
    imys = imyg.ravel()
    thetas, phis = imxy2theta_phi(imxs, imys)
    N_impnts = len(imxs)
    # BUGFIX: logging uses lazy %-style formatting, not print-style varargs;
    # logging.info("N_impnts: ", N_impnts) raised internal formatting errors.
    logging.info("N_impnts: %s", N_impnts)

    # spectral grid centered (logarithmically in Epeak) on the peak values
    Epeak_ax = np.logspace(
        np.log10(peak_row["Epeak"]) - dlog10Ep,
        np.log10(peak_row["Epeak"]) + dlog10Ep,
        Ep_steps,
    )
    gamma_ax = np.linspace(
        peak_row["gamma"] - dgamma, peak_row["gamma"] + dgamma, gam_steps
    )
    gammas, Epeaks = np.meshgrid(gamma_ax, Epeak_ax)
    gammas = gammas.ravel()
    Epeaks = Epeaks.ravel()
    Nspec_pnts = len(Epeaks)
    # BUGFIX: same print-style-varargs misuse as above
    logging.info("Nspec_pnts: %s", Nspec_pnts)
    logging.info("Epeak_ax: ")
    logging.info(Epeak_ax)
    logging.info("gammas_ax: ")
    logging.info(gamma_ax)

    res_dfs = []

    for ii in range(N_impnts):
        print(imxs[ii], imys[ii])
        print(thetas[ii], phis[ii])
        sig_miner.set_fixed_params(
            ["Signal_theta", "Signal_phi"], values=[thetas[ii], phis[ii]]
        )

        res_dict = {}
        res_dict["imx"] = imxs[ii]
        res_dict["imy"] = imys[ii]
        res_dict["theta"] = thetas[ii]
        res_dict["phi"] = phis[ii]
        res_dict["Epeak"] = Epeaks
        res_dict["gamma"] = gammas
        res_dict["time"] = peak_row["time"]
        res_dict["dur"] = peak_row["dur"]
        res_dict["timeID"] = peak_row["timeID"]
        res_dict["bkg_nllh"] = peak_row["bkg_nllh"]

        nllhs = np.zeros(Nspec_pnts)
        As = np.zeros(Nspec_pnts)

        for jj in range(Nspec_pnts):
            flux_params["gamma"] = gammas[jj]
            flux_params["Epeak"] = Epeaks[jj]
            sig_mod.set_flux_params(flux_params)
            try:
                pars, nllh, res = sig_miner.minimize()
                As[jj] = pars[0][0]
                nllhs[jj] = nllh[0]
            except Exception as E:
                # a failed minimization poisons only this grid point
                logging.error(E)
                logging.error(traceback.format_exc())
                logging.error("Failed to minimize seed: ")
                logging.error((imxs[ii], imys[ii]))
                nllhs[jj] = np.nan

        res_dict["nllh"] = nllhs
        res_dict["A"] = As
        # TS is NaN where the fit failed (or bkg_nllh < nllh); zeroed below
        res_dict["TS"] = np.sqrt(2 * (res_dict["bkg_nllh"] - nllhs))
        res_dfs.append(pd.DataFrame(res_dict))

    res_df = pd.concat(res_dfs, ignore_index=True)
    # BUGFIX: use .loc instead of chained indexing so the NaN->0 replacement
    # is guaranteed to modify res_df (chained assignment may act on a copy)
    res_df.loc[np.isnan(res_df["TS"]), "TS"] = 0.0
    return res_df
def analysis_for_imxy_square(
    imx0,
    imx1,
    imy0,
    imy1,
    bkg_bf_params_list,
    bkg_mod,
    flux_mod,
    ev_data,
    ebins0,
    ebins1,
    tbins0,
    tbins1,
    timeIDs,
    TS2keep=4.5,
    max_frac2keep=0.75,
):
    """LLH scan over an imx/imy square, with the pixel grid generated internally.

    NOTE(review): this definition is dead code -- it is shadowed by the second
    ``analysis_for_imxy_square`` defined later in this module, which takes
    explicit pixel lists instead of square corners.

    Args:
        imx0, imx1, imy0, imy1: square corners in image coordinates.
        bkg_bf_params_list: per-time-bin dicts of background best-fit values.
        bkg_mod: background model (provides ``bl_dmask`` and its ``name``).
        flux_mod: spectral flux model for the signal.
        ev_data: event table with a 'TIME' column.
        ebins0, ebins1: energy-bin edges.
        tbins0, tbins1: time-bin start/stop arrays.
        timeIDs: IDs matching the time bins.
        TS2keep: TS threshold for rows kept in the returned table.
        max_frac2keep: cap on the fraction of rows kept.

    Returns:
        (res_df, peak_res_df): the (thresholded) scan table and, when peaks
        with TS >= 6 exist, the fine-scan table around them (else None).
    """
    bl_dmask = bkg_mod.bl_dmask

    # dimxy = 0.0025
    dimxy = np.round(imx1 - imx0, decimals=4)
    imstep = 0.003
    imxstep = 0.004

    # imx_ax = np.arange(imx0, imx1+dimxy/2., dimxy)
    # imy_ax = np.arange(imy0, imy1+dimxy/2., dimxy)
    # imxg,imyg = np.meshgrid(imx_ax, imy_ax)

    # imx_ax = np.arange(imx0, imx1, imxstep)
    # imy_ax = np.arange(imy0, imy1, imstep)
    imx_ax = np.arange(0, dimxy, imxstep)
    imy_ax = np.arange(0, dimxy, imstep)
    imxg, imyg = np.meshgrid(imx_ax, imy_ax)
    # stagger every other row by half a step (denser, hex-like pixel packing)
    bl = np.isclose((imyg * 1e4).astype(np.int64) % int(imstep * 2 * 1e4), 0)
    imxg[bl] += imxstep / 2.0
    imxs = np.ravel(imxg) + imx0
    imys = np.ravel(imyg) + imy0

    Npnts = len(imxs)
    print(Npnts)
    logging.info("%d imxy points to do" % (Npnts))
    thetas, phis = imxy2theta_phi(imxs, imys)

    # spectral grid; later assignments intentionally overwrite the first
    gamma_ax = np.linspace(-0.4, 1.6, 8 + 1)
    gamma_ax = np.linspace(-0.4, 1.6, 4 + 1)[1:-1]
    # gamma_ax = np.array([0.4, 0.9])
    # gamma_ax = np.linspace(-0.4, 1.6, 3+1)
    Epeak_ax = np.logspace(np.log10(45.0), 3, 10 + 1)
    Epeak_ax = np.logspace(np.log10(45.0), 3, 5 + 1)[1:-1]
    Epeak_ax = np.logspace(np.log10(45.0), 3, 4 + 1)[1:-1]
    # Epeak_ax = np.logspace(np.log10(45.0), 3, 5+1)[3:]
    logging.info("Epeak_ax: ")
    logging.info(Epeak_ax)
    logging.info("gammas_ax: ")
    logging.info(gamma_ax)
    # Epeak_ax = np.logspace(np.log10(25.0), 3, 3+1)
    gammas, Epeaks = np.meshgrid(gamma_ax, Epeak_ax)
    gammas = gammas.ravel()
    Epeaks = Epeaks.ravel()
    Nspec_pnts = len(Epeaks)
    ntbins = len(tbins0)

    rt_obj = RayTraces(rt_dir)
    # fp_obj = FootPrints(fp_dir)

    sig_mod = Source_Model_InOutFoV(
        flux_mod, [ebins0, ebins1], bl_dmask, rt_obj, use_deriv=True
    )
    sig_mod.set_theta_phi(np.mean(thetas), np.mean(phis))

    comp_mod = CompoundModel([bkg_mod, sig_mod])
    sig_miner = NLLH_ScipyMinimize_Wjacob("")

    # pre-select events near the analyzed time bins to speed up the LLH
    tmin = np.min(tbins0)
    tmax = np.max(tbins1)
    if (tmax - tmin) > 40.0:
        logging.debug("tmax - tmin > 40.0s, using twinds for tbl")
        gti_dict = {"START": tbins0, "STOP": tbins1}
        gti_twinds = Table(data=gti_dict)
        gtis = union_gtis([gti_twinds])
        tbl = mk_gti_bl(ev_data["TIME"], gtis, time_pad=0.1)
        logging.debug("np.sum(tbl): %d" % (np.sum(tbl)))
    else:
        tbl = (ev_data["TIME"] >= (tmin - 1.0)) & (ev_data["TIME"] < (tmax + 1.0))
        logging.debug("np.sum(tbl): %d" % (np.sum(tbl)))

    sig_llh_obj = LLH_webins(ev_data[tbl], ebins0, ebins1, bl_dmask, has_err=True)
    sig_llh_obj.set_model(comp_mod)

    flux_params = {"A": 1.0, "gamma": 0.5, "Epeak": 1e2}

    bkg_name = bkg_mod.name

    pars_ = {}
    pars_["Signal_theta"] = np.mean(thetas)
    pars_["Signal_phi"] = np.mean(phis)
    for pname, val in bkg_bf_params_list[0].items():
        # pars_['Background_'+pname] = val
        pars_[bkg_name + "_" + pname] = val
    for pname, val in flux_params.items():
        pars_["Signal_" + pname] = val

    sig_miner.set_llh(sig_llh_obj)

    fixed_pnames = list(pars_.keys())
    fixed_vals = list(pars_.values())
    trans = [None for i in range(len(fixed_pnames))]
    sig_miner.set_trans(fixed_pnames, trans)
    sig_miner.set_fixed_params(fixed_pnames, values=fixed_vals)
    # only the signal amplitude is left free in the fit
    sig_miner.set_fixed_params(["Signal_A"], fixed=False)

    res_dfs_ = []

    for ii in range(Npnts):
        print(imxs[ii], imys[ii])
        print(thetas[ii], phis[ii])
        sig_miner.set_fixed_params(
            ["Signal_theta", "Signal_phi"], values=[thetas[ii], phis[ii]]
        )

        res_dfs = []

        for j in range(Nspec_pnts):
            flux_params["gamma"] = gammas[j]
            flux_params["Epeak"] = Epeaks[j]
            sig_mod.set_flux_params(flux_params)

            res_dict = {}
            res_dict["Epeak"] = Epeaks[j]
            res_dict["gamma"] = gammas[j]

            nllhs = np.zeros(ntbins)
            As = np.zeros(ntbins)

            for i in range(ntbins):
                # pin the background to its best fit for this time bin
                parss_ = {}
                for pname, val in bkg_bf_params_list[i].items():
                    # pars_['Background_'+pname] = val
                    parss_[bkg_name + "_" + pname] = val
                sig_miner.set_fixed_params(
                    list(parss_.keys()), values=list(parss_.values())
                )

                t0 = tbins0[i]
                t1 = tbins1[i]
                dt = t1 - t0
                sig_llh_obj.set_time(tbins0[i], tbins1[i])

                try:
                    pars, nllh, res = sig_miner.minimize()
                    As[i] = pars[0][0]
                    nllhs[i] = nllh[0]
                except Exception as E:
                    logging.error(E)
                    logging.error(traceback.format_exc())
                    logging.error("Failed to minimize seed: ")
                    logging.error((imxs[ii], imys[ii]))
                    logging.error((timeIDs[i]))
                    nllhs[i] = np.nan

            # print "res: "
            # print res

            res_dict["nllh"] = nllhs
            res_dict["A"] = As
            res_dict["time"] = np.array(tbins0)
            res_dict["dur"] = np.array(tbins1) - np.array(tbins0)
            res_dict["timeID"] = np.array(timeIDs)

            res_dict["theta"] = thetas[ii]
            res_dict["phi"] = phis[ii]
            res_dict["imx"] = imxs[ii]
            res_dict["imy"] = imys[ii]

            res_dfs.append(pd.DataFrame(res_dict))

            # logging.info("Done with spec %d of %d" %(j+1,Nspec_pnts))

        res_df = pd.concat(res_dfs, ignore_index=True)

        # background-only NLLH per time bin (Signal_A pinned to ~0)
        bkg_nllhs = np.zeros(len(res_df))

        bkg_bf_param_dict = {}

        for i in range(ntbins):
            t0 = tbins0[i]
            t1 = tbins1[i]
            dt = t1 - t0
            sig_llh_obj.set_time(tbins0[i], tbins1[i])
            for pname, val in bkg_bf_params_list[i].items():
                pars_[bkg_name + "_" + pname] = val
            bkg_bf_param_dict[timeIDs[i]] = bkg_bf_params_list[i]
            pars_["Signal_theta"] = thetas[ii]
            pars_["Signal_phi"] = phis[ii]
            pars_["Signal_A"] = 1e-10
            bkg_nllh = -sig_llh_obj.get_logprob(pars_)
            bl = np.isclose(res_df["time"] - t0, t0 - t0) & np.isclose(
                res_df["dur"], dt
            )
            bkg_nllhs[bl] = bkg_nllh

        # pars_['Signal_A'] = 1e-10
        # bkg_nllh = -sig_llh_obj.get_logprob(pars_)

        res_df["bkg_nllh"] = bkg_nllhs
        res_df["TS"] = np.sqrt(2.0 * (bkg_nllhs - res_df["nllh"]))
        # NOTE(review): chained assignment; pandas may warn / act on a copy
        res_df["TS"][np.isnan(res_df["TS"])] = 0.0

        res_dfs_.append(res_df)

        logging.info("Done with imxy %d of %d" % (ii + 1, Npnts))

    res_df = pd.concat(res_dfs_, ignore_index=True)

    # keep high-TS rows, but never more than max_frac2keep of the table
    TSbl = res_df["TS"] >= TS2keep
    if np.sum(TSbl) > (len(res_df) / 5.0):
        TSwrite_ = np.nanpercentile(res_df["TS"], max_frac2keep * 100.0)
        TSbl = res_df["TS"] >= TSwrite_
    elif np.sum(TSbl) < 1:
        TSbl = np.isclose(res_df["TS"], np.max(res_df["TS"]))
    else:
        TSbl = res_df["TS"] >= TS2keep
    res_df = res_df[TSbl]

    minTS2scan = 6.0
    if np.max(res_df["TS"]) >= minTS2scan:
        peaks_df = find_peaks2scan(res_df)
        Npeaks2scan = len(peaks_df)
    else:
        Npeaks2scan = 0
    logging.info("%d peaks to scan" % (Npeaks2scan))

    if Npeaks2scan > 0:
        peak_res_dfs = []
        for peak_ind, peak_row in peaks_df.iterrows():
            bkg_bf_params = bkg_bf_param_dict[peak_row["timeID"]]
            logging.info("Starting to scan peak_row")
            logging.info(peak_row)
            # coarse scan around the seed, then a finer scan around its max
            df = do_scan_around_peak(
                peak_row, bkg_bf_params, bkg_name, sig_miner, sig_llh_obj, sig_mod
            )
            max_peak_row = df.loc[df["TS"].idxmax()]
            df2 = do_scan_around_peak(
                max_peak_row,
                bkg_bf_params,
                bkg_name,
                sig_miner,
                sig_llh_obj,
                sig_mod,
                imstep=1e-3,
                dimx=1e-3,
                dimy=1e-3,
                dgamma=0.1,
                dlog10Ep=0.1,
            )
            peak_res_dfs.append(df)
            peak_res_dfs.append(df2)
        peak_res_df = pd.concat(peak_res_dfs, ignore_index=True)
        return res_df, peak_res_df
    else:
        return res_df, None
def analysis_for_imxy_square(
    imxs,
    imys,
    bkg_bf_params_list,
    bkg_mod,
    flux_mod,
    ev_data,
    ebins0,
    ebins1,
    tbins0,
    tbins1,
    timeIDs,
    TS2keep=None,
    max_frac2keep=0.75,
    do_scan=False,
):
    """LLH scan over explicit sky pixels (this definition shadows the earlier one).

    For each (imx, imy) pixel and each point of a small Epeak/gamma spectral
    grid, the NLLH is minimized in the signal amplitude for every time bin,
    and a TS is formed against the background-only NLLH.

    Args:
        imxs, imys: arrays of pixel positions to scan.
        bkg_bf_params_list: per-time-bin dicts of background best-fit values.
        bkg_mod: background model (provides ``bl_dmask`` and its ``name``).
        flux_mod: spectral flux model for the signal.
        ev_data: event table with a 'TIME' column.
        ebins0, ebins1: energy-bin edges.
        tbins0, tbins1: time-bin start/stop arrays.
        timeIDs: IDs matching the time bins.
        TS2keep: optional TS threshold to shrink the returned table (None
            keeps every row).
        max_frac2keep: cap on the fraction of rows kept when thresholding.
        do_scan: when True, also run the fine peak scan around TS >= 6 peaks.

    Returns:
        (res_df, peak_res_df): the scan table and the fine-scan table
        (None unless ``do_scan`` and peaks exist).
    """
    bl_dmask = bkg_mod.bl_dmask

    Npnts = len(imxs)
    logging.info("%d imxy points to do" % (Npnts))

    thetas, phis = imxy2theta_phi(imxs, imys)

    # spectral grid; later assignments intentionally overwrite the first
    gamma_ax = np.linspace(-0.4, 1.6, 8 + 1)
    gamma_ax = np.linspace(-0.4, 1.6, 4 + 1)[1:-1]
    # gamma_ax = np.array([0.4, 0.9])
    # gamma_ax = np.linspace(-0.4, 1.6, 3+1)
    Epeak_ax = np.logspace(np.log10(45.0), 3, 10 + 1)
    Epeak_ax = np.logspace(np.log10(45.0), 3, 5 + 1)[1:-1]
    Epeak_ax = np.logspace(np.log10(45.0), 3, 4 + 1)[1:-1]
    # Epeak_ax = np.logspace(np.log10(45.0), 3, 5+1)[3:]
    logging.info("Epeak_ax: ")
    logging.info(Epeak_ax)
    logging.info("gammas_ax: ")
    logging.info(gamma_ax)
    # Epeak_ax = np.logspace(np.log10(25.0), 3, 3+1)
    gammas, Epeaks = np.meshgrid(gamma_ax, Epeak_ax)
    gammas = gammas.ravel()
    Epeaks = Epeaks.ravel()
    Nspec_pnts = len(Epeaks)
    ntbins = len(tbins0)

    rt_obj = RayTraces(rt_dir)
    # fp_obj = FootPrints(fp_dir)

    sig_mod = Source_Model_InOutFoV(
        flux_mod, [ebins0, ebins1], bl_dmask, rt_obj, use_deriv=True
    )
    sig_mod.set_theta_phi(np.mean(thetas), np.mean(phis))

    comp_mod = CompoundModel([bkg_mod, sig_mod])
    sig_miner = NLLH_ScipyMinimize_Wjacob("")

    # pre-select events near the analyzed time bins to speed up the LLH
    tmin = np.min(tbins0)
    tmax = np.max(tbins1)
    if (tmax - tmin) > 40.0:
        logging.debug("tmax - tmin > 40.0s, using twinds for tbl")
        gti_dict = {"START": tbins0, "STOP": tbins1}
        gti_twinds = Table(data=gti_dict)
        gtis = union_gtis([gti_twinds])
        tbl = mk_gti_bl(ev_data["TIME"], gtis, time_pad=0.1)
        logging.debug("np.sum(tbl): %d" % (np.sum(tbl)))
    else:
        tbl = (ev_data["TIME"] >= (tmin - 1.0)) & (ev_data["TIME"] < (tmax + 1.0))
        logging.debug("np.sum(tbl): %d" % (np.sum(tbl)))

    sig_llh_obj = LLH_webins(ev_data[tbl], ebins0, ebins1, bl_dmask, has_err=True)
    sig_llh_obj.set_model(comp_mod)

    flux_params = {"A": 1.0, "gamma": 0.5, "Epeak": 1e2}

    bkg_name = bkg_mod.name

    pars_ = {}
    pars_["Signal_theta"] = np.mean(thetas)
    pars_["Signal_phi"] = np.mean(phis)
    for pname, val in bkg_bf_params_list[0].items():
        # pars_['Background_'+pname] = val
        pars_[bkg_name + "_" + pname] = val
    for pname, val in flux_params.items():
        pars_["Signal_" + pname] = val

    sig_miner.set_llh(sig_llh_obj)

    fixed_pnames = list(pars_.keys())
    fixed_vals = list(pars_.values())
    trans = [None for i in range(len(fixed_pnames))]
    sig_miner.set_trans(fixed_pnames, trans)
    sig_miner.set_fixed_params(fixed_pnames, values=fixed_vals)
    # only the signal amplitude is left free in the fit
    sig_miner.set_fixed_params(["Signal_A"], fixed=False)

    res_dfs_ = []

    # pre-set the time window and background values for the first (often
    # only) time bin; with ntbins > 1 they are reset inside the loop below
    sig_llh_obj.set_time(tbins0[0], tbins1[0])
    parss_ = {}
    for pname, val in bkg_bf_params_list[0].items():
        # pars_['Background_'+pname] = val
        parss_[bkg_name + "_" + pname] = val
    sig_miner.set_fixed_params(list(parss_.keys()), values=list(parss_.values()))

    for ii in range(Npnts):
        sig_miner.set_fixed_params(
            ["Signal_theta", "Signal_phi"], values=[thetas[ii], phis[ii]]
        )

        res_dfs = []

        for j in range(Nspec_pnts):
            flux_params["gamma"] = gammas[j]
            flux_params["Epeak"] = Epeaks[j]
            sig_mod.set_flux_params(flux_params)

            res_dict = {}
            res_dict["Epeak"] = Epeaks[j]
            res_dict["gamma"] = gammas[j]

            nllhs = np.zeros(ntbins)
            As = np.zeros(ntbins)

            for i in range(ntbins):
                t0 = tbins0[i]
                t1 = tbins1[i]
                dt = t1 - t0
                if ntbins > 1:
                    # re-pin the background and time window for this bin
                    parss_ = {}
                    for pname, val in bkg_bf_params_list[i].items():
                        # pars_['Background_'+pname] = val
                        parss_[bkg_name + "_" + pname] = val
                    sig_miner.set_fixed_params(
                        list(parss_.keys()), values=list(parss_.values())
                    )
                    sig_llh_obj.set_time(tbins0[i], tbins1[i])

                try:
                    pars, nllh, res = sig_miner.minimize()
                    As[i] = pars[0][0]
                    nllhs[i] = nllh[0]
                except Exception as E:
                    logging.error(E)
                    logging.error(traceback.format_exc())
                    logging.error("Failed to minimize seed: ")
                    logging.error((imxs[ii], imys[ii]))
                    logging.error((timeIDs[i]))
                    nllhs[i] = np.nan

            # print "res: "
            # print res

            res_dict["nllh"] = nllhs
            res_dict["A"] = As
            res_dict["time"] = np.array(tbins0)
            res_dict["dur"] = np.array(tbins1) - np.array(tbins0)
            res_dict["timeID"] = np.array(timeIDs)

            res_dict["theta"] = thetas[ii]
            res_dict["phi"] = phis[ii]
            res_dict["imx"] = imxs[ii]
            res_dict["imy"] = imys[ii]

            res_dfs.append(pd.DataFrame(res_dict))

            # logging.info("Done with spec %d of %d" %(j+1,Nspec_pnts))

        res_df = pd.concat(res_dfs, ignore_index=True)

        # background-only NLLH per time bin (Signal_A pinned to ~0)
        bkg_nllhs = np.zeros(len(res_df))

        bkg_bf_param_dict = {}

        for i in range(ntbins):
            t0 = tbins0[i]
            t1 = tbins1[i]
            dt = t1 - t0
            sig_llh_obj.set_time(tbins0[i], tbins1[i])
            for pname, val in bkg_bf_params_list[i].items():
                pars_[bkg_name + "_" + pname] = val
            bkg_bf_param_dict[timeIDs[i]] = bkg_bf_params_list[i]
            pars_["Signal_theta"] = thetas[ii]
            pars_["Signal_phi"] = phis[ii]
            pars_["Signal_A"] = 1e-10
            bkg_nllh = -sig_llh_obj.get_logprob(pars_)
            bl = np.isclose(res_df["time"] - t0, t0 - t0) & np.isclose(
                res_df["dur"], dt
            )
            bkg_nllhs[bl] = bkg_nllh

        # pars_['Signal_A'] = 1e-10
        # bkg_nllh = -sig_llh_obj.get_logprob(pars_)

        res_df["bkg_nllh"] = bkg_nllhs
        res_df["TS"] = np.sqrt(2.0 * (bkg_nllhs - res_df["nllh"]))
        # NOTE(review): chained assignment; pandas may warn / act on a copy
        res_df["TS"][np.isnan(res_df["TS"])] = 0.0

        res_dfs_.append(res_df)

        logging.info("Done with imxy %d of %d" % (ii + 1, Npnts))

    res_df = pd.concat(res_dfs_, ignore_index=True)

    # optional thresholding: keep high-TS rows, capped at max_frac2keep
    if TS2keep is not None:
        TSbl = res_df["TS"] >= TS2keep
        if np.sum(TSbl) > (len(res_df) / 5.0):
            TSwrite_ = np.nanpercentile(res_df["TS"], max_frac2keep * 100.0)
            TSbl = res_df["TS"] >= TSwrite_
        elif np.sum(TSbl) < 1:
            TSbl = np.isclose(res_df["TS"], np.max(res_df["TS"]))
        else:
            TSbl = res_df["TS"] >= TS2keep
        res_df = res_df[TSbl]

    minTS2scan = 6.0
    if np.max(res_df["TS"]) >= minTS2scan:
        peaks_df = find_peaks2scan(res_df)
        Npeaks2scan = len(peaks_df)
    else:
        Npeaks2scan = 0
    logging.info("%d peaks to scan" % (Npeaks2scan))

    if Npeaks2scan > 0 and do_scan:
        peak_res_dfs = []
        for peak_ind, peak_row in peaks_df.iterrows():
            bkg_bf_params = bkg_bf_param_dict[peak_row["timeID"]]
            logging.info("Starting to scan peak_row")
            logging.info(peak_row)
            # coarse scan around the seed, then a finer scan around its max
            df = do_scan_around_peak(
                peak_row, bkg_bf_params, bkg_name, sig_miner, sig_llh_obj, sig_mod
            )
            max_peak_row = df.loc[df["TS"].idxmax()]
            df2 = do_scan_around_peak(
                max_peak_row,
                bkg_bf_params,
                bkg_name,
                sig_miner,
                sig_llh_obj,
                sig_mod,
                imstep=1e-3,
                dimx=1e-3,
                dimy=1e-3,
                dgamma=0.1,
                dlog10Ep=0.1,
            )
            peak_res_dfs.append(df)
            peak_res_dfs.append(df2)
        peak_res_df = pd.concat(peak_res_dfs, ignore_index=True)
        return res_df, peak_res_df
    else:
        return res_df, None
def do_analysis(
    square_tab,
    ev_data,
    flux_mod,
    rt_dir,
    ebins0,
    ebins1,
    bl_dmask,
    trigger_time,
    work_dir,
    bkg_fname,
    pcfname,
    TSwrite=None,
    pc_min=0.01,
):
    """Run the in-FoV LLH scan over every imx/imy square assigned to this job.

    For each squareID group in ``square_tab`` the partial-coding image is
    used to select the sky pixels inside the square, the closest-in-time
    background fit is looked up for each time bin, and
    ``analysis_for_imxy_square`` is run.  Results are written to
    'res_<squareID>_<job_id>_.csv' (plus 'peak_res_..._.csv' when peaks were
    scanned); a file containing 'NONE' is written when a square fails.

    Args:
        square_tab: seed DataFrame with 'squareID', 'proc_group', 'time',
            'dur', 'timeID' and square corners 'imx0'/'imx1'/'imy0'/'imy1'.
        ev_data: event table.
        flux_mod: flux model passed through to the fit.
        rt_dir: ray-trace directory.
        ebins0, ebins1: energy-bin edges.
        bl_dmask: boolean detector mask.
        trigger_time: trigger time (unused here; kept for interface parity).
        work_dir: working directory (see NOTE below about output paths).
        bkg_fname: background-fit CSV file name.
        pcfname: partial-coding FITS image file name.
        TSwrite: unused; kept for backward compatibility.
        pc_min: minimum partial-coding fraction for a pixel to be analyzed.
    """
    solid_ang_dpi = np.load(solid_angle_dpi_fname)

    job_id = np.min(square_tab["proc_group"])

    bkg_df, bkg_name, PSnames, bkg_mod, ps_mods = parse_bkg_csv(
        bkg_fname, solid_ang_dpi, ebins0, ebins1, bl_dmask, rt_dir
    )
    # derivatives are not needed for the (fixed) background side of the fit
    bkg_mod.has_deriv = False
    bkg_mod_list = [bkg_mod]
    Nsrcs = len(ps_mods)
    if Nsrcs > 0:
        bkg_mod_list += ps_mods
        for ps_mod in ps_mods:
            ps_mod.has_deriv = False
    bkg_mod = CompoundModel(bkg_mod_list)

    # partial-coding map -> list of sky pixels with pc >= pc_min
    PC = fits.open(pcfname)[0]
    pc = PC.data
    w_t = WCS(PC.header, key="T")
    pcbl = pc >= pc_min
    pc_inds = np.where(pcbl)
    pc_imxs, pc_imys = w_t.all_pix2world(pc_inds[1], pc_inds[0], 0)

    df_sq_grps = square_tab.groupby("squareID")
    for squareID, square_df in df_sq_grps:
        logging.info("Starting squareID: %d" % (squareID))

        # NOTE(review): rt_obj is never used below (analysis_for_imxy_square
        # builds its own RayTraces).  Kept in case the constructor warms a
        # cache -- confirm before removing.
        rt_obj = RayTraces(rt_dir, max_nbytes=2e9)

        imx0 = np.mean(square_df["imx0"])
        imx1 = np.mean(square_df["imx1"])
        imy0 = np.mean(square_df["imy0"])
        imy1 = np.mean(square_df["imy1"])

        # pixels of the partial-coding map that fall inside this square
        im_bl = (
            (pc_imxs >= imx0) & (pc_imxs < imx1) & (pc_imys >= imy0) & (pc_imys < imy1)
        )
        imxs = pc_imxs[im_bl]
        imys = pc_imys[im_bl]

        logging.info("%d timeIDs to do" % (len(square_df)))
        logging.info("%d pix to do" % (len(imxs)))

        # per-time-bin bookkeeping: bounds, IDs and the closest background fit
        t0s = []
        t1s = []
        timeIDs = []
        bkg_params_list = []
        for sq_ind, sq_row in square_df.iterrows():
            t0s.append(sq_row["time"])
            t1s.append(sq_row["time"] + sq_row["dur"])
            timeIDs.append(sq_row["timeID"])
            tmid = sq_row["time"] + (sq_row["dur"] / 2.0)
            bkg_row = bkg_df.iloc[np.argmin(np.abs(tmid - bkg_df["time"]))]
            bkg_params = {pname: bkg_row[pname] for pname in bkg_mod.param_names}
            bkg_params_list.append(bkg_params)

        try:
            res_df, peak_res_df = analysis_for_imxy_square(
                imxs,
                imys,
                bkg_params_list,
                bkg_mod,
                flux_mod,
                ev_data,
                ebins0,
                ebins1,
                t0s,
                t1s,
                timeIDs,
            )
            res_df["squareID"] = squareID
            # NOTE(review): the original also built os.path.join(work_dir,
            # fname) and then discarded it -- results go to the CWD.  Confirm
            # this is intended before changing the output location.
            fname = "res_%d_%d_.csv" % (squareID, job_id)
            res_df.to_csv(fname)
            logging.info("Saved results to")
            logging.info(fname)

            if peak_res_df is not None:
                peak_res_df["squareID"] = squareID
                fname = "peak_res_%d_%d_.csv" % (squareID, job_id)
                peak_res_df.to_csv(fname)
                logging.info("Saved peak results to")
                logging.info(fname)
        except Exception as E:
            logging.error(E)
            logging.error(traceback.format_exc())
            # BUGFIX: logging.warn is deprecated (removed in Python 3.13)
            logging.warning("Messed up with squareID %d" % (squareID))
            # leave a sentinel so downstream collection knows this square
            # was attempted but failed; `with` guarantees the file is closed
            fname = "res_%d_%d_.csv" % (squareID, job_id)
            with open(fname, "w") as f:
                f.write("NONE")
def main(args):
    """Driver: configure logging, load inputs from the results DB, run analysis.

    Reads trigger info, the event file and detector mask listed in the
    results database, overrides the energy binning, selects this job's
    squares from the seed table (by proc_group, else by an explicit
    --square_id), and calls do_analysis().
    """
    # fname = 'llh_analysis_from_rate_seeds_' + str(args.job_id)
    # one log file per job
    fname = args.log_fname + "_" + str(args.job_id)

    logging.basicConfig(
        filename=fname + ".log",
        level=logging.DEBUG,
        format="%(asctime)s-" "%(levelname)s- %(message)s",
    )

    t_0 = time.time()

    if args.dbfname is None:
        db_fname = guess_dbfname()
        if isinstance(db_fname, list):
            db_fname = db_fname[0]
    else:
        db_fname = args.dbfname

    logging.info("Connecting to DB")
    conn = get_conn(db_fname)

    info_tab = get_info_tab(conn)
    logging.info("Got info table")

    files_tab = get_files_tab(conn)
    logging.info("Got files table")

    trigtime = info_tab["trigtimeMET"][0]

    evfname = files_tab["evfname"][0]
    ev_data = fits.open(evfname)[1].data
    dmask_fname = files_tab["detmask"][0]
    dmask = fits.open(dmask_fname)[0].data
    # detectors flagged 0 in the detmask are the enabled ones
    bl_dmask = dmask == 0.0
    logging.debug("Opened up event and detmask files")

    bkg_fits_df = pd.read_csv(args.bkg_fname)

    # rate_fits_df = get_rate_fits_tab(conn)
    # bkg_rates_obj = rate_obj_from_sqltab(rate_fits_df, 0, 1)

    time_starting = time.time()
    proc_num = args.job_id
    # init classes up here

    # drm_dir = files_tab['drmDir'][0]
    # if args.rt_dir is None:
    #     rt_dir = files_tab['rtDir'][0]
    # else:
    #     rt_dir = args.rt_dir
    # drm_obj = DRMs(drm_dir)
    # rt_obj = RayTraces(rt_dir, max_nbytes=1e10)

    work_dir = files_tab["workDir"][0]

    conn.close()

    # pl_flux = Plaw_Flux()
    flux_mod = Cutoff_Plaw_Flux(E0=100.0)

    # NOTE(review): the configured EBINS0/EBINS1 are immediately overridden
    # by the hard-coded binning below -- confirm that is intended.
    ebins0 = np.array(EBINS0)
    ebins1 = np.array(EBINS1)
    ebins0 = np.array([15.0, 24.0, 35.0, 48.0, 64.0])
    ebins0 = np.append(ebins0, np.logspace(np.log10(84.0), np.log10(500.0), 5 + 1))[:-1]
    ebins0 = np.round(ebins0, decimals=1)[:-1]
    ebins1 = np.append(ebins0[1:], [350.0])
    logging.debug("ebins0")
    logging.debug(ebins0)
    logging.debug("ebins1")
    logging.debug(ebins1)

    # bkg_llh_obj = LLH_webins(ev_data, ebins0, ebins1, bl_dmask)
    # sig_llh_obj = LLH_webins(ev_data, ebins0, ebins1, bl_dmask)

    # try:
    #     good_pix = np.load(args.pix_fname)
    # except Exception as E:
    #     logging.error(E)
    #     logging.warning("No pix2scan file")

    # PC = fits.open(args.pcfname)[0]
    # pc = PC.data
    # w_t = WCS(PC.header, key='T')
    #
    # pcbl = (pc>=args.min_pc)
    # pc_inds = np.where(pcbl)
    # pc_imxs, pc_imys = w_t.all_pix2world(pc_inds[1], pc_inds[0], 0)
    # logging.debug("Min pc_imx, pc_imy: %.2f, %.2f" %(np.nanmin(pc_imxs), np.nanmin(pc_imys)))
    # logging.debug("Max pc_imx, pc_imy: %.2f, %.2f" %(np.nanmax(pc_imxs), np.nanmax(pc_imys)))

    # conn = get_conn(db_fname)
    # if proc_num >= 0:
    #     square_tab = get_square_tab(conn, proc_group=proc_num)
    # else:
    #     square_tab = get_square_tab(conn)

    square_tab = pd.read_csv(args.job_fname)
    # select this job's squares: by proc_group, else by explicit squareID,
    # else take everything
    if proc_num >= 0:
        bl = square_tab["proc_group"] == proc_num
    elif args.square_id >= 0:
        bl = square_tab["squareID"] == args.square_id
    else:
        bl = np.ones(len(square_tab), dtype=bool)
    square_tab = square_tab[bl]

    logging.info("Read in Square Seed Table, now to do analysis")

    # NOTE(review): rt_dir here is the module-level value imported from
    # ..config; the --rt_dir option is currently ignored (see commented code)
    do_analysis(
        square_tab,
        ev_data,
        flux_mod,
        rt_dir,
        ebins0,
        ebins1,
        bl_dmask,
        trigtime,
        work_dir,
        args.bkg_fname,
        args.pcfname,
    )
    # do_analysis(square_tab, rate_res_tab, good_pix['imx'], good_pix['imy'], pl_flux,\
    #             drm_obj, rt_dir,\
    #             bkg_llh_obj, sig_llh_obj,\
    #             conn, db_fname, trigtime, work_dir,bkg_fits_df)
if __name__ == "__main__":
    # script entry point: parse CLI options and run the analysis
    args = cli()
    main(args)
|
Swift-BATREPO_NAMENITRATESPATH_START.@NITRATES_extracted@NITRATES-main@nitrates@archive@do_llh_inFoV4pc_pix.py@.PATH_END.py
|
{
"filename": "ifort.py",
"repo_name": "duvall3/rat-pac",
"repo_path": "rat-pac_extracted/rat-pac-master/python/SCons/Tool/ifort.py",
"type": "Python"
}
|
"""SCons.Tool.ifort
Tool-specific initialization for newer versions of the Intel Fortran Compiler
for Linux/Windows (and possibly Mac OS X).
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/ifort.py 4043 2009/02/23 09:06:45 scons"
import string
import SCons.Defaults
from SCons.Scanner.Fortran import FortranScan
from FortranCommon import add_all_to_env
def generate(env):
    """Add Builders and construction variables for ifort to an Environment.

    Registers the Intel Fortran compiler for every Fortran dialect variable
    (F77/F90/FORTRAN/F95), adds scanners for the preprocessed ``.i``/``.i90``
    suffixes, and applies the Windows ifort driver quirks (``-object:``
    instead of ``-o``; ``/module:`` instead of ``-module``).
    """
    # ifort supports Fortran 90 and Fortran 95
    # Additionally, ifort recognizes more file extensions.
    fscan = FortranScan("FORTRANPATH")
    SCons.Tool.SourceFileScanner.add_scanner('.i', fscan)
    SCons.Tool.SourceFileScanner.add_scanner('.i90', fscan)

    # Membership test instead of the Python 2-only dict.has_key(); this works
    # on both Python 2 and Python 3.
    if 'FORTRANFILESUFFIXES' not in env:
        env['FORTRANFILESUFFIXES'] = ['.i']
    else:
        env['FORTRANFILESUFFIXES'].append('.i')
    if 'F90FILESUFFIXES' not in env:
        env['F90FILESUFFIXES'] = ['.i90']
    else:
        env['F90FILESUFFIXES'].append('.i90')

    add_all_to_env(env)

    fc = 'ifort'
    for dialect in ['F77', 'F90', 'FORTRAN', 'F95']:
        env['%s' % dialect] = fc
        env['SH%s' % dialect] = '$%s' % dialect
        env['SH%sFLAGS' % dialect] = SCons.Util.CLVar('$%sFLAGS -fPIC' % dialect)

    if env['PLATFORM'] == 'win32':
        # On Windows, the ifort compiler specifies the object on the
        # command line with -object:, not -o. Massage the necessary
        # command-line construction variables. str.replace() replaces the
        # Python 2-only string.replace() module function.
        for dialect in ['F77', 'F90', 'FORTRAN', 'F95']:
            for var in ['%sCOM' % dialect, '%sPPCOM' % dialect,
                        'SH%sCOM' % dialect, 'SH%sPPCOM' % dialect]:
                env[var] = env[var].replace('-o $TARGET', '-object:$TARGET')
        env['FORTRANMODDIRPREFIX'] = "/module:"
    else:
        env['FORTRANMODDIRPREFIX'] = "-module "
def exists(env):
    """Look up the ifort compiler via the Environment; truthy when found."""
    found = env.Detect('ifort')
    return found
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
duvall3REPO_NAMErat-pacPATH_START.@rat-pac_extracted@rat-pac-master@python@SCons@Tool@ifort.py@.PATH_END.py
|
{
"filename": "inspectdata.ipynb",
"repo_name": "yongsukyee/uncertain_blackholemass",
"repo_path": "uncertain_blackholemass_extracted/uncertain_blackholemass-main/uncertain_blackholemass/notebooks/inspectdata.ipynb",
"type": "Jupyter Notebook"
}
|
# Inspect SDSS DR16 Quasar Samples
```python
import sys
sys.path.append("..")
from astropy.io import fits
from pathlib import Path
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from lib.dataset_sdssquasarspec import SDSSQuasarSpecDataset
from lib.get_config import get_config
cfg = get_config('../config/config.yaml')
```
Load config file >> ../config/config.yaml
/Users/sukyee/opt/miniconda3/envs/mltorch/lib/python3.10/site-packages/tqdm/auto.py:22: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html
from .autonotebook import tqdm as notebook_tqdm
```python
df = pd.read_csv(Path(cfg['data_dir'], 'catalogue', 'sdssdr16q_prop.csv'), sep=',', header=0)
list_files = np.asarray([str(f) for f in Path(cfg['data_dir'], 'spectra').rglob('*.fits.gz')])
```
```python
fig = plt.figure(figsize=(6,5))
gs = fig.add_gridspec(2, 2, width_ratios=(3,1), height_ratios=(1,4), top=0.9, right=0.9, hspace=0., wspace=0.)
ax = fig.add_subplot(gs[1, 0])
ax.hexbin(df['LOGMBH'], df['Z_FIT'], gridsize=20, mincnt=1, cmap='BuPu')
ax_xhist = fig.add_subplot(gs[0, 0], sharex=ax)
ax_xhist.hist(df['LOGMBH'], bins=30, color=plt.get_cmap('BuPu', 8)(1))
ax_yhist = fig.add_subplot(gs[1, 1], sharey=ax)
# ax_yhist.hist(df['Z_FIT'], bins=30, orientation='horizontal', alpha=0.6)
ax_yhist.hist(df['Z_FIT'], bins=30, orientation='horizontal', color=plt.get_cmap('BuPu', 8)(1))
ax_xhist.tick_params(axis="x", labelbottom=False)
ax_yhist.tick_params(top=True, labeltop=True, labelbottom=False, bottom=False, labelleft=False)
ax.set_xlabel(r'$\log M_{\mathrm{vir}}\ [M_{\odot}]$')
ax.set_ylabel(r'Redshift, $z$')
ax_xhist.set_ylabel('Frequency')
fig.savefig('plots/datamvirvsz.pdf', bbox_inches='tight')
plt.show()
```

```python
fig, ax = plt.subplots()
df['LOGMBH_HB'].plot.hist(bins=50, histtype='step', lw=2, alpha=0.8, label='Hb')
df['LOGMBH_MGII'].plot.hist(bins=50, histtype='step', lw=2, alpha=0.8, label='MgII')
df['LOGMBH'].plot.hist(bins=50, alpha=0.5)
ax.legend()
ax.set_xlabel(r'$\log M_{\mathrm{BH}}\ [M_{\odot}]$')
plt.show()
```

```python
fig, ax = plt.subplots()
ax.scatter(df['Z_FIT'], df['LOGMBH'], s=1, alpha=0.5)
ax.set_xlabel('z')
ax.set_ylabel(r'$\log M_{\mathrm{BH}}\ [M_{\odot}]$')
plt.show()
```

```python
fig, ax = plt.subplots()
df['HBETA_FWHM'].plot.hist(alpha=0.5, label='Hb')
df['MGII_FWHM'].plot.hist(alpha=0.5, label='MgII')
ax.legend()
ax.set_xlabel(r'FWHM [km s$^{-1}$]')
plt.show()
```

## Visualize Quasar Spectrum
```python
# Plot spectrum
sample_n = 10
objid = SDSSQuasarSpecDataset.get_labelbyfilename(df, path_fits=list_files[sample_n])[0][0]
data = fits.getdata(Path(cfg['data_dir'], 'spectra', f"op-{objid}.fits.gz"), ext=3)
plt.figure()
plt.plot(data['flux_prereduced'], alpha=0.8)
plt.show()
```

```python
# Plot line flux
sample_n = 10
objid = SDSSQuasarSpecDataset.get_labelbyfilename(df, path_fits=list_files[sample_n])[0][0]
data = fits.getdata(Path(cfg['data_dir'], 'spectra', f"op-{objid}.fits.gz"), ext=3)
plt.figure()
plt.plot(data['flux_line'], alpha=0.8, label='line')
plt.plot(data['err_line'], alpha=0.8, label='error')
plt.xlim(0, 1000)
plt.legend()
plt.show()
```

```python
```
|
yongsukyeeREPO_NAMEuncertain_blackholemassPATH_START.@uncertain_blackholemass_extracted@uncertain_blackholemass-main@uncertain_blackholemass@notebooks@inspectdata.ipynb@.PATH_END.py
|
{
"filename": "test_atek2015.py",
"repo_name": "mirochaj/ares",
"repo_path": "ares_extracted/ares-main/examples/lit/test_atek2015.py",
"type": "Python"
}
|
"""
test_atek2015.py
Author: Jordan Mirocha
Affiliation: University of Colorado at Boulder
Created on: Fri Nov 6 13:33:37 PST 2015
Description: Compare to their Figure 8.
"""
import ares
import numpy as np
import matplotlib.pyplot as pl
# Remember: they are stored as the log10!
# Load the Atek et al. (2015) luminosity-function compilation bundled with
# ares. NOTE(review): assumes each 'lf' entry exposes 'M', 'phi', and 'err'
# keys -- confirm against the output of ares.util.read_lit.
atek15 = ares.util.read_lit('atek2015')
# One errorbar series per tabulated redshift, labeled by z.
for z in atek15.redshifts:
    data = atek15.data['lf'][z]
    pl.errorbar(data['M'], np.array(data['phi']), yerr=data['err'],
        fmt='o', label=r'$z={:.2g}$ (Atek)'.format(z))
# Axis labels: UV magnitude vs. log10 of the comoving number density.
pl.xlabel(r'$M_{\mathrm{UV}}$')
pl.ylabel(r'$\log_{10} \phi \ (\mathrm{cMpc}^{-3} \ \mathrm{mag}^{-1})$')
|
mirochajREPO_NAMEaresPATH_START.@ares_extracted@ares-main@examples@lit@test_atek2015.py@.PATH_END.py
|
{
"filename": "_borderwidth.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/choroplethmap/colorbar/_borderwidth.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class BorderwidthValidator(_plotly_utils.basevalidators.NumberValidator):
    """Validator for the ``choroplethmap.colorbar.borderwidth`` property."""

    def __init__(
        self, plotly_name="borderwidth", parent_name="choroplethmap.colorbar", **kwargs
    ):
        # Fill in the defaults only when the caller did not supply them.
        kwargs.setdefault("edit_type", "colorbars")
        kwargs.setdefault("min", 0)
        super(BorderwidthValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@choroplethmap@colorbar@_borderwidth.py@.PATH_END.py
|
{
"filename": "ah_bootstrap.py",
"repo_name": "GBTSpectroscopy/gbtpipe",
"repo_path": "gbtpipe_extracted/gbtpipe-master/ah_bootstrap.py",
"type": "Python"
}
|
"""
This bootstrap module contains code for ensuring that the astropy_helpers
package will be importable by the time the setup.py script runs. It also
includes some workarounds to ensure that a recent-enough version of setuptools
is being used for the installation.
This module should be the first thing imported in the setup.py of distributions
that make use of the utilities in astropy_helpers. If the distribution ships
with its own copy of astropy_helpers, this module will first attempt to import
from the shipped copy. However, it will also check PyPI to see if there are
any bug-fix releases on top of the current version that may be useful to get
past platform-specific bugs that have been fixed. When running setup.py, use
the ``--offline`` command-line option to disable the auto-upgrade checks.
When this module is imported or otherwise executed it automatically calls a
main function that attempts to read the project's setup.cfg file, which it
checks for a configuration section called ``[ah_bootstrap]``. The presence of
that section, and options therein, determines the next step taken: If it
contains an option called ``auto_use`` with a value of ``True``, it will
automatically call the main function of this module called
`use_astropy_helpers` (see that function's docstring for full details).
Otherwise no further action is taken and by default the system-installed version
of astropy-helpers will be used (however, ``ah_bootstrap.use_astropy_helpers``
may be called manually from within the setup.py script).
This behavior can also be controlled using the ``--auto-use`` and
``--no-auto-use`` command-line flags. For clarity, an alias for
``--no-auto-use`` is ``--use-system-astropy-helpers``, and we recommend using
the latter if needed.
Additional options in the ``[ah_bootstrap]`` section of setup.cfg have the same
names as the arguments to `use_astropy_helpers`, and can be used to configure
the bootstrap script when ``auto_use = True``.
See https://github.com/astropy/astropy-helpers for more details, and for the
latest version of this module.
"""
import contextlib
import errno
import io
import locale
import os
import re
import subprocess as sp
import sys
from distutils import log
from distutils.debug import DEBUG
from configparser import ConfigParser, RawConfigParser
import pkg_resources
from setuptools import Distribution
from setuptools.package_index import PackageIndex
# This is the minimum Python version required for astropy-helpers
__minimum_python_version__ = (3, 5)
# TODO: Maybe enable checking for a specific version of astropy_helpers?
DIST_NAME = 'astropy-helpers'
PACKAGE_NAME = 'astropy_helpers'
UPPER_VERSION_EXCLUSIVE = None
# Defaults for other options
DOWNLOAD_IF_NEEDED = True
INDEX_URL = 'https://pypi.python.org/simple'
USE_GIT = True
OFFLINE = False
AUTO_UPGRADE = True
# A list of all the configuration options and their required types
CFG_OPTIONS = [
('auto_use', bool), ('path', str), ('download_if_needed', bool),
('index_url', str), ('use_git', bool), ('offline', bool),
('auto_upgrade', bool)
]
# Start off by parsing the setup.cfg file
SETUP_CFG = ConfigParser()
if os.path.exists('setup.cfg'):
    try:
        SETUP_CFG.read('setup.cfg')
    except Exception as e:
        if DEBUG:
            raise
        # NOTE(review): _err_help_msg is defined later in this file; if this
        # except clause fires at module import time the name will not yet
        # exist and this call raises NameError instead of logging -- verify.
        log.error(
            "Error reading setup.cfg: {0!r}\n{1} will not be "
            "automatically bootstrapped and package installation may fail."
            "\n{2}".format(e, PACKAGE_NAME, _err_help_msg))
# We used package_name in the package template for a while instead of name
if SETUP_CFG.has_option('metadata', 'name'):
    parent_package = SETUP_CFG.get('metadata', 'name')
elif SETUP_CFG.has_option('metadata', 'package_name'):
    parent_package = SETUP_CFG.get('metadata', 'package_name')
else:
    parent_package = None
if SETUP_CFG.has_option('options', 'python_requires'):
python_requires = SETUP_CFG.get('options', 'python_requires')
# The python_requires key has a syntax that can be parsed by SpecifierSet
# in the packaging package. However, we don't want to have to depend on that
# package, so instead we can use setuptools (which bundles packaging). We
# have to add 'python' to parse it with Requirement.
from pkg_resources import Requirement
req = Requirement.parse('python' + python_requires)
# We want the Python version as a string, which we can get from the platform module
import platform
# strip off trailing '+' incase this is a dev install of python
python_version = platform.python_version().strip('+')
# allow pre-releases to count as 'new enough'
if not req.specifier.contains(python_version, True):
if parent_package is None:
message = "ERROR: Python {} is required by this package\n".format(req.specifier)
else:
message = "ERROR: Python {} is required by {}\n".format(req.specifier, parent_package)
sys.stderr.write(message)
sys.exit(1)
if sys.version_info < __minimum_python_version__:
if parent_package is None:
message = "ERROR: Python {} or later is required by astropy-helpers\n".format(
__minimum_python_version__)
else:
message = "ERROR: Python {} or later is required by astropy-helpers for {}\n".format(
__minimum_python_version__, parent_package)
sys.stderr.write(message)
sys.exit(1)
_str_types = (str, bytes)
# What follows are several import statements meant to deal with install-time
# issues with either missing or misbehaving pacakges (including making sure
# setuptools itself is installed):
# Check that setuptools 30.3 or later is present
from distutils.version import LooseVersion
try:
import setuptools
assert LooseVersion(setuptools.__version__) >= LooseVersion('30.3')
except (ImportError, AssertionError):
sys.stderr.write("ERROR: setuptools 30.3 or later is required by astropy-helpers\n")
sys.exit(1)
# typing as a dependency for 1.6.1+ Sphinx causes issues when imported after
# initializing submodule with ah_boostrap.py
# See discussion and references in
# https://github.com/astropy/astropy-helpers/issues/302
try:
import typing # noqa
except ImportError:
pass
# Note: The following import is required as a workaround to
# https://github.com/astropy/astropy-helpers/issues/89; if we don't import this
# module now, it will get cleaned up after `run_setup` is called, but that will
# later cause the TemporaryDirectory class defined in it to stop working when
# used later on by setuptools
try:
import setuptools.py31compat # noqa
except ImportError:
pass
# matplotlib can cause problems if it is imported from within a call of
# run_setup(), because in some circumstances it will try to write to the user's
# home directory, resulting in a SandboxViolation. See
# https://github.com/matplotlib/matplotlib/pull/4165
# Making sure matplotlib, if it is available, is imported early in the setup
# process can mitigate this (note importing matplotlib.pyplot has the same
# issue)
try:
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot
except:
# Ignore if this fails for *any* reason*
pass
# End compatibility imports...
class _Bootstrapper(object):
"""
Bootstrapper implementation. See ``use_astropy_helpers`` for parameter
documentation.
"""
    def __init__(self, path=None, index_url=None, use_git=None, offline=None,
                 download_if_needed=None, auto_upgrade=None):
        """Normalize bootstrap options and store them as attributes.

        Arguments mirror ``use_astropy_helpers``; ``None`` means "use the
        module-level default". ``offline=True`` forces downloading and
        auto-upgrade off, and git usage is disabled automatically when no
        ``.git`` directory sits next to this file (i.e. a release tarball).
        """
        if path is None:
            path = PACKAGE_NAME
        if not (isinstance(path, _str_types) or path is False):
            raise TypeError('path must be a string or False')
        if not isinstance(path, str):
            fs_encoding = sys.getfilesystemencoding()
            path = path.decode(fs_encoding) # path to unicode
        self.path = path
        # Set other option attributes, using defaults where necessary
        self.index_url = index_url if index_url is not None else INDEX_URL
        self.offline = offline if offline is not None else OFFLINE
        # If offline=True, override download and auto-upgrade
        if self.offline:
            download_if_needed = False
            auto_upgrade = False
        self.download = (download_if_needed
                         if download_if_needed is not None
                         else DOWNLOAD_IF_NEEDED)
        self.auto_upgrade = (auto_upgrade
                             if auto_upgrade is not None else AUTO_UPGRADE)
        # If this is a release then the .git directory will not exist so we
        # should not use git.
        git_dir_exists = os.path.exists(os.path.join(os.path.dirname(__file__), '.git'))
        if use_git is None and not git_dir_exists:
            use_git = False
        self.use_git = use_git if use_git is not None else USE_GIT
        # Declared as False by default--later we check if astropy-helpers can be
        # upgraded from PyPI, but only if not using a source distribution (as in
        # the case of import from a git submodule)
        self.is_submodule = False
@classmethod
def main(cls, argv=None):
if argv is None:
argv = sys.argv
config = cls.parse_config()
config.update(cls.parse_command_line(argv))
auto_use = config.pop('auto_use', False)
bootstrapper = cls(**config)
if auto_use:
# Run the bootstrapper, otherwise the setup.py is using the old
# use_astropy_helpers() interface, in which case it will run the
# bootstrapper manually after reconfiguring it.
bootstrapper.run()
return bootstrapper
@classmethod
def parse_config(cls):
if not SETUP_CFG.has_section('ah_bootstrap'):
return {}
config = {}
for option, type_ in CFG_OPTIONS:
if not SETUP_CFG.has_option('ah_bootstrap', option):
continue
if type_ is bool:
value = SETUP_CFG.getboolean('ah_bootstrap', option)
else:
value = SETUP_CFG.get('ah_bootstrap', option)
config[option] = value
return config
@classmethod
def parse_command_line(cls, argv=None):
if argv is None:
argv = sys.argv
config = {}
# For now we just pop recognized ah_bootstrap options out of the
# arg list. This is imperfect; in the unlikely case that a setup.py
# custom command or even custom Distribution class defines an argument
# of the same name then we will break that. However there's a catch22
# here that we can't just do full argument parsing right here, because
# we don't yet know *how* to parse all possible command-line arguments.
if '--no-git' in argv:
config['use_git'] = False
argv.remove('--no-git')
if '--offline' in argv:
config['offline'] = True
argv.remove('--offline')
if '--auto-use' in argv:
config['auto_use'] = True
argv.remove('--auto-use')
if '--no-auto-use' in argv:
config['auto_use'] = False
argv.remove('--no-auto-use')
if '--use-system-astropy-helpers' in argv:
config['auto_use'] = False
argv.remove('--use-system-astropy-helpers')
return config
    def run(self):
        """Locate an astropy_helpers distribution and activate it.

        Clears previously imported ``astropy_helpers`` modules, records
        whether the path is a git submodule, then tries each source strategy
        in order (local directory, local archive, package index). The found
        distribution is added to the global pkg_resources working set so it
        becomes importable; raises ``_AHBootstrapSystemExit`` when no source
        is available.
        """
        strategies = ['local_directory', 'local_file', 'index']
        dist = None
        # First, remove any previously imported versions of astropy_helpers;
        # this is necessary for nested installs where one package's installer
        # is installing another package via setuptools.sandbox.run_setup, as in
        # the case of setup_requires
        for key in list(sys.modules):
            try:
                if key == PACKAGE_NAME or key.startswith(PACKAGE_NAME + '.'):
                    del sys.modules[key]
            except AttributeError:
                # Sometimes mysterious non-string things can turn up in
                # sys.modules
                continue
        # Check to see if the path is a submodule
        self.is_submodule = self._check_submodule()
        # Each strategy maps to a get_<strategy>_dist method defined on this
        # class; the for/else raises only when every strategy returned None.
        for strategy in strategies:
            method = getattr(self, 'get_{0}_dist'.format(strategy))
            dist = method()
            if dist is not None:
                break
        else:
            raise _AHBootstrapSystemExit(
                "No source found for the {0!r} package; {0} must be "
                "available and importable as a prerequisite to building "
                "or installing this package.".format(PACKAGE_NAME))
        # This is a bit hacky, but if astropy_helpers was loaded from a
        # directory/submodule its Distribution object gets a "precedence" of
        # "DEVELOP_DIST". However, in other cases it gets a precedence of
        # "EGG_DIST". However, when activing the distribution it will only be
        # placed early on sys.path if it is treated as an EGG_DIST, so always
        # do that
        dist = dist.clone(precedence=pkg_resources.EGG_DIST)
        # Otherwise we found a version of astropy-helpers, so we're done
        # Just active the found distribution on sys.path--if we did a
        # download this usually happens automatically but it doesn't hurt to
        # do it again
        # Note: Adding the dist to the global working set also activates it
        # (makes it importable on sys.path) by default.
        try:
            pkg_resources.working_set.add(dist, replace=True)
        except TypeError:
            # Some (much) older versions of setuptools do not have the
            # replace=True option here. These versions are old enough that all
            # bets may be off anyways, but it's easy enough to work around just
            # in case...
            if dist.key in pkg_resources.working_set.by_key:
                del pkg_resources.working_set.by_key[dist.key]
            pkg_resources.working_set.add(dist)
@property
def config(self):
"""
A `dict` containing the options this `_Bootstrapper` was configured
with.
"""
return dict((optname, getattr(self, optname))
for optname, _ in CFG_OPTIONS if hasattr(self, optname))
    def get_local_directory_dist(self):
        """
        Handle importing a vendored package from a subdirectory of the source
        distribution.

        Returns a pkg_resources Distribution for the vendored copy (or a PyPI
        bugfix upgrade of it when auto-upgrade applies and the path is not a
        submodule), or None when ``self.path`` is not a directory.
        """
        if not os.path.isdir(self.path):
            return
        log.info('Attempting to import astropy_helpers from {0} {1!r}'.format(
                 'submodule' if self.is_submodule else 'directory',
                 self.path))
        dist = self._directory_import()
        if dist is None:
            log.warn(
                'The requested path {0!r} for importing {1} does not '
                'exist, or does not contain a copy of the {1} '
                'package.'.format(self.path, PACKAGE_NAME))
        elif self.auto_upgrade and not self.is_submodule:
            # A version of astropy-helpers was found on the available path, but
            # check to see if a bugfix release is available on PyPI
            upgrade = self._do_upgrade(dist)
            if upgrade is not None:
                dist = upgrade
        return dist
    def get_local_file_dist(self):
        """
        Handle importing from a source archive; this also uses setup_requires
        but points easy_install directly to the source archive.

        Returns the imported Distribution (possibly upgraded from PyPI when
        auto-upgrade is enabled), or None when ``self.path`` is not a file or
        the import fails (the failure is logged unless DEBUG is set).
        """
        if not os.path.isfile(self.path):
            return
        log.info('Attempting to unpack and import astropy_helpers from '
                 '{0!r}'.format(self.path))
        try:
            dist = self._do_download(find_links=[self.path])
        except Exception as e:
            if DEBUG:
                raise
            log.warn(
                'Failed to import {0} from the specified archive {1!r}: '
                '{2}'.format(PACKAGE_NAME, self.path, str(e)))
            dist = None
        if dist is not None and self.auto_upgrade:
            # A version of astropy-helpers was found on the available path, but
            # check to see if a bugfix release is available on PyPI
            upgrade = self._do_upgrade(dist)
            if upgrade is not None:
                dist = upgrade
        return dist
def get_index_dist(self):
if not self.download:
log.warn('Downloading {0!r} disabled.'.format(DIST_NAME))
return None
log.warn(
"Downloading {0!r}; run setup.py with the --offline option to "
"force offline installation.".format(DIST_NAME))
try:
dist = self._do_download()
except Exception as e:
if DEBUG:
raise
log.warn(
'Failed to download and/or install {0!r} from {1!r}:\n'
'{2}'.format(DIST_NAME, self.index_url, str(e)))
dist = None
# No need to run auto-upgrade here since we've already presumably
# gotten the most up-to-date version from the package index
return dist
    def _directory_import(self):
        """
        Import astropy_helpers from the given path, which will be added to
        sys.path.

        Returns the pkg_resources Distribution found at the path (running
        ``setup.py egg_info`` in a subprocess first when no
        egg-info/dist-info exists yet), or None when the path does not
        contain the package. NOTE(review): the original docstring claimed a
        True/False return; the code returns a Distribution or None.
        """
        path = os.path.abspath(self.path)
        # Use an empty WorkingSet rather than the main
        # pkg_resources.working_set, since on older versions of setuptools this
        # will invoke a VersionConflict when trying to install an upgrade
        ws = pkg_resources.WorkingSet([])
        ws.add_entry(path)
        dist = ws.by_key.get(DIST_NAME)
        if dist is None:
            # We didn't find an egg-info/dist-info in the given path, but if a
            # setup.py exists we can generate it
            setup_py = os.path.join(path, 'setup.py')
            if os.path.isfile(setup_py):
                # We use subprocess instead of run_setup from setuptools to
                # avoid segmentation faults - see the following for more details:
                # https://github.com/cython/cython/issues/2104
                sp.check_output([sys.executable, 'setup.py', 'egg_info'], cwd=path)
                for dist in pkg_resources.find_distributions(path, True):
                    # There should be only one...
                    return dist
        return dist
    def _do_download(self, version='', find_links=None):
        """Install astropy-helpers through setuptools' setup_requires hook.

        ``find_links`` points easy_install at a local archive (and disables
        all remote hosts); otherwise ``self.index_url`` is used. ``version``
        pins an exact release. Returns the activated Distribution from the
        global working set, or raises Exception with a source-specific
        message on failure.
        """
        if find_links:
            allow_hosts = ''
            index_url = None
        else:
            allow_hosts = None
            index_url = self.index_url
        # Annoyingly, setuptools will not handle other arguments to
        # Distribution (such as options) before handling setup_requires, so it
        # is not straightforward to programmatically augment the arguments which
        # are passed to easy_install
        class _Distribution(Distribution):
            def get_option_dict(self, command_name):
                opts = Distribution.get_option_dict(self, command_name)
                if command_name == 'easy_install':
                    if find_links is not None:
                        opts['find_links'] = ('setup script', find_links)
                    if index_url is not None:
                        opts['index_url'] = ('setup script', index_url)
                    if allow_hosts is not None:
                        opts['allow_hosts'] = ('setup script', allow_hosts)
                return opts
        if version:
            req = '{0}=={1}'.format(DIST_NAME, version)
        else:
            if UPPER_VERSION_EXCLUSIVE is None:
                req = DIST_NAME
            else:
                req = '{0}<{1}'.format(DIST_NAME, UPPER_VERSION_EXCLUSIVE)
        attrs = {'setup_requires': [req]}
        # NOTE: we need to parse the config file (e.g. setup.cfg) to make sure
        # it honours the options set in the [easy_install] section, and we need
        # to explicitly fetch the requirement eggs as setup_requires does not
        # get honored in recent versions of setuptools:
        # https://github.com/pypa/setuptools/issues/1273
        try:
            # _verbose/_silence are context managers defined elsewhere in
            # this file (below this chunk).
            context = _verbose if DEBUG else _silence
            with context():
                dist = _Distribution(attrs=attrs)
                try:
                    dist.parse_config_files(ignore_option_errors=True)
                    dist.fetch_build_eggs(req)
                except TypeError:
                    # On older versions of setuptools, ignore_option_errors
                    # doesn't exist, and the above two lines are not needed
                    # so we can just continue
                    pass
            # If the setup_requires succeeded it will have added the new dist to
            # the main working_set
            return pkg_resources.working_set.by_key.get(DIST_NAME)
        except Exception as e:
            if DEBUG:
                raise
            msg = 'Error retrieving {0} from {1}:\n{2}'
            if find_links:
                source = find_links[0]
            elif index_url != INDEX_URL:
                source = index_url
            else:
                source = 'PyPI'
            raise Exception(msg.format(DIST_NAME, source, repr(e)))
    def _do_upgrade(self, dist):
        """Look for a newer bugfix release of ``dist`` on the package index.

        Constrains the search to ``> current version`` and ``< next minor``
        so API compatibility is preserved; returns the downloaded upgrade
        Distribution, or None when no candidate is available.
        """
        # Build up a requirement for a higher bugfix release but a lower minor
        # release (so API compatibility is guaranteed)
        next_version = _next_version(dist.parsed_version)
        req = pkg_resources.Requirement.parse(
            '{0}>{1},<{2}'.format(DIST_NAME, dist.version, next_version))
        package_index = PackageIndex(index_url=self.index_url)
        upgrade = package_index.obtain(req)
        if upgrade is not None:
            return self._do_download(version=upgrade.version)
def _check_submodule(self):
"""
Check if the given path is a git submodule.
See the docstrings for ``_check_submodule_using_git`` and
``_check_submodule_no_git`` for further details.
"""
if (self.path is None or
(os.path.exists(self.path) and not os.path.isdir(self.path))):
return False
if self.use_git:
return self._check_submodule_using_git()
else:
return self._check_submodule_no_git()
    def _check_submodule_using_git(self):
        """
        Check if the given path is a git submodule. If so, attempt to initialize
        and/or update the submodule if needed.

        This function makes calls to the ``git`` command in subprocesses. The
        ``_check_submodule_no_git`` option uses pure Python to check if the given
        path looks like a git submodule, but it cannot perform updates.

        Returns True when ``git submodule status`` recognizes the path as a
        submodule (after initializing/updating it as needed), False otherwise.
        """
        cmd = ['git', 'submodule', 'status', '--', self.path]
        try:
            log.info('Running `{0}`; use the --no-git option to disable git '
                     'commands'.format(' '.join(cmd)))
            returncode, stdout, stderr = run_cmd(cmd)
        except _CommandNotFound:
            # The git command simply wasn't found; this is most likely the
            # case on user systems that don't have git and are simply
            # trying to install the package from PyPI or a source
            # distribution. Silently ignore this case and simply don't try
            # to use submodules
            return False
        stderr = stderr.strip()
        if returncode != 0 and stderr:
            # Unfortunately the return code alone cannot be relied on, as
            # earlier versions of git returned 0 even if the requested submodule
            # does not exist
            # This is a warning that occurs in perl (from running git submodule)
            # which only occurs with a malformatted locale setting which can
            # happen sometimes on OSX. See again
            # https://github.com/astropy/astropy/issues/2749
            perl_warning = ('perl: warning: Falling back to the standard locale '
                            '("C").')
            if not stderr.strip().endswith(perl_warning):
                # Some other unknown error condition occurred
                log.warn('git submodule command failed '
                         'unexpectedly:\n{0}'.format(stderr))
                return False
        # Output of `git submodule status` is as follows:
        #
        # 1: Status indicator: '-' for submodule is uninitialized, '+' if
        # submodule is initialized but is not at the commit currently indicated
        # in .gitmodules (and thus needs to be updated), or 'U' if the
        # submodule is in an unstable state (i.e. has merge conflicts)
        #
        # 2. SHA-1 hash of the current commit of the submodule (we don't really
        # need this information but it's useful for checking that the output is
        # correct)
        #
        # 3. The output of `git describe` for the submodule's current commit
        # hash (this includes for example what branches the commit is on) but
        # only if the submodule is initialized. We ignore this information for
        # now
        #
        # NOTE(review): inside the character class below, '+-U' parses as the
        # range '+'..'U'; it happens to cover all intended status characters
        # ('+', '-', 'U', plus the literal space) but matches more than it
        # looks like it should.
        _git_submodule_status_re = re.compile(
            r'^(?P<status>[+-U ])(?P<commit>[0-9a-f]{40}) '
            r'(?P<submodule>\S+)( .*)?$')
        # The stdout should only contain one line--the status of the
        # requested submodule
        m = _git_submodule_status_re.match(stdout)
        if m:
            # Yes, the path *is* a git submodule
            self._update_submodule(m.group('submodule'), m.group('status'))
            return True
        else:
            log.warn(
                'Unexpected output from `git submodule status`:\n{0}\n'
                'Will attempt import from {1!r} regardless.'.format(
                    stdout, self.path))
            return False
def _check_submodule_no_git(self):
"""
Like ``_check_submodule_using_git``, but simply parses the .gitmodules file
to determine if the supplied path is a git submodule, and does not exec any
subprocesses.
This can only determine if a path is a submodule--it does not perform
updates, etc. This function may need to be updated if the format of the
.gitmodules file is changed between git versions.
"""
gitmodules_path = os.path.abspath('.gitmodules')
if not os.path.isfile(gitmodules_path):
return False
# This is a minimal reader for gitconfig-style files. It handles a few of
# the quirks that make gitconfig files incompatible with ConfigParser-style
# files, but does not support the full gitconfig syntax (just enough
# needed to read a .gitmodules file).
gitmodules_fileobj = io.StringIO()
# Must use io.open for cross-Python-compatible behavior wrt unicode
with io.open(gitmodules_path) as f:
for line in f:
# gitconfig files are more flexible with leading whitespace; just
# go ahead and remove it
line = line.lstrip()
# comments can start with either # or ;
if line and line[0] in (':', ';'):
continue
gitmodules_fileobj.write(line)
gitmodules_fileobj.seek(0)
cfg = RawConfigParser()
try:
cfg.readfp(gitmodules_fileobj)
except Exception as exc:
log.warn('Malformatted .gitmodules file: {0}\n'
'{1} cannot be assumed to be a git submodule.'.format(
exc, self.path))
return False
for section in cfg.sections():
if not cfg.has_option(section, 'path'):
continue
submodule_path = cfg.get(section, 'path').rstrip(os.sep)
if submodule_path == self.path.rstrip(os.sep):
return True
return False
    def _update_submodule(self, submodule, status):
        """Initialize or update ``submodule`` according to its git status.

        ``status`` is the one-character indicator from ``git submodule
        status``: ' ' (up to date), '-' (uninitialized), '+' (needs update),
        'U' (merge conflicts). Failures of the git command are logged rather
        than raised; conflicts or offline initialization abort the bootstrap
        via ``_AHBootstrapSystemExit``.
        """
        if status == ' ':
            # The submodule is up to date; no action necessary
            return
        elif status == '-':
            if self.offline:
                raise _AHBootstrapSystemExit(
                    "Cannot initialize the {0} submodule in --offline mode; "
                    "this requires being able to clone the submodule from an "
                    "online repository.".format(submodule))
            cmd = ['update', '--init']
            action = 'Initializing'
        elif status == '+':
            cmd = ['update']
            action = 'Updating'
            if self.offline:
                cmd.append('--no-fetch')
        elif status == 'U':
            raise _AHBootstrapSystemExit(
                'Error: Submodule {0} contains unresolved merge conflicts. '
                'Please complete or abandon any changes in the submodule so that '
                'it is in a usable state, then try again.'.format(submodule))
        else:
            log.warn('Unknown status {0!r} for git submodule {1!r}. Will '
                     'attempt to use the submodule as-is, but try to ensure '
                     'that the submodule is in a clean state and contains no '
                     'conflicts or errors.\n{2}'.format(status, submodule,
                                                        _err_help_msg))
            return
        err_msg = None
        cmd = ['git', 'submodule'] + cmd + ['--', submodule]
        log.warn('{0} {1} submodule with: `{2}`'.format(
            action, submodule, ' '.join(cmd)))
        try:
            log.info('Running `{0}`; use the --no-git option to disable git '
                     'commands'.format(' '.join(cmd)))
            returncode, stdout, stderr = run_cmd(cmd)
        except OSError as e:
            err_msg = str(e)
        else:
            if returncode != 0:
                err_msg = stderr
        if err_msg is not None:
            log.warn('An unexpected error occurred updating the git submodule '
                     '{0!r}:\n{1}\n{2}'.format(submodule, err_msg,
                                               _err_help_msg))
class _CommandNotFound(OSError):
    """
    An exception raised when a command run with run_cmd is not found on the
    system.

    Instantiated by ``run_cmd`` with two arguments: the formatted message
    and the attempted argv list.
    """
def run_cmd(cmd):
    """Run *cmd* (a list of command-line arguments) in a subprocess.

    Returns a ``(returncode, stdout, stderr)`` tuple, with the output
    streams decoded to text using the default locale encoding (falling
    back to latin1 when that cannot be determined).
    """
    try:
        proc = sp.Popen(cmd, stdout=sp.PIPE, stderr=sp.PIPE)
        # XXX: May block if either stdout or stderr fill their buffers;
        # unlikely for the short-output commands this is currently used for.
        stdout, stderr = proc.communicate()
    except OSError as exc:
        if DEBUG:
            raise
        if exc.errno != errno.ENOENT:
            raise _AHBootstrapSystemExit(
                'An unexpected error occurred when running the '
                '`{0}` command:\n{1}'.format(' '.join(cmd), str(exc)))
        msg = 'Command not found: `{0}`'.format(' '.join(cmd))
        raise _CommandNotFound(msg, cmd)

    # Determining the encoding can fail if the default locale is not
    # configured properly (https://github.com/astropy/astropy/issues/2749);
    # 'latin1' is an acceptable fallback here.  On OSX the call itself can
    # also crash depending on locale settings
    # (http://bugs.python.org/issue18378).
    try:
        stdio_encoding = locale.getdefaultlocale()[1] or 'latin1'
    except ValueError:
        stdio_encoding = 'latin1'

    def _as_text(stream):
        # Unlikely to be bytes at this point, but be flexible
        if isinstance(stream, str):
            return stream
        return stream.decode(stdio_encoding, 'replace')

    return (proc.returncode, _as_text(stdout), _as_text(stderr))
def _next_version(version):
"""
Given a parsed version from pkg_resources.parse_version, returns a new
version string with the next minor version.
Examples
========
>>> _next_version(pkg_resources.parse_version('1.2.3'))
'1.3.0'
"""
if hasattr(version, 'base_version'):
# New version parsing from setuptools >= 8.0
if version.base_version:
parts = version.base_version.split('.')
else:
parts = []
else:
parts = []
for part in version:
if part.startswith('*'):
break
parts.append(part)
parts = [int(p) for p in parts]
if len(parts) < 3:
parts += [0] * (3 - len(parts))
major, minor, micro = parts[:3]
return '{0}.{1}.{2}'.format(major, minor + 1, 0)
class _DummyFile(object):
"""A noop writeable object."""
errors = '' # Required for Python 3.x
encoding = 'utf-8'
def write(self, s):
pass
def flush(self):
pass
@contextlib.contextmanager
def _verbose():
    # No-op counterpart to _silence(): leaves sys.stdout/sys.stderr untouched.
    yield
@contextlib.contextmanager
def _silence():
    """A context manager that silences sys.stdout and sys.stderr.

    Both streams are replaced with no-op writers for the duration of the
    ``with`` block and are restored afterwards, whether or not an exception
    was raised — so exception handling and tracebacks work normally once
    control leaves the block.
    """
    old_stdout = sys.stdout
    old_stderr = sys.stderr
    sys.stdout = _DummyFile()
    sys.stderr = _DummyFile()

    try:
        yield
    finally:
        # try/finally restores the real streams on both the success and
        # error paths, replacing the original hand-rolled
        # `exception_occurred` flag and bare `except:` clause.
        sys.stdout = old_stdout
        sys.stderr = old_stderr
# Generic troubleshooting advice appended to bootstrap error messages
# (used by _AHBootstrapSystemExit and the submodule-update warnings).
_err_help_msg = """
If the problem persists consider installing astropy_helpers manually using pip
(`pip install astropy_helpers`) or by manually downloading the source archive,
extracting it, and installing by running `python setup.py install` from the
root of the extracted source code.
"""
class _AHBootstrapSystemExit(SystemExit):
    """SystemExit variant that appends the generic bootstrap help message
    (``_err_help_msg``) to whatever error message it is constructed with."""

    def __init__(self, *args):
        msg = (args[0] if args
               else 'An unknown problem occurred bootstrapping astropy_helpers.')
        msg += '\n' + _err_help_msg
        super(_AHBootstrapSystemExit, self).__init__(msg, *args[1:])
# Module-level bootstrapper instance created on import; use_astropy_helpers()
# below rebuilds it with any user-supplied configuration overrides.
BOOTSTRAPPER = _Bootstrapper.main()
def use_astropy_helpers(**kwargs):
    """
    Ensure that the `astropy_helpers` module is available and is importable.
    This supports automatic submodule initialization if astropy_helpers is
    included in a project as a git submodule, or will download it from PyPI if
    necessary.

    Parameters
    ----------
    path : str or None, optional
        A filesystem path relative to the root of the project's source code
        that should be added to `sys.path` so that `astropy_helpers` can be
        imported from that path.

        If the path is a git submodule it will automatically be initialized
        and/or updated.

        The path may also be to a ``.tar.gz`` archive of the astropy_helpers
        source distribution.  In this case the archive is automatically
        unpacked and made temporarily available on `sys.path` as a ``.egg``
        archive.

        If `None` skip straight to downloading.

    download_if_needed : bool, optional
        If the provided filesystem path is not found an attempt will be made to
        download astropy_helpers from PyPI.  It will then be made temporarily
        available on `sys.path` as a ``.egg`` archive (using the
        ``setup_requires`` feature of setuptools).  If the ``--offline`` option
        is given at the command line the value of this argument is overridden
        to `False`.

    index_url : str, optional
        If provided, use a different URL for the Python package index than the
        main PyPI server.

    use_git : bool, optional
        If `False` no git commands will be used--this effectively disables
        support for git submodules. If the ``--no-git`` option is given at the
        command line the value of this argument is overridden to `False`.

    auto_upgrade : bool, optional
        By default, when installing a package from a non-development source
        distribution ah_bootstrap will try to automatically check for patch
        releases to astropy-helpers on PyPI and use the patched version over
        any bundled versions.  Setting this to `False` will disable that
        functionality. If the ``--offline`` option is given at the command line
        the value of this argument is overridden to `False`.

    offline : bool, optional
        If `True` disable all actions that require an internet connection,
        including downloading packages from the package index and fetching
        updates to any git submodule.
    """
    global BOOTSTRAPPER

    # Start from the existing bootstrapper's configuration and layer the
    # caller's overrides on top of it
    config = BOOTSTRAPPER.config
    config.update(**kwargs)

    # Create a new bootstrapper with the updated configuration and run it
    BOOTSTRAPPER = _Bootstrapper(**config)
    BOOTSTRAPPER.run()
|
GBTSpectroscopyREPO_NAMEgbtpipePATH_START.@gbtpipe_extracted@gbtpipe-master@ah_bootstrap.py@.PATH_END.py
|
{
"filename": "_dtickrange.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/scattergl/marker/colorbar/tickformatstop/_dtickrange.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class DtickrangeValidator(_plotly_utils.basevalidators.InfoArrayValidator):
    """Validator for the ``dtickrange`` property of
    ``scattergl.marker.colorbar.tickformatstop``: a two-element
    ``[min, max]`` array of arbitrary values."""

    def __init__(
        self,
        plotly_name="dtickrange",
        parent_name="scattergl.marker.colorbar.tickformatstop",
        **kwargs,
    ):
        default_items = [
            {"editType": "calc", "valType": "any"},
            {"editType": "calc", "valType": "any"},
        ]
        super(DtickrangeValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=kwargs.pop("edit_type", "calc"),
            items=kwargs.pop("items", default_items),
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@scattergl@marker@colorbar@tickformatstop@_dtickrange.py@.PATH_END.py
|
{
"filename": "create_cylinder.py",
"repo_name": "andizq/sf3dmodels",
"repo_path": "sf3dmodels_extracted/sf3dmodels-master/sf3dmodels/create_cylinder.py",
"type": "Python"
}
|
from __future__ import print_function
from .Utils import *
import numpy as np
def velocity(M, R_l, r_seg, r):
    """Keplerian-like speed and velocity vector of a particle at *r*.

    Parameters
    ----------
    M : float
        Mass of the bigger star.
    R_l : array_like
        Position of the lower-mass star.
    r_seg : array_like
        Segment vector from the low-mass star towards the high-mass star.
    r : array_like
        Position of the particle.

    Returns
    -------
    speed, vel : float, np.ndarray
        The speed ``sqrt(G*M/d)`` (``d`` is the particle's distance to the
        high-mass star) and the velocity vector pointing from the particle
        towards the high-mass star.
    """
    # High mass position = low mass position + segment between both masses
    R_h = R_l + r_seg
    r_rel = np.array(R_h) - np.array(r)
    # Virial theorem: (1/2)*<T> = -(3/5)*<V_total> as t -> inf;
    # the 1/2 and 3/5 factors are neglected here.
    d = np.linalg.norm(r_rel)
    speed = np.sqrt(G * M / d)
    # Renamed from `dir`, which shadowed the `dir` builtin
    direction = r_rel / d
    vel = speed * direction
    return speed, vel  # returns speed, velocity
def segment_between_stars(M_pair, R_pair):
    """Given a pair of masses and their positions, return the position of the
    lower-mass star and the segment vector pointing from it towards the
    higher-mass star (ties go to the second star, as in the original)."""
    masses = np.array(M_pair)
    positions = np.array(R_pair)
    if masses[0] > masses[1]:
        i_high, i_low = 0, 1
    else:
        i_high, i_low = 1, 0
    # Segment forced to point towards the bigger mass
    r_seg = positions[i_high] - positions[i_low]
    return positions[i_low], r_seg
def make_cylinder(M_pair, DiskSizes, R_l, r_seg, drBIGGRID, width, function_R_rho, function_R_T,
                  vsys=0, abund=5e-8, gtd=100., name = 'cylinder0.dat'):
    """Monte-Carlo sample a gas cylinder joining the two stars (between the
    edges of their disks) and write one grid point per line to *name* with
    columns: id x y z rho T vx vy vz abund gtd.

    Velocities come from velocity() (free-fall towards the high-mass star,
    plus *vsys* added to vz); density and temperature are set by the
    user-supplied radial profiles function_R_rho(R) and function_R_T(R),
    where R is the cylindrical distance from the segment axis.

    Returns the length of the sampled segment (between disk borders).
    """
    # M is the bigger of the two masses; it sets the free-fall speed
    if M_pair[0] > M_pair[1]: M = M_pair[0]
    else: M = M_pair[1]
    Rdisk_l = DiskSizes[0]
    Rdisk_H = DiskSizes[1]
    drmax = drBIGGRID
    # Sample at 4x finer resolution than the global grid spacing
    dr = drmax/4.
    r_seg_mag = np.linalg.norm(r_seg) #Magnitude of the segment between low mass and high mass
    r_seg_dir = r_seg/r_seg_mag #Direction of the segment
    #Guess of the number of points to generate: (approximation to a rectangular region)
    # Number of divisions along the main segment,
    # times the number of divisions perpendicular to the segment,
    # times the number of divisions in the perpendicular to both segments above
    Npoints = int(r_seg_mag/dr * width/dr * width/dr)
    print ('Number of points generated:', Npoints)
    flag = True  # unused; leftover from the commented-out while loop below
    file = open(name,'w')
    x,y,z = (0,0,0)
    vx,vy,vz = (0,0,0)
    speed = 0
    vmean = []
    rho = 0
    T = 0
    for i in range(Npoints):
        r = np.random.uniform(Rdisk_l, r_seg_mag - Rdisk_H) #Random r from low_mass disk border until high_mass disk border
        R = np.random.uniform(0, width) #Random R from the segment
        r_vec = r * r_seg_dir #Random vector along the segment
        #a,b,c = r_vec
        rand_vec = np.random.uniform(-1,1,3) #Random vector to generate the perpendicular vector to the segment
        rand_vec = rand_vec/np.linalg.norm(rand_vec)
        rand_vec_plane = R * np.cross(r_seg_dir,rand_vec) #Perpendicular (random) vector to the segment
        Rl_r = r_vec + rand_vec_plane #Vector from low mass to the generated point in the cylinder
        r_real = R_l + Rl_r #Real position from the origin of coordinates
        x,y,z = r_real
        speed, (vx,vy,vz) = velocity(M,R_l,r_seg,r_real) #Velocity of the given r_real
        vmean.append(speed)
        vz = vz + vsys
        rho = function_R_rho(R) #Density of the given R
        T = function_R_T(R) #Temperature of the given R
        file.write('%d %e %e %e %e %e %e %e %e %e %e\n'%(i,x,y,z,rho,T,vx,vy,vz,abund,gtd))
    """
    while flag:
        x,y,z = np.random.uniform(0,500,3)
        #Equation of a plane with the normal unitary vector (a,b,c) in (x0,y0,z0):
        # f = a*(x-x0) + b*(y-y0) + c*(z-z0) = 0
    """
    file.close()
    vmean = np.sum( np.array(vmean) ) / len(vmean)
    print ("Mean tangential velocity:", vmean, "m/s")
    # NOTE(review): the inflow-rate estimate below uses `rho` from the LAST
    # sampled point only, not an average density -- confirm this is intended
    cross_sec = np.pi * width**2
    inflow_rate = vmean * (rho * 2*Mu) * cross_sec / MSun * yr
    print ("Mass inflow rate:", inflow_rate, "MSun/yr")
    return r_seg_mag
def make_outflow(pos_c, pos_f, r_min, dx, w, temp, dens, ionfrac, v0, r_max = 0, vsys=0, name = 'outflow.dat'):
    """Monte-Carlo sample a bipolar jet (Reynolds 1986 power-law model) and
    write one grid point per line to *name* with columns:
    id x y z ne T vx vy vz 0 0 (points are mirrored about pos_c).

    pos_c: Position of the outflow center
    pos_f: Final position of the outflow
    r_min: Inner radius where the power laws are normalised (r0)
    dx: Maximum separation between nodes of the Global Grid
    w, temp, dens, ionfrac: (value-at-r0, power-law exponent) pairs for the
        jet half-width, temperature, electron density and ionization fraction
    v0: Jet speed at r0 (the velocity exponent follows from mass conservation:
        qv = -2*w[1] - dens[1])
    Note: If r_max is set, pos_f could just store the vectorial direction of the outflow: pos_f = pos_i + dir.
    For example: pos_i = np.array([100*AU, 0, 0]), r_max = 2000 * AU, then an outflow pointing towards the Y direction should have
    pos_f = pos_i + np.array([0, 1, 0]). The equivalent solution (without setting r_max) would be pos_f = np.array([100*AU, 2000*AU, 0]).

    Returns the sampled segment length (one lobe, from r_min outwards).
    """
    #--------------------------
    #Jet Model (Reynolds 1986)
    #--------------------------
    r_min = float(r_min)
    r0 = r_min
    # Each quantity q(r) = q(r0) * (r/r0)**exponent; the c* constants fold
    # the r0 normalisation into a single coefficient
    cw = w[0] * r0**-w[1]
    def width(r): #Jet half-width
        return cw * r**w[1]
    cd = dens[0] * r0**-dens[1]
    def density(r): #Jet density
        return cd * r**dens[1]
    ct = temp[0] * r0**-temp[1]
    def temperature(r): #Jet temperature
        return ct * r**temp[1]
    qv = -2 * w[1] - dens[1]
    cv = v0 * r0**-qv
    def velocity(r): #Jet velocity
        return cv * r**qv
    ci = ionfrac[0] * r0**-ionfrac[1]
    def ionfraction(r): #Jet ionization fraction
        return ci * r**ionfrac[1]
    pos_c = np.array(pos_c)
    pos_f = np.array(pos_f)
    r_seg = pos_f - pos_c
    r_seg_mag = np.linalg.norm(r_seg) #Magnitude of the segment between low mass and high mass
    r_seg_dir = r_seg/r_seg_mag #Direction of the segment
    # Trim the inner r_min, or override the length entirely when r_max is set
    r_seg = r_seg - r_min * r_seg_dir
    if r_max: r_seg = (r_max - r_min) * r_seg_dir
    r_seg_mag = np.linalg.norm(r_seg) #Magnitude of the segment between low mass and high mass
    r_seg_dir = r_seg/r_seg_mag #Direction of the segment
    #Guess the number of random grid points to generate: (approximation to a rectangular region)
    # Number of divisions along the main segment,
    # times the number of divisions along an axis perpendicular to the segment,
    # times the number of divisions along an axis perpendicular to both of the segments above.
    drmax = dx
    dr = drmax/4.
    mean_w = width(0.5 * r_seg_mag)
    Npoints = int(r_seg_mag/dr * (mean_w/dr)**2 )
    print ('Number of grid points:', Npoints)
    flag = True  # unused; leftover from the commented-out while loop below
    file = open(name,'w')
    x,y,z = (0,0,0)
    xn,yn,zn = (-x,-y,-z)
    vx,vy,vz = (0,0,0)
    speed = 0
    vmean, nemean, Tmean = [], [], []
    rho = 0
    T = 0
    k = 0
    for i in range(Npoints):
        r = np.random.uniform(r_min, r_seg_mag) #Random r from low_mass disk border until high_mass disk border
        R = np.random.uniform(0, width(r)) #Random R from the segment
        r_vec = r * r_seg_dir #Random vector along the segment
        #a,b,c = r_vec
        rand_vec = np.random.uniform(-1,1,3) #Random vector to generate the perpendicular vector to the segment
        rand_vec = rand_vec / np.linalg.norm(rand_vec)
        rand_vec_plane = R * np.cross(r_seg_dir, rand_vec) #Perpendicular (random) vector to the segment
        r_c = r_vec + rand_vec_plane #Vector from the outflow center to the generated point in the outflow
        r_real = pos_c + r_c #Real position from the coordinates origin
        # Mirror point for the opposite jet lobe
        r_c_n = -r_vec + rand_vec_plane
        r_real_n = pos_c + r_c_n
        x,y,z = r_real
        xn,yn,zn = r_real_n #negative
        speed = velocity(r)
        (vx,vy,vz) = speed * r_seg_dir #Velocity of the given r_real
        vz = vz + vsys
        ne = density(r) #Electronic density of the given r
        T = temperature(r) #Temperature of the given r
        vmean.append(speed)
        nemean.append(ne)
        Tmean.append(T)
        xp,yp,zp = [x,xn],[y,yn],[z,zn]
        for j in range(2):
            k+=1
            file.write('%d %e %e %e %e %e %e %e %e %e %e\n'%(k,xp[j],yp[j],zp[j],ne,T,vx,vy,vz,0,0))
    """
    while flag:
        x,y,z = np.random.uniform(0,500,3)
        #Equation of a plane with normal unitary vector (a,b,c) in (x0,y0,z0):
        # f = a*(x-x0) + b*(y-y0) + c*(z-z0) = 0
    """
    file.close()
    vmean0 = np.sum( np.array(vmean) ) / len(vmean)
    netot = np.sum( np.array(nemean) )
    nemean0 = netot / len(nemean)
    # Density-weighted mean temperature
    Tmean0 = np.sum(np.array(nemean) * np.array(Tmean)) / netot
    print ("Mean tangential velocity: %.2f km/s"%(vmean0 * 1e-3) )
    print ("Mean density: %.2e e-/cm^3"%(nemean0 * 1e-6) )
    print ("Mean temperature: %.1f K"%(Tmean0) )
    cross_sec = np.pi * width(0.5 * r_seg_mag)**2
    # NOTE(review): `rho` is initialised to 0 and never reassigned in this
    # function (the loop computes `ne` instead), so inflow_rate below always
    # evaluates to 0 -- confirm whether nemean0 was intended here
    inflow_rate = vmean0 * (rho * 2*Mu) * cross_sec / MSun_yr
    print ("Mass inflow rate:", inflow_rate, "MSun/yr")
    print (vmean0, rho, cross_sec)
    return r_seg_mag
|
andizqREPO_NAMEsf3dmodelsPATH_START.@sf3dmodels_extracted@sf3dmodels-master@sf3dmodels@create_cylinder.py@.PATH_END.py
|
{
"filename": "_side.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/isosurface/colorbar/title/_side.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class SideValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    """Validator for the ``side`` property of ``isosurface.colorbar.title``;
    accepts one of "right", "top" or "bottom"."""

    def __init__(
        self, plotly_name="side", parent_name="isosurface.colorbar.title", **kwargs
    ):
        allowed_sides = ["right", "top", "bottom"]
        super(SideValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=kwargs.pop("edit_type", "calc"),
            values=kwargs.pop("values", allowed_sides),
            **kwargs,
        )
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@isosurface@colorbar@title@_side.py@.PATH_END.py
|
{
"filename": "filters.py",
"repo_name": "simonsobs/nemo",
"repo_path": "nemo_extracted/nemo-main/nemo/filters.py",
"type": "Python"
}
|
"""
This module defines several filter classes, together with a function that uses them to filter maps.
New base classes can be derived from the overall base class :obj:`MapFilter`. There are two main
classes of filter that are currently implemented - :obj:`MatchedFilter` and :obj:`RealSpaceMatchedFilter`.
There are also base classes corresponding to filters with different signal templates (:obj:`BeamFilter`,
:obj:`ArnaudModelFilter`, :obj:`BattagliaModelFilter`). The actual filters that can be used are
derived from these:
* :obj:`BeamMatchedFilter`
* :obj:`ArnaudModelMatchedFilter`
* :obj:`BattagliaModelMatchedFilter`
* :obj:`BeamRealSpaceMatchedFilter`
* :obj:`ArnaudModelRealSpaceMatchedFilter`
* :obj:`BattagliaModelRealSpaceMatchedFilter`
"""
import math
from pixell import enmap
from pixell import fft as enfft
from pixell import powspec
import astropy.wcs as enwcs
from astLib import *
import numpy as np
from numpy import fft
import pylab as plt
import os
from scipy import interpolate
from scipy import ndimage
import astropy.io.fits as pyfits
import astropy.stats as apyStats
import copy
import sys
import glob
import itertools
import nemo
from . import maps
from . import signals
from . import photometry
from . import catalogs
from . import plotSettings
from . import gnfw
from . import completeness
import astropy.table as atpy
import time
#-------------------------------------------------------------------------------------------------------------
def filterMaps(unfilteredMapsDictList, filterParams, tileName, diagnosticsDir = '.', \
               selFnDir = '.', verbose = True, undoPixelWindow = True, useCachedFilter = False, \
               returnFilter = False):
    """Builds and applies filters to the unfiltered map(s).

    Args:
        unfilteredMapsDictList (:obj:`list`): A list of dictionaries, each of which describes a map of the sky
            at some frequency (see :ref:`InputMaps`).
        filterParams (:obj:`dict`): A dictionary containing filter settings (see :ref:`Filters`).
        tileName (:obj:`str`): The name of the tile.
        diagnosticsDir (:obj:`str`, optional): Path to the `diagnostics` directory, where the filters may be
            written to disk.
        selFnDir (:obj:`str`, optional): Path to the `selFn` directory, where area masks may be written.
        verbose (:obj:`bool`, optional): If True, write information about progress to the terminal.
        undoPixelWindow (:obj:`bool`, optional): If True, undo the pixel window effect on the output filtered
            maps.
        useCachedFilter (:obj:`bool`, optional): If True, and a previously made filter is found, it will be
            read from disk, rather than re-calculated (used by source injection simulations).
        returnFilter (:obj:`bool`, optional): If True, the filter object is returned, as well as a dictionary
            containing the filtered map.

    Returns:
        A dictionary containing the filtered map in signal units, a signal-to-noise-map, area mask, WCS, and
        a label (for housekeeping).

    """
    f=filterParams
    label=f['label']+"#"+tileName
    print("... making filtered map %s" % (label))
    # NOTE(review): eval() resolves the filter class from the (trusted)
    # config file; do not feed this untrusted input
    filterClass=eval('%s' % (f['class']))
    filterObj=filterClass(f['label'], unfilteredMapsDictList, f['params'], tileName = tileName,
                          diagnosticsDir = diagnosticsDir, selFnDir = selFnDir)
    filteredMapDict=filterObj.buildAndApply(useCachedFilter = useCachedFilter)
    # Keywords we need for photometry later
    filteredMapDict['wcs'].header['BUNIT']=filteredMapDict['mapUnits']
    if 'beamSolidAngle_nsr' in filteredMapDict.keys() and filteredMapDict['beamSolidAngle_nsr'] > 0:
        filteredMapDict['wcs'].header['BEAMNSR']=filteredMapDict['beamSolidAngle_nsr']
        filteredMapDict['wcs'].header['FREQGHZ']=filteredMapDict['obsFreqGHz']
    filteredMapDict['wcs'].updateFromHeader()
    # Undo pixel window function using Sigurd's FFT method (takes into account variable pixel scale etc.)
    # We only need to do this for maps of signal (cancels in S/N map)
    # We do this once because it does take some time...
    # ... and then we can forget about if e.g. stacking or doing forced photometry later
    if undoPixelWindow == True:
        mask=np.equal(filteredMapDict['data'], 0)
        filteredMapDict['data']=enmap.apply_window(filteredMapDict['data'], pow=-1.0)
        filteredMapDict['data'][mask]=0 # just in case we rely elsewhere on zero == no data
    if returnFilter == True:
        return filteredMapDict, filterObj
    return filteredMapDict
#------------------------------------------------------------------------------------------------------------
class MapFilter(object):
"""Base class for Fourier space filters, defining a common interface.
Args:
label (:obj:`str`): Unique label for the filter, can be anything.
unfilteredMapsDictList (:obj:`list`): A list of dictionaries, each of which describes a map of the sky
at some frequency (see :ref:`InputMaps`).
paramsDict (:obj:`dict`): Dictionary of filter settings (see :ref:`Filters`).
tileName (:obj:`str`): Name of the tile in which the filter will be constructed.
writeFilter (:obj:`bool`): If `True`, save the filter to disk in the `diagnosticsDir` directory.
forceRebuild (:obj:`bool`): If `True`, rebuild the filter, even a previously calculated filter is
found cached to disk.
diagnosticsDir (:obj:`str`, optional): Path to the `diagnostics` directory, where the filters may be
written to disk.
selFnDir (:obj:`str`, optional): Path to the `selFn` directory, where area masks may be written.
Attributes:
label (:obj:`str`): Unique label for the filter, can be anything.
params (:obj:`dict`): Dictionary of filter settings (see :ref:`Filters`).
diagnosticsDir (:obj:`str`, optional): Path to the `diagnostics` directory, where the filters may be
written to disk.
selFnDir (:obj:`str`, optional): Path to the `selFn` directory, where area masks may be written.
tileName (:obj:`str`): Name of the map tile.
filterFileName (:obj:`str`): Path (under `diagnosticsDir`) where the cached filter is written.
unfilteredMapsDictList (:obj:`list`): A list of dictionaries, each of which describes a map of the sky
at some frequency (see :ref:`InputMaps`).
wcs (:obj:`astWCS.WCS`): Object that contains the map World Coordinate System.
shape (:obj:`tuple`): Dimensions of the map (height, width) in pixels.
beamSolidAnglesDict (:obj:`dict`): Dictionary, indexed by map frequency in GHz, holding the beam
solid angles in nano steradians. Used only for conversion of source amplitudes to flux
densities in Jy.
"""
    def __init__(self, label, unfilteredMapsDictList, paramsDict, tileName = 'PRIMARY', writeFilter = False,
                 forceRebuild = False, diagnosticsDir = None, selFnDir = None):
        """Set up the filter: preprocess the input maps, combine flag masks,
        read beam solid angles, and pre-compute the radians map used when
        making signal templates (see the class docstring for the attributes
        set here)."""
        self.label=label
        self.params=paramsDict
        self.diagnosticsDir=diagnosticsDir
        self.selFnDir=selFnDir
        self.tileName=tileName
        # Where a cached copy of this filter would live on disk
        self.filterFileName=self.diagnosticsDir+os.path.sep+self.tileName+os.path.sep+"filter_%s#%s.fits" % (self.label, self.tileName)
        # Prepare all the unfilteredMaps (in terms of cutting sections, masks etc.)
        # NOTE: This is a copy to deal with repeated runs (yes, it's necessary)
        self.unfilteredMapsDictList=[]
        for mapDict in unfilteredMapsDictList:
            # Optionally restrict to a single named map
            if 'mapToUse' in self.params.keys() and mapDict['label'] != self.params['mapToUse']:
                continue
            newMapDict=mapDict.copy()
            newMapDict.preprocess(tileName = tileName, diagnosticsDir = diagnosticsDir)
            self.unfilteredMapsDictList.append(newMapDict)
        self.wcs=self.unfilteredMapsDictList[0]['wcs']
        self.shape=self.unfilteredMapsDictList[0]['data'].shape
        # Combine flag masks (yes, we may need to think about this more...)
        # Each map's flagMask is multiplied by (index+1) so the source map
        # of a flag can be recovered from the combined value
        self.flagMask=np.zeros(self.shape, dtype = int)
        for mapDict, i in zip(self.unfilteredMapsDictList, range(len(self.unfilteredMapsDictList))):
            self.flagMask=self.flagMask+(mapDict['flagMask']*(i+1))
        # Get beam solid angle info (units: nanosteradians)... we'll need for fluxes in Jy later
        self.beamSolidAnglesDict={}
        for mapDict in self.unfilteredMapsDictList:
            if 'solidAngle_nsr' in mapDict.keys():
                solidAngle_nsr=mapDict['solidAngle_nsr']
            else:
                # Fall back to parsing a "solid angle = <value>" line out of
                # the beam profile text file; 0.0 if no such line is found
                beamFileName=mapDict['beamFileName']
                with open(beamFileName, "r") as inFile:
                    lines=inFile.readlines()
                foundLine=False
                for line in lines:
                    if line.find("solid angle") != -1:
                        foundLine=True
                        break
                if foundLine == True:
                    bits=line.split("=")
                    solidAngle_nsr=float(bits[1].split()[0])
                else:
                    solidAngle_nsr=0.0
            self.beamSolidAnglesDict[mapDict['obsFreqGHz']]=solidAngle_nsr
        # For pixell / enmap
        self.enwcs=self.wcs.AWCS
        # We could make this adjustable... added after switch to pixell
        self.apodPix=20
        # Check that all maps are the same dimensions
        shape=self.unfilteredMapsDictList[0]['data'].shape
        for mapDict in self.unfilteredMapsDictList:
            if mapDict['data'].shape != shape:
                raise Exception("Maps at different frequencies have different dimensions!")
        # This is used by routines that make signal templates
        self.makeRadiansMap()
        # This is a default - will get modified (calculated) by buildAndApply
        self.signalNorm=1.0
        self.fRelWeights={}
    def makeRadiansMap(self):
        """Makes a map of distance in radians from the centre of the map being filtered.

        Side effects: sets ``self.degPerPixX``, ``self.degPerPixY`` (pixel
        scales measured at the map centre) and ``self.radiansMap``.

        Returns:
            None
        """
        mapDict=self.unfilteredMapsDictList[0]
        # NOTE: int conversion for python3
        x0=int(mapDict['data'].shape[1]/2)
        y0=int(mapDict['data'].shape[0]/2)
        # Pixel scale from the angular separation of adjacent central pixels
        ra0, dec0=self.wcs.pix2wcs(x0, y0)
        ra1, dec1=self.wcs.pix2wcs(x0+1, y0+1)
        self.degPerPixX=astCoords.calcAngSepDeg(ra0, dec0, ra1, dec0)
        self.degPerPixY=astCoords.calcAngSepDeg(ra0, dec0, ra0, dec1)
        # Real space map of angular distance from centre in radians, used in making filters and beam
        # NOTE: floor and int conversion added for python3
        xRadRange=np.array([np.arange(int(np.floor(-mapDict['data'].shape[1]/2)), int(mapDict['data'].shape[1]/2), \
                                      dtype=np.float64)*np.radians(self.degPerPixX)]*mapDict['data'].shape[0])
        yRadRange=np.array([np.arange(int(np.floor(-mapDict['data'].shape[0]/2)), int(mapDict['data'].shape[0]/2), \
                                      dtype=np.float64)*np.radians(self.degPerPixY)]*mapDict['data'].shape[1]).transpose()
        rRadRange=np.sqrt(xRadRange**2+yRadRange**2)
        self.radiansMap=rRadRange
    def buildAndApply(self):
        """Builds and applies the filter to the unfiltered map(s).

        Returns:
            A dictionary, containing the listed keys:

            * data (:obj:`np.ndarray`): The filtered map, in signal units.
            * wcs (:obj:`astWCS.WCS`): WCS object for the map.
            * obsFreqGHz (:obj:`float`): The observing frequency (in GHz) for the map. This is set to `yc` if the output units are set to the central Comptonization parameter.
            * SNMap (:obj:`np.ndarray`): Signal-to-noise map.
            * surveyMask (:obj:`np.ndarray`): Survey mask, where pixels with value 1 indicate valid area that can be searched for objects.
            * mapUnits (:obj:`str`): Either `uK` (for ΔTemperature (μK) with respect to the CMB) or `yc` (for central Comptonization parameter).
            * beamSolidAngle_nsr (:obj:`float`): The beam solid angle in nanosteradians. This is only used for conversion of source amplitudes to flux densities in Jy.
            * label (:obj:`str`): User-defined label for the filter (see :ref:`Filters`).
            * tileName (:obj:`str`): Name of the tile this filtered map corresponds to.
        """
        # NOTE: Long lines above to avoid breaking formatting in sphinx
        # Abstract-method guard: subclasses must override buildAndApply()
        raise Exception("Called a base filter class without a buildAndApply() function implemented.")
        return None
    def makeForegroundsPower(self):
        """Returns 2d power spectrum in k-space, with the power set by a Planck-like CMB power spectrum.

        Returns:
            A 2d :obj:`np.ndarray` containing the noise power.
        """
        # CAMB power spec with Planck 2015 parameters (ish)
        tab=atpy.Table().read(nemo.__path__[0]+os.path.sep+"data"+os.path.sep+"planck_lensedCls.dat", format = 'ascii')
        # Convert from D_l-style to C_l-style normalisation: multiply by 2pi
        # and divide by l(l+1)
        tab['TT']=(tab['TT']*2*np.pi)/(tab['L']*(tab['L']+1))
        lmap=enmap.modlmap(self.unfilteredMapsDictList[0]['data'].shape, self.enwcs)
        # Interpolate C_l onto the 2d |l| map; zero outside the tabulated range
        l2p=interpolate.interp1d(tab['L'], tab['TT'], bounds_error=False, fill_value=0.0)
        fgPower=l2p(lmap)*lmap.shape[0]*lmap.shape[1]
        return fgPower
    def makeRealSpaceFilterProfile(self):
        """Makes a 1d real-space profile of the filter, with amplitude normalised to 1 at the frequency of the
        first map given in the unfiltered maps list.

        Returns:
            One dimensional profile (:obj:`np.ndarray`), corresponding angular range in arcmin (:obj:`np.ndarray`).
        """
        # NOTE(review): the indexing below assumes self.filt has shape
        # (numFreqs, y, x) -- confirm against where self.filt is built
        realSpace=fft.ifft2(self.filt).real
        realSpace=fft.fftshift(realSpace)
        # Changed for python3 - we may want to check this...
        # i.e., force to always be int after /2 without this
        x0=int(realSpace.shape[2]/2)
        y0=int(realSpace.shape[1]/2)
        # Arbitrarily normalise to 1 at the maximum (in whichever frequency that occurs)
        normFactor=abs(realSpace[:, y0, x0:]).max()
        prof=realSpace[:, y0, x0:]/normFactor
        #prof=realSpace[0, y0, x0:]/realSpace[0, y0, x0:].max() # normalise for plot
        arcminRange=np.arange(0, prof.shape[1])*self.degPerPixX*60.0
        return prof, arcminRange
    def saveRealSpaceFilterProfile(self):
        """Saves a real-space profile of the filter as a PNG plot under the `diagnosticsDir` directory.

        Returns:
            None
        """
        prof, arcminRange=self.makeRealSpaceFilterProfile()
        # Measure characteristic FWHM
        #tck=interpolate.splrep(arcminRange, prof)
        #FWHMArcmin=interpolate.splev([0.5], tck)*2 # *2 because otherwise would be half width
        plotSettings.update_rcParams()
        fig=plt.figure(figsize=(8,8))
        ax=plt.axes([0.14, 0.11, 0.835, 0.86])
        #fig.canvas.set_window_title('Filter Profile in Real Space')
        #plt.title("Filter Profile %s" % (self.label))
        plt.ylabel("Amplitude")
        plt.xlabel("$\\theta$ (arcmin)")
        # One curve per input map, labelled by frequency (or 'yc')
        for row, mapDict in zip(prof, self.unfilteredMapsDictList):
            if mapDict['obsFreqGHz'] is not None:
                label = '%d GHz' % (mapDict['obsFreqGHz'])
            elif mapDict['units'] == 'yc':
                label = 'yc'
            plt.plot(arcminRange, row, label = label)
        plt.xlim(0, 10.0)
        plt.ylim(prof.min(), prof.max()*1.1)
        plt.legend()
        plt.savefig(self.diagnosticsDir+os.path.sep+"realSpaceProfile1d_"+self.label+"#"+self.tileName+".png")
        plt.close()
        # Save 2D realspace filter image too
        #astImages.saveFITS(self.diagnosticsDir+os.path.sep+"realSpaceProfile2d_"+self.label+".fits", \
        #realSpace, wcs)
def makeNoiseMap(self, mapData):
"""Estimate the noise map using local measurements in grid cells, over the whole filtered map
(see :ref:`Filters` for the config file parameters that control this).
Args:
mapData (:obj:`np.ndarray`): A filtered map.
Returns:
Noise map (:obj:`np.ndarray`).
"""
# average all weight maps
# doesn't matter about spectral weighting, we just want areas with similar characteristics
# now using this for both 'smart' and original noise grid option
medWeights=[]
for mapDict in self.unfilteredMapsDictList:
medWeights.append(mapDict['weights'])
medWeights=np.median(np.array(medWeights), axis = 0)
# 'smart option' - measure noise in areas with similar weights (however weights defined)
if self.params['noiseParams']['noiseGridArcmin'] == "smart":
try:
numBins=self.params['noiseParams']['numNoiseBins']
except:
raise Exception("Need to give numNoiseBins in noiseParams when using noiseGridArcmin = 'smart'")
binEdges=np.linspace(medWeights.min(), medWeights.max(), numBins)
RMSMap=np.zeros(medWeights.shape, dtype = np.float32)
apodMask=np.not_equal(mapData, 0)
for i in range(len(binEdges)-1):
# Find area of similar weight
binMin=binEdges[i]
binMax=binEdges[i+1]
weightMask=np.logical_and(medWeights > binMin, medWeights < binMax)
# Measure noise in that area from filtered map, and fill in noise map
chunkValues=mapData[weightMask]
goodAreaMask=np.greater_equal(apodMask[weightMask], 1.0)
if 'RMSEstimator' in self.params['noiseParams'].keys() and self.params['noiseParams']['RMSEstimator'] == 'biweight':
if goodAreaMask.sum() >= 10:
# Astropy version is faster but gives identical results
chunkRMS=apyStats.biweight_scale(chunkValues[goodAreaMask], c = 9.0, modify_sample_size = True)
#chunkRMS=astStats.biweightScale(chunkValues[goodAreaMask], 6.0)
else:
chunkRMS=0.
elif 'RMSEstimator' in self.params['noiseParams'].keys() and self.params['noiseParams']['RMSEstimator'] == 'percentile':
chunkRMS=np.percentile(abs(chunkValues[goodAreaMask]), 68.3)
else:
# Default: 3-sigma clipped stdev
if np.not_equal(chunkValues, 0).sum() != 0:
goodAreaMask=np.greater_equal(apodMask[weightMask], 1.0)
chunkMean=np.mean(chunkValues[goodAreaMask])
chunkRMS=np.std(chunkValues[goodAreaMask])
sigmaClip=3.0
for c in range(10):
mask=np.less(abs(chunkValues), abs(chunkMean+sigmaClip*chunkRMS))
mask=np.logical_and(goodAreaMask, mask)
if mask.sum() > 0:
chunkMean=np.mean(chunkValues[mask])
chunkRMS=np.std(chunkValues[mask])
else:
chunkRMS=0.
if chunkRMS > 0:
RMSMap[weightMask]=chunkRMS
# The grid method now recognises numNoiseBins in cells
else:
# We may want to just bin and not grid
if 'noiseGridArcmin' not in self.params['noiseParams'].keys() or self.params['noiseParams']['noiseGridArcmin'] is None:
overlapPix=0
numXChunks=1
numYChunks=1
else: # The usual gridding
gridSize=int(round((self.params['noiseParams']['noiseGridArcmin']/60.)/self.wcs.getPixelSizeDeg()))
overlapPix=int(gridSize/2)
numXChunks=mapData.shape[1]/gridSize
numYChunks=mapData.shape[0]/gridSize
yChunks=np.linspace(0, mapData.shape[0], int(numYChunks+1), dtype = int)
xChunks=np.linspace(0, mapData.shape[1], int(numXChunks+1), dtype = int)
apodMask=np.not_equal(mapData, 0)
RMSMap=np.zeros(mapData.shape, dtype = np.float32)
# For this mode, interpreted as number of noise bins per cell
if 'numNoiseBins' in self.params['noiseParams'].keys():
numBins=self.params['noiseParams']['numNoiseBins']
else:
numBins=1
for i in range(len(yChunks)-1):
for k in range(len(xChunks)-1):
y0=yChunks[i]-overlapPix
y1=yChunks[i+1]+overlapPix
x0=xChunks[k]-overlapPix
x1=xChunks[k+1]+overlapPix
if y0 < 0:
y0=0
if y1 > mapData.shape[0]:
y1=mapData.shape[0]
if x0 < 0:
x0=0
if x1 > mapData.shape[1]:
x1=mapData.shape[1]
chunkValues=mapData[y0:y1, x0:x1]
goodAreaMask=np.greater_equal(apodMask[y0:y1, x0:x1], 1.0)
if goodAreaMask.sum() == 0:
continue
# Binning inside cell by weights - to handle sudden noise changes
weightValues=medWeights[y0:y1, x0:x1]
percentiles=np.arange(0, 100, 100/numBins)
binEdges=[]
for p in percentiles:
binEdges.append(np.percentile(weightValues[goodAreaMask], p))
binEdges.append(weightValues[goodAreaMask].max()+1e-6)
for b in range(len(binEdges)-1):
binMin=binEdges[b]
binMax=binEdges[b+1]
binMask=np.logical_and(weightValues >= binMin, weightValues < binMax)
binValues=chunkValues[binMask*goodAreaMask]
if 'RMSEstimator' in self.params['noiseParams'].keys() and self.params['noiseParams']['RMSEstimator'] == 'biweight':
if (binMask*goodAreaMask).sum() >= 10:
chunkRMS=apyStats.biweight_scale(binValues, c = 9.0, modify_sample_size = True)
else:
chunkRMS=0.
elif 'RMSEstimator' in self.params['noiseParams'].keys() and self.params['noiseParams']['RMSEstimator'] == 'percentile':
chunkRMS=np.percentile(abs(binValues), 68.3)
else:
# Default: 3-sigma clipped stdev
if np.not_equal(binValues, 0).sum() != 0:
chunkMean=np.mean(binValues)
chunkRMS=np.std(binValues)
sigmaClip=3.0
for c in range(10):
mask=np.less(abs(binValues), abs(chunkMean+sigmaClip*chunkRMS))
if mask.sum() > 0:
chunkMean=np.mean(binValues[mask])
chunkRMS=np.std(binValues[mask])
else:
chunkRMS=0.
if chunkRMS > 0:
RMSMap[y0:y1, x0:x1][binMask]=chunkRMS
return RMSMap
def loadFRelWeights(self):
"""Reads frequency weights used for relativistic corrections from the filter header.
Returns:
None
"""
with pyfits.open(self.filterFileName) as img:
self.fRelWeights={}
for i in range(1, 10):
if 'RW%d_GHZ' % (i) in img[0].header.keys():
freqGHz=img[0].header['RW%d_GHZ' % (i)]
self.fRelWeights[freqGHz]=img[0].header['RW%d' % (i)]
def makeSignalTemplateMap(self, beam, amplitude = None):
"""Makes a model signal template map. Shape parameters (if applicable) are taken from the object's
`params` attribute.
Args:
beam (:class:`nemo.signals.BeamProfile` or str): Either a :class:`nemo.signals.BeamProfile` object,
or a string that gives the path to a text file that describes the beam profile.
amplitude (:obj:`float`): Amplitude of the signal template.
Returns:
Model map (2d :obj:`np.ndarray`).
"""
raise Exception("Called a base filter class without a makeSignalTemplateMap() function implemented.")
return None
#------------------------------------------------------------------------------------------------------------
class MatchedFilter(MapFilter):
    """A multi-frequency matched filter, implemented in Fourier space. Derived from :class:`MapFilter`.

    """

    def buildAndApply(self, useCachedFilter = False):
        """Build the Fourier-space matched filter (or load a cached one from disk) and apply it
        to the maps in the ``unfilteredMapsDictList`` attribute.

        Args:
            useCachedFilter (:obj:`bool`, optional): If True, load a previously saved filter
                instead of rebuilding it (also forced when the filter file already exists).

        Returns:
            Dictionary containing the filtered map ('data'), S/N map ('SNMap'), noise map
            ('RMSMap'), survey mask, flag mask, units and related metadata.

        """
        # FFT the (apodized) input maps - one plane per frequency
        fMapsToFilter=[]
        for mapDict in self.unfilteredMapsDictList:
            fMapsToFilter.append(enmap.fft(enmap.apod(mapDict['data'], self.apodPix)))
        fMapsToFilter=np.array(fMapsToFilter)
        # NOTE: We've tidied up the config file, so we don't have to feed in surveyMask and psMask like this
        # (see startUp.parseConfig)
        surveyMask=self.unfilteredMapsDictList[0]['surveyMask']
        psMask=self.unfilteredMapsDictList[0]['pointSourceMask']
        if os.path.exists(self.filterFileName) == False and useCachedFilter == False:
            # Build the noise power estimate - one FFT per band
            fMapsForNoise=[]
            for i in range(len(self.unfilteredMapsDictList)):
                mapDict=self.unfilteredMapsDictList[i]
                d=mapDict['data']
                if self.params['noiseParams']['method'] == 'dataMap':
                    # Optionally subtract a model of known objects before estimating the noise
                    if 'noiseModelCatalog' in self.params.keys() and self.params['noiseModelCatalog'] is not None:
                        assert(type(self.params['noiseModelCatalog']) == list)
                        for noiseModelCatalog in self.params['noiseModelCatalog']:
                            model=maps.makeModelImage(d.shape, self.wcs, noiseModelCatalog,
                                                      mapDict['beamFileName'],
                                                      obsFreqGHz = mapDict['obsFreqGHz'])
                            if model is not None:
                                d=d-model
                    fMapsForNoise.append(enmap.fft(enmap.apod(d, self.apodPix)))
                elif self.params['noiseParams']['method'] == 'model':
                    # Assuming weights are actually inv var white noise level per pix
                    # (which they are for Sigurd's maps)
                    valid=np.nonzero(mapDict['weights'])
                    RMS=np.mean(1/np.sqrt(mapDict['weights'][valid]))
                    if RMS < 10.0: # Minimum level to stop this blowing up
                        RMS=10.0
                    # Seeds fixed so that outputs are the same on repeated runs
                    cmb=maps.simCMBMap(self.shape, self.wcs, beam = mapDict['beamFileName'],
                                       seed = 3141592654+i, noiseLevel = RMS)
                    fMapsForNoise.append(enmap.fft(enmap.apod(cmb, self.apodPix)))
                else:
                    raise Exception("'%s' is not a valid filter noise method name - fix the .yml config file" % (self.params['noiseParams']['method']))
            fMapsForNoise=np.array(fMapsForNoise)
            # Smoothing noise here is essential
            kernelSize=(3,3)
            # Cross-band noise covariance: NxN grid of 2d power maps
            noiseCov=[]
            for i in range(len(self.unfilteredMapsDictList)):
                iMap=self.unfilteredMapsDictList[i]
                row=[]
                for j in range(len(self.unfilteredMapsDictList)):
                    jMap=self.unfilteredMapsDictList[j]
                    if self.params['noiseParams']['method'] in ['dataMap', 'model']:
                        NP=np.real(fMapsForNoise[i]*fMapsForNoise[j].conj())
                    elif self.params['noiseParams']['method'] == 'max(dataMap,CMB)':
                        NP=np.real(fMapsForNoise[i]*fMapsForNoise[j].conj())
                        NPCMB=self.makeForegroundsPower() # This needs a beam convolution adding
                        NP=np.maximum.reduce([NP, NPCMB])
                    else:
                        raise Exception("Other noise models not yet re-implemented")
                    NP=ndimage.gaussian_filter(NP, kernelSize)
                    row.append(NP)
                noiseCov.append(row)
            del fMapsForNoise
            noiseCov=np.array(noiseCov)
            # Signal frequency weighting
            w=[]
            for mapDict in self.unfilteredMapsDictList:
                if mapDict['units'] != 'yc':
                    # Allows custom weighting in the config file
                    if 'specWeight' in mapDict.keys():
                        w.append(mapDict['specWeight'])
                    # Or standard options (e.g., SZ weighting)
                    else:
                        if self.params['outputUnits'] == 'yc':
                            w.append(signals.fSZ(mapDict['obsFreqGHz']))
                        elif self.params['outputUnits'] == 'uK':
                            # alpha = spectral index (e.g., -0.8 ish for AGN, +3.8 ish for dusty)
                            if 'alpha' in self.params.keys() and self.params['alpha'] is not None:
                                w.append(np.power(mapDict['obsFreqGHz']/self.unfilteredMapsDictList[0]['obsFreqGHz'],
                                                  self.params['alpha']) )
                            else:
                                w.append(1.0)
                        else:
                            raise Exception('need to specify "outputUnits" ("yc" or "uK") in filter params')
                else:
                    w.append(1.0) # For TILe-C: there should only be one map if input units are yc anyway...
            w=np.array(w)
            # Make FFTs of unit-normalised signal templates for each band
            signalMapsList=[]
            fSignalsArr=[]
            for mapDict in self.unfilteredMapsDictList:
                signalMap=self.makeSignalTemplateMap(mapDict['beamFileName'])
                fSignal=enmap.fft(signalMap)
                signalMapsList.append(signalMap)
                fSignalsArr.append(fSignal)
            fSignalsArr=np.array(fSignalsArr)
            # Build the filter itself - per-pixel inverse noise covariance weighting
            self.filt=np.zeros([len(self.unfilteredMapsDictList), self.shape[0], self.shape[1]], dtype = np.float32)
            for y in range(0, self.shape[0]):
                for x in range(0, self.shape[1]):
                    try:
                        self.filt[:, y, x]=np.dot(np.linalg.inv(noiseCov[:, :, y, x]), w*abs(fSignalsArr[:, y, x]))
                    # NOTE(review): bare except - skips pixels with a singular noise covariance
                    # (filter stays zero there), but would also hide any unrelated error
                    except:
                        continue
            del fSignalsArr
            del noiseCov
            # Use a map with known input signal to figure out how much it has been rolled off by
            if self.params['outputUnits'] == 'yc':
                # Normalise such that peak value in filtered map == y0, taking out the effect of the beam
                signalMaps=[]
                fSignalMaps=[]
                y0=2e-4
                for mapDict in self.unfilteredMapsDictList:
                    if mapDict['units'] == 'yc': # For handling TILe-C maps
                        signalMap=self.makeSignalTemplateMap(mapDict['beamFileName'], amplitude = y0)
                    else: # The normal case
                        deltaT0=maps.convertToDeltaT(y0, mapDict['obsFreqGHz'])
                        signalMap=self.makeSignalTemplateMap(mapDict['beamFileName'],
                                                             amplitude = deltaT0)
                    signalMap=enmap.apply_window(signalMap, pow=1.0) # Needed for clusters, 1.5% effect
                    signalMaps.append(signalMap)
                    fSignal=enmap.fft(signalMap)
                    fSignalMaps.append(fSignal)
                signalMaps=np.array(signalMaps)
                fSignalMaps=np.array(fSignalMaps)
                filteredSignal=self.applyFilter(fSignalMaps)
                # This is a 0.6% difference to the previous version
                cRADeg, cDecDeg=self.wcs.getCentreWCSCoords()
                cx, cy=self.wcs.wcs2pix(cRADeg, cDecDeg)
                mapInterpolator=interpolate.RectBivariateSpline(np.arange(filteredSignal.shape[0]),
                                                                np.arange(filteredSignal.shape[1]),
                                                                filteredSignal, kx = 3, ky = 3)
                peakFilteredSignal=mapInterpolator(cy, cx)[0][0]
                #peakFilteredSignal=filteredSignal.max() # Previous version
                self.signalNorm=y0/peakFilteredSignal
                # For relativistic corrections (see signals module)
                totalSignal=filteredSignal.flatten()[np.argmax(filteredSignal)]
                filteredSignalCube=np.real(enmap.ifft(fSignalMaps*self.filt, normalize = False))
                self.fRelWeights={}
                for filteredSignalPlane, mapDict in zip(filteredSignalCube, self.unfilteredMapsDictList):
                    freqGHz=mapDict['obsFreqGHz']
                    fRelWeight=filteredSignalPlane.flatten()[np.argmax(filteredSignal)]/totalSignal
                    self.fRelWeights[freqGHz]=fRelWeight
                del fSignalMaps
            elif self.params['outputUnits'] == 'uK':
                #if len(self.unfilteredMapsDictList) > 1:
                    #raise Exception("multi-frequency filtering not currently supported for outputUnits 'uK' (point source finding)")
                combinedObsFreqGHz=float(list(self.beamSolidAnglesDict.keys())[0]) # Make less clunky...
                signalMaps=[]
                fSignalMaps=[]
                for mapDict in self.unfilteredMapsDictList:
                    signalMap=self.makeSignalTemplateMap(mapDict['beamFileName'])
                    #signalMap=enmap.apply_window(signalMap, pow=1.0) # Tests with nemoModel confirm not needed here
                    signalMaps.append(signalMap)
                    fSignal=enmap.fft(signalMap)
                    fSignalMaps.append(fSignal)
                signalMaps=np.array(signalMaps)
                fSignalMaps=np.array(fSignalMaps)
                filteredSignal=self.applyFilter(fSignalMaps)
                self.signalNorm=1.0/filteredSignal.max()
                del fSignalMaps
            else:
                raise Exception('need to specify "outputUnits" ("yc" or "uK") in filter params')
        else:
            print("... loading cached filter")
            self.loadFilter()
            # Cached filter implies the diagnostics were already written on the first pass
            self.params['saveRMSMap']=False
            self.params['saveFilter']=False
            self.params['savePlots']=False
        # Apply filter
        filteredMap=self.applyFilter(fMapsToFilter)
        del fMapsToFilter
        # Units etc.
        if self.params['outputUnits'] == 'yc':
            mapUnits='yc'
            combinedObsFreqGHz='yc'
            beamSolidAngle_nsr=0.0 # not used for clusters...
        elif self.params['outputUnits'] == 'uK':
            #if len(self.unfilteredMapsDictList) > 1:
                #raise Exception("multi-frequency filtering not currently supported for outputUnits 'uK' (point source finding)")
            combinedObsFreqGHz=float(list(self.beamSolidAnglesDict.keys())[0]) # Make less clunky...
            mapUnits='uK'
            beamSolidAngle_nsr=self.beamSolidAnglesDict[combinedObsFreqGHz]
        else:
            raise Exception('need to specify "outputUnits" ("yc" or "uK") in filter params')
        # Apply the point source mask here (before noise estimates etc.)
        filteredMap=filteredMap*psMask
        # Make noise and S/N maps
        RMSMap=self.makeNoiseMap(filteredMap)
        validMask=np.greater(RMSMap, 0)
        SNMap=np.zeros(filteredMap.shape, dtype = np.float32)+filteredMap
        SNMap[validMask]=SNMap[validMask]/RMSMap[validMask]
        # Use rank filter to zap edges where RMS will be artificially low - we use a bit of a buffer here
        # NOTE: Now point source mask is applied above, we fill the holes back in here when finding edges
        # NOTE: This all works on maps which have a zero border. If they don't, edgeTrimArcmin has no effect
        if 'edgeTrimArcmin' in self.params.keys() and self.params['edgeTrimArcmin'] > 0:
            trimSizePix=int(round((self.params['edgeTrimArcmin']/60.)/self.wcs.getPixelSizeDeg()))
        elif 'noiseGridArcmin' in self.params['noiseParams'] and self.params['noiseParams']['noiseGridArcmin'] != "smart"\
            and self.params['noiseParams']['noiseGridArcmin'] is not None:
            gridSize=int(round((self.params['noiseParams']['noiseGridArcmin']/60.)/self.wcs.getPixelSizeDeg()))
            trimSizePix=int(round(gridSize*3.0))
        else:
            trimSizePix=0.0
        if trimSizePix > 0:
            edgeCheck=ndimage.rank_filter(abs(filteredMap+(1-psMask)), 0, size = (trimSizePix, trimSizePix))
            edgeCheck=np.array(np.greater(edgeCheck, 0), dtype = np.float32)
        else:
            edgeCheck=np.ones(filteredMap.shape)
        filteredMap=filteredMap*edgeCheck
        surveyMask=edgeCheck*surveyMask*psMask
        filteredMap=filteredMap*surveyMask # NOTE: Needed for 2-pass (I think)
        del edgeCheck
        # Just in case... we always want to trim the apodized region from the region searched
        # This has no effect if we're using a survey mask already
        # Doing this makes life easier when running tests that use small survey masks or go right to edge of tile otherwise
        apodMask=np.equal(enmap.apod(np.ones(filteredMap.shape), self.apodPix), 1)
        surveyMask=surveyMask*apodMask
        # Apply final survey mask to signal-to-noise map and RMS map
        # NOTE: need to avoid NaNs in here, otherwise map interpolation for e.g. S/N will fail later on
        # NOTE: we now save the mask after detecting objects, as we can detect rings around extremely
        # bright sources and add those to the mask there (see pipelines module)
        SNMap=SNMap*surveyMask
        SNMap[np.isnan(SNMap)]=0.
        RMSMap=RMSMap*surveyMask
        if 'savePlots' in self.params and self.params['savePlots'] == True:
            self.saveRealSpaceFilterProfile()
        if 'saveFilter' in self.params and self.params['saveFilter'] == True:
            # NOTE(review): fRelWeights is only set in the 'yc' build branch (or by loadFilter) -
            # saving a freshly built 'uK' filter here would hit an AttributeError; confirm
            img=pyfits.PrimaryHDU()
            img.header['SIGNORM']=self.signalNorm
            count=0
            for key in list(self.fRelWeights.keys()):
                count=count+1
                img.header['RW%d_GHZ' % (count)]=key
                img.header['RW%d' % (count)]=self.fRelWeights[key]
            img.data=self.filt
            # Just in case... saves having to fix this up elsewhere
            os.makedirs(os.path.split(self.filterFileName)[0], exist_ok = True)
            img.writeto(self.filterFileName, overwrite = True)
        # NOTE: What to do about frequency here? Generalise for non-SZ
        return {'data': filteredMap, 'wcs': self.wcs, 'obsFreqGHz': combinedObsFreqGHz, 'SNMap': SNMap,
                'RMSMap': RMSMap, 'surveyMask': surveyMask, 'flagMask': self.flagMask, 'mapUnits': mapUnits,
                'beamSolidAngle_nsr': beamSolidAngle_nsr, 'label': self.label, 'tileName': self.tileName}

    def loadFilter(self):
        """Loads in a previously saved filter.

        Returns:
            None

        """
        with pyfits.open(self.filterFileName) as img:
            self.filt=img[0].data.astype(np.float32)
            self.signalNorm=img[0].header['SIGNORM']
        # Relativistic-correction frequency weights live in the same header
        self.loadFRelWeights()

    def reshapeFilter(self, shape):
        """Uses linear interpolation to transform the filter to the given shape.

        Args:
            shape (:obj:`tuple` or :obj:`list`): Target shape - either 2d (ny, nx), in which case
                the number of frequency planes is taken from the current filter, or 3d.

        Returns:
            Reshaped filter (2d :obj:`np.ndarray`)

        """
        # If we feed in a 2d shape, make sure we add an axis to keep generalised to multi-frequency
        if len(shape) == 2:
            shape=[self.filt.shape[0], shape[0], shape[1]]
        assert(len(shape) == 3)
        # Map Fourier-space coordinates of the original grid onto pixel indices...
        lx, ly=enmap.laxes(self.unfilteredMapsDictList[0]['data'].shape, self.enwcs)
        lxToX=interpolate.interp1d(lx, np.arange(lx.shape[0]), fill_value = 'extrapolate')
        lyToY=interpolate.interp1d(ly, np.arange(ly.shape[0]), fill_value = 'extrapolate')
        # ... then evaluate where the target grid's Fourier modes land on the original grid
        lxOut, lyOut=enmap.laxes([shape[1], shape[2]], self.enwcs)
        xOut=lxToX(lxOut)
        yOut=lyToY(lyOut)
        reshapedFilt=np.zeros(shape, dtype = np.float32)
        for i in range(self.filt.shape[0]):
            filtInterp=interpolate.interp2d(np.arange(ly.shape[0]), np.arange(lx.shape[0]), self.filt[i])
            reshapedFilt[i]=filtInterp(yOut, xOut)
        return reshapedFilt

    def applyFilter(self, mapDataToFilter):
        """Apply the filter to the given map data (must be 3d array, i.e., a cube, with each plane
        corresponding to a different frequency). If the map data is not complex, it will be Fourier
        transformed. If the map data is not the same shape as the filter, the filter will be
        interpolated to match.

        Args:
            mapDataToFilter (:obj:`np.ndarray`): A 3d array, where each plane corresponds to a map
                at a different observed frequency. This must match with how the filter was defined
                (see :ref:`Filters` and :ref:`InputMaps`).

        Returns:
            Filtered map (2d :obj:`np.ndarray`)

        """
        # NOTE: need to check appropriate signalNorm after reshaping
        if mapDataToFilter.shape == self.filt.shape:
            filt=self.filt
        else:
            filt=self.reshapeFilter(mapDataToFilter.shape)
        # Already-complex input is assumed to be an FFT cube; real input is apodized and transformed
        if 'complex' in mapDataToFilter.dtype.name:
            fMapsToFilter=mapDataToFilter
        else:
            fMapsToFilter=enmap.fft(enmap.apod(mapDataToFilter, self.apodPix))
        # Sum over frequency planes after filtering in Fourier space
        filteredMap=np.real(enmap.ifft(fMapsToFilter*filt, normalize = False)).sum(axis = 0).astype(np.float32)
        # Optional additional high-pass filter
        if 'bckSub' in self.params.keys() and 'bckSubScaleArcmin' in self.params.keys() and self.params['bckSub'] == True:
            filteredMap=maps.subtractBackground(filteredMap, self.wcs, smoothScaleDeg = self.params['bckSubScaleArcmin']/60.)
        filteredMap=filteredMap*self.signalNorm
        return filteredMap
#------------------------------------------------------------------------------------------------------------
class RealSpaceMatchedFilter(MapFilter):
    """Makes a matched-filter kernel using the noise properties of a specified region of the map (e.g.,
    the deepest part) in Fourier space, which is converted into real space and truncated such that the
    kernel is small enough in footprint to be applied by direct convolution with the map in a (relatively)
    short amount of time. Derived from :class:`MapFilter`.

    """

    def loadFilter(self):
        """Loads in a previously saved filter kernel.

        Returns:
            None

        """
        with pyfits.open(self.filterFileName) as img:
            kern2d=img[0].data
            signalNorm=img[0].header['SIGNORM']
            # BCKSCALE is only written when bckSub was enabled at build time
            if 'BCKSCALE' in img[0].header.keys():
                bckSubScaleArcmin=img[0].header['BCKSCALE']
            else:
                bckSubScaleArcmin=0
            self.applyRACentre=img[0].header['APP_RA']
            self.applyDecCentre=img[0].header['APP_DEC']
        self.kern2d=kern2d
        self.signalNorm=signalNorm
        self.bckSubScaleArcmin=bckSubScaleArcmin

    def buildKernel(self, RADecSection, RADeg = 'centre', decDeg = 'centre'):
        """Builds the real space filter kernel.

        Args:
            RADecSection (:obj:`list`): List of coordinates (minimum RA, maximum RA,
                minimum declination, maximum declination) that defines the region of map
                from which the filter kernel will be constructed.
            RADeg (:obj:`str` or :obj:`float`, optional): Coordinate at which the kernel
                pixel scale will be determined (use `centre` for the middle of the map
                section).
            decDeg (:obj:`str` or :obj:`float`, optional): Coordinate at which the kernel
                pixel scale will be determined (use `centre` for the middle of the map
                section).

        Returns:
            None

        """
        # A cached kernel on disk short-circuits the whole build
        if os.path.exists(self.filterFileName) == True:
            return self.loadFilter()
        wcs=self.wcs
        # Build the matched-filter kernel in a small section of the map
        # Apply the same difference of Gaussians high pass filter here
        # NOTE: we could merge 'bckSubScaleArcmin' and 'maxArcmin' keys here!
        #mapDict['bckSubScaleArcmin']=maxArcmin
        keysWanted=['mapFileName', 'weights', 'weightsFileName', 'obsFreqGHz', 'units', 'beamFileName', 'addNoise',
                    'pointSourceRemoval', 'weightsType', 'tileName', 'reprojectToTan']
        kernelUnfilteredMapsDictList=[]
        for mapDict in self.unfilteredMapsDictList:
            # NOTE: this strips unwanted keys from the shared mapDict objects in place
            for key in list(mapDict.keys()):
                if key not in keysWanted:
                    del mapDict[key]
            kernelUnfilteredMapsDict=maps.MapDict(mapDict, tileCoordsDict = mapDict.tileCoordsDict)
            kernelUnfilteredMapsDict['RADecSection']=RADecSection
            kernelUnfilteredMapsDictList.append(kernelUnfilteredMapsDict)
        kernelLabel="realSpaceKernel_%s" % (self.label)
        matchedFilterDir=self.diagnosticsDir+os.path.sep+kernelLabel+"#"+self.tileName
        diagnosticsDir=matchedFilterDir+os.path.sep+'diagnostics'
        selFnDir=matchedFilterDir+os.path.sep+'selFn'
        for d in [matchedFilterDir, diagnosticsDir, selFnDir]:
            if os.path.exists(d) == False:
                os.makedirs(d, exist_ok = True)
        # NOTE(review): eval() on a config-supplied class name - fine for trusted configs,
        # but executes arbitrary expressions if the config is untrusted
        matchedFilterClass=eval(self.params['noiseParams']['matchedFilterClass'])
        matchedFilter=matchedFilterClass(kernelLabel, kernelUnfilteredMapsDictList, self.params,
                                         tileName = mapDict['tileName'],
                                         diagnosticsDir = matchedFilterDir+os.path.sep+'diagnostics',
                                         selFnDir = matchedFilterDir+os.path.sep+'selFn')
        filteredMapDict=matchedFilter.buildAndApply()
        # Turn the matched filter into a smaller real space convolution kernel
        # This means we have to roll off the kernel to 0 at some radius
        # This is set by maxArcmin in the config file
        kernelMaxArcmin=self.params['noiseParams']['kernelMaxArcmin']
        prof, arcminRange=matchedFilter.makeRealSpaceFilterProfile()
        rIndex=np.where(arcminRange > kernelMaxArcmin)[0][0]
        mask=np.less(arcminRange, kernelMaxArcmin)
        # Kernel can be either fully 2d, or be azimuthally averaged... in the ACTPol E-D56 paper, we used the latter
        if 'symmetrize' in self.params['noiseParams'].keys() and self.params['noiseParams']['symmetrize'] == True:
            rRadians=np.radians(arcminRange/60.)
            profile2d=[]
            for i in range(prof.shape[0]):
                r2p=interpolate.interp1d(rRadians[mask], prof[i, mask], bounds_error=False, fill_value=0.0)
                profile2d.append(r2p(matchedFilter.radiansMap))
            profile2d=np.array(profile2d)
        else:
            profile2d=fft.ifft2(matchedFilter.filt).real
            profile2d=fft.fftshift(profile2d)
        # z is not needed here - just because we switched to multi-freq throughout
        z, y, x=np.where(abs(profile2d) == abs(profile2d).max())
        #y, x=np.where(profile2d[0] == profile2d[0].max())
        y=y[0]
        x=x[0]
        # Cut a square of half-width rIndex around the kernel peak, forcing odd dimensions
        yMin=y-rIndex
        yMax=y+rIndex
        xMin=x-rIndex
        xMax=x+rIndex
        if (yMax-yMin) % 2 == 0:
            yMin=yMin+1
        if (xMax-xMin) % 2 == 0:
            xMin=xMin+1
        self.kern2d=profile2d[:, yMin:yMax, xMin:xMax]
        kern2dRadiansMap=matchedFilter.radiansMap[yMin:yMax, xMin:xMax]
        # This is what to high pass filter on
        if 'bckSubScaleArcmin' in self.params.keys():
            self.bckSubScaleArcmin=self.params['bckSubScaleArcmin']
        else:
            # This is here just so we can reproduce old results where this was done automatically
            # Now we have to fiddle about a bit to check the sign of the filter
            # (depends on spectral response)
            # We're only considering the first frequency given in a set of multi-freq maps
            if np.greater(prof[0, 0], 0) == True:
                func=np.min
            else:
                func=np.max
            self.bckSubScaleArcmin=arcminRange[prof[0] == func(prof[0])][0]
        # Use a map with known input signal to figure out how much it has been rolled off by:
        # 1. The high pass filter (bck sub step)
        # 2. The matched filter itself (includes beam)
        # NOTE: This is SZ-specific again and needs generalising
        signalMaps=[]
        for mapDict in self.unfilteredMapsDictList:
            # This has been made more complicated because of TILe-C
            if self.params['outputUnits'] == 'yc':
                y0=2e-4
                if mapDict['obsFreqGHz'] is not None:
                    # Normal case
                    deltaT0=maps.convertToDeltaT(y0, mapDict['obsFreqGHz'])
                    signalMap=self.makeSignalTemplateMap(mapDict['beamFileName'],
                                                         amplitude = deltaT0)
                else:
                    # TILe-C case
                    signalMap=self.makeSignalTemplateMap(mapDict['beamFileName'], amplitude = y0)
            elif self.params['outputUnits'] == 'uK':
                signalMap=self.makeSignalTemplateMap(mapDict['beamFileName'])
            else:
                raise Exception('need to specify "outputUnits" ("yc" or "uK") in filter params')
            signalMaps.append(signalMap)
        signalMaps=np.array(signalMaps)
        filteredSignal=self.applyFilter(signalMaps, calcFRelWeights = True)
        if self.params['outputUnits'] == 'yc':
            # Normalise such that peak value in filtered map == y0, taking out the effect of the beam
            self.signalNorm=y0/filteredSignal.max()
        elif self.params['outputUnits'] == 'uK':
            self.signalNorm=1.0/filteredSignal.max()
        else:
            raise Exception('need to specify "outputUnits" ("yc" or "uK") in filter params')
        # Save 2d kernel - we need this (at least for the photometry ref scale) to calc Q later
        # Add bckSubScaleArcmin to the header
        kernWCS=wcs.copy()
        if self.params['bckSub'] == True:
            kernWCS.header['BCKSCALE']=self.bckSubScaleArcmin
        kernWCS.header['SIGNORM']=self.signalNorm
        kernWCS.header['APP_RA']=self.applyRACentre
        kernWCS.header['APP_DEC']=self.applyDecCentre
        count=0
        for key in list(self.fRelWeights.keys()):
            count=count+1
            kernWCS.header['RW%d_GHZ' % (count)]=key
            kernWCS.header['RW%d' % (count)]=self.fRelWeights[key]
        kernWCS.header['NEMOVER']=nemo.__version__
        maps.saveFITS(self.filterFileName, self.kern2d, kernWCS)
        # Filter profile plot
        # Save the stuff we plot first, in case we want to make a plot with multiple filters on later
        np.savez(self.diagnosticsDir+os.path.sep+"filterProf1D_%s#%s.npz" % (self.label, self.tileName),
                 arcminRange = arcminRange, prof = prof, mask = mask,
                 bckSubScaleArcmin = self.bckSubScaleArcmin)
        plotSettings.update_rcParams()
        plt.figure(figsize=(9,6.5))
        ax=plt.axes([0.13, 0.12, 0.86, 0.86])
        #plt.tick_params(axis='both', which='major', labelsize=15)
        #plt.tick_params(axis='both', which='minor', labelsize=15)
        for row, mapDict in zip(prof, self.unfilteredMapsDictList):
            tck=interpolate.splrep(arcminRange[mask], row[mask])
            plotRange=np.linspace(0, arcminRange[mask].max(), 1000)
            #plt.plot(arcminRange[mask], prof[mask])
            if mapDict['obsFreqGHz'] is not None:
                label = '%d GHz' % (mapDict['obsFreqGHz'])
            elif mapDict['units'] == 'yc':
                label = 'yc'
            plt.plot(plotRange, interpolate.splev(plotRange, tck), '-', label = label)
        plt.xlabel("$\\theta$ (arcmin)")
        plt.ylabel("Amplitude")
        plt.legend()
        #plt.title(self.label)
        #plt.plot(arcminRange[mask], [0]*len(arcminRange[mask]), 'k--')
        plt.xlim(0, arcminRange[mask].max())
        if self.params['bckSub'] == True:
            plt.plot([self.bckSubScaleArcmin]*3, np.linspace(-1.2, 1.2, 3), 'k--')
        plt.ylim(-1.2, 0.2)
        plt.savefig(self.diagnosticsDir+os.path.sep+"filterPlot1D_%s#%s.pdf" % (self.label, self.tileName))
        plt.close()

    def buildAndApply(self, useCachedFilter = False):
        """Build the real-space kernel (or load a cached one) and apply it by direct convolution
        to the maps in the ``unfilteredMapsDictList`` attribute.

        Args:
            useCachedFilter (:obj:`bool`, optional): Accepted for interface compatibility; the
                cached kernel is used whenever the filter file already exists on disk.

        Returns:
            Dictionary containing the filtered map ('data'), S/N map ('SNMap'), noise map
            ('RMSMap'), survey mask, flag mask, units and related metadata.

        """
        surveyMask=self.unfilteredMapsDictList[0]['surveyMask']
        psMask=self.unfilteredMapsDictList[0]['pointSourceMask']
        if os.path.exists(self.filterFileName) == False:
            # Noise region to use
            RAMin, RAMax, decMin, decMax=self.wcs.getImageMinMaxWCSCoords()
            if self.params['noiseParams']['RADecSection'] == 'tileNoiseRegions':
                RADecSection=[self.wcs.header['NRAMIN'], self.wcs.header['NRAMAX'],
                              self.wcs.header['NDEMIN'], self.wcs.header['NDEMAX']]
            elif self.params['noiseParams']['RADecSection'] == 'auto':
                # 4 x 4 deg box (at the equator) centred on the tile
                cRA, cDec=self.wcs.getCentreWCSCoords()
                halfSizeDeg=2.0
                nRAMin=cRA-halfSizeDeg/np.cos(np.radians(cDec))
                nRAMax=cRA+halfSizeDeg/np.cos(np.radians(cDec))
                nDecMin=cDec-halfSizeDeg
                nDecMax=cDec+halfSizeDeg
                RADecSection=[nRAMin, nRAMax, nDecMin, nDecMax]
            else:
                RADecSection=self.params['noiseParams']['RADecSection']
            self.applyDecCentre=(decMax+decMin)/2.
            self.applyRACentre=(RAMax+RAMin)/2.
            # Build kernel
            self.buildKernel(RADecSection, RADeg = self.applyRACentre, decDeg = self.applyDecCentre)
        else:
            self.loadFilter()
        # Apply kernel
        mapDataToFilter=[]
        for mapDict in self.unfilteredMapsDictList:
            mapDataToFilter.append(mapDict.loadTile('mapFileName', tileName = self.tileName))
        mapDataToFilter=np.array(mapDataToFilter)
        filteredMap=self.applyFilter(mapDataToFilter)
        # Apply the point source mask here (before noise estimates etc.)
        filteredMap=filteredMap*psMask
        # Make noise and S/N maps
        RMSMap=self.makeNoiseMap(filteredMap)
        validMask=np.greater(RMSMap, 0)
        SNMap=np.zeros(filteredMap.shape, dtype = np.float32)+filteredMap
        SNMap[validMask]=SNMap[validMask]/RMSMap[validMask]
        # Units etc.
        if self.params['outputUnits'] == 'yc':
            mapUnits='yc'
            combinedObsFreqGHz='yc'
            beamSolidAngle_nsr=0.0 # not used for clusters...
        elif self.params['outputUnits'] == 'uK':
            if len(self.unfilteredMapsDictList) > 1:
                raise Exception("multi-frequency filtering not currently supported for outputUnits 'uK' (point source finding)")
            combinedObsFreqGHz=float(list(self.beamSolidAnglesDict.keys())[0]) # Make less clunky...
            mapUnits='uK'
            beamSolidAngle_nsr=self.beamSolidAnglesDict[combinedObsFreqGHz]
        else:
            raise Exception('need to specify "outputUnits" ("yc" or "uK") in filter params')
        # Use rank filter to zap edges where RMS will be artificially low - we use a bit of a buffer here
        # NOTE: Now point source mask is applied above, we fill the holes back in here when finding edges
        if 'edgeTrimArcmin' in self.params.keys():
            trimSizePix=int(round((self.params['edgeTrimArcmin']/60.)/self.wcs.getPixelSizeDeg()))
        else:
            # NOTE(review): unlike MatchedFilter.buildAndApply, this branch does not guard against
            # noiseGridArcmin being 'smart' or None before the arithmetic - confirm config combinations
            gridSize=int(round((self.params['noiseParams']['noiseGridArcmin']/60.)/self.wcs.getPixelSizeDeg()))
            trimSizePix=int(round(gridSize*3.0))
        if trimSizePix > 0:
            edgeCheck=ndimage.rank_filter(abs(filteredMap+(1-psMask)), 0, size = (trimSizePix, trimSizePix))
        else:
            edgeCheck=np.ones(filteredMap.shape)
        edgeCheck=np.array(np.greater(edgeCheck, 0), dtype = float)
        filteredMap=filteredMap*edgeCheck
        surveyMask=edgeCheck*surveyMask*psMask
        del edgeCheck
        # Just in case... we always want to trim the apodized region from the region searched
        # This has no effect if we're using a survey mask already
        # Doing this makes life easier when running tests that use small survey masks or go right to edge of tile otherwise
        apodMask=np.equal(enmap.apod(np.ones(filteredMap.shape), self.apodPix), 1)
        surveyMask=surveyMask*apodMask
        # Apply final survey mask to signal-to-noise map and RMS map
        # NOTE: need to avoid NaNs in here, otherwise map interpolation for e.g. S/N will fail later on
        SNMap=SNMap*surveyMask
        SNMap[np.isnan(SNMap)]=0.
        RMSMap=RMSMap*surveyMask
        return {'data': filteredMap, 'wcs': self.wcs, 'obsFreqGHz': combinedObsFreqGHz, 'SNMap': SNMap,
                'RMSMap': RMSMap, 'surveyMask': surveyMask, 'flagMask': self.flagMask, 'mapUnits': mapUnits,
                'beamSolidAngle_nsr': beamSolidAngle_nsr, 'label': self.label, 'tileName': self.tileName}

    def applyFilter(self, mapDataToFilter, calcFRelWeights = False):
        """Apply the filter to the given map data (must be 3d array, i.e., a cube, with each plane
        corresponding to a different frequency).

        Args:
            mapDataToFilter (:obj:`np.ndarray`): A 3d array, where each plane corresponds to a map
                at a different observed frequency. This must match with how the filter was defined
                (see :ref:`Filters` and :ref:`InputMaps`).
            calcFRelWeights (:obj:`bool`, optional): This should *only* be set to `True` if this
                routine is being applied to an ideal signal map.

        Returns:
            Filtered map (2d :obj:`np.ndarray`)

        """
        # Apply the high pass filter - subtract background on larger scales using difference of Gaussians
        filteredMap=np.zeros(mapDataToFilter.shape, dtype = np.float32)
        if self.params['bckSub'] == True and self.bckSubScaleArcmin > 0:
            for i in range(mapDataToFilter.shape[0]):
                filteredMap[i]=maps.subtractBackground(mapDataToFilter[i], self.wcs,
                                                       RADeg = self.applyRACentre,
                                                       decDeg = self.applyDecCentre,
                                                       smoothScaleDeg = self.bckSubScaleArcmin/60.)
        else:
            filteredMap=filteredMap+mapDataToFilter
        # Apply the kernel
        for i in range(filteredMap.shape[0]):
            filteredMap[i]=ndimage.convolve(filteredMap[i], self.kern2d[i])
        # For relativistic corrections (see signals module)
        if calcFRelWeights == True:
            self.fRelWeights={}
            maxIndex=np.argmax(filteredMap.sum(axis = 0))
            totalSignal=filteredMap.sum(axis = 0).flatten()[maxIndex]
            for filteredSignalPlane, mapDict in zip(filteredMap, self.unfilteredMapsDictList):
                freqGHz=mapDict['obsFreqGHz']
                fRelWeight=filteredSignalPlane.flatten()[maxIndex]/totalSignal
                self.fRelWeights[freqGHz]=fRelWeight
        # Sum over frequency planes
        filteredMap=filteredMap.sum(axis = 0).astype(np.float32)
        # Apply the normalisation
        filteredMap=filteredMap*self.signalNorm
        return filteredMap
#------------------------------------------------------------------------------------------------------------
class BeamFilter(MapFilter):
    """Base class for filters that model the signal using ACT-style beam profile files.
    Derived from :class:`MapFilter`.

    """

    def makeSignalTemplateMap(self, beamFileName, amplitude = None):
        # Point-source template: the beam profile painted onto this filter's pixel grid
        degreesMap = np.degrees(self.radiansMap)
        return signals.makeBeamModelSignalMap(degreesMap, self.wcs, beamFileName,
                                              amplitude = amplitude)
#------------------------------------------------------------------------------------------------------------
class ArnaudModelFilter(MapFilter):
    """Base class for filters using the GNFW profile as described in
    `Arnaud et al. (2010) <https://ui.adsabs.harvard.edu/abs/2010A%26A...517A..92A/abstract>`_.
    Derived from :class:`MapFilter`.

    """

    def makeSignalTemplateMap(self, beamFileName, amplitude = None):
        # Cluster template centred on the middle of this filter's tile
        centreRADeg, centreDecDeg = self.wcs.getCentreWCSCoords()
        return signals.makeArnaudModelSignalMap(self.params['z'],
                                                self.params['M500MSun'],
                                                self.shape, self.wcs,
                                                beam = beamFileName,
                                                RADeg = centreRADeg,
                                                decDeg = centreDecDeg,
                                                GNFWParams = self.params['GNFWParams'],
                                                amplitude = amplitude,
                                                convolveWithBeam = True)
#------------------------------------------------------------------------------------------------------------
class BattagliaModelFilter(MapFilter):
    """Base class for filters using the GNFW profile as described in
    `Battaglia et al. (2012) <https://ui.adsabs.harvard.edu/abs/2012ApJ...758...75B/abstract>`_.
    Derived from :class:`MapFilter`.

    Note:
        This is the same as :class:`ArnaudModelFilter` (it is still a GNFW profile), but has non-self-similar
        evolution with redshift, which is accounted for here. The convention for how the GNFW parameters are
        defined follows `Arnaud et al. (2010) <https://ui.adsabs.harvard.edu/abs/2010A%26A...517A..92A/abstract>`_,
        rather than `Battaglia et al. (2012) <https://ui.adsabs.harvard.edu/abs/2012ApJ...758...75B/abstract>`_.

    """

    def makeSignalTemplateMap(self, beamFileName, amplitude = None):
        # Cluster template centred on the middle of this filter's tile
        centreRADeg, centreDecDeg = self.wcs.getCentreWCSCoords()
        return signals.makeBattagliaModelSignalMap(self.params['z'],
                                                   self.params['M500MSun'],
                                                   self.shape, self.wcs,
                                                   beam = beamFileName,
                                                   RADeg = centreRADeg,
                                                   decDeg = centreDecDeg,
                                                   GNFWParams = self.params['GNFWParams'],
                                                   amplitude = amplitude,
                                                   convolveWithBeam = True)
#------------------------------------------------------------------------------------------------------------
# Definitions of actual filters that can be used
class ArnaudModelMatchedFilter(MatchedFilter, ArnaudModelFilter):
    """Fourier-space multi-frequency matched filter implementation, using the GNFW profile as described in
    `Arnaud et al. (2010) <https://ui.adsabs.harvard.edu/abs/2010A%26A...517A..92A/abstract>`_.

    Derived from :class:`MatchedFilter` and :class:`ArnaudModelFilter`.

    """
    pass
class BattagliaModelMatchedFilter(MatchedFilter, BattagliaModelFilter):
    """Fourier-space multi-frequency matched filter implementation, using the GNFW profile as described in
    `Battaglia et al. (2012) <https://ui.adsabs.harvard.edu/abs/2012ApJ...758...75B/abstract>`_.

    Derived from :class:`MatchedFilter` and :class:`BattagliaModelFilter`.

    Note:
        This is the same as :class:`ArnaudModelFilter` (it is still a GNFW profile), but has non-self-similar
        evolution with redshift, which is accounted for here. The convention for how the GNFW parameters are
        defined follows `Arnaud et al. (2010) <https://ui.adsabs.harvard.edu/abs/2010A%26A...517A..92A/abstract>`_,
        rather than `Battaglia et al. (2012) <https://ui.adsabs.harvard.edu/abs/2012ApJ...758...75B/abstract>`_.

    """
    pass
class BeamMatchedFilter(MatchedFilter, BeamFilter):
    """Fourier-space multi-frequency matched filter implementation using ACT-style beam profile files.

    Derived from :class:`MatchedFilter` and :class:`BeamFilter`.

    """
    pass
class ArnaudModelRealSpaceMatchedFilter(RealSpaceMatchedFilter, ArnaudModelFilter):
    """Real-space filter kernel, built using the GNFW profile as described in
    `Arnaud et al. (2010) <https://ui.adsabs.harvard.edu/abs/2010A%26A...517A..92A/abstract>`_.

    Derived from :class:`RealSpaceMatchedFilter` and :class:`ArnaudModelFilter`.

    """
    pass
class BattagliaModelRealSpaceMatchedFilter(RealSpaceMatchedFilter, BattagliaModelFilter):
    """Real-space filter kernel, built using the GNFW profile as described in
    `Battaglia et al. (2012) <https://ui.adsabs.harvard.edu/abs/2012ApJ...758...75B/abstract>`_.

    Derived from :class:`RealSpaceMatchedFilter` and :class:`BattagliaModelFilter`.

    """
    pass
class BeamRealSpaceMatchedFilter(RealSpaceMatchedFilter, BeamFilter):
    """Real-space filter kernel, built using ACT-style beam profile files.

    Derived from :class:`RealSpaceMatchedFilter` and :class:`BeamFilter`.

    """
    pass
|
simonsobsREPO_NAMEnemoPATH_START.@nemo_extracted@nemo-main@nemo@filters.py@.PATH_END.py
|
{
"filename": "trainable_segmentation.py",
"repo_name": "scikit-image/scikit-image",
"repo_path": "scikit-image_extracted/scikit-image-main/skimage/future/trainable_segmentation.py",
"type": "Python"
}
|
from skimage.feature import multiscale_basic_features

# scikit-learn is an optional dependency: record its availability so that
# TrainableSegmenter can fail with a helpful message only when it is needed.
try:
    from sklearn.exceptions import NotFittedError
    from sklearn.ensemble import RandomForestClassifier
    has_sklearn = True
except ImportError:
    has_sklearn = False
    # Stand-in exception so NotFittedError can still be raised/caught without sklearn.
    class NotFittedError(Exception):
        pass
class TrainableSegmenter:
    """Estimator for classifying pixels.

    Parameters
    ----------
    clf : classifier object, optional
        classifier object, exposing a ``fit`` and a ``predict`` method as in
        scikit-learn's API, for example an instance of
        ``RandomForestClassifier`` or ``LogisticRegression`` classifier.
    features_func : function, optional
        function computing features on all pixels of the image, to be passed
        to the classifier. The output should be of shape
        ``(*labels.shape, m_features)``. If None,
        :func:`skimage.feature.multiscale_basic_features` is used.

    Methods
    -------
    compute_features
    fit
    predict
    """

    def __init__(self, clf=None, features_func=None):
        if clf is None:
            if has_sklearn:
                self.clf = RandomForestClassifier(n_estimators=100, n_jobs=-1)
            else:
                # Fixed: the two literals previously concatenated without a space
                # ("...instanceto TrainableSegmenter.").
                raise ImportError(
                    "Please install scikit-learn or pass a classifier instance "
                    "to TrainableSegmenter."
                )
        else:
            self.clf = clf
        self.features_func = features_func

    def compute_features(self, image):
        """Compute and store the feature array for ``image`` on ``self.features``.

        Falls back to :func:`skimage.feature.multiscale_basic_features` when no
        ``features_func`` was supplied; the chosen function is remembered so
        ``fit`` and ``predict`` use the same features.
        """
        if self.features_func is None:
            self.features_func = multiscale_basic_features
        self.features = self.features_func(image)

    def fit(self, image, labels):
        """Train classifier using partially labeled (annotated) image.

        Parameters
        ----------
        image : ndarray
            Input image, which can be grayscale or multichannel, and must have a
            number of dimensions compatible with ``self.features_func``.
        labels : ndarray of ints
            Labeled array of shape compatible with ``image`` (same shape for a
            single-channel image). Labels >= 1 correspond to the training set and
            label 0 to unlabeled pixels to be segmented.
        """
        self.compute_features(image)
        fit_segmenter(labels, self.features, self.clf)

    def predict(self, image):
        """Segment new image using trained internal classifier.

        Parameters
        ----------
        image : ndarray
            Input image, which can be grayscale or multichannel, and must have a
            number of dimensions compatible with ``self.features_func``.

        Raises
        ------
        NotFittedError if ``self.clf`` has not been fitted yet (use ``self.fit``).
        """
        if self.features_func is None:
            self.features_func = multiscale_basic_features
        features = self.features_func(image)
        return predict_segmenter(features, self.clf)
def fit_segmenter(labels, features, clf):
    """Segmentation using labeled parts of the image and a classifier.

    Parameters
    ----------
    labels : ndarray of ints
        Image of labels. Labels >= 1 correspond to the training set and
        label 0 to unlabeled pixels to be segmented.
    features : ndarray
        Array of features, with the last dimension corresponding to the number
        of features, and the other dimensions correspond to ``labels.shape``.
    clf : classifier object
        classifier object, exposing a ``fit`` and a ``predict`` method as in
        scikit-learn's API, for example an instance of
        ``RandomForestClassifier`` or ``LogisticRegression`` classifier.

    Returns
    -------
    clf : classifier object
        classifier trained on ``labels``
    """
    # Train only on annotated pixels; label 0 means "unlabeled". The boolean
    # mask has labels.shape, so the feature axis must be the LAST dimension
    # for features[mask] to select per-pixel feature vectors.
    mask = labels > 0
    training_data = features[mask]
    training_labels = labels[mask].ravel()
    clf.fit(training_data, training_labels)
    return clf
def predict_segmenter(features, clf):
    """Segmentation of images using a pretrained classifier.

    Parameters
    ----------
    features : ndarray
        Array of features, with the last dimension corresponding to the number
        of features, and the other dimensions are compatible with the shape of
        the image to segment, or a flattened image.
    clf : classifier object
        trained classifier object, exposing a ``predict`` method as in
        scikit-learn's API, for example an instance of
        ``RandomForestClassifier`` or ``LogisticRegression`` classifier. The
        classifier must be already trained, for example with
        :func:`skimage.future.fit_segmenter`.

    Returns
    -------
    output : ndarray
        Labeled array, built from the prediction of the classifier.

    Raises
    ------
    NotFittedError
        If ``clf`` has not been fitted yet (train it first, for example with
        :func:`fit_segmenter`).
    """
    sh = features.shape
    if features.ndim > 2:
        # Flatten spatial dimensions: the classifier expects (n_samples, n_features).
        features = features.reshape((-1, sh[-1]))
    try:
        predicted_labels = clf.predict(features)
    except NotFittedError:
        # Fixed: the two literals previously concatenated without a separator
        # ("...`clf` firstfor example...").
        raise NotFittedError(
            "You must train the classifier `clf` first, "
            "for example with the `fit_segmenter` function."
        )
    except ValueError as err:
        if err.args and 'x must consist of vectors of length' in err.args[0]:
            raise ValueError(
                err.args[0]
                + '\n'
                + "Maybe you did not use the same type of features for training the classifier."
            )
        else:
            # Bare raise preserves the original traceback.
            raise
    # Restore the spatial shape of the prediction.
    output = predicted_labels.reshape(sh[:-1])
    return output
|
scikit-imageREPO_NAMEscikit-imagePATH_START.@scikit-image_extracted@scikit-image-main@skimage@future@trainable_segmentation.py@.PATH_END.py
|
{
"filename": "test_astradb.py",
"repo_name": "langchain-ai/langchain",
"repo_path": "langchain_extracted/langchain-master/libs/community/tests/integration_tests/vectorstores/test_astradb.py",
"type": "Python"
}
|
"""
Test of Astra DB vector store class `AstraDB`
Required to run this test:
- a recent `astrapy` Python package available
- an Astra DB instance;
- the two environment variables set:
export ASTRA_DB_API_ENDPOINT="https://<DB-ID>-us-east1.apps.astra.datastax.com"
export ASTRA_DB_APPLICATION_TOKEN="AstraCS:........."
- optionally this as well (otherwise defaults are used):
export ASTRA_DB_KEYSPACE="my_keyspace"
"""
import json
import math
import os
from typing import Iterable, List
import pytest
from langchain_core.documents import Document
from langchain_core.embeddings import Embeddings
from langchain_community.vectorstores import AstraDB
# Ad-hoc embedding classes:
class SomeEmbeddings(Embeddings):
    """
    Turn a sentence into an embedding vector in some way.
    Not important how. It is deterministic is all that counts.
    """

    def __init__(self, dimension: int) -> None:
        self.dimension = dimension

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        return [self.embed_query(text) for text in texts]

    async def aembed_documents(self, texts: List[str]) -> List[List[float]]:
        return self.embed_documents(texts)

    def embed_query(self, text: str) -> List[float]:
        # Character codes of the first `dimension` chars, padded with a 1 and
        # trailing zeros, truncated to `dimension`, then L2-normalised.
        codes = [ord(ch) for ch in text[: self.dimension]]
        padded = codes + [1] + [0] * (self.dimension - 1 - len(codes))
        raw = padded[: self.dimension]
        scale = sum(component * component for component in raw) ** 0.5
        return [component / scale for component in raw]

    async def aembed_query(self, text: str) -> List[float]:
        return self.embed_query(text)
class ParserEmbeddings(Embeddings):
    """
    Parse input texts: if they are json for a List[float], fine.
    Otherwise, return all zeros and call it a day.
    """

    def __init__(self, dimension: int) -> None:
        self.dimension = dimension

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        return [self.embed_query(text) for text in texts]

    async def aembed_documents(self, texts: List[str]) -> List[List[float]]:
        return self.embed_documents(texts)

    def embed_query(self, text: str) -> List[float]:
        try:
            parsed = json.loads(text)
            assert len(parsed) == self.dimension
        except Exception:
            # Anything unparseable (or of the wrong length) maps to the zero vector.
            print(f'[ParserEmbeddings] Returning a moot vector for "{text}"')  # noqa: T201
            return [0.0] * self.dimension
        return parsed

    async def aembed_query(self, text: str) -> List[float]:
        return self.embed_query(text)
def _has_env_vars() -> bool:
return all(
[
"ASTRA_DB_APPLICATION_TOKEN" in os.environ,
"ASTRA_DB_API_ENDPOINT" in os.environ,
]
)
@pytest.fixture(scope="function")
def store_someemb() -> Iterable[AstraDB]:
emb = SomeEmbeddings(dimension=2)
v_store = AstraDB(
embedding=emb,
collection_name="lc_test_s",
token=os.environ["ASTRA_DB_APPLICATION_TOKEN"],
api_endpoint=os.environ["ASTRA_DB_API_ENDPOINT"],
namespace=os.environ.get("ASTRA_DB_KEYSPACE"),
)
yield v_store
v_store.delete_collection()
@pytest.fixture(scope="function")
def store_parseremb() -> Iterable[AstraDB]:
emb = ParserEmbeddings(dimension=2)
v_store = AstraDB(
embedding=emb,
collection_name="lc_test_p",
token=os.environ["ASTRA_DB_APPLICATION_TOKEN"],
api_endpoint=os.environ["ASTRA_DB_API_ENDPOINT"],
namespace=os.environ.get("ASTRA_DB_KEYSPACE"),
)
yield v_store
v_store.delete_collection()
@pytest.mark.requires("astrapy")
@pytest.mark.skipif(not _has_env_vars(), reason="Missing Astra DB env. vars")
class TestAstraDB:
def test_astradb_vectorstore_create_delete(self) -> None:
"""Create and delete."""
emb = SomeEmbeddings(dimension=2)
# creation by passing the connection secrets
v_store = AstraDB(
embedding=emb,
collection_name="lc_test_1",
token=os.environ["ASTRA_DB_APPLICATION_TOKEN"],
api_endpoint=os.environ["ASTRA_DB_API_ENDPOINT"],
namespace=os.environ.get("ASTRA_DB_KEYSPACE"),
)
v_store.delete_collection()
# Creation by passing a ready-made astrapy client:
from astrapy.db import AstraDB as LibAstraDB
astra_db_client = LibAstraDB(
token=os.environ["ASTRA_DB_APPLICATION_TOKEN"],
api_endpoint=os.environ["ASTRA_DB_API_ENDPOINT"],
namespace=os.environ.get("ASTRA_DB_KEYSPACE"),
)
v_store_2 = AstraDB(
embedding=emb,
collection_name="lc_test_2",
astra_db_client=astra_db_client,
)
v_store_2.delete_collection()
async def test_astradb_vectorstore_create_delete_async(self) -> None:
"""Create and delete."""
emb = SomeEmbeddings(dimension=2)
# creation by passing the connection secrets
v_store = AstraDB(
embedding=emb,
collection_name="lc_test_1_async",
token=os.environ["ASTRA_DB_APPLICATION_TOKEN"],
api_endpoint=os.environ["ASTRA_DB_API_ENDPOINT"],
namespace=os.environ.get("ASTRA_DB_KEYSPACE"),
)
await v_store.adelete_collection()
# Creation by passing a ready-made astrapy client:
from astrapy.db import AsyncAstraDB
astra_db_client = AsyncAstraDB(
token=os.environ["ASTRA_DB_APPLICATION_TOKEN"],
api_endpoint=os.environ["ASTRA_DB_API_ENDPOINT"],
namespace=os.environ.get("ASTRA_DB_KEYSPACE"),
)
v_store_2 = AstraDB(
embedding=emb,
collection_name="lc_test_2_async",
async_astra_db_client=astra_db_client,
)
await v_store_2.adelete_collection()
def test_astradb_vectorstore_pre_delete_collection(self) -> None:
"""Create and delete."""
emb = SomeEmbeddings(dimension=2)
# creation by passing the connection secrets
v_store = AstraDB(
embedding=emb,
collection_name="lc_test_pre_del",
token=os.environ["ASTRA_DB_APPLICATION_TOKEN"],
api_endpoint=os.environ["ASTRA_DB_API_ENDPOINT"],
namespace=os.environ.get("ASTRA_DB_KEYSPACE"),
)
try:
v_store.add_texts(
texts=["aa"],
metadatas=[
{"k": "a", "ord": 0},
],
ids=["a"],
)
res1 = v_store.similarity_search("aa", k=5)
assert len(res1) == 1
v_store = AstraDB(
embedding=emb,
pre_delete_collection=True,
collection_name="lc_test_pre_del",
token=os.environ["ASTRA_DB_APPLICATION_TOKEN"],
api_endpoint=os.environ["ASTRA_DB_API_ENDPOINT"],
namespace=os.environ.get("ASTRA_DB_KEYSPACE"),
)
res1 = v_store.similarity_search("aa", k=5)
assert len(res1) == 0
finally:
v_store.delete_collection()
async def test_astradb_vectorstore_pre_delete_collection_async(self) -> None:
"""Create and delete."""
emb = SomeEmbeddings(dimension=2)
# creation by passing the connection secrets
v_store = AstraDB(
embedding=emb,
collection_name="lc_test_pre_del_async",
token=os.environ["ASTRA_DB_APPLICATION_TOKEN"],
api_endpoint=os.environ["ASTRA_DB_API_ENDPOINT"],
namespace=os.environ.get("ASTRA_DB_KEYSPACE"),
)
try:
await v_store.aadd_texts(
texts=["aa"],
metadatas=[
{"k": "a", "ord": 0},
],
ids=["a"],
)
res1 = await v_store.asimilarity_search("aa", k=5)
assert len(res1) == 1
v_store = AstraDB(
embedding=emb,
pre_delete_collection=True,
collection_name="lc_test_pre_del_async",
token=os.environ["ASTRA_DB_APPLICATION_TOKEN"],
api_endpoint=os.environ["ASTRA_DB_API_ENDPOINT"],
namespace=os.environ.get("ASTRA_DB_KEYSPACE"),
)
res1 = await v_store.asimilarity_search("aa", k=5)
assert len(res1) == 0
finally:
await v_store.adelete_collection()
def test_astradb_vectorstore_from_x(self) -> None:
"""from_texts and from_documents methods."""
emb = SomeEmbeddings(dimension=2)
# from_texts
v_store = AstraDB.from_texts(
texts=["Hi", "Ho"],
embedding=emb,
collection_name="lc_test_ft",
token=os.environ["ASTRA_DB_APPLICATION_TOKEN"],
api_endpoint=os.environ["ASTRA_DB_API_ENDPOINT"],
namespace=os.environ.get("ASTRA_DB_KEYSPACE"),
)
try:
assert v_store.similarity_search("Ho", k=1)[0].page_content == "Ho"
finally:
v_store.delete_collection()
# from_documents
v_store_2 = AstraDB.from_documents(
[
Document(page_content="Hee"),
Document(page_content="Hoi"),
],
embedding=emb,
collection_name="lc_test_fd",
token=os.environ["ASTRA_DB_APPLICATION_TOKEN"],
api_endpoint=os.environ["ASTRA_DB_API_ENDPOINT"],
namespace=os.environ.get("ASTRA_DB_KEYSPACE"),
)
try:
assert v_store_2.similarity_search("Hoi", k=1)[0].page_content == "Hoi"
finally:
v_store_2.delete_collection()
async def test_astradb_vectorstore_from_x_async(self) -> None:
"""from_texts and from_documents methods."""
emb = SomeEmbeddings(dimension=2)
# from_texts
v_store = await AstraDB.afrom_texts(
texts=["Hi", "Ho"],
embedding=emb,
collection_name="lc_test_ft_async",
token=os.environ["ASTRA_DB_APPLICATION_TOKEN"],
api_endpoint=os.environ["ASTRA_DB_API_ENDPOINT"],
namespace=os.environ.get("ASTRA_DB_KEYSPACE"),
)
try:
assert (await v_store.asimilarity_search("Ho", k=1))[0].page_content == "Ho"
finally:
await v_store.adelete_collection()
# from_documents
v_store_2 = await AstraDB.afrom_documents(
[
Document(page_content="Hee"),
Document(page_content="Hoi"),
],
embedding=emb,
collection_name="lc_test_fd_async",
token=os.environ["ASTRA_DB_APPLICATION_TOKEN"],
api_endpoint=os.environ["ASTRA_DB_API_ENDPOINT"],
namespace=os.environ.get("ASTRA_DB_KEYSPACE"),
)
try:
assert (await v_store_2.asimilarity_search("Hoi", k=1))[
0
].page_content == "Hoi"
finally:
await v_store_2.adelete_collection()
def test_astradb_vectorstore_crud(self, store_someemb: AstraDB) -> None:
"""Basic add/delete/update behaviour."""
res0 = store_someemb.similarity_search("Abc", k=2)
assert res0 == []
# write and check again
store_someemb.add_texts(
texts=["aa", "bb", "cc"],
metadatas=[
{"k": "a", "ord": 0},
{"k": "b", "ord": 1},
{"k": "c", "ord": 2},
],
ids=["a", "b", "c"],
)
res1 = store_someemb.similarity_search("Abc", k=5)
assert {doc.page_content for doc in res1} == {"aa", "bb", "cc"}
# partial overwrite and count total entries
store_someemb.add_texts(
texts=["cc", "dd"],
metadatas=[
{"k": "c_new", "ord": 102},
{"k": "d_new", "ord": 103},
],
ids=["c", "d"],
)
res2 = store_someemb.similarity_search("Abc", k=10)
assert len(res2) == 4
# pick one that was just updated and check its metadata
res3 = store_someemb.similarity_search_with_score_id(
query="cc", k=1, filter={"k": "c_new"}
)
print(str(res3)) # noqa: T201
doc3, score3, id3 = res3[0]
assert doc3.page_content == "cc"
assert doc3.metadata == {"k": "c_new", "ord": 102}
assert score3 > 0.999 # leaving some leeway for approximations...
assert id3 == "c"
# delete and count again
del1_res = store_someemb.delete(["b"])
assert del1_res is True
del2_res = store_someemb.delete(["a", "c", "Z!"])
assert del2_res is True # a non-existing ID was supplied
assert len(store_someemb.similarity_search("xy", k=10)) == 1
# clear store
store_someemb.clear()
assert store_someemb.similarity_search("Abc", k=2) == []
# add_documents with "ids" arg passthrough
store_someemb.add_documents(
[
Document(page_content="vv", metadata={"k": "v", "ord": 204}),
Document(page_content="ww", metadata={"k": "w", "ord": 205}),
],
ids=["v", "w"],
)
assert len(store_someemb.similarity_search("xy", k=10)) == 2
res4 = store_someemb.similarity_search("ww", k=1, filter={"k": "w"})
assert res4[0].metadata["ord"] == 205
async def test_astradb_vectorstore_crud_async(self, store_someemb: AstraDB) -> None:
"""Basic add/delete/update behaviour."""
res0 = await store_someemb.asimilarity_search("Abc", k=2)
assert res0 == []
# write and check again
await store_someemb.aadd_texts(
texts=["aa", "bb", "cc"],
metadatas=[
{"k": "a", "ord": 0},
{"k": "b", "ord": 1},
{"k": "c", "ord": 2},
],
ids=["a", "b", "c"],
)
res1 = await store_someemb.asimilarity_search("Abc", k=5)
assert {doc.page_content for doc in res1} == {"aa", "bb", "cc"}
# partial overwrite and count total entries
await store_someemb.aadd_texts(
texts=["cc", "dd"],
metadatas=[
{"k": "c_new", "ord": 102},
{"k": "d_new", "ord": 103},
],
ids=["c", "d"],
)
res2 = await store_someemb.asimilarity_search("Abc", k=10)
assert len(res2) == 4
# pick one that was just updated and check its metadata
res3 = await store_someemb.asimilarity_search_with_score_id(
query="cc", k=1, filter={"k": "c_new"}
)
print(str(res3)) # noqa: T201
doc3, score3, id3 = res3[0]
assert doc3.page_content == "cc"
assert doc3.metadata == {"k": "c_new", "ord": 102}
assert score3 > 0.999 # leaving some leeway for approximations...
assert id3 == "c"
# delete and count again
del1_res = await store_someemb.adelete(["b"])
assert del1_res is True
del2_res = await store_someemb.adelete(["a", "c", "Z!"])
assert del2_res is False # a non-existing ID was supplied
assert len(await store_someemb.asimilarity_search("xy", k=10)) == 1
# clear store
await store_someemb.aclear()
assert await store_someemb.asimilarity_search("Abc", k=2) == []
# add_documents with "ids" arg passthrough
await store_someemb.aadd_documents(
[
Document(page_content="vv", metadata={"k": "v", "ord": 204}),
Document(page_content="ww", metadata={"k": "w", "ord": 205}),
],
ids=["v", "w"],
)
assert len(await store_someemb.asimilarity_search("xy", k=10)) == 2
res4 = await store_someemb.asimilarity_search("ww", k=1, filter={"k": "w"})
assert res4[0].metadata["ord"] == 205
@staticmethod
def _v_from_i(i: int, N: int) -> str:
angle = 2 * math.pi * i / N
vector = [math.cos(angle), math.sin(angle)]
return json.dumps(vector)
def test_astradb_vectorstore_mmr(self, store_parseremb: AstraDB) -> None:
"""
MMR testing. We work on the unit circle with angle multiples
of 2*pi/20 and prepare a store with known vectors for a controlled
MMR outcome.
"""
i_vals = [0, 4, 5, 13]
N_val = 20
store_parseremb.add_texts(
[self._v_from_i(i, N_val) for i in i_vals],
metadatas=[{"i": i} for i in i_vals],
)
res1 = store_parseremb.max_marginal_relevance_search(
self._v_from_i(3, N_val),
k=2,
fetch_k=3,
)
res_i_vals = {doc.metadata["i"] for doc in res1}
assert res_i_vals == {0, 4}
async def test_astradb_vectorstore_mmr_async(
self, store_parseremb: AstraDB
) -> None:
"""
MMR testing. We work on the unit circle with angle multiples
of 2*pi/20 and prepare a store with known vectors for a controlled
MMR outcome.
"""
i_vals = [0, 4, 5, 13]
N_val = 20
await store_parseremb.aadd_texts(
[self._v_from_i(i, N_val) for i in i_vals],
metadatas=[{"i": i} for i in i_vals],
)
res1 = await store_parseremb.amax_marginal_relevance_search(
self._v_from_i(3, N_val),
k=2,
fetch_k=3,
)
res_i_vals = {doc.metadata["i"] for doc in res1}
assert res_i_vals == {0, 4}
def test_astradb_vectorstore_metadata(self, store_someemb: AstraDB) -> None:
"""Metadata filtering."""
store_someemb.add_documents(
[
Document(
page_content="q",
metadata={"ord": ord("q"), "group": "consonant"},
),
Document(
page_content="w",
metadata={"ord": ord("w"), "group": "consonant"},
),
Document(
page_content="r",
metadata={"ord": ord("r"), "group": "consonant"},
),
Document(
page_content="e",
metadata={"ord": ord("e"), "group": "vowel"},
),
Document(
page_content="i",
metadata={"ord": ord("i"), "group": "vowel"},
),
Document(
page_content="o",
metadata={"ord": ord("o"), "group": "vowel"},
),
]
)
# no filters
res0 = store_someemb.similarity_search("x", k=10)
assert {doc.page_content for doc in res0} == set("qwreio")
# single filter
res1 = store_someemb.similarity_search(
"x",
k=10,
filter={"group": "vowel"},
)
assert {doc.page_content for doc in res1} == set("eio")
# multiple filters
res2 = store_someemb.similarity_search(
"x",
k=10,
filter={"group": "consonant", "ord": ord("q")},
)
assert {doc.page_content for doc in res2} == set("q")
# excessive filters
res3 = store_someemb.similarity_search(
"x",
k=10,
filter={"group": "consonant", "ord": ord("q"), "case": "upper"},
)
assert res3 == []
# filter with logical operator
res4 = store_someemb.similarity_search(
"x",
k=10,
filter={"$or": [{"ord": ord("q")}, {"ord": ord("r")}]},
)
assert {doc.page_content for doc in res4} == {"q", "r"}
def test_astradb_vectorstore_similarity_scale(
self, store_parseremb: AstraDB
) -> None:
"""Scale of the similarity scores."""
store_parseremb.add_texts(
texts=[
json.dumps([1, 1]),
json.dumps([-1, -1]),
],
ids=["near", "far"],
)
res1 = store_parseremb.similarity_search_with_score(
json.dumps([0.5, 0.5]),
k=2,
)
scores = [sco for _, sco in res1]
sco_near, sco_far = scores
assert abs(1 - sco_near) < 0.001 and abs(sco_far) < 0.001
async def test_astradb_vectorstore_similarity_scale_async(
self, store_parseremb: AstraDB
) -> None:
"""Scale of the similarity scores."""
await store_parseremb.aadd_texts(
texts=[
json.dumps([1, 1]),
json.dumps([-1, -1]),
],
ids=["near", "far"],
)
res1 = await store_parseremb.asimilarity_search_with_score(
json.dumps([0.5, 0.5]),
k=2,
)
scores = [sco for _, sco in res1]
sco_near, sco_far = scores
assert abs(1 - sco_near) < 0.001 and abs(sco_far) < 0.001
def test_astradb_vectorstore_massive_delete(self, store_someemb: AstraDB) -> None:
"""Larger-scale bulk deletes."""
M = 50
texts = [str(i + 1 / 7.0) for i in range(2 * M)]
ids0 = ["doc_%i" % i for i in range(M)]
ids1 = ["doc_%i" % (i + M) for i in range(M)]
ids = ids0 + ids1
store_someemb.add_texts(texts=texts, ids=ids)
# deleting a bunch of these
del_res0 = store_someemb.delete(ids0)
assert del_res0 is True
# deleting the rest plus a fake one
del_res1 = store_someemb.delete(ids1 + ["ghost!"])
assert del_res1 is True # ensure no error
# nothing left
assert store_someemb.similarity_search("x", k=2 * M) == []
def test_astradb_vectorstore_drop(self) -> None:
"""behaviour of 'delete_collection'."""
collection_name = "lc_test_d"
emb = SomeEmbeddings(dimension=2)
v_store = AstraDB(
embedding=emb,
collection_name=collection_name,
token=os.environ["ASTRA_DB_APPLICATION_TOKEN"],
api_endpoint=os.environ["ASTRA_DB_API_ENDPOINT"],
namespace=os.environ.get("ASTRA_DB_KEYSPACE"),
)
v_store.add_texts(["huh"])
assert len(v_store.similarity_search("hah", k=10)) == 1
# another instance pointing to the same collection on DB
v_store_kenny = AstraDB(
embedding=emb,
collection_name=collection_name,
token=os.environ["ASTRA_DB_APPLICATION_TOKEN"],
api_endpoint=os.environ["ASTRA_DB_API_ENDPOINT"],
namespace=os.environ.get("ASTRA_DB_KEYSPACE"),
)
v_store_kenny.delete_collection()
# dropped on DB, but 'v_store' should have no clue:
with pytest.raises(ValueError):
_ = v_store.similarity_search("hah", k=10)
def test_astradb_vectorstore_custom_params(self) -> None:
"""Custom batch size and concurrency params."""
emb = SomeEmbeddings(dimension=2)
v_store = AstraDB(
embedding=emb,
collection_name="lc_test_c",
token=os.environ["ASTRA_DB_APPLICATION_TOKEN"],
api_endpoint=os.environ["ASTRA_DB_API_ENDPOINT"],
namespace=os.environ.get("ASTRA_DB_KEYSPACE"),
batch_size=17,
bulk_insert_batch_concurrency=13,
bulk_insert_overwrite_concurrency=7,
bulk_delete_concurrency=19,
)
try:
# add_texts
N = 50
texts = [str(i + 1 / 7.0) for i in range(N)]
ids = ["doc_%i" % i for i in range(N)]
v_store.add_texts(texts=texts, ids=ids)
v_store.add_texts(
texts=texts,
ids=ids,
batch_size=19,
batch_concurrency=7,
overwrite_concurrency=13,
)
#
_ = v_store.delete(ids[: N // 2])
_ = v_store.delete(ids[N // 2 :], concurrency=23)
#
finally:
v_store.delete_collection()
async def test_astradb_vectorstore_custom_params_async(self) -> None:
"""Custom batch size and concurrency params."""
emb = SomeEmbeddings(dimension=2)
v_store = AstraDB(
embedding=emb,
collection_name="lc_test_c_async",
token=os.environ["ASTRA_DB_APPLICATION_TOKEN"],
api_endpoint=os.environ["ASTRA_DB_API_ENDPOINT"],
namespace=os.environ.get("ASTRA_DB_KEYSPACE"),
batch_size=17,
bulk_insert_batch_concurrency=13,
bulk_insert_overwrite_concurrency=7,
bulk_delete_concurrency=19,
)
try:
# add_texts
N = 50
texts = [str(i + 1 / 7.0) for i in range(N)]
ids = ["doc_%i" % i for i in range(N)]
await v_store.aadd_texts(texts=texts, ids=ids)
await v_store.aadd_texts(
texts=texts,
ids=ids,
batch_size=19,
batch_concurrency=7,
overwrite_concurrency=13,
)
#
await v_store.adelete(ids[: N // 2])
await v_store.adelete(ids[N // 2 :], concurrency=23)
#
finally:
await v_store.adelete_collection()
def test_astradb_vectorstore_metrics(self) -> None:
"""
Different choices of similarity metric.
Both stores (with "cosine" and "euclidea" metrics) contain these two:
- a vector slightly rotated w.r.t query vector
- a vector which is a long multiple of query vector
so, which one is "the closest one" depends on the metric.
"""
emb = ParserEmbeddings(dimension=2)
isq2 = 0.5**0.5
isa = 0.7
isb = (1.0 - isa * isa) ** 0.5
texts = [
json.dumps([isa, isb]),
json.dumps([10 * isq2, 10 * isq2]),
]
ids = [
"rotated",
"scaled",
]
query_text = json.dumps([isq2, isq2])
# creation, population, query - cosine
vstore_cos = AstraDB(
embedding=emb,
collection_name="lc_test_m_c",
token=os.environ["ASTRA_DB_APPLICATION_TOKEN"],
api_endpoint=os.environ["ASTRA_DB_API_ENDPOINT"],
namespace=os.environ.get("ASTRA_DB_KEYSPACE"),
metric="cosine",
)
try:
vstore_cos.add_texts(
texts=texts,
ids=ids,
)
_, _, id_from_cos = vstore_cos.similarity_search_with_score_id(
query_text,
k=1,
)[0]
assert id_from_cos == "scaled"
finally:
vstore_cos.delete_collection()
# creation, population, query - euclidean
vstore_euc = AstraDB(
embedding=emb,
collection_name="lc_test_m_e",
token=os.environ["ASTRA_DB_APPLICATION_TOKEN"],
api_endpoint=os.environ["ASTRA_DB_API_ENDPOINT"],
namespace=os.environ.get("ASTRA_DB_KEYSPACE"),
metric="euclidean",
)
try:
vstore_euc.add_texts(
texts=texts,
ids=ids,
)
_, _, id_from_euc = vstore_euc.similarity_search_with_score_id(
query_text,
k=1,
)[0]
assert id_from_euc == "rotated"
finally:
vstore_euc.delete_collection()
|
langchain-aiREPO_NAMElangchainPATH_START.@langchain_extracted@langchain-master@libs@community@tests@integration_tests@vectorstores@test_astradb.py@.PATH_END.py
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.