| metadata (dict) | text (string, length 0–40.6M) | id (string, length 14–255) |
|---|---|---|
{
"filename": "test_coag_dustpy.py",
"repo_name": "cuDisc/cuDisc",
"repo_path": "cuDisc_extracted/cuDisc-main/tests/scripts/test_coag_dustpy.py",
"type": "Python"
}
|
import os
from constants import *
from fileIO import *
import numpy as np
import matplotlib.pyplot as plt
print("Making and running test...\n")
os.system('cd ../../ && make -j test_coagdustpy')
os.system('../../test_coagdustpy')
sim = CuDiscModel("../outputs/coag_dustpy")
sizes = sim.load_grain_sizes()
gas, dust = sim.load_dens1D("0")
a_dp = [5e-05, 5.19e-05, 5.4e-05, 5.61e-05, 5.83e-05, 6.05e-05, 6.29e-05, 6.53e-05, 6.79e-05, 7.05e-05, 7.33e-05, 7.61e-05, 7.91e-05, 8.22e-05, 8.53e-05, 8.87e-05, 9.21e-05, 9.57e-05, 9.94e-05, 0.000103, 0.000107, 0.000112, 0.000116, 0.00012, 0.000125, 0.00013, 0.000135, 0.00014, 0.000146, 0.000151, 0.000157, 0.000163, 0.00017, 0.000176, 0.000183, 0.00019, 0.000198, 0.000205, 0.000213, 0.000222, 0.00023, 0.000239, 0.000249, 0.000258, 0.000268, 0.000279, 0.00029, 0.000301, 0.000313, 0.000325, 0.000338, 0.000351, 0.000364, 0.000379, 0.000393, 0.000409, 0.000425, 0.000441, 0.000458, 0.000476, 0.000495, 0.000514, 0.000534, 0.000555, 0.000576, 0.000599, 0.000622, 0.000646, 0.000671, 0.000698, 0.000725, 0.000753, 0.000782, 0.000813, 0.000844, 0.000877, 0.000911, 0.000947, 0.000984, 0.00102, 0.00106, 0.0011, 0.00115, 0.00119, 0.00124, 0.00129, 0.00134, 0.00139, 0.00144, 0.0015, 0.00156, 0.00162, 0.00168, 0.00174, 0.00181, 0.00188, 0.00196, 0.00203, 0.00211, 0.00219, 0.00228, 0.00237, 0.00246, 0.00256, 0.00266, 0.00276, 0.00287, 0.00298, 0.00309, 0.00321, 0.00334, 0.00347, 0.0036, 0.00374, 0.00389, 0.00404, 0.0042, 0.00436, 0.00453, 0.00471, 0.00489, 0.00508, 0.00528, 0.00549, 0.0057, 0.00592, 0.00615, 0.00639, 0.00664, 0.0069, 0.00717, 0.00745, 0.00774, 0.00804, 0.00835, 0.00868, 0.00901, 0.00937, 0.00973, 0.0101, 0.0105, 0.0109, 0.0113, 0.0118, 0.0122, 0.0127, 0.0132, 0.0137, 0.0143, 0.0148, 0.0154, 0.016, 0.0166, 0.0173, 0.0179, 0.0186, 0.0194, 0.0201, 0.0209, 0.0217, 0.0225, 0.0234, 0.0243, 0.0253, 0.0263, 0.0273, 0.0284, 0.0295, 0.0306, 0.0318, 0.033, 0.0343, 0.0357, 0.037, 0.0385, 0.04, 0.0415, 0.0432, 0.0448, 0.0466, 0.0484, 0.0503, 0.0522, 0.0543, 0.0564, 0.0586, 0.0609, 0.0632, 0.0657, 0.0683, 0.0709, 0.0737, 0.0765, 0.0795, 0.0826, 0.0858, 0.0892, 0.0926, 0.0963, 0.1]
sigma_dp = [0.0134, 0.00831, 0.00685, 0.00608, 0.00558, 0.00523, 0.00526, 0.00515, 0.00499, 0.00488, 0.00476, 0.00468, 0.00459, 0.00452, 0.0045, 0.00443, 0.00439, 0.00436, 0.00433, 0.00434, 0.00433, 0.00432, 0.00432, 0.00431, 0.00432, 0.00432, 0.00433, 0.00435, 0.00436, 0.00438, 0.0044, 0.00442, 0.00444, 0.00447, 0.00449, 0.00452, 0.00455, 0.00459, 0.00462, 0.00465, 0.00469, 0.00473, 0.00476, 0.0048, 0.00484, 0.00489, 0.00493, 0.00497, 0.00502, 0.00506, 0.00511, 0.00516, 0.00521, 0.00526, 0.00531, 0.00536, 0.00541, 0.00547, 0.00552, 0.00558, 0.00563, 0.00569, 0.00575, 0.00581, 0.00586, 0.00592, 0.00598, 0.00604, 0.00638, 0.00636, 0.00636, 0.00636, 0.00638, 0.0064, 0.00643, 0.00646, 0.00649, 0.00652, 0.00656, 0.0066, 0.00663, 0.00667, 0.0067, 0.00673, 0.00676, 0.00678, 0.00681, 0.00682, 0.00684, 0.00685, 0.00685, 0.00685, 0.00684, 0.00683, 0.0068, 0.00677, 0.00674, 0.00669, 0.00605, 0.00554, 0.00529, 0.0052, 0.00524, 0.00536, 0.00554, 0.00575, 0.00597, 0.0062, 0.00643, 0.00667, 0.00691, 0.00715, 0.00739, 0.00763, 0.00787, 0.00812, 0.00836, 0.0086, 0.00885, 0.00911, 0.00936, 0.00963, 0.0099, 0.0102, 0.0105, 0.0108, 0.0111, 0.0115, 0.0118, 0.0122, 0.0126, 0.013, 0.0134, 0.0139, 0.0143, 0.0148, 0.0153, 0.0159, 0.0164, 0.017, 0.0176, 0.0182, 0.0188, 0.0195, 0.0201, 0.0208, 0.0215, 0.0223, 0.023, 0.0238, 0.0247, 0.0259, 0.0256, 0.0252, 0.0248, 0.0243, 0.0236, 0.0225, 0.0212, 0.0196, 0.0178, 0.0157, 0.0135, 0.0113, 0.00915, 0.00713, 0.00533, 0.00381, 0.00259, 0.00167, 0.00101, 0.000574, 0.000303, 0.000148, 6.6e-05, 2.69e-05, 9.87e-06, 3.26e-06, 9.54e-07, 2.47e-07, 5.59e-08, 1.1e-08, 1.89e-09, 2.8e-10, 3.6e-11, 4.02e-12, 3.94e-13, 3.42e-14, 2.66e-15, 1.86e-16, 1.19e-17, 6.94e-19, 3.71e-20, 1.82e-21, 8.26e-23, 3.44e-24, 1.32e-25, 4.66e-27, 1.52e-28, 4.56e-30]
print("Generating plots...")
# def a(m):
# return (3.*m/(1.6 * 4.* np.pi) )**(1./3.)
# a_dp = a(data.m[-1])
fig,ax1 = plt.subplots(figsize = (5,3.5))
ax1.loglog(a_dp, sigma_dp, label='DustPy')
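# Interpretation (comment not in the original file): m_c*Sigma/diff(m_e) appears to
# convert the per-bin dust surface densities into a distribution per logarithmic grain
# mass (~ dSigma/dln m), putting the cuDisc output on the same footing as the DustPy
# sigma_d(m) curve plotted above.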
ax1.loglog(sizes.a_c, sizes.m_c*dust.Sigma[2,:]/np.diff(sizes.m_e),label= 'cuDisc', ls='--', c='r')
ax1.set_ylim(2e-3,5e-2)
# plt.ylim(1e-25, 1e-14)
plt.xlim(6e-5, 5e-2)
ax1.set_ylabel(r"$\sigma_d$(m) (g cm$^{-2}$)")
plt.xlabel(r"Grain size (cm)")
plt.legend()
plt.tight_layout()
plt.savefig("../outputs/coag_dustpy/dustpycomp_0D.png")
print("Done!\n")
|
cuDiscREPO_NAMEcuDiscPATH_START.@cuDisc_extracted@cuDisc-main@tests@scripts@test_coag_dustpy.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/scatter/marker/colorbar/title/font/__init__.py",
"type": "Python"
}
|
import sys
from typing import TYPE_CHECKING
if sys.version_info < (3, 7) or TYPE_CHECKING:
    from ._weight import WeightValidator
    from ._variant import VariantValidator
    from ._textcase import TextcaseValidator
    from ._style import StyleValidator
    from ._size import SizeValidator
    from ._shadow import ShadowValidator
    from ._lineposition import LinepositionValidator
    from ._family import FamilyValidator
    from ._color import ColorValidator
else:
    from _plotly_utils.importers import relative_import

    __all__, __getattr__, __dir__ = relative_import(
        __name__,
        [],
        [
            "._weight.WeightValidator",
            "._variant.VariantValidator",
            "._textcase.TextcaseValidator",
            "._style.StyleValidator",
            "._size.SizeValidator",
            "._shadow.ShadowValidator",
            "._lineposition.LinepositionValidator",
            "._family.FamilyValidator",
            "._color.ColorValidator",
        ],
    )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@scatter@marker@colorbar@title@font@__init__.py@.PATH_END.py
|
{
"filename": "_labelalias.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/layout/coloraxis/colorbar/_labelalias.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class LabelaliasValidator(_plotly_utils.basevalidators.AnyValidator):
    def __init__(
        self,
        plotly_name="labelalias",
        parent_name="layout.coloraxis.colorbar",
        **kwargs,
    ):
        super(LabelaliasValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=kwargs.pop("edit_type", "colorbars"),
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@layout@coloraxis@colorbar@_labelalias.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "Fermipy/fermipy",
"repo_path": "fermipy_extracted/fermipy-master/fermipy/tests/__init__.py",
"type": "Python"
}
|
FermipyREPO_NAMEfermipyPATH_START.@fermipy_extracted@fermipy-master@fermipy@tests@__init__.py@.PATH_END.py
|
|
{
"filename": "test_bbox.py",
"repo_name": "pmelchior/scarlet",
"repo_path": "scarlet_extracted/scarlet-master/tests/test_bbox.py",
"type": "Python"
}
|
import numpy as np
import scarlet
class TestBox(object):
    def test_from_data(self):
        x = np.arange(25).reshape(5, 5)
        x[0] = 0
        x[:, -2:] = 0
        bbox = scarlet.Box.from_data(x)
        assert bbox == scarlet.Box((4, 3), origin=(1, 0))

        x += 10
        bbox = scarlet.Box.from_data(x)
        assert bbox == scarlet.Box((5, 5), origin=(0, 0))

        bbox = scarlet.Box.from_data(x, min_value=10)
        assert bbox == scarlet.Box((4, 3), origin=(1, 0))

    def test_contains(self):
        bbox = scarlet.Box((6, 4, 3), origin=(0, 1, 0))

        p = (2, 2, 2)
        assert bbox.contains(p)

        p = (3, 0, 3)
        assert not bbox.contains(p)

        p = (7, 3, 3)
        assert not bbox.contains(p)

        p = (3, 3, -1)
        assert not bbox.contains(p)

    def test_extract_from(self):
        image = np.zeros((3, 5, 5))
        image[1, 1, 1] = 1

        # simple one pixel box extraction
        bbox = scarlet.Box.from_data(image)
        extracted = bbox.extract_from(image)
        assert extracted.shape == (1, 1, 1) and extracted[0, 0, 0] == 1

        # offset box extraction past boundary of image
        bbox = scarlet.Box.from_bounds((0, 3), (-2, 3), (-3, 2))
        extracted = bbox.extract_from(image)
        assert extracted.shape == (3, 5, 5) and extracted[1, 3, 4] == 1

    def test_insert_into(self):
        image = np.zeros((3, 5, 5))
        sub = np.zeros((3, 5, 5))
        sub[1, 3, 4] = 1
        bbox = scarlet.Box.from_bounds((0, 3), (-2, 3), (-3, 2))
        image = bbox.insert_into(image, sub)
        assert image.shape == (3, 5, 5) and image[1, 1, 1] == 1
|
pmelchiorREPO_NAMEscarletPATH_START.@scarlet_extracted@scarlet-master@tests@test_bbox.py@.PATH_END.py
|
{
"filename": "SYGMA_SSP_CNO_yield_input.ipynb",
"repo_name": "NuGrid/NuPyCEE",
"repo_path": "NuPyCEE_extracted/NuPyCEE-master/regression_tests/temp/SYGMA_SSP_CNO_yield_input.ipynb",
"type": "Jupyter Notebook"
}
|
# Regression test suite: Test of basic SSP GCE features
Test of SSP with artificial yields of C,N,O + Ni provided in tables.
C-12 only in low-mass stars (up to 3 Msun).
N-14 only in intermediate-mass stars (up to 7 Msun).
O-16 only in massive stars.
Ni-58 only in SNIa.
Each star produces only 0.1 Msun of yields.
The focus is on basic GCE features.
You can find the documentation <a href="doc/sygma.html">here</a>.
## Results:
$\odot$ Final ISM contributions distinguished by source (low-mass AGB, massive AGB, massive stars, SNIa)
$\odot$ Evolution of the different sources
$\odot$ Check of the transition mass
$\odot$ Check of the exclude_masses parameter
$\odot$ IMPORTANT: the SNIa (time) contribution changes when the mass interval is changed! The Vogelsberger SNIa prescription does not allow the SNIa contribution to be included only partially.
```python
#from imp import *
#s=load_source('sygma','/home/nugrid/nugrid/SYGMA/SYGMA_online/SYGMA_dev/sygma.py')
%pylab nbagg
import sygma as s
reload(s)
s.__file__
from scipy.integrate import quad
from scipy.interpolate import UnivariateSpline
#import matplotlib.pyplot as plt
#%matplotlib inline
import numpy as np
#import mpld3
#mpld3.enable_notebook()
```
Populating the interactive namespace from numpy and matplotlib
The IMF allows one to calculate the number of stars $N_{12}$ in the mass interval $[m_1, m_2]$ via

(I) $N_{12} = k_N \int_{m_1}^{m_2} m^{-2.35}\, dm$

where $k_N$ is the normalization constant. It can be derived from the total mass of the system, $M_{tot}$, since the total mass $M_{12}$ in the mass interval above can be estimated with

(II) $M_{12} = k_N \int_{m_1}^{m_2} m^{-1.35}\, dm$

With a total mass interval of $[1, 30]$ and $M_{tot} = 10^{11}$, $k_N$ can be derived from

$10^{11} = \frac{k_N}{0.35}\, \left(1^{-0.35} - 30^{-0.35}\right)$
```python
k_N=1e11*0.35/ (1**-0.35 - 30**-0.35) # from (II): total-mass constraint
```
The total number of stars $N_{tot}$ is then:
```python
N_tot=k_N/1.35 * (1**-1.35 - 30**-1.35) # from (I): total number of stars
```
```python
k_N=1e11*0.35/ (1**-0.35 - 30**-0.35)
```
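As a quick sanity check (not part of the original notebook), the two normalization constants evaluate to roughly:

```python
k_N = 1e11 * 0.35 / (1 - 30**-0.35)    # ~5.0e10, from the total-mass constraint (II)
N_tot = k_N / 1.35 * (1 - 30**-1.35)   # ~3.7e10 stars in [1, 30] Msun, from (I)
print(k_N, N_tot)
```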
### Distinguish between 2 sources (AGB,massive)
```python
s1=s.sygma(iolevel=0,mgal=1e11,dt=1e7,tend=1.3e10,imf_type='salpeter',alphaimf=2.35,imf_bdys=[1,30],sn1a_on=True,hardsetZ=0.0001,table='yield_tables/isotope_yield_table_cnoni.txt', sn1a_table='yield_tables/sn1a_cnoni.txt', iniabu_table='yield_tables/iniabu/iniab1.0E-04GN93_alpha_cnoni.ppn')
print s1.history.isotopes
Yield_lagb_sim=s1.history.ism_iso_yield[-1][0]
Yield_magb_sim=s1.history.ism_iso_yield[-1][1]
Yield_massive_sim=s1.history.ism_iso_yield[-1][2]
Yield_sn1a_sim=s1.history.ism_iso_yield[-1][3]
```
SYGMA run in progress..
SYGMA run completed - Run time: 0.53s
['C-12', 'N-14', 'O-16', 'Ni-58']
Using the mass boundaries chosen in the yield tables:
low-mass AGB: till 4, here [1, 3.5]
massive AGB: till 8, here [3.5, 8] (different because the 12 Msun star is missing in set 1.2)
massive stars: till 30, here [8, 30]
```python
N_lagb=k_N/1.35 * (1**-1.35 - 3.5**-1.35)
Yield_lagb=0.1*N_lagb
N_magb=k_N/1.35 * (3.5**-1.35 - 8.**-1.35)
Yield_magb=0.1*N_magb
N_massive=k_N/1.35 * (8.**-1.35 - 30**-1.35)
Yield_massive=0.1*N_massive
```
Compare final yields:
```python
print 'Should be 1:',Yield_lagb_sim/Yield_lagb
print 'Should be 1:',Yield_magb_sim/Yield_magb
print 'Should be 1:',Yield_massive_sim/Yield_massive
```
Should be 1: 1.0
Should be 1: 1.0
Should be 1: 1.0
### Plotting
```python
s1.plot_mass(specie='C',label='C',color='r',shape='-',marker='o',markevery=800)
s1.plot_mass(specie='N',label='N',color='b',shape='-',marker='o',markevery=800)
s1.plot_mass(specie='O',label='O',color='g',shape='-',marker='o',markevery=800)
m=[1,1.65,2,3,4,5,6,7,12,15,20,25]
#ages=[1.177e10,2.172e9,1.265e9,4.141e8,1.829e8,1.039e8,6.95e7,5.022e7,1.165e7,8.109e6,6.628e6]
ages=[5.67e9,1.211e9,6.972e8,2.471e8,1.347e8,8.123e7,5.642e7,4.217e7,1.892e7,1.381e7,9.895e6,7.902e6] # lifetimes for Z=0.0001
def yields(min1, max1, k_N):
    return (k_N/1.35 * (min1**-1.35 - max1**-1.35)) * 0.1

yields1_lagb = []
age_lagb = []
yields1_magb = []
age_magb = []
yields1_massive = []
age_massive = []
for m1 in m:
    idx = m.index(m1)
    #print m1,idx
    if m1 >= 1 and m1 <= 3.5:
        yields1_lagb.append(yields(m1, 3.5, k_N))
        age_lagb.append(ages[idx])
        #print yields(1,m1,k_N)
        #print ages[idx]
    if m1 >= 3.5 and m1 <= 8.:
        yields1_magb.append(yields(m1, 8, k_N))
        age_magb.append(ages[idx])
    if m1 >= 8 and m1 <= 30:
        yields1_massive.append(yields(m1, 30, k_N))
        age_massive.append(ages[idx])
plt.plot(age_lagb,yields1_lagb,marker='+',color='r',linestyle='',markersize=30,label='C*')
plt.plot(age_magb,yields1_magb,marker='+',color='b',linestyle='',markersize=30,label='N*')
plt.plot(age_massive,yields1_massive,marker='+',color='g',linestyle='',markersize=30,label='O*')
plt.legend(loc=4,prop={'size':14})
plt.xlim(7e6,1.5e10)
```
(Notebook output: interactive matplotlib figure comparing the simulated ISM masses of C, N and O over time with the expected per-source yields overplotted as crosses.)
CemTAhC3C3Ca3INxrb1X6L+2h5vts9LukNCWNuleQv34TwzQcv9U961Jt92yXp5JiNJB4ex4IzWlIfH9KVb1e3o6q8q+0WrY142Farinw7PnfCv535NMrRRnvk/Ug684ivdPCNDt8wcZ/31wxGQt7/xm8W+eXQHEDIZ+E3CD5AAAIQgAAEIBBMACEfjIqGEIBACwL1wjUShq4qR/uV4ybqhVok0qIKeP1wUYU2LuQtZOuvaN995I/3uFv4WRRaFMf3vEdj1i9pDwm2l7jbF9+IsAi36G4l5C0sLeQb/e71a+slXVh7JJt9bsQjqra387i0Zj53wr+d+bQr5CMhHl/pYBZejWDe34kJea9wSOIQkgORXyytD8l62kAAAhCAAAQgkAkCCPlMhAEnIFAIAvVCvpVIrn89qQIbBxPZjwt5C8n4Vb9U38v059YOyHM7/86LDs+7P0As1wfGgt+H9rkyb9Htfere5+4xWgn5qMJuYeoqc6Pfv67ue7VAJCxt24fd1V9e5m8x30rIh/rcLv9259OJkI+q9dHyerMx9/h2jKS8iI/VKgcQ8oX49cMkIAABCEAAAuUigJAvV7yZLQR6SaDdirwFsav1UcW5/v/1vkaiLi7kp9c18l71+OF50cvex+7l7Ba93k/ty3b8O9DCOqQiH+1Xj5a/RxVh2wpdWu8bD/V76RvFpBWPkIp8Oz63Gi+Jfzvz6UTIx28u7Kjli+e9OGaslZBvlQPRFgsq8r387YBtCEAAAhCAAARSJYCQTxUnxiBQagLt7pGPDlar3yPfSOi2s0fe1WJXsr2/vf6RbtGjzXwDwaLQVfz6anoUSAtVv+abAPFHntUHOlTIR3NIema6q/T2xwe2eR9+vOJ9RkJmmZP3wDeqyDc7+T3p5kNI+/ozCtqZTydC3n2iGFjIW9jXH37XSMiH5oAPMvSFkC/1ry8mDwEIQAACEMgXAYR8vuKFtxDIMoEkIW9haoFaX/H2HmfvebcQPDo2KVd9LWQtwuPPZY9Ovw89tT4SpRbfXgYf3xcf7bN2Zd4nx0dj+lT8+PPlI2EdVYAjQRnfB2/Xo3atlta7bXQSfv3NCvvog+PqeZiRWTXyrdWp9e363C7/dueTlL+NDruL2kY++f9JN3kaCfmQHIhvW0DIZ/m3C75BAAIQgAAEIHAQAYQ8CQEBCKRFoNFz0x+riWkfQuf96dFz5C0y/ai1+BJ1V1F9IJ0r1m7rPeg+oM3/98+jZ5r7ee6trptqy+gt4qPnyLuybnsWzf4+ej68x7Ffrvr6ewtrt7Ow9vduFwlOt/Fzyr1P2zcdLKZ9uVK8QtK1tfaNeHiVgG1Hfrmv7bTiEX+OvDn5UDxX46MKfjORHOpzJ/zbmU8nQj5a8p/0eDnba7a0PjQHbAch3+odxesQgAAEIAABCGSGAEI+M6HAEQjknoAFlQ8WmyipXmj7OfAW4RawFqSurC5MaGcI3s/uqrnbW0R7n/es2oFvXlp9SBukfMK5q66248tje4+7q+wW5/HLoi8a0+08rn2Mt/M8LJxtzzca3Mb7tS02Pf+janvvPf9mPOyXT6Y3D+/rtxh21f2hhLmZh1cnuK3HtW/23+P75karw+7a8blT/u3Mp36KrSrybh+djp8U+1Z75ENzACHfxhuLphCAAAQgAAEIDJYAQn6w/BkdAhA4mIAFsavgriDXXxatPum+HSEP3/YIDIJ/KyHvGxiOfdJz4tubXfPWCPk0aWILAhCAAAQgAIGeEkDI9xQvxiEAgTYJeMm7l3e7sh3f1x7tqfee8XPbtEnzcAKD4N9KyEcn78efVhA+o/CWCPlwVrSEAAQgAAEIQGDABBDyAw4Aw0MAAgcR8NJ5CzuLeP/rA+R8YruXs7tS733tIfvjwdoZgUHwj4S8DxrcVTsY0dsZvAXCN3WiivyZnU2pZS+v8vCNIueZ599qq0JLgzSAAAQgAAEIQAACvSaAkO81YexDAALtEji7tl/cot0H0HkvuPeQ1+9Xb9cu7cMI9Jt/JOQj76LKu1dfRAfd+YyE+jMNwmbTupXPG/Dp+758oJ7H8uP/uCAAAQhAAAIQgEBmCSDkMxsaHIMABCAAAQhAAAIQgAAEIAABCLyaAEKerIAABCAAAQhAAAIQgAAEIAABCOSIAEI+R8HCVQhAAAIQgAAEIAABCEAAAhCAAEKeHIAABCAAAQhAAAIQgAAEIAABCOSIAEI+R8HCVQhAAAIQgAAEIAABCEAAAhCAAEKeHIAABCAAAQhAAAIQgAAEIAABCOSIAEI+R8HCVQhAAAIQgAAEIAABCEAAAhCAAEKeHIAABCAAAQhAAAIQgAAEIAABCOSIAEI+R8HCVQhAAAIQgAAEIAABCEAAAhCAAEKeHIAABCAAAQhAAAIQgAAEIAABCOSIAEI+R8HCVQhAAAIQgAAEIAABCEAAAhCAAEKeHIAABCAAAQhAAAIQgAAEIAABCOSIAEI+R8HCVQhAAAIQgAAEIAABCEAAAhCAAEKeHIAABCAAAQhAAAIQgAAEIAABCOSIAEI+R8HCVQhAAAIQgAAEIAABCEAAAhCAAEKeHIAABCAAAQhAAAIQgAAEIAABCOSIAEI+R8HCVQhAAAIQgAAEIAABCEAAAhCAAEKeHIAABCAAAQhAAAIQgAAEIAABCOSIwP8Hy9G2pbdw3n0AAAAASUVORK5CYII=">
(7000000.0, 15000000000.0)
Simulation results compared with the semi-analytical calculations for C, N, and O.
### Distinguish between all 3 sources (AGB, massive, and SN Ia)
#### Delay-time distribution (DTD) taken from Maoz
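The two code cells below rebuild the expected number of SNe Ia semi-analytically: the rate follows the $t^{-1}$ Maoz delay-time distribution, weighted by the number of white-dwarf progenitors (stars of 3 to 8 $M_\odot$ that have already left the main sequence), and the constant $A$ is fixed so that the time-integrated number of SNe Ia per unit stellar mass formed equals the $10^{-3}\,M_\odot^{-1}$ used in the code (the absolute scale of the $t^{-1}$ law therefore cancels against $A$). Schematically, the integrals being evaluated are

$$
N_{\rm Ia} = A \int_0^{t_{\rm end}} \int_{\max(3,\,M_{\rm to}(t))}^{8} \phi(m)\,\left(\frac{t}{1\,\mathrm{Gyr}}\right)^{-1} \mathrm{d}m\,\mathrm{d}t ,
$$

with $A$ chosen such that $N_{\rm Ia} = 10^{-3}\,M_\odot^{-1}$, where $\phi(m)\propto m^{-2.35}$ is the Salpeter IMF normalized to 1 $M_\odot$ of stars formed over 1 to 30 $M_\odot$, and $M_{\rm to}(t)$ is the turnoff mass obtained from the lifetime spline constructed in the first cell.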
```python
Yield_sn1a_sim=s1.history.ism_iso_yield_1a[-1][0]
from scipy.interpolate import UnivariateSpline
zm_lifetime_grid=s1.zm_lifetime_grid_current
idx_z = (np.abs(zm_lifetime_grid[0]-0.0001)).argmin() # metallicity entry closest to Z=1e-4
grid_masses=zm_lifetime_grid[1][::-1]
grid_lifetimes=zm_lifetime_grid[2][idx_z][::-1]
spline_degree1=2
smoothing1=0
boundary=[None,None]
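# spline giving log10(mass) as a function of the lifetime grid; used below as 10**spline_lifetime(np.log10(t))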
spline_lifetime = UnivariateSpline(grid_lifetimes,np.log10(grid_masses),bbox=boundary,k=spline_degree1,s=smoothing1)
from scipy.integrate import quad
```
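The call pattern `10**spline_lifetime(np.log10(t))` used in the next cell implies that the lifetime grid is stored as log10 values, so the spline maps $\log_{10} t$ to $\log_{10} m$. A quick, purely illustrative spot check of the inverted mass-lifetime relation could look like this (the 1 Gyr input is a hypothetical value, not part of the test):
```python
# Hypothetical sanity check: turnoff mass of a population that is 1 Gyr old at the selected metallicity.
# The spline returns log10(mass/Msun) for a given log10(lifetime/yr), so exponentiate the result.
m_turnoff = 10**spline_lifetime(np.log10(1.0e9))
print('Turnoff mass at 1 Gyr [Msun]:', float(m_turnoff))
```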
```python
def spline1(t):
minm_prog1a=3
#lower integration bound for the progenitor mass: the current turnoff mass, but never below 3 Msun
return max(minm_prog1a,10**spline_lifetime(np.log10(t)))
#function giving the total (cumulative) number of WDs at each timestep
def wd_number(m,t):
#print 'time ',t
#print 'mass ',m
mlim=10**spline_lifetime(np.log10(t))
maxm_prog1a=8
if mlim>maxm_prog1a:
return 0
else:
mmin=0
mmax=0
inte=0
#IMF normalized to 1 Msun of stars formed over the 1-30 Msun range
def g2(mm):
return mm*mm**-2.35
norm=1./quad(g2,1,30)[0]
return norm*m**-2.35 #self.__imf(mmin,mmax,inte,m)
def maoz_sn_rate(m,t):
return wd_number(m,t)* 4.0e-13 * (t/1.0e9)**-1
def maoz_sn_rate_int(t):
return quad( maoz_sn_rate,spline1(t),8,args=t)[0]
#in this formula, (paper) sum_sn1a_progenitors number of
maxm_prog1a=8
longtimefornormalization=1.3e10 #yrs
A = 1e-3 / quad(maoz_sn_rate_int,0,longtimefornormalization)[0]
print('Norm. constant A:',A)
n1a= A* quad(maoz_sn_rate_int,0,1.3e10)[0]
Yield_sn1a=n1a*1e11*0.1 # SNe Ia per Msun formed x mgal (1e11 Msun) x 0.1 Msun ejected per event (test yield table)
```
Norm. constant A: 8.47537309048
```python
print('Should be 1:',Yield_sn1a_sim/Yield_sn1a)
print('Check specific Ni-56: ',s1.history.ism_iso_yield[-1][-1]/Yield_sn1a) #last isotope in s1.history.isotopes, see above
```
Should be 1: 0.999997080727
Check specific Ni-56: 0.999997080727
### Test of parameter transitionmass
#### Default (above) is 8 Msun; it needs to be chosen so that it agrees with the yield input!
```python
s2=s.sygma(iolevel=0,transitionmass=7.2,mgal=1e11,dt=1e7,tend=1.3e10,imf_type='salpeter',alphaimf=2.35,imf_bdys=[1,30],sn1a_on=False,table='yield_tables/isotope_yield_table_cnoni.txt', sn1a_table='yield_tables/sn1a_cnoni.txt', iniabu_table='yield_tables/iniabu/iniab1.0E-04GN93_alpha_cnoni.ppn')
s3=s.sygma(iolevel=0,transitionmass=8,mgal=1e11,dt=1e7,tend=1.3e10,imf_type='salpeter',alphaimf=2.35,imf_bdys=[1,30],sn1a_on=False,table='yield_tables/isotope_yield_table_cnoni.txt', sn1a_table='yield_tables/sn1a_cnoni.txt', iniabu_table='yield_tables/iniabu/iniab1.0E-04GN93_alpha_cnoni.ppn')
s4=s.sygma(iolevel=0,transitionmass=9,mgal=1e11,dt=1e7,tend=1.3e10,imf_type='salpeter',alphaimf=2.35,imf_bdys=[1,30],sn1a_on=False,table='yield_tables/isotope_yield_table_cnoni.txt', sn1a_table='yield_tables/sn1a_cnoni.txt', iniabu_table='yield_tables/iniabu/iniab1.0E-04GN93_alpha_cnoni.ppn')
```
Warning: Non-default transitionmass chosen. Use in agreement with yield input!
SYGMA run in progress..
SYGMA run completed - Run time: 0.24s
SYGMA run in progress..
SYGMA run completed - Run time: 0.2s
Warning: Non-default transitionmass chosen. Use in agreement with yield input!
SYGMA run in progress..
SYGMA run completed - Run time: 0.17s
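For reference, the semi-analytical expectations evaluated in the next cell follow from integrating the Salpeter IMF, $\phi(m) = k_N\,m^{-2.35}$ (with the normalization constant $k_N$ defined earlier in this notebook), over each mass range and multiplying by the constant ejected mass per star assumed by the test yield tables:

$$
N(m_1,m_2)=\int_{m_1}^{m_2} k_N\,m^{-2.35}\,\mathrm{d}m=\frac{k_N}{1.35}\left(m_1^{-1.35}-m_2^{-1.35}\right),
\qquad
\mathrm{Yield}(m_1,m_2)=0.1\,M_\odot \times N(m_1,m_2),
$$

where the boundary between the AGB and massive-star ranges is the chosen transition mass (7.2, 8, or 9 $M_\odot$) and the overall IMF range is 1 to 30 $M_\odot$.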
```python
N_agb=k_N/1.35 * (1**-1.35 - 7.2**-1.35)
Yield_agb7=0.1*N_agb
N_massive=k_N/1.35 * (7.2**-1.35 - 30**-1.35)
Yield_massive7=0.1*N_massive
N_agb=k_N/1.35 * (1**-1.35 - 8.**-1.35)
Yield_agb8=0.1*N_agb
N_massive=k_N/1.35 * (8.**-1.35 - 30**-1.35)
Yield_massive8=0.1*N_massive
N_agb=k_N/1.35 * (1**-1.35 - 9.**-1.35)
Yield_agb9=0.1*N_agb
N_massive=k_N/1.35 * (9.**-1.35 - 30**-1.35)
Yield_massive9=0.1*N_massive
print('should be 1:',sum(s2.history.ism_elem_yield_agb[-1])/Yield_agb7)
print('should be 1:',sum(s2.history.ism_elem_yield_massive[-1])/Yield_massive7)
print('should be 1:',sum(s3.history.ism_elem_yield_agb[-1])/Yield_agb8)
print('should be 1:',sum(s3.history.ism_elem_yield_massive[-1])/Yield_massive8)
print('should be 1:',sum(s4.history.ism_elem_yield_agb[-1])/Yield_agb9)
print('should be 1:',sum(s4.history.ism_elem_yield_massive[-1])/Yield_massive9)
```
should be 1: 1.0
should be 1: 1.0
should be 1: 1.0
should be 1: 1.0
should be 1: 1.0
should be 1: 1.0
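The same six ratios can also be asserted programmatically instead of being read off by eye. A minimal sketch, reusing the `s2`/`s3`/`s4` runs and the expected yields computed above (the tolerance is an arbitrary choice):
```python
import numpy as np

# Pair each run with its semi-analytical AGB and massive-star expectations and check the ratios.
checks = [(s2, Yield_agb7, Yield_massive7),
          (s3, Yield_agb8, Yield_massive8),
          (s4, Yield_agb9, Yield_massive9)]
for sim, y_agb, y_massive in checks:
    assert np.isclose(sum(sim.history.ism_elem_yield_agb[-1]) / y_agb, 1.0, rtol=1e-3)
    assert np.isclose(sum(sim.history.ism_elem_yield_massive[-1]) / y_massive, 1.0, rtol=1e-3)
```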
```python
fig=4
s2.plot_totmasses(fig=fig,mass='gas', source='all', norm='no', label='Tot,7Msun', shape='', marker='o', color='', markevery=20, log=True)
s2.plot_totmasses(fig=fig,mass='gas', source='agb', norm='no', label='AGB, 7Msun', shape='', marker='s', color='', markevery=20, log=True)
s2.plot_totmasses(fig=fig,mass='gas', source='massive', norm='no', label='Massive, 7Msun', shape='', marker='D', color='', markevery=20, log=True)
s3.plot_totmasses(fig=fig,mass='gas', source='all', norm='no', label='Tot, 8Msun', shape='', marker='x', color='', markevery=20, log=True)
s3.plot_totmasses(fig=fig,mass='gas', source='agb', norm='no', label='AGB, 8Msun', shape='', marker='+', color='', markevery=20, log=True)
s3.plot_totmasses(fig=fig,mass='gas', source='massive', norm='no', label='Massive, 8Msun', shape='', marker='>', color='', markevery=20, log=True)
s4.plot_totmasses(fig=fig,mass='gas', source='all', norm='no', label='Tot, 9Msun', shape='', marker='p', color='', markevery=20, log=True)
s4.plot_totmasses(fig=fig,mass='gas', source='agb', norm='no', label='AGB, 9Msun', shape='', marker='^', color='', markevery=20, log=True)
s4.plot_totmasses(fig=fig,mass='gas', source='massive', norm='no', label='Massive, 9Msun', shape='', marker='+', color='', markevery=20, log=True)
plt.legend(prop={'size':12})
plt.legend(loc='center left', bbox_to_anchor=(1, 0.5),markerscale=0.8,fontsize=12)
plt.ylim(4e6,4e9)
```
<img src="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAA/IAAAHGCAYAAADXFrCqAAAgAElEQVR4XuydCbxN5frHf8YimZNCnUz/Ro3UTZRCkluhIlODIRqQmQqpzCINhhChDKVuqcyl4VZkuNJEoqI0mCtT+H9+a511rLPP2mevPa+19+/9fM69Outd7/B937P3+q3neZ8nD1REQAREQAREQAREQAREQAREQAREQAR8QyCPb0aqgYqACIiACIiACIiACIiACIiACIiACEBCXptABERABERABERABERABERABERABHxEQELeR4uloYqACIiACIiACIiACIiACIiACIiAhLz2gAiIgAiIgAiIgAiIgAiIgAiIgAj4iICEvI8WS0MVAREQAREQAREQAREQAREQAREQAQl57QEREAEREAEREAEREAEREAEREAER8BEBCXkfLZaGKgIiIAIiIAIiIAIiIAIiIAIiIAIS8toDIiACIiACIiACIiACIiACIiACIuAjAhLyPlosDVUEREAEREAEREAEREAEREAEREAEJOS1B0RABERABERABERABERABERABETARwQk5H20WBqqCIiACIiACIiACIiACIiACIiACEjIaw+IgAiIgAiIgAiIgAiIgAiIgAiIgI8ISMj7aLE0VBEQAREQAREQAREQAREQAREQARGQkNceEAEREAEREAEREAEREAEREAEREAEfEZCQ99FiaagiIAIiIAIiIAIiIAIiIAIiIAIiICGvPSACIiACIiACIiACIiACIiACIiACPiIgIe+jxdJQRUAEREAEREAEREAEREAEREAEREBCXntABERABERABERABERABERABERABHxEQELeR4uloYqACIiACIiACIiACIiACIiACIiAhLz2gAiIgAiIgAiIgAiIgAiIgAiIgAj4iICEvI8WS0MVAREQAREQAREQAREQAREQAREQAQl57QEREAEREAEREAEREAEREAEREAER8BEBCXkfLZaGKgIiIAIiIAIiIAIiIAIiIAIiIAIS8toDIiACIiACIiACIiACIiACIiACIuAjAhLyPlosDVUEREAEREAEREAEREAEREAEREAEJOS1B0RABERABERABERABERABERABETARwQk5H20WBqqCIiACIiACIiACIiACIiACIiACEjIaw+IgAiIgAiIgAiIgAiIgAiIgAiIgI8ISMj7aLE0VBEQAREQAREQAREQAREQAREQARGQkNceEAEREAEREAEREAEREAEREAEREAEfEZCQ99FiaagiIAIiIAIiIAIiIAIiIAIiIAIiICGvPSACIiACIiACIiACIiACIiACIiACPiIgIe+jxdJQRUAEREAEREAEREAEREAEREAEREBCXntABERABERABERABERABERABERABHxEQELeR4uloYqACIiACIiACIiACIiACIiACIiAhLz2gAiIgAiIgAiIgAiIgAiIgAiIgAj4iICEvI8WS0MVAREQAREQAREQAREQAREQAREQAQl57QEREAEREAEREAEREAEREAEREAER8BEBCXkfLZaGKgIiIAIiIAIiIAIiIAIiIAIiIAIS8toDIiACIiACIiACIiACIiACIiACIuAjAhLyPlosDVUEREAEREAEREAEREAEREAEREAEJOS1B0RABERABERABERABERABERABETARwQk5H20WBqqCIiACIiACIiACIiACIiACIiACEjIp+ceOAXA0wDq5zL9GQD4oyICIiACIiACIuCeQCsA/AlWFgHoAuB3902qpgiIgAiIgAhkJyAhn147ohyArek1Zc1WBERABERABDxHoA2A6Z4blQYkAiIgAiLgGwIS8r5ZqpgMNIeQL1GiBPLmzZuj8ZYtW6J169Yx6TSWjTRo0AA7duxAqVKlsGDBglg2naOtWPYVTVvh3uu2vpt6oeo4Xf/tt99w4403ZuP59ttvo0yZMnFdr1g2HmreXu0rmnGHe2849UPVjeS69ll4uzAU43Bai6atcO8Np76burnVidXn2fTp0zFz5kxHpP/88w/27NnDa7UAfBQOd9UVAREQAREQATsBCfn02g85hPzWrVtRrhx/7Y9y2mmnYfv27Shbtix++eWXuA46ln1F01a497qt76ZeqDpO17dt24by5ctnWxvts+BbNRTjcDZ5NG2Fe2849UPVjeS69lk4OwMIxTic1qJpK9x7w6nvpm5udRLxefb555+jevXqxM3/+Twc7qorAiIgAiIgAhLy6bsHJOTDWHs3D4Vum4umrXDvdVvfTb1QdRLx4OuWcSzrhZq3V/uKZtzh3htO/VB1I7kuIR/eLgzFOJzWomkr3HvDqe+mbjKE/J9//olnnnkGc+fOxVdffYWDBw9KyIez4VRXBERABETAkYAs8um1MSTkw1hvNw+FbpuLpq1w73Vb3029UHUk5N3ugOD1QjEOp4do2gr33nDqh6obyXUJ+XB2hizyFq1ECPm1a9di1KhRWL58ueE5Rnd6q+TJkwfHjh2TkA9v+6q2CIiACIiAAwEJ+fTaFhLyYax3KHERRlNRubWGOw639d3UC1VHQj6cXeBcNxTjcHqIpq1w7w2nfqi6kVyXkA9nZ0jIx0vIHzlyBFOnTjV+vvjiC+zdu9cS6kaXBQsWxBlnnAGev+/ZsycY20Gu9eHtXdUWAREQARFwJiAhn147Q0I+jPUOJS7CaEpCXrEYgm6XVN9nnHioOUZyXUI+nE+g0GsQTmuh1iu3tsK9N5z6burGwiJ/zz334MMPP8QPP/yAQ4cOZZtusWLFcMEFF6BTo0ZoXq4c8q5bB1x0EfDBB8CKFfj82WdRvWZN3qMz8uFsOtUVAREQARHIQUBCPr02hYR8GOvt5qHQbXPRtBXuvW7ru6kXqo4s8m53QPB6oRiH00M0bYV7bzj1Q9WN5LqEfDg7Q0LeohWukF+/fr0hzJ1K/vz5jcCrtWvVQr/GjXHejh3AO+8A//0vwH/bS5UqQK1a+PyOO1C9Xj0J+fC2r2qLgAiIgAg4EJCQT69t4XshP3LkSDBwUJEiRdCjR4+4rl4s+4qmrXDvdVvfTb1QdZyup4LACjXvWG68WPYVTVvh3htO/VB1I7mufRbeLgzFOJzWomkr3HvDqe+mbm51eG3Xrl1gZHla27///nscPnw4Bxq6yPfv39/4HsKSJUDz5qZwz5MHMM+/AxkZwLXXAg0aGAIeZcsav1bU+nB2muqKgAiIgAjkRkBCPr32h++FfHotlz9nmwoCy5/k02vU2mfptd7xmi3PuE+aNAkTJkywR5Q3uqPFvUKFCti8eXO27rf+8APK/fGH6S6/YIH5//v3m3UqVwZuvhkYOdJxyBLy8VpJtSsCIiAC6UdAQj691lxCPr3WOymzlcBKCva061T7LO2WPKwJt6xbF3t+/NHxnt///BM/7NqFXQcP4pBlQQeQN29elCtXDjfddBP63303yqxdi227dqF8z57ZhXz//ig3aBBQoABQowZQu7b5c+WVQNGiuY5TQj6sZVRlERABERCBXAh
IyKfX9pCQT6/1TspsJbCSgj3tOtU+S7slD2vCjapWxfyNG417fgDwHIC3AfA3lrP8iQDOBfBvACsqVMA748cD8+cDb70FbN1q3LsNQPmAnreuWIFyf/0FXH45UKhQWOOSkA8LlyqLgAiIgAhIyGsPZBKQkNdWiDsBCay4I1YHFFjbtqF8+ewSa6vPsiNoIeNDYM+ePaiRkYGyu3djNYA/M7vJT893AA0AdABwjq17ivsC9uGccgrQsCG21aqF8u3aZRtoNPtMQj4+a65WRUAERCAdCcgin16rnkPIlylTxnAnDCzdu3ePezC59EKfPrOVwEqftU7mTLXPkknfW33znPvMmTPx9NNP48svv8TBgwezBngagGsAUIpfm8uwJ+bJgw50s6e7fLduwJAhRvC6SPYZg+aNGjXKsTcGz9thRrRX+jlvbSONRgREQAR8R0BC3ndLFpMBXwZgZW4tDRgwAAMHDoxJZ2pEBERABERABKIm8PLLQIsWRjNr164Fv6c++OAD7N69O6tpvpg+44wzcMKePVi7axfoPu+mNCpRAvNfew1gjveCBd3cErQOvzsfe+yxUG1IyIcipOsiIAIiIAK5EpCQT88NYgj5UqVKoQCtDw5FFvn03BiatQiIgAh4kQDTjr5brRp6HDkCurYfPXo0a5glSpTA1VdfbYjnanSJX7AAKx54ADX+/tv1VBpVqYL5Gza4rp9bRVnkY4JRjYiACIiACIQgICGfnlvEEPIrV67EZZfxnyoiIAIiIAIi4C0CixcvxqBBg4zc6wcOHMga3IknnogLLrgAXbt2RYvbbwc++wx4913g1VeBb7+NaBKxFPK5DUBn5CNaHt0kAiIgAiLgQEBCPj23hYR8eq67Zi0CIiACniVAq3v//v0xZ84c/PzzzziWmRrOSgs3pkAB1P/f/1CkSJHjc2DKt08+AQoXNs60g9Hkr7gCT23ejG6//up6rhLyrlGpogiIgAiIgEcISMh7ZCESPAwJ+QQDV3ciIAIikM4EDh06hK5t22LM5MkoaDuDvmzZMiMeCy3V+/fvz0JUqFAh1KhRw7h2zTUMVwegUSMzPRzLd9+Z/542DfjiC+DIEaBiRbPOvfei0S23ZKWfc8NdQt4NJdURAREQARHwEgEJeS+tRuLGIiGfONbqSQREQATSnsCksWMxs1cv3PbEE9jy22945ZVXjIjwltU9T548KFeuHJo1a2aI9yL58gFLlwK1agEffwz85z/AN98Ay5cDFPb8f8Z4qV0buPFG86dq1SzO9jzybuBLyLuhpDoiIAIiIAJeIiAh76XVSNxYJOQTx1o9iYAIiEBaE/jwww/RoV49fH7wIM4HsCWTBq3ujNNC4X7ttdcCdIWnlf2ZZ4B16wC61jM9KgPbFSoE0GJPwb51K0Chz8B2+fObkewzo9lboCXk03rLafIiIAIikBYEJOTTYplzTFJCPj3XXbMWAREQgbgTYF73SZMmYcyYMdi4cSMKHjmCoQA6AxgLYMHVV2PO/PkoctJJwJdfAm+9Bbz5phm0LvNcvDHIc88FmjUDbr4ZqFYN+Pe/j7vWh5iFhHzcl1kdiIAIiIAIJJmAhHySFyBJ3UvIJwm8uhUBERCBVCTAQHV9+/Y1AtX99ttvWVNkitOKefJg3aFDYHb2QwCuz8jAwm+/RUFa0087Ddi71zzfvmULwJRxvXoBDz4IlC+fHZX9jHwIiC3r1sWeH390jbrYGWdg5pIlrutHWlFR6yMlp/tEQAREQAQCCUjIp+eekJBPz3XXrEVABEQgZgTWr1+Pnj17gq7zfzFafGYpWrQo6tati+HDh+O9t98G+vRBO1sguxcKFUKeTp3Q7oQTgFmzgM2bAf67bl2gcWOgSROgRImc4wxDyMdskjFuSEI+xkDVnAiIgAikMQEJ+fRcfAn59Fx3zVoEREAEoiIwe/ZsDB48GF999RX++ecfoy0Gqjv99NPRsmVL47w7z75jxw4cevttNHj0USz48UfDGm8VwyoPYGHx4ihId/lbbgHq1wfsaeWcRikhH9Xa6WYREAEREIHUIiAhn1rr6XY2EvJuSameCIiACKQxAZ53HzRoECZPnpwtt3v+/PlxzjnnoE+fPmhhBZrbvh144w1g9mzggw8wiUHqALRz4PdCwYLIM3Qo2j30kHu6EvLuWammCIiACIhAyhOQkE/5JXacoIR8eq67Zi0CIiACIQkwnzsF+ssvv4w//vgjq37hwoVRu3ZtjBgxAuefz/jzAH74AXj9deC118w0cQxWlycPDh07hgYFCmDB4cPZrPFWY9nOytvyyuc6uJdfzhGdPuRkPFZBrvUeWxANRwREQAR8TEBC3seLF8XQJeSjgKdbRUAERCDVCOzZswcPPfQQXn/9dezevTtreiVKlEDTpk0xcuRIFCtWLPu0Dx4ESpYEDh8GLrgA2LDBDFbXpw8mMcf78OHZzsYHMjPOytMq35nx7NOjSMinxzprliIgAiKQCAIS8omg7L0+JOS9tyYakQiIgAgklMC2bdvQpUsXLFy4EIw6b5VTTjkFrVq1wpNPPmmed3cqFPGLFgFjx5pp4/btAypXBpo3x6G2bdGgTh0s2LLF0RpvNReRVT6hhGLfmYR87JmqRREQARFIVwIS8um58hLy6bnumrUIiECaE9iwYYMh3pcvXw660FulXLlyaN++PR555BHky5fP/DXTwWVkAL/8AsydC9x1lxmQ7p57THd6po2rWhW4/XbgtttMq3yePJhEcR8QqT4Y9nSzykvIp/kfoKYvAiIgAjEkICEfQ5g+akpC3keLpaGKgAiIQDQEVq1aZbjNf/bZZzh0iHZwM9L8mWeeiW7duuG+++47Lt6Ze53B6mbMANatA/71L+DTTwGK+wULgOuuA9q1M/O/28S7fXz33X47vl+9Gvnz5g057MNHj6LSJZfg+TlzQtZNhQoS8qmwipqDCIiACHiDgIS8N9Yh0aOQkE80cfUnAiIgAgkkQMt7p06d8NFHH2WJ97x586Jq1aro27cv2rRpc3w0v/4KvPoqMHMm8MknAAU4g9bxp2ZNoHVr4NZbgVKlEjiD1OxKQj4111WzEgEREIFkEJCQTwb15PcpIZ/8NdAIREAERCCmBLZv346OHTsaZ94PHDhgtE3xfu655xrn3W+66abs/TEPPPO486w7C8/D//UXcO65pht98+ZAhQoxHWO6NyYhn+47QPMXAREQgdgRkJCPHUs/tWQI+VKlSqEAIws7lO7du6NHjx5+mpPGKgIiIAJpR4DR5jt37ox58+ZlBayj23ylSpXw2GOPHc/xHoxMp04AU8xt2gSsWWOK+fnzgWuvTTuWsZowI/yPGjXKsbnDhw9jx44dvFYdwOex6lPtiIAIiIAIpB8BCfn0W3PO2BDyuU19wIABGDhwYHrS0axFQAREwMMEeM6dL1tnzpyJXbt2ZY20QoUK6NmzJx588EHn0WfmeDes7m+8YbrSW9b4Bg2Ali0BWu1POsnDs/f+0PjdyZcoIYqEfChCui4CIiACIpArAQn59Nwgssin57pr1iIgAj4lcO
TIEQwaNAjjx4/Hb7/9ljWLMmXKGMHqskWb59UvvgCmTQPatgXeftsU7cOGAfXrHw9gd8UVpnhn1PkyZXxKxnvDlkXee2uiEYmACIhAKhKQkE/FVQ09J52RD81INURABEQg6QQmTJiAJ554Alu3bs0aS4kSJdC6dWuMGDECBQsWPD5Gusi/8grw4oummzyPTh05Ykacb9TISAmHGjWAjz8GTj3VzPuuklACOiOfUNzqTAREQARSmoCEfEovb9DJScin57pr1iIgAj4gsG7dOrRr1w5MG3f06FFjxEWKFEGTJk0wduxYFCtW7PgsDh8GFi4Epk4F/vMfU7hTwDPNHPO6M1VcixZA6dI+mHnqD1FCPvXXWDMUAREQgUQRkJBPFGlv9SMh76310GhEQATSnMD+/ftx//33Y/bs2fj7778NGgxGWqdOHUyZMgXlypVzJsS87suWAeedZ17fsAHgeXee0b744jSn6r3pS8h7b000IhEQARHwKwEJeb+uXHTjlpCPjp/uFgEREIGYEJg8eTIYXHTbtm1Z7VWuXBmjR49GI7rDhyqvvQZ89BGwYAHwzTdmurjhw83UcSqeIyAh77kl0YBEQAREwLcEJOR9u3RRDVxCPip8ulkEREAEIiewfv16tG/fHitWrMhynae7fIcOHTBkyBDk45l2p2JFnaf7/OLFwOTJpjs9y803m270deuaZ+JVPElAQt6Ty6JBiYAIiIAvCUjI+3LZoh60hHzUCNWACIiACLgnwJRxdJ1/5ZVX8BfTvwHInz8/rr76asN1/owzzsje2M8/A5MmAdWrA3v2AFOmmK7yjDzfvz/w+OPAOeeY4r11a+CUU9wPRjWTRkBCPmno1bEIiIAIpBwBCfmUW1JXE5KQd4VJlURABEQgOgIvvfSSkRrup59+ymqoUqVKRsT5xo0bZ2+cFvf33gOef97M854nD3DCCTDyvjNVXJcupsv8li3AL7+Yv2MdFd8QkJD3zVJpoCIgAiLgeQJ6AvD8EsVlgBLyccGqRkVABEQA2L59u5Ee7v3338c///xjIClatCjatm1rCPgcrvO7d5s538eNA779liHqTfFeuDBw111Ap07Hg9kJsK8JSMj7evk0eBEQARHwFAEJeU8tR8IGIyGfMNTqSAREwA8Epk2bZri5n3766Zg1axaeeuop/Prrr9i5cyfOPPNMw6revHlzI2/7li1bsHz5ctx5553ZpjZmzBg8+eST+IP53MGj6vlQs2ZNTJ06FWeddZYzhpdfNt3jmS7u8suB//4XOO004OGHTZf5okX9gE9jdElAQt4lKFUTAREQAREISUBCPiSilKwgIZ+Sy6pJiYAIREqA4rxVq1aGeP/555+zUsBZ7TEVXJUqVQxR3rt3b+Nce0ZGBjZt2oQ2bdrg008/zQpcV6ZMGQwcOBCdaEkPVRhpnmK+bFngpZeAzz4zre/r1gF584a6W9d9RkBC3mcLpuGKgAiIgIcJSMh7eHHiODQJ+TjCVdMiIAL+I8BgdOeddx6+++67XAdfqFAhrF692sj3Tgv8brrFZwauq1u3Lngm/hS3ged++w2YONF0qWdwu9q1gc6dzQj0+fP7D6JGHJKAhHxIRKogAiIgAiLgkoCEvEtQKVZNQj7FFlTTEQERiI4ABXjHjh2xf//+sBoqV64chg8fjhYtWuS8j2nivv4aOP98gKKdAezatzej0HftCrzyihmsrmVL4MEHgYsuCqtvVfYfAQl5/62ZRiwCIiACXiUgIe/VlYnvuCTk48tXrYuACPiMwEUXXYT//e9/rkfdtGlTw82+CAPTBRYrXdzYsQDPyzdqBMybZ4r2L78EMjKAOnWAG24whX3p0q77VUV/E5CQ9/f6afQiIAIi4CUCEvJeWo3EjUVCPnGs1ZMIiIAPCPC8+w8//OBqpIULF87KBZ/tBrrlP/OMmfP977+BEiWAHTsA5ojnefm2bZXv3RXh1K0kIZ+6a6uZiYAIiECiCUjIJ5q4N/qTkPfGOmgUIiACHiEQjpDnGfjf6CrPYuV+HzMGmD8fKFQIKFgQ4Nn5K68EunUDbrmFIew9MlMNI5kEJOSTSV99i4AIiEBqEZCQT631dDsbCXm3pFRPBEQg5Qm88MILuO+++7Jyvoea8AUXXIB1jCrPwjzxFSuaud+7dDFd6L/6CujXz7TCq4iAjYCEvLaDCIiACIhArAhIyMeKpL/akZD313pptCIgAjEmwLRxd911Fz755BMcYVA6lyVPnjxgzvnWzPFuFbrkr18PjBgBLF8OMGc8XexvvNFlq6qWLgQk5NNlpTVPERABEYg/AQn5+DP2Yg8S8l5cFY1JBEQgrgQo2JkDnhb4vXv3Gn3lz58fV199NSZMmICbbroJX9GankspWbIkfvnlFxSk+/yhQ2bkeQp4BrG79FKgVy+gSROlj4vrSvq3cQl5/66dRi4CIiACXiMgIe+1FUnMeCTkE8NZvYiACHiAwLJlywzX+W+//TZrNBUqVMCIESPQrFmzrN+tXLkStWvXxoEDB4zf5QVwE4DNABjPvnCePOhYoQIeXL4cGRT8HToA27YBDRqYAv6aa8zI9CoiEISAhLy2hgiIgAiIQKwI6IkjViT91Y6EvL/WS6MVAREIk8CePXvQtm1bvPXWWzhEyzkYh64Qmjdvjueee874t71s2bIF99xzD8aNG4c7brgB9X78ER2PHMFZAJ7OkwfPAjjl2DEsuO023PLHH5jSowcyZs0CevQAqlULc3Sqnq4EJOTTdeU1bxEQARGIPQEJ+dgzTVaLFwO4HcDnACjUJ2YakpzGIyGfrFVSvyIgAnElMHz4cIwcORK///670Q/PtFerVg0TJ05EjRo1gvbNc+91y5dHuddeA6ZNM93mzzwT+PFHM+L8nXcCDz0E/N//gaJ/+fLluJO/UxGBMAhIyIcBS1VFQAREQARyJSAhnxobpFimaC+ZOR3+99JMQS8hnxprrFmIgAgEIfDxxx+jU6dOWL9+PY4xHRyA0qVLo0uXLnjkkUdCc1uzBujbF1i4EChVCiheHNi0CShTBrj/fjP6/CmnhG5HNUQgBAEJeW0RERABERCBWBGQkI8VyejaqQigA4A+QZrh9V6Z1vYSmXVG2Oo2BTAUQBXb73YCuATAFoc2ZZGPbr10twiIQJIJ7Ny5E+3bt8fbb7+NgwcPGqM54YQTcOONNxrB7BiUznVZuRJo395MH9eoEVCnjhms7v33TVGvIgIxIiAhHyOQakYEREAERAAS8snfBHUBzAEwG4BT0mFa11cBoOv8vszhUrTvAGCJ+esAjA8Q8kcBsO1lEvLJX2SNQAREIDYEhg0bZrjO//HHH0aDdJ0/77zzjHPvDFQXcfn7b2D6dGDUKGDjRqBmTWDGDCAjI+ImdaMIBBJwKeS56UqLngikEAF+YDsZllJoipqKCCSegIR84plbPTKG0jAAmwDcCmBJECFP0U4xbxf5/O9dmUGVrfZWArgt84OSop+u9bTiT5KQT94iq2cREIHoCXzwwQe4//778eWXX2Zzne/Ro4eRTi6qsnMn8Pzzw
NixAF8O3Hwz0LMncOWVUTWrm0XAiYALIZ+RN2/er48ePXqiCIpAqhDImzfvgaNHj54jMZ8qK6p5eIWAhLw3VoIB6ijEnSzy32W6zQcKcieLe/vMFwMU+RTy1wJYKyHvjUXWKERABNwTiInr/P79wMyZwJw5wFtvAR9/DDz1FDBkCHDBBcBjjwHDhwP//GMGs+ve3QhmpyIC8SLgQsgbR98YfPHcc8+N1zDUrggkjMBXX31lBQatnnlENGF9qyMRSHUCEvLeWOHchDwFOy328wKGSoE/F0DfzN/TSr/HVucIAJ6n3ysh741F1ihEQARyJ3DkyBEMHDgQzz//PCjkWSJynQ4ZjLcAACAASURBVP/pJ2DcOGDiRGDHDuDCC4HDhwHmfqc44jW64Y8ebVrhH3wQKFtWyyMCcSfgVsivXLkSl11GTa8iAv4m4GLP+3uCGr0IJJGAhHwS4du6DibkLRd6p7PugfdQ8DMqE4V7TwAM3TwyyPQU7M4b665RiIAIAHj99dfRq1cvfPcd30+apUyZMujWrZt713lGq6fF/emn2SBw4onA+ecD338PMBVd/fpAt27m/+fRV582XnIIuBA1+n5OztKo1zgRcLHn49SzmhWB1CegpxlvrHEwIc9z9DxDH0zI81qzzCn0ALCbBqxMEe90Nt6arR4UvLHuGoUIpC2Br7/+Gh06dMCnn36Kf+jaDqBw4cJo0qQJxo0bhyJFioTH5vrrgUWLgEqVgIsvBubPB44eBVq1MvO/U9SriECSCbgQNfp+TvIaqfvYEnCx52PboVoTgTQiICHvjcWORMjTdMVo9paQD2cmxoMC0zZdSJfTMEq5cuXCqK2qIiACInCcwJ9//mkErXv11VfxN6PEA8iXLx+uuOIKjB8/HudHI7ZfesnMAX/DDaagHzPGPO9OC72KCMSBwLZt28Ju9X//+5+RIhFAsPPCEvJhU9UNXiYgIe/l1dHY/E5AQt4bKxiJa33UQj6SqR+j+6qKCIiACIRBYNSoURgxYgR+/fXXrLsqVaqEIUOG4LbbmGwjRoXtP/OMGYV+716gZUtg6lS50scIr5rJToDxG6IoaSfkb7/9duMlXm6lRIkS2MG4FrmUpUuX4vvvv0f79ozv61zWrFmDSy+9NOj1evXqYeHChUYd1r333nsNTyCnwmsvvPACrHuiWPO0vFVCPi2XXZNOEIGovoUSNMZ06CaSYHeMBDXBFuwuHE7GG/9wbrDqSshHQk33iED6EVi8eDG6dOmCb775JitlXMmSJdGxY0cMGjTIsMS7LgcPAvv2AaUzU2sfOUJT/vHbN2ww879Pm2b+vm1b053+LJ5OUhGB+BCQkA+P69q1aw0BbpXBgwcbIpri3v5sweM1uRV+hlDMb9y4MWi1PXv2GOLbqTAeR58+fcD+GVCQYyhevHjQFwj83GJ7devWNcS/SngEJOTD46XaIhAOAQn5cGjFr26o9HPjHQLXMbjddQDei2BYEvIRQNMtIiACuRPYtGmTce79ww8/xGFGiQdwwgknGK7EfKjmA3FY5eefgfHjgQkTgH//20wPR5f5Tz8F1qwB/voLuOsuM7gd3eo7dwbuu8/8t4oIxJlAMoU8RSxfiDG6PUv16tXRv39/VKlSJSazjnf7HCQt9BTkoSzwgRNyI+SDQejduzfmzZuX9RKAQp4inS8Y+PLx2muZtfd44fhoiacHUcWKFSXkI9hdEvIRQNMtIuCSgIS8S1BxrpabkB8KgGYl+1n4igD4KjoMk1a2GUjIx3lB1bwIpAsB69z7a6+9hr8orAHkzZsXF110keGqWqNGjfBQ8PjOZ5+ZLvLM/16gAHDddcCePcCHHwKnnALcfz/Qq5cZmf72283rzANfqFB4fam2CERBIFlCfurUqYZF2X5UhdM49dRTMXToUNzFl1tRlHi3bw0tmJCfOHEi+LN69WpDPNO1vWdPJuOBYUHn71nI30l8B5s6xXrlypUNC7wVH4jtlcp88ce+At3r+dJg8+bNhscA+7Ms8vwdLfsU+rt378Yll1yCYcOG4Tp+FgHGS8t+/fqhRw/GITYLPyN5lIj1ixYtaoyF7Z911lnGMSPOi+3wpefFDNiZIkVCPkUWUtPwJAEJeW8sS25CninoeJ2Hvayc8LTQU8iPinD4CnYXITjdJgIiADDf+xNPPIFnn30WfzAPe2bhAymthK0YKT7cQvf5uXOBsWMBWhnPPBO4/HJg/Xoz//t555np41q0MAW8iggkmUAygt3RUl6rVq0cIt5CQTFPj5hILfPxbt++ZE5CnhZzxtOgwKUrOz0OKJApgGfPno29e/eiXbt2hhhfsmQJzuTnhMvC8/CXX345nmcMjcxiCXm2T2+io8x0YSsU5MOHD8ecOXOyCXme5S9durTxQoUin8E6OaZdu3YZIj2YkOecrToU8nTp5wuCvn37Gr3y5QCvh+ul4BJBUqpJyCcFuzpNEwIS8sldaFrbaV1vmjkMRoHhATLzE/14yQDQEQAD3JUIkSPezYwUFdcNJdURARHIRoAP0rQy2c+50prFSPR06w3r3Hsg26VLgbp1Abq2PvAAQEsWz9M2aGCed69XT0HrtB99T8CFqMn1+7l169aYMWNGrhxY5yVmcYigxLt9+5AChTxd3CmQKeLtYpsWav6OR3cyMjKMf4c6Ix84dYr+66+/3rCGn3zyyVmXLSFPoc6+eV7fOqNv3UNhfeutt2YJeVrj6Wpvr7tlyxbjhQNfRHCMboV8oGin1Z5crLlGsISeu8XFnvfcmDUgEfALAQn5nCuVe5SV8FZ2XnjVE1ZbQj5hqNWRCPibwIoVKwyhTmsTLfEszPd+yy23GA/bxYrRaSgGhS7133wDnHOO2dhzzwGrVgFNmwJmui4VEfA9AReiJtfv57PPPhvffvttrhxopQ4VHT5YAxSsP/zwQ67t/9///Z8RxDLaEijkKc7r16+PVatWGUdz7IXHdehuT2t8JEKe1m+2bX9BwPYtIU+XeV6nmOcLSxZa6fft24cFCxYY1+yu9RTqfIlJCzo9B+iNZC9uhTy9BKz+eD8/ZzkmCflod5fuF4H0ICAhn3Od6VcVixxrbCO/R7eR8aDAL6ECPH/qULp3757tbJdH56FhiYAIxIHA9u3bjdROPH96kC7v/DDLnx81a9Y0HqarVq0ah14zm/z6a2DECIBWR7rQDx8OdKRDkooI+IPAyJEjwZSLToVBIDPdpiNKP+dGyMebUryEvGWNtlzP7fOgkKeLO8+chyvkLcs6PYkCXfHtQt7q33phyT75u8aNG+cQ8pYFnpZ8Wvkp5O1n+d0KeQbSs5/Ll5CP9+5V+yKQWgQk5J2F/FwAS6JY6nqZ7vKRBqOLomtXt4YMdjdgwAAMHDjQVWOqJAIi4H8C+/fvR+fOnY2zoDyHykIL1LnnnmuIErqlRly2bwdmzjRd5PPmBQ4dAvbvB+zW/P/+Fxg2DHjzTUbtArp2NQV88eIRd6sbRSAZBPjd+dhjj4XqOiIh78b1vWHDhm76dxwfj8i8++67uY49Gtd9e8NuLfKWyz1d7Nu2
bRu2kKdlncLbivBvH4NdyPP3FO/0ZuA5d37mWaI+0CJvb4NtT5gwwXCtt142OAl564iA/Yy8hHyoPxNdFwERyI2AhLyzkO/lkO4tnJ3E8Ko8/+5pIS+LfDhLqroikHoErKB1zz33HH7//fesCZYrV8446/nggw9GN2l79Pn8+QHmYGbk+WefBe64w8z9vm6dGYX+o48Aps5idOrWrRXQLjryujuJBOJpkY93MLp4t5+bkLcEOy3bdis1xTGDwVkW9XAt8hTnDKBHT8PAEijk+XKBFnYrCJ3l9m4X8rTS02Np2bJl2Y4A0H3fcpWnkG/WrFm2eVC08x4J+ST+caprEUgxAhLyOReUAnwWgLVRrDXzhjBdXJ8o2ojnrTojH0+6alsEPE6A6aVoMaQlySp88Lz77rsNq1JUQetobbeiz69YYUafZ4o4RrefNYsh7wFGtad1/vzzAY6Bkej5kH3LLUA+r77/9Piiani+IBDtGXlOMt7p4eLdvrVQTlHrGQWeoptCmcKXMTr436w7i58fjPzbsSPmzp1reA9RiDNOB63d/B2P/jDYnFXoVk8RztRugefuWSdQyFsp4uiNxPaswHd2Ic8XDvy8ZLo6npFnWbRokZGfntZ8yxWfZ/4ZzZ71aLGn2zxfRoQS8nwZwHr2efhicwcZpIs97+fpaewikFQCEvJJxZ+0ziXkk4ZeHYtAcgjQEtS1a1d8+eWXWSmWYh60jhb2kSMButJfc42Z351W+fnzgdKlgfvuM3/oOq8iAmlIwIWocfX9TMv5448/bghdlho1auDRRx+NOO1c4FLEu332R3HOzyV7Ckv+ftKkSYbwtfLIU7jbrekUxLyXYtcKjGe9AAgMlMff00Pin3/+cdxtFPJMI8eAdlahBZ8/9nso5Pk7q97atWuNlw1WTnvmf2dGD4p4Fh5Poks/XyRQyPPfHDNFuiXkmSKQLyvsAfh0Rj4NPxQ0ZRGIgoCEvHt4RQGUDFH9uHnLfbvJqKlgd8mgrj5FIMEEvv76ayMA0yeffJL1UFqwYEFcc801hqUoMNJy1MOjW/zu3QBd8rdtAxo2BM4+28z/Tit8oUJRd6EGRMDrBOLpWu/1uSdzfJ06dTI+75ws78kcV7r37eLlVboj0vxFIGICEvKh0TG30hwAdUNU9XKU+sChK9hd6HVXDRHwJQGedacF65133sGBAweMOdBVng+3zz77LK644orEzIsWsOnTzRRyTzyhoHWJoa5ePEAgnsHuPDA9zw6BVnMrUJ1nB5mGA5OQT8NF15QTRkBCPjTq8QA6APjeRSR7v+RIkkU+9Lqrhgj4hsCff/6JLl26GA+x9ojzDL705JNPGm6dCS2rV5sR6F99FShRAnjjDeCqqxI6BHUmAskiIIt84snzbDtd1plnXsVbBCTkvbUeGk1qEZCQD72eOwHsAFAldFXf1HB1Bs83s9FARSANCTDiPM9kTp482cpLbVA4/fTT0a1bN8cIzRFjWrsWGDsWKFsWGDzYbOZ//wOqVWOOOvO/jx0Dli8Hhgxh5CczyF2PHsA99wCFC0fctW4UgVQi4ELU6Ps5lRZcc4GLPS9KIiACERKQkA8N7iiAYQD6hq7qmxp6UPDNUmmgInCcAMX7mDFj8NRTT+Hnn3/OusBUksyvPHjw4Ogiztth0zWelnQKeKaMO/10U5ifdZaZNo7p4vhTsyawZAnwyCNmYLvzzgP69AGaNQMKFNDyiYAI2Ai4EDX6ftaOSSkCLvZ8Ss1XkxGBRBKQkA9N+zsAnwNoHrqqb2roQcE3S6WBigAwY8YMDBgwAJs3b8YxWr4BFC1a1IiCPHr0aBQpUiR2mHbuBF54AXjuOeCnn4ArrwTuvRfYs8cU9d99B/CcvZUujvnhWXfmTKBvX+DGG4G8eWM3HrUkAilEwIWo0fdzCq23pgJZ5LUJRCCOBCTkQ8Pl+fhxACoC+CF0dV/U0IOCL5ZJg0xnAgsXLkSPHj3w1VdfZaWLK1SoEBo2bIhx48bhlFNOiT2eNWtMCzut8c2bm/ndaXUfNw7YtQtgaiUKeIp7ezl61HSxt9zsYz8ytSgCKUFAQj4lllGTCIOAiz0fRmuqKgIiYCcgIR96PzDtXD8APTNd7JcA4Ll5p7I2dHOeqKFgd55YBg1CBLITYA7kBx54wLBgWDmMmS6udu3aRrq4SpUqxRcZBfxTTwFt2pjn4e+80wxYx3PuXbsC8e4/vrNT6yKQEAIKdpcQzOrEJwQk5H2yUBqmLwlIyIdeNp6Rpy+rxcr0a3Uu+UI354kaSj/niWXQIEQA2LRpk5H7+MMPP8ShQ4cMJPnz58cll1yCZ555BjVq1Egepq1bgb17zbPvd9+dvHGoZxHwEQGln/PRYmmocScgIR93xOogjQlIyIdefAa6y02821voE7o5T9SQRd4Ty6BBpCuB7du34/7778e7776L/fv3Gxjy5s2Lc889F7TmXX/99fFB8+OPQIUK7lzgN20Chg8Hpk4FTjgB+P57oHTp+IxLrYpAChGQRT6FFlNTiZqAhHzUCNWACAQlICGfnptDZ+TTc9016yQSYK73zp07G7ne9+3bZ4wkT548hrs8A9m1atUqPqNjcLz33gOefhp46y0zRVytWsBffwFffw1cxo8DW/niCzPF3Jw5QMmSQJcuwP33m/ngVURABKIi4ELU6Ps5KsK62WsEXOx5rw1Z4xEB3xCQkPfNUsV0oHpQiClONSYCzgToKt+nTx9MmzYNOxkNPrOUK1fOCGTXlefO41Vo6X/5ZVPAU5yffTbQuTNAa/+LLwLPPw+ceCJAK32+fMDKlcCTTwL/+Q9QvjzQsyfQrp1ywMdrfdRuWhJwIWr0/ZyWOyN1J+1iz6fu5DUzEYgzAQn50IAZ7M5t2eu2YpLr6UEhyQug7lOXAHO9DxkyBM899xzoQm8VRpnnWXien81H4RyvwvzyFOnjxwM7dgA33GBa1c88Exg9Gpg2zRTuDGD30ENARSbkAFC3LrBli5kDnsHuChaM1wjVrgikLQEXoiZtvp979+6NESNGGF5JGzduDLonVq9ebXymrlmzBt9//z2KFy+Oyy67zPg8bdq0abb7brvtNrz22ms52qpYsSLYX/v27V3vPfZ36aWXBq1fr149MLsI67Aux8OMIk6F11544QVY97geRApUdLHnU2CWmoIIJIeAhHxo7lawu9A1gTg+nbvp3nWdtHlQcE1EFUUgSgJz5841HhSZ690qxYoVQ4sWLTBq1CgwdVxCCkX4vHnAXXcBDz5oivkRI0xLO8+483f33QeUKpV9OHzpwOvMC68iAiIQFwIuRE3afD+XLFkSe/bsMTgz6GdGRkYO5sOHDze8mkqUKIHbb7/dEM27d+/G4sWLjZ9bb70Vc3gMKLOwztKlSzFp0iQc47EiphnauRP8fGZ9tkdvKDeFY6P4diq9evUyxjV48GDjpQKFPF8w7ODnrUOx5lq3bl1D/KdTcbHn0wmH5ioCMSUgIR8a59wgwe5oxuJPcQCsw6d3XwW7W7lypfEFpCICIhAZga+//tqw8Hz66aegJZ6
lcOHCuPnmm/Hss8+CD28JL9u2ASedBBQvDhw4YAa34/l25n+nyE/UC4WET1wdioD3CbgQNWkh5JcsWWIE9aTAphjnS1Ba3e2FderXr28I+FmzZuVYXIrsjh07Gm00adLEuG4JeSdBXblyZeOFAJ99oikc67x587K8CPgcRdFPbwG+LLj22muzNc8XC7TE0/OAngES8tHQ170iIAJ2AhLy0e8HRrXn610meN4SfXMJaUFR6xOCWZ2kIgFGme/UqZPx8Pj3338bU2S6uCuuuAKTJ09G1apVvTXtDRsAWrrocu9g8fLWYDUaEfA/gXhHrW9Zty72MLZFkFLsjDMwc8mSiEHGu30OjC7wy5YtMyzYFOurVq3KYc2m9X3Lli1Brdxsh4Ka4viiiy4KKeQpuEuVKhWVkKZY5wsBWuAvvPBCo0+rXf6bYwl0r+fLBnpq0UOAAU4tIc/f0bJPoU8vA6YcHTZsGK677jqjXb4I7tevXzYPAh4bIDvWL1q0qDEWtn/WWWcZL0J4DIHt8CXHxRdfHPEeiOWNLl5exbI7tSUCaUVAQj42y/05gD8ANIhNc3FvRXnk445YHaQagTFjxhgPWfZz73xoGzp0qPFglZDClHDffWcGrHNTDh8GZswAaOnKk8eMUp83r5s7VUcERCBCAvHOI9+oalXMz+VMeaMqVTCfL/AiLPFun8Niuk3LNZ3ilJZ0inlLkNPCTeu5Vcc+Fcsd3/47HmNisSzyFNxWsVzr+/btC1r569SpEyEZGK79l19+OZ5nHJLMYgl5fg906NABR4/yRObxQkFOl34eAbALec6vdOnSxhwp8sePH2+8INi1a5ch0oMJec7RqkMhT5d+tsv5sfDlAK8Hc/OPePIR3ighHyE43SYCLghIyLuA5KLKUAA94bMz8nwzXaBAAcfpde/e3fU5Mhd8VEUEfEnggw8+wAMPPID169dnnbfkwxfd6Xk2Mq5B6yxiPOf5wQfAmDHmOXdGn//yS1OY//ILQJd+Rpm3F7rUT5kCDBtmRqX/97+Bhx8GLr/cl+ugQYuAnwjE2yIfb6Ed7/aZgrNZs2aGK/qZDMKZKeztweKsQHOsa7nNs55TADp7sDyKXN4TWCh0KbKDBaNzs7+s4wC0hp988slZt1hCnkKd3w/2MVv3UFjzCIEl5GmN57jtdel9wJfFdN1nvAC3Qj5QtFsvRoLFHXAz11jWkZCPJU21JQLZCUjIx2ZHMNIKQ6cq2F1seKoVEUgaAVrcKdQXLVoEpo9jKViwoOH+SXfFsmXLJmZs7Hv2bDPS/Jo1AF32GX3+zjtNN/mRI4GpU4HWrYFJk8wx0dV/wgRg+HDg11/pvwr06wdkuoAmZuDqRQREIBgBF6Im5Bn5kEK7QgXMZ8BLqzCAZabredCVobfPrl3G5UZNmmD+Tz8FrRqtxZ+fpRSfFLkstEZbFnnLimwJ9okTJ6Id02BmFlrjabm3imXFtqLeWxZ5CmQr2B3r0uWcL1+rV68esWs9rd8cu90az7btLvu8TjE/m5/dmUcI9u3bhwULFhj32i3yFOo0qNCCziB4dI+3F7dCnl4CVn+8n+w4Jgl5fQ6JQOoTkJAPvca5pZ9jJKu6ACYC4DdL9dDNeaJGyAcFT4xSgxCBBBLg+UKmQuIDJgsfuM4//3wjaF3t2rUTN5I//zSt7889BzCSPM9LMk0c08itXm1a2ZleiVHnKewZgZ5B9ehGT7FPC3zLlqaAp/VeRQREwDMEEiLkAcy3z5jZKH7/PXcGfOmXacluFHh/wJ3RCHnLZZ6fr3ahzf9msQeuo/t9bindWJ/CncI1UMg7uZVb58v5AiEwIF2oDWJZ1u1eBNY9diFvWcOt4KecA3/XuHHjHELessDTkk8rP4U859uzJx08g5+RD3StZyA9u6eBhHyo1dR1EUgdAhLyodfSTfo5cqSgXxa6OU/UkJD3xDJoEMkmQItF69at8dlnn2Wdazz11FMNC0m3bt2SM7z9+01BXr8+0LUrcP75wNKlwNCh5v8zYB3TJ919N0PkZx8jIzsz73GVKskZu3oVARHIlUBChLyHLfI8K86z3LSYW+faLWC0SvOcuWVdpkCmCzp/eGY8sPClAMUvrdpuhLzlzh5o5XezZTkuCm+niPeBQfQo3jk/jpmR+S1RH2iRt/fLtidMmGC41lsp8pws8lakfvsZeQl5NyuoOiKQmgQk5EOva7D0c9adOwFMoDdT6KY8U0NC3jNLoYEkg8ATTzxh5HanFYSFsSIaNmyIadOm5Xi4TMb4QLf6ggXNrv/7X6BmTaBaNaBPH9NdXrnek7Is6lQEoiWQECHv4WB3uaWAo6WZ1mtLpFru9XZxb+fP31tR690IeQp4ZhyxB9Vzu54U5/TYYvygwBIo5DkPfrdYQeisFxN2Ic958ggXI/dbAf7YLvlYrvIU8owlYLe2U7TzHgl5tyuneiKQ2gQk5FN7fYPNTkI+Pdc9rWe9YcMG3HnnnVixYkWW9b1cuXKGoOfDkmcLg919+CFQq5YZ4E5FBETAtwTSWchbqduCCWLL9d1uMWfgQAZ/o/WeApkil55UtHjTHZ8B5PjvQCHPbCL28t133xlCnKKb3wEstG7TlZ/9MbhcsGLls+c5e7votuoHCnlrHhyf/aiAXcjTm4BCnZlP6AHGwrgsfDHB+Viu+ExNxzgArEeLPV9ukGMoIU9OrJfbvBL1R+RizydqKOpHBFKOgJ4KI19SJi3l0/93PNYFYG/kTSX8Tgn5hCNXh8ki8Pjjj+Opp57KZn2/8cYbMXXq1MRb3xmMjgHqrr4aOO8890j27TPPzFPUZ6YYcn+zaoqACHiFgAtRE/L7Od553uPVPtOsUZgzHZyTqzzXiJlAaHVmcDirrF27NitHOsUpRS2t8QxeR2FLgW8X8hTSgYXWcb6wpcC3+uZ4KO5DWeitcf/zzz+O24hCnmnk7GOmBZ8/9nso5Pk7qx7nRas8XxCwMP8788ZTxLPs3bvXmCdfJFhztl5mWEK+SpUqBi97AD6dkffKX7vGIQLxJyAh747xeADtAVwKYC2A6/jylPGwGHAVwCYGLvWRmDceFJR+zt3iq5b/CND6zrPvPM9oBVQqX768IegTlvPdjo0R5CnEmXt4505g7FjggQfMGnv2AExl5JTfnQL+2WfNCPV79wL3328GwlMRARHwLIF4p5/z7MR9ODC62jPAnJOl3YfT8eSQXby88uS4NSgR8AMBCfnQq8TwocMA8DDtJQC2APgcwMUAOvJIU2YOefpx9QvdnCdqGEI+t5EMGDAAAwcO9MRgNQgRcEvgsccew+jRo0G3RRamjWvUqBGmTJmSeOs7B/DNN8BTTwEvvWQK9XvuMQPYVa4M7NgBPP20KeqZ871Jk+PTpGingB81yhTwvI+W+FzcP90yUj0REIH4EuB3Jz+LQhRmueGzRGAJaZEP1bCuuydAK7kVmM79XaoZDgEJ+XBoqa4IhEdAQj40L7rO0+puhYEuBoD5qRjgrlPm7fwy5u/9EipaFvnQ664aPiFAN026TDIA0NGjTDIBVKhQwRD0TZ
s2Tc4s1q4F+vcH3noLKFMGePBBoFMnM2UcrfMU97TO01WzQweA6YbKlwcOHDCv0QLPNHSWgD/zzOTMQ72KgAiETUAW+bCRJeUGK7CePU99UgaS4p1KyKf4Amt6SSUgIR8aP5WBXbRbbvX1bOnmLNf7fKGb80QNvfH3xDJoENEQ+Pjjj9GmTRsjoA9L/vz5Dev79OnTUaRIkWiajv7eZctMN3hGOG7VCjjxRGDrVmDECGDiRB4ENfO/M8Vd2bLH+2Mu+IsuApi3nhb4M86IfixqQQREwDMEXIgafT97ZrU0kFgQcLHnY9GN2hCBtCQgIR962ZlebnFmYDvWtkR7CduZ+DkAaPqTkA/NUzVEICoCzLH75JNPGoGAWBi4qH///o5pgaLqKJqbGZSOP9a5d+Z3v/NOoFAhoHNnoEsX0zrvVCjmCxSIpnfdKwIi4FECLkSNhLxH107DioyAiz0fWcO6SwREwAjWppI7gVUAmJeEP/sAUNgzuB3Pt7HQpX4zgB1+c61nIDBGW1URAa8T2L9/P1q1aoU333wzKwpwpUqVjLzvNZlj3evlhx+A6dPNAHfFi3t9tBqfCIhAnAi4EDUS8nFir2aTQ8DFnk/OwNSrrl0xaAAAIABJREFUCKQAAQn50It4KwBa3HlOngHv+BR+L4BJmVb4iZm/6w1gZOjmPFFDDwqeWAYNIhSB9evXo3nz5vjyyy+NqkzdU7duXcyZMyc5wes4CFraFy8GrrvOdJEPp/BeRqIvWjScu1RXBEQgRQi4EDX6fk6RtdY0TAIu9rxQiYAIREhAQt4duA6ZketpfadwZ7R6Fkaq7xVwht5di8mtpQeF5PJX7yEITJ48Gb169TLyDbMULlwYnTt3NnIJJ61QhM+fDzCbA/P+vvsu0KCB++EsXw706wfw/P7Che7vU00REIGUIeBC1Oj7OWVWWxORkNceEIH4EpCQj47vWZmu9mauK/8UPSj4Z63SZqRHjhxB+/btMXPmTBw6dMiYd7ly5TBx4kQ0bNgweRwCBfyVVwJMLUWLfJ48wO+/A8OHAw8/7Ow2//nn5rVFi4ALLwSefBLgfHivigiIQFoRkJBPq+XWZGWR1x4QgbgS0JNkXPF6tnEJec8uTfoN7Pfff0eTJk3AKPTHjh1Dnjx58K9//ctwn6eQT1qhgH/7bdMCv2oV8K9/mQK+bl1ThP/1FzB6tCniWfeNN0xxb5WvvgIefRSYNw+oUgV4/HHgttuOB8BL2sTUsQiIQLIISMgni7z6TRYBF3s+WUNTvyLgewIS8jmXkEHrIik8Q186khuTcI+EfBKgq8vsBHj+/bbbbsM333xjXChYsCDuvvtuPPfcc8gX7tnzeMAdNgzo0yengGfu9ylTTIFPazzzwz/yiJkvnoXX27YFZsygS4FZr00b5seLxyjVpgiIgI8IuBA1+n720XpqqKEJuNjzoRtRDREQAUcCEvI5sTBvPEV5JGzy+mSfGQ8KpUqVQoEgaa66d++OHj16+GQ6GqafCCxcuNAQ7L/88osx7JNPPhmDBg1C165dvTWNn38G1q8H6tUzLfC0uv/nP2Z+d758aNbMdJOvVCnnuDt0AM47D7j3XjOHvIoIiEDaEBg5ciRGjRrlON/Dhw9jxw7DXsDMN587VJKQT5udkh4TlZBPj3XWLJNDIBKxmpyRJq5XCnkWivlXAcwGMC9x3SekJ+NBIbeeBgwYgIG0JKqIQIwITJgwwQhgZ+V/P/XUU43z7zfddFOMeohzM3PmmOL9mmtMd/rqVgbKOPer5kVABHxFgN+dj/EYTu4l7YT8pZdeijVr1uDee+/FuHHjHOnw2gsvvIB69eqBL30TVUqWLGmMK9EBVcmDXIIVi4OX2YVaIwn5UIR0XQQiJyAh78yuKYBmAJh6joKeZW6mqH89ctyeuVMWec8sReoPhOJ97NixOHjwoDHZypUr47XXXkO1atX8NfnDh4H33z9+Rt5fo9doRUAEEkQgkRb5LVu2oHXrhzB9+mhkZGTEfIaxbP+yyy4zhHzx4sUtr4Qc46Wg3rNnj5FmNJFCvlmzZqhfvz7a8lhUAgvnyhcXToXfnX369MHgwYPhZXahcEnIhyKk6yIQOQEJ+dDsKOYp6inuWSjsKeonAHgv9O2erCHXPU8uS+oMilHnW7Zsiddffx2MRs9yxRVXGP9dtmzZ5E+UUfHHjwdatQJKlgx/PHSzV9T58LnpDhFIcwIuRI3r72eK7Dp17saWLQOQkfEY3nvvxZiK+Vi3TzFK4fr9999j8eLFuPbaa7PthqVLlxqW+EqVKqFixYoJFfJe25a9e/fGvHnzsHHjRmNofmbnYs97Db/GIwK+ISAhH95SUdTfC4ChqSnod2da6emCvyy8ppJa2/WDQlJHqc59R8CKQP/RRx8ZY8+bN6/hOv/yyy+jUKFCyZ+PlUque3fgu++Al18Gmjd3P66jR4GpU4Hnnwc++IAJ7t3fq5oiIAJpT8CFqHH1/XxcZL8IgJb4LcjIuDtmYj4e7VOMMjYPC4V6oHt9x44dsXnz5qzsJXaLPIUtPbn4EoAWfVrsackuVqyY0R7vowWbLwN2796NSy65BMOGDcN1mZlEQl0vUaIE2D9d6xmEle3s3Lkz236l6/2rr76azZtgyZIlhtV89erVxrg6dOiAoUOHRrXPOUd6rtF74UKmLM0U8pGwCzVvekD069cvW0wkciYDcixatKgxFrI566yzDD6cK/mS/8UXXxxyri72fMg2VEEERMCZgIR85DsjUNTvUtT6yGHqTn8T+Prrr40UcvYI9Pzif+qpp7wRgZ54GbiuWzdg8WLg6quBMWOAiy4ywVPgz54NXHCBGaTOqTAf/P33AytWmGfln30WKO2XRBX+3l8avQikCgEXoiakkM8psi06sRHz8WrfEvIUiRS8R/li1FYoKocPH26kHmUaUkvIU0BPmjQJFPNsg0KX7ubVq1fPqkMhXrp0aUNUM43p+PHjDSG8a9cuQ4yGum4/I08he/vtt2PVqlW4yPqOAJ23SqJ58+Z4ni9yGUTp1VeNetbPypUrjZcH0Z7v53n4yy+/PKsfu5APl52beTsJec7JYkchz5cUXJO+DPYKGC9NeD0zcGOuf54u9nyq/HlrHiKQcAIS8tEh56tIfqpZZ+nzRddcwu4O+aCQsJGoI18TWLduHW6++WbwwY/FkxHo//gD6N8fmDAB4BnSkSOBW2457hq/eTNw333AggVmGjnme7cX3v/wwwDPMZ5zDvDMM0CAS6ivF1GDFwERSBgBF6Im1+/n4CI7NmI+nu1bQp5CnQKTQpgvgFlo2b7++usNcXjrrbdmE/I8v16jRg0wm45VKNhpEaaQpNWZ7vj29jgPimqKfwr73K4ztkBgsDt6k/FeK/idNT5aoy0rOe/hufpZs2ZljYvu8BTbTkcH3Gwyqx9aw/l9apVI2IXiYs3bjZAPFO3Wy45NmzaFPM7hYs+7QaM6IiACDgQk5MPfFjTh8cx8BwDFM9PUrc50sR8RfnNJuUNCPinYU6dTWir4APbjjz8ak+IDDS0mjRs39
tYk160DatcGaPmhSO/SBTjhBHOMzPc+ejQwYABQooRpYbePn2f7Kd4p4hnojlGoH3gACJKy0VsT12hEQAS8SMCFqAn6/RxaZIcW83v2ADx2Tecj66Mw666sM/eWu34wgpFZ/i0xSks7BTDF/Gx6QgGG+N23bx8WLFhgXLNb5O2j4Bl7imQKebu45HcQXc9pKabbPd3A7cXNdXvUelqkadG3zqhzfGvXrs36b0skU3gHnvXnSwB6FkSSwpfWb87fsvoHCvlw2bmZtxshTy8Ba604JrLhekrIe/FTRmNKJwIS8u5WmwfQeDaelveKmeL9+8yAdzwfv9ldM56pJSHvmaXw10BWrFhhCPht27YZA6cr47Rp09CwYUNvToRi3RLgp556fIwrVwLt2wMU+nSXZz74okWzz4Huk7zWujUwbBhw2mnenKNGJQIi4BsC0Qj5WrUa46OPugC4xsV838dVVz2NDz/MnmiHGd0aNOCZctNByV5i0X5uA7MLecuiawVDpfjl7/gyOFDI05WeIpsvkCnwKdQpICmmLdduywJPaz+t2RTyvKdnz57GkEJdD7TI84w8x2EJVY5vxIgRWV4BVmA+jocWf3vh7/hCIdxUdpY1nvM988wzs7UZKTs383Yj5HlcwB7TQELexZ+gqohAAghIyAeHzK846xy8XbxTuDNivd/Eu32mEvIJ+ONKpS4+/vhjw2Lyyy+/GNMqU6YMZsyYYZwF9FWhlZ3n5Okef/75psX98sudp7B/P80OwJVX+mqKGqwIiIB3CUQj5FPJIs8VojimOzzPsNOt3hL1gUKe9Wghp6u8JXApqhlUzumMNjlNmDDBqO9kGXe67pRH3vodRbT9zDjHbuV/p6APtP7zOu/lvMIp/I7l2HjWPrDYhXw47OztBJt3oJDnkQXGuLGfkZeQD2clVVcEEkdAQj4n6x6ZrvOXZF7aA2Bipuv8msQtTVx7kpCPK97Uafz99983gvv8+uuvxqSYOu6VV17BNde4sQh5lMPddwNnn20KernJe3SRNCwRSE0C0Qh5Egkt5iNze7dox7P9QDFKcUzruRVIzXLdtgt5y/IbGHiOonfZsmWGkKclv3379sZ/24PT0U2dLuHsJ7fr7NdJyFPM0krOCO106Q/Ma+8U8Z1n6BnIj0fN7GNxs5sDrf72eyJhF4qLNW/GILBb2ynayVJC3s2qqY4IJJeAhHxO/gyjavlJ0fq+OIwlmhRG3WRWlZBPJn0f9M0ziMwDz3RyLKeffroRSbhmzZreGz3Pr0uQe29dNCIREIEcBKIV8rmL+ehEfGgxH137gWLUSnNGV/S5c+dmBb6zC3kKaJ6lpzs9g8+x0NpupZkjT6ayYx0GtKNLO8uiRYuMPOy0+PMMe27X6c7vJOTt7vNsJzAGDC3XdN+ncKf45dEz/o4vEPhvFv435zZx4sRcg8LxhQHnzRcBTi8AImXHeZGPExfrGAPnySj/rEe2fHlC9/5QQp4vSViPQfNyKy72vD4pREAEIiQgIe8s5K3fZj/4FBwyObKuotZHuBF1mzcIvPPOO2jTpk2Wu2L58uUNawcjBnuu/P23GYiO7u/LltFPM/whMgheJPeF35PuEAEREAG4EDWuXrTHI8+7fXni0T7FKOOqMKCdVWiF5s8/jGeSWSho+TurHq3DFMwUjRSbtJQ3bdrUEPdWwDsGoqPVnUKYhVZ0uoxb4jvUdQbKYx9Ma2cvFMIcyx/MXuJQ+LLAyq1OzwJat+nyb7nVMygfjwEEehQENsV6I0eOzMbBXidSdqHmvXfvXuPYHF8kkC3/TQ8GinRLyFepUsV4UWEPwKcz8vowEwFvEJCQz7kOZmSUyIqi1kfGTXclmcCbb76Ju+++Gzt37jRGwnOIb7zxRtiugQmbxocfAvfcA/z0EzBoEMC0RPnCeI9mRaFnbvnXXz+eii5hE1BHIiAC6UggVkKe7I6L7QHIyHgM7733YkjraDjM491+OGPxc91OnToZLwnCdbX385ztY3ex51NlqpqHCCScgIR8wpF7okPjjT/fQBcI4pLMfK2RpE7xxOw0CNcEGMTulltuybI2MGgPBXy1atVct5HQin/9BfTrZwaro5fAiy+aud2tQis9U8p17QqcdJLz0L79FmjVyrTkP/qomZYunJcACZ2wOhMBEfAbAVpWR40a5Tjsw4cPWx5P1QF87lDJlUXeuo9iu3XrhzB9+uiYivhEte+3tY1kvPQwsIL6RXK/3++RkPf7Cmr8XiYgIZ9zdZgnnqnl9kaxcAxVykj3a6NoI563Gg8KuXUwYMAADBw4MJ5jUNtJJLBhwwYjSjAfAll4xo1u9efYRXESx+fY9QcfmFb4rVuBxx83g9XZBTivM5Adr7/1FlC/fvZmmCJo4kTzPqaSmzEDuOIKr81S4xEBEfA5AX53Psa0l7mXmAj5UJ3oenIJ8GgaXdTbtWuX3IEksXcJ+STCV9cpT0BCPucSHwHAiCojo1h9uucP9fCZeVnko1hcP9/K4HU8V7iO+dMz88DTAu/JIHZ20FOmAG3bmsKbVnhGnbcKz7kPGQL0729enzw5+3XW++03gA9SFPhsh1b7k0/281Jq7CIgAh4lkEiLvEcRaFgikEVAQl6bQQTiR0BCPidbRq2nCOdPpKUfAIr5MA7tRtpVRPeF5boXUQ+6yVME9u/fj0aNGhkpZViKFCmCKVOmGIFtfFGYv372bODBB7Nb4SnQW7dmiGKgb1/zvHz+/NmnxPP0t94KMIc888Y3buyLKWuQIiACqUfAhajR93PqLXtaz8jFnk9rPpq8CERDQELeWci7jVYfir2EfChCuh5XAkeOHEHr1q0xa9YsHDt2DAULFjQi7Haje7nfy/LlwB13AAxcN3060KCB84w2bwZ69gTGjmUePb/PWuMXARHwMQEXokZC3sfrq6HnJOBizwubCIhAhAQk5HOCm2vLIx8h1qzbbo+2gTjdrweFOIH1UrPMG/vUU0+BYj5fvnzo3Lmz8d8pUWhdZ0C+kiWBV14BypdPiWlpEiIgAqlNwIWo0fdzam+BtJudiz2fdkw0YRGIFQEJ+ViR9Fc7elDw13qFNdpnnnkGPXv2xMGDB5EnTx4jJ+zMmTMNMZ9ShUHtypbN6UqfUpPUZERABFKJgAtRo+/nVFpwzQUu9rwoiYAIREhAQj5CcD6/TQ8KPl9Ap+EzOi5zwe/bt8+4fPXVV+Pdd99FoUKFvD/bzz8HeJb9oYe8P1aNUAREQAQiJOBC1Oj7OUK2us2bBFzseW8OXKMSAR8QkJD3wSLFYYh6UIgD1GQ1+emnn+Lmm2/Gbwz8BuCCCy7A0qVLccoppyRrSO77ZUo4BqBjELsLLwT++9/ILOx//skIfu77VU0RcEEg3jm6XQxBVVKMgAtRo+/nFFvzdJ+Oiz2f7og0fxGImICEfMTofH2jHhR8vXzm4Ldv3446dergm2++Mf77zDPPNCzwns4Fb+f+99/AffcB06YB7dubwehOPDH8lWFKOeaPf/ddoDpTM6uIQPQEKOLr1LkbW7YMQEbGY3jvvReRkZERfcNq
Ia0JuBA1+n5O6x2SepN3sedTb9KakQgkiICEfIJAe6wbPSh4bEHCGQ6D1914441YuHChcVuJEiUwb948XHPNNeE0k9y6330HNG0KbNgAPP+8KcQDC6PS79oF3HJL8LE+9xzQuTPQsKEZ9E5W+eSua4r0flzEvwiA4n0LMjLulphPkfVN5jRciBp9PydzgdR3zAm42PMx71MNikC6EJCQT5eVzj5PPSj4dN179OiB0aNH4+jRo0YquVGjRuGBBx7w12zeeAO4806gdGngtdeAiy7KPv6jR4HBg4EBA8yUcvPnA3kCPqoYtZ4p5UaPBjj/MWOy55f3FxGN1kMEcop4a3AS8x5aJt8OxYWoSdnvZwZeffXVV3NdO76Y3rFjR651eHTs+++/R3t6ckVQJk6ciOHDhxttVKxYEbfeeiuGDh2a1dKll16KNWvW4N5778W4ceMce+C1F154AfXq1ct6qR7BUNLiFhd7Pi04aJIiEA8CEvLxoOr9NlP2QcH76CMb4YwZM4yHlgMHDhiR6Nu2bWs8RPiubNwInH02cOONwEsvAcWLZ58Cz/m3agUsXgz07QsMGpTzzDxd8lmHLwSYTq9Ll5xC33dgNGAvEAgu4iXmvbA+qTAGF6ImZb+f165da4hnqwwePNgQzBT3xxgvJbM0adIk16Xu2LGjEQdmI79PwiwU8byfP3Xr1sXKlSsxbNgw3HbbbZg9e7bR2mWXXWaMq3jx4kFfKpQsWRJ79uwx2rC848IcStpUd7Hn04aFJioCsSYgIR9rov5oL2UfFPyB3/0oV61ahRtuuAG///67cdNVV12FRYsW+SMSfbBpfvQRcOWVQN682WvQlf6OO4DDh4Hp001rfGD59VfgppuAL74AZs4EGjd2D1M1RSAXAqFFvHWzLPPaSJETcCFq0ub7mRZ6CvJQFvhA2tEI+cqVK4M/CxYsyGq2T58+GDFiBHhszRLyFOl86bB48WJce+212YbAMdMSX6lSJcOiLyGf+9+Diz0f+R+U7hSBNCcgIR/5BqA/cDMA3wGYC2Bv5E0l/M60eVBIONkYdbhz504jfdz69euNFhlka9myZTjrrLNi1IPHmmHAu7ZtgX/9yzzrXr688wD79QMmTwYY4K5GDY9NQsPxKwH3Il5i3q9r7JVxuxA1Ib+fp02bZnw/OAVf5F5evnw57uTxpQhLvNu3hhVMyNNqzp/Vq1cbQplu7D15lCrTWs7fs9A7zUlo5zZtiniKcLvLPPvq1KkTdu3ahaJFixoW+VKlShnNsP9A93q+SNi8ebPhRcAxWEKev+vVq5fxcmL37t245JJLDGv/ddddZ7RFK36/fv3AI3JWYdpYegOwPvvm+Ng+v+uHDBliMGA79MC7+OKLI1zR5N7mYs8nd4DqXQR8TEBC3t3ijQfQjt8hANYC4KfyIn6PAKA/2KbMa34R8yEfFNxhUa1YE6BFgG6Fb775ptE0v9jpdsgHD3vhsXFmalu3zvzp08cMAO9U+PxDT/W13LkOhXHnLrgg+4Vffsnp9W7VoMZmtrjevZ3bozf8kCHHr91wAzBvnnNdxusbftNHWHqolhnwbsIEoECBbJXPOQfYssX61TFMH70Dt3Ys7djgXXcBW7cCS5Y49/fJJ4DduJI/P7Bvn3NdHtU/6SSA8fTuuce5DuPs2U84tGljTsGp0IGgXTtg506gUCHnOswYyEx6VmEg/mAxDHk6oWhR872HU+EWasZXjZnl9NOBTfykcih0dGBAdnqW0uHBqbRoAbz++vEr3boBTz7pXPfpp4HHHwf++MP5+v79fKjNfm3lSuD8853rX345wB8mNnAqU6YA999//Eq1asBnnznX/fJLus4Cy5aZ743CF/FWu6ZlvmHDF7FsWQa+/tq5v59+AqpWzX6N+/nUU53rV6pkhpDo39/5+rBhwMCBx69xP7/9tnPd998H+PdHB5bKlZ3rMETFt98evzZpEtCypXPde++FMc8PPnC+Tn1Vs2b2a3/9ldP5xqpx8skA55PbZ9ezzx5vj/t56lTnvnn0unVrINRnl/34NU/nXH+9c3uMxUlv79w+u+xxOLmft21zbmv3buC000xHo4yMz1HdzK7B//nc4Y6Q38/cs/fccw+mTJmSTcwH+73zqIL/Nt7tWz07CfnevXsb1vFgru979+5Fu3btDNf3JUuWGNlawilsmxb4OXPmoGnTpoZQ5jjsVnpLyFNgd+jQwYhJYy8U5DxjzzbsQp7n+0uXLm20T5E/fvx4Y5zWC4JgQp79W3U4Drr0s92+PF4GGC8HeD1cz4VwuMSzroR8POmq7XQnICEfegfwNfAwALsBXGKELza/fPlqtCMAPh6xDiOl9AvdnCdqhHxQ8MQo02wQDz/8sPH2nmI+f/78GDJkKHr06O5IgUHa+dBMwcIfHhkPjBln3UiPdQqHYAYaBoanQdxeOnUCTjjBeQHGjwcuuSS4QZxi2S6i6ERw883ObXFcb7x+DB3KvIHCLW5xPOtOkbbX9oqMwuT//s+5PQpfCuHbbnO+/vPPwJw5x6/Ru59i3KnwIZ6ClMaUwBcdVv2lS02RZJVzzwXq13du76uvgEWLzNh8fIHgVPjSgCcLrEIxUaGCc10KjIIFgUaNnK9TtNNxwSp8KREsNhTFFl9I/PvfAIWkU+HLI770sQrF8FVXOdddtQr49NPs4tpe859/ALtA4zXuYcY/dCp8CVK2rLkWToVrwLWwCl+IBBOjfLkwYwZw++0AX27UqtUYH33UBUAkWR/eR7VqT6NLl9eDvuzh3uUetheuA9fDqXAd+EKDLxmcyooV5ks8q5xxBhDsSDFfIjCeJP/2S5Rwbo9/+/wMsAr3L/exU+H+5Yuo5s2dr/OFUOCLpdxCWPDFTO3auX92rVlzvC/+3fPv36nwZQT//kN9dh04cPxuCvFgGQX/8x+zXq6fXW8cb4uZMzvyicChHDwIMGYax75vX/RCnl0Eiu1YiXhr+PFun/0ECnm6s1MMU8Q/z2wmmYXWaP5u06ZNxouLaFzr2WT9+vWNlwBWoYu8/by9JeQp1Dkevky3zu3zvuuvv94Q1gySZwl5WuPZjr0uGfI7nS8nOG63Qj5QtNNqT1bW/J13mXd/KyHv3bXRyPxPQEI+9Bry0ZVW9yqZVYsB4GMP7W6dMn9HYc/fW3VCt5rcGhLyyeWfrfe5c+firrvuwt9//208FDRr1gZLl07G2LH5gj4w06IZzKqb9KnR5ElzYzBVlvQBagAicJxAtBZ55ZfXbgqHgAtR4/r7mXuXLvQtWrTApEmT8Oijj+J0vp2KUfn555/x+OOPGxbwl19+GXS5d3Lnj7S7QCFPl3SKbMaGuSjgzXTevHkNd3uOJRohTzd9sqJFna7qPAdPoU1Xdq4NiyXk6TLP8VDMW4HwaKXft2+fccae1+wWeQp1uuTTgs4geIFH4dwKeUbNt/rjeGjV55gk5CPdabpPBFKXgIR86LWlT5VdtFtu9fR1XpZ5O13vmQclX+jmPFHD9YOCJ0abooPYsGGDEURnW6Y/Zo0aNYyzdUWKFDHckunifOG
FPps8TacdOpgmQJtFxWez0HDTjAAF0Xnn3YC//74LwEMACoYgoIB3abZFYjbdWAp5DmrChAmGsI13oZs4RXAsS6CQtyzPlpu5vS8KeYpvni+PVMhTEFMk08LPzC9Wsazp1osCu5C3xmQFwuM4+LvGjRvnEPKWBZ6WfJ55p5C3n+93K+QDz/BLyMdy16ktEUgtAhLyoddzJ4DFmYHtWNsS7XRUtBx+6azbVEI+NEzVAA4cOIRq1Rpj48Z3DBzlypUzAtlVDTxI6ydY9B3t3t08UM7I8xMnAkWKRDYDppeju0Fg7vjIWtNdIuCKQEZGXfzwwy0AmIKKgcLaBBH0EvGugKqSI4FYCvl0schbLveWAI9UyOfmok6BTss8A8zZhTwXkdfoMs+YNXSrt0R9oEXevuBcG75koWu99QLCSchbxwbsZ+Ql5PXhIQIi4JaAhHxoUqsYNDzzh2GxKOwZMsqIVpPpUr8ZwA6/udbTBaxAQGAxC0f37t2zRVYNjUk13BBo1qwf5swZDeAAChQoienTn0cze0QyN414rQ6jxfEgKdPKMa87o45FKsJ5aJ5R3HiIuGtXr81U40lhAlWrNsLGjfMBHALAiGqMIsiob3ZBLxGfwlsgZlMbOXIkRo0a5dje4cOHraBlEQe7Y8PxPsMe7/Y5h2Bn5GnFtkeKpxBm4De6wTO4XaRC3rK8W8LaWiAGvKN4tyztgUKe46SF3QpCZ7m924U8723fvr3xUt5+LIDB6yxXeQp5ft/b50bRznsk5GP256eGRCCtCEjIh17uWwHQ4s5z8gx4VxwA/csmZVrhJ2b+jjG8R4ZuzhM1DNf63EYyYMAADLSHRvbEsP07CH7J33FHOxw+zC10Apo06YLXXmMMRZ8XhoBmBKfv/ybiAAAgAElEQVRvvgEYJj1YiHVGrGLI88ceCy7yeT6RkdsYNeqdd4JH3PI5Mg3fmwSOC3lrfBT0LwGYmSnoayMj417oTLw3189Lo+J352P8rMu9RCzk4x1VPt7tW1icotZbOd0piilyV6xYYUSxZ91Zs2YZt1LIM7YMXdgpuosVK2a4y/N3dI/P7Rw/2+H3MdPZMXsAz50PHTrUiFrPvlgChbyVIo7n4dmHFfjOLuTpNUChznR1PCPPsmjRIsybN8+w5luu+Dw+x2MKrEeLPd3m+YIilJDnywDWi2WMglAbNFbXXXihxKortSMCaUdAQt7dknfIjFzPgHYU7taBNEaq5ye2/Qy9uxaTW8sQ8rLIx38R+JBQp04d/MTw0WBE8AZYu3YeihQJkn8s/kOKXQ8MAc8Q19u3AwsWmDm9nArzfDE8dJUqAEPoO7ncM8w9Q5Iz1xxDrTNEuYoIJJBATiFvdW5a6PPlG41HHrkL/fo9hIJMF6AiAkEIxNsiH+887/Fu38JGUU1r9B8BeSoZjI4i18ojT+FOL0GrUPzyXgpbKzCe9QLAKVBe4DJxfdg+76eg5gsDinm6zrNQyDONHAPaWYXu9fz5hyk3MguFPH9n1Vu7dq1hlbfy3DP/O/PGU8SzMHUeg+Ux8j375b85D4p0S8hXqVLFGI89ar/OyOujRgREIBgBCfno9sZZma72e6JrJuF3K9hdnJEfOnQIDRo0wHvvvWf0dNpppxmB7M6hUKV7xzEzHzO90elFHswTnTGMGjQwdbBTYWope55jBvpl3ninwnzzzNk8ZgzA9FxO5aGHgN9+O36FXvJXXulcd/qgzbh2YjOUWzTV0Xq+cSMw8O4fzFxZZcog7zW1MX1W9hzxRsu05l91FVpiJu6ZWhvXNXJ+yfHii9nzw9eqFTzdE7ky3RPTfgVLo8ec7oz+bxWm7A2Wx3zoULMd8nEqTLfGUwVWKV7cDBfgVPbsMfNm8+SAmU46Z2Hau0zjkHGRjgoMPeBU6LzAHNqBKc6sukxndxdjuNnKkCEA05Y5lX79zBR0tlhQ2apxSzPXuFXYDttzKlu3Ar17mznRg6UMfOIJ82/BKpxnsJR6NMgxvSH5OBU6iASmE2Sau2Cp1x580Ex/xnSFwYW81dMh5M//EsqXn4mHHmqJjh3bZBP0TC/45JPAiBFmWjunQkOdPdc4Y4exf6fCMBN8/8fAl05l82bgkUeyX2GecqZUdCqMP9miRfC86bzXpluM9HdMlehUmFrwmWfMUBjB0ujxs4unbqzCz6VgKTLphc5U3aE+u6y22Cf7dioMscHP1Nw+uxiH8+OPj9/NXPJteILCoSxeDLz0kpkDPlgJTHU4aFDwNI7t23+OSZOiyyMffCTpfaVTp05GcLnAiPfpTSX5s5dFPvlroBGkLgEJ+cjX9qLMAHhMTzfXFvgu8hYTd6eEfBxZ9+7dFyNHDsfRo0dxwgknGKluWjFJNs9m7DaDur//PvD770C+fMCGDUDFis4DYl5vioxgD5kUt/yxyhVXAMOHO7dFYdijBzB7Nl8sONdhfmga2q1CAVa3rnNdCoxftx/Ds885f4ys7zsT9w2tYL41qPp/yFcgLzLfaxxvkEqPbwpOOgm1i69D9z4FguZtplB+w5a3mS84KDqdysKFpqhi3mt66jsV3s+HfquMHg1ceqlzXRqDGH+PotOp8MWBfSwlS2Yfq/0e5uxmSAG2FUzEkTv3iFUobpkf26lwPSdPNufqVA4dyrmG3DPB8sVTwPN9E/eKU2E+efseYzv2PWi/h2KTApJiulo15/YovNeuPX6N8wz20mLCBDNf/Bwedvp/9s4DbI6qbP93EpJ8QkwXIiikgoAIpIBUKSlAQIQ0BAGRVPAvKiFNJQl+hjT0swBpCipgOs1ASEMhCBJSQJGSKghISVdQQsL/umfnvJl33tmds7uzuzOz97muV8nOqb9z3nfnPs9znhOQuAnVl4ehPIknOlq1Cs7fp0/mZAg3dcKFvKnj32jc+EL063e0E+PCJJ4e4Z3pvKM+2ybJlVcCf//7/r5w8yjbvejcdNu4Mbtg5d8N9tub+PvFvylBiQ4vZOvnY/JSmM/jN5mbmH/s2OC66GTDk1eLFgGf/GRwHv7t4qaVSRwPNweCEjns3Rv+t8uUZZtsOyj961/ABRdkNo9y/e2iQDeJTPwbQOYZN0rJps7fLk/j/t9jrlN3z7ZOF/v0eQ4LF0rIB89ecZ/SQm6C0hVXk0pHSUBCPkqaqksEahOQkLdbESZSPV/z+cpprqAjP56dZ/A7imMTxd6u1srlkpAvAfuHHnoIffoMxkcfve3cLTt48GDnLJw30epEAdm5c+Y4+WmnZX8RLkEXy1Ml3Q1oKpwwIfN2TIUcZCZkpHsqZ77t0zyWTf2Up9dqpcoJhAv5D/GJT/wahxxyX6BFvsrxafiWBCxEjb6fLVl6s/EcO93Tec+8UrwIWKz5eHVYvRGBBBGQkA+fLDoqMyoZo5R1ZrBYAM8BONE9K98RAPPwvHwW+2B4I2XOoReFCIG/9tpr+NKXvuREEQYa4MADR+MvfxmD9u1TcA6+EE40+3EDg/7W9KvOFcGe/uB0IzjmmEJaUhkRiIxArjPyEvCRYa76iixEjb6fq36VpAuAxZpP14A1GhEoIw
EJ+XDYdJ2n1b2Tm5UB77b7AtxR2PNzkye81srm0ItCBPx5l2zv3r3xGP24wWPgB+O++1Zg9OhjMXMmcPzxETSSxCruvRegtf0b30hi79XnKiUQFLVeAr5KF0MJh20havT9XEL+qrr8BCzWfPk7pRZFICUEJOTDJ3KfT7Qbt/oeAFa4xY3rfZbTieGNlDmHXhSKBP7jH//YuWKGYp4RrBlh9tps0cGKbKvixXftAtxovhXvizogAiUi4L1H3ivgmzf/Oq666oAStapqq42AhajR93O1LYqUj9dizaecgIYnAqUjICEfznYbAIbFGeAT7S08Z+IZeqmP41edjKQXhQLn6dFH/4KLLvoe9u592Knh8ssvx720QKc1MfQ770PmHe+f+UxaR6lxiQA6duyFN9/sW+cMPCPoM8CfkghEQcBC1Oj7OQrQqiM2BCzWfGz6qo6IQNIISMiHz9hqAG3dH16mQ2HP4Hbm4ii61G8GsFWu9eEwk5rj/fc/RLt238E773DP5n00bdoTGzbMwKey3eOW1IF6+80z7gzFzrvSGLY5271WaRirxlD1BPr3vw6nn965zrVyEvJVvzQiBWAhaiTkIyWuyipNwGLNV7qLal8EEktAQj586niZEdUbz8kz4F1zAEMA8CZlWuF5my0/Gwlganh1scihF4U8pmH8+PEYN246gLdQr95ZmDRpFG66qVceNSQsK6PO8z4o3hdFIc970nIFrEvY8NRdEciHgIR8PrSUN4yAhajR93MYRD1PFAGLNZ+o8aizIhAnAhLydrMx2I1cT+s7hftQtxgj1Y/wnaG3q7GyufSiYMH/mWeewfnnn48dvPwd9XHqqePw1FM/sCiZycL7jBs2BBo3Di7CeHC849sk3v984IHBeT/6CPjgA+e69ayGcbZHDW4S709n+0GJdTFvnfZ4mfP112Pv9Jl4/5bbgG9/u6Z4nfuiWYEr8HfvzoyzUaOA1h55BP/51X3Yc+esmkvdDzggcy97UNqzB/jPf4AmTbLvH7A9b2JdrDMo8a54djOsPVOWedl2UOL1gf/+d2YYudhyvkxivmx32XP+uQ6y3cVNxJxXb+KcZbsrnH3js7D2TH10suCaCkpcCmQXxpb5TMq6BpAZJ+c2jK23L2HrvRy/X0bIky3n36Rca4Drl1zC2HrHGrbec7Fle2RrUrG/X/6/JWFrgO2G/e3yjjVsvZfr94t98v8tCfv94u+M7d+SoN8vC1Gj7+fgP0n6NKEELNZ8QkembotA5QlIyBc3B+1cV/udxVVT9tJ6UciB/IMPPsAZZ5yB1at5qgL4/Oc/j5UrV6JZs2aO+H3pJWDZMuAPfwB4PD7bSx297m+4IXOlelAaNy5z/NykCy4AFi0Kzrt4MXD++cDmzUBbHvQISB06AJs27X/wu98Bl10WnPdrXwP+/nfgySc9z6kErr4amD0bT4+4H6dOurjmIcWhV5yCkfp//GNg3jwnEB7FL2+cG0JfFW96+mng3HNx3afm4c7Xetc8YTN33x3ct9/+FrjqqoxgziYOGHvP+wLO+TiXYSgDUq9eGUG1cGHw8wcfBL7ylf3P2rQB3norOO877wCHHJKp65JLgvP06wfMn7//2U03AZMnB+clwh/8IDPWoESB5l9fzz8PfOELwflPPBE4+eTMXASlWbOAQYP2P2E9rC8ovfgi135mjZx+enCes84C/vjH/c84To43KI0a5SwtOLc0BqTXXwcOP7z2A84D5yMoMWTDNdcAP/xh8HNzMsQ85frgOglK/F0++2zg5ZeBo46qncMIed6QyN99k+66C/j614PrY9zLv/wFePbZ4Odr1gBdutR+xt+vbBs0FPFTpgDf+lZwfd/5DvB//7f/GX/v+fsflObOBQYMALZtA1ow0ktA4t+u997b/4B/l/j3KSh9+cuZTYuwv12mLNtk20Fp506gefNM33P97fKGJSGTn/40uL7bb8/8Da71t8uX1e9sxDnrZg7O+fLypszPfS78b5cpxrXENeVNFqJG38/B06lPE0rAYs0ndGTqtghUnoCEfDRz0BRAS/eO+WhqLG0telHIwvfb3x6Fn/1sMj7++GM0adIEDz74IM455xwnN4UjX8woLmjx4ksdRWc2Yb1gQSY/xVBQ4os+f0w69FCAwigovflmZuOAL83ZLJoPPVTbenvaacARRwTX96c/ZcRjD969YBIV5cSJwD334N1zBmApQzy6iS+7X/2q+w92mpWfemomCtgBB+C++zIvv528FzD+7W8ZBfi5z2HVpBVY//r/1NTXrh1wyinBfeNmBfV///7ZrewUI96Xc05RNsG3YkWmnjPPDG7vH/8Annhi/zNaAy+9NDgvhTVF/BlnAJ/9bHAeCl+KUpOOPhqgwA5KFIYvvJARVkGJAmnOnNpPuKGTTYA9+ijQqhVw0knB9W3YUFtcUjRlE2h0RHnkEaB7d16tGFwfhTE3N0w64QSAgjcorVuX2Ty6eP/+UK1sXI/cVPEmbpZk2yi7/36gffvs1zxyI8K7ScENmGybPW+/DdxyC7BxY901R4HauzfAPN41x80oj8NKrX4/8wxAfuedF8yCQpabc95E4ZotDAU3QLiG/JsMpjz3HF95ZX9t/L3nr2hQ4hw89RTQt28WDxoA/NtFDwqTvvQl4LDDguvjRg43OMP+dpnS9Nph20GJHircBAv72+XdDCIT/6aIqfvVVwGyqfnbFdAo/3Z5Ezf++DsUlLh/yU3EsL9dpizzciPImyxEjb6fg/Hr04QSsFjzCR2Zui0ClScgIR/NHNDFnnauLF//0TQSYS16UfDBXLFiBXr0GIt9+1aiXr16+O53v4upU+uGPKCVj/fDU8hlcw+NcJ7KWxWvmaOpMNsbOXvDXQyafKkAV67Mfi0dlSyFPt9kqWxbcp9LSQSSR0Bn5JM3Z3HusYWoqZrv55EjR2LKlCno0KED1q9fn3Xa1qxZg1tvvRVr167Fpk2b0Lx5c3Tt2hVDhgxBnz4MVbQ/9evXDwu4E+VL7du3B9sb5HVJslwoM2bMwOTJk522WU/fvn0xkZveburSpYvTN/bnTt70EpD4bObMmejRowce445QFSWLNV9FNDRUEYiWgIS8Hc/h7vVzDGoXlDq4key99ki7miuTq2peFMLw7ty5E6effjr++te/AjgKDRo8iief/DROOWW/9Tisjqp5TpMpTXNvvAH8+c91/aANiK1bMzsdPGhL07+urauaJZLGgUrIp3FWKzcmC1FTNd/PLVu2BL+DmTZu3Ii2Ae5tFNCjRo1CixYt0L9/f1A0M27N0qVLnR+K6rl00XIT8yxfvhyzZs1yPOuYtm3bhnnz5jn5Wd/w4Xyls0sU8UOHDnV+unfvjlWrVmHSpEnghsEc112KmwoU8txg2Mrvv4Bkxso6JOTt2CuXCIhAOAEJ+XBGtLQzZDmTiVpv/pv/T3HP6+gYAG95eHWxyFE1Lwq5aN9www34+c9/7nzZ8wv4/vsXY9q0k51A7dnOIMdi9irRCfp40/JBf3v6oWfzZaXYpy82fVppsadfuZIIJJiAhHyCJ
y+GXZeQz0zKsmXL0KtXL0dgU4zTWk6ruzcxT8+ePR0BP5vnS3yJFm4KbNZxqXseygj5IEHdsWNHZ0OAYtw2sQx/FnvOwnBjgZ4Ee91InxTy3JCgxZ6bBeY4nmmDGwu0xNPzgBZ9CXlb+sonAiIQRkBCPowQ8BwABrXjzy4AS1zhPswtyqj1FPtJscaz21Ut5BmNni8Hu3fvRv369XHzzTdj7Nix4SuhmnN897uZaFoPPJA5qJ8tPfcccNFFmXx0wVcSgYQTkJBP+ATGrPslEfI86H/55aUbaQnqp0WbR9oouPl9zOCyfvFN6/uWLVuyWrk54IULFzri+AQG6ABjq2Qs8kFCnoK7VatWeQlpiniKcK/LPK30w4YNw/bt29G0aVPHzZ/1MrEvfvd6bjZs3rzZMRrw6J4R8vxsxIgRTn/pZdC5c2fH2n+uG8yDVvwxY8bU8iDgsQGyY362zf6x/nbt2jkbITyGwHq4yXFituAspVspgTVbrPky90jNiUB6CEjIh88l4+sy7JcJR0XRTvHuFe4b3Lvmx4RXF4scVSnkP/xwL1q3vhe7dzOk+MPOlx2j0X8iWzStWExVDDrBKF/HHQd873uZENBhiXfbiWkYJT1PCAEJ+YRMVEK6aSFq8v9+LvUiLUH93ESnZXvChAnOmXYKcIp5I8hp4ab13OTxTq9xx/d+xltlvEKe1nGTjGv96NGjHU+As3lFhWWi5Z19oPs+z+NTKLOvXiu9EfIU2IMHD8Y+7z2VjITcsqXj0s86vEKe42vdurVTP0X+tGnTHBd9s0GQTcizfZOH/aBHIevl+Ji4OcDn2dz8LYceWTaLNR9ZW6pIBKqNgIR8+Izz5mC61hsLfGcA9Mtq4CnKqCe8/CrLpTXhjZQ5R/4vCmXuYNTN9e37EBYs+AwATt9sLFnSytllr8rkuf/devy8i4oWB/9dTdYVKKMIJJNACYyRyQShXkdCwELU5P/9XAKhXWuwEdc/f/58DBgwwHFFP8K9WoXC3hssjoKWFnnmNW7z7JP53Ns/b7A8ilyW8ScKXYrsbMHock0uPQa4AWCSPzifEfIU6hTn3j6bIwQU1jxCYIQ8rfGsx5uX3ge0yPOYAeMF2Ap5v2g3GyPZ4g5EspDzqMRizedRm7KKgAh4CUjIh68HWtt5Br6XJyvFPYX74+5nvDmZYt4r7sNrrlyO/F8UKtfXolp+7bXX8PnP/wC7d88C8AY6dHgCGzZcVVSdiS7M+7N4pxfvurriikQPRZ0XAREQgaQRsBA1+X8/My6JJ4p6HSa8f9N1Pc/Ki3cvbt8e/HjUKB5qjww1hTHFpxHHtEYbi7yxIhvBTjf2gQMH1rRNazwt9yYZK7aJem9c6ymQTbA75qUlndb/bt265eVaz80FBs6jRZ2u6tx8oNCmKzvnksnrss+xUcybQHi00vMYH8/Y85nXIk+hTpd8WtAZBI91epOtkOeGh2mP5cmOfZKQj2zJqiIRiC0BCfnwqaE1nt8idKe/zc1Occ+/4Je5/+Z/068rKefk839RCOcUuxxXX301fvOb3wCoh4YNb8ef/vRldO2a5TLk2PW+RB3ixdc//3nmEutq9UgoEVpVKwIiIAJhBCIR8nQT4Y9JixblbrZ1a+Ddd3Pn6dcPCLBk1xTq3Xt/eZ7HL/BMvnGZp6D1Cm3+m8kbuM5vpQ8aAIU7hatfyAe5lZvz5dxA8AekC6rbbCbwvPm1115bk8VY080mg1fIG2u4CYTHMfCzSy65pI6QNxZ4WvJ55p1CnhsHN91E21DGJT/ojLzftd5/hl9CPuy3UM9FID0EJOTD55ICfQuApgD6MbaK62pPcb/GjVrfHsBkAJkDSpVJ3MrtzptW3MB8jKC/NktXUi3kGUiGX5offPABGjRo4ESmZ2Caqk+/+hXAlxEGrbM56171wARABERABKIlEImQ93cpQRZ5WrZ5lpsWc3Ou3QyHVmn/tW4UzfxhYDd/4qYAxS+t2jZC3i/Aw2Y2l4s6BbqJtO8PosdnHB/7zMj8RtT7LfLe9inqp0+f7rjWmyvygoS8idTvPSMvIR82k3ouAuklICFvN7cU8xTpvP9knVuEAfDoXs/EA1n97aoqWS669o/y1M6LVbP1KZVCnsL91FNPxbp1mSk688wznai4FPNVn556CmCAnyuvBGbN0ln3ql8QAiACIlAJAiUR8hGfYa/DJcL6c10BR0szxbMRqcYi7hX33r7xcxO13kbIm2jz3qB6udaAEf7+u+fppk/xbiztfiHPcdDCboLQGbd3r5Bn2UGDBjnvKCbAH/tCPsZVnkKesQS85/op2llGQr4Sv71qUwTiR0BCvrg5ocDfWVwVTmla9Af7hLi3Wj6naz9d+Fu4D6b42qUlvguAze7nczyR9v1dTJ2Qv+CC3+HRRxs6px2aNj3Iucv1pJNOimBqUlDF66/zEB/fEIAVK4DGjYMHxSB4PCPZsmX4oJmXXg7cGDjttPD8yiECIiACIuCcq+Y5bTc4buaQde2U//dzhEI7cIoiqp/nyylUGQn+xhtvrNOUcX33noufOnWqY/mm9Z4CmSKXZ79p8aY7PgPI8b/9Qn6iL2bAhg0bnHYpup999lmnbVq36crP9hhcLiiZzQW6u3Pe2Dbr5jhMPX4hb8bB/nmPCniFPL0JKNR5XR3PyDMtWbLE2ZjgeIwrPq+mYxwA5qPFnpsb5Bgm5MmJ+bKNq5y/ihZrvpzdUVsikCoCEvKVn066w9N6TuEd5P/NzQJGdjkRwG63u7S+bwXgFfPDXfd+uvjzGT0GjPdA8S8KlecU2INXX30Vp512Gt57j3sdv0Hv3g/j978nCiWHwPvvA2ecAbzzDrBqFdCmTXYwP/kJMHky8MILwKc+lRvg3XcD11wDPPwwwJc8JREQAREQgVACFqImfyFf6qsVIqqf16xRmPM6uCBXecKjBx2tzgwOZxK97Mwd6RSnFLW0xjN4HYUtxbZXyFNI+xOt47RuU4SbttkfivswCz37TBFt2mb/vPVQyPMaOW+f6V7Pn48YYNZNFPL8zOTjuGiVp4WfiVfi8kw8RTzTrl27nHHyTL8Zs9nMMEK+U6dODq877rijph2dkQ/9NVQGEUgNAQl5u6m8FUBfxh7Jkf1jAK3tqnNy8Uz7JDciPutmSNggIU/RTjHvfcZ/M7xsfV97d7rn5NlPutXznHxQyv9FIY+BlSsr73Tl7jUTd8effPLPaNPGwppcrg7GoZ277gKuvx5YuZJvCdl79MADwKWXAt/5DnCbiemYJTvvlT/6aKBXL+B3v4vDKNUHERABEUgEgZII+USMPJ6dZPwcBpjzurfHs6fJ7ZXFmk/u4NRzEagwAQn58AmYBmCQE/oc2OEGk8tWqmN4dYE56F7Hu+mDhDwj5FPM8/40b+IVeLTmr3A/9J6JN9fhdXAD9fkbTbSQf+mll3DyySc7V7o0bNgQd999Ny4vMIJugfOVrGIbNmTc6rMlXjv0hS8APXtmohaHxRTo3x9Yvhx46SXg4IOTxUK9FQEREIEKErAQNYn+fq4g2oKappXcBKYrqAIV
CiVgseZD61AGERCBYAIS8uErg2fPaW3nl6s5fx5eKr8cuYQ8BTst9hnT8/5EgT/PDcLXxz077xX79CLg5oC/HGtI7IvClVeOxz33jHMofPGLX8TKlSsVzC6/tVY398UXA7yXl8L8k5/MXduDDwJf+Qrw618DV11VbMsqLwIiIAJVRcBC1CT2+zlpE2kC63nvqU/aGJLQX4s1n4RhqI8iEEsCEvLh00IhTRf4Ul4tl03IGxd6r+Xd9NhbhufnGSzPa9GnVZ5Cn1fn+VPiXhQ2bNiGo476A/btOxPAyfj1r8fiKgnJ8NUbloPnEM8/P3Mn8Ve/mjv3zp3AMccAn/985h56997fsCb0XAREQAREIEPAQtQk7vtZcysCuQhYrHkBFAERKJCAhHw4OApmWr8vC89acI5sQp7n6Df6XOi9Qp7PBrgfXOpGv6f7f3P3jnvjdp9oIX/ZZQ9hzhyeEjgKBxxwP15//Xy0adOkYNgq6BL48EPguOMyAfD+8IdwYT50KHDPPcBf/wpkifArtiIgAiIgAtkJWIgaCXktoFQRsFjzqRqvBiMC5SQgIR9Om27tjCjPSGHPh2cvKEchQp6bC4xmb4R8Pg07LwqLFi3C8ccfn085HHbYYXnlLybz3r17natmnn/++87Neqed9ixWrixkuMX0IsVleZ/8kCEAI+aGrQNeNzd8eEbA/7//l2IoGpoIiIAI2BF444037DJ6cj3//PPo3bs3P+EddNFcP5d3L1RABMpHQEK+fKzVUvURkJC3m3MGkuM5dF7plu2cPM/RX2dXXZ1chbjWFy3kC+nrxxR0ZUiPPfYYLrroIuzZswdNmhwDbjqceWbwPa9l6E4ymuDc5OPuvncv8PTTwOmnJ2N86qUIiIAIxIgA7wkvIknIFwFPRZNDQEI+OXOlniaPQFHfQskbbkE9poDnWfOwRIXbICxTlueFBLtjEL7pBZ7ddyzyhfS1HEL+4osvxkMPPeR0j/e+zp49u5CuVleZX/4SePJJgFb2Aw6orrFrtCIgAiJQAQIS8hWAriYTR0BCPnFTpg4niICEfPhkUWQzmNwo9673XCXWhlcXmCPs+jlegTfVV5JB+M4F8HgBbcZSyHuvlWvcuDGWLFmCM89kcDulnASeego4++xMFPmZM/OzygutCIiACPie2bMAACAASURBVIhAQQQk5AvCpkJVRkBCvsomXMMtKwEJ+XDcFMy0yJfycHYuIc875Bn0ztt+ewDri/AAiJ2QP+GE+/D88y8D+CFOOeUUPPnkk7pWLnxtAgxYd+yxQOvWmYB1jRvblFIeERABERCBIglIyBcJUMWrgoCEfFVMswZZIQIS8uHgi3FhD689kyOXkOcVdHzeBcAut0Ja6Cnkb7NtwJcvNsHutm3bhuOOOw5vvnmls1fxi1/8Dddff0WBw6rCYlOnAiNHAmvXAl/4QhUC0JBFQAREoDIEFOyuMtzVarIISMgna77U22QRkJAPn68RAEYCaBWeNe8ctLbTus5z+EzzAWwKOPfOKG9D3WvwWgDgeXy/q30+jcfieps777wT119/PXju/ogjOuKZZ9bqWrl8ZvHtt4FOnYCvfQ244458SiqvCIiACIhABQhYiJpYfD9XAI2aTCkBizWf0pFrWCJQegIS8uGMmwKY5Z6Tp6jPFrWeNa0Lry4WOZwXhVatWqFhw4aBHbrxxhsxnNeNlSidddZZ+OMf/+jUfvPNN2P8+PElainF1Q4cCCxYAKxfn3Gtjyox+v2ECQDrP+SQqGpVPSIgAiJQFQSmTp2K224LdpjjTSxbt24lh6qLWs/rZNeuXYshQ4aAG/lBic9mzpyJHj16gLfXlCu1bNnS6dett95ariZrtTNjxgxMnjwZmzZtQvv27dG3b19MnEhbTybFmV0YMAn5MEJ6LgKFE5CQD2fHM/K0gJNV2N1rhUatD+9FtDlCz8iPHTsW48aNi7ZV8Ej3h2jbti3eeustNGnSBC+88ALatWMIAKW8CKxeDXTrBvzkJ8ANN4QXXbcuc/87bwA47LDc+RkBnyL+kUeA888Pr1s5REAEREAEagjwu9Nic7rqhHzXrl0dId+8eXOzmVFn1VBQ79y5E927dy+rkOcNOT179sS1115b9pVMET906FDnh+NetWoVJk2ahH79+mHOnDlOf+LMLgyYhHwYIT0XgcIJSMiHs5tkIeBNLYxsn4RUEYv8ihWbce65jwC4AUcd1REvvviiAtoVulr++ldgypTMdXNZvCpqqqaFndH/330XeOEFoFGj7K2+9RZw9NHAhRcC99xTaO9UTgREQASqloAs8sFTTzFKkU6r89KlS3HOOefUyrh8+XLHEt+hQwfHKl1Oi3wlF2vHjh3Bn8WLF9d0Y9SoUZgyZQr27t1bI+STyk5CvpKrS22nnYCEfNpnOMv3KV3ruevLL9ZypFGjHsekSW0AfBbHHvtz/PWvo8vRrNoggXvvzZyjp5tiz565mfTpAzzxBPDSS9G662smREAEREAEYCFqQs/I9+9/HU4/vTOGDr0KjXJtzBbIu1T1832DR/qYKNT97vW0SG/evNmJm8MbAbxCfuTIkViwYIGzCUCLPi3XdMFv1ozxgOGUGzFiBLgZsGPHDnTu3Nmxap97Lm/pDX/eokULxyJO13pawlkPg/F6E13v58+fX8ubYNmyZaDoXrNmjdOvwYMH13KJt5kCinhuYHh50Eo/bNgwbN++HU2bNnXe1QphF8aFHhBjxoypdZSSnMmAHNk2+0c29J4kH46VfMn/xBN5O3PuZLHmw6rQcxEQgSwEJOSrc2mEvihEieXUU3+Hp5/ml+kenH/+n/HII5dGWb3qykVg927gqKOAk08G7r8/N6uFCwEKeQr/yy8XVxEQAREQgYgJWIia0O/njh3Pw5tv9sUhh9yL73znisgFfanqN2KUIpGCd98+nlzcnygqeU587ty5tYQ8BfSsWbNAMc86KOYnTJiAbt261Yh9CvHWrVs7opobAdOmTXPc+I0QDnvuPSNPIdu/f3+sXr0aJ5xwQk0Hmeeyyy7DHW5wWYp65jM/xiU+3/P9tLyz3xx3nz59HKHMOr1W+kLZ2Yw7SMizfcOO/eAmBTdXRo/OGGG4acLnbryHnL8lFms+4t8yVScC1UNAQr565to70tAXhaiw9OrVC0uW0DVsEqZM2YPhw78YVdWqx4YAr6b76U8zFvZcsQi2bweOOQbg7vqiRUA9/Wmwwas8IiACIpAPAQtRE/r9fOSRF2L9+t8z6gw+8YnfRC7oS1W/EaMUrBSYFMKXXprZ2Kdlm+8LFIcM9Oa1yPP8+kknnQQG4TWJwpcWYQpJWp3pju+tb8uWLY5FnuKfwj7Xc8bt8Qe7q1+/vlPWBL8z/aPIPv74451usAzP1c9m7Bk3LVy40LFmBx0dyLVOWA/bMIn9Xc9Atm4qhF0YFzNuGyHvF+1ms2Pjxo1O3KNcyWLN5/MrpLwiIAIeAnpbr87lUPIz8gxqx13c119/HQceeCBefXUjDjuMrvVKZSPwyivAcccBo0YBt9ySu9lBgzKB8F58ETj
88LJ1UQ2JgAiIQNoIlPqM/H6hbch9iMaNf4OWLe/F5ZdfgWuuuQrHHhscC2XnzsxFJ/xqaNw4mPwRR1yI117jRsH++qPYMDBilC7zFK4U8yaYG8Xv7t27nXPifOZ3rTc94TlximQKea+4pKim6zktxXS79wfRtXnujVpPizQt+kZMs3/r1q2r+bcRyRTf/rP+3ASgZ4HtzT/G44Bl6KpOjwNuInAMFMFMhbKzGbeNkGfUfDNX7A/ZsE8S8mn766XxJI2AhHzSZiya/pY0av1rr72Go446Cv/5z3+cXfBXXnlFQe2imTf7Whjg7oILgL/9LWONP/DA7GX//W/gvPOAAQOAb37Tvg3lFAEREAERqEOg1FHr6wp504V/A7gQhx56NN54447AmWGoFP6537wZyGZIbdToQuzZ4xXy++tv3PhC9Ot3NH772+D6cy0Hrxg1Fl0TzI3il59dcskldYQ8hS3FLl3dKfAp1CkgKaaNa7exwNPaz7PdFMEsc9NNNzldCnvut8jzjDw3FIxQZf/oAm+8AkxgPvaHFn9v4mfcULC5yo6CmCKZ3gXeiPlmo4Bn5QcOHFhLyOfDzmbcNkLef4ZfQl5/+EQgHgQk5OMxD+XuRcks8o888gguuugi5+zbhRdeiIcffrjcY1N7JPDf/2aukLv4YqBvXzERAREQAREoE4HKWOR/jZYt7yuhRf7XOOSQ+4o6k+8V8pwKimO6wzOgGt3qjaj3W+SZjxZyusofccQRzixSVPOe9aAz2hSv06dPd/IHWcaDngfdI28+Y7+9Z8bZvhHgFPRBV+iyLMcVlnK5qHvd+wtl520/27j9Qp6bCgxu5z0jLyEfNpN6LgKVISAhXxnulW419AxeIR08++y5+MMfeKf5P3HLLbfgBz/4QSHVqIyfAAMCzZ0L9OsHNGggPiIgAiIgAgklYHFeOPT7ufYZ9uIFth9lqer3i1GKY1rPTSA147rtFfLG8usPPEdX9xUrVjhCnmJ40KBBzr+9wel4vI/WbraT6znbDRLyFLN0nWeEdrr0+6/DC4r4zjP0DOTH4HzevmRbrsby7t9wYD3kZbwUCmEXxsWMmzEIvBHzKdrJUkI+oX9k1O2qIiAhX/h0M5TpAAAbAMwDsKvwqspeMvRFId8e9ex5CZYu/QmAP2Hx4lbO7rpSRAR+9Svg2muBp58GvqhggRFRVTUiIAIiUHYCUQj5jh17uVHri7OQZxt8qer3i1FzzRld0efNm1cT+M4r5CmgeZae7vQ8N85Ea7u5Zo48eZUd8/AoH13amZYsWQIGnqPFn2fYcz2nO3+QkPe6z7Me5vMmWq7pvk/hTvH77LPPOi7y3EDgfzPx3xwbXeSzBYXjRgNZ8BgAI/HTnZ/eBt56CmXHcZFPEBdzjIHjZJR/5iNbbp7wOEOYkOcmCfMp2F3Z/4yoQRGoISAhb7cYpgEYyHgjANYB4F1qSwCQHw9HbXSfJUXMRybk6QrXqVMn56zaAQeciSeeuAennPJZO6rKFU5g1y6gUyfgrLOAOXPC8yuHCIiACIhAbAlEIeRLdc+7gVaq+ilGeUUcA9qZRPdx/nz00Uc1n1HI8zOTj9ZhCmaKRopNWsp5TRvFvQl4x0B0tLrTks1EKzpdxo34DnvOQHlsg9faeROFMPvy3nvvBa4pbhaYu9XpWUDrNkW4catnUD4eA/B7FPgr43EMimgzRm4MeOsplF3YuHft2uVE2afnAdnyv7mxQJFuhDzf8dgfc+0e+64z8rH9E6OOVRkBCfnwCWeklEkAdvC7gTFTADCM6IkAhgLoCIB5JgIYE15dLHJEIuTfeOMNHHnkkXj//fdx+OGHO9FcGzUKjpQbi1EnsRO0Lvz858DLLwPu2cAkDkN9FgEREAERgBOFnFZXAPyfTEjy2imS72exjg+BYcOGOZsENq728el1dD2xWPPRNaaaRKDKCEjIh084Xedpde/kZm0GYDu9uwAMcz/jlzE/N3nCa61sjqKD3fH6l/POO88Jasfdc//ZscoOLyWt846gY48FRo8Gxo9PyaA0DBEQARFIN4FSB7tLN730jY7vSCaoX/pGFz4iCflwRsohAoUSkJAPJ7fPJ9qNW30PACvc4nS9HwQgKZHIirp+jiKeX0xMdF370Y9+FE5ROfIn8OUv038tY40/6KD8y6uECIiACIhA2QmU+vq5sg9IDRZMgGff6aLOK+SqNUnIV+vMa9zlICAhH055G4ClbmA75jaivYUnwN1cAH2SJuR5Jqxhw4aBBHhX6vDhw+s8e/TRjbjgAjopfBezZ9/snAdTKgGBJUsABgy87z7gq18Nb2DlSuALXwAsrrtxKtu7F5g9G+BmwSc/GV6/coiACIiACFgRkEXeCpMyVQkBCfkqmWgNsyIEJOTDsa8G0Nb92Q2Awp7B7ZxDbq5L/WYAW5PmWr9q1SrnehPbtHnzTrRv/zKAo3HeeUvx6KPcu1AqCYHTT89U++STQL2QX9N33wWOPBK46irgpz+1687jjwPnnANwA+C00+zKKJcIiIAIiEBRBCxEjc7IF0VYheNGwGLNx63L6o8IJIaAhHz4VPUFQIs7z8kz4F1zAEMAzHKt8DPcz3gvytTw6mKRI+8XhQ8/3IvGjXmS4Cx06jQbr756ZSwGktpOMELujh1AR8ZSDEk33ADcfTfw6qvAIYeE5c48HzIEePRRYMsWoH59uzLKJQIiIAIiUBQBC1GT9/dzUR1SYREoMQGLNV/iHqh6EUgvAQl5u7kd7EauZ0A7CndGq2dipHpeWuoNfGdXY2Vz5f2i0KZNV7z99l1o1mwtduy4qrK9V+v7CezbBxx2GNC/v701fs8eoE0b4BvfAKZMEU0REAEREIEyEbAQNXl/P5ep62pGBAoiYLHmC6pXhURABDL3oCsVTqCd62q/s/AqKlIyrxeFk046CXTDb9v2WGze/NeKdFiNZiHw7LPAyScDy5dnXOVt0iOPAL178x4koEsXmxLKIwIiIAIiEAEBC1GT1/dzBF1SFSJQUgIWa76k7atyEUgzAQn5aGa3KYCW7h3z0dRY2lqsr5+77LLLMGfOHDRv3hzvvfceGjRISmD+0gKMTe3f/z5w++3AO+8AWQIX1ukrz9I//XTGFT/s/H1sBqqOiIAIiEAyCCjYXTLmSb0sDwEJ+fJwVivVSUBCPpp5p4s9r59rFU11Ja/F6vq5vXv34n//93/RqFEjbN26FU2aNCl5x9RAngQYqf6444B777Ur+MEHmXP03/42cMstdmWUSwREQAREwJqArp+zRqWMVUBAQr4KJllDrBgBCXk79LyHjfesMdBdUOrgRrLvZFddxXOFWuS/9KUvOZb4+vXrY/PmzTj88MMr3ml1wEdg82agffvMNXK21wAuXAj06QO8+CJwzDFCKgIiIAIiEDEBWeQjBqrqEk1AQj7R06fOx5yAhHz4BNHSzmB2TCZqvflv/j/FPa+jYwC85eHVxSJHzjN4t9++Gt/85rcA/AkrV67EabqeLBaTVqcTPOP+ne8Av/890IxxGC3S3LmZu+kfeMAis7KIgAiIgA
hEScBC1OiMfJTAVVfFCVis+Yr3UR0QgaQSkJAPn7nnADCoHX92AVjiCvdhblFGrafYT4o1nt3O+qJA63v79k8DOA133PEUhg27PJyQchRHgJHjX389E3VeZ9aLY6nSIiACIhBjAhaiJrVCvn///pg/f37O2WnRooVzlC9XWr58OTZt2oRBg/jqlX+aMWMGJk+e7NTRuXNnDBkypFZdXbp0wdq1a53P77zzzsAG+GzmzJno0aMHHnvssfw7UUUlLNZ8FdHQUEUgWgIS8uE8twFY6rrWMze/OSjevcJ9g3vX/Jjw6mKRw3lRePDBB/HlL3+5pkP/+te/0KpVK3z4YQP06nU7Fi++JhadTXUn3n034x5/zTXAz36W6qFqcCIgAiJQ7QQsRE1qhfy6desc8WzShAkTHMFMcf/xxx/XfH7ppZfmXCZDhw4Fxfz69evzXk4U4LNmzcLIkSPRtWtX50aeSZMmOcJ++HCeooTzOfvFIL/ZNhVatmyJnTt3onv37hLyIbNgsebznkcVEAERyBCQkA9fCft898R3pggG4A3fzmB35wLoFl5dLHI4Lwqf/nRn/OlPC9C2bVswsF3r1q2xY8cODBgwALN57lqp9ARmzACGDgXeeisThE5JBERABEQgtQQsRE1qhbx/UmmhpyAPs8D7yxUq5Cm8afGfMmUKbrzxxppq+e+JEyfW9INCnnm56bB06VKc47valX2mJb5Dhw5o3769hLyEfGr/Xmlg8ScgIR8+R7S28wx8L09WinsK98fdz24CQDGflLvZ3Kj1P0br1vfhqafuRe/evbFhwwbnS2njRg5XqSwELroI2LYNeOqpsjSnRkRABERABCpHIGoh//TTT+P666/H7bffjlNOOSXygZWy/mxCnq7v/FmzZo3zTkIr+k038TUrYy3n50z16tULFNrZIFCA9+zZE6tXr8YJJ5xQk41HCinKly1b5oh2tkHvRCa273ev50YCy9CLgH0wrvX8bMSIEc7mBI0idNuntf/cc/m6CNCKP2bMmBrLPz9bsGAB+vXr5+Rv2rQpOnbsCNbfrl073Hrrrc5YWQ/d+E888cTI57ccFVqs+XJ0Q22IQCoJSMiHTysD3Q103elvc7NT3PPs/GXuv/nfjDaWlHPyrpBvCYBfjhMA7Ebjxo3x8ssvOxZ6pTIQeP99gC8L48YBI0eWoUE1IQIiIAIiUEkCFqLGyiJPEXnLLbfgjjvuwDvvvIODDz4Y1113HW6++WZHXBabSl0/+xck5OnyTgs5xSzd1o3rO8Uub9LZtWsXBg4c6Li+U3gfccQR1kNlGZ5/pyu/132f9VDgc/OAdRshzzYHDx6Mfftou9mfKMjpij937txaQp7Wfno2jho1yhH506ZNc/q5fft2R6RnE/LkYPJQyNOln3M4evRop1FuDvB5vp4L1mBKnNFizZe4B6peBNJLoPi/9ullY0ZGgb4FQFMA/QAsdF3tKe65Lcyo9e0BTAaQ+asb/+S5R74+gMyXFL9keGc8E93OzHmx+A8noT186CHg4ot1FVxCp0/dFgEREIEgAqW+fm7btm245JJLHJG4e/fumi40adLEsd4+8MADjgt5oanU9Zt++YW8cX2niOcGhUm0RvMzegvS0FCoaz3ro1CmSKbINmfk2Q+2Tes533uMkKdQJ0ev8Kfo79WrlyOs+/btWyPkjVXfm3fLli1OndycYL9thbxftNNqzz6a8Rc6r5UqJyFfKfJqtxoISMjbzTLFPEU6D46vc4swAF7GXwpgGNb+dlXFIpdHyAf3Z+zYsRhHS7FS6Qgw4u6KFcCGDYpWXzrKqlkEREAEykqA353jx48Pa5MxdejN50+hFnmKdQaO8waIM5VQpB555JG45557wtrP+vxrX/saXn311az108Wb7unFJr+Qz+b6znbq169fYzEvRshTXNPSTpd14xo/ffp0x/Lut8jTZZ6Weop5egMwsSw3TxYvXuw887rWU6jTJZ8WdHoT0D3em2yFPL0GTHsszw0bbi5IyBe74lReBNJHQEK+uDmlwN9ZXBUVKe0Kee7Y0yLPwPyMGHsAGjSoj5YtmzlfRLLIl3Bu6Kp36KHAV78K/OQn9g0xsi/vgOeZu6Z0ErFIy5cDe/cCPXtaZFYWERABERCBYgiU2iJ/5pln4sknnyymi0WVPeOMM/DEE08UVQcL+4W8sTwbN3NvAxTyJrJ8MULe1EkXfXoe0FJurOlsn54OxiJPIW/6xIDATOyHyecX8sYCT0s+z7xTyHvP99sKeQbS857Ll5AveqmpAhFILQEJ+dRObc6BuUL+QQBDAPzTzdwFwH1o23YIHn/8Lp2VL+XaoJB/5hmgdWvgyCPtW/rLX4AvfAFYtAi44AK7cmecARx0ELB4sV1+5RIBERABESgJAQs341CL/Pe//3386Ec/ytq/a6+91nE/LzTRrf2uu+7KWpzt//CHPyy0+ppythZ543JPF3sztkKvn2M5CvVmzWiHySS6w/O2Hkap55l7r5A34p15eM6dbvVG1PuFvBcIRT0t/d6r7YKEvDk24D0jLyFf9NJSBSJQNQQk5MOnen9o0+x5d7jn6MNri0cOV8hPA3CY2yMG3D/QDdDfHqef/lM8+eT98eiterGfAF/eJkwAtm4F/ud/wsm89hrAYEB8Kfv618PzK4cIiIAIiEDJCEQh5ClGaTn2no83Hf7kJz/pnJH3X5mWz4BKXb/pS7Yz8rRiey3StMQz8JsR2sVY5Cmmu3XrVuvKOLqyN2jQAM8++6zTNb+QZz9pYTdB6Izbu1fI00o/aNAgrFixolZEfJ7JN67ybJsbBt6xUbSzjIR8PitUeUVABAwBCfnwtcBIcPQ7t008O8+tcAbIi2sKOCPPIHc/BtBDFvm4zhr7ddJJwGc+AyxkzEWLNHUq8L3vAW+/DTRnXEYlERABERCBShGIQshT9H3+85/Hm2++WWcYhx56KF588UVHdBaaSl2/6VdQ1HpGfGfUeopiilyKa/6beWfPZpgiON4G8+bNc6LGG+s6Ldv8jOfcc928w7rYBq+z45VzrINCmkHszj77bKd+v5A3V8TxPDzbMBHvvUKeXgMU6ryujkcTmZYsWYKFCxc6Fn9uvDA/N0kYaI/5aLGn2zw3KMKEPDcDmC+JtwpZrPlCl6rKiUDVE5CQD18CgwH0BdAdAC3vDE6ziUHe3c/4bUnxzrPyjF7f2T10zv/eFV59RXIECHneT7oQbdteI7f6ikyJRaN8aTvssPys6127Ap/9LHC/vCssCCuLCIiACJSUgIWoCXWtZwd5Tv1tbtD60iGHHBLJ+flS189uU5xTRL/33nu1RjFr1ixH5Jp75CnceZOOSRS/LEtha+6ENxsA/jvigyaTMQxYP8/IU7SzrBHxRsjzGjkGtDOJZ+P589FHH9V8RmHOz0w+BiDkBoS5555BCXlvPEU8E8/lM1geNw0o5PnfHAdFuhHynTp1cjYwvFH7dUa+pL+SqlwEEk1AQj58+hiZnkJ9Upbr5fg5r6JjeFIKdwr+JQDot35dePUVyREg5AfhiCNexR/+c
Hcid3wrQrHcjc6YQVNExrr+qU+Ft75+feb8Pa0YAwaE51cOERABERCBkhKISsiXtJMJrXzYsGFOcLkTTrA5EZnQQSaw2xZrPoGjUpdFIB4EJOTD54EWeEZF6ZQj6wYA/DnPzTPXFfa8XiaOySfkW6Bhwz1YseJRnH766XHsr/pEAhdeCOzYAaxcaceDwYgmTgTeeScT7E5JBERABESgogQsRI2VRb6ig4hp47SQm6B0Me1iVXbLYs1XJRcNWgSiICAhH06RZ+SnAxiWIyuFex8AjBjHdBOAiZ5/h7dS3hyukP+s08VGjbZi2bJF4N3xv/rVr2SRL+9c2LX2738DrVoBt9wCuOfvchbkNXXHHgscfzzwu9/ZtaFcIiACIiACJSVgIWok5AuYAZ5jp3v6wIF0kFSKEwGLNR+n7qovIpAoAhLy4dNFS/t2ALms636rPd3q+wFoFV59RXLUXD/Xtu1Pas7E87qUP/7xj7j66qsr0ik1moPAiy8CPGf30EPA5z4Xjuo//wG++13gK1/R/fHhtJRDBERABMpCwELUSMiXZSbUSLkIWKz5cnVF7YhA6ghIyIdPKa3x3OJl0LtfBmQf5FrsjdWebvibAawC0Cu8+orkcF4U6tc/AK1atXCuXfEnBpYZPnx4RTqX6kZHjgRatABGjSpsmLS019OvbWHwVEoEREAESk+AwdRuu+22wIb27NmDrbw+NGMcoBHAnyTkSz9FaqGMBCTkywhbTVUdASmC8CmnMF8DoC2A1QCWA+C3MK3tDGzXxbXYM9hdB88Xcw8AK8Krr0iOgGB3tftBN/tx48ZVpHOpbZTRbtu0Aej6x7PrSiIgAiIgAqkjwO/O8ePHh41LQj6MkJ6ngoCEfCqmUYOIKQEJebuJoZifDIDWd3+aD2Cka4VnhHta5vnvBXZVVySXI+RbtWqFhg0bBnZAFvkSzMuTTwJnngk89RRw6qklaEBVioAIiIAIVJqALPKVngG1HycCEvJxmg31JW0EJOTzm1EKet4Pzx9zpzzvj09akuteJWaMQeruugv45z+BgOMMleiS2hQBERABESgfAQtRo+/n8k2HWioDAYs1X4ZeqAkRSCcBCfl0zmvYqPSiEEaoFM8ZpO6UUzJiXkkEREAERKDqCFiIGn0/V92qSPeALdZ8ugFodCJQQgIS8iWEG+Oq9aJQ7sl59VXgqKOABQuASy8td+tqTwREQAREIAYELESNvp9jME/qQnQELNZ8dI2pJhGoMgIS8lU24e5w9aJQ7nlnBOMxYwBGK27SpNytqz0REAEREIEYELAQNfp+jsE8qQvREbBY89E1pppEoMoISMhX2YRLyFdowr/0JeDAA4FHH82vA6tWAccfDzRqFF5uzx6ANw0wKn47XqKgJAIiIAIiECcCFqJGQj5OE6a+FE3AYs0X3YYqEIFqJSAhX50zqX3EPgAAIABJREFUrxeFcs/7okXAQQcBZ51l3zKt94ccAvziF8DQoeHlHnsMOO884M9/Bk46KTy/coiACIiACJSVgIWoqZrv55EjR2LKlCno0KED1q9fn3Ue1qxZg1tvvRVr167Fpk2b0Lx5c3Tt2hVDhgxBnz59apXr168fFvAImy+1b98ebG/QoKDLh3IvgRkzZmDy5MlO2507d3ba9dbTpUsXp2/8/M477wysjM9mzpyJHj164DF+V1dRsljzVURDQxWBaAlIyEfLMym1Vc2LQlImJLCfv/0tcNVVwOuvA5/5TPhQrrkGeOIJYMMGoJ5+tcOBKYcIiIAIlJeAhaipmu/nli1bYufOzMU/GzduRNu2betMBgX0qFGj0KJFC/Tv3x8UzTt27MDSpUudn759+2Lu3Lk15Zhn+fLlmDVrFj7++GPn823btmHevHlOftY3fPhw60mnAGdd3ATg5sGqVaswadKkWvXwcwp5bjBs5QZ8QDJj7d69u4S8NX1lFAERCCOgt/0wQul8XjUvComevn79gE2bgNWra4ZxRffu2Pnaa3WHxReWjRuB5s2B1q1rnjc7/HDcu2xZojGo8yIgAiKQFgJRC/kPP/wQ3772WvzfL3+JRjZHsPIEWar6ly1bhl69ejkCm2KcQplWd29inp49ezoCfvbs2XV6Tgv30KFDnToudYPIGiEfJKg7duzobAhQjNskbjIwP70Gbrzxxpoi/PfEiRNrRDuFPPPSYs/NgnPOOadW9dxYoCWengf0DJBF3oa+8oiACNgQkJCvSyl4OzWcJrd+9yuo8PyVzCEhX0n6Nm3/978ZQX7TTcDNN9eUuPDII/H7HC6I/qov7NQJv2fEfCUREAEREIGKE4hayM/62c9w74gRuGLyZAz81rciH1+p6qcL/IoVKxwxTLG+evXqOtZsWt+3bNmS1crNwS5cuNARxyeccIIz9lxCnoK7VatW1kKaAtz0zdTPNjZv3uyIcm40ULSbevmMffG713OzgWXoIVCvXr2a9vnZiBEjHA8CehnQbZ/W/nPPPdcZC634Y8aMqeVBwGMDZMf8TZs2BTcnWH+7du2cjRAeQ2A93OQ48cQTI18PhVRoseYLqVZlREAEAEjI110G+wBQlBfCpn5CVpWEfNwnypx3X7sWcF9Q2GUJ+bhPnPonAiIgAtkJWIga6+9nWsvPO+ooPLxlCy5s2xaPvfJKpFb5UtZfv359x2V+woQJzpl2CnCKeSOYjTXc5PESNe743s+aNWtWS8jTOm6Sca0fPXq0I77PPvtsqyVKd3luJsyfP7/G4s+CxlOAZ+cHDhxYI+QpsAcPHox9+/gauT9RkNOln0cAvEKe1v7WrVs7HCjyp02b5rjob9++3RHp2YQ8WZk8FPJ06We9HB8TNwf4PJubv9XgI8xkseYjbE1ViUB1EShErKadkPkLTDE/H8AcbvqmbNDWLwopG3dyhnP99cDDDwN//3ut8+4S8smZQvVUBERABPwELESN9fczreUYNQoDP/gAMxs3Rr1vfhMDL7usLvQDDqi1IRw4KzyatX17rUez6M7+i19g4H//i5mf+ATqTZwYidWfwnjAgAGOK/oRRxzhtElh7w0Wl01Em8+9HfUGy6PIZf3+RKFLkZ0tGF22lUqhzLIU2eaMPNvgZgKt5zxvbyzyFOoU517hb44QUFjzCIER8saq781L7wPWyWMGjBdgK+T9ot1sjGSLO1Du30qLNV/uLqk9EUgNAQn54KlkGNQBAPq61nnmmueK+vtTMPvOiwJdzBo2bBg4HJ4HyycgTAqYxGcIPO9++OHAxRdnItZ7koR8fKZJPREBERCBIAJTp07FbbfdFghnz549xlLaDcBzAZmshLyxli/esgW8nPRDAL0AMB56nctKeUzr3XdzTxZjsngEMOs7D8Bitz6n/ois/nRXp/ikyGWiNdpY5I0V2Qh2Y/U2naeApuXeJGPFNlHvjWs9BbIJdse8dDmn9b9bt27WrvUsR3FNSzvLG9f46dOnO5sCfos8z75zbBTzc+bQBgSn7O7du7F48WLnmdciT6HO9zBa0BkEj+7x3mQr5Ok1YNpjebLj5oKEvP4+iUD6CUjIh88xxTxF
vbnjhJZ6ivrpAB4PLx7LHM6LQq6ejR07FuN4J7lS+QkwSn3nzsC99wI9e9ZqX0K+/NOhFkVABEQgHwL87hw/fnxYkaKEvNcabxrKapUvwCLvtcbX1B+BVd64zFPQeoU2/83kDVznt9IHAaVwp3D1C/kgt3JzvtycbQ+bIO/zXbt2OdHvaSk31nTWd8kll9RY5CnkjTV87969TnGOweTzC3ljgacln2feKeTplXATY+PkOCPvd61nID2vp4GEfD4zq7wikGwCEvL5zR9F/RAAjERCQb/DtdLTj2tFflVVNLcs8uXAf/fdwFtvAe65tbyadF8C0KCBhHxe4JRZBERABCpLoNQWeb813ow2Kqt5KevnWXGe5abF3JxrN/2nVZoWbGNdplWZopk/PDPuT9wUoPilVdtGyBsB7rfy51otDETHfnj76j8a4A+iR/HOPOwzI/MbUe8X8t52Kepp6fdebRdkkTeR+r1n5CXkK/v7rtZFoJIEJOQLp+8X9Txcpqj1hfNMX8kvfhFo0wZ44IHIxiaLfGQoVZEIiIAIlJ2AxXnhUNf6IGu8GUgUZ9lLWX+uK+Boaab12ohU417vFffeCePnJmq9jZCngB82bFitoHphC4Bi2u+OT1f2Bg0a4Nlnn3WK+4U8x0ELuwlCZzYmvEKe4xw0aJATud8bEZ98jKs822YsAa+1naKdZSTkw2ZOz0WgOghIyBc3z7zbg2FCzVn62ubT4uouZenQF4VSNl4Vdf/zn8ChhwIzZwLXXhvZkCXkI0OpikRABESg7ASiEPLX9e+PTWvW4ID6dS/K2bNvHzp07ow75s4teGylqp/B7ShU/feym44a13evxZzeDQz+Ros4BTJFLs9+0+JNd3wGkON/+4U873n3pg0bNjjtUnQbAU7rNl352R5d5oMSyzCqPN3dGVSPbvAU0t7o934hb8bB/nmPCniFPL0JKNR5XR3PyDMtWbLE2ZjgeOiyz/z0CGAcAOajxZ6bG+QYJuTJifmyjavgxVFAQYs1X0CtKiICIkACEvL5rwNeVsoz84MBNHcZrnFd7KfkX11FSkjIlxr7L38JDBqUca0/5JDIWpOQjwylKhIBERCBshOwEDWp/X6mIKYw51nzIFd5TgYt3bQ6MzicSevWrau5I53ilKKW1ngGr6OwpcD3CnkKaX+idZzWbQp80zb7Q6HuvfYuaEGwzxTR7DdFO8t5r7DjZ7xGzttnutfz56OPPqqpksKcn5l8HBet8gykx8T733lvPEU8E8/lc5zcNDBjNpsZRsh36tTJ4XXHHXfUtKMz8mX/tVaDIlAxAhLydui5Vcuz8bS8t3fFOy8pZcA7no/fbFdNbHKl9kUhNoS/8hWAVvlnnom0SxLykeJUZSIgAiJQVgLVLOTLCtqyMbraM8Cc173dsqiyWRKwWPOWNSmbCIiAn4CEfPY1QfFuzsF7xTuFOwV80sS7d6QS8qX8W/DBB0CrVsD3vw+MGRNpSxLykeJUZSIgAiJQVgIWokbfz2WcEVrJTWC6MjZbVU1ZrPmq4qHBikCUBCTk69Ic7rrOd3Yf7QQww3WdXxsl/ArWpReFUsJftAi48ELghReA446LtKUrunfHztdes66z2eGH4173rl7rQsooAiIgAiJQEgIWokbfzyUhX7dSE1hv4MCBZWqxOpuxWPPVCUajFoEICEjI14W4z71ajk9ofV+aB+dZeeStZFa9KJSS/pAhjFoDbNoEuHfjWjVHgf7Zz+ZXxqpiZRIBERABEYgDAQtRo+/nOEyU+hAZAYs1H1lbqkgEqo2AhHywkDef8q54m0SOzKuo9Ta00p6HgXZ27gS+8Q37kTIgDoPiffvbwA9+YF9OOUVABERABBJDwELUSMgnZjbVURsCFmvephrlEQERCCAgIV8Xyk1FrBRFrS8CXlUX/eMfgbPOAp56Cjj11KpGocGLgAiIQFoJWIgaCfm0Tn6VjstizVcpGQ1bBIonICFfPMMk1qAXhbjN2gUXAKtXA2++yft34tY79UcEREAERCACAhaiRt/PEXBWFfEhYLHm49NZ9UQEEkZAQj5hExZRd/WiEBHISKr5+GOgSRPgsssA3j+vJAIiIAIikEoCFqJG38+pnPnqHZTFmq9eOBq5CBRJQEI+N8ATAOwAsMWTjZ9NAsAv243uVXRJU196USjyFyfS4i+9BBxzDPDAA8DFF0datSoTAREQARGIDwELUaPv5/hMl3oSAQGLNR9BK6pCBKqTgIR88LxTrC8H0BzASABT3WztXPHuLcUgd/MAXJagJaQXhThN1qRJmfvmd+0CDjooTj1TX0RABERABCIkYCFq9P0cIW9VVXkCFmu+8p1UD0QgoQQk5OtOXDMA292Pef3cBADr3H/PBdDXtcIPA8C75ini2wLoDuDxhKwDvSjEaaJOOw3YsAF4++049Up9EQEREAERiJiAhajR93PEzFVdZQlYrPnKdlCti0CCCUjI1528iQBGuIJ9oe8x75inyG/l+ZxW+20AKPKTYpXXi0Jcfml37wY+/WmgfXvghRfi0iv1QwREQAREoAQELESNvp9LwF1VVo6AxZqvXOfUsggknICEfN0JfA4ArfKdfI9OBLDaPR8/2vcsW5m4Lg+9KEQ9MytXZqLOf/ObuaPO33cfwB9v2rsXWLwY6N27bq8uvxzgj5IIiIAIiEDiCViImtR+P3fp0gVr167FkCFDcOeddwbOJZ/NnDkTPXr0wGOPPVa2+W7ZsqXTr1tvvbVsbXobmjFjBiZPnoxNmzahc+fOTl8GDRpUkyXO7MKAWaz5sCr0XAREIAsBCfm6YGhdXwpggO8R75entb4HgBW+Z0sAnAsgKfeGpfZFoWK/6ddcAzz9NPDyy4V14cILgd//vrCyKiUCIiACIpAIAhaiJrXfz127dnWEfPPmzbF169bA+aKg3rlzJ7p3715WIT9gwAD07NkT1157bdnXEUX7rFmzMHLkSJDRqlWrMGnSJEfYDx8+3OlPnNmFAbNY82FV6LkIiICEvPUaoPs8z737hXwusU6LPC32iRLyrVq1QsOGDQPB3HjjjTVfINbkqjUjLept2gBf/zowZUphFCTkC+OmUiIgAiIQMwJTp07FbbfdFtirPXv2GBHbDQDfHfwpbyFPJ69SOm5FVT/FKEU6rc5Lly7FOeecU2vsy5cvdyzxHTp0QPv27csq5Cu1hMijRYsWmDJlCvjeZRL/PXHixJoNjySzk5Cv1OpSu9VAQBb5urO8wT0Hzy9Zk0wAPLrWez83z2nF5/ay3x0/rmvIeVHI1bmxY8di3Lhxce1/bPr14Ycf4ttf/jL+77HH0OiJJ4AzziisbxLyhXFTKREQARGIGQF+d44fPz6sV5EJ+VJ/fURVP8UoDQhMFOp+9/qhQ4di8+bN+Pjjj1GvXr1aQp7W6gULFjibALTo02JPF/xmzfh6BqfciBEjwM2AHTt2OO7ptGqfey6dJcOfU0yzfbrW9+vXz6ln2za+2u1PtJzPnz+/ljfBsmXLMGrUKKxZs8bp1+DBgx0BbpvYDj0BVq9ejRNO4IVJmcT
xcEOD9XPDo1B2YVzoATFmzJhahhtyJgNybNq0KTp27OiwadeuncOHYyVf8j/xRNqwcicJ+TBCei4ChROQkK/Ljtb4S92I9M+7j+lWz7vj+eM/H89vCbriB1nxC5+Z0pZ0hLws8sVDnvWzn+He734XVzRqhIG8Pu6AAwqrNKo3pcJaVykREAEREIGICJTbIl/qr4+o6jdilCKRgnffPjpA7k8UlXQnnzt3bi0h73c9p5ifMGECunXrViP2KcRbt27tiGpuBEybNs1x49++fbsjRsOee8/IU8j279+/jrhmnssuuwx33HGH02mKeuYzP8YlPp/z/ewjz7+zrksv5atnJlHAU+Dz7PzAgQNrhHy+7GzGHSTkOSbDjkKemxTcXBk9OvMKzE0TPs92RMI7rxLyEf1hUTUiEEBAQr4ulPYAaJXn/fAU7mTEu+T57w4AtniKcPuU5+W5JdzFc01d3Bdb3q57cR9QJfpHa/x5Rx2Fh7dswYUHHYTHtm1Do0aNCutKVG9KhbWuUiIgAiIgAmUgYCFq8v5+7t4d8BqBaaTulMU/cOdOYP164LjjgMaNgwf8l78A//3v/mdjxgBLeLiwyGSEPIU6BaZXvFK49urVyxGHffv2rSXkeX79pJNOquV6TsFOizCFpLFee+vbsmWLY5GnJZ/CntbtbM/btm0Lf7C7+vXrO2VN8DvTP1qjjz/+eIcEy1Bsz549u4bMwoULHWt20NGBbPgolCmSuflgzshTSNPtnmPgOflC2IVxMeO2EfJ+0W42OzZu3AjWkytZrPkiV5aKi0D1EpCQD577wQCm+R75rfF0pefVc2QYZKmP86rK+0UhzoOpVN9ojceIERj43/9iZqNGqDdlCgZ+61uFdSeqQ4iFta5SIiACIiACZSBgIWpCv5/9l58sWlS749SZ69YFD4aB4M87j67bQDb9dcghwDvv1C7vvVSl0MtUjBhlNHoKYIr5OXPmOA1R/O7evRuLFy92nvld601vKG4pkinkveKSoppehrQU0+2ebuDeZPPcG7WeQprW8vXc9XD7t27dupp/+13fvW1xE8AbqC5sWXHTgePnJoE5VjB9+nTHa8Fvkc+Xnc24bYQ8vQbMXHE8ZMP5lJAPm109F4HSEpCQz86X3wLdXbG+BsByX1beJ78RAO8qWVDaaYq89tAXhchbTFmFxhq/eMsW0Ab/IYBebdvisVdeKdwqnzJGGo4IiIAIiEBtAlEIeT/TpFnkKUaNRXcvg8UCoPjlZ5dcckkdIU9XeopsniOnwKdQp4CkmDau3cYCT2s/z3ZTyLPMTTfxZCQQ9txvkTdn141QZf+8AelMYD72h+Lbm/gZNxTyvcpu165dzrl8WrjNRoFh4t0EyYedzbhthDyPC3hjGkjI6y+bCMSDgIR8POah3L2QkC+SuGONHzUKAz/4oKammZ/4BOpNnFi4Vb7IPqm4CIiACIhAvAmUQsiX+mRWVPV7xagR73R35xl2utUbUe+3yFNE00JON/MjjjjCmWB/VHfvrFO80qLtv8LN5Al6HnSPvPmM/faeGWc95mw7Bb3f+s/nLMtx2STWwTZM4D6WIRceKeAmBsdcKLswLkHB7nhkgcHtvGfkJeRtZlJ5RKD8BCTky888Di1KyBcxC35rvKlKVvkioKqoCIiACFQBAQn5VjUB6iiOaT03gdSM67ZXyBvLrz+qO13RV6xY4VjkaaEeNGiQ829v5HeePadLONvJ9ZztBgl5ilmejWeEdrr005PAm4JEMN3j6RLPe+G9fcm1tFmPN3Af87LfDRo0wLPPPusU9Qt5G3ZhXMy4uWHgtbZTtJOlhHwV/EHSEBNPQEI+8VNY0AAk5AvClikUZI031ckqXwRYFRUBERCBlBMohZAvdYiVqOr3i1FzzRld0efNm1cTtd0r5M0963SnZ/A5JlrbzTVz5Mmr7HjengHt6NLOtGTJEjDwHC3bvL4t13O68wcJea/7POthPm+i5Zru+xTuFL8U3fyMGwhGgPPfHBvPumcLCkfvAp755zEAjoHHAyikuYlw9tlnBwp5W3YcF/kEcTHHGDhOBtpjPrLl5gk9AcKEPDcbmE/B7lL+R0vDizUBCfm605M5sJV/4iGpAu8ey7+xIktIyBcIMJs13lQnq3yBYFVMBERABKqAQCmEfFKwUcjzijgGtDOJbvP8+eijj2o+o5DnZyYfRS0FM0UjxSYt5X369HHOypuAdwxER6s7LeJMtKLz7LcR32HPGSiPbfBaO2+iEGZf3nvvvUDM3Cwwd6vTs4DWbd4jb9zqKdAp1P0eBf7KeGUhRTTPyJMTyxkRz7yFsgsbN8/l07uBmwZky/+mtZ8i3Qj5Tp06ORsV5to99kdn5JPyW6d+pp2AhHzdGWYQu9qRS+xXQUv7rBXNKSFfIP5c1nhTpazyBcJVMREQARFIOYFqFvIpn9qswxs2bJizSWDrap82ThZrPm1D1nhEoGwEJOTLhjpWDUnIFzAdYdZ4U6Ws8gXAVREREAERqAICFqJG388pWwf0MDBB/VI2NKvhWKx5q3qUSQREoC4BCfnqXBV6UShg3m2s8aZaWeULAKwiIiACIpByAhaiRt/PKVoDPMtOF/WBAwemaFT5DcVizedXoXKLgAjUEJCQr87FoBeFAub9uv79sWnNGhxQv35o6T379qFD5864Y+7c0LzKIAIiIAIiUB0ELESNvp+rYylUzSgt1nzVsNBARSBqAhLyURNNRn16UUjGPKmXIiACIiACKSJgIWr0/Zyi+dZQAIs1L0wiIAIFEpCQLxBcwovpRSHhE6jui4AIiIAIJI+AhajR93PyplU9zkHAYs2LnwiIQIEEJOQLBJfwYnpRSPgEqvsiIAIiIALJI2AhavT9nLxpVY8l5LUGRKAiBCTkK4K94o3qRaHiU6AOiIAIiIAIVBsBCflqm3GN12LNC5IIiECBBCTkCwSX8GIS8gmfQHVfBERABEQgeQQsRI2+n5M3reqxLPJaAyJQEQIS8hXBXvFG9aJQ8SlQB0RABERABKqNgIR8tc24xmux5gVJBESgQAIS8gWCS3gxCfmET6C6LwIiIAIikDwCFqLG+X7+9a9/jWOOOSZ5A1SPRcBH4G9/+xuuvvpqftoNwHMCJAIiEB0BCfnoWCapJgn5JM2W+ioCIiACIpAKAhZCvm39+vVf2rdv3/+kYsAahAgAqF+//n/27dt3NIAtAiICIhAdAQn56FgmqSYJ+STNlvoqAiIgAiKQCgIWQp7jbAugdSoGrEGIQIbAexLxWgoiED0BCfnomVaqxo0A2vkanw5gWECHJOQrNUtqVwREQAREoGoJWAr5quWjgYuACIiACNgTkJC3ZxX3nLcCmAbAzOkQAPxsl4R83KdO/RMBERABEagGAhLy1TDLGqMIiIAIlIeAhHx5OIe10h7AYACjsmTk8xFukJAWbp4pnrzNAHzsEe3nAtgKYF2W+mSRD5sRPRcBERABERCBiAlIyEcMVNWJgAiIQBUTkJCv/OR3BzAXwJwsbvAU6asBnAhgt9vdia5Q94p570hoiR+dY2gS8p
Wfd/VABERABESgyghIyFfZhGu4IiACIlBCAhLyJYQbUjXPs08CwLPtfQEsyyLkKdop5r1n3fnv7QwEGtAG88/OYY1nEQn5ys27WhYBERABEahSAhLyVTrxGrYIiIAIlICAhHwJoBZQJe/VXJVFyG8AQHE+y1fvPgC05q/wfb4NQMuQPkjIFzBJKiICIiACIiACxRCQkC+GnsqKgAiIgAh4CUjIx2M95BLyFOy02C/0dZUCf57PhZ5n4xnwrpOEfDwmVr0QAREQAREQAUNAQl5rQQREQAREICoCEvJRkSyunmxC3rjQB1neg8oMcoPmdZOQL25CVFoEREAEREAEoiYgIR81UdUnAiIgAtVLQEI+HnOfTcjzHD3P0GcT8nw2wDMECnnm9X4WNEK51sdj3tULERABERCBKiIgIV9Fk62hioAIiECJCUjIlxiwZfWFCHm61jOafZhol5C3nARlEwEREAEREIFSEpCQLyVd1S0CIiAC1UVAQj4e812Ia33RQn7RokU4/vjj8yJw2GGH5ZVfmUVABERABEQgjQTeeOONvIf1/PPPo3fv3izHI3D87lcSAREQAREQgYIISMgXhC3yQoUEu2N0+ukh98Vn66jjWl/IKD7++ONCiqmMCIiACIiACKSKQL16Rb1CScinajVoMCIgAiJQfgJFfQuVv7upbTHs+jlGop/qGz2j2TNK/eMFUJGQLwCaioiACIiACIiAISAhr7UgAiIgAiJQSQIS8pWkv7/tXEKed8gz6J33LHx7AOsBNCiw+xLyBYJTMREQAREQAREgAQl5rQMREAEREIFKEpCQryR9OyHPK+go9LsA2OUWoYWeQv62ArsvIV8gOBUTAREQAREQAQl5rQEREAEREIFKE5CQr+wM0NpO63oftxvzAWwKOPfeFsBQAAxw1wIAD6r7Xe3zGYkj5BXsLh9kyisCIiACIiAC+wko2J1WgwiIgAiIQCUJSMhXkn7l2tY98pVjr5ZFQAREQASqlICun6vSidewRUAERKAEBCTkSwA1AVU6Qr5Vq1Zo2LBhYHdvvPFGDB8+PAFDURdFQAREQAREID4Epk6dittuCz75tmfPHmzdupWdVdT6+EyZeiICIiACiSQgIZ/IaSu606Fn5MeOHYtx48YV3ZAqEAEREAEREIFqIsDvzvHjx4cNWUI+jJCei4AIiIAI5CQgIV+dC0QW+eqcd41aBERABESgxARkkS8xYFUvAiIgAiLgEJCQr86FoDPy1TnvGrUIiIAIiEAFCeiMfAXhq2kREAERSBkBCfmUTajlcCTkLUEpmwiIgAiIgAhERUBCPiqSqkcEREAEREBCvjrXgIR8dc67Ri0CIiACIlBBAhLyFYSvpkVABEQgZQQk5FM2oZbD0Rl5S1DKJgIiIAIiIAL5ENAZ+XxoKa8IiIAIiEChBCTkCyWX7HKKWp/s+VPvRUAEREAEYkpAUetjOjHqlgiIgAikjICEfMom1HI4sshbglI2ERABERABEciHgCzy+dBSXhEQAREQgUIJSMgXSi7Z5XRGPtnzp96LgAiIgAgkkIDOyCdw0tRlERABEYgpAQn5mE5MibslIV9iwKpeBERABERABPwEJOS1JkRABERABKIiICEfFclk1SMhn6z5Um9FQAREQARSQEBCPgWTqCGIgAiIQEwISMjHZCLK3A2dkS8zcDUnAiIgAiJQHQR0Rr465lmjFAFSayU9AAAYM0lEQVQREIFKE5CQr/QMVKZ9Ra2vDHe1KgIiIAIikHICilqf8gnW8ERABEQgJgQk5GMyEWXuhizyZQau5kRABERABKqDgCzy1THPGqUIiIAIVJqAhHylZ6Ay7euMfGW4q1UREAEREIEqJqAz8lU8+Rq6CIiACERMQEI+YqAJqU5CPiETpW6KgAiIgAikh4CEfHrmUiMRAREQgUoTkJCv9AxUpn0J+cpwV6siIAIiIAJVTEBCvoonX0MXAREQgYgJSMhHDDQh1UnIJ2Si1E0REAEREIH0EJCQT89caiQiIAIiUGkCEvKVnoHKtC8hXxnualUEREAERKCKCUjIV/Hka+giIAIiEDEBCfmIgSakOkWtT8hEqZsiIAIiIALJIqCo9cmaL/VWBERABJJKQEI+qTNXXL91j3xx/FRaBERABERABAIJ6B55LQwREAEREIFyEJCQLwfl+LUhi3z85kQ9EgEREAERSAEBWeRTMIkaggiIgAgkgICEfAImqQRd1Bn5EkBVlSIgAiIgAiKQi4DOyGt9iIAIiIAIREVAQj4qksmqR0I+WfOl3oqACIiACKSAgIR8CiZRQxABERCBmBCQkI/JRJS5GxLyZQau5kRABERABERAQl5rQAREQAREICoCEvJRkUxWPRLyyZov9VYEREAERCAFBCTkUzCJGoIIiIAIxISAhHxMJqJM3TgMwD+8bf3jH//AYYfxYyURiIbAG2+8gc985jO1KtM6i4atatlPQOtMq6EcBKJeZxLy5Zg1tSECIiAC1UFAQr465tmMUkK+uua7IqON+sW3IoNQo7EnoHUW+ylKRQejXmcS8qlYFhqECIiACMSCgIR8LKahbJ2oI+QPPvhg1K9fv04HbrzxRgwfPrxsHVND6SEQ9YtveshoJFES0DqLkqbqykagkHWm6+e0nkRABERABMpBQEK+HJTj00YdIZ+ta2PHjsW4cePi03P1JDEECnnxTczg1NHYENA6i81UpLojhawzfneOHz8+jEs3AM+FZdJzERABERABEchGQEK+utaGLPLVNd8VGW0hL74V6agaTTQBrbNET19iOl/IOpNFPjHTq46KgAiIQKIJSMgnevry7rzOyOeNTAXyJVDIi2++bSi/CGidaQ2Ug0DU60xn5Msxa2pDBERABKqDgIR8dcyzGWXihTwtHf/617/QpEmTkp/hj7KtYurKt6xtfpt8YXmCnkf94luJX9GwcUfZpyjbKqaufMvmkz8sbyHPtc7yW4VhjPOprZi68i2bT36bvLnylOPvmYR8PitNeUVABERABHIRkJCvrvWReCH/6U9/Gv/85z/Rpk0bvPXWWyWdvSjbKqaufMva5rfJF5Yn6HkaBFbYuKNceFG2VUxd+ZbNJ39Y3kKea53ltwrDGOdTWzF15Vs2n/w2eXPlKcffMwn5fFaa8oqACIiACEjIaw2kxiJv86IW1XRH2VYxdeVb1ja/Tb6wPOV48Y1qPvOpJ2zc+dQVljfKtoqpK9+y+eQPy1vIcwn5sJVV+3kY43xqK6aufMvmk98mr4R8PjOtvCIgAiIgAnEmIIt8nGcn+r7JIp8HU5uXQtvqiqkr37K2+W3yheWRkLddAdnzhTHOp4Vi6sq3bD75w/IW8lxCPp+VAYQxzqe2YurKt2w++W3ySsjnM9PKKwIiIAIiEGcCEvJxnp3o+yYhnwdTm5dC2+qKqSvfsrb5bfKF5ZGQt10BEvK5jsRonemoULbfkLC14S1nk1dCvvi/WapBBERABEQgHgQk5OMxD+XqhYR8HqRtXgptqyumrnzL2ua3yReWR0LedgVIyEvI110DYb9fxa+u/TVE2
VYxdeVbNp/8Nnkl5KNcVapLBERABESgkgQk5CtJv/xtS8jnwdzmpdC2umLqyresbX6bfGF5JORtV4CEvIS8hDwJhP1N8VPKJ79NXgn54v9mqQYREAEREIF4EJCQj8c8lKsXdYR8ixYtUL9+/TrtX3HFFbjyyivL1S/rds477zxs3boVrVq1wuLFi63LFZIxyraKqSvfsrb5bfKF5Ql6/s4776B37961kC9atAgHH3xwIdNQkTJh446yU1G2VUxd+ZbNJ39Y3kKea53ltwrDGOdTWzF15Vs2n/w2eXPlierv2W9/+1vce++9gUg/+ugj7Ny5k8/OALAyH+7KKwIiIAIiIAJeAhLy1bkeqNB/U51D16hFQAREQAREoOIErgLw24r3Qh0QAREQARFILAEJ+cROXVEd/xSAnwLomaOWewDwR0kEREAEREAERMCewNcA8CdbWgLgBgDv2lepnCIgAiIgAiJQm4CEvFZEEIGNANr5HkwHMEy4RCBCAlxj3QFsc9fbcgBrI6xfVYkACZwIoD+A5wB0BTADwGahEYEICcwFMALAFk+d/PvWF8AmrbsISasqERABERCBGgIS8loMQQRuBTANgFkfQwDws13CJQIREpgIYJSnPr4MU3ApiUBUBJq5or2lWyH/zQ0jCnolESiWQB8A7d2/Y118Qn4DgI6eBvz/LrZtlRcBERABEahyAhLy6VsAfKkY7BNI3lHyOS0HtE61cB9M8WTgi+7HHtF+LoCtANalD5VGVASBYtcZm6Ylni+/xjo6B8CAIvqkoukjUOw6o9DihlEnDxquu84+0ZU+chpRGIFi15a3fv+aohcINya9645CnhZ6fZeGzYyei4AIiIAIWBGQkLfClJhMdFPmywMFUZAbPEX6atfVdLc7Kr7kUqh7xbx3wLTEj04MAXW0HASiWmfDAUx2f7gGl+oltxzTl5g2olhn3Iikd5FXUO1zj3SsSAwJdTRqAlGsrVxCfpC7od7Nk4mb51yLs6IejOoTAREQARGoTgIS8umYd57FmwSAZ9u5478si5CnaKeY94p8/ns7gLp30GUsWbMlrtKxSCIYRSnW2Z2uqKLrM93q6fasVN0Eol5nqwD0cy3wtJRyjdErSYKq+tZZ1GvLEPRb5G9y/6718iBmgDv+TK0+7BqxCIiACIhAKQhIyJeCamXr5K4/X1yDLPJ07aM497/AZrNQ8eXEnC2t7KjUetwIRLHOvGfi+eLLtdlBLs9xm+qK9ieKdcYB0ELKjU5uWlLIn6MNyorOaxwaj2ptcSx+IS+LfBxmWH0QAREQgZQTkJBP3wTnejmhYKfFfqFv2BT483wu9EEuqemjpREVSqDYdcazy4zR4N1U4jEObkL512ehfVS55BModp2RAL2OdnpQ7HXXnoJ3Jn99FDOCKNaWaV9n5IuZCZUVAREQAREoiICEfEHYYl0o28uJcaHn2UD/2dCgMkEWhVgPXJ0rK4Fi1xldnBmU0es5Qqs8N5S8VziVdVBqLHYEil1nHBA3MJu7ATy5xhjMU+7NsZvqsncoirWVTcjz8/VuME+zYcR/e2M1lH3AalAEREAERCBdBCTk0zWfHE22lxOeDaRraTYhz2feiOEU8syrKOLpWyNRjCiKdXape3XTDldorQnYZIqir6ojuQSiWGcMqsg1xu87inidjU/ueoiy51GsLW5I8nuSx4JmuAE7jUdRWwC8upVeRgx6xyC0ilgf5QyqLhEQARGocgIS8ulbAIW8nNC1ntHsJdrTtx5KNSKts1KRVb1eAlpnWg+lIqC1VSqyqlcEREAERKAsBCTky4K5rI0U4i4oIV/WKUpFY1pnqZjG2A9C6yz2U5TYDmptJXbq1HEREAEREAESkJBP3zooJIAPA/VM133x6VsMJRyR1lkJ4arqGgJaZ1oMpSKgtVUqsqpXBERABESgLAQk5MuCuayNhF2pMy0g0BODQTFK/eNl7akaSzIBrbMkz15y+q51lpy5SlpPtbaSNmPqrwiIgAiIQC0CEvLpWxC5Xk4YkIdB77xn4du70XUbpA+FRlRCAlpnJYSrqq0s8vp7poVSDAH9DSuGnsqKgAiIgAhUnICEfMWnIPIO5Ho54RV0fN7FvYqJjdNCz2txbou8J6owzQS0ztI8u/EZm9ZZfOYibT3R2krbjGo8IiACIlBlBCTk0zPhtE7Rut7HHdJ8AJsCzr3zSpyhABjgroXuVE7PAijTSLTOygS6ypvROqvyBVDC4WttlRCuqhYBERABESgfAQn58rFWSyIgAiIgAiIgAiIgAiIgAiIgAiJQNAEJ+aIRqgIREAEREAEREAEREAEREAEREAERKB8BCfnysVZLIiACIiACIiACIiACIiACIiACIlA0AQn5ohGqAhEQAREQAREQAREQAREQAREQAREoHwEJ+fKxVksiIAIiIAIiIAIiIAIiIAIiIAIiUDQBCfmiEaoCERABERABERABERABERABERABESgfAQn58rFWSyIgAiIgAiIgAiIgAiIgAiIgAiJQNAEJ+aIRqgIREAEREAEREAEREAEREAEREAERKB8BCfnysVZLIiACIiACIiACIiACIiACIiACIlA0AQn5ohGqAhEQAREQAREQAREQAREQAREQAREoHwEJ+fKxVksiIAIiIAIiIAIiIAIiIAIiIAIiUDQBCfmiEaoCERABERABERABERABERABERABESgfAQn58rFWSyIgAiIgAiIgAiIgAiIgAiIgAiJQNAEJ+aIRqgIREAGXwHQAgwC0B7BFVFBqHmTNNvoCWJhg3vMA9HH7/zGALgDWlXE8EwGM8LSXdJ5lRKemREAEREAEREAEKkVAQr5S5NWuCKSPwDRXyHeQkHcmt9Q8KOTZRr+EC/m5rpCfBGAbgBkAdpXx1+MEAN0BdHM3RZLOs4zo1JQIiIAIiIAIiEClCEjIV4q82hWB9BEotXBNGrFS80ibkG9RZgHvX09p4Zm03xP1VwREQAREQAREoAACEvIFQFMRERCBQAKlFq5Jw15qHmkRnsYiLyGftBWu/oqACIiACIiACFSMgIR8xdCrYRFIHYFcwpVu0+cC6AxgE4A1AEYC2BxAoRmAyW5+nrc3eemyzzZYh+0Zap53Hg2gHYDmbts8Vz4loF1vH9nmc24fd3ry+vu2w1PnTF+duXgMdt24u7pl2Bb7tSALj1kATgTQ0u0X8/K/bVzr8+kzmy+Efz7j8Q8xSMib8/88uz7VV4D92w5gtesOb2IR1AfAfnAenwXQyy1nuwbSsjGSuj8sGpAIiIAIiIAIiEBdAhLyWhUiIAJREcgmXDcCaOsR8BTUPJPMvz/8/xU+oUwRzfxGTDMPRfwyV9zbBkOjqGOfKPpYluevTV08hz3UbZfC0N8mBTbbYVluAvDMNvMxiF9Ttz5uSFBMs05ak/2iMxsPE9yNXNgvbx0UpcM8PNg2+8Y2zQYINzLIg//m81xnuvPtcxCLMP75jCdorQUJeb9Y95Yj51td0f5LTywCzif5cc7mALjOzWOzBli/hHxUfwlUjwiIgAiIgAiIQMkJSMiX
HLEaEIGqIRAkXGkdvckVWxRWJjHAGAUqRVcrz+cUYgM9Is08onCjBT+fqOasmwKYGwe7PW1Q0PPzA9zPTJsUiLd58rHf7D9/aNVndHN+5s/HumiZNxZiU0UQD7O5QNFIEepNtMrT6t7Ds7mxFMA5AW0Od70WyCOXkM+3z/nyz3c8tkKe+Za4Gzf+4IlmY8i44pMz+0EWtL7f72nEdg1IyFfNnykNVAREQAREQATSQUBCPh3zqFGIQBwIBAnXfQFi3S90jRA1VlgK2pMCBkQBzjy2FvlsbdPaT3Fv3POZbwOAIwPaXAWAP9yE4OYDXf2Drnpj37YC6OSpI4gH26HgNC713iZp+aZ121jlaW2naM3Gg3WFWeTz6XMh/PMZT7Y1mu2MPK+kIw+vpwM3OrhhwjKXuRUaIc//924W8bHtGpCQj8NfEPVBBERABERABETAmoCEvDUqZRQBEQgh4BeuRhjSqmzOK3ur8As1I9KMBdzfnLHQeoU8haw/mXP3pj88407hR1FIUew9827a9Lu020w2XdzZF25EUIRTdIcJeQpLCvlsf3v5bD6AAe6VbOxzNh7G2p7PdWm5+lwI/3zGk6+QN0Lc6+lAFvRGIO/HPUKeHg5BHGzWgOmXXOttVr3yiIAIiIAIiIAIxIKAhHwspkGdEIFUEPAL+TCR7H8eZIH1gjH1e4U8haQ3+V316aY/xA2Qx3z8m2eC5y23EMv+iaHgZ9A+WuYpunlOnefc2UaYkDcWdgpTWpmz/f2ldZ/eAkZYsm4Gu/MnuvlTzIcJeds+58s/3/EUIuSNtd6415PN/2/vDm6jJ+IwDq9EB4gSaAEK4MSZA7UgCuBAA19ZiDM10AP6STuSY7yxZ5Vl1+TZCwmZ2DOP54v0evwf574sx9iaF8tz7c0BQf5/8efHIAgQIECAwOcSEOQ/1/U2WgKPFJhdkS8Qt1o/VpzX36/7OkLdMsj/tGpUrfpy87zx4+rYe5y90Fs9dZ+O09/AgvWRFflRrz4efx8rwh3r6KP13XhY19LfuiZ7HkdW5Gf6vHe+Lf+Z8dwT5Jc3F/64zpfG/eviYHtBfm8OjBILK/KP/Ovg2AQIECBAgMCHCgjyH8rpYAQ+tcBsjfzYWG1dI38r6M7UyLda3Ep29e3rV7qNV5t1A6FQ2Cr+ejV9XMiCaj/rJsDylWfrC300yI8xbL0zvVX6+tOGbdXhL1e8v9uYWTlVA39rRf69nd+3bj4cab/eo2BmPPcE+X5nXIOCfMF+vfndrSB/dA60kWEfQf5T//kyeAIECBAgcC4BQf5c10tvCbyywFaQL5gWUNcr3tU4V/NeEPxmMahWfQuyhfDle9nH7vdHd60fobTw3WPwy7r4UWfdynw7x49ztiv+8v3yI1iPFeARKJd18HV9tNt7tL62Yyf89c2K+tjGcWuPjLK61be9Xetn+zzrPzuerfl7a7O70Xb0qe+3bvLcCvJH5sCybEGQf+W/LvpGgAABAgQIvBEQ5E0IAgQ+SuDWe9P/uobpNqGrPn28R76Q2avWlo+ot4rahnStWNe2GvQ2aOv7/v94p3nvc9/7fLk+Rl+IH++Rb2W94xWa+3q8H77z1K9Wffu6YF27gnVf124Eztr0nvLqtLvpUJju00rx75fL5bdr+1sePSXQsUe/+t2Os+exfI98Tm2K12r8WMF/LyQf7fM9/jPjuSfIj0f+t14v1/Hee7T+6BzoOIL83r8oPydAgAABAgReRkCQf5lLoSMETi9QoGpjsW8vl8s6aPce+EJ4AbZA2srqLxvtQqievVXz2heiq/P++brhW49WfzUh1Q7nrbp2nD6duxr3VtkL58tPoW+cs3adtz4u2zWOgnPH60ZDbarXLmw2/q+vtfeN/z2P+tXO9HlU118YbtX9z42x5dHTCbXtvPWt/nf+bm7sbXY30+d7/WfGsx7i3op87cfu+FvXfq9G/ugcEOQn/mFpSoAAAQIECDxXQJB/rr+zEyDwVqBA3Cp4K8jrT6G1ne5ngjzfOYFn+O8F+W5gdO233hM/N7r3WwvyH6npWAQIECBAgMBDBQT5h/I6OAECkwI98t7j3a1sL+vaR019NeM/Th5T8+MCz/DfC/Jj5/3l2wqOj+h4S0H+uJWWBAgQIECAwJMFBPknXwCnJ0DgjUCPzhfsCvH9tw3k2rG9x9lbqa+u/Uh9PNb7BJ7hP4J8Gw3+fd0YsXKGSiC6qTNW5L+/b0i7v9VTHt0oap41/r1Shd0DakCAAAECBAgQeLSAIP9oYccnQGBW4IdrvXihvQ3oqgWvhnxdrz57XO2PCfzX/iPIj96Nlfeevhgb3bVHwnpPg2Oj2W/VfgPtvt+nDfU6V6//8yFAgAABAgQIvKyAIP+yl0bHCBAgQIAAAQIECBAgQIDAvwUEebOCAAECBAgQIECAAAECBAicSECQP9HF0lUCBAgQIECAAAECBAgQICDImwMECBAgQIAAAQIECBAgQOBEAoL8iS6WrhIgQIAAAQIECBAgQIAAAUHeHCBAgAABAgQIECBAgAABAicSEORPdLF0lQABAgQIECBAgAABAgQICPLmAAECBAgQIECAAAECBAgQOJGAIH+ii6WrBAgQIECAAAECBAgQIEBAkDcHCBAgQIAAAQIECBAgQIDAiQQE+RNdLF0lQIAAAQIECBAgQIAAAQKCvDlAgAABAgQIECBAgAABAgROJCDIn+hi6SoBAgQIECBAgAABAgQIEBDkzQECBAgQIECAAAECBAgQIHAiAUH+RBdLVwkQIECAAAECBAgQIECAgCBvDhAgQIAAAQIECBAgQIAAgRMJCPInuli6SoAAAQIECBAgQIAAAQIEBHlzgAABAgQIECBAgAABAgQInEjgH9NVBKMlYAWKAAAAAElFTkSuQmCC">
(4000000.0, 4000000000.0)
### Check of the exclude_masses parameter
The default is exclude_masses=[32.,60.] because both masses can only be used in SSPs of solar Z and not in continuous-SFR simulations.
This test requires checking the output to verify that no 6 Msun or 7 Msun yields are used.
```python
reload(s)
s1=s.sygma(iolevel=0,exclude_masses=[32.,60.],mgal=1e11,dt=1e7,tend=1.3e10,imf_type='salpeter',alphaimf=2.35,imf_bdys=[1,30],sn1a_on=False,hardsetZ=0.0001,table='yield_tables/isotope_yield_table_cnoni.txt', sn1a_table='yield_tables/sn1a_cnoni.txt', iniabu_table='yield_tables/iniabu/iniab1.0E-04GN93_alpha_cnoni.ppn')
s2=s.sygma(iolevel=0,exclude_masses=[32.,60.,7,6],mgal=1e11,dt=1e7,tend=1.3e10,imf_type='salpeter',alphaimf=2.35,imf_bdys=[1,30],sn1a_on=False,hardsetZ=0.0001,table='yield_tables/isotope_yield_table_cnoni.txt', sn1a_table='yield_tables/sn1a_cnoni.txt', iniabu_table='yield_tables/iniabu/iniab1.0E-04GN93_alpha_cnoni.ppn')
#s3=s.sygma(iolevel=1,exclude_masses=[],mgal=1e11,dt=1e7,tend=1.3e10,imf_type='salpeter',alphaimf=2.35,imf_bdys=[1,30],sn1a_on=True,iniZ=-1,hardsetZ=0.0001,table='yield_tables/isotope_yield_table_cnoni.txt', sn1a_table='yield_tables/sn1a_cnoni.txt', iniabu_table='yield_tables/iniabu/iniab1.0E-04GN93_alpha_cnoni.ppn')
s3=s.sygma(iolevel=0,exclude_masses=[32.,60.,7,6,3],mgal=1e11,dt=1e7,tend=1.3e10,imf_type='salpeter',alphaimf=2.35,imf_bdys=[1,30],sn1a_on=False,hardsetZ=0.0001,table='yield_tables/isotope_yield_table_cnoni.txt', sn1a_table='yield_tables/sn1a_cnoni.txt', iniabu_table='yield_tables/iniabu/iniab1.0E-04GN93_alpha_cnoni.ppn')
```
SYGMA run in progress..
SYGMA run completed - Run time: 0.19s
SYGMA run in progress..
SYGMA run completed - Run time: 0.19s
SYGMA run in progress..
SYGMA run completed - Run time: 0.19s
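For reference, the check below transcribes into math what the following code computes: the Salpeter IMF normalization (I) over the IMF boundaries of 1 and 30 Msun, the number of stars formed between two mass limits (II), and an expected total yield of 0.1 per star formed, as written in the code below.

$$
k_N = \frac{0.35\,M_{\rm gal}}{1^{-0.35}-30^{-0.35}} \;(\mathrm{I}),\qquad
N_{\rm tot} = \frac{k_N}{1.35}\left(m_{\rm low}^{-1.35}-m_{\rm up}^{-1.35}\right) \;(\mathrm{II}),\qquad
\mathrm{Yield} = 0.1\,N_{\rm tot},
$$

with $M_{\rm gal}=10^{11}$ and $(m_{\rm low}, m_{\rm up})=(1,30)\,$Msun for the full population.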
```python
k_N=1e11*0.35/ (1**-0.35 - 30**-0.35) #(I)
N_tot=k_N/1.35 * (1**-1.35 - 30**-1.35) #(II)
Yield=0.1*N_tot
print 'Should be 1:',sum(s1.history.ism_iso_yield[-1])/Yield
print 'Should be 1:',sum(s2.history.ism_iso_yield[-1])/Yield
```
Should be 1: 1.0
Should be 1: 1.0
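As an additional cross-check (added here, not part of the original assertions), the two runs can also be compared directly: since both reproduce the analytic yield, excluding the 6 and 7 Msun grid points should leave the total ejecta unchanged and the ratio should again be 1.
```python
# Extra sanity check: total ejecta with and without the 6 and 7 Msun grid points.
print 'Should be 1:',sum(s1.history.ism_iso_yield[-1])/sum(s2.history.ism_iso_yield[-1])
```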
```python
N_tot=k_N/1.35 * (1**-1.35 - 8**-1.35) #(II)
Yield=0.1*N_tot
print sum(s1.history.ism_elem_yield_agb[-1])/Yield
N_tot=k_N/1.35 * (8**-1.35 - 30**-1.35) #(II)
Yield=0.1*N_tot
print sum(s1.history.ism_elem_yield_massive[-1])/Yield
```
1.0
1.0
#### For the case where the 3 Msun model is excluded (a low-mass model with C yields), the boundary (3.5 Msun) changes to 3 Msun and hence N-14 is ejected by lower-mass stars.
```python
Yield_lagb_sim=s3.history.ism_iso_yield[-1][0]
Yield_magb_sim=s3.history.ism_iso_yield[-1][1]
Yield_massive_sim=s3.history.ism_iso_yield[-1][2]
```
```python
N_lagb=k_N/1.35 * (1**-1.35 - 3**-1.35)
Yield_lagb=0.1*N_lagb
N_magb=k_N/1.35 * (3**-1.35 - 8.**-1.35)
Yield_magb=0.1*N_magb
N_massive=k_N/1.35 * (8.**-1.35 - 30**-1.35)
Yield_massive=0.1*N_massive
```
```python
print 'Should be 1:',Yield_lagb_sim/Yield_lagb
print 'Should be 1:',Yield_magb_sim/Yield_magb
print 'Should be 1:',Yield_massive_sim/Yield_massive
```
Should be 1: 1.0
Should be 1: 1.0
Should be 1: 1.0
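As a contrast check (a sketch added here, not run in the original notebook), the same comparison can be made for s1, where the 3 Msun model is still included; the low-mass AGB boundary should then remain at the default 3.5 Msun quoted above.
```python
# Sketch: with the 3 Msun model included (s1), the boundary stays at 3.5 Msun,
# so the expected low-mass AGB isotope yield uses 3.5 instead of 3.
N_lagb=k_N/1.35 * (1**-1.35 - 3.5**-1.35)
print 'Should be 1:',s1.history.ism_iso_yield[-1][0]/(0.1*N_lagb)
```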
### With transitionmass and exclude_masses: change transitionmass to 6 Msun
Transition masses are set at 6 and 13 Msun; the excluded masses are 6 and 7 Msun in one case and 12 Msun in the other.
```python
s1=s.sygma(iolevel=0,exclude_masses=[32.,60.,7,6],transitionmass=6,mgal=1e11,dt=1e7,
tend=1.3e10,imf_type='salpeter',alphaimf=2.35,imf_bdys=[1,30],sn1a_on=False,
hardsetZ=0.0001,table='yield_tables/isotope_yield_table_cnoni.txt',
sn1a_table='yield_tables/sn1a_cnoni.txt',
iniabu_table='yield_tables/iniabu/iniab1.0E-04GN93_alpha_cnoni.ppn')
s2=s.sygma(iolevel=0,exclude_masses=[32.,60.,12],transitionmass=13,mgal=1e11,dt=1e7,
tend=1.3e10,imf_type='salpeter',alphaimf=2.35,imf_bdys=[1,30],sn1a_on=False,
hardsetZ=0.0001,table='yield_tables/isotope_yield_table_cnoni.txt',
sn1a_table='yield_tables/sn1a_cnoni.txt',
iniabu_table='yield_tables/iniabu/iniab1.0E-04GN93_alpha_cnoni.ppn')
```
Warning: Non-default transitionmass chosen. Use in agreement with yield input!
SYGMA run in progress..
SYGMA run completed - Run time: 0.2s
Warning: Non-default transitionmass chosen. Use in agreement with yield input!
SYGMA run in progress..
SYGMA run completed - Run time: 0.16s
```python
k_N=1e11*0.35/ (1**-0.35 - 30**-0.35) #(I)
N_tot=k_N/1.35 * (1**-1.35 - 30**-1.35) #(II)
Yield=0.1*N_tot
print 'Should be 1:',sum(s1.history.ism_iso_yield[-1])/Yield
```
Should be 1: 1.0
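A natural follow-up (sketched here, not part of the original checks) is to repeat the AGB/massive split from above with the shifted boundary: for s1 the transition mass is now 6 Msun, so with the flat toy yields both ratios should again be close to 1.
```python
# Sketch: AGB/massive split at the non-default transition mass of 6 Msun (s1).
N_agb=k_N/1.35 * (1**-1.35 - 6**-1.35)
print 'Should be 1:',sum(s1.history.ism_elem_yield_agb[-1])/(0.1*N_agb)
N_massive=k_N/1.35 * (6**-1.35 - 30**-1.35)
print 'Should be 1:',sum(s1.history.ism_elem_yield_massive[-1])/(0.1*N_massive)
```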
```python
fig=1
s1.plot_totmasses(fig=fig,marker='^',label='all, mt=6')
s1.plot_totmasses(fig=fig,marker='>',source='agb',label='agb,mt=6')
s1.plot_totmasses(fig=fig,marker='<',source='massive',label='massive,mt=6')
s1.plot_totmasses(fig=fig,source='sn1a',label='sn1a,mt=6')
s2.plot_totmasses(fig=fig,label='all, mt=12')
s2.plot_totmasses(fig=fig,source='agb',label='agb,mt=12')
s2.plot_totmasses(fig=fig,source='massive',label='massive,mt=12')
s2.plot_totmasses(fig=fig,source='sn1a',label='sn1a,mt=12')
```
<IPython.core.display.Javascript object>
<img src="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAA/IAAAHGCAYAAADXFrCqAAAgAElEQVR4XuydB7gV1dX+X6lSpKooFro1/mmSfIkalSIWNFGqDSw0e6Fj9IJKFdQgnwICigIBLqIYNSJIYssXQYpd6WInUhXp9/6fd+bM5XA4ZebMKVPe/Tw83HvOnr3X/u2ZO/POWnvtI6AiAiIgAiIgAiIgAiIgAiIgAiIgAiLgGwJH+MZSGSoCIiACIiACIiACIiACIiACIiACIgAJeZ0EIiACIiACIiACIiACIiACIiACIuAjAhLyPposmSoCIiACIiACIiACIiACIiACIiACEvI6B0RABERABERABERABERABERABETARwQk5H00WTJVBERABERABERABERABERABERABCTkdQ6IgAiIgAiIgAiIgAiIgAiIgAiIgI8ISMj7aLJkqgiIgAiIgAiIgAiIgAiIgAiIgAhIyOscEAEREAEREAEREAEREAEREAEREAEfEZCQ99FkyVQREAEREAEREAEREAEREAEREAERkJDXOSACIiACIiACIiACIiACIiACIiACPiIgIe+jyZKpIiACIiACIiACIiACIiACIiACIiAhr3NABERABERABERABERABERABERABHxEQELeR5MlU0VABERABERABERABERABERABERAQl7ngAiIgAiIgAiIgAiIgAiIgAiIgAj4iICEvI8mS6aKgAiIgAiIgAiIgAiIgAiIgAiIgIS8zgEREAEREAEREAEREAEREAEREAER8BEBCXkfTZZMFQEREAEREAEREAEREAEREAEREAEJeZ0DIiACIiACIiACIiACIiACIiACIuAjAhLyPposmSoCIiACIiACIiACIiACIiACIiACEvI6B0RABERABERABERABERABERABETARwQk5H00WTJVBERABERABERABERABERABERABCTkdQ6IgAiIgAiIgAiIgAiIgAiIgAiIgI8ISMj7aLJkqgiIgAiIgAiIgAiIgAiIgAiIgAhIyOscEAEREAEREAEREAEREAEREAEREAEfEZCQ99FkyVQREAEREAEREAEREAEREAEREAERkJDXOSACIiACIiACIiACIiACIiACIiACPiIgIe+jyZKpIiACIiACIiACIiACIiACIiACIiAhr3NABERABERABERABERABERABERABHxEQELeR5MlU0VABERABERABERABERABERABERAQl7ngAiIgAiIgAiIgAiIgAiIgAiIgAj4iICEvI8mS6aKgAiIgAiIgAiIgAiIgAiIgAiIgIS8zgEREAEREAEREAEREAEREAEREAER8BEBCXkfTVaUqXMA9AewIeqzegA6AFgH4GwAkwCs9+fwZLUIiIAIiIAIiIAIiIAIiIAIiEAiAhLy/jo32gOoD2AggOYxQn4NgIZRw4n93V8jlbUiIAIiIAIiIAIiIAIiIAIiIAJxCUjI5+7EoADvGRHh8Xrl9/SyfwCgeqTCIwnM2wKgWZSQbwqAXvpGMUKeHvqVuRuiehIBERABERABERABERABERABEcg2AQn5bBM2228dEdqzAdwSp8uqAJYBoCD/OfL9SACbAcQT87FCvkfkJUGLqLb5QmACgMm5GaJ6EQEREAEREAEREAEREAEREAERyAUBCfnsUua69VEA1kbWry9KIOQp2inmo0U+f98KoFQcE2OFfL/Iy4K2UXXfAMB/Y7I7RLUuAiIgAiIgAiIgAiIgAiIgAiKQSwIS8rmjTQ/50gRCnuvZKeZjvedFEYG+OMZMeeRzN2/qSQREQAREQAREQAREQAREQAQ8RUBCPnfTkUzIU7BzPfu8GHMo8AsBDEoh5LVGPnfzqJ5EQAREQAREQAREQAREQAREIK8EJORzhz+RkLdC6LmOPtbznuiYWI88R7E6ksl+R2RI/D06+V3uRqqeREAEREAEREAEREAEREAEREAEskZAQj5raA9rOJEo5zp6rqFPJOT5XedIa/S8sx7D8LlP/MIoL35dAL0i4ftMesfEespYn7v5VU8iIAIiIAIiIAIiIAIiIAIikBMCEvI5wWx0ko6QZ2g9s9lbQj6T1p6QZmPfpnmcDhMBERABERCBIBHQfTRIs6mxiIAIiIDPCEjI527C0gmtz6aQL05z6Dpn0gSnw0RABERABAJFQPfRQE2nBiMCIiAC/iIgUZa7+Uon2R3Xwk+Mk+zOrdXHANiUZiPRe9Wn2YQOEwEREAEREAHfE+BONOmUrgBeB/DfdA7WMSIgAiIgAiJAAhLyuTsPUm0/NyHOnu/MZt8KwD8zZCbDAL/JUFtqRgREQAREQAREID0CFPPPp3eojhIBERABERABCflcngPJhDyT1zHpXfRa+PqRTPSlM2ikayG/dGm6DojMjOLiiy/G5s2bUbNmTbz+Oh0a2SuZ7MtNW06PtVvfTr1UdeJ9v2nTJlx22WWHTMyrr76KY489NnuTleGWU407k91lsi83bTk91kn9VHXT+V7nmbOzMBVjJ625acvpsU7q26mbrI7Tv2ctWrgKUDsPwLtOuKuuCIiACIiACEQTkEc+d+dDMiHPLej4fXMA1vZx9NBzC7mxGTTxMCFPYX788cfb7uKEE9LN7WO7i6QVaesPP/yA4447Dt9//31mGk3QSib7ctOW02Pt1rdTL1WdeN9/++23OPHEEw+h+s033yDf546TkyXVuJ20lapuJvty05bTY53UT1U3ne91nqU6sw79PhVjJ625acvpsU7q26mbrI7Tv2c8B52WDz/80HrRybcAvO+riIAIiIAIiEBaBCTk08Lm6CB62+ldbx85ai6AdXHWvXP7uN4AmOCuOgAm0RnjqKfUlQ8T8hJYiaHZeShMjdys4aYtp8farW+nXqo6Th987fLKd71U486kfZnsy01bTo91Uj9V3XS+l5B3dhamYuykNTdtOT3WSX07dTMp5J0ws+p+8MEHiHjyJeTTAahjREAEREAESghIyIfrZDhMyDPcuVSpUodR6NOnD/r27es5OnYe1DJldCb7ctOW02Pt1rdTL1UdCXn3Z1sqxk56cNOW02Od1E9VN53vJeSdnBnuXibG9pRqvpJZ5vRYJ/Xt1M2FkB8zZgzGjo0fTLdv3z5jeRgACXlnp7Bqi4AIiIAIxBCQkA/XKWF7jXxBQQGGDBniOTp2HtQyZXQm+3LTltNj7da3Uy9VHQl592dbKsZOenDTltNjndRPVTed7yXknZwZEvIWrVwIed47hw4dmmqCJORTEdL3IiACIiACSQlIyIfrBJFH3sF8pxIXDppSaL3WyCc8XYJ+nnHgqcaYzvcS8k7+AqWeAyetpZoveeTlkXdyPqmuCIiACIhAegQk5NPj5tejtEbewcy5eViN7cZNW06PtVvfTr1UdeSRd3BCJaiairGTHty05fRYJ/VT1U3newl5J2eGhHwuPfLJZkZr5J2dt6otAiIgAiKQmICEfLjODt8Lea49/OWXX1C5cuWsr+HPZF9u2nJ6rN36duqlqhPv+yAIrFT
jzuSfjUz25aYtp8c6qZ+qbjrf6zxzdhamYuykNTdtOT3WSX07dZPVycXfMwl5J2ea6oqACIiACCQjICEfrvPD90I+XNPlz9EGQWD5k3y4rNZ5Fq75ztdoM32eScjnaybVrwiIgAgEj4CEfPDmNNmIfL9GPlzT5c/RZvrB158UZHW2Ceg8yzZhtU8C6Zxnylqvc0cEREAERCAXBCTkc0HZO334Pmu9d1DKkkQE0nnwFU0RcEpA55lTYqqfDoF0zjNlrU+HtI4RAREQARFwSkBC3ikxf9eXR97f8+cL69N58PXFwGSkpwjoPPPUdATWmHTOM3nkA3s6aGAiIAIi4CkCEvKemo6sG6M18llHrA7SefAVNRFwSkDnmVNiqp8OgUyfZ1ojn84s6BgREAEREIF4BCTkw3lenA1g6dKlS3H22fxRRQREQAREQAREINsEJOSzTVjti4AIiEB4CEjIh2euo0cqIR/OedeoRUAEREAE8khAQj6P8NW1CIiACASMgIR8wCbU5nAk5G2CUjUREAEREAERyBQBCflMkVQ7IiACIiACEvLhPAcMIV+zZk2ULVs2LoE+ffqgb9++4aSjUYuACIiACIhAmgSU7C5NcDpMBERABETAEQEJeUe4AlPZEPLJRlNQUABuoaMiAiIgAiIgAiJgn4C2n7PPSjVFQAREQATSJyAhnz47Px8pj7yfZ0+2i4AIiIAIeJaAPPKenRoZJgIiIAKBIiAhH6jptD0YrZG3jUoVRUAEREAERCAzBLRGPjMc1YoIiIAIiAAgIR/Os0BCPpzzrlGLgAiIgAjkkYCEfB7hq2sREAERCBgBCfmATajN4UjI2wSlaiIgAiIgAiKQKQIS8pkiqXZEQAREQAQk5MN5DkjIh3PeNWoREAEREIE8EpCQzyN8dS0CIiACASMgIR+wCbU5HCW7swlK1URABERABETACQElu3NCS3VFQAREQATSJSAhny45fx+n7ef8PX+yXgREQAREwKMEtP2cRydGZomACIhAwAhIyAdsQm0ORx55m6BUTQREQAREQAScEJBH3gkt1RUBERABEUiXgIR8uuT8fZzWyPt7/mS9CIiACIiADwlojbwPJ00mi4AIiIBHCUjIe3RismyWhHyWAat5ERABERABEYglICGvc0IEREAERCBTBCTkM0XSX+1IyPtrvmStCIiACIhAAAhIyAdgEjUEERABEfAIAQl5j0xEjs2QkM8xcHUnAiIgAiIgAhLyOgdEQAREQAQyRUBCPlMk/dWOhLy/5kvWioAIiIAIBICAhHwAJlFDEAEREAGPEJCQ98hE5NgMCfkcA1d3IiACIiACIiAhr3NABERABEQgUwQk5DNF0l/taPs5f82XrBUBERABEfAJAW0/55OJkpkiIAIi4HMCEvI+n8A0zTeEfLJjCwoKMGTIkDSb12EiIAIiIAIiEE4CvHcOHTo01eBbAPggVSV9LwIiIAIiIAKJCEjIh/PckEc+nPOuUYuACIiACGSZgDzyWQas5kVABERABAwCEvLhPBG0Rj6c865Ri4AIiIAI5JGA1sjnEb66FgEREIGAEZCQD9iE2hyOhLxNUKomAiIgAiIgApkiICGfKZJqRwREQAREQEI+nOeAhHw4512jFgEREAERyCMBCfk8wlfXIiACIhAwAhLyAZtQm8ORkLcJStVEQAREQAREIFMEJOQzRVLtiIAIiIAISMiH8xyQkA/nvGvUIiACIiACeSQgIZ9H+OpaBERABAJGQEI+YBNqczgS8jZBqZoIiIAIiIAIZIqAhHymSKodERABERABCflwngMS8uGcd41aBERABEQgjwQk5PMIX12LgAiIQMAISMgHbEJtDkdC3iYoVRMBERABERCBTBGQkM8USbUjAiIgAiIgIR/Oc8AQ8jVr1kTZsmXjEujTpw/69u0bTjoatQiIgAiIgAikSWDMmDEYO3Zs3KP37duHzZs387sWAD5IswsdJgIiIAIiIAKQkA/nSWAI+WRDLygowJAhQ8JJR6MWAREQAREQgTQJ8N45dOjQVEdLyKcipO9FQAREQASSEpCQD+cJIo98OOddoxYBERABEcgyAXnkswxYzYuACIiACBgEJOTDeSJojXw4512jFgEREAERsEHg1k6d0Ozcc9G1d2+UK1fOxhH2qmiNvD1OqiUCIiACIpCagIR8akZBrCEhH8RZ1ZhEQAREIIAEvvvuO/zv//4v3n77bRw4cCAnI1y/fDn+3969WFmuHKoefzxqHnccjjjC/SPTL7/8go8//phjUGh9TmZSnYiACIhAcAm4vysFl02QRyYhH+TZ1dhEQAREwMcEioqKMH/+fEyePBn//ve/sW3bNmM0FNKlSpU6bGQti4vRtLg44Yi3Apga57joAzoUFaFO1AdHFBejSuT3PQA+ATCfbSQR872LilApCff3jzgC7wDg+CTkfXyCynQREAER8AgBCXmPTESOzZCQzzFwdScCIiACIpCYwMaNG/HEE0/g73//O9asWYPqBw7gEgBXlCuHwrZtccMtt6Bt27ZxhTz69wcmTEjceIMGwIoVyfH/6U/AP/9ZUufXnTtR0RTcRmEcwDMVKmBGrVq49p574ofcn3IK8MMPifvp2xcfXHopWrSgM14eeV0PIiACIiAC7ghIyLvj59ejJeT9OnOyWwREQAQCQIBe6cLCQkydOhX/+c9/8POOHWhG4V6qFK4sXx6/2b0b9IqjaVNgzhygYcOcjrrdKafgldWrD+tzL4DnUgn6JJZqjXxOp1GdiYAIiECgCUjIB3p6Ew5OQj6c865Ri4AIiEDeCKxduxbjxo3Da6+9hnXr1hkh5pUBTDrySFxSXIxqe/YAlSsDF10EXHopcMklQO3aAAX9ypXA//0fYK2Rr1MHuOKK5GOZNg3YsSNxHb4kOPfcuN8nEvJWZUPQly2LGcccg2sHDEicFO/HH80XEZHywcaNaDFmDH/TGvm8nYnqWAREQASCQUBCPhjz6HQUEvJOiam+CIiACIiAIwIU6hTtTFT33nvv4eeffzaOZxb43/zmN+jQoQNu6d0b1a68EmjeHLjsMlNYM0v8vn3AW28B8+cDL78MbNwIcI166dKmDRdfbH6erDDUfcOGxDXuvRcYOTItIW8dtBNAu/LlcXrHjnjy+ecPb+uDD4A//OGgkC8qQgvzZYSEvKOzSZVFQAREQARiCUjIh/OckJAP57xr1CIgAiKQVQK7d+/GhAkTMH36dHz00UfYR0EO4Nhjj0XLli1x22234dwEXvBDDOvTB3j0UeCEE0zPO9ewX3ABUL58Vu23GrfjkZ9WoQJmJlszH8dShdbnZPrUiQiIgAiEgoCEfCim+bBBSsiHc941ahEQARHIOAEmqnvsscfw0ksv4auvvsJRxcW4FMC1lSvjd+XKofiTT3Ds8cc765fr05mtnp76FBnnnTVsr3ayNfLpCHir1wwI+boAjrY3CtUSgdAS+AlAknCc0HLRwANGQEI+YBNqczgS8jZBqZoIiIAIiMDhBN599108/vjj+Oc//4ktW7aAMv2qMmVwXaVKaPHLLy
jN8PHGjU1P+oABQMWKZiNc7/7pp0ClSkC9ep5FGyvkuSbejYDPkJCvW6pUqc+LioqO9Cw4GSYCHiBQqlSp3UVFRadLzHtgMmRCVglIyGcVr2cbl5D37NTIMBEQARHwHoH9+/djxowZxt7u9CozhJ7lrKOOwtwyZXDK1q2m5/y884A//9kU8JZQ5zZu770HvPSS+W/dOoCh82bSN08WS8hnSsBnSMgb9+5p06bhjDPO8CQ3GSUC+Sbw2WefoVu3bjRDeSjyPRnqP+sEJOSzjtiTHRgPAzVr1kTZsmXjGtinTx/07dvXk8bLKBEQAREQgewT2LFjh5FlfubMmfjyyy+NLPMsJ598Mq644grce++9qMes8l27mhnm27UDjo5EfbPuv/9tZmyfOxf4/nsulAcuv9wU+a1bAxUqZH8QafbQtmFDdPjuO8dr4NndmDFjMHbs2Lg9M2fA5s2b0xUZegmf5nzqsPAQyMDylfDA0kh9T0BC3vdTmNYAjIeBZEcWFBRgyJAhaTWug0RABERABPxJgOvdKUS53v3rr782BlG6dGmceeaZuO6663DLLbegMreIS1VuuQWYMAGoVQvo0AHo1Ak455yDWedTHZ/n72/t1AnNzj038bZySezjvXPo0KGpRpCOt1BCPhVVfR96AhLyoT8FQgVAQj5U010yWHnkwznvGrUIiEDACVzbujW2c6s2m6XqySej35gxGD16NN544w3DW0w/+RVly+Km6tXRrEoV1PjyS5RymnBuxQpzD3dmqLe2jLNpk9+rySPv9xmU/X4mICHv59mT7U4JSMg7JRaM+nqrH4x51ChEQARE4BACqbZNi8V12hFH4MviYlQB0KFCBXSvVg2/3bIFpffsAU4/HbjqKqCgAIhehsWEdfz+SOVcc3r6uRQZunc7Ba76oSPg8hoLHS8N2N8EJOT9PX/pWq+HgXTJ6TgREAER8DABp0L+/5UqhenHH4/f/PgjSu3fDzRrZop3/qOQtwrF+9Kl5pr3wkKgZ0/gvvs8TMKbprkUGbp3e3NaZZWHCLi8xjw0EpkiAqkJSMinZhTEGnoYCOKsakwiIAKhJ+BUyLdr0ACvnHCCmYCO4r0utymPFIr3Dz8E/vY3YPZs4KuvzGR2rHfjjcD//E/oeTsF4FJk6N5tE3jHjh3xwgsvlCRojP3dZjOeqzZ37lyMGDECK1asQP369dG6dWtMYC4KlRICLq8xkRQBXxGQkPfVdGXMWD0MZAylGhIBERAB7xC4tGFDvLZ2rW2D2jVqhFdWrTq8/syZwEMPAV98AVSvDrRvD3TuDFxwAVCmjO32VfFQAi5Fhu7dNk+oTp06GUL+wIEDxhGxv9tsJiPVaAdfJCxatAgtW7ZMu03msRg4cCCqV69uCPh169Zh2bJlaN68OZYyWkbFIODyGhNFEfAVAQl5X01XxozVw0DGUKohERABEcgvgU2bNmHUqFGgt678xo2II8sTGphQyM+YAbz6KnD11UDbtkC5cvkdZEB6dykydO+2eR54TcjTnoULF6Yt5Ldv324I+LPPPhtLliwpodC7d288/fTTKCwsxFWMlFGRkNc5ECoCEvKhmu6SwephIJzzrlGLgAgEhMBXX32FkSNHYv78+fiee7QDOL1cOdQsKsI7XOtusyQU8jaPVzVnBCTknfFKt7aXhDxfsHXu3NmVkKc3ftCgQVi+fDkaN25cgoUCn2Nt06YN+vbtmy6uQB3n8hoLFAsNJvgEJOSDP8fxRighH85516hFQAR8TODzzz83xPtrr72Gn376yRjJSRUqoODEE9GhqAhV165FOwCvOBijhLwDWBmo6lJkOL537927FzfffDOmTJmCcjmIqsh2fwwnHzBggLFGnD9b68QZkVK1atWSGcqkkGdY/MqVK7F69WojRJ6h8tWqVTMENNen045evXoZYe4s1uf8+aKLLjJC6o844ggUFxcb/1vh/k5OpwYNGmDbtm3G9pAqyQm4vMaEVwR8RUBC3lfTlTFjHT8MZKxnNSQCIiACImCbAD1wTG5FMcAHeZYqVargwgsvRL9+/XDOSScBp50GXHwx0LEj2t1/P17JxBp52xaqohMCLkWG43v3uHHj0L9/f9Cje+eddzoxNa262eyP1wJDyymGO3TogBo1ahhh1BTQFLoU2lbJpJBnW2+++Sbq1atn9M316fSyU8C3atWqJJS7WbNmxudr1641Xjbwul28eDHmzJljhL9zHho2bGi8WHFaSpUqZXjdFyxYYPTBcdesWdPgQLtUDhJweY0JpQj4ioCEvK+mK2PGOn4YyFjPakgEREAERCApAQqW4cOHG6G4O3bsMOpStNC7R4HQpEmTQ4/ftQuoUMH4zHHW+kTJ7jRHWSHgUmQ4unfTO37qqadiw4YNqFu3Lr788suseuWz3Z+1Hjw2vNwS7RTQHCdLpoU8vfD0uj/55JMl5wWvSYa285rk9cpirWWPTkDHY2PXyLNeqhIdYWAJeXrkOX6+UGChl58vaRRWLyGf6nzS98EkICEfzHlNNSpHDwOpGtP3IiACIiAC7gjEE+9NqlXDTU2a4OJJk9CoUSNbHUjI28KUt0q5FPL0jjPL+a5du1C+fHncfvvt6NKlS9bGPmvWLIwfPx579uxBhQoVjGUgmYwCoHebUSmxSd0eeeQRY5z0zFsvubIh5KNfFBAiowMY4r9161YjSsYq9LpTaFsRAvGEPIV5ssLjJ06ciO7du5e8HGB9Rh5MmjTJiMhhuD9D/RkZ4CaRXtZOiDw17PIay5PV6lYE0iMgIZ8eN78fJSHv9xmU/SIgAr4nwAfxYcOG4Y033ijxvJ9RvToGNWqE9nv3osLKlQDX/f74I1C+vK3xSsjbwpS3Si5Fhu17d7R3PF+DzWYUAD3aZEkBS2HL37Mt5GPXtjNChn3GrlunwKc9yYQ8X0qkKmyHLwgsLz/FPUV7nTp1Sg5dv369Ie6tsPtUbYbhe5fXWBgQaYwBIhBWIZ/JPTrm+fB8sP0w4MOxyWQREAER8CyBeOK9QfXqGHzqqegMoBK3lqK3rk0bgN7TP/+Zi+Jtj+fa1q2xfeNG2/WrnnwyZixaZLu+Kroj4FJk2L53R3vjLYuz6ZWP9sZb/WXaK09BSw8116vTM29tx8b+mEPCT0Le6VlED36i/eIZAcCoACXCM6m6vMacTo3qi0BeCYRVyBdxaVEGyLONMhloJ9dN2H4YyLVh6k8EREAEgkbAEu/0HlprY5mo6rLLLkPB9dej/iWXAAcOAOefb+7bzv2gjz76cAzFxcB//gM8/zxQVARMmBA0VIEfj0uRYevencwbnw0vea76o5DltcQ16Qydt8LomUiO6+f9JOTpSU9VopPYcT0+Pe9Lly497DAJ+UORuLzGUk2LvhcBTxEIs5Av5EtcF7PRBkB7AKVdtJGvQ209DOTLOPUrAiIgAn4nwK3iHnroIWOrOEu882G8Xbt2GDx4sJGEzCgU51OmAJdeCtSuHX/Y69YB06ebAn7NGuD444GbbgIeftjvmEJnv0uRYeveHc8bb4HOtJec7eaiPyuEnII9O
uEc+7f2WPeTkKeH3dqSLt5FEL1Gnt/Hrvm3jrHC7hmGv4TRPCryyOscCBWBMAv5/gDGuJjtfgBGSsi7IKhDRUAERMBjBOhdZJjwo48+WhKuSk9Ynz59jERhyfbi3rhxoyHeX3rppZJ93qtXq4Z2l1+O++6776B4tzPmnTuBuXOBqVOBt98GKlY0PfXXXw+0agWU9uM7ZDsDD3adXAh5ij4mT4yXUK2oqAjcJo1bomWq5KI/S7Ayudvs2bNLTOeaca5Vp9B3KuR5DF+uRWeHj8ckkYjO1Rp52sTlBFwHz8z5Tz31VImZ5DFv3jwjT0A629pl6hzwUjsurzEvDUW2iEBKAqfWmHgAACAASURBVGEV8hTgswCsTEkocYWmgLGkcaCLNvJ1qK23+vkyTv2KgAiIQD4I/Pjjj2jZsqWxXdevv/56iAkVK1Y0trZikqpatWqVfPfTTz8ZCesoLr7//nvj86MqV8YdLVrgnmOPxdFMWPfxx0DZss6GxBDa3/4WuOAC4IYbgPbtgcqVnbWh2p4j4FJkhPrebWWJb9++PVq0aGHs104BS4HLZSvc353eeYbcp8paz0zyFMF2ksS5FfKWCOcLlM6dO6NfP/qBnBe+OLD2s2dbfFnDlxEcw+uvv+68wYAe4fIaCygVDSuoBMIq5IM6n3bHFeqHAbuQVE8ERCA8BOiJb9q0KT777LOkgz7jjDPwzjvv4PHHH8fzzz9viH4Whixf06IFHmjQACfTg752LXDcceaa94ICM/u8k8KQ+6++AiL7Yjs5VHW9S8ClyAj1vXvHjh3o0aOHkdiOhcKe285xKzZL5BYWFhrb01F801O9f/9+o27s79aWcBT/CxYsSHrCxB5rVWafFNN8mRddaBdtXbVqVcnHbdu2NexmdE/0507P1EGDBhkvL5jsj2KeUUKMFlI5SMDlNSaUIuArAhLyh04XUwPXSDGD5lObv4vxMMBkS2UTeIl4Y+jbt6+/RynrRUAERMAmgeeeew633HLLYZ742MOj17UyzP783/0Oj515Js5csQJ4/32gUiXTe37ddUDLlolD4CnUj9At2Ob0+KramDFjMHbs2Lg279u3z8ou3oIJth0OLNRC3iGrlNXpzeaa+1RCPmVDquApAhLynpoOGZNlAnqKMAHTVcIFY61T8PZrlvrYYRkPA8nGWlBQgCFDhmT59FPzIiACIuANAgzH/fDDD20ZQ+/7lClTjDDZUsw2T695kyameL/iClPMJyqffmque3/jDWD5cuch97YsVKV8EuC9c+jQoalMkJBPRSjL3zPbPdfYjxgxIss9qflcEpCQzyVt9ZVvAhLy5gxwD5+eANbZyGTfO9+TloH+5ZHPAEQ1IQIiEBwCXP/+FUPZbZQ6deqUhNQb1XfvBo48MvGR27cDs2aZAp6ZpatXN0X/gw8C1arZ6FFV/ERAHnnvz9aKFSswcuRIUMxXqcJgTJWgEJCQD8pMahx2CEjIm5S2ANgMoJEdaAGoo/C8AEyihiACIpA5ArVr1y5JVhfd6m8AfBLTzWFCPp4ZDJ1/911g0iQz+/yePUDbtsCNN5pe+2TCP3PDUkseI+BSZOje7bH5lDneI+DyGvPegGSRCCQhICFvwikCMArAoJCcLXoYCMlEa5giIAKJCWzatMlYQsQEWdEJq44FcC2AGwD8PwCxMdCNGzfGSmajT1b27gVOPNHMNH/zzUDXrsBJJ2k6Qk7ApcjQvTvk54+Gn5qAy2ssdQeqIQIeIiAhb07GmkjSmS4emptsmqKHgWzSVdsiIAKeJbB7924jERnXuDPZFUvlypVx1qmn4uSPPsI1+/bhUgBMiPJ3ANMA/APAvsiImOyOx95Iz3qqsmYNUL8+UKpUqpr6PiQEXIoM3btDcp5omOkTcHmNpd+xjhSBPBCQkDehc338UwDqA7C3SDIPk5XBLvUwkEGYakoERMDbBIqKijB9+nRjyzgmtOPv3LHj3HPPxX333YdWCxageOpUHLF5M5YDeBbA3wAcuqmUOUYKea6Bvvfee709aFnnSQIuRYbu3Z6cVRnlJQIurzEvDUW2iEBKAhLyJiJmOhkMoF8kxJ6blHLdfLySIp4yJXMvVNDDgBdmQTaIgAhklcDbb7+Nhx56CPyf+8RThJ911lm4++670a1bN5SyPOV33IEDpUrh0tmz8caPP6a06Yrjj8e8iy9G6TvuAJo2TVlfFUTAIuBSZOjerVNJBFIQcHmNia8I+IqAhLw5XVwjz0hKiwd/TlRK+2qG4xurh4EATKKGIAIicDiB7777zvCyv/jii9jObPEAmJyOofD9+vVDxYoV42IbN24cBg4ciF27dsX9vnxk3fxtAJoB+KVqVVR+/nng8ss1DSJgm4BLkaF7t23SqhhWAi6vsbBi07h9SkBC3pw4JrpLJt6jp3egT+c62mw9DARgEjUEERABk8D+/fvx6KOPYsKECSXr3qtVq4b2V12F4Z0749jGjYFatZLi6tSpE5YvX37QSx+pffru3eiyfTsu//lnVCwuxrsVK2JmlSrYds45mMVs9Coi4ICAS5Ghe7cD1qoaTgIur7FwQtOofUtAQt63U+fKcD0MuMKng0VABLxAYMGCBXjwwQfx/vvv48CBAyhTpgz++Mc/YlifPvgfJpqbMgX46CNg5EhgwADnJg8aZB7LlwDMPN+jB1C3rvN2dIQIRAi4FBm6d+tMEoEUBFxeY+IrAr4iICHvq+nKmLF6GMgYSjUkAiKQSwJfffUVBg8ejJdffhm//PKL0fWpp56KO2+/Hb1POw2lpk4F5s2jmx647DLgppuASy4BypVzbuayZcC6dcCf/wyULev8eB0hAjEEXIoM3bt1RomAhLzOAREoISAhb6Jgsju7ZYfdih6up4cBD0+OTBMBETiUALeMGz16NCZPnoyvv/7a+LJmzZro3Lkzhg4diqPfeAN44AFg7VqgQQOge3egWzfg+OOFUgQ8RUBC3lPTIWMCSMDlNRZAIhpSkAlIyJuzayW7szPXSnZnh5LqiIAIiIBLAvPnz8ewYcOwbNkyY8u4cuXK4cILLzQy0bdo0eJg6zNmAK+9Zgr488+3t2/7nj2mt/30011aqcNFwD4BlyJDL+Hto7Zdk7tXdOzYEbNnz7Z9jNcqzp07FyNGjMCKFStQv359tG7d2sgZEsbi8hoLIzKN2ccEJOTNyStMkOyO+8rzX7VInfUAlOzOxye8TBcBEfA2gbVr1xpZ51955RXs3LnT2DLujDPOwD333GNkni/ZMi7dYXz/PcAH3IkTAWaw51p6axu6dNvUcSJgk4BLkSEhb5Ozk2r5FPIvvPCC8RJh0aJFaNmypROzS+oyWok7blSvXt0Q8OvWrTNefjZv3hxLly5Nq00/H+TyGvPz0GV7CAlIyNubdGa17wugAYAN9g7xdC09DHh6emScCISLgBU6//TTT+Obb74xBn/MMcfg2muvRcHAgai2ezf3kEsfSnEx8P77wLhxQGGhud79+usB7gP/m9+k366OFAGHBFyKDN27HfK2Uz3fQp47ZixcuDAtIc8tNingzz77
bCxZsqRkuL179wb/nhYWFuKqq66ygyEwdVxeY4HhoIGEg4CEvP15/gDATwAutn+IZ2vqYcCzUyPDRCA8BF577TUj6zy9RlbofKtWrfDwww+j2ZFHApMnA889Z4a/v/OOczB79wIMl6WA/+ADM+P8bbeZCfBq1HDeno4QAZcEXIoM3btd8o93eD6FPEPimesjXSFPb/ygQYOMrTMbc5vNSKHA5wuCNm3aoG9f+qHCU1xeY+EBpZEGgoCEvP1pHAmgHwCtkbfPTDVFQARE4BAC8bLOM3T+3nvvxY1duqAUM85PmgS8+64ptrt2Nbd+S8dz/vPPwIknAlxPT+97u3ZA6SD8CddJ5VcCLkWGLSF/a6dOaHbuuejau7eRVyLbJZf9MWx8wIABxlpw/mytBx81ahSqVq16yFDXr1+P/v3748033zQ+p2B+6qmnjOU5bINrylksIc/w9O7dux+yzjxeu9GdMCx+5cqVWL16tREiz1D5atWqGSKaa9RpY69evYxQdxbrc/580UUXGSH1XD5UXFxs/M9tNJ2UBg0aYNu2bdi8ebOTwwJd1+U1Fmg2GlzwCEjI25/TOQDaS8jbB6aaIiACIkACe/fuxdixYzFx4kRQyLMw6/w111yDIUOGoMauXcDo0ab3fds2M2Fdz54AQ0LpmXdT/vtfxum7aUHHikDGCLgUGbaE/MUNG6LDd99hRq1auPaee7Iu6HPVH73ODCGn4O3QoQNq1KgB8qRIpqClmLYKRXyzZs2wY8cOY904xTW934z4obCnwI8W8nwhQNHNdngc+2K+Doats60qVeJvbkRhzvbq1atn2MW+2A/bYl/WfLNNfs42rZcIixcvxpw5c4wQeNrTsGFD3MyXlg4KX0LQ675gwQKjffbHv63kQ5vCWFxeY2FEpjH7mICEvDl5ybafY/xlawCTAPCValSqZN/OvK2HAd+OToaLgAh4ggAfLhk6//777xueprJlyxpZ5xk6f0jW+Y0bTa8516336MGN4T1hv4wQgUwTcCkybN27251yCl5ZvRp7ATxXoULWBX2u+rPWfceGkVNM0xNOkVyXy2cAQ9xSKEfX3bBhgyHSGXYeK+QpwulRnzVrVsmU00P/yCOPHFI39nyw+qbX/cknnyz5mi8Z2A9F+/Dhw43PrfXs0UnoaHfsGnnWS1Ws6ANLyNMjz7FyHCz08DPsPmxh9Ry7y2ssFXp9LwKeIiAhb06Hne3nyIqCfrGnZjA9Y2w9DKTXtI4SAREIMwEmq2PW+RdffBE/M7QdwGmnnYa7774bPXr0SJx1fv9+oEwZ++iKioBXXwXOOstc+64iAj4g4FJk2Lp3W8LawmEI+vLlMaNGDVx7zTXo2r49yjHho1V43TVpkpze2rXA1q1x67S76iq88vXXJd9l6wUChTnDyGOTt1FsU3TTM9+kSRPDg07POgVytDCngaxLcR0bWk8BvHXr1sM87xTk/C5R6Hq8lwjsh5EDDP+PbZNed7ZnRQ/EE/Kpdubg8Yxu4osHRgywcLyTJk0yXpQy1J/fMSog3bX3PriUEpro8hrz89BlewgJSMibk55o+znrlNgCYCKAFQE5R2w9DARkrBqGCIhAlgns378f48ePxxNPPGE8PLLwAbNLly6GR/7oatWcifRk9jKD/fTpwNixwBdfACNHAgMGZHmEal4EMkPApciwde+OFfKW5TsBtANwOoCDvmMARx8NcAlKstKxIzB3bnwhD+CVON8Y/ZUvj9M7dsSTzz+fGYCRVui1JksKVQpY/m4JeYa60yPPz7nmPbpY38UK+URbtVlCPZ7IZ7vW97Fr27n+nfbEvgCgwKetyYQ8X1ikKmyHXnf+naWw59/dOlE7e1gvM6yw+1TtBel7l9dYkFBoLCEgICEfgkmOM0RbDwPhRKNRi4AI2CXw3nvv4f7778c777wDivkyZcrgvPPOw0MPPYRzzjkH+PBDM3HdCy8An39OdW+36cPrMZkT939/4gngxx+Byy8H+vUDzj0XiISTpt+4jhSB3BBwKTJs3bvjeeSnlS+PmTn0yE+rUAEzM7xGnwKYwpxinJ55a9s1zhyTxllCnmvOGYYfzxttCdxYIZ9I8Frh9dFh+9FnSjaEvJMzkd77RC8h6P3nC4iwJcJzeY05wa+6IpB3AhLyyaeAsWadAayJeO135H3GEhvQlC+HuTyIUV2RNf3rE1S39TDg4bHKNBEQgTwR2LJliyHeGbLKn1mYKOq2227DnXfeiTLWlm8TJ5p7tx97LHDjjQC3QKLnz2lhcrwxY4CpUwGG3zOLfZ8+jNd32pLqi0DeCbgUGbbu3dFr1rMhqGMh5qo/ClaGjVOEU0AzjJ7FEu6WkGe4OkPL43nkGe7Odux65NnOvHnzEmaTz4aQ58uGVMVKZMfQf4bVcwvP2CIhb+S04jOxiggEloCE/MGpnQCgB4DmAFYCaAXgDQBkVAxgbUQge1HMc88V/uW3Nkbm79xvhTf9eMXWw0Bgz3oNTAREwBEB7vH+7LPPYsyYMficnnUARx11FK688koMGzYMJ3KLt08/BSjemXmeyZpatQJ69QL+9CfAzRZY8+ebLwJuvRW4/XbguOMc2a7KIuAlArkQ8m0jWesz7RFPxDEX/VmedHrao5PK0SZrL/XYNfJMQMft5qIL69LLnuk18pkMraeX3dqSLh5za408oxMSvUiwEusxBH/JkiVeugSybovLayzr9qkDEcgkAQl5kyb3hx8FYBuAZgA2RN7i0cvdG0DDSB3uJT84kxMQaas+gJ4ABiZom9/3j9hkxaY+ElWX2+LRtkZRn9FVZo0ltlkJ+SxMopoUgaAR+PjjjzFo0CAjRJVbyPEBk9nmH3jgAVx66aUHh1tcDJx5prnOlqKbW8c15J/NDBQmtfv1V6By5Qw0piZEIL8EXIoMW/fuXO7rTpq56M8SpvSQz549u2QSuTac69Ep9C0hzy+tZHPk3bQpH+VgrCO31qjHy1rfvn17Yzs4q1DsMzke//VhFBA9JuvXG9veWVnjs+GRt7tGnlviWWv+Y19aWJEEjEpwuqVdfq8Q9727vMbcG6AWRCCHBCTkTdgMnafX3RLC9GgzPSsT3N0SmQ+G5/DzaLGcialiJnzeOXhnsvqKbpd9cts73onMFNCmaN/MBKyR3xk9wIiCaNuYiT9Rln1bDwOZGJzaEAER8BeBnTt3Gmvc6YH/kWvRAZxwwglGxnk+2B6ZaF/3VasAJlsqX95fA5a1IpBDAi5FRqjv3ZY4p+DmC0WuW6dQ5fp2vmzkHu70uDPkniH0VkI4fk7hzZB7Cl5mfI/ems1aZ87t2xiybu0jT+HP8PRV/NsGpvowQ/aj19O7FfKWEGefnTt3Rj/m/XBY+CLD2svesp0vHGjn66+/7rA1/1d3eY35H4BGECoCEvLmdFP0Rot2K6y+TdR2c1bofekMnSH1IlEADNnvwFwtCYQ8RTvFfLTIt140lIqyhQukOkaiCSj6GVpPL/7kOPaG+mEgQ/OnZkQgUARefvllI8M8H2aZDblChQq45JJLjD2QT83Wvu5c884t5K6
4QgnrAnU2aTCJCLgUGaG+d+/YscN4ocjEdiwU6gyT55ZrlpgtLCws2Z6O+8ZTuJM5y+DBg9GyZUtjjXz0+vnSpUsbLykp+Pk//wYy7weFcHQYv7VVHOstWLDAaJNCnmvomewzutAetvPTTz8d8jlt5jislwP8sm3btsaYuNY9+nMnVxEjpzgmJgGkmOeOIVYUgZN2glDX5TUWBAQaQ4gISMibk80w9IWRxHb83RLtDGO31sTTa84Q9kwJ+ejTjHcZCvF4HnlGC1DMxwryeB53rvHniwFGE1DIt4ys9489pUP9MBCi61tDFYGkBDZu3Gg83L700kugJ55rL8866yzj4fjqq682j92wgdmkgAsvBFozyCcDZdcu4JlnuKmz2T6T4v32txloWE2IgLcJuBQZunfbnF56qCnGraRw1mFz5841PN/p7q9OTzfX6VtC3qY5qpZDAi6vsRxaqq5EwD0BCXmTIUPX60b+MXydwp6CmBkvWaxkcgxnz3RoPdtPJuQp2Omxnxcz3VYm/UFRNm6PqnOAWzlHvYiIPlwPA+6vHbUgAr4kwKRM3O89es/3mjVr4rrrrsOQIUNQjXu+07v02mvmdm8MzaxUiRmlgFvivWt0gGHHDoDJpx57DNi0CWjfHhg4EGjOHKMqIhB8Ai5Fhu7dNk8RrmPn3zVrv3brMHrj6alPd0s2ZshnyP2IESNsWqJquSbg8hrLtbnqTwRcEZCQN/FRKNPjznXyTHhXDUCviBecXvhJkc8GABjjinj8gxMJeSuEPt5a99hjKPhpNyMIuMiKY0lkqx4GsjCJalIEvEyA2xMx/PKtt94q2fP9j3/8Ix5++GH8/ve/N03/4Qdg8mQz+/w33wBMEtW7N0Dv/FFHpT88JsH761+B8eOBnTuB668H+vfXFnLpE9WRPiXgUmTo3m1z3idPnmyE1dMj36EDH/EAeuPpUef6+HTCzrnufuTIkcZ2d0w0p+JNAi6vMW8OSlaJQAICEvIHwTBrPDPXUzxTuDNbPQvD2rnWPHoNfaZPqERCnuvoGRmQSMjzO+5zz9I38hLC2i4v3tp4y249DGR6BtWeCHiQANdi0sv+/PPPl6zVrFu3Lu644w5zz/cyZQ5azX3auV0cP+vSxRTwDHc/IgO3iR49gBkzAP7P7M8nn+xBWjJJBLJPwKXI0L3bwRRx7To951yrzkgjJsjjGniuqVcJLgGX11hwwWhkgSSQgSe0QHKJHhTFNEPto8PWMz3odIQ8Q+u5JMAS8k5s0sOAE1qqKwI+I8AtlJik7qOPPjIS11WqVAl/+tOfjM/qMLN8vPLFF2Y4/Q03ADVqZHbE330HlC0LHHNMZttVayLgMwIuRYbu3T6bb5mbewIur7HcG6weRcAFAQl5F/AyeGg6ofWuhfyrr76Kxo0bOxoGt6FSEQER8B6BTZs2gXsjM3zUSlzH/ZPvu+++kizO3rNaFomAfwl8++23jo3/8MMPcdlll/E45uAx06nbLxLy9lmpZkgJSMiHdOJDOuywCnkmrUuncN350ekcmOKYdJLdMUqA4f5WsjsnZhkPA04OsOrSu6ciAiLgHQL0vnPbuE8//dQwigmebrjhBiOkvnLlyqahu3cD/D5bSeWYxE5rRr1zUsiSnBDgLg8uioS8C3g6VAQSEZCQ17kRJgKu7kI+BsXEcFSk6Yw/eu/2TCFItf0ct8OLTVzHMXC/+3+mYYSEfBrQdIgIeIUA9yam950i3vK+/+53vzNC5w9Z/7lunZm4bsoU4MABM5ld+fKZGwZfDjz0ELB4MbB2rbuEeJmzSi2JQE4ISMjnBLM6EQFHBCTkHeFSZZ8TSEfI+nzIhvkUwSwU83MBzI6zvVsux5lMyDPZHtfpR6+Frw9gtYs97SXkczm76ksEMkTghRdewNChQ/Hxxx8bLVavXh3dunXDQw89dND7TsH+j3+Y27zxf2ab79bNTF53xhmZsYT9P/gg00ADJ55obiHXvXtmXxJkxlK1IgJZIyAhnzW0algE0iYgIZ82Oh3oQwJhFfKcKm4rR3HMfUmsePHCiKh/McdzmUzIM4s+v+dGy9xajoUeegr5sWnaKSGfJjgdJgK5JrBlyxYj0/KsWbPwyy+/gOLh7LPPNrzvrVtzQ4tI2bPH3Pd93DiAnvgmTcx936+5BrBC7N0av3Kl6YGfNw846SRg8GDgxhsl4N1y1fG+JCAh78tpk9EBJyAhH/AJ1vAOIRBmIR8NgmKeop7inoXCnqKea9DTCV23e5rR207vutUvowPWxVn3XjeyHR4T3FVPsUe8nb4NIa9kd3ZQqY4I5IfAyy+/jAceeKAk8zy3T7r++uuNfd/j7mG8fz9wyinA2WcDd90F/OEPmdk6zho+t6abNAlg1nsKeGa3L1cuP3DUqwh4gICS3XlgEmSCCMQQkJDXKREmAhLyh882RX2vyPpzCvptES89RfbigJwcynwbkInUMIJFgPu+Dxw4EDNmzAB/psevWbNmGDZsGNq2bZt6sLt2ARUqpK6XTo3nngP27QO6djW3klMRARFwTMClyNC92zFxHRA2Ai6vsbDh0nh9TkBCPvkExor6rVnKWp/r08h4GGB267IJHsj79OmDvn375tou9ScCoSTA/d7vuusuvP322ygqKkLVqlVx7bXXGgKenngVERAB/xAYM2YMxo6Nv/Jt37592LzZ2DhHWev9M6WuLG3Tpg2WLVsGLpMKY+GWqCNGjMCKFStQv359Y0nYBC4Dy1KRkM8SWDXrSQIS8qmnpWkk1N1aS1869SGer5FyjXxBQYGxfZWKCIhA9gg899xzRvK6dVzTDuC0004zQufbt7dW20T63rTJXP9Or/zvfpc9g9SyCIiAawK8d/K6TlEk5FMRCsj3F110EZYvXw7uNuKXwsSqHTt2xKJFi9CyZcu0zR49erQRZcbErBTwvNfxpUbz5s2xdGlauyCntEVCPiUiVQgQAQn5+JPZJLJmvicAusPIaXkkxP6RAMy/PPIBmEQNwZ8Efv31VwwaNAhTp041kteVLl0a9NiMGzcOjRo1OnRQTC73178CM2cCpUqZP/fkn6UMlzVrgEWLzMz2KiIgAq4IyCPvCl/gDuYyKXrj69ZluiN/FAr5Tp06YeHChWkL+e3btxsCnslZlyxZUjLw3r174+mnn0ZhYSGuuuqqjAORkM84UjXoYQIS8gcnh39huTaenncmoCMbusmY8I7r49d7eB6dmqZ1dk6Jqb4IuCTw+eef484778TixYuN8HkmrOvevbsRPn/kkUcebJ3bx73yCvD448C//gXUrg3cdpsp4I8+2qUVMYd//bWZhX7qVKBWLeCLL7QXfGYJqzUROISAS5Ghe7fOp5wQYDh8586dXQl5euP50prRCI0bNy6xmwKfLwn4AjsbSzhdXmM54atORCBTBMIu5CnerXXw0eKdwp0CPkjiPfqc0cNApq4gtSMCKQjMnj0b999/P1av5o6RQMOGDfHggw/i6quvPvxIesW5ZRw95C1aAHffDXTokPns8AzVHzHC3Gu+YkVgwADg9tuBSpU0nyIgAl
[embedded figure: base64-encoded PNG image data omitted]
|
NuGridREPO_NAMENuPyCEEPATH_START.@NuPyCEE_extracted@NuPyCEE-master@regression_tests@temp@SYGMA_SSP_CNO_yield_input.ipynb@.PATH_END.py
|
{
"filename": "CONTRIBUTING.md",
"repo_name": "arviz-devs/arviz",
"repo_path": "arviz_extracted/arviz-main/CONTRIBUTING.md",
"type": "Markdown"
}
|
# Contributing to ArviZ
This document outlines only the most common contributions.
Please see the [Contributing guide](https://python.arviz.org/en/latest/contributing/index.html)
in our documentation for a better view of how you can contribute to ArviZ.
We welcome a wide range of contributions, not only code!
## Reporting issues
If you encounter any bug or incorrect behaviour while using ArviZ,
please report an issue to our [issue tracker](https://github.com/arviz-devs/arviz/issues).
Please include any supporting information, in particular the version of
ArviZ that you are using.
The issue tracker has several templates available to help in writing the issue
and including useful supporting information.
## Contributing code
Thanks for your interest in contributing code to ArviZ!
* If this is your first time contributing to a project on GitHub, please read through our [step by step](https://python.arviz.org/en/latest/contributing/pr_tutorial.html) guide to contributing to ArviZ
* If you have contributed to other projects on GitHub you can go straight to our [pull request checklist](https://python.arviz.org/en/latest/contributing/pr_checklist.html)
### Adding new features
If you are interested in adding a new feature to ArviZ,
first submit an issue using the "Feature Request" label for the community
to discuss its place and implementation within ArviZ.
|
arviz-devsREPO_NAMEarvizPATH_START.@arviz_extracted@arviz-main@CONTRIBUTING.md@.PATH_END.py
|
{
"filename": "result.py",
"repo_name": "NannyML/nannyml",
"repo_path": "nannyml_extracted/nannyml-main/nannyml/performance_calculation/result.py",
"type": "Python"
}
|
# Author: Niels Nuyttens <niels@nannyml.com>
#
# License: Apache Software License 2.0
"""Contains the results of the realized performance calculation and provides filtering and plotting functionality."""
from __future__ import annotations
import copy
from typing import Dict, List, Optional, Union, cast
import pandas as pd
import plotly.graph_objects as go
from nannyml._typing import Key, ProblemType, Self
from nannyml.base import PerMetricResult
from nannyml.exceptions import InvalidArgumentsException
from nannyml.performance_calculation import SUPPORTED_METRIC_FILTER_VALUES
from nannyml.performance_calculation.metrics.base import Metric
from nannyml.plots.blueprints.comparisons import ResultCompareMixin
from nannyml.plots.blueprints.metrics import plot_metrics
from nannyml.usage_logging import UsageEvent, log_usage
class Result(PerMetricResult[Metric], ResultCompareMixin):
"""Wraps performance calculation results and provides filtering and plotting functionality."""
metrics: List[Metric]
def __init__(
self,
results_data: pd.DataFrame,
problem_type: ProblemType,
y_pred: Optional[str],
y_pred_proba: Optional[Union[str, Dict[str, str]]],
y_true: str,
metrics: List[Metric],
timestamp_column_name: Optional[str] = None,
reference_data: Optional[pd.DataFrame] = None,
analysis_data: Optional[pd.DataFrame] = None,
):
"""Creates a new Result instance.
Parameters
----------
results_data: pd.DataFrame
Results data returned by a CBPE estimator.
problem_type: ProblemType
Determines which method to use. Allowed values are:
- 'regression'
- 'classification_binary'
- 'classification_multiclass'
y_pred: str
The name of the column containing your model predictions.
y_pred_proba: Union[str, Dict[str, str]]
Name(s) of the column(s) containing your model output.
        - For binary classification, pass a single string referring to the model output column.
- For multiclass classification, pass a dictionary that maps a class string to the column name \
containing model outputs for that class.
y_true: str
The name of the column containing target values (that are provided in reference data during fitting).
metrics: List[nannyml.performance_calculation.metrics.base.Metric]
List of metrics to evaluate.
timestamp_column_name: str, default=None
The name of the column containing the timestamp of the model prediction.
If not given, plots will not use a time-based x-axis but will use the index of the chunks instead.
reference_data: pd.DataFrame, default=None
The reference data used for fitting. Must have target data available.
analysis_data: pd.DataFrame, default=None
            The data on which NannyML calculates the performance.
"""
super().__init__(results_data, metrics)
self.problem_type = problem_type
self.y_true = y_true
self.y_pred_proba = y_pred_proba
self.y_pred = y_pred
self.timestamp_column_name = timestamp_column_name
self.reference_data = reference_data
self.analysis_data = analysis_data
def keys(self) -> List[Key]:
"""Creates a list of keys where each Key is a `namedtuple('Key', 'properties display_names')`."""
return [
Key(
properties=(component[1],),
display_names=(
f'realized {component[0]}',
component[0],
metric.name,
),
)
for metric in self.metrics
for component in cast(Metric, metric).components
]
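    # Illustrative note (added for this extract; not in the original nannyml source): for a
    # single-component metric such as F1, ``components`` typically holds one (display name,
    # column name) pair, so ``keys()`` would yield something roughly like
    #   Key(properties=('f1',), display_names=('realized F1', 'F1', 'f1'))
    # while multi-component metrics (e.g. the confusion matrix) produce one Key per component.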
@log_usage(UsageEvent.PERFORMANCE_PLOT, metadata_from_kwargs=['kind'])
def plot(
self,
kind: str = 'performance',
*args,
**kwargs,
) -> go.Figure:
"""Render realized performance metrics.
This function will return a :class:`plotly.graph_objects.Figure` object.
Parameters
----------
kind: str, default='performance'
The kind of plot to render. Only the 'performance' plot is currently available.
Raises
------
InvalidArgumentsException: when an unknown plot ``kind`` is provided.
Returns
-------
fig: :class:`plotly.graph_objs._figure.Figure`
A :class:`~plotly.graph_objs._figure.Figure` object containing the requested drift plot.
Can be saved to disk using the :meth:`~plotly.graph_objs._figure.Figure.write_image` method
or shown rendered on screen using the :meth:`~plotly.graph_objs._figure.Figure.show` method.
Examples
--------
>>> import nannyml as nml
>>> from IPython.display import display
>>> reference_df, analysis_df, analysis_targets_df = nml.load_synthetic_car_loan_dataset()
>>> analysis_df = analysis_df.merge(analysis_targets_df, left_index=True, right_index=True)
>>> display(reference_df.head(3))
>>> calc = nml.PerformanceCalculator(
... y_pred_proba='y_pred_proba',
... y_pred='y_pred',
... y_true='repaid',
... timestamp_column_name='timestamp',
... problem_type='classification_binary',
... metrics=['roc_auc', 'f1', 'precision', 'recall', 'specificity', 'accuracy'],
... chunk_size=5000)
>>> calc.fit(reference_df)
>>> results = calc.calculate(analysis_df)
>>> display(results.filter(period='analysis').to_df())
>>> display(results.filter(period='reference').to_df())
>>> figure = results.plot()
>>> figure.show()
"""
if kind == 'performance':
return plot_metrics(
result=self,
title='Realized performance',
subplot_title_format='Realized <b>{display_names[1]}</b>',
subplot_y_axis_title_format='{display_names[1]}',
)
else:
raise InvalidArgumentsException(f"unknown plot kind '{kind}'. " f"Please provide on of: ['performance'].")
def _filter(self, period: str, metrics: Optional[List[str]] = None, *args, **kwargs) -> Self:
"""Filter the results based on the specified period and metrics."""
if metrics is None:
filtered_metrics = self.metrics
else:
filtered_metrics = []
for name in metrics:
if name not in SUPPORTED_METRIC_FILTER_VALUES:
raise InvalidArgumentsException(f"invalid metric '{name}'")
m = self._get_metric_by_name(name)
if m:
filtered_metrics = filtered_metrics + [m]
else:
raise InvalidArgumentsException(f"no '{name}' in result, did you calculate it?")
metric_column_names = [name for metric in filtered_metrics for name in metric.column_names]
res = super()._filter(period, metric_column_names, *args, **kwargs)
res.metrics = filtered_metrics
return res
def _get_metric_by_name(self, name: str) -> Optional[Metric]:
for metric in self.metrics:
# If we match the metric by name, return the metric
# E.g. matching the name 'confusion_matrix'
if name == metric.name:
return metric
# If we match one of the metric component names
# E.g. matching the name 'true_positive' with the confusion matrix metric
elif name in metric.column_names:
# Only retain the component whose column name was given to filter on
res = copy.deepcopy(metric)
res.components = list(filter(lambda c: c[1] == name, metric.components))
return res
else:
continue
return None
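# --- Illustrative usage sketch (added for this extract; not part of the original nannyml module) ---
# Assuming the public ``filter`` method inherited from ``PerMetricResult`` forwards its ``metrics``
# keyword to ``_filter`` above, selecting a single confusion-matrix component and plotting it
# might look like this (``results`` is a placeholder for a Result produced by a calculator):
#
#     tp_only = results.filter(period='analysis', metrics=['true_positive'])
#     tp_only.plot(kind='performance').show()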
|
NannyMLREPO_NAMEnannymlPATH_START.@nannyml_extracted@nannyml-main@nannyml@performance_calculation@result.py@.PATH_END.py
|
{
"filename": "common.py",
"repo_name": "pandas-dev/pandas",
"repo_path": "pandas_extracted/pandas-main/pandas/tests/arithmetic/common.py",
"type": "Python"
}
|
"""
Assertion helpers for arithmetic tests.
"""
import numpy as np
import pytest
from pandas import (
DataFrame,
Index,
Series,
array,
)
import pandas._testing as tm
from pandas.core.arrays import (
BooleanArray,
NumpyExtensionArray,
)
def assert_cannot_add(left, right, msg="cannot add"):
"""
Helper function to assert that two objects cannot be added.
Parameters
----------
left : object
The first operand.
right : object
The second operand.
msg : str, default "cannot add"
The error message expected in the TypeError.
"""
with pytest.raises(TypeError, match=msg):
left + right
with pytest.raises(TypeError, match=msg):
right + left
def assert_invalid_addsub_type(left, right, msg=None):
"""
Helper function to assert that two objects can
neither be added nor subtracted.
Parameters
----------
left : object
The first operand.
right : object
The second operand.
msg : str or None, default None
The error message expected in the TypeError.
"""
with pytest.raises(TypeError, match=msg):
left + right
with pytest.raises(TypeError, match=msg):
right + left
with pytest.raises(TypeError, match=msg):
left - right
with pytest.raises(TypeError, match=msg):
right - left
def get_upcast_box(left, right, is_cmp: bool = False):
"""
Get the box to use for 'expected' in an arithmetic or comparison operation.
    Parameters
    ----------
left : Any
right : Any
is_cmp : bool, default False
Whether the operation is a comparison method.
"""
if isinstance(left, DataFrame) or isinstance(right, DataFrame):
return DataFrame
if isinstance(left, Series) or isinstance(right, Series):
if is_cmp and isinstance(left, Index):
# Index does not defer for comparisons
return np.array
return Series
if isinstance(left, Index) or isinstance(right, Index):
if is_cmp:
return np.array
return Index
return tm.to_array
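# Illustrative sketch (added for this extract; not part of the original pandas module): a tiny
# helper demonstrating the box precedence implemented by get_upcast_box above. It relies only
# on names already imported at the top of this file.
def _example_upcast_box_precedence():
    ser = Series([1, 2, 3])
    df = DataFrame({"a": [1, 2, 3]})
    idx = Index([1, 2, 3])
    assert get_upcast_box(ser, df) is DataFrame  # a DataFrame operand outranks a Series
    assert get_upcast_box(idx, ser) is Series  # a Series operand outranks an Index
    assert get_upcast_box(idx, idx, is_cmp=True) is np.array  # Index comparisons defer to np.array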
def assert_invalid_comparison(left, right, box):
"""
Assert that comparison operations with mismatched types behave correctly.
Parameters
----------
left : np.ndarray, ExtensionArray, Index, or Series
right : object
box : {pd.DataFrame, pd.Series, pd.Index, pd.array, tm.to_array}
"""
# Not for tznaive-tzaware comparison
# Note: not quite the same as how we do this for tm.box_expected
xbox = box if box not in [Index, array] else np.array
def xbox2(x):
# Eventually we'd like this to be tighter, but for now we'll
# just exclude NumpyExtensionArray[bool]
if isinstance(x, NumpyExtensionArray):
return x._ndarray
if isinstance(x, BooleanArray):
# NB: we are assuming no pd.NAs for now
return x.astype(bool)
return x
# rev_box: box to use for reversed comparisons
rev_box = xbox
if isinstance(right, Index) and isinstance(left, Series):
rev_box = np.array
result = xbox2(left == right)
expected = xbox(np.zeros(result.shape, dtype=np.bool_))
tm.assert_equal(result, expected)
result = xbox2(right == left)
tm.assert_equal(result, rev_box(expected))
result = xbox2(left != right)
tm.assert_equal(result, ~expected)
result = xbox2(right != left)
tm.assert_equal(result, rev_box(~expected))
msg = "|".join(
[
"Invalid comparison between",
"Cannot compare type",
"not supported between",
"invalid type promotion",
(
# GH#36706 npdev 1.20.0 2020-09-28
r"The DTypes <class 'numpy.dtype\[datetime64\]'> and "
r"<class 'numpy.dtype\[int64\]'> do not have a common DType. "
"For example they cannot be stored in a single array unless the "
"dtype is `object`."
),
]
)
with pytest.raises(TypeError, match=msg):
left < right
with pytest.raises(TypeError, match=msg):
left <= right
with pytest.raises(TypeError, match=msg):
left > right
with pytest.raises(TypeError, match=msg):
left >= right
with pytest.raises(TypeError, match=msg):
right < left
with pytest.raises(TypeError, match=msg):
right <= left
with pytest.raises(TypeError, match=msg):
right > left
with pytest.raises(TypeError, match=msg):
right >= left
|
pandas-devREPO_NAMEpandasPATH_START.@pandas_extracted@pandas-main@pandas@tests@arithmetic@common.py@.PATH_END.py
|
{
"filename": "README.md",
"repo_name": "SKA-INAF/caesar-rest",
"repo_path": "caesar-rest_extracted/caesar-rest-master/README.md",
"type": "Markdown"
}
|
<p align="left">
<img src="share/CAESAR-REST_Architecture.png" alt="Software architecture" width="500" height="500">
</p>
# caesar-rest
caesar-rest is a RESTful web service for astronomical source extraction and classification with the caesar source extractor [https://github.com/SKA-INAF/caesar]. The software is developed in Python and consists of a few containerized microservices, deployable on standalone servers or on a distributed cloud infrastructure. The core component is the REST web application, based on the Flask framework, running behind an nginx+uwsgi HTTP server, and providing APIs for managing the input data (e.g. data upload/download/removal) and source finding jobs (e.g. submit, get status, get outputs) with different job management systems (Kubernetes, Slurm, Celery). Additional services (AAI, user DB, log storage, job monitor, accounting) enable user authentication, the storage and retrieval of user data and job information, the monitoring of submitted jobs, and the aggregation of service logs and user data/job stats.
Besides caesar, we also plan to integrate other tools widely used in the radio community (e.g. Aegean, PyBDSF) as well as newly developed source finders based on deep learning models.
## **Status**
This software is under development. It was originally tested with Python 2.7 but has since moved to Python 3.6 (some apps are only available for Python 3).
## **Credit**
This software is distributed under the GPLv3 license. If you use caesar-rest for your research, please add a link to this repository or acknowledge the authors in your papers.
## **Installation**
### **Install dependencies**
To run caesar rest service you need to install the following tools:
* Flask [https://palletsprojects.com/p/flask/]
* uwsgi [https://uwsgi-docs.readthedocs.io/en/latest/index.html]
* nginx [https://nginx.org/]
* mongodb [https://www.mongodb.com/]
* flask-pymongo python module [https://flask-pymongo.readthedocs.io/en/latest/]
* structlog python module [https://www.structlog.org/en/stable/]
For the Celery-based job management, you need to install celery, a broker and a result backend service:
* celery [http://www.celeryproject.org/]
* broker: rabbitmq [https://www.rabbitmq.com/]
* result backend: redis [https://redis.io/] or mongodb [https://www.mongodb.com/]
For the Kubernetes-based job management, you need to install the Kubernetes python client library:
* kubernetes [https://pypi.org/project/kubernetes/]
For the Slurm-based job management, you need to install these python modules:
* requests [https://docs.python-requests.org/en/master/]
* jwt [https://pypi.org/project/jwt/]
To enable OpenID Connect based authentication you need to install:
* flask-oidc-ex python module [https://pypi.org/project/flask-oidc-ex/]
To enable log forwarding to a LogStash/ElasticSearch service, you need to install the filebeat service:
* filebeat [https://www.elastic.co/guide/en/beats/filebeat/index.html]
### **Package installation**
To build and install the package:
* Create a local install directory, e.g. ```$INSTALL_DIR```
* Add installation path to your ```PYTHONPATH``` environment variable:
``` export PYTHONPATH=$PYTHONPATH:$INSTALL_DIR/lib/python3.6/site-packages ```
* Build and install package:
``` python3.6 setup.py sdist bdist_wheel```
``` python3.6 setup.py build```
``` python3.6 setup.py install --prefix=$INSTALL_DIR```
All dependencies will be automatically downloaded and installed in ```$INSTALL_DIR```.
To use package scripts:
* Add binary directory to your ```PATH``` environment variable:
``` export PATH=$PATH:$INSTALL_DIR/bin ```
## **How to run the service?**
In the following we describe the steps needed to deploy and run the application and the auxiliary services. Three possible deployment options are described below, depending on whether job management is done with Celery, Kubernetes, or Slurm. To ease the deployment we provide Docker containers and configuration files for Docker Compose or Kubernetes.
### **Preliminary setup**
Before running the application you must carry out some preparatory steps (a minimal shell sketch of these steps is given after this list):
* (OPTIONAL) Create a dedicated user & group (e.g. `caesar`) allowed to run the application and services, and give it ownership of the directories created below
* Create the application working dir (by default `/opt/caesar-rest`)
* (OPTIONAL) Mount an external storage in the application working dir, for example using rclone: `/usr/bin/rclone mount --daemon [--uid=[UID] --gid=[UID]] --umask 000 --allow-other --file-perms 0777 --dir-cache-time 0m5s --vfs-cache-mode full [RCLONE_REMOTE_STORAGE]:[RCLONE_REMOTE_STORAGE_PATH] /opt/caesar-rest -vvv` where `UID` is the Linux user id of the user previously created.
* Create the top directory for data upload (by default `/opt/caesar-rest/data`)
* Create the top directory for jobs (by default `/opt/caesar-rest/jobs`)
* (OPTIONAL) Create the log directory for system services (see below), e.g. `/opt/caesar-rest/logs`
* (OPTIONAL) Create the run directory for system services (see below), e.g. `/opt/caesar-rest/run`
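A minimal sketch of these preparatory commands, assuming the default paths and a dedicated `caesar` user (adapt paths and ownership to your setup):
```
# create a dedicated user/group (optional)
sudo useradd -r -m caesar
# create the working, data, job, log and run directories
sudo mkdir -p /opt/caesar-rest/data /opt/caesar-rest/jobs /opt/caesar-rest/logs /opt/caesar-rest/run
# give ownership to the caesar user
sudo chown -R caesar:caesar /opt/caesar-rest
```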
### **Run DB service**
caesar-rest requires a MongoDB service to store user data and job information. To start the DB service:
```systemctl start mongodb.service```
Alternatively you can use the Docker container ```sriggi/caesar-rest-db:latest``` (see https://hub.docker.com/r/sriggi/caesar-rest-db) and deploy it with Docker Compose or Kubernetes (see the configuration files under the repository ```config``` directory).
### **Run Filebeat service**
caesar-rest uses filebeat to forward file logs to an ElasticSearch service. To start the service:
```systemctl start filebeat.service```
Alternatively, you can use the Docker container for the application ```sriggi/caesar-rest:latest``` (see https://hub.docker.com/r/sriggi/caesar-rest) setting the container option ```FORWARD_LOGS=1```. This will start the filebeat service in the web application container.
### **Run Celery services (OPTIONAL)**
If you want to manage jobs with Celery, you must run a message broker service (e.g. rabbitmq), a task store service (e.g. redis or mongodb) and one or more Celery worker services.
#### **Run broker service**
To run the rabbitmq message broker service:
```systemctl start rabbitmq-server.service```
Alternatively, you can use the Docker container ```sriggi/caesar-rest-broker:latest``` (see https://hub.docker.com/r/sriggi/caesar-rest-broker) and deploy it with Docker Compose or Kubernetes (see the configuration files under the repository ```config``` directory).
#### **Run task store service**
If you have chosen MongoDB as the task store, you are already running the service (see the previous section `Run DB service`). If you want to use Redis as the task store instead, run it as follows:
```systemctl start redis.service```
A Docker container for this service is still to be produced.
#### **Run celery workers**
Run a Celery worker with the desired concurrency level (e.g. 2), message queue (e.g. celery), and broker and result backend URLs:
```celery --broker=[BROKER_URL] --result-backend=[RESULT_BACKEND_URL] --app=caesar_rest worker --loglevel=INFO --concurrency=2 -Q celery```
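For example, with a local RabbitMQ broker and a Redis result backend (illustrative values only, matching the defaults listed later in this document), the invocation could look like:
```celery --broker=amqp://guest:guest@localhost:5672// --result-backend=redis://localhost:6379/0 --app=caesar_rest worker --loglevel=INFO --concurrency=2 -Q celery```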
In production you may want to run this as a system service:
* Create a `/etc/default/caesar-workers` configuration file (e.g. see the example in the `config/celery` directory):
```
# The names of the workers. Only one here.
CELERYD_NODES="caesar_worker"
# The name of the Celery App
CELERY_APP="caesar_rest"
# Working dir
CELERYD_CHDIR="/opt/caesar-rest"
# Additional options
CELERYD_OPTS="--time-limit=300 --concurrency=4"
# Log and PID directories
CELERYD_LOG_FILE="/opt/caesar-rest/logs/%n%I.log"
CELERYD_PID_FILE="/opt/caesar-rest/run/%n.pid"
# Log level
CELERYD_LOG_LEVEL=INFO
# Path to celery binary, that is in your virtual environment
CELERY_BIN=/usr/local/bin/celery
```
* Create a `/etc/systemd/system/caesar-workers.service` systemd service file:
```
[Unit]
Description=Caesar Celery Worker Service
After=network.target rabbitmq-server.target redis.target
[Service]
Type=forking
User=caesar
Group=caesar
EnvironmentFile=/etc/default/caesar-workers
Environment="PATH=$INSTALL_DIR/bin"
Environment="PYTHONPATH=$INSTALL_DIR/lib/python2.7/site-packages"
WorkingDirectory=/opt/caesar-rest
ExecStart=/bin/sh -c '${CELERY_BIN} multi start ${CELERYD_NODES} \
-A ${CELERY_APP} --pidfile=${CELERYD_PID_FILE} \
--logfile=${CELERYD_LOG_FILE} --loglevel=${CELERYD_LOG_LEVEL} ${CELERYD_OPTS}'
ExecStop=/bin/sh -c '${CELERY_BIN} multi stopwait ${CELERYD_NODES} \
--pidfile=${CELERYD_PID_FILE}'
ExecReload=/bin/sh -c '${CELERY_BIN} multi restart ${CELERYD_NODES} \
-A ${CELERY_APP} --pidfile=${CELERYD_PID_FILE} \
--logfile=${CELERYD_LOG_FILE} --loglevel=${CELERYD_LOG_LEVEL} ${CELERYD_OPTS}'
[Install]
WantedBy=multi-user.target
```
* Start the service:
```sudo systemctl start caesar-workers.service```
Alternatively, you can use the Docker container ```sriggi/caesar-rest-worker:latest``` (https://hub.docker.com/r/sriggi/caesar-rest-worker) and deploy it with Docker Compose or Kubernetes (see the configuration files under the repository ```config``` directory).
### **Run the web application**
#### **Run the application in development mode**
To run caesar-rest in development mode, e.g. for debug or testing purposes:
```$INSTALL_DIR/bin/run_app.py --[ARGS]```
where supported `ARGS` are:
MAIN OPTIONS
* `datadir=[DATADIR]`: Directory where to store uploaded data (default: /opt/caesar-rest/data)
* `jobdir=[JOBDIR]`: Top directory where to store job data (default: /opt/caesar-rest/jobs)
* `job_scheduler=[SCHEDULER]`: Job scheduler to be used. Options are: {celery,kubernetes,slurm} (default=celery)
* `debug`: Run Flask application in debug mode if given
* `ssl`: To enable run of Flask application over HTTPS
AAI OPTIONS
* `aai`: Enable service authentication
* `secretfile=[SECRETFILE]`: File (.json) with OpenID Connect client auth credentials
DB OPTIONS
* `dbname=[DBNAME]`: Name of MongoDB database (default=caesardb)
* `dbhost=[DBHOST]`: Host of MongoDB database (default=localhost)
* `dbport=[DBPORT]`: Port of MongoDB database (default=27017)
LOGGING OPTIONS
* `loglevel=[LEVEL]`: Log level to be used (default=INFO)
* `logtofile`: Enable logging to file (default=no)
* `logdir`: Directory where to store logs (default=/opt/caesar-rest/logs)
* `logfile`: Name of json log file (default=app_logs.json)
* `logfile_maxsize`: Max file size in MB (default=5)
CELERY OPTIONS
* `result_backend_host=[BACKEND_HOST]`: Host of Celery result backend service (default=localhost)
* `result_backend_port=[BACKEND_PORT]`: Port of Celery result backend service (default=6379)
* `result_backend_proto=[BACKEND_PROTO]`: Celery result backend type. Options are: {mongodb,redis} (default=redis)
* `result_backend_dbname=[BACKEND_DBNAME]`: Celery result backend database name (default=0)
* `broker_host=[BROKER_HOST]`: Host of Celery broker service (default=localhost)
* `broker_port=[BROKER_PORT]`: Port of Celery broker service (default=5672)
* `broker_proto=[BROKER_PROTO]`: Protocol of Celery broker. Options are: {amqp,redis} (default=amqp)
* `broker_user=[BROKER_USER]`: Username used in Celery broker (default=guest)
* `broker_pass=[BROKER_PASS]`: Password used in Celery broker (default=guest)
KUBERNETES OPTIONS
* `kube_config=[FILE_PATH]`: Kube configuration file path (default=search in standard path)
* `kube_cafile=[FILE_PATH]`: Kube certificate authority file path
* `kube_keyfile=[FILE_PATH]`: Kube private key file path
* `kube_certfile=[FILE_PATH]`: Kube certificate file path
SLURM OPTIONS
* `slurm_keyfile=[FILE_PATH]`: Slurm rest service private key file path
* `slurm_user=[SLURM_USER]`: Username enabled to run in Slurm cluster (default=cirasa)
* `slurm_host=[SLURM_HOST]`: Slurm cluster host/ipaddress (default=localhost)
* `slurm_port=[SLURM_PORT]`: Slurm rest service port (default=6820)
* `slurm_batch_workdir=[SLURM_BATCH_WORKDIR]`: Cluster directory where to place Slurm batch logs (must be writable by slurm_user) (default=/opt/slurm/batchlogs/caesar-rest)
* `slurm_queue=[SLURM_QUEUE]`: Slurm cluster queue for submitting jobs (default=normal)
* `slurm_jobdir=[SLURM_JOBDIR]`: Path at which the job directory is mounted in Slurm cluster (default=/mnt/storage/jobs)
* `slurm_datadir=[SLURM_DATADIR]`: Path at which the data directory is mounted in Slurm cluster (default=/mnt/storage/data)
* `slurm_max_cores_per_job=[SLURM_MAX_CORES_PER_JOB]`: Slurm maximum number of cores reserved for a job (default=4)
VOLUME MOUNT OPTIONS
* `mount_rclone_volume`: Enable mounting of Nextcloud volume through rclone in container jobs (default=no)
* `mount_volume_path=[PATH]`: Mount volume path for container jobs (default=/mnt/storage)
* `rclone_storage_name=[NAME]`: rclone remote storage name (default=neanias-nextcloud)
* `rclone_storage_path=[PATH]`: rclone remote storage path (default=.)
Flask default options are defined in `config.py`, and Celery options in `celery_config.py`. Additional options may be defined in the future to override the default Flask and Celery options.
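As an illustration only (the option values below are examples, not requirements), a development run using the Celery scheduler and debug logging could look like:
```$INSTALL_DIR/bin/run_app.py --datadir=/opt/caesar-rest/data --jobdir=/opt/caesar-rest/jobs --job_scheduler=celery --debug --loglevel=DEBUG --logtofile```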
#### **Run the application in production**
In a production environment you can run the application behind an nginx+uwsgi (or nginx+gunicorn) server. In the `config` directory of the repository you can find sample files to create and configure the required services. For example:
* Start the application with uwsgi:
```uwsgi --wsgi-file $INSTALL_DIR/bin/run_app.py --callable app [WSGI_CONFIG_FILE]```
where ```WSGI_CONFIG_FILE``` is a configuration file (.ini format) for uwsgi. A sample configuration file is provided in the `config/uwsgi` directory:
```
[uwsgi]
processes = 4
threads = 2
socket = ./run/caesar-rest.sock
;socket = :5000
;http-socket = :5000
socket-timeout = 65
buffer-size = 32768
master = true
chmod-socket = 660
vacuum = true
die-on-term = true
```
Alternatively you can configure options from command line, e.g.:
```uwsgi --uid=[RUNUSER] --gid=[RUNUSER] --binary-path /usr/local/bin/uwsgi --wsgi-file=$INSTALL_DIR/bin/run_app.py --callable=app --pyargv=[APP_ARGS] --workers=[NWORKERS] --enable-threads --threads=[NTHREADS] --http-socket="0.0.0.0:[PORT]" --http-timeout=[SOCKET_TIMEOUT] --http-enable-proxy-protocol --http-auto-chunked --socket-timeout=[SOCKET_TIMEOUT] --master --chmod-socket=660 --chown-socket=[RUNUSER] --buffer-size=[BUFFER_SIZE] --vacuum --die-on-term ```
where ```APP_ARGS``` are the application command line options described in the previous paragraph and ```RUNUSER``` is the username chosen for running the service. The other options are described in the uwsgi online documentation.
In production you may want to run this as a system service:
- Create an `/etc/systemd/system/caesar-rest.service` systemd service file, following the example provided in the `config/uwsgi` directory:
```
[Unit]
Description=uWSGI instance to serve caesar-rest application
After=network.target caesar-workers.target
[Service]
User=caesar
Group=www-data
WorkingDirectory=/opt/caesar-rest
Environment="PATH=$INSTALL_DIR/bin"
Environment="PYTHONPATH=$INSTALL_DIR/lib/python2.7/site-packages"
ExecStart=/usr/bin/uwsgi --wsgi-file $INSTALL_DIR/bin/run_app.py --callable app --ini /opt/caesar-rest/config/uwsgi.ini
[Install]
WantedBy=multi-user.target
```
- Start the service:
```sudo systemctl start caesar-rest.service```
Alternatively, you can use the Docker container `sriggi/caesar-rest:devel` (see https://hub.docker.com/r/sriggi/caesar-rest) and deploy it with Docker Compose or Kubernetes (see the configuration files under the repository ```config``` directory). All application command line options described in the previous section can be configured from container env variables.
* Start the nginx service:
- Create a `/etc/nginx/conf.d/nginx.conf` configuration file (see example file provided in the `config/nginx` directory):
```
server {
listen 8080;
client_max_body_size 1000M;
sendfile on;
keepalive_timeout 0;
location / {
include uwsgi_params;
uwsgi_pass unix:/opt/caesar-rest/run/caesar-rest.sock;
}
}
```
With this sample configuration the nginx server will listen on port 8080 and forward requests to the caesar-rest application via a Unix socket. An alternative configuration could be:
```
upstream backend {
least_conn; # load balancing strategy
server [HOST1]:[PORT];
server [HOST2]:[PORT];
keepalive 64;
}
server {
listen 8080;
client_max_body_size 1000M;
large_client_header_buffers 4 32k;
sendfile on;
keepalive_timeout 0;
location / {
include uwsgi_params;
uwsgi_pass backend;
}
}
```
with nginx load balancing incoming requests, sending them to two caesar-rest HTTP applications listening at `HOST1` and `HOST2` on port `PORT`.
- Create a `/etc/systemd/system/nginx.service` systemd file, e.g. see the example provided in the `config/nginx` directory:
```
[Unit]
Description=The NGINX HTTP and reverse proxy server
After=syslog.target network.target remote-fs.target nss-lookup.target caesar-rest.target
[Service]
Type=forking
PIDFile=/run/nginx.pid
ExecStartPre=/usr/sbin/nginx -t
ExecStart=/usr/sbin/nginx
ExecReload=/usr/sbin/nginx -s reload
ExecStop=/bin/kill -s QUIT $MAINPID
PrivateTmp=true
[Install]
WantedBy=multi-user.target
```
- Run nginx server:
```sudo systemctl start nginx.service```
Alternatively you can use the Docker container `sriggi/caesar-rest-lb:latest` (see https://hub.docker.com/r/sriggi/caesar-rest-lb) and deploy it with Docker Compose. In Kubernetes this functionality is provided by ingresses (see sample configuration files).
### **Run job monitoring service**
The job monitoring service periodically monitors user jobs, updating their status on the DB. It can be started as:
```$INSTALL_DIR/bin/run_jobmonitor.py --[ARGS]```
where supported `ARGS` are (an example invocation is given after this list):
* `job_monitoring_period=[PERIOD]`: Job monitoring poll period in seconds (default=30)
* `job_scheduler=[SCHEDULER]`: Job scheduler to be used. Options are: {celery,kubernetes,slurm} (default=celery)
* `dbname=[DBNAME]`: Name of MongoDB database (default=caesardb)
* `dbhost=[DBHOST]`: Host of MongoDB database (default=localhost)
* `dbport=[DBPORT]`: Port of MongoDB database (default=27017)
* `kube_config=[FILE_PATH]`: Kube configuration file path (default=search in standard path)
* `kube_cafile=[FILE_PATH]`: Kube certificate authority file path
* `kube_keyfile=[FILE_PATH]`: Kube private key file path
* `kube_certfile=[FILE_PATH]`: Kube certificate file path
* `slurm_keyfile=[FILE_PATH]`: Slurm rest service private key file path
* `slurm_user=[SLURM_USER]`: Username enabled to run in Slurm cluster (default=cirasa)
* `slurm_host=[SLURM_HOST]`: Slurm cluster host/ipaddress (default=localhost)
* `slurm_port=[SLURM_PORT]`: Slurm rest service port (default=6820)
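As an example (all values below are illustrative; the DB settings shown are the documented defaults), a monitor polling every 60 seconds against a Kubernetes scheduler and a local MongoDB could be started as:
```$INSTALL_DIR/bin/run_jobmonitor.py --job_monitoring_period=60 --job_scheduler=kubernetes --dbname=caesardb --dbhost=localhost --dbport=27017```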
Alternatively, you can use the Docker container `sriggi/caesar-rest-jobmonitor:latest` (see https://hub.docker.com/r/sriggi/caesar-rest-jobmonitor) and deploy it with DockerCompose or Kubernetes (see sample configuration files).
### **Run accounting service**
The accounting service periodically monitors user data and job info, storing aggregated stats in the DB. It can be started as:
```$INSTALL_DIR/bin/run_accounter.py --[ARGS]```
where supported `ARGS` are:
* `datadir=[DATADIR]`: Directory where to store uploaded data (default: /opt/caesar-rest/data)
* `jobdir=[JOBDIR]`: Top directory where to store job data (default: /opt/caesar-rest/jobs)
* `job_monitoring_period=[PERIOD]`: Job info monitoring poll period in seconds (default=30)
* `dbname=[DBNAME]`: Name of MongoDB database (default=caesardb)
* `dbhost=[DBHOST]`: Host of MongoDB database (default=localhost)
* `dbport=[DBPORT]`: Port of MongoDB database (default=27017)
* `mount_rclone_volume`: Enable mounting of Nextcloud volume through rclone in container jobs (default=no)
* `mount_volume_path=[PATH]`: Mount volume path for container jobs (default=/mnt/storage)
* `rclone_storage_name=[NAME]`: rclone remote storage name (default=neanias-nextcloud)
* `rclone_storage_path=[PATH]`: rclone remote storage path (default=.)
Alternatively, you can use the Docker container `sriggi/caesar-rest-accounter:latest` (see https://hub.docker.com/r/sriggi/caesar-rest-accounter) and deploy it with DockerCompose or Kubernetes (see sample configuration files).
## **Usage**
caesar-rest provides the following REST endpoints:
### **Data upload**
* URL:```http://server-address:port/caesar/api/v1.0/upload```
* Request methods: POST
* Request header: ```content-type: multipart/form-data```
A sample curl request would be:
```
curl -X POST \
-H 'Content-Type: multipart/form-data' \
-F 'file=@VGPS_cont_MOS017.fits' \
--url 'http://localhost:8080/caesar/api/v1.0/upload'
```
Server response is:
```
{
"date":"2020-04-24T17:04:26.174333",
"filename_orig":"VGPS_cont_MOS017.fits",
"format":"fits",
"size":4.00726318359375,
"status":"File uploaded with success",
"uuid":"250fdf5ed6a044888cf4406338f9e73b"
}
```
A file uuid (or file path) is returned and can be used to download the file or to set the job input file information.
### **Data download**
* URL:```http://server-address:port/caesar/api/v1.0/download/[file_id]```
* Request methods: GET, POST
* Request header: None
A sample curl request would be:
```
curl -X GET \
--fail -o data.fits \
--url 'http://localhost:8080/caesar/api/v1.0/download/67a49bf7555b41739095681bf52a1f99'
```
The above request will fail if the file is not found; otherwise the downloaded file will be saved as `data.fits`. Without the `-o` argument the raw output is written to stdout. If the file is not found, a json response is returned:
```
{
"status": "File with uuid 67a49bf7555b41739095681bf52a1f99 not found on the system!"
}
```
### **Get uploaded data ids**
* URL:```http://server-address:port/caesar/api/v1.0/fileids```
* Request methods: GET
* Request header: None
A sample curl request would be:
```
curl -X GET \
--url 'http://localhost:8080/caesar/api/v1.0/fileids'
```
with response:
```
{"file_ids":["a668c353ba4d4c7395ad94b4e8647d92","c54db5ef95734c62a499db38587c48a5","26bc9a545c8f4f05a2c719ec5c3917e0"]}
```
### **App description**
To get the list of supported apps:
* URL:```http://server-address:port/caesar/api/v1.0/apps```
* Request methods: GET
* Request header: none
Server response contains a list of valid apps that can be queried for further description and used in job submission:
```
{
"apps": [
"caesar",
"mrcnn"
]
}
```
To get information about a given app:
* URL:```http://server-address:port/caesar/api/v1.0/app/[app_name]/describe```
* Request methods: GET
* Request header: none
Server response contains a list of app options that can be used in job submission:
```
{"image":{"description":"Path to input image (.fits) to be given to classifier (default=empty)","mandatory":true,"type":"str"},"iouThr":{"description":"IOU threshold between detected and ground truth bboxes to consider the object as detected (default=0.6)","mandatory":false,"type":"float"},"scoreThr":{"description":"Detected object score threshold to select as final object (default=0.7)","mandatory":false,"type":"float"}}
```
### **Job submission**
* URL:```http://server-address:port/caesar/api/v1.0/job```
* Request methods: POST
* Request header: ```content-type: application/json```
A sample curl request would be:
```
curl -X POST \
-H 'Content-Type: application/json' \
-d '{"app":"caesar","job_inputs":{"inputfile":"/opt/caesar-rest/data/67a49bf7555b41739095681bf52a1f99.fits","run":true,"no-logredir":true,"envfile":"/home/riggi/Software/setvars.sh","no-mpi":true,"no-nestedsearch":true,"no-extendedsearch":true}}' \
--url 'http://localhost:8080/caesar/api/v1.0/job'
```
Job data must contain a valid app name (in this case `caesar`) and the desired job inputs, i.e. a dictionary with valid app options. Valid options for the `caesar` app are named as in `caesar` itself and can be retrieved using the app description URL described above.
Server response is:
```
{
"app": "caesar",
"job_id": "69ca62d7-5098-4fe7-a675-63895a2d06b1",
"job_inputs": {
"envfile": "/home/riggi/Software/setvars.sh",
"inputfile": "67a49bf7555b41739095681bf52a1f99",
"no-extendedsearch": true,
"no-logredir": true,
"no-mpi": true,
"no-nestedsearch": true,
"run": true
},
"status": "Job submitted with success",
"submit_date": "2020-04-24T14:05:24.761766"
}
```
A job id is returned in the response, which can be used to query the status of the job, cancel it, or retrieve its output data at completion.
### **Get job status**
* URL:```http://server-address:port/caesar/api/v1.0/job/[job_id]/status```
* Request methods: GET
* Request header: None
A sample curl request would be:
```
curl -X GET \
--url 'http://localhost:8080/caesar/api/v1.0/job/f135bcee-562b-4f01-ad9b-103c35b13b36/status'
```
Server response is:
```
{
"elapsed_time": "27.3435878754",
"exit_status": 0,
"job_id": "f135bcee-562b-4f01-ad9b-103c35b13b36",
"pid": "11539",
"state": "SUCCESS",
"status": "Process terminated with success"
}
```
The exit status is the shell exit status of the executed background task, and pid is the corresponding process id. Possible job states are: {STARTED, TIMED-OUT, ABORTED, RUNNING, SUCCESS, FAILURE}.
### **Get job output**
* URL:```http://server-address:port/caesar/api/v1.0/job/[job_id]/output```
* Request methods: GET
* Request header: None
A sample curl request would be:
```
curl -X GET \
--fail -o job_output.tar.gz \
--url 'http://localhost:8080/caesar/api/v1.0/job/c3c9348a-bea0-4141-8fe9-7f64076a2327/output'
```
The response is a tar.gz file containing all job directory files (logs, output data, run scripts, etc).
### **Cancel job**
* URL:```http://server-address:port/caesar/api/v1.0/job/[job_id]/cancel```
* Request methods: POST
* Request header: None
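By analogy with the examples above, a cancel request could be issued as follows (the job id value below is purely illustrative):
```
curl -X POST \
--url 'http://localhost:8080/caesar/api/v1.0/job/f135bcee-562b-4f01-ad9b-103c35b13b36/cancel'
```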
### **Get job ids**
* URL:```http://server-address:port/caesar/api/v1.0/jobs```
* Request methods: GET
* Request header: None
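A sample curl request, following the same pattern as the examples above, could be:
```
curl -X GET \
--url 'http://localhost:8080/caesar/api/v1.0/jobs'
```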
|
SKA-INAFREPO_NAMEcaesar-restPATH_START.@caesar-rest_extracted@caesar-rest-master@README.md@.PATH_END.py
|
{
"filename": "test_mock_helpers.py",
"repo_name": "astropy/halotools",
"repo_path": "halotools_extracted/halotools-master/halotools/empirical_models/factories/tests/test_mock_helpers.py",
"type": "Python"
}
|
"""
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import numpy as np
import pytest
from astropy.table import Table
from ..mock_helpers import infer_mask_from_kwargs
from ....custom_exceptions import HalotoolsError
__all__ = ('test_infer_mask_from_kwargs_consistency', )
def test_infer_mask_from_kwargs_consistency():
x = np.arange(10)
t = Table({'x': x})
mask1 = infer_mask_from_kwargs(t, x=4)
def f(t):
return t['x'] == 4
mask2 = infer_mask_from_kwargs(t, mask_function=f)
assert np.all(mask1 == mask2)
def test_infer_trivial_mask_from_kwargs():
x = np.arange(10)
t = Table({'x': x})
mask = infer_mask_from_kwargs(t)
assert len(t) == len(t[mask])
def test_too_many_args():
x = np.arange(10)
t = Table({'x': x, 'y': x})
with pytest.raises(HalotoolsError) as err:
mask = infer_mask_from_kwargs(t, x=4, y=5)
substr = "Only a single mask at a time is permitted "
assert substr in err.value.args[0]
|
astropyREPO_NAMEhalotoolsPATH_START.@halotools_extracted@halotools-master@halotools@empirical_models@factories@tests@test_mock_helpers.py@.PATH_END.py
|
{
"filename": "index.md",
"repo_name": "youngjookim/sdr",
"repo_path": "sdr_extracted/sdr-master/SITE/content/experiments/2/index.md",
"type": "Markdown"
}
|
+++
image = "2.png"
date = "2021-01-02"
title = "Effect of different hyperparameters using non-Gaussian synthetic data - Lognormal"
type = "gallery"
+++
Effects of different parameters using 2D non-Gaussian (log-normal, μ=0, and σ=1) data with 10K observations. Note that the effects of the parameters are similar to those of Figure 1. However, LGC with too large values of T and α is prone to outliers (long tails), as shown in (d) and (l), but this problem can be solved by setting a larger value of k.
|
youngjookimREPO_NAMEsdrPATH_START.@sdr_extracted@sdr-master@SITE@content@experiments@2@index.md@.PATH_END.py
|
{
"filename": "_cmid.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/scatterpolargl/marker/_cmid.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class CmidValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(
self, plotly_name="cmid", parent_name="scatterpolargl.marker", **kwargs
):
super(CmidValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
implied_edits=kwargs.pop("implied_edits", {}),
**kwargs,
)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@scatterpolargl@marker@_cmid.py@.PATH_END.py
|
{
"filename": "make-contour-plots.ipynb",
"repo_name": "marnixlooijmans/shrinkage-estimation-paper",
"repo_path": "shrinkage-estimation-paper_extracted/shrinkage-estimation-paper-main/notebooks/make-contour-plots.ipynb",
"type": "Jupyter Notebook"
}
|
```python
import numpy as np
import pocomc as pc
import os
from getdist import plots, MCSamples
import matplotlib.pyplot as plt
from matplotlib.collections import PatchCollection
%matplotlib inline
```
```python
names = ["b", "f"]
labels = ["b", "f"]
ref_chain = np.load("../output/BOSS_DR12_NGC_z1/samples/n2048/cov_sample/cov_sample_2048_results_v1.npy", allow_pickle=True)
reference = MCSamples(samples=ref_chain[()]["samples"], labels=labels, names=names)
```
Removed no burn in
```python
estimates = [
['cov', 'sample', "Sample"],
['cov', 'shrinkage_emp', "Covariance shrinkage\ntarget $\mathbf{T}^{(1)}$ (empirical)"],
['cov', 'shrinkage_ana', "Covariance shrinkage\ntarget $\mathbf{T}^{(2)}$ (analytical)"],
['cov', 'NERCOME', "NERCOME"],
['pre', 'shrinkage_emp', "Precision shrinkage\ntarget $\mathbf{\Pi}_0^{(1)}$ (empirical)"],
['pre', 'shrinkage_ana', "Precision shrinkage\ntarget $\mathbf{\Pi}_0^{(2)}$ (analytical)"],
]
n = 2048
iestimate = 5 # Change this to select which estimation type above should be plotted 0,...,5
mat_type = estimates[iestimate][0]
est_type = estimates[iestimate][1]
label = estimates[iestimate][2]
if n==24:
param_limits = {"b": (1.83, 2.05), "f": (0.3, 0.9)}
elif n==30:
param_limits = {"b": (1.85, 2.05), "f": (0.4, 0.85)}
elif n==2048:
param_limits = {"b": (1.88, 2.02), "f": (0.45, 0.75)} # n=2048 tick limits
```
```python
datapath = f"../output/BOSS_DR12_NGC_z1/samples/n{n}/{mat_type}_{est_type}/"
contours = [reference]
for k in [1]: # Subset of chains included in the contour plots
fname = f"{mat_type}_{est_type}_{n}_results_v{k}.npy"
results = np.load(os.path.join(datapath, fname), allow_pickle=True)[()]
chain = MCSamples(samples=results["samples"], labels=labels, names=names)
contours.append(chain)
#colors.append("blue")
```
Removed no burn in
```python
# Source: https://stackoverflow.com/questions/31908982/multi-color-legend-entry/31909401#31909401
# define an object that will be used by the legend
class MulticolorPatch(object):
def __init__(self, colors):
self.colors = colors
# define a handler for the MulticolorPatch object
class MulticolorPatchHandler(object):
def legend_artist(self, legend, orig_handle, fontsize, handlebox):
width, height = handlebox.width, handlebox.height
patches = []
for i, c in enumerate(orig_handle.colors):
patches.append(plt.Rectangle([width/len(orig_handle.colors) * i - handlebox.xdescent,
-handlebox.ydescent],
width / len(orig_handle.colors),
height,
facecolor=c,
edgecolor='none'))
patch = PatchCollection(patches,match_original=True)
handlebox.add_artist(patch)
return patch
```
```python
# Add fiducial values
b_fid = 2
f_fid = 0.7
colors = ["red", "darkblue", "royalblue", "dodgerblue"]
g = plots.getSubplotPlotter(subplot_size=4)
g.settings.axes_fontsize=16
g.settings.axes_labelsize=16
g.triangle_plot(
contours,
contour_colors=colors[0:len(contours)],
filled=True,
contour_args={'alpha':0.7},
legend_labels=[],
param_limits = param_limits,
)
if est_type[-3:] == "ana":
fid_color = "gray"
fid_ls = "--"
g.subplots[0,0].axvline(b_fid, c=fid_color, ls=fid_ls)
g.subplots[1,0].axvline(b_fid, c=fid_color, ls=fid_ls)
g.subplots[1,0].axhline(f_fid, c=fid_color, ls=fid_ls)
g.subplots[1,1].axvline(f_fid, c=fid_color, ls=fid_ls)
h = []
l = []
h.append(MulticolorPatch([colors[0]]))
l.append("Reference")
h.append(MulticolorPatch(colors[1:len(contours)]))
l.append(label)
g.fig.legend(h, l, loc='upper right',
handler_map={MulticolorPatch: MulticolorPatchHandler()},
fontsize=16, frameon=False)
plt.savefig(f"../output/BOSS_DR12_NGC_z1/plots/contours/{mat_type}_{est_type}_n{n}.pdf", format="pdf", bbox_inches='tight', pad_inches = 0.1)
```

```python
```
|
marnixlooijmansREPO_NAMEshrinkage-estimation-paperPATH_START.@shrinkage-estimation-paper_extracted@shrinkage-estimation-paper-main@notebooks@make-contour-plots.ipynb@.PATH_END.py
|
{
"filename": "README.md",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/monotonic/py2/README.md",
"type": "Markdown"
}
|
monotonic
=========
NOTE: **This library is considered stable and complete, and will not receive
any further updates. Python versions 3.3 and newer include
[``time.monotonic()``][0] in the standard library.**
This module provides a ``monotonic()`` function which returns the
value (in fractional seconds) of a clock which never goes backwards.
It is compatible with Python 2 and Python 3.
On Python 3.3 or newer, ``monotonic`` will be an alias of
[``time.monotonic``][0] from the standard library. On older versions,
it will fall back to an equivalent implementation:
OS | Implementation
-----------------|-----------------------------------------
Linux, BSD, AIX | [clock_gettime][1]
Windows | [GetTickCount][2] or [GetTickCount64][3]
OS X | [mach_absolute_time][4]
If no suitable implementation exists for the current platform,
attempting to import this module (or to import from it) will
cause a RuntimeError exception to be raised.
monotonic is available via the Python Cheese Shop (PyPI):
https://pypi.python.org/pypi/monotonic/
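A minimal usage sketch (assuming the package has been installed from PyPI as above):
```python
import time
from monotonic import monotonic

start = monotonic()
time.sleep(0.1)                # stand-in for the work being timed
elapsed = monotonic() - start  # fractional seconds; never negative
print(elapsed)
```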
License
-------
Copyright 2014, 2015, 2016, 2017 Ori Livneh <ori@wikimedia.org>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
[0]: https://docs.python.org/3/library/time.html#time.monotonic
[1]: http://linux.die.net/man/3/clock_gettime
[2]: https://msdn.microsoft.com/en-us/library/windows/desktop/ms724408
[3]: https://msdn.microsoft.com/en-us/library/windows/desktop/ms724411
[4]: https://developer.apple.com/library/mac/qa/qa1398/
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@monotonic@py2@README.md@.PATH_END.py
|
{
"filename": "testMSOffice.py",
"repo_name": "mhammond/pywin32",
"repo_path": "pywin32_extracted/pywin32-main/com/win32com/test/testMSOffice.py",
"type": "Python"
}
|
# Test MSOffice
#
# Main purpose of test is to ensure that Dynamic COM objects
# work as expected.
# Assumes Word and Excel installed on your machine.
import traceback
import pythoncom
import win32api
import win32com
import win32com.client.dynamic
from win32com.client import gencache
from win32com.test.util import CheckClean
# Test a few of the MSOffice components.
def TestWord():
# Try and load the object exposed by Word 8
# Office 97 - _totally_ different object model!
try:
# NOTE - using "client.Dispatch" would return an msword8.py instance!
print("Starting Word 8 for dynamic test")
word = win32com.client.dynamic.Dispatch("Word.Application")
TestWord8(word)
word = None
# Now we will test Dispatch without the new "lazy" capabilities
print("Starting Word 8 for non-lazy dynamic test")
dispatch = win32com.client.dynamic._GetGoodDispatch("Word.Application")
typeinfo = dispatch.GetTypeInfo()
attr = typeinfo.GetTypeAttr()
olerepr = win32com.client.build.DispatchItem(typeinfo, attr, None, 0)
word = win32com.client.dynamic.CDispatch(dispatch, olerepr)
dispatch = typeinfo = attr = olerepr = None
TestWord8(word)
except pythoncom.com_error:
print("Starting Word 7 for dynamic test")
word = win32com.client.Dispatch("Word.Basic")
TestWord7(word)
except Exception as e:
print("Word dynamic tests failed", e)
traceback.print_exc()
print("Starting MSWord for generated test")
try:
from win32com.client import gencache
word = gencache.EnsureDispatch("Word.Application.8")
TestWord8(word)
except Exception as e:
print("Word generated tests failed", e)
traceback.print_exc()
def TestWord7(word):
word.FileNew()
# If not shown, show the app.
if not word.AppShow():
word._proc_("AppShow")
for i in range(12):
word.FormatFont(Color=i + 1, Points=i + 12)
word.Insert("Hello from Python %d\n" % i)
word.FileClose(2)
def TestWord8(word):
word.Visible = 1
doc = word.Documents.Add()
wrange = doc.Range()
for i in range(10):
wrange.InsertAfter("Hello from Python %d\n" % i)
paras = doc.Paragraphs
for i in range(len(paras)):
# *sob* - in Word 2019, `p = paras(i+1)` seems to work to get a para
# but `p.Font` then blows up.
# p = paras[i]()
p = paras(i + 1)
p.Font.ColorIndex = i + 1
p.Font.Size = 12 + (4 * i)
# XXX - note that
# for para in paras:
# para().Font...
# doesn't seem to work - no error, just doesn't work
# Should check if it works for VB!
doc.Close(SaveChanges=0)
word.Quit()
win32api.Sleep(1000) # Wait for word to close, else we
# may get OA error.
def TestWord8OldStyle():
try:
import win32com.test.Generated4Test.msword8
except ImportError:
print("Can not do old style test")
def TextExcel(xl):
xl.Visible = 0
assert not xl.Visible, "Visible property is true."
xl.Visible = 1
assert xl.Visible, "Visible property not true."
if int(xl.Version.split(".")[0]) >= 8:
xl.Workbooks.Add()
else:
xl.Workbooks().Add()
xl.Range("A1:C1").Value = (1, 2, 3)
xl.Range("A2:C2").Value = ("x", "y", "z")
xl.Range("A3:C3").Value = ("3", "2", "1")
for i in range(20):
xl.Cells(i + 1, i + 1).Value = "Hi %d" % i
assert xl.Range("A1").Value == "Hi 0", "Single cell range failed"
assert xl.Range("A1:B1").Value == (
("Hi 0", 2),
), "flat-horizontal cell range failed"
assert xl.Range("A1:A2").Value == (
("Hi 0",),
("x",),
), "flat-vertical cell range failed"
assert xl.Range("A1:C3").Value == (
("Hi 0", 2, 3),
("x", "Hi 1", "z"),
(3, 2, "Hi 2"),
), "square cell range failed"
xl.Range("A1:C3").Value = ((3, 2, 1), ("x", "y", "z"), (1, 2, 3))
assert xl.Range("A1:C3").Value == (
(3, 2, 1),
("x", "y", "z"),
(1, 2, 3),
), "Range was not what I set it to!"
# test dates out with Excel
xl.Cells(5, 1).Value = "Excel time"
xl.Cells(5, 2).Formula = "=Now()"
import time
xl.Cells(6, 1).Value = "Python time"
xl.Cells(6, 2).Value = pythoncom.MakeTime(time.time())
xl.Cells(6, 2).NumberFormat = "d/mm/yy h:mm"
xl.Columns("A:B").EntireColumn.AutoFit()
xl.Workbooks(1).Close(0)
xl.Quit()
def TestAll():
TestWord()
try:
print("Starting Excel for Dynamic test...")
xl = win32com.client.dynamic.Dispatch("Excel.Application")
TextExcel(xl)
except Exception as e:
worked = False
print("Excel tests failed", e)
traceback.print_exc()
try:
print("Starting Excel 8 for generated excel8.py test...")
mod = gencache.EnsureModule(
"{00020813-0000-0000-C000-000000000046}", 0, 1, 2, bForDemand=1
)
xl = win32com.client.Dispatch("Excel.Application")
TextExcel(xl)
except ImportError:
print("Could not import the generated Excel 97 wrapper")
except Exception as e:
print("Generated Excel tests failed", e)
traceback.print_exc()
try:
import xl5en32 # nopycln: import # Test import. TODO: Is testing Excel 95 still relevant ?
mod = gencache.EnsureModule("{00020813-0000-0000-C000-000000000046}", 9, 1, 0)
xl = win32com.client.Dispatch("Excel.Application.5")
print("Starting Excel 95 for makepy test...")
TextExcel(xl)
except ImportError:
print("Could not import the generated Excel 95 wrapper")
except Exception as e:
print("Excel 95 tests failed", e)
traceback.print_exc()
if __name__ == "__main__":
TestAll()
CheckClean()
pythoncom.CoUninitialize()
|
mhammondREPO_NAMEpywin32PATH_START.@pywin32_extracted@pywin32-main@com@win32com@test@testMSOffice.py@.PATH_END.py
|
{
"filename": "EXOTIC-Installation-Instructions-for-Mac-Users.md",
"repo_name": "rzellem/EXOTIC",
"repo_path": "EXOTIC_extracted/EXOTIC-main/docs/regions/English/EXOTIC-Installation-Instructions-for-Mac-Users.md",
"type": "Markdown"
}
|
EXOTIC Installation Instructions for Mac Users
There are two ways you can run EXOTIC: either with a Python Notebook in Jupyter or Google Colab, or on your own personal machine using the command line. Please review the details of the two processes below and select the best option for you.
Google Colab: Running EXOTIC in Google Colab is the recommended option for all users as it offers a more user-friendly and interactive experience. It is especially recommended for new users, those who are unfamiliar with using the command line, students, and those analyzing data via remote observatories. To run EXOTIC with the Python Notebook, please see the instructions "How-to-Run-EXOTIC-with-the-Python-Notebook" in the Documentation folder.
Command line: If you feel comfortable using the command line, are reducing large data sets, or are looking to learn more about EXOTIC and how to run it on your own machine, follow the set of instructions on the following pages:
Please note: in order to be able to click on the links and select text in this document, you must download it off GitHub. The GitHub preview simply shows you an image of the document, which does not allow for those functions.
I. Download DS9 (Astronomical Image Viewing Software)
• Follow the link: https://sites.google.com/cfa.harvard.edu/saoimageds9?pli=1&authuser=1
• Download the version corresponding to the Mac operating system.
• Run the installer once downloaded.
• Follow the instructions in the installer to complete the installation.
Note: This software will be used to view the ".FITS" images you obtain during observations. For more information on DS9, check out the User Guide: http://ds9.si.edu/doc/user/index.html
II. Open the Terminal app
• Select Launch Pad on the Task Bar
• Double-click Terminal
• When opening Terminal for the first time, you might be prompted to enter a password. Enter your new password and take note of it. You will be prompted again later to enter it. If you are not prompted to add one, your password will be the same as the password for the computer account you are currently working on.
Note: The Terminal app allows you to perform actions on your computer (run python programs, install applications, edit files, etc.) by typing in commands. If you are interested in learning more about the terminal and the different commands you can use, follow this link: https://www.macworld.com/article/2042378/master-the-command-line-navigating-files-and-folders.html
III. Installing EXOTIC - Execute the following commands in your Terminal's command line (a consolidated summary of these commands is given after this list):
• Type "cd /PATH/", replacing PATH with the directory you want the EXOTIC folder to be downloaded in. For example, typing "cd Documents" will mean that the EXOTIC files will be stored in Users/your_username/Documents/EXOTIC/.
• This will also be the location in which you run the program.
o Note: cd stands for "Change Directory". In executing this command, you are navigating to the chosen folder, just as you would by double-clicking on it in Finder.
• Install Python (or update Python to the latest version) on your Mac by first installing Homebrew: type the command /bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install.sh)" exactly as shown (including its double quotes) and hit enter.
• Type "brew install python" and hit enter. If you already have Python installed, you can skip this step.
• Type "curl https://bootstrap.pypa.io/get-pip.py -o get-pip.py" and hit enter.
• Type "python get-pip.py" and hit enter.
• Run the command "pip3 install exotic || pip install exotic". This command will install EXOTIC and all necessary Python packages used to run EXOTIC.
Please Note: If this command fails, you can manually download EXOTIC off GitHub. To do this, follow the additional instructions below.
• Wait a while. This process may take several minutes.
• Type "exotic" and hit enter. You should see the introductory header to EXOTIC as pictured at the bottom of this document, which tells you that it is all up and running!
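For reference, here is a consolidated sketch of the commands above (assuming you want EXOTIC under your Documents folder and do not yet have Python or pip; adjust the path and skip steps as needed):
```
cd ~/Documents
/bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install.sh)"
brew install python
curl https://bootstrap.pypa.io/get-pip.py -o get-pip.py
python get-pip.py
pip3 install exotic || pip install exotic
exotic
```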
If you experienced issues with the "pip install exotic" command above, please follow these next steps:
• Type "git clone https://github.com/rzellem/EXOTIC.git" and hit enter.
• Type "chmod 755 exotic_installation_mac.sh" (do not include the quotes) and hit Enter.
▪ Note: this command alters the file you downloaded, "exotic_installation_mac.sh", to be executable (i.e. you can now run it in your terminal).
• Type "./exotic_installation_mac.sh" (do not include the quotes) and hit Enter.
• Enter the password you created earlier (or already had) if prompted.
▪ Note: this command runs the file you downloaded, which is called a script. A script is simply a list of commands to be executed in the Terminal. This script will download Python (unless you already have it) and install all the necessary packages to run EXOTIC. Finally, the script will run EXOTIC to test that it is functional.
• Wait a while. This process may take several minutes.
• Once the process has completed, you should see the introductory header to EXOTIC as pictured at the bottom of this document, which tells you that it is all up and running!
If you are seeing this header in your terminal, EXOTIC is successfully installed, and you are ready to begin analyzing exoplanet data!
And that's it! You have successfully installed EXOTIC and can now use it at any time to reduce the data from your amazing transit observations!
To learn how to run the code and how EXOTIC works, check our other guides on GitHub!
- https://github.com/rzellem/EXOTIC/tree/main/Documentation
If you have any questions or comments, please feel free to reach out to us on Slack or email at exoplanetwatch@jpl.nasa.gov
|
rzellemREPO_NAMEEXOTICPATH_START.@EXOTIC_extracted@EXOTIC-main@docs@regions@English@EXOTIC-Installation-Instructions-for-Mac-Users.md@.PATH_END.py
|
{
"filename": "fitting.py",
"repo_name": "cmccully/lcogtgemini",
"repo_path": "lcogtgemini_extracted/lcogtgemini-master/lcogtgemini/fitting.py",
"type": "Python"
}
|
import numpy as np
from scipy import optimize
from statsmodels import robust
from lcogtgemini.utils import mad
from matplotlib import pyplot
def ncor(x, y):
"""Calculate the normalized correlation of two arrays"""
d = np.correlate(x, x) * np.correlate(y, y)
return np.correlate(x, y) / d ** 0.5
def xcorfun(p, warr, farr, telwarr, telfarr):
# Telluric wavelengths and flux
# Observed wavelengths and flux
# resample the telluric spectrum at the same wavelengths as the observed
# spectrum
# Make the artifical spectrum to cross correlate
asfarr = np.interp(warr, p[0] * telwarr + p[1], telfarr, left=1.0, right=1.0)
return np.abs(1.0 / ncor(farr, asfarr))
def normalize_fitting_coordinate(x):
x_range = x.max() - x.min()
return (x - x.min()) / x_range, x.min(), x_range
# Iterative reweighting linear least squares
def irls(x, data, errors, model_function, initial_parameter_guess, good_pixels,
tol=1e-6, weight_function=robust.norms.AndrewWave, weight_scale=2.0, maxiter=10):
weights_calculator = weight_function(weight_scale)
#Normalize to make fitting easier
normalized_x, xmin, x_range = normalize_fitting_coordinate(x)
y_scale = np.median(data[good_pixels])
y = data[good_pixels] / y_scale
scatter = errors[good_pixels] / y_scale
# Do an initial fit of the model
# Use 1 / sigma^2 as weights
best_parameters = optimize.curve_fit(model_function, normalized_x[good_pixels], y, p0=initial_parameter_guess, sigma=scatter)[0]
notconverged=True
last_chi2 = np.inf
iter = 0
# Until converged
while notconverged:
# Update the weights
residuals = y - model_function(normalized_x[good_pixels], *best_parameters)
# Save the chi^2 to check for convergence
chi2 = ((residuals / scatter) ** 2.0).sum()
# update the scaling (the MAD of the residuals)
scatter = mad(residuals) * 1.4826 # To convert to standard deviation
weights = weights_calculator.weights(residuals / scatter).flatten()
fit_errors = np.zeros(weights.shape)
fit_errors[weights > 0] = weights[weights > 0] ** -2.0
fit_errors[weights == 0] = np.inf
# refit
best_parameters = optimize.curve_fit(model_function, normalized_x[good_pixels], y,
p0=best_parameters, sigma=fit_errors)[0]
# converged when the change in the chi^2 (or l2 norm or whatever) is
# less than the tolerance. Hopefully this should converge quickly.
if iter >= maxiter or np.abs(chi2 - last_chi2) < tol:
notconverged = False
else:
last_chi2 = chi2
iter += 1
return {'popt': best_parameters, 'y_scale': y_scale, 'xmin': xmin, 'x_range': x_range,
'model_function': model_function}
def eval_fit(fit_dict, x):
x_to_eval = (x - fit_dict['xmin']) / fit_dict['x_range']
return fit_dict['model_function'](x_to_eval, *fit_dict['popt']) * fit_dict['y_scale']
def polynomial_fourier_model(n_poly, n_fourier):
def model_to_optimize(x, *p):
y = p[0]
for i in range(1, n_poly + 1):
y += p[i] * x ** i
# Note this assumes that x is roughly normalized between 0 and 1
omega_t = 2.0 * np.pi * x
for i in range(1, n_fourier + 1):
y += p[2 * i - 1 + n_poly] * np.sin(i * omega_t)
y += p[2 * i + n_poly] * np.cos(i * omega_t)
return y
return model_to_optimize
def run_polynomal_fourier_fit(x, y, errors, n_poly, n_fourier, weight_scale, good_pixels):
function_to_fit = polynomial_fourier_model(n_poly, n_fourier)
p0 = np.zeros(1 + n_poly + 2 * n_fourier)
p0[0] = 1.0
best_fit = run_fit(x, y, errors, function_to_fit, p0, weight_scale, good_pixels)
return best_fit
def run_fit(x, y, errors, function_to_fit, p0, weight_scale, good_pixels):
# Run IRLS on the data given the input parameters
best_fit = irls(x, y, errors, function_to_fit, p0, good_pixels, weight_scale=weight_scale)
# Plot the best fit
plot_best_fit(x, y, best_fit, good_pixels)
return best_fit
def fit_polynomial_fourier_model(x, y, errors, n_poly, n_fourier, good_pixels, weight_scale=2.0):
best_fit = run_polynomal_fourier_fit(x, y, errors, n_poly, n_fourier, weight_scale, good_pixels)
# Go into a while loop
while True:
# Ask if the user is not happy with the fit,
response = user_input('Does this fit look good? y or n:', ['y', 'n'], 'y')
if response == 'y':
break
# If not have the user put in new values
else:
n_poly = int(user_input('Order of polynomial to fit:', [str(i) for i in range(100)], n_poly))
n_fourier = int(user_input('Order of Fourier terms to fit:', [str(i) for i in range(100)], n_fourier))
weight_scale = user_input('Scale for outlier rejection:', default=weight_scale, is_number=True)
best_fit = run_polynomal_fourier_fit(x, y, errors, n_poly, n_fourier, weight_scale, good_pixels)
return best_fit
def user_input(prompt, choices=None, default=None, is_number=False):
while True:
response = raw_input(prompt + ' [{i}]'.format(i=default))
if len(response) == 0:
response = default
break
if choices is not None and response in choices:
break
elif is_number:
try:
response = float(response)
break
except:
print('Input could not be parsed into a number. Please try again.')
else:
print('Please select a valid response')
return response
def plot_best_fit(x, y, best_fit, good_pixels):
fig = pyplot.gcf()
fig.clf()
axes = fig.get_axes()
if len(axes) == 0:
ax1 = pyplot.subplot(211)
pyplot.subplot(212, sharex=ax1)
axes = fig.get_axes()
axes[0].plot(x, y, 'b')
y_model = eval_fit(best_fit, x)
axes[0].plot(x, y_model, 'r')
axes[1].plot(x[good_pixels], y[good_pixels] - y_model[good_pixels], 'o', markersize=1.0)
def fitxcor(warr, farr, telwarr, telfarr):
"""Maximize the normalized cross correlation coefficient for the telluric
correction
"""
res = optimize.minimize(xcorfun, [1.0, 0.0], method='Nelder-Mead',
args=(warr, farr, telwarr, telfarr))
return res['x']
|
cmccullyREPO_NAMElcogtgeminiPATH_START.@lcogtgemini_extracted@lcogtgemini-master@lcogtgemini@fitting.py@.PATH_END.py
|
{
"filename": "networktest.py",
"repo_name": "CMB-S4/spt3g_software",
"repo_path": "spt3g_software_extracted/spt3g_software-master/core/tests/networktest.py",
"type": "Python"
}
|
#!/usr/bin/env python
from __future__ import print_function
import random, os, sys, numpy, time
from spt3g import core
port = random.randint(10000, 60000)
frames = []
for i in range(0, 20):
f = core.G3Frame()
f['Sequence'] = i
f['Data'] = core.G3Timestream(numpy.zeros(100000))
frames.append(f)
print('Port: ', port)
child = os.fork()
if child != 0:
# Parent
print('Parent')
send = core.G3NetworkSender(hostname='*', port=port)
time.sleep(1) # XXX: how to signal that the remote end is ready?
print('Sending')
for f in frames:
send(f)
send(core.G3Frame(core.G3FrameType.EndProcessing))
pid, status = os.wait()
print('Child Status: ', status)
if status == 0:
print('OK')
sys.exit(status)
else:
# Child
print('Child')
recv = core.G3Reader(filename='tcp://localhost:%d' % port)
rframes = []
for k in range(len(frames)):
chunk = recv(None)
print(chunk[0])
rframes += chunk
assert len(rframes) == len(frames)
for i in range(len(rframes)):
assert(rframes[i]['Sequence'] == i)
sys.exit(0)
|
CMB-S4REPO_NAMEspt3g_softwarePATH_START.@spt3g_software_extracted@spt3g_software-master@core@tests@networktest.py@.PATH_END.py
|
{
"filename": "notebook_setup.py",
"repo_name": "rodluger/starry",
"repo_path": "starry_extracted/starry-master/notebooks/notebook_setup.py",
"type": "Python"
}
|
# Pretty figures
get_ipython().magic('config InlineBackend.figure_format = "retina"')
import matplotlib
import matplotlib.pyplot as plt
import warnings
# Disable annoying font warnings
matplotlib.font_manager._log.setLevel(50)
# Disable theano deprecation warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)
warnings.filterwarnings(
"ignore", category=matplotlib.MatplotlibDeprecationWarning
)
warnings.filterwarnings("ignore", category=FutureWarning)
warnings.filterwarnings("ignore", category=UserWarning, module="theano")
# Style
plt.style.use("default")
plt.rcParams["savefig.dpi"] = 100
plt.rcParams["figure.dpi"] = 100
plt.rcParams["figure.figsize"] = (12, 4)
plt.rcParams["font.size"] = 14
plt.rcParams["text.usetex"] = False
plt.rcParams["font.family"] = "sans-serif"
plt.rcParams["font.sans-serif"] = ["Liberation Sans"]
plt.rcParams["font.cursive"] = ["Liberation Sans"]
try:
plt.rcParams["mathtext.fallback"] = "cm"
except KeyError:
plt.rcParams["mathtext.fallback_to_cm"] = True
# TODO: In case we need to patch some symbols.
if False:
try:
old_get_unicode_index
except NameError:
import matplotlib.mathtext as mathtext
old_get_unicode_index = mathtext.get_unicode_index
mathtext.get_unicode_index = (
lambda symbol, math=True: ord("x")
if symbol == "\\times"
else old_get_unicode_index(symbol, math)
)
# Several hacks to `corner` to make it prettier
import corner
import numpy as np
try:
old_corner
except NameError:
old_corner = corner.corner
def new_corner(*args, **kwargs):
# Get the usual corner plot
figure = old_corner(*args, **kwargs)
# Get the axes
ndim = int(np.sqrt(len(figure.axes)))
axes = np.array(figure.axes).reshape((ndim, ndim))
# Smaller tick labels
for ax in axes[1:, 0]:
for tick in ax.yaxis.get_major_ticks():
tick.label.set_fontsize(8)
formatter = matplotlib.ticker.ScalarFormatter(useOffset=False)
ax.yaxis.set_major_formatter(formatter)
for ax in axes[-1, :]:
for tick in ax.xaxis.get_major_ticks():
tick.label.set_fontsize(8)
formatter = matplotlib.ticker.ScalarFormatter(useOffset=False)
ax.xaxis.set_major_formatter(formatter)
# Pad the axes to always include the truths
truths = kwargs.get("truths", None)
if truths is not None:
for row in range(1, ndim):
for col in range(row):
lo, hi = np.array(axes[row, col].get_xlim())
if truths[col] < lo:
lo = truths[col] - 0.1 * (hi - truths[col])
axes[row, col].set_xlim(lo, hi)
axes[col, col].set_xlim(lo, hi)
elif truths[col] > hi:
hi = truths[col] - 0.1 * (hi - truths[col])
axes[row, col].set_xlim(lo, hi)
axes[col, col].set_xlim(lo, hi)
lo, hi = np.array(axes[row, col].get_ylim())
if truths[row] < lo:
lo = truths[row] - 0.1 * (hi - truths[row])
axes[row, col].set_ylim(lo, hi)
axes[row, row].set_xlim(lo, hi)
elif truths[row] > hi:
hi = truths[row] - 0.1 * (hi - truths[row])
axes[row, col].set_ylim(lo, hi)
axes[row, row].set_xlim(lo, hi)
return figure
corner.corner = new_corner
|
rodlugerREPO_NAMEstarryPATH_START.@starry_extracted@starry-master@notebooks@notebook_setup.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "dnarayanan/powderday",
"repo_path": "powderday_extracted/powderday-master/powderday/front_ends/__init__.py",
"type": "Python"
}
|
from . import enzo2pd
from . import front_end_controller
from . import gadget2pd
from . import tipsy2pd
from . import arepo2pd
|
dnarayananREPO_NAMEpowderdayPATH_START.@powderday_extracted@powderday-master@powderday@front_ends@__init__.py@.PATH_END.py
|
{
"filename": "train.py",
"repo_name": "xwzhang98/SREmulator",
"repo_path": "SREmulator_extracted/SREmulator-main/map2map/map2map/train.py",
"type": "Python"
}
|
import os
import socket
import time
import sys
from pprint import pprint
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.distributed as dist
from torch.multiprocessing import spawn
from torch.nn.parallel import DistributedDataParallel
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from .data import FieldDataset, DistFieldSampler
from . import models
from .models import (
narrow_like,
resample,
lag2eul,
G,
)
from .utils import import_attr, load_model_state_dict, plt_slices, plt_power, score
ckpt_link = "checkpoint.pt"
def node_worker(args):
if "SLURM_STEP_NUM_NODES" in os.environ:
args.nodes = int(os.environ["SLURM_STEP_NUM_NODES"])
elif "SLURM_JOB_NUM_NODES" in os.environ:
args.nodes = int(os.environ["SLURM_JOB_NUM_NODES"])
else:
raise KeyError("missing node counts in slurm env")
args.gpus_per_node = torch.cuda.device_count()
args.world_size = args.nodes * args.gpus_per_node
node = int(os.environ["SLURM_NODEID"])
if args.gpus_per_node < 1:
raise RuntimeError("GPU not found on node {}".format(node))
print(args.gpus_per_node, args.world_size, node, flush=True)
spawn(gpu_worker, args=(node, args), nprocs=args.gpus_per_node)
def gpu_worker(local_rank, node, args):
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = str(local_rank)
device = torch.device("cuda", 0)
torch.cuda.empty_cache()
rank = args.gpus_per_node * node + local_rank
# Need randomness across processes, for sampler, augmentation, noise etc.
# Note DDP broadcasts initial model states from rank 0
torch.manual_seed(args.seed + rank)
# good practice to disable cudnn.benchmark if enabling cudnn.deterministic
# torch.backends.cudnn.deterministic = True
# torch.use_deterministic_algorithms(True)
dist_init(rank, args)
dtype = torch.float32
torch.set_default_dtype(dtype)
train_dataset = FieldDataset(
in_patterns=args.train_in_patterns,
tgt_patterns=args.train_tgt_patterns,
style_pattern=args.train_style_pattern,
noise_patterns=args.train_noise_patterns,
noise_style_pattern=args.train_noise_style_pattern,
in_norms=args.in_norms,
tgt_norms=args.tgt_norms,
noise_norms=args.noise_norms,
callback_at=args.callback_at,
augment=args.augment,
aug_shift=args.aug_shift,
aug_add=args.aug_add,
aug_mul=args.aug_mul,
crop=args.crop,
crop_start=args.crop_start,
crop_stop=args.crop_stop,
crop_step=args.crop_step,
in_pad=args.in_pad,
tgt_pad=args.tgt_pad,
noise_pad=args.noise_pad,
scale_factor=args.scale_factor,
**args.misc_kwargs,
)
train_sampler = DistFieldSampler(
train_dataset,
shuffle=True,
div_data=args.div_data,
div_shuffle_dist=args.div_shuffle_dist,
)
# random_sampler =
train_loader = DataLoader(
train_dataset,
batch_size=args.batch_size,
shuffle=False,
sampler=train_sampler,
num_workers=args.loader_workers,
pin_memory=True,
persistent_workers=True,
)
if args.val:
val_dataset = FieldDataset(
in_patterns=args.val_in_patterns,
tgt_patterns=args.val_tgt_patterns,
style_pattern=args.val_style_pattern,
in_norms=args.in_norms,
tgt_norms=args.tgt_norms,
callback_at=args.callback_at,
augment=False,
aug_shift=None,
aug_add=None,
aug_mul=None,
crop=args.crop,
crop_start=args.crop_start,
crop_stop=args.crop_stop,
crop_step=args.crop_step,
in_pad=args.in_pad,
tgt_pad=args.tgt_pad,
scale_factor=args.scale_factor,
**args.misc_kwargs,
)
val_sampler = DistFieldSampler(
val_dataset,
shuffle=False,
div_data=args.div_data,
div_shuffle_dist=args.div_shuffle_dist,
)
val_loader = DataLoader(
val_dataset,
batch_size=args.batch_size,
shuffle=False,
sampler=val_sampler,
num_workers=args.loader_workers,
pin_memory=True,
)
args.in_chan = train_dataset.in_chan
args.out_chan = train_dataset.tgt_chan
args.style_size = train_dataset.style_size
args.noise_style_size = train_dataset.noise_style_size
model = import_attr(args.model, models, callback_at=args.callback_at)
model = model(
2 * sum(args.in_chan),
sum(args.out_chan),
style_size=args.style_size,
scale_factor=args.scale_factor,
**args.misc_kwargs,
)
model.float()
model.to(device)
model = DistributedDataParallel(
model, device_ids=[device], process_group=dist.new_group()
)
criterion = import_attr(args.criterion, nn, models, callback_at=args.callback_at)
criterion = criterion()
criterion.to(device)
optimizer = import_attr(args.optimizer, optim, callback_at=args.callback_at)
optimizer = optimizer(
model.parameters(),
lr=args.lr,
**args.optimizer_args,
)
if args.warmup:
train_scheduler = optim.lr_scheduler.CosineAnnealingLR(
optimizer, T_max=args.epochs, **args.scheduler_args
)
number_warmup_epochs = args.warmup_epochs
def warmup(current_step: int):
return 1 / (2 ** (float(number_warmup_epochs - current_step)))
warmup_scheduler = optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=warmup)
scheduler = optim.lr_scheduler.SequentialLR(
optimizer, [warmup_scheduler, train_scheduler], [number_warmup_epochs]
)
else:
scheduler = optim.lr_scheduler.CosineAnnealingLR(
optimizer, T_max=args.epochs, **args.scheduler_args
)
adv_model = adv_criterion = adv_optimizer = adv_scheduler = None
if args.adv:
adv_model = import_attr(args.adv_model, models, callback_at=args.callback_at)
adv_model = adv_model(
sum(args.in_chan + args.out_chan),
1,
style_size=args.style_size,
scale_factor=args.scale_factor,
**args.misc_kwargs,
)
adv_model.to(device)
adv_model = DistributedDataParallel(
adv_model,
device_ids=[device],
process_group=dist.new_group(),
)
adv_criterion = import_attr(
args.adv_criterion, nn, models, callback_at=args.callback_at
)
adv_criterion = adv_criterion()
adv_criterion.to(device)
adv_optimizer = import_attr(args.optimizer, optim, callback_at=args.callback_at)
adv_optimizer = adv_optimizer(
adv_model.parameters(),
lr=args.adv_lr,
**args.adv_optimizer_args,
)
adv_scheduler = optim.lr_scheduler.CosineAnnealingLR(
adv_optimizer, T_max=args.epochs, **args.scheduler_args
)
if (
args.load_state == ckpt_link
and not os.path.isfile(ckpt_link)
or not args.load_state
):
if args.init_weight_std is not None:
model.apply(init_weights)
start_epoch = 0
pretrained_layers = None
if rank == 0:
min_loss = None
else:
state = torch.load(args.load_state, map_location=device)
if "epoch" in state:
start_epoch = state["epoch"]
else:
start_epoch = 0
load_model_state_dict(
model.module, state["model"], strict=args.load_state_strict
)
if "optimizer" in state:
if args.lr != state["optimizer"]["param_groups"][0]["lr"]:
state["optimizer"]["param_groups"][0]["lr"] = args.lr
optimizer.load_state_dict(state["optimizer"])
if "scheduler" in state:
scheduler_state = state["scheduler"]
if args.lr != scheduler_state["base_lrs"][0]:
scheduler_state["base_lrs"] = [args.lr]
scheduler_state["_last_lr"] = [args.lr]
scheduler.load_state_dict(state["scheduler"])
if args.adv:
if "adv_model" in state:
load_model_state_dict(
adv_model.module, state["adv_model"], strict=args.load_state_strict
)
if "adv_optimizer" in state:
adv_optimizer.load_state_dict(state["adv_optimizer"])
if "adv_scheduler" in state:
adv_scheduler.load_state_dict(state["adv_scheduler"])
torch.set_rng_state(state["rng"].cpu()) # move rng state back
if rank == 0:
min_loss = state["min_loss"]
if args.adv and "adv_model" not in state:
min_loss = None # restarting with adversary wipes the record
print(
"state at epoch {} loaded from {}".format(
state["epoch"], args.load_state
),
flush=True,
)
if args.ema:
if "ema" in state:
ema_state = state["ema"]
else:
ema_state = state["model"]
del state
torch.backends.cudnn.benchmark = True
if args.detect_anomaly:
torch.autograd.set_detect_anomaly(True)
logger = None
if rank == 0:
logger = SummaryWriter()
if rank == 0:
print("pytorch {}".format(torch.__version__))
pprint(vars(args))
sys.stdout.flush()
generator = G(6, 6, 1, 8)
state = torch.load(
"state.pt",
map_location=device,
)
generator.load_state_dict(state["model"])
del state
generator.to(device)
generator.eval()
discriminator = None
# tf32 stuff on A100
if not torch.backends.cuda.matmul.allow_tf32:
torch.backends.cuda.matmul.allow_tf32 = True
if not torch.backends.cudnn.allow_tf32:
torch.backends.cudnn.allow_tf32 = True
for epoch in range(start_epoch, args.epochs):
train_sampler.set_epoch(epoch)
train_loss = train(
epoch,
train_loader,
model,
criterion,
optimizer,
scheduler,
adv_model,
adv_criterion,
adv_optimizer,
adv_scheduler,
logger,
device,
args,
generator=generator,
)
# prof.step()
epoch_loss = train_loss
if args.reduce_lr_on_plateau:
scheduler.step(epoch_loss[2] * epoch_loss[6])
else:
scheduler.step()
if rank == 0:
logger.flush()
if (
min_loss is None or epoch_loss[7] < min_loss
) and epoch >= args.adv_start:
min_loss = epoch_loss[7]
pretrained_layers = None
state = {
"epoch": epoch + 1,
"model": model.module.state_dict(),
"optimizer": optimizer.state_dict(),
"scheduler": scheduler.state_dict(),
"rng": torch.get_rng_state(),
"min_loss": min_loss,
"pretrained_layers": pretrained_layers,
"mtl": mtl.module.state_dict() if args.multitaskloss else None,
}
if args.adv:
state.update(
{
"adv_model": adv_model.module.state_dict(),
"adv_optimizer": adv_optimizer.state_dict(),
"adv_scheduler": adv_scheduler.state_dict(),
}
)
if args.ema:
ema_state = update_ema(model.module.state_dict(), ema_state)
state.update({"ema": ema_state})
state_file = "state_{}.pt".format(epoch + 1)
torch.save(state, state_file)
del state
tmp_link = "{}.pt".format(time.time())
os.symlink(state_file, tmp_link) # workaround to overwrite
os.rename(tmp_link, ckpt_link)
# prof.stop()
dist.destroy_process_group()
def train(
epoch,
loader,
model,
criterion,
optimizer,
scheduler,
adv_model,
adv_criterion,
adv_optimizer,
adv_scheduler,
logger,
device,
args,
generator=None,
):
torch.cuda.reset_peak_memory_stats(device=device)
eul_scale_factor = 2
rank = dist.get_rank()
world_size = dist.get_world_size()
if rank == 0:
epoch_start = torch.cuda.Event(enable_timing=True)
training_start = torch.cuda.Event(enable_timing=True)
epoch_logging_start = torch.cuda.Event(enable_timing=True)
epoch_making_plots_start = torch.cuda.Event(enable_timing=True)
epoch_end = torch.cuda.Event(enable_timing=True)
epoch_start.record()
model.train()
if args.adv:
adv_model.train()
set_requires_grad(model, requires_grad=True)
if generator is not None:
generator.eval()
set_requires_grad(generator, requires_grad=False)
epoch_loss = torch.zeros(100, dtype=torch.float32, device=device)
if rank == 0:
training_start.record()
for i, data in enumerate(loader):
batch = epoch * len(loader) + i + 1
input, target, style = data["input"], data["target"], data["style"]
noise = data["noise"]
input = input.to(device, non_blocking=True)
target = target.to(device, non_blocking=True)
style = style.to(device, non_blocking=True)
noise = noise.to(device, non_blocking=True)
with torch.no_grad():
sr_out = generator(input, style)
sr_out = narrow_like(sr_out, noise)
output = model(sr_out, style, noise)
if i <= 1 and rank == 0:
print("##### batch :", batch)
print("total batch :", len(loader))
print("input shape :", input.shape)
print("output shape :", output.shape)
print("target shape :", target.shape)
if output.shape != target.shape:
output = narrow_like(output, target)
if i <= 1 and rank == 0:
print("narrowed shape :", output.shape)
print("lag loss weight :", args.lag_loss_weight)
print("eul loss weight :", args.eul_loss_weight)
print("adv loss weight :", args.adv_loss_weight)
print("#####", flush=True)
if (i + 1) % 200 == 0 and rank == 0:
print("##### current batch :", i + 1)
print("total batch :", len(loader))
print("input shape :", input.shape)
print("output shape :", output.shape)
print("target shape :", target.shape)
print("narrowed shape :", output.shape)
print("#####", flush=True)
if args.adv:
input = resample(input, scale_factor=args.scale_factor)
input = narrow_like(input, target)
else:
del input
optimizer.zero_grad(set_to_none=True)
disp_lag_out, disp_lag_tgt = output[:, :3], target[:, :3]
vel_lag_out, vel_lag_tgt = output[:, 3:], target[:, 3:]
if args.adv and epoch >= args.adv_start:
# discriminator
set_requires_grad(adv_model, True)
adv_optimizer.zero_grad(set_to_none=True)
score_out = adv_model(output.detach(), style=style, cond=input)
score_tgt = adv_model(target, style=style, cond=input)
adv_loss_real, adv_loss_fake = adv_criterion(score_out, score_tgt)
epoch_loss[8] += adv_loss_real.detach()
epoch_loss[9] += adv_loss_fake.detach()
adv_loss = adv_loss_fake + adv_loss_real
adv_loss.backward()
adv_optimizer.step()
if args.eul:
disp_eul_out, disp_eul_tgt = lag2eul(
dis=[disp_lag_out, disp_lag_tgt],
a=np.float64(style),
eul_scale_factor=2,
rm_dis_mean=True,
)
disp_eul_loss = criterion(disp_eul_out, disp_eul_tgt)
# ---------- displacement loss ----------
disp_lag_loss = criterion(disp_lag_out, disp_lag_tgt)
epoch_loss[0] += disp_lag_loss.detach()
if args.eul:
epoch_loss[1] += disp_eul_loss.detach()
if args.eul:
dis_loss = 1.0 * disp_lag_loss + 1.0 * disp_eul_loss
else:
dis_loss = disp_lag_loss
# ---------- velocity loss ----------
if args.vel_eul:
vel_eul_out, vel_eul_tgt = (
lag2eul(
disp_lag_tgt,
val=vel_lag_out,
a=np.float64(style),
eul_scale_factor=eul_scale_factor,
)[0],
lag2eul(
disp_lag_tgt,
val=vel_lag_tgt,
a=np.float64(style),
eul_scale_factor=eul_scale_factor,
)[0],
)
vel_lag_loss = criterion(vel_lag_out, vel_lag_tgt)
if args.vel_eul:
vel_eul_loss = criterion(vel_eul_out, vel_eul_tgt)
vel_loss = args.lag_loss_weight * (vel_lag_loss + vel_eul_loss)
else:
vel_loss = args.lag_loss_weight * vel_lag_loss
epoch_loss[2] += vel_lag_loss.detach()
if args.vel_eul:
epoch_loss[3] += vel_eul_loss.detach()
total_loss = dis_loss + vel_loss
if args.adv and epoch >= args.adv_start:
set_requires_grad(adv_model, False)
score_out = adv_model(output, style=style, cond=input)
loss_adv = adv_criterion(score_out)
epoch_loss[12] += loss_adv.detach()
total_loss += 3e-2 * loss_adv
total_loss.backward()
optimizer.step()
if i + 1 != len(loader):
try:
del (
disp_lag_out,
disp_lag_tgt,
disp_lag_loss,
disp_eul_out,
disp_eul_tgt,
disp_eul_loss,
dis_loss,
)
del (
vel_lag_out,
vel_lag_tgt,
vel_eul_out,
vel_eul_tgt,
vel_eul_loss,
vel_lag_loss,
vel_eul2_out,
vel_eul2_tgt,
vel_eul2_loss,
vel_loss,
)
del style, noise, sr_out, output, target, total_loss
except:
pass
if rank == 0:
epoch_logging_start.record()
dist.all_reduce(epoch_loss)
epoch_loss /= len(loader) * world_size
lr = scheduler.get_last_lr()[0]
if rank == 0:
# ---------- learning rate log ----------
logger.add_scalar("hyperparam/lr", lr, global_step=epoch + 1)
# ---------- loss log ----------
logger.add_scalar(
"epoch/train/loss/disp/lag", epoch_loss[0], global_step=epoch + 1
)
logger.add_scalar(
"epoch/train/loss/disp/eul", epoch_loss[1], global_step=epoch + 1
)
logger.add_scalar(
"epoch/train/loss/vel/lag", epoch_loss[2], global_step=epoch + 1
)
logger.add_scalar(
"epoch/train/loss/vel/eul", epoch_loss[3], global_step=epoch + 1
)
logger.add_scalar(
"epoch/train/loss/vel/eul2", epoch_loss[4], global_step=epoch + 1
)
if args.adv and epoch >= args.adv_start:
logger.add_scalars(
"epoch/train/loss/adv/",
{
"real": epoch_loss[8],
"fake": epoch_loss[9],
},
global_step=epoch + 1,
)
logger.add_scalars(
"epoch/train/weight",
{
"lag": args.lag_loss_weight,
"eul": args.eul_loss_weight,
"adv": args.adv_loss_weight,
},
global_step=epoch + 1,
)
logger.add_scalar(
"epoch/train/loss/adv", epoch_loss[12], global_step=epoch + 1
)
logger.add_scalars(
"stat/mem/",
{
"max_alloc": epoch_loss[-1],
"max_reserved": epoch_loss[-2],
},
global_step=epoch + 1,
)
epoch_making_plots_start.record()
if (epoch + 1) % 1 == 0:
disp_sr_out = sr_out[:, :3].detach()
vel_sr_out = sr_out[:, 3:].detach()
with torch.no_grad():
sr_eul = lag2eul(
sr_out[:, :3],
a=np.float64(style),
eul_scale_factor=eul_scale_factor,
)[0]
disp_eul_out = lag2eul(
disp_lag_out,
a=np.float64(style),
eul_scale_factor=eul_scale_factor,
)[0]
disp_eul_tgt = lag2eul(
disp_lag_tgt,
a=np.float64(style),
eul_scale_factor=eul_scale_factor,
)[0]
try:
fig = plt_slices(
disp_sr_out[-1],
disp_lag_out[-1],
disp_lag_tgt[-1],
disp_lag_out[-1] - disp_lag_tgt[-1],
sr_eul[-1],
disp_eul_out[-1],
disp_eul_tgt[-1],
disp_eul_out[-1] - disp_eul_tgt[-1],
vel_sr_out[-1],
vel_lag_out[-1],
vel_lag_tgt[-1],
vel_lag_out[-1] - vel_lag_tgt[-1],
title=[
"ai3 disp",
"out disp",
"tgt disp",
"disp diff",
"ai3 eul",
"out eul",
"tgt eul",
"eul diff",
"ai3 vel",
"out vel",
"tgt vel",
"vel diff",
],
**args.misc_kwargs,
)
logger.add_figure("fig/train", fig, global_step=epoch + 1)
fig.clf()
except Exception as error:
print(error)
pass
try:
del (
disp_sr_out,
disp_lag_out,
disp_lag_tgt,
disp_eul_out,
disp_eul_tgt,
sr_eul,
vel_sr_out,
vel_lag_out,
vel_lag_tgt,
)
except:
pass
epoch_end.record()
torch.cuda.synchronize()
logger.add_scalars(
"stat/time/train",
{
"prepare": epoch_start.elapsed_time(training_start) / 1000,
"training": training_start.elapsed_time(epoch_logging_start) / 1000,
"loss logging": epoch_logging_start.elapsed_time(
epoch_making_plots_start
)
/ 1000,
"making plots": epoch_making_plots_start.elapsed_time(epoch_end) / 1000,
},
global_step=epoch + 1,
)
max_memory_alloc = torch.cuda.max_memory_allocated(device=device)
max_memory_reserved = torch.cuda.max_memory_reserved(device=device)
max_memory_alloc = round(max_memory_alloc / (1024**3), 2)
max_memory_reserved = round(max_memory_reserved / (1024**3), 2)
logger.add_scalars(
"stat/mem/",
{
"max_alloc": max_memory_alloc,
"max_reserved": max_momory_reserved,
},
global_step=epoch + 1,
)
return epoch_loss
def dist_init(rank, args):
dist_file = "dist_addr"
if rank == 0:
addr = socket.gethostname()
with socket.socket() as s:
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.bind((addr, 0))
_, port = s.getsockname()
args.dist_addr = "tcp://{}:{}".format(addr, port)
with open(dist_file, mode="w") as f:
f.write(args.dist_addr)
else:
while not os.path.exists(dist_file):
time.sleep(1)
with open(dist_file, mode="r") as f:
args.dist_addr = f.read()
dist.init_process_group(
backend=args.dist_backend,
init_method=args.dist_addr,
world_size=args.world_size,
rank=rank,
)
dist.barrier()
if rank == 0:
os.remove(dist_file)
def init_weights(m):
if isinstance(
m,
(
nn.Linear,
nn.Conv1d,
nn.Conv2d,
nn.Conv3d,
nn.ConvTranspose1d,
nn.ConvTranspose2d,
nn.ConvTranspose3d,
),
):
m.weight.data.normal_(0.0, args.init_weight_std)
elif isinstance(
m,
(
nn.BatchNorm1d,
nn.BatchNorm2d,
nn.BatchNorm3d,
nn.SyncBatchNorm,
nn.LayerNorm,
nn.GroupNorm,
nn.InstanceNorm1d,
nn.InstanceNorm2d,
nn.InstanceNorm3d,
),
):
if m.affine:
m.weight.data.normal_(1.0, args.init_weight_std)
m.bias.data.fill_(0)
def set_requires_grad(module, requires_grad=False):
for param in module.parameters():
param.requires_grad = requires_grad
def get_grads(model):
"""gradients of the weights of the first and the last layer"""
grads = list(p.grad for n, p in model.named_parameters() if ".weight" in n)
grads = [grads[0], grads[-1]]
grads = [g.detach().norm() for g in grads]
return grads
def get_weight(model):
"""weights of the first and the last layer"""
for n, p in model.named_parameters():
if "log" in n:
log_vars = p.detach().clone()
stds = torch.exp(log_vars) ** (1 / 2)
weights = 1 / (stds**2)
return weights
def update_ema(model, ema_model, alpha=0.9999):
for ema_param, param in zip(ema_model.values(), model.values()):
ema_param.data.mul_(alpha).add_(param.data, alpha=1 - alpha)
return ema_model
|
xwzhang98REPO_NAMESREmulatorPATH_START.@SREmulator_extracted@SREmulator-main@map2map@map2map@train.py@.PATH_END.py
|
{
"filename": "README.md",
"repo_name": "gomesdasilva/ACTIN",
"repo_path": "ACTIN_extracted/ACTIN-master/README.md",
"type": "Markdown"
}
|
# ACTIN 1.3.9
*This version is no longer being maintained - please use [ACTIN2](https://github.com/gomesdasilva/ACTIN2) instead.*
### Activity Indices Calculator
[](https://doi.org/10.21105/joss.00667)
[](https://doi.org/10.5281/zenodo.1478610)
Reads fits files from the HARPS, HARPS-N and ESPRESSO spectrographs, or rdb tables, and outputs user-defined spectral activity indices (along with other relevant data).
### Requires the following Python modules:
- numpy
- matplotlib
- astropy
### Installation:
Clone the github repository to a directory of your choice and install via `python setup.py install`.
### Configuration file:
The `config_lines.txt` file is the line configuration file (instructions inside). It is used to add line parameters so that any index can be calculated, as long as the line cores and bandpasses fall inside the spectral range and spectral-order range (for 2D spectra) of the spectrograph. ACTIN checks this at startup and gives an error message if the line parameters don't match the spectra.
Example of a `config_lines.txt` file:

Any index `ind_id` is calculated using the formula:

where Li and Rj are the fluxes in the `ind_var` main and reference lines, as indicated in the config file, and ci and kj are the `ln_c` constants that multiply the main and reference lines, respectively.
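In plain notation, reconstructed only from the description above (the exact expression should be checked against the image and the paper), the index is a ratio of weighted fluxes:
```
I_{ind} = \frac{\sum_i c_i \, L_i}{\sum_j k_j \, R_j}
```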
The description of the flux calculation is available in the Appendix A of [Gomes da Silva et al. 2021](https://ui.adsabs.harvard.edu/abs/2020arXiv201210199G/abstract).
`I_CaII` is the classical S-index [Duncan et al. 1991](https://ui.adsabs.harvard.edu/abs/1991ApJS...76..383D/abstract) (not calibrated to the Mt. Wilson scale).
`I_Ha16` and `I_Ha06` are Halpha indices with 1.6 and 0.6 ang central bandpasses, respectively.
`I_CaI` is an activity insensitive line.
The line parameters for `I_NaI`, `I_Ha16`, and `I_HeI` come from [Gomes da Silva et al. 2011](https://ui.adsabs.harvard.edu/abs/2011A%26A...534A..30G/abstract).
The config file is available from the directory each OS uses for storing user data (*).
To get your path to the config file call `actin` without any arguments.
The file can be copied to another directory, modified, and then used by adding `-cf dir/filename` when running `actin`.
### Quick start:
Usage:
```
actin -h [help] -f [files_list] -i [indices_list] -rv [rv_list] -cf [config_file] -s [output_path] -lp [output_path/same/show] -obj [object_name] -tl [target_list] -del [True/False] -t [test_file_type] -frc [True/False] -sp [True/False]
```
Arguments:
`-h` : Gives a description of the arguments available.
`-f` : List of files (formats S1D, S2D, e2ds, s1d, s1d_*_rv, or ADP) or rdb table(s) with headers `obj`, `date`, `bjd`, `wave`, `flux`, `error_pixel` (optional) to be read.
`-i` : List of indices to calculate. Indices ids must match the ones in the config file `config_lines.txt`.
`-rv` : List of RV values used to calibrate the wavelength. If not given, RVs are taken from CCF files when available.
`-cf` : Path to the configuration file. If not given, the configuration file is read from the installation directory. To find the installation directory, call `actin` without arguments.
`-s` : Save output to .rdb table in specified path.
`-lp` : Save plots of the lines used to calculate the indices to the specified path. If `same`, uses the path specified in `-s`. If `show`, shows the plots without saving, which is useful to analyse the lines in a single spectrum.
`-tl` : List of stars to select from `files`.
`-del` : If `True` deletes any output file (data and logs only; only files that match current ACTIN call will be deleted) before reading the file list and saving output.
`-obj` : Object name to override the one from fits files in case the star has multiple names in different files (ex. Proxima, ProximaCen, Gl551). BE CAREFUL WHEN READING FILES FROM MULTIPLE STARS.
`-t` : Tests the program using the test files. Options are `S2D`, `S1D`, `e2ds`, `s1d`, `adp` or `rdb` to test these type of files.
`-frc` : Use fractional pixels if `True` (default), or integer pixels if `False`. If set to `False` and calculating the I_CaII index as given in the original config_lines.txt file, ACTIN will simulate the values of 's_raw' from the HARPS pipeline. Note, however, that this option might induce artificial variations in the indices due to the use of integer pixels in the bandpasses.
`-sp` : If `True`, saves time-series and multi-plots to the same directory as `-s`.
#### Important:
When running ACTIN a second time with the same data on the same output directory, use `-del True`; otherwise the program will detect the same dates, ignore the measurements, and not give any output.
When arguments accept lists, they can be given in the command line, e.g. `-tl Gl273 Gl581`, or from an ASCII file by using, e.g. `-tl $(cat target_list.txt)` where `target_list.txt` is a file with one column with the rows `Gl273` and `Gl581`.
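For example, assuming a file `target_list.txt` with the rows `Gl273` and `Gl581`, the two calls below are equivalent:
```
actin -f ../fits/*/*e2ds_A.fits -i I_CaII -tl Gl273 Gl581 -s ../output
actin -f ../fits/*/*e2ds_A.fits -i I_CaII -tl $(cat target_list.txt) -s ../output
```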
### Testing the code with minimum arguments:
The example below will test the code using the test files provided in the package.
```
actin -t e2ds
```
Can also use the options `s1d`, `S1D`, `S2D`, `ADP`, and `rdb` to test on other file types.
### Example for multiple files:
```
actin -f ../fits/*/*e2ds_A.fits -i I_CaII I_Ha -s ../output -del True -tl Gl273 Gl581
```
This will execute ACTIN for all the subdirectories inside `../fits/` with files ending with `e2ds_A.fits`, calculate the indices `I_CaII` and `I_Ha`, output the data to `../output/star_names`, and, before running the code, delete any output file that was previously there, in this case `Gl273_HARPS_e2ds_data.rdb` and `Gl581_HARPS_e2ds_data.rdb`. Only fits files belonging to the stars chosen in `-tl` will be read, in this case `Gl273` and `Gl581`. Since `-frc` is True by default, fractional pixels will be used to compute the indices.
### Using ACTIN as a python module:
To use ACTIN as a module use `import actin.actin as actin` and then call the function:
```
actin.actin(files, calc_index=None, rv_in=None, config_file=None, save_output=False, ln_plts=False, obj_name=None, targ_list=None, del_out=False, frac=True, test=False, save_plots=False)
```
The arguments are the same as the ones described above (with slightly different names in some cases).
Note that some arguments need to be given as lists when they accept lists, e.g. `calc_index` should be `calc_index=['I_CaII']` even when using only one index.
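A minimal sketch of a module call (the file path is hypothetical, and `save_output` is assumed here to take the output path, mirroring `-s`):
```
import actin.actin as actin

# Hypothetical e2ds file; calc_index must be a list even for a single index
files = ["../fits/2016-01-01/HARPS.2016-01-01_e2ds_A.fits"]
actin.actin(files, calc_index=["I_CaII", "I_Ha16"], save_output="../output")
```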
### Output rdb table:
The output rdb file has headers with data from the fits headers of the files (data headers) and data calculated by ACTIN (index headers).
#### Data headers:
Some of these headers might not be available if CCF files are not provided.
`obj` : Object targeted
`instr` : Instrument used
`obs_date` : Observation date
`bjd` : Barycentric Julian Date [days]
`rv` : Radial Velocity [m/s]
`rv_err` : Error of `rv` [m/s]
`fwhm` : Full Width at Half Maximum of the CCF profile [m/s]
`fwhm_err` : Error of `fwhm` [m/s]
`cont` : Contrast of CCF profile [%]
`cont_err` : Error of `cont` [%]
`bis` : Bisector inverse span of the CCF profile [m/s]
`bis_err` : Error of `bis` [m/s]
`ccf_noise` : CCF noise [m/s]
`median_snr` : Median signal-to-noise of spectrum
`data_flg` : Flag set to `noDeblazed` if the spectrum was not deblazed
`bv` : B-V colour
`airmass` : Airmass
`exptime` : Exposure time
#### Index headers:
These headers depend on the choices of indices to calculate.
`<ind_id>` is the identification of the index as in the configuration file.
`<ln_id>` is the identification of the line as in the configuration file.
`I_<ind_id>` : Value of the index <ind_id>
`I_<ind_id>_err` : Error of index
`I_<ind_id>_snr` : Median signal-to-noise ratio of the orders used to calculate the index
`I_<ind_id>_flg` : Flag with value `negFlux` if negative values of flux found in the bandpasses of the index
`I_<ind_id>_mfracneg` : Maximum fraction of pixels with negative flux found in the bandpasses of the index
`<ln_id>_npixels` : Number of pixels (fractional) inside the bandpass associated with the line <ln_id>.
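As an illustration of how the output can be read back (the file path follows the example above; the rdb format is assumed to be tab-separated with a second line of dashes under the header):
```
import pandas as pd

# Skip the line of dashes that rdb tables place under the header row
tbl = pd.read_csv("../output/Gl273/Gl273_HARPS_e2ds_data.rdb", sep="\t", skiprows=[1])
print(tbl[["bjd", "I_CaII", "I_CaII_err"]].head())
```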
---
(*)
For OSX: `~/Library/Application Support/<AppName>`
For Windows: `C:\Documents and Settings\<User>\Application Data\Local Settings\<AppAuthor>\<AppName>` or possibly `C:\Documents and Settings\<User>\Application Data\<AppAuthor>\<AppName>`
For Linux: `~/.local/share/<AppName>`
---
### Citing
If you use ACTIN in your research, please cite
Gomes da Silva, J. et al. JOSS 3, 667G (2018) (DOI: 10.21105/joss.00667)
or use the ready-made BibTex entry
```
@ARTICLE{2018JOSS....3..667G,
author = {{Gomes da Silva}, Jo{\~a}o and {Figueira}, Pedro and {Santos}, Nuno and
{Faria}, Jo{\~a}o},
title = "{ACTIN: A tool to calculate stellar activity indices}",
journal = {The Journal of Open Source Software},
keywords = {Astrophysics - Instrumentation and Methods for Astrophysics, Astrophysics - Solar and Stellar Astrophysics},
year = "2018",
month = "Nov",
volume = {3},
number = {31},
pages = {667},
doi = {10.21105/joss.00667},
archivePrefix = {arXiv},
eprint = {1811.11172},
primaryClass = {astro-ph.IM},
adsurl = {https://ui.adsabs.harvard.edu/abs/2018JOSS....3..667G},
adsnote = {Provided by the SAO/NASA Astrophysics Data System}
}
```
### Published papers using ACTIN
https://ui.adsabs.harvard.edu/abs/2018JOSS....3..667G/citations
---
For issues, bugs or if you would like to contribute to the development of this code contact Joao.Silva(at)astro.up.pt
|
gomesdasilvaREPO_NAMEACTINPATH_START.@ACTIN_extracted@ACTIN-master@README.md@.PATH_END.py
|
{
"filename": "test_cosmo_docs_basic.py",
"repo_name": "CobayaSampler/cobaya",
"repo_path": "cobaya_extracted/cobaya-master/tests/test_cosmo_docs_basic.py",
"type": "Python"
}
|
"""
Testing and automatic generation of basic cosmological examples in the docs.
"""
import os
import sys
from cobaya.yaml import yaml_dump, yaml_load
from cobaya.input import is_equal_info
from cobaya.cosmo_input import create_input
from cobaya.tools import sort_cosmetic
from .test_docs_example_quickstart import docs_folder
path = os.path.join(docs_folder, "src_examples", "cosmo_basic")
file_pre = "basic_"
preset_pre = "planck_NPIPE_CamSpec_"
def test_cosmo_docs_basic():
flag = True
for theo in ["camb", "classy"]:
info_new = create_input(preset=preset_pre + theo)
info_yaml_new = yaml_dump(sort_cosmetic(info_new))
file_path = os.path.join(path, file_pre + theo + ".yaml")
with open(file_path) as docs_file:
info_yaml_docs = docs_file.read()
info_docs = yaml_load(info_yaml_docs)
if not is_equal_info(info_new, info_docs, strict=True, print_not_log=True):
with open(file_path, "w") as docs_file:
docs_file.write(info_yaml_new)
flag = False
print("OLD:\n%s" % info_yaml_docs)
print("----------------------------------------")
print("NEW:\n%s" % info_yaml_new)
sys.stdout.flush()
assert flag, ("Differences in example input file. "
"Files have been re-generated; check out your git diff.")
|
CobayaSamplerREPO_NAMEcobayaPATH_START.@cobaya_extracted@cobaya-master@tests@test_cosmo_docs_basic.py@.PATH_END.py
|
{
"filename": "response.py",
"repo_name": "splus-collab/splus_filters",
"repo_path": "splus_filters_extracted/splus_filters-master/archive/response_curves/response_curves_2017/response.py",
"type": "Python"
}
|
# Herpich F.R. 12/08/16
from astropy.io import fits
import matplotlib.pyplot as plt
import numpy as np
from scipy.interpolate import InterpolatedUnivariateSpline as IUS
from scipy.interpolate import interp1d
import os
filters = ['20140604C080F062502.fits', '20140606C080F051502.fits',
'20140609C080F066002.fits', '20150429C080F037802.fits',
'20150504C080zSDSS02.fits', '20150506C080iSDSS02.fits',
'20150514C080F043002.fits', '20150918C080uJAVA02.fits',
'20150922C080F039502.fits', '20150922C080F086102.fits',
'20150923C080F041002.fits', '20150924C080gSDSS02.fits']
def str2float(fits_file):
mirror_col1 = []
mirror_col2 = []
mirror_reflec = fits.open(fits_file)[1].data
for i in range(len(mirror_reflec.col1)):
mirror_col1.append(np.float(mirror_reflec.col1[i]))
if np.float(mirror_reflec.col2[i]) < 0.:
mirror_col2.append(0.)
else:
mirror_col2.append(np.float(mirror_reflec.col2[i]) / 100.)
return mirror_col1, mirror_col2
def calc_filters(filters):
#wave_range = np.arange(300., 1201., 1)
#order = 1
##################### for sky ########################
atmosph_transm = fits.open('sky_trans.fits')[1].data
#at_ius = IUS(atmosph_transm.col1, atmosph_transm.col2, k = order, ext=0)
at_ius = interp1d(atmosph_transm.col1, atmosph_transm.col2)
#new_atm_trans = at_ius(wave_range)
##################### for mirror ########################
mirror_col1, mirror_col2 = str2float('mirror_reflectance.fits')
#mr_ius = IUS(mirror_col1, mirror_col2, k = order)
# the reflectance below was obtained from:
# https://laserbeamproducts.wordpress.com/2014/06/19/reflectivity-of-aluminium-uv-visible-and-infrared/
#mirror_col1 = np.array([248, 400, 532, 633, 800, 900, 1000, 3000])
#mirror_col2 = np.array([.926, .92, .916, .907, .868, .89, .94, .98])
mr_ius = interp1d(mirror_col1, mirror_col2)
#new_mirror_reflec = mr_ius(wave_range)
# measured
mirror_measured_wave = np.array([300., 350., 420., 470., 530., 650., 880.,
950., 1000., 1100])
mirror_measured_flux = np.array([.9126, .9126, .9126, .9126,
.911, .8725, .7971, .82, .84, .85])
mr_meas = interp1d(mirror_measured_wave, mirror_measured_flux)
mask = (mirror_col1 > min(mirror_measured_wave)) & (mirror_col1 < max(mirror_measured_wave))
measur_interp = mr_meas(np.array(mirror_col1)[mask])
##################### for ccd ########################
ccd_col1, ccd_col2 = str2float('ccd_curve.fits')
#ccd_ius = IUS(ccd_wave, ccd_flux, k = order, ext=3)
ccd_ius = interp1d(ccd_col1, ccd_col2)
#new_ccd_eff = ccd_ius(wave_range)
# measured (????)
ccd_measured_wave = np.array([300., 350., 400., 450., 500., 550., 600.,
650., 725., 800., 850., 900, 970.])
ccd_measured_flux = np.array([.2, .45, .90, .93, .88, .88, .91, .92, .95,
.88, .8, .6, .3])
ccd_measured_wave2 = np.array([350., 400., 500., 650., 900])
ccd_measured_flux2 = np.array([.39, .84, .92, .924, .61])
fig = plt.figure()
ax = fig.add_subplot(111)
for nof in filters:
t = fits.open(nof)[1].data
#mask = (wave_range >= t.col1.min()) & (wave_range <= t.col1.max())
xmin = np.array([min(atmosph_transm.col1), min(mirror_col1),
min(ccd_col1), min(t.col1)])
xmax = np.array([max(atmosph_transm.col1), max(mirror_col1),
max(ccd_col1), max(t.col1)])
wave_range = np.arange(max(xmin), min(xmax), 1.)
col = t.col3 / t.col2
k = 4
while k < 103:
ncol = 'col' + str(k)
col += t[ncol] / t.col2
k += 1
medium_col = col / 100.
##################### for filter ########################
#tex = IUS(t.col1, medium_col, k = order)
tex = interp1d(t.col1, medium_col)
transm = tex(wave_range)
##################### for sky ########################
# typical
new_atm_trans = at_ius(wave_range)
##################### for mirror ########################
new_mirror_reflec = mr_ius(wave_range)
mirror_measured_wave = np.array([300., 350., 420., 470., 530., 650., 880.])
mirror_measured_flux = np.array([.9126, .9126, .9126, .9126,
.911, .8725, .7971])
##################### for ccd ########################
new_ccd_eff = ccd_ius(wave_range)
################ calc new transmittance for each filter ################
new_filter_trans = transm * new_atm_trans * new_mirror_reflec * new_ccd_eff
# writing fits files
filter_name = nof[12:-7] + '.fits'
column1 = fits.Column(name='wavelength', format='E', array = wave_range)
column2 = fits.Column(name='transmit', format='E', array = new_filter_trans)
cols = fits.ColDefs([column1, column2])
tbhdu = fits.BinTableHDU.from_columns(cols)
if os.path.isfile(filter_name):
os.remove(filter_name)
tbhdu.writeto(filter_name)
print '---'
print 'saving filter transmittance', filter_name, '\n'
# writing ascii files
nascii_file = nof[:-5] + '.ascii'
f = open(nascii_file, 'w')
f.write('wavelength transmittance\n')
for i in range(len(wave_range)):
linha = '%s %s\n' % (wave_range[i], new_filter_trans[i])
f.write(linha)
print 'saving filter transmittance', nascii_file, '\n'
f.close()
ax.plot(wave_range * 10., new_filter_trans, label = nof[12:-7])
ax.plot(atmosph_transm.col1 * 10., atmosph_transm.col2, c = 'c',
label = 'atmosphere')
ax.plot(np.array(mirror_col1) * 10., mirror_col2, c = 'k', label = 'mirror')
ax.plot(mirror_measured_wave * 10., mirror_measured_flux, 'o', c = 'k',
label = 'meas mirror')
ax.plot(np.array(mirror_col1)[mask] * 10., measur_interp, '--', c = 'k',
label = 'meas mirror')
ax.plot(np.array(ccd_col1) * 10., ccd_col2, c = 'r', label = 'ccd eff')
ax.plot(ccd_measured_wave * 10., ccd_measured_flux, 's',
c = 'r', label = 'meas ccd')
ax.plot(ccd_measured_wave2 * 10., ccd_measured_flux2, 'd',
c='y', label = 'meas ccd2')
ax.set_xlim(2900, 12000)
ax.set_ylim(-.01, 1.)
ax.set_xlabel(r'$\mathrm{\lambda\ [\AA]}$')
plt.legend(loc = 'upper right', fontsize = 10)
plt.show()
plt.close()
return
calc_filters(filters)
def get_desv(filters):
n = 1
fig = plt.figure(figsize = (8, 7))
for nof in filters:
t = fits.open(nof)[1].data
ax = fig.add_subplot(6, 2, n)
for i in range(len(t.col3)):
if t.col3[i] == max(t.col3):
ax.plot(t.T[i][2:-1], '-', label = nof[12:-7])
if n in [11, 12]:
ax.set_xlabel(r'$\mathrm{pixel}$')
else:
plt.setp(ax.get_xticklabels(), visible = False)
n += 1
plt.legend(loc = 'upper right', fontsize = 10)
#labels = [item.get_text() for item in ax.get_yticklabels()]
#print labels
#ax.set_yticklabels(labels[1:-1])
plt.tight_layout()
plt.subplots_adjust(hspace = 0.1)
plt.show()
plt.close()
return
#get_desv(filters)
|
splus-collabREPO_NAMEsplus_filtersPATH_START.@splus_filters_extracted@splus_filters-master@archive@response_curves@response_curves_2017@response.py@.PATH_END.py
|
{
"filename": "surface_plot.py",
"repo_name": "yt-project/yt",
"repo_path": "yt_extracted/yt-main/doc/source/cookbook/surface_plot.py",
"type": "Python"
}
|
import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.mplot3d import Axes3D # noqa: F401
from mpl_toolkits.mplot3d.art3d import Poly3DCollection
import yt
# Load the dataset
ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
# Create a sphere object centered on the highest density point in the simulation
# with radius 1 Mpc
sphere = ds.sphere("max", (1.0, "Mpc"))
# Identify the isodensity surface in this sphere with density = 1e-24 g/cm^3
surface = ds.surface(sphere, ("gas", "density"), 1e-24)
# Color this isodensity surface according to the log of the temperature field
colors = yt.apply_colormap(np.log10(surface["gas", "temperature"]), cmap_name="hot")
# Create a 3D matplotlib figure for visualizing the surface
fig = plt.figure()
ax = fig.add_subplot(projection="3d")
p3dc = Poly3DCollection(surface.triangles, linewidth=0.0)
# Set the surface colors in the right scaling [0,1]
p3dc.set_facecolors(colors[0, :, :] / 255.0)
ax.add_collection(p3dc)
# Let's keep the axis ratio fixed in all directions by taking the maximum
# extent in one dimension and make it the bounds in all dimensions
max_extent = (surface.vertices.max(axis=1) - surface.vertices.min(axis=1)).max()
centers = (surface.vertices.max(axis=1) + surface.vertices.min(axis=1)) / 2
bounds = np.zeros([3, 2])
bounds[:, 0] = centers[:] - max_extent / 2
bounds[:, 1] = centers[:] + max_extent / 2
ax.auto_scale_xyz(bounds[0, :], bounds[1, :], bounds[2, :])
# Save the figure
plt.savefig(f"{ds}_Surface.png")
|
yt-projectREPO_NAMEytPATH_START.@yt_extracted@yt-main@doc@source@cookbook@surface_plot.py@.PATH_END.py
|
{
"filename": "schedules.py",
"repo_name": "PrefectHQ/prefect",
"repo_path": "prefect_extracted/prefect-main/src/prefect/deployments/schedules.py",
"type": "Python"
}
|
from typing import TYPE_CHECKING, Any, List, Optional, Sequence, Union
from prefect.client.schemas.actions import DeploymentScheduleCreate
from prefect.client.schemas.schedules import is_schedule_type
if TYPE_CHECKING:
from prefect.client.schemas.schedules import SCHEDULE_TYPES
FlexibleScheduleList = Sequence[
Union[DeploymentScheduleCreate, dict[str, Any], "SCHEDULE_TYPES"]
]
def create_deployment_schedule_create(
schedule: "SCHEDULE_TYPES",
active: Optional[bool] = True,
) -> DeploymentScheduleCreate:
"""Create a DeploymentScheduleCreate object from common schedule parameters."""
return DeploymentScheduleCreate(
schedule=schedule,
active=active if active is not None else True,
)
def normalize_to_deployment_schedule_create(
schedules: Optional["FlexibleScheduleList"],
) -> List[DeploymentScheduleCreate]:
normalized: list[DeploymentScheduleCreate] = []
if schedules is not None:
for obj in schedules:
if is_schedule_type(obj):
normalized.append(create_deployment_schedule_create(obj))
elif isinstance(obj, dict):
normalized.append(create_deployment_schedule_create(**obj))
elif isinstance(obj, DeploymentScheduleCreate):
normalized.append(obj)
elif _is_server_schema(obj):
raise ValueError(
"Server schema schedules are not supported. Please use "
"the schedule objects from `prefect.client.schemas.schedules`"
)
else:
raise ValueError(
"Invalid schedule provided. Must be a schedule object, a dict,"
"or a `DeploymentScheduleCreate` object"
)
return normalized
def _is_server_schema(obj: Any):
return obj.__class__.__module__.startswith("prefect.server.schemas")
|
PrefectHQREPO_NAMEprefectPATH_START.@prefect_extracted@prefect-main@src@prefect@deployments@schedules.py@.PATH_END.py
|
{
"filename": "hdf5.py",
"repo_name": "franciscovillaescusa/Pylians",
"repo_path": "Pylians_extracted/Pylians-master/Examples/hdf5.py",
"type": "Python"
}
|
import numpy as np
import h5py
z = 3.0
a = np.arange(10)
# write a hdf5 file
f = h5py.File('my_file_z=%.3f.hdf5'%z, 'w')
f.create_dataset('Mass', data=a)
f.close()
# read hdf5 file
f = h5py.File('M_HI_new_75_1820_z=%.3f.hdf5'%z, 'r')
M_HI = f['M_HI'][:]
M = f['Mass'][:]
R = f['R'][:]
f.close()
|
franciscovillaescusaREPO_NAMEPyliansPATH_START.@Pylians_extracted@Pylians-master@Examples@hdf5.py@.PATH_END.py
|
{
"filename": "example_XOR_classification.py",
"repo_name": "ahmedfgad/GeneticAlgorithmPython",
"repo_path": "GeneticAlgorithmPython_extracted/GeneticAlgorithmPython-master/examples/nn/example_XOR_classification.py",
"type": "Python"
}
|
import numpy
import pygad.nn
"""
This project creates a neural network where the architecture has input and dense layers only. More layers will be added in the future.
The project only implements the forward pass of a neural network and no training algorithm is used.
For training a neural network using the genetic algorithm, check this project (https://github.com/ahmedfgad/NeuralGenetic) in which the genetic algorithm is used for training the network.
Feel free to leave an issue in this project (https://github.com/ahmedfgad/NumPyANN) in case something is not working properly or to ask for questions. I am also available for e-mails at ahmed.f.gad@gmail.com
"""
# Preparing the NumPy array of the inputs.
data_inputs = numpy.array([[1, 1],
[1, 0],
[0, 1],
[0, 0]])
# Preparing the NumPy array of the outputs.
data_outputs = numpy.array([0,
1,
1,
0])
# The number of inputs (i.e. feature vector length) per sample
num_inputs = data_inputs.shape[1]
# Number of outputs per sample
num_outputs = 2
HL1_neurons = 2
# Building the network architecture.
input_layer = pygad.nn.InputLayer(num_inputs)
hidden_layer1 = pygad.nn.DenseLayer(num_neurons=HL1_neurons, previous_layer=input_layer, activation_function="relu")
output_layer = pygad.nn.DenseLayer(num_neurons=num_outputs, previous_layer=hidden_layer1, activation_function="softmax")
# Training the network.
pygad.nn.train(num_epochs=100,
last_layer=output_layer,
data_inputs=data_inputs,
data_outputs=data_outputs,
learning_rate=0.01)
# Using the trained network for predictions.
predictions = pygad.nn.predict(last_layer=output_layer, data_inputs=data_inputs)
# Calculating some statistics
num_wrong = numpy.where(predictions != data_outputs)[0]
num_correct = data_outputs.size - num_wrong.size
accuracy = 100 * (num_correct/data_outputs.size)
print(f"Number of correct classifications : {num_correct}.")
print(f"Number of wrong classifications : {num_wrong.size}.")
print(f"Classification accuracy : {accuracy}.")
|
ahmedfgadREPO_NAMEGeneticAlgorithmPythonPATH_START.@GeneticAlgorithmPython_extracted@GeneticAlgorithmPython-master@examples@nn@example_XOR_classification.py@.PATH_END.py
|
{
"filename": "_cmax.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/scattergeo/marker/_cmax.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class CmaxValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(self, plotly_name="cmax", parent_name="scattergeo.marker", **kwargs):
super(CmaxValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
implied_edits=kwargs.pop("implied_edits", {"cauto": False}),
**kwargs,
)
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@scattergeo@marker@_cmax.py@.PATH_END.py
|
{
"filename": "test_sen2nbar.py",
"repo_name": "ESDS-Leipzig/sen2nbar",
"repo_path": "sen2nbar_extracted/sen2nbar-main/tests/test_sen2nbar.py",
"type": "Python"
}
|
import unittest
import cubo
import planetary_computer as pc
import pystac_client
import stackstac
import xarray as xr
from sen2nbar.nbar import nbar_cubo, nbar_stackstac
class Test(unittest.TestCase):
"""Tests for the sen2nbar package."""
def test_stackstac(self):
"""Test the nbar_stackstac with Planetary Computer"""
endpoint = "https://planetarycomputer.microsoft.com/api/stac/v1"
collection = "sentinel-2-l2a"
bounds = (-148.565368, 60.800723, -147.443389, 61.183638)
catalog = pystac_client.Client.open(endpoint, modifier=pc.sign_inplace)
area_of_interest = {
"type": "Polygon",
"coordinates": [
[
[bounds[0], bounds[1]],
[bounds[2], bounds[1]],
[bounds[2], bounds[3]],
[bounds[0], bounds[3]],
[bounds[0], bounds[1]],
]
],
}
items = catalog.search(
collections=[collection],
intersects=area_of_interest,
datetime="2019-06-01/2019-08-01",
query={"eo:cloud_cover": {"lt": 10}},
).get_all_items()
stack = stackstac.stack(
items, assets=["B05", "B06", "B07"], bounds_latlon=bounds, resolution=20
)
da = nbar_stackstac(stack, stac=endpoint, collection=collection)
self.assertIsInstance(da, xr.DataArray)
def test_cubo(self):
"""Test the nbar_cubo"""
da = nbar_cubo(
cubo.create(
lat=50,
lon=10,
collection="sentinel-2-l2a",
bands=["B02", "B03", "B04"],
start_date="2021-06-01",
end_date="2021-06-10",
edge_size=32,
resolution=10,
)
)
self.assertIsInstance(da, xr.DataArray)
def test_cubo_not_all_angles(self):
"""Test the nbar_cubo"""
da = nbar_cubo(
cubo.create(
lat=64.25611,
lon=19.7745,
collection="sentinel-2-l2a",
bands=["B02", "B03", "B04"],
start_date="2016-01-01",
end_date="2016-03-01",
edge_size=512,
resolution=10,
)
)
self.assertIsInstance(da, xr.DataArray)
def test_cubo_not_all_angles_duplicated_indices(self):
"""Test the nbar_cubo"""
da = nbar_cubo(
cubo.create(
lat=51.079225,
lon=10.452173,
collection="sentinel-2-l2a",
bands=["B02", "B03", "B04"],
start_date="2016-01-01",
end_date="2016-03-01",
edge_size=512,
resolution=10,
)
)
self.assertIsInstance(da, xr.DataArray)
def test_stac_c_factor_reprojection(self):
"""Test the nbar_cubo when items have different epsg"""
da = nbar_cubo(
cubo.create(
lat=60.64183,
lon=23.95952,
collection="sentinel-2-l2a",
bands=["B04", "B03", "B02"],
start_date=f"2016-01-01",
end_date=f"2016-03-31",
edge_size=512,
resolution=10,
)
)
self.assertIsInstance(da, xr.DataArray)
if __name__ == "__main__":
unittest.main()
|
ESDS-LeipzigREPO_NAMEsen2nbarPATH_START.@sen2nbar_extracted@sen2nbar-main@tests@test_sen2nbar.py@.PATH_END.py
|
{
"filename": "_size.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/indicator/title/font/_size.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class SizeValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(
self, plotly_name="size", parent_name="indicator.title.font", **kwargs
):
super(SizeValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "plot"),
min=kwargs.pop("min", 1),
**kwargs,
)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@indicator@title@font@_size.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/ohlc/decreasing/__init__.py",
"type": "Python"
}
|
import sys
from typing import TYPE_CHECKING
if sys.version_info < (3, 7) or TYPE_CHECKING:
from ._line import LineValidator
else:
from _plotly_utils.importers import relative_import
__all__, __getattr__, __dir__ = relative_import(
__name__, [], ["._line.LineValidator"]
)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@ohlc@decreasing@__init__.py@.PATH_END.py
|
{
"filename": "kmpfit_varreduct_wol.py",
"repo_name": "kapteyn-astro/kapteyn",
"repo_path": "kapteyn_extracted/kapteyn-master/doc/source/EXAMPLES/kmpfit_varreduct_wol.py",
"type": "Python"
}
|
#!/usr/bin/env python
# Demonstrate use of variance reduction to examine a model.
# Use data example from Bevington & Robinson with 1 outlier
from numpy.random import normal
import numpy
from kapteyn import kmpfit
from matplotlib.pyplot import figure, show, rc
def model_lin(p, x):
a, b = p
y = a + b*x
return y
def model_par(p, x):
a, b, c = p
y = a + b*x + c*x*x
return y
def residuals(p, data):
x, y, err, mod = data
if mod == 1:
return (y-model_lin(p,x))/err
else:
return (y-model_par(p,x))/err
# Data
x = numpy.array([1,2,3,4,5,6,7,8,9, 10.0])
y = numpy.array([2.047, -0.966, -1.923, -1.064, 2.048, 6.573, 13.647, 24.679, 34.108, 44.969])
err = numpy.array([0.102, 0.048, 0.096, 0.053, 0.102, 0.329, 0.682, 1.234, 1.705, 2.248])
#err = numpy.ones(len(y))
# Do the fit and find the Variance Reduction
params0 = (1.6,0.5)
fitter = kmpfit.Fitter(residuals=residuals, data=(x,y,err,1))
fitter.fit(params0=params0)
print("======== Fit straight line ==========")
print("Params: ", fitter.params)
print("Uncertainties: ", fitter.xerror)
print("Errors assuming red.chi^2=1: ", fitter.stderr)
print("Iterations: ", fitter.niter)
print("Function ev: ", fitter.nfev)
print("dof: ", fitter.dof)
print("chi^2, rchi2: ", fitter.chi2_min, fitter.rchi2_min)
print("Status: ", fitter.status)
N = len(y)
varmod = (y-model_lin(fitter.params,x))**2.0
y_av = y.sum()/N
vardat = (y-y_av)**2.0
vr0 = 100.0*(1-(varmod.sum()/vardat.sum()))
print("Variance reduction (%):", vr0)
params0 = (1,1, 0)
fitter2 = kmpfit.Fitter(residuals=residuals, data=(x,y,err,2))
fitter2.fit(params0=params0)
print("\n======== Fit results Parabola ==========")
print("Params: ", fitter2.params)
print("Uncertainties: ", fitter2.xerror)
print("Errors assuming red.chi^2=1: ", fitter2.stderr)
print("Iterations: ", fitter2.niter)
print("Function ev: ", fitter2.nfev)
print("dof: ", fitter2.dof)
print("chi^2, rchi2: ", fitter2.chi2_min, fitter2.rchi2_min)
print("Status: ", fitter2.status)
N = len(y)
varmod = (y-model_par(fitter2.params,x))**2.0
y_av = y.sum()/N
vardat = (y-y_av)**2.0
vr1 = 100.0*(1-(varmod.sum()/vardat.sum()))
print("variance reduction (%):", vr1)
# Prepare plot
fig = figure()
rc('legend', fontsize=8)
frame = fig.add_subplot(1,1,1)
frame.set_xlabel("x")
frame.set_ylabel("y")
frame.set_title("Improve model using Variance Reduction")
frame.errorbar(x, y, yerr=err, fmt='bo')
delta = (x.max()-x.min())/10.0
X = numpy.linspace(x.min()-delta, x.max()+delta, 100)
label="Model: $a + bx$ VR=%.2f"%vr0
frame.plot(X, model_lin(fitter.params,X), 'g', label=label)
label="Model: $a + bx +cx^2$ VR=%.2f"%vr1
frame.plot(X, model_par(fitter2.params,X), 'm', label=label)
frame.set_xlim(x.min()-delta, x.max()+delta)
leg = frame.legend(loc=2)
show()
|
kapteyn-astroREPO_NAMEkapteynPATH_START.@kapteyn_extracted@kapteyn-master@doc@source@EXAMPLES@kmpfit_varreduct_wol.py@.PATH_END.py
|
{
"filename": "rotating_convection.py",
"repo_name": "DedalusProject/dedalus",
"repo_path": "dedalus_extracted/dedalus-master/examples/evp_shell_rotating_convection/rotating_convection.py",
"type": "Python"
}
|
"""
Dedalus script solving the linear stability eigenvalue problem for rotating
Rayleigh-Benard convection in a shell. This script demonstrates solving an
eigenvalue problem with non-constant coefficients that depend on both radius
and colatitude. It should take about a minute to run (serial only).
The aspect ratio of the shell is R_inner / R_outer = 0.35, and the problem is
non-dimensionalized using the outer radius and the viscous time. The script
calculates the eigenmodes for an Ekman number of 1e-5, where the critical
mode has an azimuthal wavenumber of m=13. At the critical Rayleigh number,
the imaginary part of the eigenvalue is zero.
Both stress-free (default) and no-slip boundary conditions are implemented.
For incompressible hydro with two boundaries, we need two tau terms for each the
velocity and temperature. Here we choose to use a first-order formulation, putting
one tau term each on auxiliary first-order gradient variables and the others in
the PDE, and lifting them all to the first derivative basis. This formulation puts
a tau term in the divergence constraint, as required for this geometry.
The eigenvalues are not fully converged at the given resolution and shift slightly
if the resolution is increased. For the given resolutions, the eigenvalues agree
with Table 1 of [1] to several digits of precision.
To run and print the calculated eigenvalues:
$ python3 rotating_convection.py
References:
[1]: P. Marti, M. A. Calkins, K. Julien, "A computationally
efficient spectral method for modeling core dynamics,"
Geochemistry, Geophysics, Geosystems (2016).
"""
import numpy as np
import dedalus.public as d3
import logging
logger = logging.getLogger(__name__)
# Parameters
Nphi = 28 # Critical mode has m=13
Ntheta = 64
Nr = 64
Ri = 0.35
Ro = 1
Prandtl = 1
Ekman = 1e-5
stress_free = True
dtype = np.complex128
# Critical Rayleigh numbers
if stress_free:
Rayleigh = 2.1029e7
else:
Rayleigh = 2.0732e7
# Bases
coords = d3.SphericalCoordinates('phi', 'theta', 'r')
dist = d3.Distributor(coords, dtype=dtype)
shell = d3.ShellBasis(coords, shape=(Nphi, Ntheta, Nr), radii=(Ri, Ro), dtype=dtype)
sphere = shell.outer_surface
phi, theta, r = dist.local_grids(shell)
# Fields
om = dist.Field(name='om')
u = dist.VectorField(coords, name='u', bases=shell)
p = dist.Field(name='p', bases=shell)
T = dist.Field(name='T', bases=shell)
tau_u1 = dist.VectorField(coords, bases=sphere)
tau_u2 = dist.VectorField(coords, bases=sphere)
tau_T1 = dist.Field(bases=sphere)
tau_T2 = dist.Field(bases=sphere)
tau_p = dist.Field()
# Substitutions
dt = lambda A: -1j*om*A
rvec = dist.VectorField(coords, bases=shell.meridional_basis)
rvec['g'][2] = r
ez = dist.VectorField(coords, bases=shell.meridional_basis)
ez['g'][1] = -np.sin(theta)
ez['g'][2] = np.cos(theta)
lift_basis = shell.derivative_basis(1)
lift = lambda A: d3.Lift(A, lift_basis, -1)
grad_u = d3.grad(u) + rvec*lift(tau_u1) # First-order reduction
grad_T = d3.grad(T) + rvec*lift(tau_T1) # First-order reduction
strain_rate = d3.grad(u) + d3.transpose(d3.grad(u))
# Problem
problem = d3.EVP([p, u, T, tau_u1, tau_u2, tau_T1, tau_T2, tau_p], eigenvalue=om, namespace=locals())
problem.add_equation("trace(grad_u) + tau_p = 0")
problem.add_equation("dt(u) + (1/Ekman)*cross(ez, u) + grad(p) - Rayleigh*T*rvec - div(grad_u) + lift(tau_u2) = 0")
problem.add_equation("Prandtl*dt(T) - dot(rvec,u) - div(grad_T) + lift(tau_T2) = 0")
problem.add_equation("integ(p) = 0")
if stress_free:
problem.add_equation("radial(u(r=Ri)) = 0")
problem.add_equation("radial(u(r=Ro)) = 0")
problem.add_equation("angular(radial(strain_rate(r=Ri), 0), 0) = 0")
problem.add_equation("angular(radial(strain_rate(r=Ro), 0), 0) = 0")
else:
problem.add_equation("u(r=Ri) = 0")
problem.add_equation("u(r=Ro) = 0")
problem.add_equation("T(r=Ri) = 0")
problem.add_equation("T(r=Ro) = 0")
problem.add_equation("integ(p) = 0")
# Solver
solver = problem.build_solver(ncc_cutoff=1e-10)
# Select m=13
subproblem = solver.subproblems_by_group[(13, None, None)]
# Find 10 eigenvalues closest to the target
if stress_free:
target = 963.765
else:
target = 731.753
solver.solve_sparse(subproblem, 10, target)
# Report results
logger.info(f"Predicted eigenvalue: {target+0j:f}")
logger.info(f"Calculated eigenvalue: {solver.eigenvalues[0]:f}")
logger.info("Ten eigenvalues closest to target:")
logger.info(solver.eigenvalues)
|
DedalusProjectREPO_NAMEdedalusPATH_START.@dedalus_extracted@dedalus-master@examples@evp_shell_rotating_convection@rotating_convection.py@.PATH_END.py
|
{
"filename": "test_dask_datasets.py",
"repo_name": "rapidsai/cuml",
"repo_path": "cuml_extracted/cuml-main/python/cuml/cuml/tests/dask/test_dask_datasets.py",
"type": "Python"
}
|
#
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.dask.common.part_utils import _extract_partitions
from cuml.testing.utils import unit_param, quality_param, stress_param
from cuml.dask.common.input_utils import DistributedDataHandler
from cuml.dask.datasets.blobs import make_blobs
from cuml.internals.safe_imports import gpu_only_import
import pytest
import dask.array as da
from cuml.internals.safe_imports import cpu_only_import
np = cpu_only_import("numpy")
cp = gpu_only_import("cupy")
@pytest.mark.parametrize(
"nrows", [unit_param(1e3), quality_param(1e5), stress_param(1e6)]
)
@pytest.mark.parametrize(
"ncols", [unit_param(10), quality_param(100), stress_param(1000)]
)
@pytest.mark.parametrize("centers", [10])
@pytest.mark.parametrize("cluster_std", [0.1])
@pytest.mark.parametrize("dtype", [np.float32, np.float64])
@pytest.mark.parametrize(
"nparts",
[unit_param(1), unit_param(7), quality_param(100), stress_param(1000)],
)
@pytest.mark.parametrize("order", ["F", "C"])
def test_make_blobs(
nrows, ncols, centers, cluster_std, dtype, nparts, order, client
):
c = client
nrows = int(nrows)
X, y = make_blobs(
nrows,
ncols,
centers=centers,
cluster_std=cluster_std,
dtype=dtype,
n_parts=nparts,
order=order,
client=client,
)
assert len(X.chunks[0]) == nparts
assert len(y.chunks[0]) == nparts
assert X.shape == (nrows, ncols)
assert y.shape == (nrows,)
y_local = y.compute()
assert len(cp.unique(y_local)) == centers
X_ddh = DistributedDataHandler.create(data=X, client=c)
X_first = X_ddh.gpu_futures[0][1].result()
if order == "F":
assert X_first.flags["F_CONTIGUOUS"]
elif order == "C":
assert X_first.flags["C_CONTIGUOUS"]
@pytest.mark.parametrize(
"n_samples", [unit_param(int(1e3)), stress_param(int(1e6))]
)
@pytest.mark.parametrize("n_features", [unit_param(100), stress_param(1000)])
@pytest.mark.parametrize("n_informative", [7])
@pytest.mark.parametrize("n_targets", [1, 3])
@pytest.mark.parametrize("bias", [-4.0])
@pytest.mark.parametrize("effective_rank", [None, 6])
@pytest.mark.parametrize("tail_strength", [0.5])
@pytest.mark.parametrize("noise", [1.0])
@pytest.mark.parametrize("shuffle", [True, False])
@pytest.mark.parametrize("coef", [True, False])
@pytest.mark.parametrize("n_parts", [unit_param(4), stress_param(23)])
@pytest.mark.parametrize("order", ["F", "C"])
@pytest.mark.parametrize("use_full_low_rank", [True, False])
def test_make_regression(
n_samples,
n_features,
n_informative,
n_targets,
bias,
effective_rank,
tail_strength,
noise,
shuffle,
coef,
n_parts,
order,
use_full_low_rank,
client,
):
c = client
from cuml.dask.datasets import make_regression
result = make_regression(
n_samples=n_samples,
n_features=n_features,
n_informative=n_informative,
n_targets=n_targets,
bias=bias,
effective_rank=effective_rank,
noise=noise,
shuffle=shuffle,
coef=coef,
n_parts=n_parts,
use_full_low_rank=use_full_low_rank,
order=order,
)
if coef:
out, values, coefs = result
else:
out, values = result
assert out.shape == (n_samples, n_features), "out shape mismatch"
if n_targets > 1:
assert values.shape == (n_samples, n_targets), "values shape mismatch"
else:
assert values.shape == (n_samples,), "values shape mismatch"
assert len(out.chunks[0]) == n_parts
assert len(out.chunks[1]) == 1
if coef:
if n_targets > 1:
assert coefs.shape == (
n_features,
n_targets,
), "coefs shape mismatch"
assert len(coefs.chunks[1]) == 1
else:
assert coefs.shape == (n_features,), "coefs shape mismatch"
assert len(coefs.chunks[0]) == 1
test1 = da.all(da.sum(coefs != 0.0, axis=0) == n_informative)
std_test2 = da.std(values - (da.dot(out, coefs) + bias), axis=0)
test1, std_test2 = da.compute(test1, std_test2)
diff = cp.abs(1.0 - std_test2)
test2 = cp.all(diff < 1.5 * 10 ** (-1.0))
assert test1, "Unexpected number of informative features"
assert test2, "Unexpectedly incongruent outputs"
data_ddh = DistributedDataHandler.create(data=(out, values), client=c)
out_part, value_part = data_ddh.gpu_futures[0][1].result()
if coef:
coefs_ddh = DistributedDataHandler.create(data=coefs, client=c)
coefs_part = coefs_ddh.gpu_futures[0][1].result()
if order == "F":
assert out_part.flags["F_CONTIGUOUS"]
if n_targets > 1:
assert value_part.flags["F_CONTIGUOUS"]
if coef:
assert coefs_part.flags["F_CONTIGUOUS"]
elif order == "C":
assert out_part.flags["C_CONTIGUOUS"]
if n_targets > 1:
assert value_part.flags["C_CONTIGUOUS"]
if coef:
assert coefs_part.flags["C_CONTIGUOUS"]
@pytest.mark.parametrize("n_samples", [unit_param(500), stress_param(1000)])
@pytest.mark.parametrize("n_features", [unit_param(50), stress_param(100)])
@pytest.mark.parametrize("hypercube", [True, False])
@pytest.mark.parametrize("n_classes", [2, 4])
@pytest.mark.parametrize("n_clusters_per_class", [2, 4])
@pytest.mark.parametrize("n_informative", [7, 20])
@pytest.mark.parametrize("random_state", [None, 1234])
@pytest.mark.parametrize("n_parts", [unit_param(4), stress_param(23)])
@pytest.mark.parametrize("order", ["C", "F"])
@pytest.mark.parametrize("dtype", ["float32", "float64"])
def test_make_classification(
n_samples,
n_features,
hypercube,
n_classes,
n_clusters_per_class,
n_informative,
random_state,
n_parts,
order,
dtype,
client,
):
from cuml.dask.datasets.classification import make_classification
X, y = make_classification(
n_samples=n_samples,
n_features=n_features,
n_classes=n_classes,
hypercube=hypercube,
n_clusters_per_class=n_clusters_per_class,
n_informative=n_informative,
random_state=random_state,
n_parts=n_parts,
order=order,
dtype=dtype,
)
assert (len(X.chunks[0])) == n_parts
assert (len(X.chunks[1])) == 1
assert X.shape == (n_samples, n_features)
assert y.shape == (n_samples,)
assert X.dtype == dtype
assert y.dtype == np.int64
assert len(X.chunks[0]) == n_parts
assert len(y.chunks[0]) == n_parts
import cupy as cp
y_local = y.compute()
assert len(cp.unique(y_local)) == n_classes
X_parts = client.sync(_extract_partitions, X)
X_first = X_parts[0][1].result()
if order == "F":
assert X_first.flags["F_CONTIGUOUS"]
elif order == "C":
assert X_first.flags["C_CONTIGUOUS"]
|
rapidsaiREPO_NAMEcumlPATH_START.@cuml_extracted@cuml-main@python@cuml@cuml@tests@dask@test_dask_datasets.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "transientskp/lpf",
"repo_path": "lpf_extracted/lpf-main/lpf/surveys/__init__.py",
"type": "Python"
}
|
from .survey import Survey
|
transientskpREPO_NAMElpfPATH_START.@lpf_extracted@lpf-main@lpf@surveys@__init__.py@.PATH_END.py
|
{
"filename": "qtransform.py",
"repo_name": "gwpy/gwpy",
"repo_path": "gwpy_extracted/gwpy-main/gwpy/signal/qtransform.py",
"type": "Python"
}
|
# -*- coding: utf-8 -*-
# Copyright (C) Duncan Macleod (2016-2020)
#
# This file is part of GWpy.
#
# GWpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GWpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GWpy. If not, see <http://www.gnu.org/licenses/>.
"""Python implementation of the tiled Q-transform scan.
This is a re-implementation of the original Q-transform scan from the Omega
pipeline; all credit for the original algorithm goes to its
authors.
"""
import warnings
from math import (log, ceil, pi, isinf, exp)
import numpy
from numpy import fft as npfft
from ..utils import round_to_power
from ..segments import Segment
__author__ = 'Duncan Macleod <duncan.macleod@ligo.org>'
__credits__ = 'Scott Coughlin <scott.coughlin@ligo.org>, ' \
'Alex Urban <alexander.urban@ligo.org>'
__all__ = ['QTiling', 'QPlane', 'QTile', 'QGram', 'q_scan']
# q-transform defaults
DEFAULT_FRANGE = (0, float('inf'))
DEFAULT_MISMATCH = 0.2
DEFAULT_QRANGE = (4, 64)
# -- object class definitions -------------------------------------------------
class QObject(object):
"""Base class for Q-transform objects
This object exists just to provide basic methods for all other
Q-transform objects.
"""
# pylint: disable=too-few-public-methods
def __init__(self, duration, sampling, mismatch=DEFAULT_MISMATCH):
self.duration = float(duration)
self.sampling = float(sampling)
self.mismatch = float(mismatch)
@property
def deltam(self):
"""Fractional mismatch between neighbouring tiles
:type: `float`
"""
return 2 * (self.mismatch / 3.) ** (1/2.)
class QBase(QObject):
"""Base class for Q-transform objects with fixed Q
This class just provides a property for Q-prime = Q / sqrt(11)
"""
def __init__(self, q, duration, sampling, mismatch=DEFAULT_MISMATCH):
super().__init__(duration, sampling, mismatch=mismatch)
self.q = float(q)
@property
def qprime(self):
"""Normalized Q `(q/sqrt(11))`
"""
return self.q / 11**(1/2.)
class QTiling(QObject):
"""Iterable constructor of `QPlane` objects
For a given Q-range, each of the resulting `QPlane` objects can
be iterated over.
Parameters
----------
duration : `float`
the duration of the data to be Q-transformed
qrange : `tuple` of `float`
`(low, high)` pair of Q extrema
frange : `tuple` of `float`
`(low, high)` pair of frequency extrema
sampling : `float`
sampling rate (in Hertz) of data to be Q-transformed
mismatch : `float`
maximum fractional mismatch between neighbouring tiles
"""
def __init__(self, duration, sampling,
qrange=DEFAULT_QRANGE,
frange=DEFAULT_FRANGE,
mismatch=DEFAULT_MISMATCH):
super().__init__(duration, sampling, mismatch=mismatch)
self.qrange = (float(qrange[0]), float(qrange[1]))
self.frange = [float(frange[0]), float(frange[1])]
qlist = list(self._iter_qs())
if self.frange[0] == 0: # set non-zero lower frequency
self.frange[0] = 50 * max(qlist) / (2 * pi * self.duration)
maxf = self.sampling / 2 / (1 + 11**(1/2.) / min(qlist))
if isinf(self.frange[1]):
self.frange[1] = maxf
elif self.frange[1] > maxf: # truncate upper frequency to maximum
warnings.warn('upper frequency of %.2f is too high for the given '
'Q range, resetting to %.2f'
% (self.frange[1], maxf))
self.frange[1] = maxf
@property
def qs(self): # pylint: disable=invalid-name
"""Array of Q values for this `QTiling`
:type: `numpy.ndarray`
"""
return numpy.array(list(self._iter_qs()))
@property
def whitening_duration(self):
"""The recommended data duration required for whitening
"""
return max(t.whitening_duration for t in self)
def _iter_qs(self):
"""Iterate over the Q values
"""
# work out how many Qs we need
cumum = log(self.qrange[1] / self.qrange[0]) / 2**(1/2.)
nplanes = int(max(ceil(cumum / self.deltam), 1))
dq = cumum / nplanes # pylint: disable=invalid-name
for i in range(nplanes):
yield self.qrange[0] * exp(2**(1/2.) * dq * (i + .5))
def __iter__(self):
"""Iterate over this `QTiling`
Yields a `QPlane` at each Q value
"""
for q in self._iter_qs():
yield QPlane(q, self.frange, self.duration, self.sampling,
mismatch=self.mismatch)
def transform(self, fseries, **kwargs):
"""Compute the time-frequency plane at fixed Q with the most
significant tile
Parameters
----------
fseries : `~gwpy.timeseries.FrequencySeries`
the complex FFT of a time-series data set
**kwargs
other keyword arguments to pass to `QPlane.transform`
Returns
-------
out : `QGram`
signal energies over the time-frequency plane containing the most
significant tile
N : `int`
estimated number of statistically independent tiles
See also
--------
QPlane.transform
compute the Q-transform over a single time-frequency plane
"""
if not numpy.isfinite(fseries).all():
raise ValueError('Input signal contains non-numerical values')
weight = 1 + numpy.log10(self.qrange[1]/self.qrange[0]) / numpy.sqrt(2)
nind, nplanes, peak, result = (0, 0, 0, None)
# identify the plane with the loudest tile
for plane in self:
nplanes += 1
nind += sum([1 + row.ntiles * row.deltam for row in plane])
result = plane.transform(fseries, **kwargs)
if result.peak['energy'] > peak:
out = result
peak = out.peak['energy']
return (out, nind * weight / nplanes)
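# Hedged illustration (not part of the GWpy API): enumerate the Q planes generated
# by a QTiling for 4 seconds of data sampled at 4096 Hz.
def _example_qtiling():  # hypothetical helper
    """Print each plane's Q value and frequency range, and return the Q array."""
    tiling = QTiling(4, 4096, qrange=DEFAULT_QRANGE, frange=(10, 1000))
    for plane in tiling:
        print(plane.q, plane.frange, len(plane.frequencies))
    return tiling.qs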
class QPlane(QBase):
"""Iterable representation of a Q-transform plane
For a given Q, an array of frequencies can be iterated over, yielding
a `QTile` each time.
Parameters
----------
q : `float`
the Q-value for this plane
frange : `tuple` of `float`
`(low, high)` range of frequencies for this plane
duration : `float`
the duration of the data to be Q-transformed
sampling : `float`
sampling rate (in Hertz) of data to be Q-transformed
mismatch : `float`
maximum fractional mismatch between neighbouring tiles
"""
def __init__(self, q, frange, duration, sampling,
mismatch=DEFAULT_MISMATCH):
super().__init__(q, duration, sampling, mismatch=mismatch)
self.frange = [float(frange[0]), float(frange[1])]
if self.frange[0] == 0: # set non-zero lower frequency
self.frange[0] = 50 * self.q / (2 * pi * self.duration)
if isinf(self.frange[1]): # set non-infinite upper frequency
self.frange[1] = self.sampling / 2 / (1 + 1/self.qprime)
def __iter__(self):
"""Iterate over this `QPlane`
Yields a `QTile` at each frequency
"""
# for each frequency, yield a QTile
for freq in self._iter_frequencies():
yield QTile(self.q, freq, self.duration, self.sampling,
mismatch=self.mismatch)
def _iter_frequencies(self):
"""Iterate over the frequencies of this `QPlane`
"""
# work out how many frequencies we need
minf, maxf = self.frange
fcum_mismatch = log(maxf / minf) * (2 + self.q**2)**(1/2.) / 2.
nfreq = int(max(1, ceil(fcum_mismatch / self.deltam)))
fstep = fcum_mismatch / nfreq
fstepmin = 1 / self.duration
# for each frequency, yield a QTile
last = None
for i in range(nfreq):
this = (
minf * exp(2 / (2 + self.q**2)**(1/2.) * (i + .5) * fstep)
// fstepmin * fstepmin
)
if this != last: # yield only unique elements
yield this
last = this
@property
def frequencies(self):
"""Array of central frequencies for this `QPlane`
:type: `numpy.ndarray`
"""
return numpy.array(list(self._iter_frequencies()))
@property
def farray(self):
"""Array of frequencies for the lower-edge of each frequency bin
:type: `numpy.ndarray`
"""
bandwidths = 2 * pi ** (1/2.) * self.frequencies / self.q
return self.frequencies - bandwidths / 2.
@property
def whitening_duration(self):
"""The recommended data duration required for whitening
"""
return round_to_power(self.q / (2 * self.frange[0]),
base=2, which=None)
def transform(self, fseries, norm=True, epoch=None, search=None):
"""Calculate the energy `TimeSeries` for the given `fseries`
Parameters
----------
fseries : `~gwpy.frequencyseries.FrequencySeries`
the complex FFT of a time-series data set
norm : `bool`, `str`, optional
normalize the energy of the output by the median (if `True` or
``'median'``) or the ``'mean'``; if `False` the output
is the complex `~numpy.fft.ifft` output of the Q-transform
epoch : `~gwpy.time.LIGOTimeGPS`, `float`, optional
the epoch of these data, only used for metadata in the output
`TimeSeries`, and not required if the input `fseries` has the
epoch populated.
search : `~gwpy.segments.Segment`, optional
search window of interest to determine the loudest Q-plane
Returns
-------
results : `QGram`
the complex energies of the Q-transform of the input `fseries`
at each frequency
See also
--------
QTile.transform
for details on the transform over a row of `(Q, frequency)` tiles
QGram
an object with energies populated over time-frequency tiles
"""
out = []
for qtile in self:
# get energy from transform
out.append(qtile.transform(fseries, norm=norm, epoch=epoch))
return QGram(self, out, search)
class QTile(QBase):
"""Representation of a tile with fixed Q and frequency
"""
def __init__(self, q, frequency, duration, sampling,
mismatch=DEFAULT_MISMATCH):
super().__init__(q, duration, sampling, mismatch=mismatch)
self.frequency = frequency
@property
def bandwidth(self):
"""The bandwidth for tiles in this row
:type: `float`
"""
return 2 * pi ** (1/2.) * self.frequency / self.q
@property
def ntiles(self):
"""The number of tiles in this row
:type: `int`
"""
tcum_mismatch = self.duration * 2 * pi * self.frequency / self.q
return round_to_power(tcum_mismatch / self.deltam,
base=2, which='upper')
@property
def windowsize(self):
"""The size of the frequency-domain window for this row
:type: `int`
"""
return 2 * int(self.frequency / self.qprime * self.duration) + 1
def _get_indices(self):
half = int((self.windowsize - 1) / 2)
return numpy.arange(-half, half + 1)
def get_window(self):
"""Generate the bi-square window for this row
Returns
-------
window : `numpy.ndarray`
"""
# real frequencies
wfrequencies = self._get_indices() / self.duration
# dimensionless frequencies
xfrequencies = wfrequencies * self.qprime / self.frequency
# normalize and generate bi-square window
norm = self.ntiles / (self.duration * self.sampling) * (
315 * self.qprime / (128 * self.frequency)) ** (1/2.)
return (1 - xfrequencies ** 2) ** 2 * norm
def get_data_indices(self):
"""Returns the index array of interesting frequencies for this row
"""
return numpy.round(
self._get_indices() + 1 + self.frequency * self.duration,
).astype(int)
@property
def padding(self):
"""The `(left, right)` padding required for the IFFT
:type: `tuple` of `int`
"""
pad = self.ntiles - self.windowsize
return (int((pad - 1)/2.), int((pad + 1)/2.))
def transform(self, fseries, norm=True, epoch=None):
"""Calculate the energy `TimeSeries` for the given fseries
Parameters
----------
fseries : `~gwpy.frequencyseries.FrequencySeries`
the complex FFT of a time-series data set
norm : `bool`, `str`, optional
normalize the energy of the output by the median (if `True` or
``'median'``) or the ``'mean'``; if `False` the output
is the energy (power) of the Q-transform
epoch : `~gwpy.time.LIGOTimeGPS`, `float`, optional
the epoch of these data, only used for metadata in the output
`TimeSeries`, and not required if the input `fseries` has the
epoch populated.
Returns
-------
energy : `~gwpy.timeseries.TimeSeries`
a `TimeSeries` of the energy from the Q-transform of
this tile against the data.
"""
from ..timeseries import TimeSeries
windowed = fseries[self.get_data_indices()] * self.get_window()
# pad data, move negative frequencies to the end, and IFFT
padded = numpy.pad(windowed, self.padding, mode='constant')
wenergy = npfft.ifftshift(padded)
# return a `TimeSeries`
if epoch is None:
epoch = fseries.epoch
tdenergy = npfft.ifft(wenergy)
cenergy = TimeSeries(tdenergy, x0=epoch,
dx=self.duration/tdenergy.size, copy=False)
energy = type(cenergy)(
cenergy.value.real ** 2. + cenergy.value.imag ** 2.,
x0=cenergy.x0, dx=cenergy.dx, copy=False)
if norm:
norm = norm.lower() if isinstance(norm, str) else norm
if norm in (True, 'median'):
narray = energy / energy.median()
elif norm in ('mean',):
narray = energy / energy.mean()
else:
raise ValueError("Invalid normalisation %r" % norm)
return narray.astype("float32", casting="same_kind", copy=False)
return energy
class QGram(object):
"""Store tile energies over an irregularly gridded plane
Parameters
----------
plane : `QPlane`
the time-frequency plane over which to populate
energies : `list` of `TimeSeries`
a list of signal energies for each row of tiles
search : `~gwpy.segments.Segment`, optional
search window of interest to determine the loudest tile
"""
def __init__(self, plane, energies, search):
self.plane = plane
self.energies = energies
self.peak = self._find_peak(search)
def _find_peak(self, search):
peak = {'energy': 0, 'snr': None, 'time': None, 'frequency': None}
for freq, energy in zip(self.plane.frequencies, self.energies):
if search is not None:
energy = energy.crop(*search)
maxidx = energy.value.argmax()
maxe = energy.value[maxidx]
if maxe > peak['energy']:
peak.update({
'energy': maxe,
'snr': (2 * maxe) ** (1/2.),
'time': energy.t0.value + energy.dt.value * maxidx,
'frequency': freq,
})
return peak
def interpolate(self, tres="<default>", fres="<default>", logf=False,
outseg=None):
"""Interpolate this `QGram` over a regularly-gridded spectrogram
Parameters
----------
tres : `float`, optional
desired time resolution (seconds) of output `Spectrogram`,
default is `abs(outseg) / 1000.`
fres : `float`, `int`, `None`, optional
desired frequency resolution (Hertz) of output `Spectrogram`,
or, if ``logf=True``, the number of frequency samples;
give `None` to skip this step and return the original resolution,
default is 0.5 Hz or 500 frequency samples
logf : `bool`, optional
boolean switch to enable (`True`) or disable (`False`) use of
log-sampled frequencies in the output `Spectrogram`
outseg : `~gwpy.segments.Segment`, optional
GPS `[start, stop)` segment for output `Spectrogram`,
default is the full duration of the input
Returns
-------
out : `~gwpy.spectrogram.Spectrogram`
output `Spectrogram` of normalised Q energy
See also
--------
scipy.interpolate
this method uses `~scipy.interpolate.InterpolatedUnivariateSpline`
to cast all frequency rows to a common time-axis, and then
`~scipy.interpolate.RectBivariateSpline` to apply the desired frequency
resolution across the band
Notes
-----
This method will return a `Spectrogram` of dtype ``float32`` if
``norm`` is given, and ``float64`` otherwise.
To optimize plot rendering with `~matplotlib.axes.Axes.pcolormesh`,
the output `~gwpy.spectrogram.Spectrogram` can be given a log-sampled
frequency axis by passing `logf=True` at runtime. The `fres` argument
is then the number of points on the frequency axis. Note, this is
incompatible with `~matplotlib.axes.Axes.imshow`.
It is also highly recommended to use the `outseg` keyword argument
when only a small window around a given GPS time is of interest.
"""
from scipy.interpolate import (
InterpolatedUnivariateSpline,
RectBivariateSpline,
)
from ..spectrogram import Spectrogram
if outseg is None:
outseg = self.energies[0].span
frequencies = self.plane.frequencies
dtype = self.energies[0].dtype
# build regular Spectrogram from peak-Q data by interpolating each
# (Q, frequency) `TimeSeries` to have the same time resolution
if tres == "<default>":
tres = abs(Segment(outseg)) / 1000.
xout = numpy.arange(*outseg, step=tres)
nx = xout.size
ny = frequencies.size
out = Spectrogram(numpy.empty((nx, ny), dtype=dtype),
t0=outseg[0], dt=tres, frequencies=frequencies)
# record Q in output
out.q = self.plane.q
# interpolate rows
for i, row in enumerate(self.energies):
xrow = numpy.arange(row.x0.value, (row.x0 + row.duration).value,
row.dx.value)
interp = InterpolatedUnivariateSpline(xrow, row.value)
out[:, i] = interp(xout).astype(dtype, casting="same_kind",
copy=False)
if fres is None:
return out
# interpolate the spectrogram to increase its frequency resolution
# --- this is done because Duncan doesn't like interpolated images
# since they don't support log scaling
interp = RectBivariateSpline(xout, frequencies, out.value)
if not logf:
if fres == "<default>":
fres = .5
outfreq = numpy.arange(
self.plane.frange[0], self.plane.frange[1], fres,
dtype=dtype)
else:
if fres == "<default>":
fres = 500
outfreq = numpy.geomspace(
self.plane.frange[0],
self.plane.frange[1],
num=int(fres),
)
new = type(out)(
interp(xout, outfreq).astype(
dtype, casting="same_kind", copy=False),
t0=outseg[0], dt=tres, frequencies=outfreq,
)
new.q = self.plane.q
return new
def table(self, snrthresh=5.5):
"""Represent this `QPlane` as an `EventTable`
Parameters
----------
snrthresh : `float`, optional
lower inclusive threshold on individual tile SNR to keep in the
table, default: 5.5
Returns
-------
out : `~gwpy.table.EventTable`
a table of time-frequency tiles on this `QPlane`
Notes
-----
Only tiles with signal energy greater than or equal to
`snrthresh ** 2 / 2` will be stored in the output `EventTable`.
"""
from ..table import EventTable
# get plane properties
freqs = self.plane.frequencies
bws = 2 * (freqs - self.plane.farray)
# collect table data as a recarray
names = ('time', 'frequency', 'duration', 'bandwidth', 'energy')
rec = numpy.recarray((0,), names=names, formats=['f8'] * len(names))
for f, bw, row in zip(freqs, bws, self.energies):
ind, = (row.value >= snrthresh ** 2 / 2.).nonzero()
new = ind.size
if new > 0:
rec.resize((rec.size + new,), refcheck=False)
rec['time'][-new:] = row.times.value[ind]
rec['frequency'][-new:] = f
rec['duration'][-new:] = row.dt.to('s').value
rec['bandwidth'][-new:] = bw
rec['energy'][-new:] = row.value[ind]
# save to a table
out = EventTable(rec, copy=False)
out.meta['q'] = self.plane.q
return out
# -- utilities ----------------------------------------------------------------
def q_scan(data, mismatch=DEFAULT_MISMATCH, qrange=DEFAULT_QRANGE,
frange=DEFAULT_FRANGE, duration=None, sampling=None,
**kwargs):
"""Transform data by scanning over a `QTiling`
This utility is provided mainly to allow direct manipulation of the
`QTiling.transform` output. Most users probably just want to use
:meth:`~gwpy.timeseries.TimeSeries.q_transform`, which wraps around this.
Parameters
----------
data : `~gwpy.timeseries.TimeSeries` or `ndarray`
the time- or frequency-domain input data
mismatch : `float`, optional
maximum allowed fractional mismatch between neighbouring tiles
qrange : `tuple` of `float`, optional
`(low, high)` range of Qs to scan
frange : `tuple` of `float`, optional
`(low, high)` range of frequencies to scan
duration : `float`, optional
duration (seconds) of input, required if `data` is not a `TimeSeries`
sampling : `float`, optional
sample rate (Hertz) of input, required if `data` is not a `TimeSeries`
**kwargs
other keyword arguments to be passed to :meth:`QTiling.transform`,
including ``'epoch'`` and ``'search'``
Returns
-------
qgram : `QGram`
the raw output of :meth:`QTiling.transform`
far : `float`
expected false alarm rate (Hertz) of white Gaussian noise with the
same peak energy and total duration as `qgram`
"""
from gwpy.timeseries import TimeSeries
# prepare input
if isinstance(data, TimeSeries):
duration = abs(data.span)
sampling = data.sample_rate.to('Hz').value
kwargs.update({'epoch': data.t0.value})
data = data.fft().value
# return a raw Q-transform and its significance
qgram, N = QTiling(duration, sampling, mismatch=mismatch, qrange=qrange,
frange=frange).transform(data, **kwargs)
far = 1.5 * N * numpy.exp(-qgram.peak['energy']) / duration
return (qgram, far)
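# ------------------------------------------------------------------------------
# Hedged usage sketch (not part of GWpy): run `q_scan` on a short stretch of white
# noise and interpolate the loudest plane onto a regular spectrogram. Only names
# defined in this module or in `gwpy.timeseries` are used.
def _example_q_scan():
    from gwpy.timeseries import TimeSeries
    rng = numpy.random.default_rng(0)
    data = TimeSeries(rng.normal(size=16384), sample_rate=4096)
    qgram, far = q_scan(data, qrange=(4, 64), frange=(10, 500))
    spec = qgram.interpolate(tres=0.01, fres=1)
    return qgram.peak, far, spec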
|
gwpyREPO_NAMEgwpyPATH_START.@gwpy_extracted@gwpy-main@gwpy@signal@qtransform.py@.PATH_END.py
|
{
"filename": "snid.py",
"repo_name": "MickaelRigault/pysnid",
"repo_path": "pysnid_extracted/pysnid-main/pysnid/snid.py",
"type": "Python"
}
|
""" Module to handle SNID fit. """
import os
import shutil
import numpy as np
import pandas
import warnings
def run_snid(filename,
phase=None, redshift=None, delta_phase=5, delta_redshift=None,
redshift_bounds=[0, None],
lbda_range=[4000,8000], set_it=True,
verbose=False, quiet=True, get_results=True,
rm_zeros=True, **kwargs):
""" """
snid_prop = dict(quiet=quiet, lbda_range=lbda_range, verbose=verbose)
#
# - Phase
if phase is not None:
snid_prop["phase_range"]=[phase-delta_phase, phase+delta_phase]
#
# - redshift
if redshift is not None:
snid_prop["forcez"] = redshift
if delta_redshift is not None:
if redshift_bounds[0] is not None:
min_redshift = np.max([redshift-delta_redshift, redshift_bounds[0]])
else:
min_redshift = redshift-delta_redshift
if redshift_bounds[1] is not None:
max_redshift = np.min([redshift+delta_redshift, redshift_bounds[1]])
else:
max_redshift = redshift+delta_redshift
snid_prop["redshift_range"] = [min_redshift, max_redshift]
else:
snid_prop["redshift_range"] = redshift_bounds
print(snid_prop)
# - Running SNID
snidf = SNID()
options = {**snid_prop,**kwargs}
if verbose:
print("options used:", options)
outfile = snidf.run(filename, **options)
if outfile is None:
warnings.warn("SNID fit failed. Nothing returned")
return None
if get_results:
snidres = SNIDReader.from_filename(outfile)
return snidres
return outfile
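# Hedged usage sketch (not part of pysnid): run SNID on a single ascii spectrum with
# phase and redshift priors, then inspect the best matches. The filename is a
# placeholder and the SNID executable must be available on the PATH.
def _example_run_snid():
    snidres = run_snid("spectrum.ascii",  # placeholder input spectrum
                       phase=5, delta_phase=5,
                       redshift=0.05, delta_redshift=0.02)
    if snidres is not None:
        print(snidres.get_type())
        print(snidres.get_bestmatches())
    return snidres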
def bulk_run_snid(filenames, client=None, as_dask="delayed", map_kwargs={}, **kwargs):
""" """
import dask
run_delayed = []
for i,filename in enumerate(filenames):
mkwargs = {k:v if not hasattr(v,"__iter__") else v[i] for k,v in kwargs.items()}
run_delayed.append( dask.delayed(run_snid)(filename, **mkwargs))
# ------------ #
# Dask Out #
# ------------ #
if as_dask == "delayed":
return run_delayed
if as_dask == "compute":
if client is None:
return dask.delayed(list)(run_delayed).compute()
return client.compute(run_delayed)
if as_dask == "gather":
if client is None:
return dask.delayed(list)(run_delayed).compute()
return client.gather( client.compute(run_delayed) )
raise ValueError(f"as_dask can only delayed, compute and gather: {as_dask} given")
class SNIDReader( object ):
def __init__(self, data=None, results=None, models=None):
""" """
if data is not None:
self.set_data(data)
if results is not None:
self.set_results(results)
if models is not None:
self.set_models(models)
@classmethod
def from_filename(cls, filename, load_data=True, load_models=True, load_results=True):
""" """
this = cls()
hdata = pandas.HDFStore(filename)
filekeys = hdata.keys()
if load_data:
if "/data" in filekeys:
this.set_data( hdata.get("data") )
else:
warnings.warn(f"no 'data' stored in the input filename {filename}")
if load_results:
if "/results" in filekeys:
this.set_results( hdata.get("results") )
else:
warnings.warn(f"no 'results' stored in the input filename {filename}")
if load_models:
if "/models" in filekeys:
this.set_models( hdata.get( "models" ) )
else:
comps = [l for l in filekeys if "comp" in l]
if len(comps)>0:
warnings.warn("Important: Deprecation - the old '_snid.h5' format with individual 'comp file' stored will not be supported at the next upgrade. Rebuild your file.")
this.set_models(pandas.concat({int(comp.split("comp")[-1]): hdata.get( comp ) for comp in comps}))
else:
warnings.warn(f"not a single 'comp' stored in the input filename {filename}")
this._filename = filename
hdata.close()
return this
@classmethod
def from_run(cls, filename,
**kwargs):
""" """
raise NotImplementedError("To be implemented")
# ============== #
# Method #
# ============== #
def set_results(self, results):
""" """
results = results.copy()
types = results["type"].str.replace("II","II-", regex=False
).str.replace("--","-", regex=False
).str.replace("M-star","star-M", regex=False)
results[["typing","subtyping"]] = types.str.split("-",expand=True).fillna("None")
self._results = results
def set_data(self, data):
""" """
self._data = data
def set_models(self, models):
""" """
self._models = models
# --------- #
# GETTER #
# --------- #
def get_model_label(self, index, incl_rlap=False):
""" """
mdata = self.results.loc[index]
text = f"{mdata['type']} ({mdata['sn']}) @ z={mdata['z']:.3f} | phase={mdata['age']}"
if incl_rlap:
text+= f" | rlap={mdata['rlap']:.1f}"
return text
def get_model_rlap(self, index):
""" """
return self.results.loc[index]["rlap"]
def get_bestmatches(self, sortby="rlap", grade="good", rlap_range=[4,None], **kwargs):
""" grade="good" and **kwargs goes to get_result() """
# The reset index is to have the no. columns in the returned
# dataframe.
results = self.get_results(grade=grade,rlap_range=rlap_range, **kwargs).sort_values(sortby, ascending=False).reset_index()
bestmatches = results.groupby("type").first()
size_ = results.groupby("type").size()
size_.name="nentries"
bestmatches = pandas.merge(bestmatches, size_, left_index=True, right_index=True
).sort_values("rlap", ascending=False)
if "cutoff" in bestmatches.index:
return bestmatches.drop("cutoff")
return bestmatches
def get_results(self, types="*", typing=None,
rlap_range=None,
lap_range=None, age_range=None, z_range=None,
grade="good", nfirst=30):
""" get a subset of the result dataframe """
def _get_in_range_(res_, key, rmin=None, rmax=None):
""" """
if not rmin is None or not rmax is None:
if rmax is None:
res_ = res_[res_[key]>=rmin]
elif rmin is None:
res_ = res_[res_[key]<=rmax]
else:
res_ = res_[res_[key].between(rmin, rmax)]
return res_
# Go
if grade is None:
res = self.results
else:
res = self.results[self.results["grade"] == grade]
if not (types is None or types in ["*","all"]):
if "*" in types:
t_ = types.replace("*","")
types = [t for t in res["type"].astype("str").unique() if t_ in t]
else:
types = np.atleast_1d(types)
res = res[res["type"].isin(types)]
if rlap_range is not None:
res = _get_in_range_(res, "rlap", *rlap_range)
if z_range is not None:
res = _get_in_range_(res, "z", *z_range)
if age_range is not None:
res = _get_in_range_(res, "age", *age_range)
if lap_range is not None:
res = _get_in_range_(res, "lap", *lap_range)
#
# = Typing
#
if typing in ["*","all", "any"]:
typing = None
if typing in ["snia", "sn ia", "Ia"]:
typing = res[res["typing"] == "Ia"]["type"].unique()
elif typing in ["Ia!norm"]:
typing = res[(res["typing"] == "Ia") & (res["type"] != "Ia-norm")]["type"].unique()
elif typing is not None:
if typing == "auto":
auto_type = self.get_type()
typing_, subtype_ = np.transpose(auto_type)[0]
if typing_ == "unclear":
typing = None
elif subtype_ == "unclear":
typing = bestres[bestres["typing"] == "Ia"]["type"].unique()
else:
typing = f"{typing_}-{subtype_}"
# else typing is None
if typing is not None:
res = res[res["type"].isin(np.atleast_1d(typing))]
# ============ #
if nfirst is not None:
res = res.iloc[:nfirst]
return res
def get_inputdata(self, fluxcorr=True):
""" For some reason, the 'data' spectra recorded by SNID (and
insite self.data) corresponds to input_flux*input_lbda.
fluxcorr enables to return the correct flux such that:
input_flux = self.get_inputdata(fluxcorr=True)
-> here input_flux is actually normalised by its mean.
"""
data = self.data.copy()
if not fluxcorr:
return data
flux = data["flux"]/data["wavelength"]
flux/=flux.mean()
data["flux"] = flux*1.05 # No idea why...
return data
def get_modeldata(self, model_, fluxcorr=True):
""" """
data = self.models.xs(model_).copy()
if not fluxcorr:
return data
flux = data["flux"]/data["wavelength"]
flux/=flux.mean()
data["flux"] = flux*1.05 # No idea why...
return data
def get_type(self, min_rlap=5, nfirst=10, grade='good', min_prob=0.5,
full_output=False, fallback='unclear', min_probsub=None,
incl_subtype=True,
**kwargs):
"""
min_probsub defaults to min_prob when None.
If incl_subtype, the subtyping is the subtyping given the best_typing.
So it reads as: p(subtype | besttype)
Returns
-------
(string, float), [(string, float) if incl_subtype]
in details: (typename, p(type)), (subtype, p(subtype | type))
"""
results = self.get_results(grade=grade, **{**dict(rlap_range=[min_rlap,None]), **kwargs})
if nfirst is not None:
results = results.iloc[:nfirst]
rlap_sums = results.groupby(["typing","subtyping"])["rlap"].sum()
rlap_sums /= rlap_sums.sum()
rlap_sums = rlap_sums.sort_values(ascending=False)
if full_output:
return rlap_sums
if len(rlap_sums)==0:
warnings.warn(f"No 'rlap' greater than {min_rlap} ; '{fallback}' returned")
best_type, best_typefrac = (fallback, np.NaN)
else:
typing_frac = rlap_sums.groupby(level=0).sum().sort_values(ascending=False) # already a frac
if typing_frac.iloc[0] < min_prob: # because stored by ascending=False
warnings.warn(f"No 'probabilities' above the {min_prob:.0%} ; '{fallback}' typing returned")
best_type, best_typefrac = (fallback, np.NaN)
else:
best_typing = typing_frac.iloc[0] # because stored by ascending=False
best_type, best_typefrac = typing_frac.index[0],typing_frac.iloc[0]
if not incl_subtype:
return (best_type, best_typefrac)
if best_type == fallback:
return (best_type, best_typefrac), (fallback, np.NaN)
# Subtyping:
if min_probsub is None:
min_probsub = min_prob
# return (best_type, best_typefrac), rlap_sums
subtypes = rlap_sums.xs(best_type)
subtype_frac = (subtypes/subtypes.sum()).sort_values(ascending=False)
if subtype_frac.iloc[0] < min_probsub:
return (best_type, best_typefrac), (fallback, np.NaN)
return (best_type, best_typefrac), (subtype_frac.index[0],subtype_frac.iloc[0])
def get_typing_result(self, typing="auto",
rlap_range=[5,None], nfirst=30):
""" """
bestres = self.get_results(typing=typing, rlap_range=rlap_range)
if nfirst is not None:
bestres = bestres.iloc[:nfirst]
return bestres
def get_redshift(self, typing="auto", weight_by="rlap",
rlap_range=[5, None], nfirst=30,
dredshift="nmad",
default_zerr=np.nan, **kwargs):
"""
Parameters
----------
typing: [None, string or list of] -optional-
if None or "*" all types will be used.
if auto, typing comes from auto-typing (self.get_type())
if given (could be a list), only given type is used.
if "Ia" all subtypes
"""
DEFAULT = [np.nan, default_zerr]
bestres = self.get_results(typing=typing, nfirst=nfirst,
rlap_range=rlap_range, **kwargs)
# nothing so back to nan
if len(bestres) == 0:
return DEFAULT
if weight_by is not None:
weights = bestres[weight_by]
# Redshift
redshift = np.average(bestres["z"], weights=weights)
# Error on
if dredshift == "nmad":
if len(bestres) < 3:
dredshift = default_zerr
else:
from scipy.stats import median_abs_deviation
dredshift = median_abs_deviation(bestres["z"])
else:
dredshift = getattr(np,"dredshift")(bestres["z"])
return redshift, dredshift
# --------- #
# GETTER #
# --------- #
def show(self, axes=None, fig=None,
nbest=4,min_rlap=5,nfirst_resummary=30,
show_typing=True, label=None,
sumprop={},
redshift=None, zlabel=None, phase=None, dphase=None,
savefile=None,
show_telluric=True, show_ha=True, telluric_color="0.6",
**kwargs):
""" """
if axes is None:
if fig is None:
import matplotlib.pyplot as mpl
fig = mpl.figure(figsize=[9,3])
axs = fig.add_axes([0.1,0.18,0.55,0.7])
#axt = fig.add_axes([0.75,0.1,0.2,0.75], polar=True)
axsum= [fig.add_axes([0.75,0.82, 0.2,0.03]),
fig.add_axes([0.75,0.18, 0.2,0.55])]
else:
axs, axsum = axes
fig = axs.figure
_ = self.show_bestmatches(ax=axs, nbest=nbest, min_rlap=min_rlap, **kwargs)
_ = self.show_ressummary(axes=axsum, nfirst=nfirst_resummary, min_rlap=min_rlap,
redshift=redshift, zlabel=zlabel, phase=phase, dphase=dphase,
**sumprop)
# - Show typing
if show_typing:
typing, subtyping = self.get_type(incl_subtype=True)
if label is not None:
axs.text(-0.05, 1.1, f"{label}",
va="bottom", ha="left", fontsize="small", weight="normal",
color="0.6", transform=axs.transAxes)
fig.text(-0.05, 1.01, f"auto typing: p({typing[0]})={typing[1]:.0%} | p({subtyping[0]}|{typing[0]})={subtyping[1]:.0%}",
va="bottom", ha="left", fontsize="small", weight="normal",
color="k", transform=axs.transAxes)
if show_ha and redshift is not None:
axs.axvline(6563*(1+redshift), ls="--", color=telluric_color, zorder=1, lw=0.5, alpha=0.8)
if show_telluric:
main_telluric = [7450,7750]
small_telluric = [6850,7050]
axs.axvspan( *main_telluric, color=telluric_color, alpha=0.05)
axs.axvspan( *small_telluric, color=telluric_color, alpha=0.05)
if savefile is not None:
fig.savefig(savefile)
return fig
def show_spider(self, ax=None, main="rlap", second="nentries",
logscale="second", nfirst=None,
min_rlap=5, matchprop={},
color_main="C0", falpha_main=0.05, lw_main=1.5,
color_second="C1",falpha_second=0.2, lw_second=1,
color_grid=None, **kwargs):
""" """
import matplotlib.pyplot as mpl
from matplotlib.colors import to_rgba
from pysnid.tools import make_spiderplot, get_polartwin
logscale = np.atleast_1d(logscale) if logscale is not None else []
best_matches = self.get_bestmatches(**{**dict(rlap_range=[min_rlap,None]), **matchprop})
if nfirst is not None:
best_matches = best_matches.iloc[:nfirst]
nbest_matchs = len(best_matches)
if nbest_matchs == 0:
warnings.warn("No bestmatch")
return None
# - Labels
labels = np.asarray(best_matches.index, dtype=str)
# - Main axis
valuemain = np.asarray(best_matches[main], dtype="float")
if "main" in logscale:
valuemain = np.log10(valuemain)
# - Second axis
if second is not None:
valuesecond = np.asarray(best_matches[second], dtype="float")
if "second" in logscale:
valuesecond = np.log10(valuesecond)
else:
valuesecond = None
fig = make_spiderplot(valuemain, labels=labels, ax=ax,
gcolor=color_grid,
rlabel_angle= (360/nbest_matchs)/2,
facecolor=to_rgba(color_main,falpha_main),
edgecolor=color_main, lw=lw_main,
rlabel=main, zorder=8,
highlight=min_rlap, highlight_color=color_grid)
if ax is None:
ax = fig.axes[0]
if valuesecond is not None:
fig = make_spiderplot(valuesecond, rtwin_from=ax,
labels=labels, gcolor="None",
facecolor=to_rgba(color_second, falpha_second),
edgecolor=color_second,
rlabel_angle=360/nbest_matchs * (0.5+int(nbest_matchs/2)),
lw=lw_second, #alpha=1,
rlabel=f"log(n>{min_rlap})" if second=="nentries" else second,
rlabel_rotation=0, rlabel_ha="center", zorder=9)
return fig
def show_ressummary(self, axes=None, nfirst=30,
phase=None, dphase=None,
redshift=None, zlabel=None,
min_rlap=5, phase_prior=5,
line_color="0.6", resprop={}):
""" """
from matplotlib.colors import to_rgba
if axes is None:
import matplotlib.pyplot as mpl
fig = mpl.figure(figsize=[4,4])
ax = fig.add_axes([0.175,0.85, 0.725,0.03])
axr = fig.add_axes([0.175,0.20, 0.725,0.55])
else:
ax, axr = axes
fig = ax.figure
res = self.get_results(rlap_range=[min_rlap,None], **resprop).iloc[:nfirst]
#
# - Ranking
#
types_values = res.reset_index().groupby("type")["no."].apply(list).sort_values()
# Loop over groups
for i_, (name, nos) in enumerate(types_values.items()):
color = f"C{i_}"
for j_, v_ in enumerate(np.asarray( list(nos), dtype="int")):
if j_==0:
label= f"{name}: #{v_}"
else:
label= "_no_legend_"
ax.axvspan(v_-1,v_, facecolor=color, edgecolor="0.7", lw=0.5, label=label)
if j_==0:
ax.text(v_-0.5, -0.8, f"{res.loc[str(v_)]['rlap']:.1f}",
color=color, va="top", ha="center", fontsize="x-small")
# - ranking legend
try:
ax.legend(loc=[0,1.2],
ncol=np.min([2,len(types_values)]), fontsize="x-small", frameon=False,
columnspacing=2, handlelength=1)
except:
warnings.warn("legend failed")
_ = ax.set_xlim(0,nfirst)#len(res))
_ = ax.set_yticks([])
_ = ax.set_xticks([])
# - Ranking texts
ax.text(-0.08, 0.3, "rank", va="center", ha="right",
fontsize="x-small", color="0.6",
transform=ax.transAxes)
ax.text(-0.08, -1.3, "rlap", va="center", ha="right",
fontsize="x-small", color="0.6",
transform=ax.transAxes)
if len(res)<nfirst-1:
ax.text(len(res), 0.3, f"#{len(res)}",
va="center", ha="left", fontsize="xx-small", color="0.3")
ax.text(1.01, 0.3, f"#{nfirst}", va="center", ha="left", fontsize="x-small", color="0.3",
transform=ax.transAxes)
#
# - Scatter
#
for i_,(name, nos) in enumerate(types_values.items()):
typeres = res.loc[nos]
axr.scatter(typeres["age"], typeres["z"],
facecolors=to_rgba(f"C{i_}", 0.9),
edgecolors="0.7", lw=0.5, zorder=3)
if phase is not None:
if dphase is not None:
axr.axvspan(phase-2*dphase, phase+2*dphase, color=to_rgba(line_color,0.1),
zorder=2)
axr.axvspan(phase-3*dphase, phase+3*dphase, color=to_rgba(line_color,0.1),
zorder=2)
else:
axr.axvline(phase, color=line_color, lw=1)
axr.axvspan(phase-phase_prior,phase+phase_prior, zorder=1,
color=to_rgba("C0",0.02))
if redshift is not None:
fig.canvas.draw()
minx_axr = axr.transData.inverted().transform(axr.transAxes.transform([0,-1]))[0]
propz = dict(color=line_color, lw=1, ls="-")
axr.axhline(redshift, **propz)
if zlabel is not None:
propzsource = dict(va="bottom", ha="left", color="0.6", fontsize="x-small")
axr.text(minx_axr, redshift, zlabel, **propzsource)
axr.set_ylabel("Redshift", fontsize="small")
axr.set_xlabel("Phase", fontsize="small")
axr.tick_params(labelsize="small")
return fig
def show_bestmatches(self, nbest=None, ax=None, savefile=None, min_rlap=5, matchprop={}, **kwargs):
""" """
best_matches = self.get_bestmatches(**{**dict(rlap_range=[min_rlap,None]), **matchprop})
if nbest is not None:
best_matches = best_matches.iloc[:nbest]
# Limit to those with models.
best_matches = best_matches[best_matches["no."].astype("int")<self.nmodels]
models = np.asarray(best_matches["no."], dtype="int")
return self.show_models(models=models, ax=ax, savefile=savefile, **kwargs)
def show_models(self, models=[1], offset_coef=None, ax=None, savefile=None, fluxcorr=True,
lw_data=1.5, color_data="0.7", lw_model=1.5, modelprop={},
**kwargs):
"""
offset_coef: [float, 'None' or None]
offset applied between spectra.
- float: value used
- None : go back to default values (90% of data)
-'None': No offset ; same as offset_coef=0
"""
if ax is None:
from matplotlib.figure import Figure
fig = Figure(figsize=[7,4])
ax = fig.add_axes([0.12,0.15,0.8,0.8])
else:
fig = ax.figure
propmodel = {**dict(lw=lw_model),**modelprop}
# - Data
data_ = self.get_inputdata(fluxcorr=fluxcorr)
if offset_coef is None:
offset_coef = np.percentile(data_["flux"], 90)*0.8
elif offset_coef == "None":
offset_coef = 0
models = np.atleast_1d(models)
if len(models)==0:
ax.plot(data_["wavelength"], data_["flux"],
lw=lw_data, color=color_data, **kwargs)
for i, model_ in enumerate(models):
datalabel = "snid-format data" if i==0 else "_no_legend_"
offset = offset_coef*i
ax.plot(data_["wavelength"], data_["flux"]-offset,
label=datalabel, lw=lw_data, color=color_data, **kwargs)
d = self.get_modeldata(model_, fluxcorr=fluxcorr)
mlabel = self.get_model_label(str(model_))
modeldata = self.results.loc[str(model_)]
if modeldata['grade'] != "good":
propmodel["ls"] = ":"
ax.plot(d["wavelength"], d["flux"]-offset,
label=f"{model_}: {mlabel}",
**propmodel)
text = f"{modeldata['sn']}: {modeldata['type']} \n z={modeldata['z']:0.3f} | {modeldata['age']:+0.1f}d \n rlap: {modeldata['rlap']:0.1f}"
if modeldata['grade'] != "good":
text += f" ({modeldata['grade']})"
ax.text(d["wavelength"][0]-50, d["flux"][0]-offset, text,
va="center", ha="right", color=f"C{i}",
fontsize="x-small", weight="bold")
#ax.set_xlim(d["wavelength"][0]*0.92)
#ax.legend(frameon=False, fontsize='x-small')
ax.set_yticks([])
clearwhich = ["left","right","top"] # "bottom"
[ax.spines[which].set_visible(False) for which in clearwhich]
ax.set_xlabel(r"Wavelength [$\AA$]", fontsize="medium")
ax.tick_params(labelsize="small")
if savefile is not None:
fig.savefig(savefile)
return fig
# ============== #
# Internal #
# ============== #
@staticmethod
def _read_snidflux_(filename_):
""" """
data = [l.split() for l in open(filename_).read().splitlines() if not l.strip().startswith("#")]
columns = ["wavelength", "flux"]
return pandas.DataFrame(np.asarray(data, dtype="float"), columns=columns)
@staticmethod
def _read_snidoutput_(filename_, nfirst=None):
""" """
f = open(filename_).read().split("### rlap-ordered template listings ###")[-1].splitlines()
dd = pandas.DataFrame([l.split() for l in f[2:]], columns=f[1][1:].split()).set_index("no.")
dd = dd[~dd["age_flag"].isin(["cut"])] # safeout
if nfirst is not None:
dd = dd.iloc[:nfirst]
return dd.astype({**{k:"str" for k in ["sn","type","grade"]},
**{k:"float" for k in ["lap","rlap","z","zerr","age"]},
**{k:"bool" for k in ["age_flag"]}}
)
# ============== #
# Properties #
# ============== #
@property
def data(self):
""" """
if not hasattr(self,"_data"):
return None
return self._data
@property
def models(self):
""" """
if not hasattr(self,"_models"):
return None
return self._models
@property
def results(self):
""" """
if not hasattr(self,"_results"):
return None
return self._results
@property
def filename(self):
""" """
if not hasattr(self,"_filename"):
return None
return self._filename
@property
def nmodels(self):
""" number of model stored inside self.models """
return len(self.models.index.levels[0])
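# Hedged sketch (not part of pysnid): reload a stored "_snid.h5" result produced by a
# previous run_snid call and build the summary figure. The filename is a placeholder.
def _example_reload_snid_result():
    snidres = SNIDReader.from_filename("spectrum_snid.h5")  # placeholder file
    fig = snidres.show(label="example spectrum")
    fig.savefig("spectrum_snid.png")
    return snidres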
class SNID( object ):
""" """
def __init__(self, id_=None):
""" """
if id_ is None:
self._snidid = f"{np.random.randint(1000000):08d}"
else:
self._snidid = f"{id_}"
@staticmethod
def build_snid_command(filename,
forcez=None,
lbda_range=[4000,9000],
phase_range=[-20,50],
redshift_range=[-0.05,0.4],
medlen=20, fwmed=None,
rlapmin=2,
fluxout=30,
skyclip=False, aband=False, inter=False, plot=False,
param=None, verbose=True):
""" """
print("*** build_snid_command ***")
cmd_snid = f"snid "
if param is not None:
cmd_snid += f"param={param} "
if lbda_range is not None:
lbdamin, lbdamax = lbda_range
cmd_snid += f"wmin={int(lbdamin)} wmax={int(lbdamax)} "
# Redshift
if forcez is not None:
cmd_snid += f"forcez={forcez} "
if redshift_range is not None:
zmin, zmax = redshift_range
cmd_snid += f"zmin={zmin} zmax={zmax} "
# Phase
if phase_range is not None:
agemin, agemax = phase_range
cmd_snid += f"agemin={agemin:.0f} agemax={agemax:.0f} "
# Input Spectral Structure
cmd_snid += f"skyclip={int(skyclip)} "
if medlen is not None:
cmd_snid += f"medlen={int(medlen)} "
if fwmed is not None:
cmd_snid += f"fwmed={int(fwmed)} "
cmd_snid += f"fluxout={int(fluxout)} aband={int(aband)} rlapmin={int(rlapmin)} inter={int(inter)} plot={int(plot)} "
cmd_snid += f"{filename}"
if verbose:
print(cmd_snid)
return cmd_snid
def run(self, filename, fileout=None,
dirout=None, tmpdir=None,
cleanout=True, verbose=False,
quiet=False, paramfile=None, in_tmpdir=True,
**kwargs):
""" run SNID and store the result as a hdf5 file.
**kwargs goes to build_snid_command
forcez=None,
lbda_range=[4000,8000],
phase_range=[-20,30],
redshift_range=[0,0.2],
medlen=20, rlapmin=4,
fluxout=30,
skyclip=False, aband=False, inter=False, plot=False
"""
import shutil
from subprocess import PIPE, run
from glob import glob
#
basename = os.path.basename(filename)
dirname = os.path.dirname(filename)
#
# Create a copy to bypass the SNID filepath limitation
if tmpdir is None:
tmpdir = f"tmpsnid_{self._snidid}"
if not os.path.isdir(tmpdir):
os.makedirs(tmpdir, exist_ok=True)
if in_tmpdir:
old_pwd=os.getcwd()
os.chdir(tmpdir)
self._tmpfile = f"snid_{self._snidid}_spectofit.ascii"
else:
old_pwd = None
self._tmpfile = os.path.join(tmpdir, f"snid_{self._snidid}_spectofit.ascii")
shutil.copy(filename, self._tmpfile)
tmpbase = os.path.basename(self._tmpfile).split(".")[0]
snid_cmd = self.build_snid_command(self._tmpfile, param=paramfile, verbose=verbose, **kwargs)
self._result = run(snid_cmd.split(), stdout=PIPE, stderr=PIPE, universal_newlines=True)
if verbose:
print(f" running: {snid_cmd}")
print(self._result.stdout.split("\n"))
if self._result.returncode != 0:
warnings.warn("SNID returncode is not 0, suggesting an error")
elif "orrelation function is all zero!" in self._result.stdout:
warnings.warn("SNID failed: Searching all correlation peaks... PEAKFIT: Correlation function is all zero!")
elif "PEAKFIT: fit quits before half peak points!" in self._result.stdout:
warnings.warn("SNID failed: Searching all correlation peaks... PEAKFIT: fit quits before half peak points!")
else:
datafile = f"{tmpbase}_snidflux.dat"
modelfiles = glob(f"{tmpbase}_comp*_snidflux.dat")
snidout = f"{tmpbase}_snid.output"
try:
result = SNIDReader._read_snidoutput_(snidout)
except FileNotFoundError:
print(" SNID RETURN CODE ".center(40,"-"))
print(self._result.stdout)
print("".center(40,"-"))
if cleanout: self._cleanup_run_(tmpdir, old_pwd=old_pwd)
return None
data = SNIDReader._read_snidflux_(datafile)
models = pandas.concat({int(f_.split("comp")[-1].split("_")[0]):SNIDReader._read_snidflux_(f_)
for i,f_ in enumerate(modelfiles)})
if fileout is None:
if dirout is None:
dirout = dirname
basename_noext, ext = os.path.splitext(basename)
fileout = os.path.join(dirout, basename_noext+"_snid.h5")
elif not fileout.endswith("h5"):
fileout+=".h5"
result.to_hdf(fileout, key="results", format='table')
data.to_hdf(fileout, key="data", format='table')
models.to_hdf(fileout, key="models", format='table')
if not quiet:
print(f"snid run was successfull: data stored at {fileout}")
if cleanout:
_ = os.remove(snidout)
_ = os.remove(datafile)
_ = [os.remove(f_) for f_ in modelfiles]
# - cleanup
if cleanout:
self._cleanup_run_(tmpdir,
old_pwd=old_pwd)
return fileout
def _cleanup_run_(self, tmpdir, old_pwd=None):
""" """
os.remove("snid.param")
os.remove(self._tmpfile)
if old_pwd is not None:
os.chdir(old_pwd)
shutil.rmtree(tmpdir)
# ============== #
# Internal #
# ============== #
def _build_tmpfile_(self, tmpdir="tmp", tmpstruct="_default_"):
""" """
if not os.path.isdir(tmpdir):
os.makedirs(tmpdir, exist_ok=True)
if tmpstruct == "_default_":
tmpstruct = f"snid_{self._snidid}_spectofit"
tmp_file = os.path.join(tmpdir, tmpstruct+".ascii")
i=1
while os.path.isfile(tmp_file):
tmp_file = os.path.join(tmpdir, tmpstruct+f"_{i}"+".ascii")
i+=1
return tmp_file
|
MickaelRigaultREPO_NAMEpysnidPATH_START.@pysnid_extracted@pysnid-main@pysnid@snid.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "jkrogager/VoigtFit",
"repo_path": "VoigtFit_extracted/VoigtFit-master/VoigtFit/utils/__init__.py",
"type": "Python"
}
|
jkrogagerREPO_NAMEVoigtFitPATH_START.@VoigtFit_extracted@VoigtFit-master@VoigtFit@utils@__init__.py@.PATH_END.py
|
|
{
"filename": "cphotang.py",
"repo_name": "NuSpaceSim/nuSpaceSim",
"repo_path": "nuSpaceSim_extracted/nuSpaceSim-main/src/nuspacesim/simulation/eas_optical/cphotang.py",
"type": "Python"
}
|
# The Clear BSD License
#
# Copyright (c) 2021 Alexander Reustle and the NuSpaceSim Team
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted (subject to the limitations in the disclaimer
# below) provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY
# THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
# CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
# BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
# IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
r"""Cherenkov photon density and angle determination class.
.. autosummary::
:toctree:
:recursive:
CphotAng
"""
import dask.bag as db
import numpy as np
from dask.diagnostics import ProgressBar
from numpy.polynomial import Polynomial
from .detector_geometry import distance_to_detector
# Wrapped in try-catch block as a hack to enable sphinx documentation to be generated
# on ReadTheDocs without pre-compiling.
try:
from .zsteps import zsteps as cppzsteps
except ImportError:
pass
__all__ = ["CphotAng"]
class CphotAng:
r"""Cherenkov Photon Angle"""
def __init__(self, detector_altitude):
r"""CphotAng: Cherenkov photon density and angle determination class.
Iterative summation of cherenkov radiation reimplemented in numpy and
C++.
"""
self.detector_altitude = detector_altitude
self.dtype = np.float32
"""numerical data type"""
self.wave1 = np.array(
[
200.0,
225.0,
250.0,
275.0,
300.0,
325.0,
350.0,
375.0,
400.0,
425.0,
450.0,
475.0,
500.0,
525.0,
550.0,
575.0,
600.0,
625.0,
650.0,
675.0,
700.0,
725.0,
750.0,
775.0,
800.0,
825.0,
850.0,
875.0,
900.0,
],
dtype=self.dtype,
)
"""internal wavelength step array"""
self.OzZeta = np.array(
[5.35, 10.2, 14.75, 19.15, 23.55, 28.1, 32.8, 37.7, 42.85, 48.25, 100.0],
dtype=self.dtype,
)
"""internal Ozone zeta array"""
self.OzDepth = np.array(
[15.0, 9.0, 10.0, 31.0, 71.0, 87.2, 57.0, 29.4, 10.9, 3.2, 1.3],
dtype=self.dtype,
)
"""internal Ozone depth array"""
self.OzDsum = np.array(
[310.0, 301.0, 291.0, 260.0, 189.0, 101.8, 44.8, 15.4, 4.5, 1.3, 0.1],
dtype=self.dtype,
)
"""internal Ozone Dsum array"""
self.aOD55 = np.array(
[
0.250,
0.136,
0.086,
0.065,
0.055,
0.049,
0.045,
0.042,
0.038,
0.035,
0.032,
0.029,
0.026,
0.023,
0.020,
0.017,
0.015,
0.012,
0.010,
0.007,
0.006,
0.004,
0.003,
0.003,
0.002,
0.002,
0.001,
0.001,
0.001,
0.001,
],
dtype=self.dtype,
)
"""internal aOD55 array"""
# self.aOD55_interp = interpolate.interp1d(np.arange(30)+1, self.aOD55,
# kind='linear',
# bounds_error=False,
# fill_value=0,
# assume_sorted=True)
# self.OzDsum_interp = interpolate.interp1d(self.OzZeta, self.OzDsum,
# kind='linear',
# bounds_error=False,
# fill_value=0.1,
# assume_sorted=True)
self.dL = self.dtype(0.1) # parin(1) step size km
"""simulation step size in Km"""
self.orbit_height = self.dtype(525.0) # parin(2) orbit height km
"""Detector orbital altitude in Km"""
self.hist_bin_size = self.dtype(4.0) # parin(3) bin size histogram km
"""histogram bin size"""
        # parin(5) record time dispersion at this radial point (km)
        self.time_disp_rec_point = self.dtype(0.5)
        """record time dispersion point (km)"""
# c parameters for 1/Beta fit vs wavelength
# c 5th order polynomial
aP = Polynomial(
np.array(
[
-1.2971,
0.22046e-01,
-0.19505e-04,
0.94394e-08,
-0.21938e-11,
0.19390e-15,
],
dtype=self.dtype,
)
)
"""1/beta fit vs wavelength polynomial"""
self.wmean = self.wave1[:-1] + self.dtype(12.5)
tBetinv = aP(self.wmean)
self.aBetaF = np.reciprocal(tBetinv, dtype=self.dtype)
self.aBetaF /= self.dtype(0.158)
# self.aBeta55 = self.dtype(0.158)
# c Ozone Trans = exp(-kappa dx)
# c where dx=ozone slant depth in atm-cm
# c and kappa = 110.5 x wave^(-44.21) in atm-cm^-1
self.Okappa = np.log10(self.wmean, dtype=self.dtype)
self.Okappa *= self.dtype(44.21)
self.Okappa = self.dtype(110.5) - self.Okappa
self.Okappa = np.power(10.0, self.Okappa, dtype=self.dtype)
self.Okappa *= self.dtype(-1e-3)
# c
# c calc OD/km difference
# c
# self.dfaOD55 = np.diff(self.aOD55[::-1], append=0)
tmp = [self.dtype(self.aOD55[i] - self.aOD55[i + 1]) for i in range(29)]
tmp.append(self.dtype(0))
self.dfaOD55 = np.array(tmp, dtype=self.dtype)
# np.append(self.dfaOD55, 0)
self.alpha = np.reciprocal(self.dtype(137.04))
self.pi = self.dtype(3.1415926)
self.PYieldCoeff = (
self.dtype(2e12)
* self.dL
* self.pi
* self.alpha
* (np.reciprocal(self.wave1)[:-1] - np.reciprocal(self.wave1)[1:])
)
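        # Added note (interpretation of the constants above): this is the
        # Frank-Tamm photon yield per step and wavelength bin,
        # dN = 2*pi*alpha * (1/lambda_i - 1/lambda_{i+1}) * dL, with the 2e12
        # factor absorbing the nm -> cm and km -> cm unit conversions; the
        # sin^2(theta_C) factor is applied later in the scaled-photon-yield step.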
self.zmax = self.orbit_height
self.zMaxZ = self.dtype(65.0)
self.RadE = self.dtype(6378.14)
Zair = self.dtype(7.4)
self.ecrit = self.dtype(0.710 / (Zair + 0.96))
def theta_view(self, ThetProp):
"""
Compute theta view from initial betas
"""
# ThetProp = np.radians(betaE)
ThetView = self.RadE / (self.RadE + self.zmax)
ThetView *= np.cos(ThetProp, dtype=self.dtype)
ThetView = np.arcsin(ThetView, dtype=self.dtype)
return ThetView
def grammage(self, z):
"""
# c Calculate Grammage
"""
X = np.empty_like(z, dtype=self.dtype)
mask1 = z < 11
mask2 = np.logical_and(z >= 11, z < 25)
mask3 = z >= 25
X[mask1] = np.power(((z[mask1] - 44.34) / -11.861), (1 / 0.19))
X[mask2] = np.exp(
np.divide(z[mask2] - 45.5, -6.34, dtype=self.dtype), dtype=self.dtype
)
X[mask3] = np.exp(
np.subtract(
13.841,
np.sqrt(28.920 + 3.344 * z[mask3], dtype=self.dtype),
dtype=self.dtype,
)
)
rho = np.empty_like(z, dtype=self.dtype)
rho[mask1] = (
self.dtype(-1.0e-5)
* (1 / 0.19)
/ (-11.861)
* ((z[mask1] - 44.34) / -11.861) ** ((1.0 / 0.19) - 1.0)
)
rho[mask2] = np.multiply(
-1e-5 * np.reciprocal(-6.34), X[mask2], dtype=self.dtype
)
rho[mask3] = np.multiply(
np.divide(
0.5e-5 * 3.344,
np.sqrt(28.920 + 3.344 * z[mask3], dtype=self.dtype),
dtype=self.dtype,
),
X[mask3],
dtype=self.dtype,
)
return X, rho
def ozone_losses(self, z):
"""
Calculate ozone losses from altitudes (z) in km.
"""
msk1 = z < 5.35
TotZon = np.empty_like(z)
TotZon[msk1] = self.dtype(310) + (
(self.dtype(5.35) - z[msk1]) / self.dtype(5.35)
) * self.dtype(15)
msk2 = z >= 100
TotZon[msk2] = self.dtype(0.1)
msk3 = np.logical_and(~msk1, ~msk2)
idxs = np.searchsorted(self.OzZeta, z[msk3])
TotZon[msk3] = (
self.OzDsum[idxs]
+ (
(self.OzZeta[idxs] - z[msk3])
/ (self.OzZeta[idxs] - self.OzZeta[idxs - 1])
)
* self.OzDepth[idxs]
)
return TotZon
def theta_prop(self, z, sinThetView):
"""
theta propagation.
"""
tp = (self.RadE + self.zmax) / (self.RadE + z)
return np.arccos(sinThetView * tp, dtype=self.dtype)
def zsteps(self, z, sinThetView):
"""
Compute all mid-bin z steps and corresponding delz values
"""
return cppzsteps(
z, sinThetView, self.RadE, self.zMaxZ, self.zmax, self.dL, self.pi
)
# return zsave, delzs
def slant_depth(self, alt, sinThetView):
"""Determine Rayleigh and Ozone slant depth."""
zsave, delzs = self.zsteps(alt, sinThetView)
gramz, rhos = self.grammage(zsave)
delgram_vals = rhos * self.dL * self.dtype(1e5)
gramsum = np.cumsum(delgram_vals)
delgram = np.cumsum(delgram_vals[::-1])[::-1]
TotZons = self.ozone_losses(np.insert(zsave, 0, alt))
ZonZ_vals = (TotZons[:-1] - TotZons[1:]) / delzs * self.dL
ZonZ = np.cumsum(ZonZ_vals[::-1])[::-1]
ThetPrpA = self.theta_prop(zsave, sinThetView)
return zsave, delgram, gramsum, gramz, ZonZ, ThetPrpA
def aerosol_model(self, z, ThetPrpA):
"""
        Apply an aerosol model based on the 550 nm Elterman results.
        The tabulated 550 nm optical depths (aOD55) are indexed directly by
        integer altitude here; the scipy interpolators sketched in the
        constructor are commented out. z values above 30 km altitude fall
        outside the table and keep aTrans = 1, but in the future masking may
        be used to further optimize performance by avoiding this computation.
"""
aTrans = np.ones((*z.shape, *self.aBetaF.shape), dtype=self.dtype)
tmpOD = (
self.aOD55[np.int32(z[z < 30])]
- (z[z < 30] - self.dtype(np.int32(z[z < 30])))
* self.dfaOD55[np.int32(z[z < 30])]
)
aODepth = -np.outer(tmpOD, self.aBetaF)
costhet = np.cos(self.pi / 2 - ThetPrpA[z < 30], dtype=self.dtype)
aTrans[z < 30, :] = np.exp((aODepth / costhet[:, None]), dtype=self.dtype)
return aTrans
def tracklen(self, E0, eCthres, s):
"""Return tracklength and Tfrac."""
t1 = np.subtract(np.multiply(0.89, E0, dtype=self.dtype), 1.2, dtype=self.dtype)
t2 = np.divide(t1, (E0 + eCthres), dtype=self.dtype)
t3 = np.power(t2, s, dtype=self.dtype)
t4 = np.power(1 + self.dtype(1e-4 * s * eCthres), 2, dtype=self.dtype)
return np.divide(t3, t4, dtype=self.dtype)
    def sphoton_yield(self, thetaC, RN, delgram, ZonZ, z, ThetPrpA):
        """Scaled photon yield per step and wavelength: Cherenkov yield times
        Rayleigh, ozone, and aerosol transmission, weighted by shower size RN."""
# c Calculate Light Yield
PYield = np.sin(thetaC, dtype=self.dtype)
PYield = np.power(PYield, 2, dtype=self.dtype)
PYield = PYield[..., None] * self.PYieldCoeff[None, :]
# c Calculate Losses due to Rayleigh Scattering
TrRayl = np.divide(-delgram, 2974, dtype=self.dtype)
TrRb = np.divide(400, self.wmean, dtype=self.dtype)
TrRb = np.power(TrRb, 4, dtype=self.dtype)
TrRayl = TrRayl[..., None] * TrRb[None, :]
TrRayl = np.exp(TrRayl, dtype=self.dtype)
# c Calculate Ozone Losses
# c Ozone atten parameter given by R. McPeters
TrOz = np.exp(ZonZ[:, None] * self.Okappa[None, :], dtype=self.dtype)
# c put in aerosol model based on 550 nm
# c Elterman results
aTrans = self.aerosol_model(z, ThetPrpA)
# # Scaled Photon Yield
SPYield = PYield * TrRayl * TrOz * aTrans * RN[..., None]
return SPYield
def photon_sum(
self, SPYield, DistStep, thetaC, e2hill, eCthres, Tfrac, E0, s, Eshow
):
"""
Sum photons (Gaisser-Hillas)
"""
sigval = np.divide(
SPYield,
np.power(1e3 * self.hist_bin_size, 2, dtype=self.dtype),
dtype=self.dtype,
)
# c set limits by distance to det
# c and Cherenkov Angle
CradLim = DistStep * np.tan(thetaC, dtype=self.dtype)
jlim = np.floor(CradLim) + 1
max_jlim = np.amax(jlim)
jstep = np.arange(max_jlim)
jjstep = np.broadcast_to(jstep, (*CradLim.shape, *jstep.shape))
jmask = jjstep < jlim[..., None]
athetaj = jjstep[:, 1:] - 0.5
athetaj = np.arctan2(athetaj, DistStep[:, None], dtype=self.dtype)
athetaj = 2.0 * (1.0 - np.cos(athetaj, dtype=self.dtype))
sthetaj = np.arctan2(jjstep, DistStep[:, None], dtype=self.dtype)
sthetaj = 2.0 * (1.0 - np.cos(sthetaj, dtype=self.dtype))
# c Calc ang spread ala Hillas
# plus 3 to convert to MeV and minus 2 to end the integral early (3-2=1)
Ieang = int(np.log10(Eshow)) + 1
eang = np.arange(self.dtype(1.0), Ieang + self.dtype(2))
ehill = np.power(10.0, eang, dtype=self.dtype)
ehillave = np.where(
eCthres[..., None] >= ehill[:-1][None, :],
(eCthres[..., None] + ehill[1:][None, :]) / self.dtype(2),
self.dtype(5) * ehill[:-1][None, :],
)
tlen = np.where(
eCthres[..., None] >= ehill[None, :],
Tfrac[..., None],
self.tracklen(E0[..., None], ehill[None, :], s[:, None]),
)
deltrack = tlen[..., :-1] - tlen[..., 1:]
deltrack[deltrack < 0] = self.dtype(0.0)
vhill = ehillave / e2hill[..., None]
wave = self.dtype(
0.0054 * ehillave * (1 + vhill) / (1 + 13 * vhill + 8.3 * vhill**2)
)
poweha = np.power(ehillave / 21.0, 2, dtype=self.dtype)
uhill = np.einsum("zj,ze->zje", athetaj, poweha, dtype=self.dtype)
uhill /= wave[..., None, :]
ubin = np.einsum("zj,ze->zje", sthetaj, poweha, dtype=self.dtype)
ubin /= wave[..., None, :]
ubin = ubin[..., 1:, :] - ubin[..., :-1, :]
ubin[ubin < 0] = self.dtype(0)
# z0hill = self.dtype(0.59)
# ahill = self.dtype(0.777)
xhill = np.sqrt(uhill, dtype=self.dtype) - self.dtype(0.59)
svtrm = np.where(xhill < 0, self.dtype(0.478), self.dtype(0.380))
svtrm = np.exp(-xhill / svtrm)
svtrm *= ubin * deltrack[..., None, :] * self.dtype(0.777)
svtrm[~jmask[..., 1:]] = self.dtype(0)
photsum = np.einsum("zje,zw->", svtrm, sigval, dtype=self.dtype)
photsum *= np.power(1e3 * self.hist_bin_size, 2, dtype=self.dtype)
return photsum
def valid_arrays(self, zsave, delgram, gramsum, gramz, ZonZ, ThetPrpA, Eshow):
"""
Return data arrays with invalid values removed
"""
mask = zsave <= self.zmax
AirN = np.empty_like(zsave, dtype=self.dtype)
AirN[mask] = 1.0 + 0.000296 * (gramz[mask] / 1032.9414) * (
273.2 / (204.0 + 0.091 * gramz[mask])
)
mask &= (AirN != 1) & (AirN != 0)
# c do greissen param
t = np.zeros_like(zsave, dtype=self.dtype)
t[mask] = gramsum[mask] / self.dtype(36.66)
greisen_beta = self.dtype(np.log(np.float64(Eshow) / np.float64(self.ecrit)))
s = np.zeros_like(zsave, dtype=self.dtype)
s[mask] = self.dtype(3) * t[mask] / (t[mask] + self.dtype(2) * greisen_beta)
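        # Greisen parameterization, added for reference: shower age
        # s = 3t / (t + 2*beta) with t the depth in radiation lengths
        # (X0 = 36.66 g/cm^2) and beta = ln(E_shower / E_crit); the shower
        # size below is RN = 0.31/sqrt(beta) * exp(t*(1 - 1.5*ln(s))).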
RN = np.zeros_like(zsave, dtype=self.dtype)
RN[mask] = (
self.dtype(0.31)
/ np.sqrt(greisen_beta, dtype=self.dtype)
* np.exp(
t[mask] * (1 - self.dtype(3 / 2) * np.log(s[mask], dtype=self.dtype)),
dtype=self.dtype,
)
)
RN[RN < 0] = self.dtype(0)
mask &= ~((RN < 1) & (s > 1))
e2hill = np.zeros_like(zsave, dtype=self.dtype)
e2hill[mask] = self.dtype(1150) + self.dtype(454) * np.log(
s[mask], dtype=self.dtype
)
mask &= ~(e2hill <= 0)
# final mask set for loop
zs = zsave[mask]
delgram = delgram[mask]
ZonZ = ZonZ[mask]
ThetPrpA = ThetPrpA[mask]
AirN = AirN[mask]
s = s[mask]
RN = RN[mask]
e2hill = e2hill[mask]
return zs, delgram, ZonZ, ThetPrpA, AirN, s, RN, e2hill
def e0(self, shape, s):
"""Hillas Energy Paramaterization.
From Hillas 1461. page 1466 eqn 8.
"""
E0 = np.full(shape, 26.0, dtype=self.dtype)
E0[s >= 0.4] = 44.0 - 17.0 * (s[(s >= 0.4)] - 1.46) ** 2
return E0
def cherenkov_threshold_angle(self, AirN):
"""Calc Cherenkov Threshold and Cherenkov angle."""
eCthres = np.reciprocal(np.power(AirN, 2))
eCthres = np.sqrt(1.0 - eCthres, dtype=self.dtype)
eCthres = np.divide(self.dtype(0.511), eCthres)
# c Calculate Cerenkov Angle
thetaC = np.arccos(np.reciprocal(AirN), dtype=self.dtype)
return eCthres, thetaC
def d_to_det(self, ThetView, ThetPrpA, zs):
"""Distance to detector."""
        AngE = self.pi / 2 - ThetView - ThetPrpA
DistStep = np.sin(AngE, dtype=self.dtype)
DistStep /= np.sin(ThetView, dtype=self.dtype)
DistStep *= self.RadE + zs
return DistStep
def cher_ang_sig_i(self, taphotstep, taphotsum, thetaC, AveCangI):
""" """
nAcnt = np.count_nonzero(taphotstep * thetaC, axis=-1)
CangsigI = taphotstep / taphotsum
CangsigI *= np.power(thetaC - AveCangI, 2, dtype=self.dtype)
CangsigI = np.sum(CangsigI, axis=-1, dtype=self.dtype)
if nAcnt > 1:
CangsigI = np.sqrt(CangsigI * nAcnt / (nAcnt - 1), dtype=self.dtype)
else:
CangsigI = np.sqrt(CangsigI, dtype=self.dtype)
return CangsigI
def cherenkov_area(self, AveCangI, DistStep, izRNmax):
CherArea = np.tan(AveCangI, dtype=self.dtype) * self.dtype(1e3)
CherArea *= DistStep[izRNmax]
CherArea = self.pi * np.power(CherArea, 2, dtype=self.dtype)
return CherArea
def run(self, betaE, alt, Eshow100PeV, lat, long, cloudf=None):
"""Main body of simulation code."""
# Should we just skip these with a mask in valid_arrays?
betaE = self.dtype(
np.radians(self.dtype(1)) if betaE < np.radians(1.0) else betaE
)
Eshow = self.dtype(Eshow100PeV * 1e8) # GeV
ThetView = self.theta_view(betaE)
sinThetView = np.sin(ThetView, dtype=self.dtype)
#
# Shower
#
zs, delgram, ZonZ, ThetPrpA, AirN, s, RN, e2hill = self.valid_arrays(
*self.slant_depth(alt, sinThetView), Eshow
)
# Cloud top height
cloud_top_height = cloudf(lat, long) if cloudf else -np.inf
# early return check, ensure at least 2 segments above cloud-top height.
if zs[-2] < cloud_top_height:
return self.dtype(0), self.dtype(0)
# Cloud mask. No particles will be considered if generated below the clouds.
cloud_mask = zs < cloud_top_height
izRNmax = np.argmax(RN, axis=-1)
E0 = self.e0(zs.shape, s) # in MeV
# c Calc Cherenkov Threshold
eCthres, thetaC = self.cherenkov_threshold_angle(AirN)
Tfrac = self.tracklen(E0, eCthres, s)
# c
# c Determine geometry
# c
# distance to detector
DistStep = self.d_to_det(ThetView, ThetPrpA, zs)
# Scaled Photon Yield [zs, w]
        SPYield = self.sphoton_yield(thetaC, RN, delgram, ZonZ, zs, ThetPrpA)
SPYield[cloud_mask, ...] = self.dtype(0)
# Total photons
photsum = self.photon_sum(
SPYield, DistStep, thetaC, e2hill, eCthres, Tfrac, E0, s, Eshow
)
taphotstep = np.sum(SPYield, axis=-1, dtype=self.dtype) * Tfrac
taphotsum = np.sum(taphotstep, axis=-1, dtype=self.dtype)
if taphotsum == 0:
return self.dtype(0), self.dtype(0)
AveCangI = np.sum(taphotstep * thetaC, axis=-1, dtype=self.dtype) / taphotsum
CangsigI = self.cher_ang_sig_i(taphotstep, taphotsum, thetaC, AveCangI)
CherArea = self.cherenkov_area(AveCangI, DistStep, izRNmax)
photonDen = self.dtype(0.5) * photsum / CherArea
altitude_scaling = (
distance_to_detector(betaE, alt, self.orbit_height, self.RadE)
/ distance_to_detector(betaE, alt, self.detector_altitude, self.RadE)
) ** 2
photonDen *= altitude_scaling
Cang = np.degrees(AveCangI + CangsigI)
return photonDen, Cang
def __call__(self, betaE, alt, Eshow100PeV, init_lat, init_long, cloudf=None):
"""
Iterate over the list of events and return the result as pair of
numpy arrays.
"""
if (
len(betaE) < 1
or len(alt) < 1
or len(Eshow100PeV) < 1
or len(init_lat) < 1
or len(init_long) < 1
):
return np.empty([]), np.empty([])
#######################
b = db.from_sequence(
zip(betaE, alt, Eshow100PeV, init_lat, init_long), partition_size=100
)
with ProgressBar():
Dphots, Cang = zip(*b.map(lambda x: self.run(*x, cloudf)).compute())
return np.asarray(Dphots), np.array(Cang)
|
NuSpaceSimREPO_NAMEnuSpaceSimPATH_START.@nuSpaceSim_extracted@nuSpaceSim-main@src@nuspacesim@simulation@eas_optical@cphotang.py@.PATH_END.py
|
{
"filename": "test_linearity.py",
"repo_name": "lsst/cp_pipe",
"repo_path": "cp_pipe_extracted/cp_pipe-main/tests/test_linearity.py",
"type": "Python"
}
|
#!/usr/bin/env python
#
# LSST Data Management System
#
# Copyright 2008-2017 AURA/LSST.
#
# This product includes software developed by the
# LSST Project (http://www.lsst.org/).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the LSST License Statement and
# the GNU General Public License along with this program. If not,
# see <https://www.lsstcorp.org/LegalNotices/>.
#
"""Test cases for cp_pipe linearity code."""
import logging
import unittest
import numpy as np
import lsst.utils
import lsst.utils.tests
from lsst.ip.isr import PhotonTransferCurveDataset
import lsst.afw.image
import lsst.afw.math
from lsst.cp.pipe import LinearitySolveTask
from lsst.cp.pipe.ptc import PhotonTransferCurveSolveTask
from lsst.cp.pipe.utils import funcPolynomial
from lsst.ip.isr.isrMock import FlatMock, IsrMock
class FakeCamera(list):
def getName(self):
return "FakeCam"
class LinearityTaskTestCase(lsst.utils.tests.TestCase):
"""Test case for the linearity tasks."""
def setUp(self):
mock_image_config = IsrMock.ConfigClass()
mock_image_config.flatDrop = 0.99999
mock_image_config.isTrimmed = True
self.dummy_exposure = FlatMock(config=mock_image_config).run()
self.detector = self.dummy_exposure.getDetector()
self.input_dims = {"detector": 0}
self.camera = FakeCamera([self.detector])
self.amp_names = []
for amp in self.detector:
self.amp_names.append(amp.getName())
def _create_ptc(
self,
amp_names,
exp_times,
means,
ccobcurr=None,
photo_charges=None,
temperatures=None,
ptc_turnoff=None,
):
"""
Create a PTC with values for linearity tests.
Parameters
----------
amp_names : `list` [`str`]
Names of amps.
exp_times : `np.ndarray`
Array of exposure times.
means : `np.ndarray`
Array of means.
ccobcurr : `np.ndarray`, optional
Array of CCOBCURR to put into auxiliary values.
photo_charges : `np.ndarray`, optional
Array of photoCharges to put into ptc.
temperatures : `np.ndarray`, optional
Array of temperatures (TEMP6) to put into ptc.
ptc_turnoff : `float`, optional
Turnoff value to set (by hand) for testing.
Returns
-------
ptc : `lsst.ip.isr.PhotonTransferCurveDataset`
PTC filled with relevant values.
"""
exp_id_pairs = np.arange(len(exp_times)*2).reshape((len(exp_times), 2)).tolist()
if photo_charges is None:
photo_charges = np.full(len(exp_times), np.nan)
datasets = []
for i in range(len(exp_times)):
partial = PhotonTransferCurveDataset(amp_names, ptcFitType="PARTIAL", covMatrixSide=1)
for amp_name in amp_names:
# For the first amp, we add a few bad points.
if amp_name == amp_names[0] and i >= 5 and i < 7:
exp_id_mask = False
raw_mean = np.nan
else:
exp_id_mask = True
raw_mean = means[i]
partial.setAmpValuesPartialDataset(
amp_name,
inputExpIdPair=exp_id_pairs[i],
rawExpTime=exp_times[i],
rawMean=raw_mean,
rawVar=raw_mean,
kspValue=1.0,
expIdMask=exp_id_mask,
photoCharge=photo_charges[i],
)
aux_dict = {}
if ccobcurr is not None:
aux_dict["CCOBCURR"] = ccobcurr[i]
if temperatures is not None:
aux_dict["TEMP6"] = temperatures[i]
if aux_dict:
partial.setAuxValuesPartialDataset(aux_dict)
datasets.append(partial)
datasets.append(PhotonTransferCurveDataset(amp_names, ptcFitType="DUMMY"))
config = PhotonTransferCurveSolveTask.ConfigClass()
config.maximumRangeCovariancesAstier = 1
config.maxDeltaInitialPtcOutlierFit = 100_000.0
solve_task = PhotonTransferCurveSolveTask(config=config)
# Suppress logging here.
with self.assertNoLogs(level=logging.CRITICAL):
ptc = solve_task.run(datasets).outputPtcDataset
# Make the last amp a bad amp.
ptc.badAmps = [amp_names[-1]]
if ptc_turnoff is not None:
for amp_name in amp_names:
if amp_name in ptc.badAmps:
ptc.ptcTurnoff[amp_name] = np.nan
ptc.finalMeans[amp_name][:] = np.nan
else:
ptc.ptcTurnoff[amp_name] = ptc_turnoff
high = (ptc.rawMeans[amp_name] > ptc_turnoff)
ptc.expIdMask[amp_name][high] = False
ptc.finalMeans[amp_name][high] = np.nan
return ptc
def _check_linearity(self, linearity_type, min_adu=0.0, max_adu=100000.0):
"""Run and check linearity.
Parameters
----------
linearity_type : `str`
Must be ``Polynomial``, ``Squared``, or ``LookupTable``.
min_adu : `float`, optional
Minimum cut on ADU for fit.
max_adu : `float`, optional
Maximum cut on ADU for fit.
"""
flux = 1000.
time_vec = np.arange(1., 101., 5)
k2_non_linearity = -5e-6
coeff = k2_non_linearity/(flux**2.)
mu_vec = flux * time_vec + k2_non_linearity * time_vec**2.
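        # With mu_linear = flux * time_vec, the injected signal can be
        # rewritten as mu = mu_linear + (k2_non_linearity / flux**2) * mu_linear**2,
        # so the quadratic coefficient recovered by the fit should equal `coeff`.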
ptc = self._create_ptc(self.amp_names, time_vec, mu_vec)
config = LinearitySolveTask.ConfigClass()
config.linearityType = linearity_type
config.minLinearAdu = min_adu
config.maxLinearAdu = max_adu
task = LinearitySolveTask(config=config)
linearizer = task.run(ptc, [self.dummy_exposure], self.camera, self.input_dims).outputLinearizer
if linearity_type == "LookupTable":
t_max = config.maxLookupTableAdu / flux
time_range = np.linspace(0.0, t_max, config.maxLookupTableAdu)
signal_ideal = time_range * flux
signal_uncorrected = funcPolynomial(np.array([0.0, flux, k2_non_linearity]), time_range)
linearizer_table_row = signal_ideal - signal_uncorrected
# Skip the last amp which is marked bad.
for i, amp_name in enumerate(ptc.ampNames[:-1]):
if linearity_type in ["Squared", "Polynomial"]:
self.assertFloatsAlmostEqual(linearizer.fitParams[amp_name][0], 0.0, atol=1e-2)
self.assertFloatsAlmostEqual(linearizer.fitParams[amp_name][1], 1.0, rtol=1e-5)
self.assertFloatsAlmostEqual(linearizer.fitParams[amp_name][2], coeff, rtol=1e-6)
if linearity_type == "Polynomial":
self.assertFloatsAlmostEqual(linearizer.fitParams[amp_name][3], 0.0)
if linearity_type == "Squared":
self.assertEqual(len(linearizer.linearityCoeffs[amp_name]), 1)
self.assertFloatsAlmostEqual(linearizer.linearityCoeffs[amp_name][0], -coeff, rtol=1e-6)
else:
self.assertEqual(len(linearizer.linearityCoeffs[amp_name]), 2)
self.assertFloatsAlmostEqual(linearizer.linearityCoeffs[amp_name][0], -coeff, rtol=1e-6)
self.assertFloatsAlmostEqual(linearizer.linearityCoeffs[amp_name][1], 0.0)
else:
index = linearizer.linearityCoeffs[amp_name][0]
self.assertEqual(index, i)
self.assertEqual(len(linearizer.tableData[index, :]), len(linearizer_table_row))
self.assertFloatsAlmostEqual(linearizer.tableData[index, :], linearizer_table_row, rtol=1e-4)
lin_mask = np.isfinite(linearizer.fitResiduals[amp_name])
lin_mask_expected = (mu_vec > min_adu) & (mu_vec < max_adu) & ptc.expIdMask[amp_name]
self.assertListEqual(lin_mask.tolist(), lin_mask_expected.tolist())
self.assertFloatsAlmostEqual(linearizer.fitResiduals[amp_name][lin_mask], 0.0, atol=1e-2)
# If we apply the linearity correction, we should get the true
# linear values out.
image = lsst.afw.image.ImageF(len(mu_vec), 1)
image.array[:, :] = mu_vec
lin_func = linearizer.getLinearityTypeByName(linearizer.linearityType[amp_name])
lin_func()(
image,
coeffs=linearizer.linearityCoeffs[amp_name],
table=linearizer.tableData,
log=None,
)
linear_signal = flux * time_vec
self.assertFloatsAlmostEqual(image.array[0, :] / linear_signal, 1.0, rtol=1e-6)
self._check_linearizer_lengths(linearizer)
def _check_linearizer_lengths(self, linearizer):
# Check that the lengths of all the fields match.
lenCoeffs = -1
lenParams = -1
lenParamsErr = -1
lenResiduals = -1
lenFit = -1
for ampName in linearizer.ampNames:
if lenCoeffs < 0:
lenCoeffs = len(linearizer.linearityCoeffs[ampName])
lenParams = len(linearizer.fitParams[ampName])
lenParamsErr = len(linearizer.fitParamsErr[ampName])
lenResiduals = len(linearizer.fitResiduals[ampName])
lenFit = len(linearizer.linearFit[ampName])
else:
self.assertEqual(
len(linearizer.linearityCoeffs[ampName]),
lenCoeffs,
msg=f"amp {ampName} linearityCoeffs length mismatch",
)
self.assertEqual(
len(linearizer.fitParams[ampName]),
lenParams,
msg=f"amp {ampName} fitParams length mismatch",
)
self.assertEqual(
len(linearizer.fitParamsErr[ampName]),
lenParamsErr,
msg=f"amp {ampName} fitParamsErr length mismatch",
)
self.assertEqual(
len(linearizer.fitResiduals[ampName]),
lenResiduals,
msg=f"amp {ampName} fitResiduals length mismatch",
)
self.assertEqual(
len(linearizer.linearFit[ampName]),
lenFit,
msg=f"amp {ampName} linearFit length mismatch",
)
def test_linearity_polynomial(self):
"""Test linearity with polynomial fit."""
self._check_linearity("Polynomial")
def test_linearity_squared(self):
"""Test linearity with a single order squared solution."""
self._check_linearity("Squared")
def test_linearity_table(self):
"""Test linearity with a lookup table solution."""
self._check_linearity("LookupTable")
def test_linearity_polynomial_aducuts(self):
"""Test linearity with polynomial and ADU cuts."""
self._check_linearity("Polynomial", min_adu=10000.0, max_adu=90000.0)
def _check_linearity_spline(
self,
do_pd_offsets=False,
n_points=200,
do_mu_offset=False,
do_weight_fit=False,
do_temperature_fit=False,
):
"""Check linearity with a spline solution.
Parameters
----------
        do_pd_offsets : `bool`, optional
            Apply offsets to the photodiode data.
        n_points : `int`, optional
            Number of simulated flux points.
do_mu_offset : `bool`, optional
Apply constant offset to mu data.
do_weight_fit : `bool`, optional
Fit the weight parameters?
do_temperature_fit : `bool`, optional
Apply a temperature dependence and fit it?
"""
np.random.seed(12345)
# Create a test dataset representative of real data.
pd_values = np.linspace(1e-8, 2e-5, n_points)
time_values = pd_values * 1000000.
linear_ratio = 5e9
mu_linear = linear_ratio * pd_values
# Test spline parameters are taken from a test fit to LSSTCam
# data, run 7193D, detector 22, amp C00. The exact fit is not
# important, but this is only meant to be representative of
# the shape of the non-linearity that we see.
n_nodes = 10
non_lin_spline_nodes = np.linspace(0, mu_linear.max(), n_nodes)
non_lin_spline_values = np.array(
[0.0, -8.87, 1.46, 1.69, -6.92, -68.23, -78.01, -11.56, 80.26, 185.01]
)
spl = lsst.afw.math.makeInterpolate(
non_lin_spline_nodes,
non_lin_spline_values,
lsst.afw.math.stringToInterpStyle("AKIMA_SPLINE"),
)
mu_values = mu_linear + spl.interpolate(mu_linear)
# Add a temperature dependence if necessary.
if do_temperature_fit:
temp_coeff = 0.0006
temperatures = np.random.normal(scale=0.5, size=len(mu_values)) - 100.0
# We use a negative sign here because we are doing the
# opposite of the correction.
mu_values *= (1 - temp_coeff*(temperatures - (-100.0)))
else:
temperatures = None
# Add a constant offset if necessary.
if do_mu_offset:
offset_value = 2.0
mu_values += offset_value
else:
offset_value = 0.0
# Add some noise.
mu_values += np.random.normal(scale=mu_values, size=len(mu_values)) / 10000.
# Add some outlier values.
if n_points >= 200:
outlier_indices = np.arange(5) + 170
else:
outlier_indices = []
mu_values[outlier_indices] += 200.0
# Add some small offsets to the pd_values if requested.
pd_values_offset = pd_values.copy()
ccobcurr = None
if do_pd_offsets:
ccobcurr = np.zeros(pd_values.size)
n_points_group = n_points//4
group0 = np.arange(n_points_group)
group1 = np.arange(n_points_group) + n_points_group
group2 = np.arange(n_points_group) + 2*n_points_group
group3 = np.arange(n_points_group) + 3*n_points_group
ccobcurr[group0] = 0.01
ccobcurr[group1] = 0.02
ccobcurr[group2] = 0.03
ccobcurr[group3] = 0.04
pd_offset_factors = [0.995, 1.0, 1.005, 1.002]
pd_values_offset[group0] *= pd_offset_factors[0]
pd_values_offset[group2] *= pd_offset_factors[2]
pd_values_offset[group3] *= pd_offset_factors[3]
# Add one bad photodiode value, but don't put it at the very
# end because that would change the spline node positions
# and make comparisons to the "truth" here in the tests
# more difficult.
pd_values_offset[-2] = np.nan
ptc = self._create_ptc(
self.amp_names,
time_values,
mu_values,
ccobcurr=ccobcurr,
photo_charges=pd_values_offset,
temperatures=temperatures,
)
config = LinearitySolveTask.ConfigClass()
config.linearityType = "Spline"
config.usePhotodiode = True
config.minLinearAdu = 0.0
config.splineKnots = n_nodes
config.splineGroupingMinPoints = 101
config.doSplineFitOffset = do_mu_offset
config.doSplineFitWeights = do_weight_fit
config.splineFitWeightParsStart = [7.2e-5, 1e-4]
config.doSplineFitTemperature = do_temperature_fit
config.maxFracLinearityDeviation = 0.05
if do_pd_offsets:
config.splineGroupingColumn = "CCOBCURR"
if do_temperature_fit:
config.splineFitTemperatureColumn = "TEMP6"
task = LinearitySolveTask(config=config)
linearizer = task.run(
ptc,
[self.dummy_exposure],
self.camera,
self.input_dims,
).outputLinearizer
if do_weight_fit:
# These checks currently fail, and weight fitting is not
# recommended.
return
# Skip the last amp which is marked bad.
for amp_name in ptc.ampNames[:-1]:
# This test data doesn't have a real turnoff.
self.assertEqual(linearizer.linearityTurnoff[amp_name], np.nanmax(ptc.rawMeans[amp_name]))
self.assertEqual(linearizer.linearityMaxSignal[amp_name], np.nanmax(ptc.rawMeans[amp_name]))
lin_mask = np.isfinite(linearizer.fitResiduals[amp_name])
# Make sure that non-finite initial values in range are also
# masked.
check, = np.where(~np.isfinite(ptc.rawMeans[amp_name]))
if len(check) > 0:
np.testing.assert_array_equal(lin_mask[check], False)
# Make sure the outliers are masked.
np.testing.assert_array_equal(lin_mask[outlier_indices], False)
# Check the turnoff and max values.
# The first point at very low flux is noisier and so we exclude
# it from the test here.
resid_atol = 1.1e-3
self.assertFloatsAlmostEqual(
(linearizer.fitResiduals[amp_name][lin_mask] / mu_linear[lin_mask])[1:],
0.0,
atol=resid_atol,
)
# Loose check on the chi-squared.
self.assertLess(linearizer.fitChiSq[amp_name], 2.0)
# Check the residual sigma_mad.
self.assertLess(linearizer.fitResidualsSigmaMad[amp_name], 1.2e-4)
# If we apply the linearity correction, we should get the true
# linear values out.
image = lsst.afw.image.ImageF(len(mu_values), 1)
image.array[:, :] = mu_values
lin_func = linearizer.getLinearityTypeByName(linearizer.linearityType[amp_name])
lin_func()(
image,
coeffs=linearizer.linearityCoeffs[amp_name],
log=None,
)
# We scale by the median because of ambiguity in the overall
# gain parameter which is not part of the non-linearity.
ratio = image.array[0, lin_mask]/mu_linear[lin_mask]
# When we have an offset, this test gets a bit confused
# mixing truth and offset values.
ratio_rtol = 5e-2 if do_mu_offset else 5e-4
self.assertFloatsAlmostEqual(
ratio / np.median(ratio),
1.0,
rtol=ratio_rtol,
)
# Check that the spline parameters recovered are consistent,
# with input to some low-grade precision.
# The first element should be identically zero.
self.assertFloatsEqual(linearizer.linearityCoeffs[amp_name][0], 0.0)
# We have two different comparisons here; for the terms that are
# |value| < 20 (offset) or |value| > 20 (ratio), to avoid
# divide-by-small-number problems. In all cases these are
# approximate, and the real test is in the residuals.
small = (np.abs(non_lin_spline_values) < 20)
spline_atol = 6.0 if do_pd_offsets else 2.0
spline_rtol = 0.14 if do_pd_offsets else 0.05
self.assertFloatsAlmostEqual(
linearizer.linearityCoeffs[amp_name][n_nodes:][small],
non_lin_spline_values[small],
atol=spline_atol,
)
self.assertFloatsAlmostEqual(
linearizer.linearityCoeffs[amp_name][n_nodes:][~small],
non_lin_spline_values[~small],
rtol=spline_rtol,
)
# And check the offsets if they were included.
if do_pd_offsets:
# The relative scaling is to group 1.
fit_offset_factors = linearizer.fitParams[amp_name][1] / linearizer.fitParams[amp_name]
extra_pars = 0
if do_mu_offset:
extra_pars += 1
if do_temperature_fit:
extra_pars += 1
if extra_pars > 0:
fit_offset_factors = fit_offset_factors[:-extra_pars]
self.assertFloatsAlmostEqual(fit_offset_factors, np.array(pd_offset_factors), rtol=6e-4)
# And check if the offset is fit well.
fit_offset = None
fit_temp_coeff = None
if do_mu_offset and do_temperature_fit:
fit_offset = linearizer.fitParams[amp_name][-2]
fit_temp_coeff = linearizer.fitParams[amp_name][-1]
elif do_mu_offset:
fit_offset = linearizer.fitParams[amp_name][-1]
elif do_temperature_fit:
fit_temp_coeff = linearizer.fitParams[amp_name][-1]
if fit_offset is not None:
self.assertFloatsAlmostEqual(fit_offset, offset_value, rtol=6e-3)
if fit_temp_coeff is not None:
self.assertFloatsAlmostEqual(fit_temp_coeff, temp_coeff, rtol=2e-2)
self._check_linearizer_lengths(linearizer)
def test_linearity_spline(self):
self._check_linearity_spline(do_pd_offsets=False, do_mu_offset=False)
def test_linearity_spline_offsets(self):
self._check_linearity_spline(do_pd_offsets=True, do_mu_offset=False)
def test_linearity_spline_mu_offset(self):
self._check_linearity_spline(do_pd_offsets=True, do_mu_offset=True)
def test_linearity_spline_fit_weights(self):
self._check_linearity_spline(do_pd_offsets=True, do_mu_offset=True, do_weight_fit=True)
def test_linearity_spline_fit_temperature(self):
self._check_linearity_spline(do_pd_offsets=True, do_mu_offset=True, do_temperature_fit=True)
def test_linearity_spline_offsets_too_few_points(self):
with self.assertRaisesRegex(RuntimeError, "too few points"):
self._check_linearity_spline(do_pd_offsets=True, n_points=100)
def test_linearity_turnoff(self):
# Use some real LSSTComCam linearity data to measure the turnoff.
abscissa, ordinate, ptc_mask = self._comcam_raw_linearity_data()
config = LinearitySolveTask.ConfigClass()
task = LinearitySolveTask(config=config)
with self.assertNoLogs(level=logging.WARNING):
turnoff_index, turnoff, max_signal = task._computeTurnoffAndMax(abscissa, ordinate, ptc_mask)
# This was visually inspected such that these are reasonable.
self.assertEqual(turnoff_index, 90)
np.testing.assert_almost_equal(turnoff, 99756.30512572)
np.testing.assert_almost_equal(max_signal, 108730.32842316)
# Do the linearity fit with these data.
ptc = self._create_ptc(
self.amp_names,
abscissa * 1000000000,
ordinate,
photo_charges=abscissa,
ptc_turnoff=np.max(ordinate[ptc_mask]),
)
config = LinearitySolveTask.ConfigClass()
config.linearityType = "Spline"
config.usePhotodiode = True
config.minLinearAdu = 30.0
config.splineKnots = 10
config.doSplineFitOffset = False
config.doSplineFitWeights = False
config.splineFitWeightParsStart = [7.2e-5, 1e-4]
config.doSplineFitTemperature = False
task = LinearitySolveTask(config=config)
linearizer = task.run(
ptc,
[self.dummy_exposure],
self.camera,
self.input_dims,
).outputLinearizer
# Confirm that the turnoff for the good amps is the same.
for amp_name in self.amp_names:
if amp_name in ptc.badAmps:
self.assertTrue(np.isnan(linearizer.linearityTurnoff[amp_name]))
self.assertTrue(np.isnan(linearizer.linearityMaxSignal[amp_name]))
else:
self.assertEqual(linearizer.linearityTurnoff[amp_name], turnoff)
self.assertEqual(linearizer.linearityMaxSignal[amp_name], max_signal)
# Check that the linearizer gives reasonable values over the
# range up to the ptc turnoff.
nodes, values = np.split(linearizer.linearityCoeffs[amp_name], 2)
self.assertEqual(values[0], 0.0)
to_test = (nodes > 0.0) & (nodes < ptc.ptcTurnoff[amp_name])
np.testing.assert_array_less(np.abs(values[to_test]/nodes[to_test]), 0.002)
# Check the residuals are reasonable up to the linearity
# turnoff.
to_test = ((ptc.rawMeans[amp_name] <= linearizer.linearityTurnoff[amp_name])
& np.isfinite(ptc.rawMeans[amp_name]))
residuals_scaled = linearizer.fitResiduals[amp_name][to_test]/ptc.rawMeans[amp_name][to_test]
np.testing.assert_array_less(np.abs(residuals_scaled), 0.0015)
# Try again after cutting it off, make sure it warns.
cutoff = (ordinate < turnoff)
with self.assertLogs(level=logging.WARNING) as cm:
turnoff_index2, turnoff2, max_signal2 = task._computeTurnoffAndMax(
abscissa[cutoff],
ordinate[cutoff],
ptc_mask[cutoff],
)
self.assertIn("No linearity turnoff", cm.output[0])
self.assertEqual(turnoff_index2, len(ptc_mask[cutoff]) - 1)
def _comcam_raw_linearity_data(self):
# These are LSSTComCam measurements taken from a calibration
# run as part of DM-46357.
photo_charges = np.array(
[
3.22636000e-10, 3.38780400e-10, 3.54841300e-10, 3.87126000e-10,
4.03360000e-10, 4.35709800e-10, 4.51693200e-10, 4.83922500e-10,
5.16297600e-10, 5.48525400e-10, 5.80748400e-10, 6.13082500e-10,
6.45148000e-10, 6.77922000e-10, 7.25721750e-10, 7.74712800e-10,
8.22586650e-10, 8.70876900e-10, 9.19831800e-10, 9.68568000e-10,
1.03278400e-09, 1.08122255e-09, 1.14522645e-09, 1.22590280e-09,
1.29087600e-09, 1.37134750e-09, 1.45214100e-09, 1.53323350e-09,
1.62903910e-09, 1.72676065e-09, 1.83921900e-09, 1.93578600e-09,
2.06456960e-09, 2.17876500e-09, 2.30696180e-09, 2.45230720e-09,
2.59714735e-09, 2.74327300e-09, 2.91997345e-09, 3.08153670e-09,
3.27340599e-09, 3.46962700e-09, 3.67778820e-09, 3.88937850e-09,
4.12976640e-09, 4.37225980e-09, 4.62984095e-09, 4.90212160e-09,
5.19730540e-09, 5.50027885e-09, 5.83860750e-09, 6.17903475e-09,
6.54889207e-09, 6.93710400e-09, 7.35386640e-09, 7.79136960e-09,
8.25815040e-09, 8.74769030e-09, 9.27673375e-09, 9.82420530e-09,
1.04087197e-08, 1.10388366e-08, 1.17030225e-08, 1.23862272e-08,
1.31366576e-08, 1.39122921e-08, 1.47334462e-08, 1.56154856e-08,
1.65528171e-08, 1.75371688e-08, 1.85675291e-08, 1.96824430e-08,
2.08605509e-08, 2.21063200e-08, 2.34186546e-08, 2.48260115e-08,
2.62975235e-08, 2.78649723e-08, 2.95408665e-08, 3.12853772e-08,
3.31612268e-08, 3.51149011e-08, 3.72152551e-08, 3.94439604e-08,
4.17678940e-08, 4.42723820e-08, 4.69153456e-08, 4.96932949e-08,
5.27096702e-08, 5.58084160e-08, 5.91300138e-08, 6.26865948e-08,
6.64351212e-08, 7.03429300e-08, 7.45695391e-08, 7.89900791e-08,
8.37413792e-08, 9.40025100e-08, 9.95942560e-08, 8.87289232e-08,
],
)
raw_means = np.array(
[
545.61773431, 572.92351724, 599.73600889, 654.80522687,
681.89236561, 736.36468948, 763.35233159, 818.3558025,
872.28757051, 926.97891772, 981.93722274, 1036.0641088,
1091.15536336, 1145.34900099, 1227.09504139, 1308.5247877,
1390.45828326, 1472.05043362, 1553.69250543, 1636.30579999,
1744.80644944, 1826.53524631, 1936.27610568, 2072.63450198,
2181.56004372, 2316.47487095, 2453.76647885, 2589.54660646,
2754.19117479, 2916.91523806, 3107.95392897, 3272.65202763,
3490.14569333, 3680.430583, 3896.17664859, 4146.03684447,
4391.418256, 4633.96031196, 4935.13618128, 5208.9852533,
5533.58336929, 5860.25295412, 6218.8609866, 6570.97901761,
6977.63082612, 7386.3804372, 7823.50205095, 8291.10153869,
8780.51405056, 9300.4324222, 9870.51463262, 10446.11494902,
11069.51440385, 11733.55548663, 12437.83099445, 13179.9215084,
13972.28308964, 14789.63243003, 15692.2466057, 16622.22691148,
17592.14597643, 18670.98290396, 19778.68937623, 20960.06668402,
22216.68121994, 23518.23906809, 24907.61374144, 26424.82837902,
28002.38769651, 29681.8198974, 31422.27519355, 33285.31574829,
35303.14661517, 37381.69611718, 39633.10759127, 41958.90250067,
44458.19627488, 47100.85288959, 49902.33562771, 52837.68322699,
55976.05887806, 59291.58601354, 62822.08965674, 66519.7625026,
70480.78728045, 74721.82649122, 79220.00783343, 83928.39988665,
88959.09479122, 94289.83758648, 99756.30512572, 104244.9446884,
106938.47971957, 108134.21868844, 108602.08685706, 108702.48845118,
108730.32842316, 108799.56402709, 108803.45364906, 108851.90458027,
],
)
ptc_mask = np.array(
[
True, True, True, True, True, True, True, True, True,
True, True, True, True, True, True, True, True, True,
True, True, True, True, True, True, True, True, True,
True, True, True, True, True, True, True, True, True,
True, True, True, True, True, True, True, True, True,
True, True, True, True, True, True, True, True, True,
True, True, True, True, True, True, True, True, True,
True, True, True, True, True, True, True, True, True,
True, True, True, True, True, True, True, True, True,
True, True, True, True, True, True, True, False, False,
False, False, False, False, False, False, False, False, False,
False,
],
)
return photo_charges, raw_means, ptc_mask
class TestMemory(lsst.utils.tests.MemoryTestCase):
pass
def setup_module(module):
lsst.utils.tests.init()
if __name__ == "__main__":
lsst.utils.tests.init()
unittest.main()
|
lsstREPO_NAMEcp_pipePATH_START.@cp_pipe_extracted@cp_pipe-main@tests@test_linearity.py@.PATH_END.py
|
{
"filename": "copy_injection_recovery.py",
"repo_name": "ThibeauWouters/TurboPE-BNS",
"repo_path": "TurboPE-BNS_extracted/TurboPE-BNS-main/injections/outdir_TF2/injection_85/copy_injection_recovery.py",
"type": "Python"
}
|
import os
os.environ['CUDA_VISIBLE_DEVICES'] = "2"
os.environ["XLA_PYTHON_CLIENT_MEM_FRACTION"] = "0.1"
import numpy as np
import argparse
# The following is needed on CIT cluster to avoid an obscure Python error
import psutil
p = psutil.Process()
p.cpu_affinity([0])
# Regular imports
import argparse
import copy
import numpy as np
from astropy.time import Time
import time
import shutil
import json
import jax
jax.config.update("jax_enable_x64", True)
import jax.numpy as jnp
from jimgw.jim import Jim
from jimgw.single_event.detector import H1, L1, V1
from jimgw.single_event.likelihood import HeterodynedTransientLikelihoodFD, TransientLikelihoodFD
from jimgw.single_event.waveform import RippleTaylorF2, RippleIMRPhenomD_NRTidalv2
from jimgw.prior import Uniform, Composite
import utils # our plotting and postprocessing utilities script
import optax
# Names of the parameters and their ranges for sampling parameters for the injection
NAMING = ['M_c', 'q', 's1_z', 's2_z', 'lambda_1', 'lambda_2', 'd_L', 't_c', 'phase_c', 'cos_iota', 'psi', 'ra', 'sin_dec']
PRIOR = {
"M_c": [0.8759659737275101, 2.6060030916165484],
"q": [0.5, 1.0],
"s1_z": [-0.05, 0.05],
"s2_z": [-0.05, 0.05],
"lambda_1": [0.0, 5000.0],
"lambda_2": [0.0, 5000.0],
"d_L": [30.0, 300.0],
"t_c": [-0.1, 0.1],
"phase_c": [0.0, 2 * jnp.pi],
"cos_iota": [-1.0, 1.0],
"psi": [0.0, jnp.pi],
"ra": [0.0, 2 * jnp.pi],
"sin_dec": [-1, 1]
}
################
### ARGPARSE ###
################
# TODO save these into a new file
def get_parser(**kwargs):
add_help = kwargs.get("add_help", True)
parser = argparse.ArgumentParser(
description="Perform an injection recovery.",
add_help=add_help,
)
    # TODO: these are currently unused; os.environ must be set before jax is
    # imported (see the top of this file).
# parser.add_argument(
# "--GPU-device",
# type=int,
# default=0,
# help="Select GPU index to use.",
# )
# parser.add_argument(
# "--GPU-memory-fraction",
# type=float,
# default=0.5,
# help="Select percentage of GPU memory to use.",
# )
parser.add_argument(
"--outdir",
type=str,
default="./outdir/",
help="Output directory for the injection.",
)
parser.add_argument(
"--load-existing-config",
type=bool,
default=False,
help="Whether to load and redo an existing injection (True) or to generate a new set of parameters (False).",
)
parser.add_argument(
"--N",
type=str,
default="",
help="Number (or generically, a custom identifier) of this injection, used to locate the output directory. If an empty string is passed (default), we generate a new injection.",
)
parser.add_argument(
"--SNR-threshold",
type=float,
default=12,
help="Skip injections with SNR below this threshold.",
)
parser.add_argument(
"--waveform-approximant",
type=str,
default="TaylorF2",
help="Which waveform approximant to use. Recommended to use TaylorF2 for now, NRTidalv2 might still be a bit unstable.",
)
parser.add_argument(
"--relative-binning-binsize",
type=int,
default=100,
help="Number of bins for the relative binning.",
)
parser.add_argument(
"--relative-binning-ref-params-equal-true-params",
type=bool,
default=True,
help="Whether to set the reference parameters in the relative binning code to injection parameters.",
)
parser.add_argument(
"--save-training-chains",
type=bool,
default=False,
help="Whether to save training chains or not (can be very large!)",
)
parser.add_argument(
"--eps-mass-matrix",
type=float,
default=1e-6,
help="Overall scale factor to rescale the step size of the local sampler.",
)
parser.add_argument(
"--smart-initial-guess",
type=bool,
default=False,
help="Distribute the walkers around the injected parameters. TODO change this to reference parameters found by the relative binning code.",
)
parser.add_argument(
"--use-scheduler",
type=bool,
default=True,
help="Use a learning rate scheduler instead of a fixed learning rate.",
)
parser.add_argument(
"--stopping-criterion-global-acc",
type=float,
default=1.0,
help="Stop the run once we reach this global acceptance rate.",
)
# # TODO this has to be implemented
# parser.add_argument(
# "--autotune_local_sampler",
# type=bool,
# default=False,
# help="TODO Still has to be implemented! Specify whether to use autotuning for the local sampler.",
# )
return parser
####################
### Script setup ###
####################
def body(args):
"""
Run an injection and recovery. To get an explanation of the hyperparameters, go to:
- jim hyperparameters: https://github.com/ThibeauWouters/jim/blob/8cb4ef09fefe9b353bfb89273a4bc0ee52060d72/src/jimgw/jim.py#L26
- flowMC hyperparameters: https://github.com/ThibeauWouters/flowMC/blob/ad1a32dcb6984b2e178d7204a53d5da54b578073/src/flowMC/sampler/Sampler.py#L40
"""
start_time = time.time()
# TODO move and get these as arguments
# Deal with the hyperparameters
naming = NAMING
HYPERPARAMETERS = {
"flowmc":
{
"n_loop_training": 400,
"n_loop_production": 50,
"n_local_steps": 5,
"n_global_steps": 400,
"n_epochs": 50,
"n_chains": 1000,
"learning_rate": 0.001, # using a scheduler below
"max_samples": 50000,
"momentum": 0.9,
"batch_size": 50000,
"use_global": True,
"logging": True,
"keep_quantile": 0.0,
"local_autotune": None,
"train_thinning": 10,
"output_thinning": 30,
"n_sample_max": 10000,
"precompile": False,
"verbose": False,
"outdir": args.outdir,
"stopping_criterion_global_acc": args.stopping_criterion_global_acc
},
"jim":
{
"seed": 0,
"n_chains": 1000,
"num_layers": 10,
"hidden_size": [128, 128],
"num_bins": 8,
}
}
flowmc_hyperparameters = HYPERPARAMETERS["flowmc"]
jim_hyperparameters = HYPERPARAMETERS["jim"]
hyperparameters = {**flowmc_hyperparameters, **jim_hyperparameters}
for key, value in args.__dict__.items():
if key in hyperparameters:
hyperparameters[key] = value
### POLYNOMIAL SCHEDULER
if args.use_scheduler:
print("Using polynomial learning rate scheduler")
total_epochs = hyperparameters["n_epochs"] * hyperparameters["n_loop_training"]
start = int(total_epochs / 10)
start_lr = 1e-3
end_lr = 1e-5
power = 4.0
schedule_fn = optax.polynomial_schedule(start_lr, end_lr, power, total_epochs-start, transition_begin=start)
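        # optax.polynomial_schedule holds the rate at start_lr for the first
        # `start` steps, then decays it as a power-4 polynomial down to end_lr
        # over the remaining (total_epochs - start) steps.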
hyperparameters["learning_rate"] = schedule_fn
print(f"Saving output to {args.outdir}")
# Fetch waveform used
supported_waveforms = ["TaylorF2", "NRTidalv2", "IMRPhenomD_NRTidalv2"]
if args.waveform_approximant not in supported_waveforms:
print(f"Waveform approximant {args.waveform_approximant} not supported. Supported waveforms are {supported_waveforms}. Changing to TaylorF2.")
args.waveform_approximant = "TaylorF2"
if args.waveform_approximant == "TaylorF2":
ripple_waveform_fn = RippleTaylorF2
elif args.waveform_approximant in ["IMRPhenomD_NRTidalv2", "NRTv2", "NRTidalv2"]:
ripple_waveform_fn = RippleIMRPhenomD_NRTidalv2
else:
raise ValueError(f"Waveform approximant {args.waveform_approximant} not supported.")
# Before main code, check if outdir is correct dir format TODO improve with sys?
if args.outdir[-1] != "/":
args.outdir += "/"
outdir = f"{args.outdir}injection_{args.N}/"
# Get the prior bounds, both as 1D and 2D arrays
prior_ranges = jnp.array([PRIOR[name] for name in naming])
prior_low, prior_high = prior_ranges[:, 0], prior_ranges[:, 1]
bounds = np.array(list(PRIOR.values()))
# Now go over to creating parameters, and potentially check SNR cutoff
network_snr = 0.0
print(f"The SNR threshold parameter is set to {args.SNR_threshold}")
while network_snr < args.SNR_threshold:
# Generate the parameters or load them from an existing file
if args.load_existing_config:
config_path = f"{outdir}config.json"
print(f"Loading existing config, path: {config_path}")
config = json.load(open(config_path))
else:
print(f"Generating new config")
config = utils.generate_config(prior_low, prior_high, naming, args.N, args.outdir)
key = jax.random.PRNGKey(config["seed"])
# Save the given script hyperparams
with open(f"{outdir}script_args.json", 'w') as json_file:
json.dump(args.__dict__, json_file)
# Start injections
print("Injecting signals . . .")
waveform = ripple_waveform_fn(f_ref = config["fref"])
# Create frequency grid
freqs = jnp.arange(
config["fmin"],
config["f_sampling"] / 2, # maximum frequency being halved of sampling frequency
1. / config["duration"]
)
# convert injected mass ratio to eta, and apply arccos and arcsin
q = config["q"]
eta = q / (1 + q) ** 2
iota = float(jnp.arccos(config["cos_iota"]))
dec = float(jnp.arcsin(config["sin_dec"]))
# Setup the timing setting for the injection
epoch = config["duration"] - config["post_trigger_duration"]
gmst = Time(config["trigger_time"], format='gps').sidereal_time('apparent', 'greenwich').rad
# Array of injection parameters
true_param = {
'M_c': config["M_c"], # chirp mass
'eta': eta, # symmetric mass ratio 0 < eta <= 0.25
's1_z': config["s1_z"], # aligned spin of priminary component s1_z.
's2_z': config["s2_z"], # aligned spin of secondary component s2_z.
'lambda_1': config["lambda_1"], # tidal deformability of priminary component lambda_1.
'lambda_2': config["lambda_2"], # tidal deformability of secondary component lambda_2.
'd_L': config["d_L"], # luminosity distance
't_c': config["t_c"], # timeshift w.r.t. trigger time
'phase_c': config["phase_c"], # merging phase
'iota': iota, # inclination angle
'psi': config["psi"], # polarization angle
'ra': config["ra"], # right ascension
'dec': dec # declination
}
# Get the true parameter values for the plots
truths = copy.deepcopy(true_param)
truths["eta"] = q
truths = np.fromiter(truths.values(), dtype=float)
detector_param = {
'ra': config["ra"],
'dec': dec,
'gmst': gmst,
'psi': config["psi"],
'epoch': epoch,
't_c': config["t_c"],
}
print(f"The injected parameters are {true_param}")
# Generating the geocenter waveform
h_sky = waveform(freqs, true_param)
# Setup interferometers
ifos = [H1, L1, V1]
psd_files = ["./psds/psd.txt", "./psds/psd.txt", "./psds/psd_virgo.txt"]
# inject signal into ifos
for idx, ifo in enumerate(ifos):
key, subkey = jax.random.split(key)
ifo.inject_signal(
subkey,
freqs,
h_sky,
detector_param,
                psd_file=psd_files[idx]  # note: the function load_psd actually loads the asd
)
print("Signal injected")
# Compute the SNR
h1_snr = utils.compute_snr(H1, h_sky, detector_param)
l1_snr = utils.compute_snr(L1, h_sky, detector_param)
v1_snr = utils.compute_snr(V1, h_sky, detector_param)
network_snr = np.sqrt(h1_snr**2 + l1_snr**2 + v1_snr**2)
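        # The network SNR is the quadrature sum of the individual detector SNRs.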
# If the SNR is too low, we need to generate new parameters
if network_snr < args.SNR_threshold:
print(f"Network SNR is less than {args.SNR_threshold}, generating new parameters")
if args.load_existing_config:
raise ValueError("SNR is less than threshold, but loading existing config. This should not happen!")
print("H1 SNR:", h1_snr)
print("L1 SNR:", l1_snr)
print("V1 SNR:", v1_snr)
print("Network SNR:", network_snr)
print(f"Saving network SNR")
with open(outdir + 'network_snr.txt', 'w') as file:
file.write(str(network_snr))
print("Start prior setup")
# Priors without transformation
Mc_prior = Uniform(prior_low[0], prior_high[0], naming=['M_c'])
q_prior = Uniform(prior_low[1], prior_high[1], naming=['q'],
transforms={
'q': (
'eta',
lambda params: params['q'] / (1 + params['q']) ** 2
)
}
)
s1z_prior = Uniform(prior_low[2], prior_high[2], naming=['s1_z'])
s2z_prior = Uniform(prior_low[3], prior_high[3], naming=['s2_z'])
lambda_1_prior = Uniform(prior_low[4], prior_high[4], naming=['lambda_1'])
lambda_2_prior = Uniform(prior_low[5], prior_high[5], naming=['lambda_2'])
dL_prior = Uniform(prior_low[6], prior_high[6], naming=['d_L'])
tc_prior = Uniform(prior_low[7], prior_high[7], naming=['t_c'])
phic_prior = Uniform(prior_low[8], prior_high[8], naming=['phase_c'])
cos_iota_prior = Uniform(prior_low[9], prior_high[9], naming=["cos_iota"],
transforms={
"cos_iota": (
"iota",
lambda params: jnp.arccos(
jnp.arcsin(jnp.sin(params["cos_iota"] / 2 * jnp.pi)) * 2 / jnp.pi
),
)
},
)
psi_prior = Uniform(prior_low[10], prior_high[10], naming=["psi"])
ra_prior = Uniform(prior_low[11], prior_high[11], naming=["ra"])
sin_dec_prior = Uniform(prior_low[12], prior_high[12], naming=["sin_dec"],
transforms={
"sin_dec": (
"dec",
lambda params: jnp.arcsin(
jnp.arcsin(jnp.sin(params["sin_dec"] / 2 * jnp.pi)) * 2 / jnp.pi
),
)
},
)
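    # Note (added): the arcsin(sin(x / 2 * pi)) * 2 / pi folding in the
    # cos_iota and sin_dec transforms above appears to wrap out-of-range
    # proposals back into [-1, 1] before applying arccos/arcsin, so the
    # sampler never produces NaNs for those parameters.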
# Save the prior bounds
print("Saving prior bounds")
utils.save_prior_bounds(prior_low, prior_high, outdir)
# Compose the prior
prior_list = [
Mc_prior,
q_prior,
s1z_prior,
s2z_prior,
lambda_1_prior,
lambda_2_prior,
dL_prior,
tc_prior,
phic_prior,
cos_iota_prior,
psi_prior,
ra_prior,
sin_dec_prior,
]
complete_prior = Composite(prior_list)
bounds = jnp.array([[p.xmin, p.xmax] for p in complete_prior.priors])
print("Finished prior setup")
print("Initializing likelihood")
if args.relative_binning_ref_params_equal_true_params:
ref_params = true_param
print("Using the true parameters as reference parameters for the relative binning")
else:
ref_params = None
print("Will search for reference waveform for relative binning")
likelihood = HeterodynedTransientLikelihoodFD(
ifos,
prior=complete_prior,
bounds=bounds,
n_bins = args.relative_binning_binsize,
waveform=waveform,
trigger_time=config["trigger_time"],
duration=config["duration"],
post_trigger_duration=config["post_trigger_duration"],
ref_params=ref_params # put the reference waveform of the relative binning at the true parameters
)
# Save the ref params
utils.save_relative_binning_ref_params(likelihood, outdir)
# Generate arguments for the local sampler
mass_matrix = jnp.eye(len(prior_list))
for idx, prior in enumerate(prior_list):
mass_matrix = mass_matrix.at[idx, idx].set(prior.xmax - prior.xmin) # fetch the prior range
local_sampler_arg = {'step_size': mass_matrix * args.eps_mass_matrix} # set the overall step size
hyperparameters["local_sampler_arg"] = local_sampler_arg
# Create jim object
jim = Jim(
likelihood,
complete_prior,
nf_lr_autotune = True,
**hyperparameters
)
if args.smart_initial_guess:
n_chains = hyperparameters["n_chains"]
n_dim = len(prior_list)
initial_guess = utils.generate_smart_initial_guess(gmst, [H1, L1, V1], true_param, n_chains, n_dim, prior_low, prior_high)
# Plot it
utils.plot_chains(initial_guess, "initial_guess", outdir, truths = truths)
else:
initial_guess = jnp.array([])
### Finally, do the sampling
jim.sample(jax.random.PRNGKey(24), initial_guess = initial_guess)
# === Show results, save output ===
# Print a summary to screen:
jim.print_summary()
# Save and plot the results of the run
# - training phase
name = outdir + f'results_training.npz'
print(f"Saving samples to {name}")
state = jim.Sampler.get_sampler_state(training = True)
chains, log_prob, local_accs, global_accs, loss_vals = state["chains"], state["log_prob"], state["local_accs"], state["global_accs"], state["loss_vals"]
local_accs = jnp.mean(local_accs, axis=0)
global_accs = jnp.mean(global_accs, axis=0)
if args.save_training_chains:
np.savez(name, log_prob=log_prob, local_accs=local_accs, global_accs=global_accs, loss_vals=loss_vals, chains=chains)
else:
np.savez(name, log_prob=log_prob, local_accs=local_accs, global_accs=global_accs, loss_vals=loss_vals)
utils.plot_accs(local_accs, "Local accs (training)", "local_accs_training", outdir)
utils.plot_accs(global_accs, "Global accs (training)", "global_accs_training", outdir)
utils.plot_loss_vals(loss_vals, "Loss", "loss_vals", outdir)
utils.plot_log_prob(log_prob, "Log probability (training)", "log_prob_training", outdir)
# - production phase
name = outdir + f'results_production.npz'
state = jim.Sampler.get_sampler_state(training = False)
chains, log_prob, local_accs, global_accs = state["chains"], state["log_prob"], state["local_accs"], state["global_accs"]
local_accs = jnp.mean(local_accs, axis=0)
global_accs = jnp.mean(global_accs, axis=0)
np.savez(name, chains=chains, log_prob=log_prob, local_accs=local_accs, global_accs=global_accs)
utils.plot_accs(local_accs, "Local accs (production)", "local_accs_production", outdir)
utils.plot_accs(global_accs, "Global accs (production)", "global_accs_production", outdir)
utils.plot_log_prob(log_prob, "Log probability (production)", "log_prob_production", outdir)
# Plot the chains as corner plots
utils.plot_chains(chains, "chains_production", outdir, truths = truths)
# Save the NF and show a plot of samples from the flow
print("Saving the NF")
jim.Sampler.save_flow(outdir + "nf_model")
name = outdir + 'results_NF.npz'
chains = jim.Sampler.sample_flow(10_000)
np.savez(name, chains = chains)
# Finally, copy over this script to the outdir for reproducibility
shutil.copy2(__file__, outdir + "copy_injection_recovery.py")
print("Saving the jim hyperparameters")
jim.save_hyperparameters(outdir = outdir)
end_time = time.time()
runtime = end_time - start_time
print(f"Time taken: {runtime} seconds ({(runtime)/60} minutes)")
print(f"Saving runtime")
with open(outdir + 'runtime.txt', 'w') as file:
file.write(str(runtime))
print("Finished injection recovery successfully!")
############
### MAIN ###
############
def main(given_args = None):
parser = get_parser()
args = parser.parse_args()
print(given_args)
# Update with given args
if given_args is not None:
args.__dict__.update(given_args)
if args.load_existing_config and args.N == "":
raise ValueError("If load_existing_config is True, you need to specify the N argument to locate the existing injection. ")
print("------------------------------------")
print("Arguments script:")
for key, value in args.__dict__.items():
print(f"{key}: {value}")
print("------------------------------------")
print("Starting main code")
# If no N is given, fetch N from the structure of outdir
if len(args.N) == 0:
N = utils.get_N(args.outdir)
args.N = N
    # TODO: make these take effect; os.environ must be set before jax is imported.
# import os
# os.environ["XLA_PYTHON_CLIENT_MEM_FRACTION"] = str(args.GPU_memory_fraction)
# os.environ['CUDA_VISIBLE_DEVICES'] = str(args.GPU_device)
# print(f"Running on GPU {args.GPU_device}")
# Execute the script
body(args)
if __name__ == "__main__":
main()
|
ThibeauWoutersREPO_NAMETurboPE-BNSPATH_START.@TurboPE-BNS_extracted@TurboPE-BNS-main@injections@outdir_TF2@injection_85@copy_injection_recovery.py@.PATH_END.py
|
{
"filename": "xray_emission_fields.py",
"repo_name": "rennehan/yt-swift",
"repo_path": "yt-swift_extracted/yt-swift-main/yt/fields/xray_emission_fields.py",
"type": "Python"
}
|
import os
import numpy as np
from yt.config import ytcfg
from yt.fields.derived_field import DerivedField
from yt.funcs import mylog, only_on_root, parse_h5_attr
from yt.units.yt_array import YTArray, YTQuantity
from yt.utilities.cosmology import Cosmology
from yt.utilities.exceptions import YTException, YTFieldNotFound
from yt.utilities.linear_interpolators import (
BilinearFieldInterpolator,
UnilinearFieldInterpolator,
)
from yt.utilities.on_demand_imports import _h5py as h5py
data_version = {"cloudy": 2, "apec": 3}
data_url = "http://yt-project.org/data"
def _get_data_file(table_type, data_dir=None):
data_file = "%s_emissivity_v%d.h5" % (table_type, data_version[table_type])
if data_dir is None:
supp_data_dir = ytcfg.get("yt", "supp_data_dir")
data_dir = supp_data_dir if os.path.exists(supp_data_dir) else "."
data_path = os.path.join(data_dir, data_file)
if not os.path.exists(data_path):
msg = "Failed to find emissivity data file {}! Please download from {}".format(
data_file,
data_url,
)
mylog.error(msg)
raise OSError(msg)
return data_path
class EnergyBoundsException(YTException):
def __init__(self, lower, upper):
self.lower = lower
self.upper = upper
def __str__(self):
return f"Energy bounds are {self.lower:e} to {self.upper:e} keV."
class ObsoleteDataException(YTException):
def __init__(self, table_type):
data_file = "%s_emissivity_v%d.h5" % (table_type, data_version[table_type])
self.msg = "X-ray emissivity data is out of date.\n"
self.msg += f"Download the latest data from {data_url}/{data_file}."
def __str__(self):
return self.msg
class XrayEmissivityIntegrator:
r"""Class for making X-ray emissivity fields. Uses hdf5 data tables
generated from Cloudy and AtomDB/APEC.
Initialize an XrayEmissivityIntegrator object.
Parameters
----------
table_type : string
The type of data to use when computing the emissivity values. If "cloudy",
a file called "cloudy_emissivity.h5" is used, for photoionized
plasmas. If, "apec", a file called "apec_emissivity.h5" is used for
collisionally ionized plasmas. These files contain emissivity tables
for primordial elements and for metals at solar metallicity for the
energy range 0.1 to 100 keV.
redshift : float, optional
The cosmological redshift of the source of the field. Default: 0.0.
data_dir : string, optional
The location to look for the data table in. If not supplied, the file
will be looked for in the directory given by the "supp_data_dir" yt
configuration option or in the current working directory.
use_metals : boolean, optional
If set to True, the emissivity will include contributions from metals.
Default: True
"""
def __init__(self, table_type, redshift=0.0, data_dir=None, use_metals=True):
filename = _get_data_file(table_type, data_dir=data_dir)
only_on_root(mylog.info, "Loading emissivity data from %s", filename)
in_file = h5py.File(filename, mode="r")
if "info" in in_file.attrs:
only_on_root(mylog.info, parse_h5_attr(in_file, "info"))
if parse_h5_attr(in_file, "version") != data_version[table_type]:
raise ObsoleteDataException(table_type)
else:
only_on_root(
mylog.info,
f"X-ray '{table_type}' emissivity data version: "
f"{parse_h5_attr(in_file, 'version')}.",
)
self.log_T = in_file["log_T"][:]
self.emissivity_primordial = in_file["emissivity_primordial"][:]
if "log_nH" in in_file:
self.log_nH = in_file["log_nH"][:]
if use_metals:
self.emissivity_metals = in_file["emissivity_metals"][:]
self.ebin = YTArray(in_file["E"], "keV")
in_file.close()
self.dE = np.diff(self.ebin)
self.emid = 0.5 * (self.ebin[1:] + self.ebin[:-1]).to("erg")
self.redshift = redshift
def get_interpolator(self, data_type, e_min, e_max, energy=True):
data = getattr(self, f"emissivity_{data_type}")
if not energy:
data = data[..., :] / self.emid.v
e_min = YTQuantity(e_min, "keV") * (1.0 + self.redshift)
e_max = YTQuantity(e_max, "keV") * (1.0 + self.redshift)
if (e_min - self.ebin[0]) / e_min < -1e-3 or (
e_max - self.ebin[-1]
) / e_max > 1e-3:
raise EnergyBoundsException(self.ebin[0], self.ebin[-1])
e_is, e_ie = np.digitize([e_min, e_max], self.ebin)
e_is = np.clip(e_is - 1, 0, self.ebin.size - 1)
e_ie = np.clip(e_ie, 0, self.ebin.size - 1)
my_dE = self.dE[e_is:e_ie].copy()
# clip edge bins if the requested range is smaller
my_dE[0] -= e_min - self.ebin[e_is]
my_dE[-1] -= self.ebin[e_ie] - e_max
interp_data = (data[..., e_is:e_ie] * my_dE).sum(axis=-1)
if data.ndim == 2:
emiss = UnilinearFieldInterpolator(
np.log10(interp_data),
[self.log_T[0], self.log_T[-1]],
"log_T",
truncate=True,
)
else:
emiss = BilinearFieldInterpolator(
np.log10(interp_data),
[self.log_nH[0], self.log_nH[-1], self.log_T[0], self.log_T[-1]],
["log_nH", "log_T"],
truncate=True,
)
return emiss
def add_xray_emissivity_field(
ds,
e_min,
e_max,
redshift=0.0,
metallicity=("gas", "metallicity"),
table_type="cloudy",
data_dir=None,
cosmology=None,
dist=None,
ftype="gas",
):
r"""Create X-ray emissivity fields for a given energy range.
Parameters
----------
e_min : float
The minimum energy in keV for the energy band.
e_max : float
The maximum energy in keV for the energy band.
redshift : float, optional
The cosmological redshift of the source of the field. Default: 0.0.
metallicity : str or tuple of str or float, optional
Either the name of a metallicity field or a single floating-point
number specifying a spatially constant metallicity. Must be in
solar units. If set to None, no metals will be assumed. Default:
("gas", "metallicity")
table_type : string, optional
The type of emissivity table to be used when creating the fields.
Options are "cloudy" or "apec". Default: "cloudy"
data_dir : string, optional
The location to look for the data table in. If not supplied, the file
will be looked for in the directory given by the "supp_data_dir" yt
configuration option or in the current working directory.
cosmology : :class:`~yt.utilities.cosmology.Cosmology`, optional
If set and redshift > 0.0, this cosmology will be used when computing the
cosmological dependence of the emission fields. If not set, yt's default
LCDM cosmology will be used.
dist : (value, unit) tuple or :class:`~yt.units.yt_array.YTQuantity`, optional
The distance to the source, used for making intensity fields. You should
only use this if your source is nearby (not cosmological). Default: None
ftype : string, optional
The field type to use when creating the fields, default "gas"
This will create at least three fields:
"xray_emissivity_{e_min}_{e_max}_keV" (erg s^-1 cm^-3)
"xray_luminosity_{e_min}_{e_max}_keV" (erg s^-1)
"xray_photon_emissivity_{e_min}_{e_max}_keV" (photons s^-1 cm^-3)
and if a redshift or distance is specified it will create two others:
"xray_intensity_{e_min}_{e_max}_keV" (erg s^-1 cm^-3 arcsec^-2)
"xray_photon_intensity_{e_min}_{e_max}_keV" (photons s^-1 cm^-3 arcsec^-2)
These latter two are really only useful when making projections.
Examples
--------
>>> import yt
>>> ds = yt.load("sloshing_nomag2_hdf5_plt_cnt_0100")
>>> yt.add_xray_emissivity_field(ds, 0.5, 2, table_type="apec")
>>> p = yt.ProjectionPlot(
... ds, "x", ("gas", "xray_emissivity_0.5_2_keV")
... )
>>> p.save()
"""
if not isinstance(metallicity, float) and metallicity is not None:
try:
metallicity = ds._get_field_info(metallicity)
except YTFieldNotFound as e:
raise RuntimeError(
f"Your dataset does not have a {metallicity} field! "
+ "Perhaps you should specify a constant metallicity instead?"
) from e
if table_type == "cloudy":
# Cloudy wants to scale by nH**2
other_n = "H_nuclei_density"
else:
# APEC wants to scale by nH*ne
other_n = "El_number_density"
def _norm_field(field, data):
return data[ftype, "H_nuclei_density"] * data[ftype, other_n]
ds.add_field(
(ftype, "norm_field"), _norm_field, units="cm**-6", sampling_type="local"
)
my_si = XrayEmissivityIntegrator(table_type, data_dir=data_dir, redshift=redshift)
em_0 = my_si.get_interpolator("primordial", e_min, e_max)
emp_0 = my_si.get_interpolator("primordial", e_min, e_max, energy=False)
if metallicity is not None:
em_Z = my_si.get_interpolator("metals", e_min, e_max)
emp_Z = my_si.get_interpolator("metals", e_min, e_max, energy=False)
def _emissivity_field(field, data):
with np.errstate(all="ignore"):
dd = {
"log_nH": np.log10(data[ftype, "H_nuclei_density"]),
"log_T": np.log10(data[ftype, "temperature"]),
}
my_emissivity = np.power(10, em_0(dd))
if metallicity is not None:
if isinstance(metallicity, DerivedField):
my_Z = data[metallicity.name].to_value("Zsun")
else:
my_Z = metallicity
my_emissivity += my_Z * np.power(10, em_Z(dd))
my_emissivity[np.isnan(my_emissivity)] = 0
return data[ftype, "norm_field"] * YTArray(my_emissivity, "erg*cm**3/s")
emiss_name = (ftype, f"xray_emissivity_{e_min}_{e_max}_keV")
ds.add_field(
emiss_name,
function=_emissivity_field,
display_name=rf"\epsilon_{{X}} ({e_min}-{e_max} keV)",
sampling_type="local",
units="erg/cm**3/s",
)
def _luminosity_field(field, data):
return data[emiss_name] * data[ftype, "mass"] / data[ftype, "density"]
lum_name = (ftype, f"xray_luminosity_{e_min}_{e_max}_keV")
ds.add_field(
lum_name,
function=_luminosity_field,
display_name=rf"\rm{{L}}_{{X}} ({e_min}-{e_max} keV)",
sampling_type="local",
units="erg/s",
)
def _photon_emissivity_field(field, data):
dd = {
"log_nH": np.log10(data[ftype, "H_nuclei_density"]),
"log_T": np.log10(data[ftype, "temperature"]),
}
my_emissivity = np.power(10, emp_0(dd))
if metallicity is not None:
if isinstance(metallicity, DerivedField):
my_Z = data[metallicity.name].to_value("Zsun")
else:
my_Z = metallicity
my_emissivity += my_Z * np.power(10, emp_Z(dd))
return data[ftype, "norm_field"] * YTArray(my_emissivity, "photons*cm**3/s")
phot_name = (ftype, f"xray_photon_emissivity_{e_min}_{e_max}_keV")
ds.add_field(
phot_name,
function=_photon_emissivity_field,
display_name=rf"\epsilon_{{X}} ({e_min}-{e_max} keV)",
sampling_type="local",
units="photons/cm**3/s",
)
fields = [emiss_name, lum_name, phot_name]
if redshift > 0.0 or dist is not None:
if dist is None:
if cosmology is None:
if hasattr(ds, "cosmology"):
cosmology = ds.cosmology
else:
cosmology = Cosmology()
D_L = cosmology.luminosity_distance(0.0, redshift)
angular_scale = 1.0 / cosmology.angular_scale(0.0, redshift)
dist_fac = ds.quan(
1.0 / (4.0 * np.pi * D_L * D_L * angular_scale * angular_scale).v,
"rad**-2",
)
else:
redshift = 0.0 # Only for local sources!
try:
# normal behaviour, if dist is a YTQuantity
dist = ds.quan(dist.value, dist.units)
except AttributeError as e:
try:
dist = ds.quan(*dist)
except (RuntimeError, TypeError):
raise TypeError(
"dist should be a YTQuantity or a (value, unit) tuple!"
) from e
angular_scale = dist / ds.quan(1.0, "radian")
dist_fac = ds.quan(
1.0 / (4.0 * np.pi * dist * dist * angular_scale * angular_scale).v,
"rad**-2",
)
ei_name = (ftype, f"xray_intensity_{e_min}_{e_max}_keV")
def _intensity_field(field, data):
I = dist_fac * data[emiss_name]
return I.in_units("erg/cm**3/s/arcsec**2")
ds.add_field(
ei_name,
function=_intensity_field,
display_name=rf"I_{{X}} ({e_min}-{e_max} keV)",
sampling_type="local",
units="erg/cm**3/s/arcsec**2",
)
i_name = (ftype, f"xray_photon_intensity_{e_min}_{e_max}_keV")
def _photon_intensity_field(field, data):
I = (1.0 + redshift) * dist_fac * data[phot_name]
return I.in_units("photons/cm**3/s/arcsec**2")
ds.add_field(
i_name,
function=_photon_intensity_field,
display_name=rf"I_{{X}} ({e_min}-{e_max} keV)",
sampling_type="local",
units="photons/cm**3/s/arcsec**2",
)
fields += [ei_name, i_name]
for field in fields:
mylog.info("Adding ('%s','%s') field.", field[0], field[1])
return fields
|
rennehanREPO_NAMEyt-swiftPATH_START.@yt-swift_extracted@yt-swift-main@yt@fields@xray_emission_fields.py@.PATH_END.py
|
{
"filename": "paper.md",
"repo_name": "migueldvb/cine",
"repo_path": "cine_extracted/cine-master/paper/paper.md",
"type": "Markdown"
}
|
---
title: 'Cine: Line excitation by infrared fluorescence in cometary atmospheres'
tags:
- Python
- Fluorescence
- Comets
- Planetary Atmospheres
authors:
- name: Miguel de Val-Borro
orcid: 0000-0002-0455-9384
affiliation: 1,2
- name: Martin A. Cordiner
orcid: 0000-0001-8233-2436
affiliation: 1,2
- name: Stefanie N. Milam
orcid: 0000-0001-7694-4129
affiliation: 1
- name: Steven B. Charnley
affiliation: 1
affiliations:
- name: NASA Goddard Space Flight Center
index: 1
- name: The Catholic University of America
index: 2
date: 1 Feb 2017
bibliography: paper.bib
---
# Summary
*CINE* is a Python module for calculating infrared pumping efficiencies that can
be applied to the most common molecules found in cometary comae such as water,
hydrogen cyanide or methanol. Excitation by solar radiation of vibrational
bands followed by radiative decay to the ground vibrational state is one of the
main mechanisms for molecular excitation in comets. This code calculates the
effective pumping rates for rotational levels in the ground vibrational state
scaled by the heliocentric distance of the comet as detailed in @bensch and @crovisier.
Line transitions are queried from the latest version of the HITRAN
spectroscopic repository [@hitran] using the astroquery affiliated package of
astropy [@astroquery]. Molecular data are obtained from the LAMDA database
[@lamda].
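For illustration, the following is a minimal sketch (not taken from *CINE* itself) of the kind of HITRAN query that astroquery exposes; the molecule, isotopologue, and wavenumber window below are placeholder assumptions.
```python
import astropy.units as u
from astroquery.hitran import Hitran
# Query transitions of the main water isotopologue (molecule 1, isotopologue 1)
# over an illustrative wavenumber window; CINE's own query parameters may differ.
lines = Hitran.query_lines(molecule_number=1,
                           isotopologue_number=1,
                           min_frequency=0. / u.cm,
                           max_frequency=100. / u.cm)
print(lines.colnames)
```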
These coefficients are useful for modeling rotational emission lines observed
in cometary spectra at sub-millimeter wavelengths. Combined with computational
methods to solve the radiative transfer equations based, e.g., on the Monte
Carlo algorithm [@crete], this model can retrieve production rates and
rotational temperatures from the observed emission spectrum.
The code and issue tracker of *CINE* are available on GitHub [@cine_github] and
any questions or bug reports can be raised there. The source code for *CINE*
is also available from the Python Package Index (PyPI).
# References
|
migueldvbREPO_NAMEcinePATH_START.@cine_extracted@cine-master@paper@paper.md@.PATH_END.py
|
{
"filename": "_pie.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/layout/template/data/_pie.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class PieValidator(_plotly_utils.basevalidators.CompoundArrayValidator):
def __init__(self, plotly_name="pie", parent_name="layout.template.data", **kwargs):
super(PieValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop("data_class_str", "Pie"),
data_docs=kwargs.pop(
"data_docs",
"""
""",
),
**kwargs,
)
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@layout@template@data@_pie.py@.PATH_END.py
|
{
"filename": "cpp.py",
"repo_name": "waynebhayes/SpArcFiRe",
"repo_path": "SpArcFiRe_extracted/SpArcFiRe-master/scripts/SpArcFiRe-pyvenv/lib/python2.7/site-packages/astropy/extern/ply/cpp.py",
"type": "Python"
}
|
# -----------------------------------------------------------------------------
# cpp.py
#
# Author: David Beazley (http://www.dabeaz.com)
# Copyright (C) 2007
# All rights reserved
#
# This module implements an ANSI-C style lexical preprocessor for PLY.
# -----------------------------------------------------------------------------
from __future__ import generators
import sys
# Some Python 3 compatibility shims
if sys.version_info.major < 3:
STRING_TYPES = (str, unicode)
else:
STRING_TYPES = str
xrange = range
# -----------------------------------------------------------------------------
# Default preprocessor lexer definitions. These tokens are enough to get
# a basic preprocessor working. Other modules may import these if they want
# -----------------------------------------------------------------------------
tokens = (
'CPP_ID','CPP_INTEGER', 'CPP_FLOAT', 'CPP_STRING', 'CPP_CHAR', 'CPP_WS', 'CPP_COMMENT1', 'CPP_COMMENT2', 'CPP_POUND','CPP_DPOUND'
)
literals = "+-*/%|&~^<>=!?()[]{}.,;:\\\'\""
# Whitespace
def t_CPP_WS(t):
r'\s+'
t.lexer.lineno += t.value.count("\n")
return t
t_CPP_POUND = r'\#'
t_CPP_DPOUND = r'\#\#'
# Identifier
t_CPP_ID = r'[A-Za-z_][\w_]*'
# Integer literal
def CPP_INTEGER(t):
r'(((((0x)|(0X))[0-9a-fA-F]+)|(\d+))([uU][lL]|[lL][uU]|[uU]|[lL])?)'
return t
t_CPP_INTEGER = CPP_INTEGER
# Floating literal
t_CPP_FLOAT = r'((\d+)(\.\d+)(e(\+|-)?(\d+))? | (\d+)e(\+|-)?(\d+))([lL]|[fF])?'
# String literal
def t_CPP_STRING(t):
r'\"([^\\\n]|(\\(.|\n)))*?\"'
t.lexer.lineno += t.value.count("\n")
return t
# Character constant 'c' or L'c'
def t_CPP_CHAR(t):
r'(L)?\'([^\\\n]|(\\(.|\n)))*?\''
t.lexer.lineno += t.value.count("\n")
return t
# Comment
def t_CPP_COMMENT1(t):
r'(/\*(.|\n)*?\*/)'
ncr = t.value.count("\n")
t.lexer.lineno += ncr
# replace with one space or a number of '\n'
t.type = 'CPP_WS'; t.value = '\n' * ncr if ncr else ' '
return t
# Line comment
def t_CPP_COMMENT2(t):
r'(//.*?(\n|$))'
# replace with '\n'
t.type = 'CPP_WS'; t.value = '\n'
return t
def t_error(t):
t.type = t.value[0]
t.value = t.value[0]
t.lexer.skip(1)
return t
import re
import copy
import time
import os.path
# -----------------------------------------------------------------------------
# trigraph()
#
# Given an input string, this function replaces all trigraph sequences.
# The following mapping is used:
#
# ??= #
# ??/ \
# ??' ^
# ??( [
# ??) ]
# ??! |
# ??< {
# ??> }
# ??- ~
# -----------------------------------------------------------------------------
_trigraph_pat = re.compile(r'''\?\?[=/\'\(\)\!<>\-]''')
_trigraph_rep = {
'=':'#',
'/':'\\',
"'":'^',
'(':'[',
')':']',
'!':'|',
'<':'{',
'>':'}',
'-':'~'
}
def trigraph(input):
return _trigraph_pat.sub(lambda g: _trigraph_rep[g.group()[-1]],input)
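# For example (an illustrative sketch, not part of the original module):
#   trigraph("??=define ARR(x) x??(0??)")  ->  "#define ARR(x) x[0]"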
# ------------------------------------------------------------------
# Macro object
#
# This object holds information about preprocessor macros
#
# .name - Macro name (string)
# .value - Macro value (a list of tokens)
# .arglist - List of argument names
# .variadic - Boolean indicating whether or not this is a variadic macro
# .vararg - Name of the variadic parameter
#
# When a macro is created, the macro replacement token sequence is
# pre-scanned and used to create patch lists that are later used
# during macro expansion
# ------------------------------------------------------------------
class Macro(object):
def __init__(self,name,value,arglist=None,variadic=False):
self.name = name
self.value = value
self.arglist = arglist
self.variadic = variadic
if variadic:
self.vararg = arglist[-1]
self.source = None
# ------------------------------------------------------------------
# Preprocessor object
#
# Object representing a preprocessor. Contains macro definitions,
# include directories, and other information
# ------------------------------------------------------------------
class Preprocessor(object):
def __init__(self,lexer=None):
if lexer is None:
lexer = lex.lexer
self.lexer = lexer
self.macros = { }
self.path = []
self.temp_path = []
# Probe the lexer for selected tokens
self.lexprobe()
tm = time.localtime()
self.define("__DATE__ \"%s\"" % time.strftime("%b %d %Y",tm))
self.define("__TIME__ \"%s\"" % time.strftime("%H:%M:%S",tm))
self.parser = None
# -----------------------------------------------------------------------------
# tokenize()
#
# Utility function. Given a string of text, tokenize into a list of tokens
# -----------------------------------------------------------------------------
def tokenize(self,text):
tokens = []
self.lexer.input(text)
while True:
tok = self.lexer.token()
if not tok: break
tokens.append(tok)
return tokens
# ---------------------------------------------------------------------
# error()
#
# Report a preprocessor error/warning of some kind
# ----------------------------------------------------------------------
def error(self,file,line,msg):
print("%s:%d %s" % (file,line,msg))
# ----------------------------------------------------------------------
# lexprobe()
#
# This method probes the preprocessor lexer object to discover
# the token types of symbols that are important to the preprocessor.
# If this works right, the preprocessor will simply "work"
# with any suitable lexer regardless of how tokens have been named.
# ----------------------------------------------------------------------
def lexprobe(self):
# Determine the token type for identifiers
self.lexer.input("identifier")
tok = self.lexer.token()
if not tok or tok.value != "identifier":
print("Couldn't determine identifier type")
else:
self.t_ID = tok.type
# Determine the token type for integers
self.lexer.input("12345")
tok = self.lexer.token()
if not tok or int(tok.value) != 12345:
print("Couldn't determine integer type")
else:
self.t_INTEGER = tok.type
self.t_INTEGER_TYPE = type(tok.value)
# Determine the token type for strings enclosed in double quotes
self.lexer.input("\"filename\"")
tok = self.lexer.token()
if not tok or tok.value != "\"filename\"":
print("Couldn't determine string type")
else:
self.t_STRING = tok.type
# Determine the token type for whitespace--if any
self.lexer.input(" ")
tok = self.lexer.token()
if not tok or tok.value != " ":
self.t_SPACE = None
else:
self.t_SPACE = tok.type
# Determine the token type for newlines
self.lexer.input("\n")
tok = self.lexer.token()
if not tok or tok.value != "\n":
self.t_NEWLINE = None
print("Couldn't determine token for newlines")
else:
self.t_NEWLINE = tok.type
self.t_WS = (self.t_SPACE, self.t_NEWLINE)
# Check for other characters used by the preprocessor
chars = [ '<','>','#','##','\\','(',')',',','.']
for c in chars:
self.lexer.input(c)
tok = self.lexer.token()
if not tok or tok.value != c:
print("Unable to lex '%s' required for preprocessor" % c)
# ----------------------------------------------------------------------
# add_path()
#
# Adds a search path to the preprocessor.
# ----------------------------------------------------------------------
def add_path(self,path):
self.path.append(path)
# ----------------------------------------------------------------------
# group_lines()
#
# Given an input string, this function splits it into lines. Trailing whitespace
# is removed. Any line ending with \ is grouped with the next line. This
# function forms the lowest level of the preprocessor---grouping input text into
# a line-by-line format.
# ----------------------------------------------------------------------
def group_lines(self,input):
lex = self.lexer.clone()
lines = [x.rstrip() for x in input.splitlines()]
for i in xrange(len(lines)):
j = i+1
while lines[i].endswith('\\') and (j < len(lines)):
lines[i] = lines[i][:-1]+lines[j]
lines[j] = ""
j += 1
input = "\n".join(lines)
lex.input(input)
lex.lineno = 1
current_line = []
while True:
tok = lex.token()
if not tok:
break
current_line.append(tok)
if tok.type in self.t_WS and '\n' in tok.value:
yield current_line
current_line = []
if current_line:
yield current_line
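# For example (an illustrative sketch, not part of the original module): the two
# physical lines "#define TWO 1 + \" and "1" are delivered as a single logical
# line of tokens, so a directive handler sees the full macro body at once.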
# ----------------------------------------------------------------------
# tokenstrip()
#
# Remove leading/trailing whitespace tokens from a token list
# ----------------------------------------------------------------------
def tokenstrip(self,tokens):
i = 0
while i < len(tokens) and tokens[i].type in self.t_WS:
i += 1
del tokens[:i]
i = len(tokens)-1
while i >= 0 and tokens[i].type in self.t_WS:
i -= 1
del tokens[i+1:]
return tokens
# ----------------------------------------------------------------------
# collect_args()
#
# Collects comma-separated arguments from a list of tokens. The arguments
# must be enclosed in parentheses. Returns a tuple (tokencount,args,positions)
# where tokencount is the number of tokens consumed, args is a list of arguments,
# and positions is a list of integers containing the starting index of each
# argument. Each argument is represented by a list of tokens.
#
# When collecting arguments, leading and trailing whitespace is removed
# from each argument.
#
# This function properly handles nested parentheses and commas---these do not
# define new arguments.
# ----------------------------------------------------------------------
def collect_args(self,tokenlist):
args = []
positions = []
current_arg = []
nesting = 1
tokenlen = len(tokenlist)
# Search for the opening '('.
i = 0
while (i < tokenlen) and (tokenlist[i].type in self.t_WS):
i += 1
if (i < tokenlen) and (tokenlist[i].value == '('):
positions.append(i+1)
else:
self.error(self.source,tokenlist[0].lineno,"Missing '(' in macro arguments")
return 0, [], []
i += 1
while i < tokenlen:
t = tokenlist[i]
if t.value == '(':
current_arg.append(t)
nesting += 1
elif t.value == ')':
nesting -= 1
if nesting == 0:
if current_arg:
args.append(self.tokenstrip(current_arg))
positions.append(i)
return i+1,args,positions
current_arg.append(t)
elif t.value == ',' and nesting == 1:
args.append(self.tokenstrip(current_arg))
positions.append(i+1)
current_arg = []
else:
current_arg.append(t)
i += 1
# Missing end argument
self.error(self.source,tokenlist[-1].lineno,"Missing ')' in macro arguments")
return 0, [],[]
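# For example (an illustrative sketch, not part of the original module): given
# the token stream of "(a, f(b, c), d) trailing", collect_args() returns three
# arguments -- [a], [f(b, c)], [d] -- and stops at the matching ')'.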
# ----------------------------------------------------------------------
# macro_prescan()
#
# Examine the macro value (token sequence) and identify patch points
# This is used to speed up macro expansion later on---we'll know
# right away where to apply patches to the value to form the expansion
# ----------------------------------------------------------------------
def macro_prescan(self,macro):
macro.patch = [] # Standard macro arguments
macro.str_patch = [] # String conversion expansion
macro.var_comma_patch = [] # Variadic macro comma patch
i = 0
while i < len(macro.value):
if macro.value[i].type == self.t_ID and macro.value[i].value in macro.arglist:
argnum = macro.arglist.index(macro.value[i].value)
# Conversion of argument to a string
if i > 0 and macro.value[i-1].value == '#':
macro.value[i] = copy.copy(macro.value[i])
macro.value[i].type = self.t_STRING
del macro.value[i-1]
macro.str_patch.append((argnum,i-1))
continue
# Concatenation
elif (i > 0 and macro.value[i-1].value == '##'):
macro.patch.append(('c',argnum,i-1))
del macro.value[i-1]
continue
elif ((i+1) < len(macro.value) and macro.value[i+1].value == '##'):
macro.patch.append(('c',argnum,i))
i += 1
continue
# Standard expansion
else:
macro.patch.append(('e',argnum,i))
elif macro.value[i].value == '##':
if macro.variadic and (i > 0) and (macro.value[i-1].value == ',') and \
((i+1) < len(macro.value)) and (macro.value[i+1].type == self.t_ID) and \
(macro.value[i+1].value == macro.vararg):
macro.var_comma_patch.append(i-1)
i += 1
macro.patch.sort(key=lambda x: x[2],reverse=True)
# ----------------------------------------------------------------------
# macro_expand_args()
#
# Given a Macro and list of arguments (each a token list), this method
# returns an expanded version of a macro. The return value is a token sequence
# representing the replacement macro tokens
# ----------------------------------------------------------------------
def macro_expand_args(self,macro,args):
# Make a copy of the macro token sequence
rep = [copy.copy(_x) for _x in macro.value]
# Make string expansion patches. These do not alter the length of the replacement sequence
str_expansion = {}
for argnum, i in macro.str_patch:
if argnum not in str_expansion:
str_expansion[argnum] = ('"%s"' % "".join([x.value for x in args[argnum]])).replace("\\","\\\\")
rep[i] = copy.copy(rep[i])
rep[i].value = str_expansion[argnum]
# Make the variadic macro comma patch. If the variadic macro argument is empty, we get rid
comma_patch = False
if macro.variadic and not args[-1]:
for i in macro.var_comma_patch:
rep[i] = None
comma_patch = True
# Make all other patches. The order of these matters. It is assumed that the patch list
# has been sorted in reverse order of patch location since replacements will cause the
# size of the replacement sequence to expand from the patch point.
expanded = { }
for ptype, argnum, i in macro.patch:
# Concatenation. Argument is left unexpanded
if ptype == 'c':
rep[i:i+1] = args[argnum]
# Normal expansion. Argument is macro expanded first
elif ptype == 'e':
if argnum not in expanded:
expanded[argnum] = self.expand_macros(args[argnum])
rep[i:i+1] = expanded[argnum]
# Get rid of removed comma if necessary
if comma_patch:
rep = [_i for _i in rep if _i]
return rep
# ----------------------------------------------------------------------
# expand_macros()
#
# Given a list of tokens, this function performs macro expansion.
# The expanded argument is a dictionary that contains macros already
# expanded. This is used to prevent infinite recursion.
# ----------------------------------------------------------------------
def expand_macros(self,tokens,expanded=None):
if expanded is None:
expanded = {}
i = 0
while i < len(tokens):
t = tokens[i]
if t.type == self.t_ID:
if t.value in self.macros and t.value not in expanded:
# Yes, we found a macro match
expanded[t.value] = True
m = self.macros[t.value]
if not m.arglist:
# A simple macro
ex = self.expand_macros([copy.copy(_x) for _x in m.value],expanded)
for e in ex:
e.lineno = t.lineno
tokens[i:i+1] = ex
i += len(ex)
else:
# A macro with arguments
j = i + 1
while j < len(tokens) and tokens[j].type in self.t_WS:
j += 1
if tokens[j].value == '(':
tokcount,args,positions = self.collect_args(tokens[j:])
if not m.variadic and len(args) != len(m.arglist):
self.error(self.source,t.lineno,"Macro %s requires %d arguments" % (t.value,len(m.arglist)))
i = j + tokcount
elif m.variadic and len(args) < len(m.arglist)-1:
if len(m.arglist) > 2:
self.error(self.source,t.lineno,"Macro %s must have at least %d arguments" % (t.value, len(m.arglist)-1))
else:
self.error(self.source,t.lineno,"Macro %s must have at least %d argument" % (t.value, len(m.arglist)-1))
i = j + tokcount
else:
if m.variadic:
if len(args) == len(m.arglist)-1:
args.append([])
else:
args[len(m.arglist)-1] = tokens[j+positions[len(m.arglist)-1]:j+tokcount-1]
del args[len(m.arglist):]
# Get macro replacement text
rep = self.macro_expand_args(m,args)
rep = self.expand_macros(rep,expanded)
for r in rep:
r.lineno = t.lineno
tokens[i:j+tokcount] = rep
i += len(rep)
del expanded[t.value]
continue
elif t.value == '__LINE__':
t.type = self.t_INTEGER
t.value = self.t_INTEGER_TYPE(t.lineno)
i += 1
return tokens
# ----------------------------------------------------------------------
# evalexpr()
#
# Evaluate an expression token sequence for the purposes of evaluating
# integral expressions.
# ----------------------------------------------------------------------
def evalexpr(self,tokens):
# tokens = tokenize(line)
# Search for defined macros
i = 0
while i < len(tokens):
if tokens[i].type == self.t_ID and tokens[i].value == 'defined':
j = i + 1
needparen = False
result = "0L"
while j < len(tokens):
if tokens[j].type in self.t_WS:
j += 1
continue
elif tokens[j].type == self.t_ID:
if tokens[j].value in self.macros:
result = "1L"
else:
result = "0L"
if not needparen: break
elif tokens[j].value == '(':
needparen = True
elif tokens[j].value == ')':
break
else:
self.error(self.source,tokens[i].lineno,"Malformed defined()")
j += 1
tokens[i].type = self.t_INTEGER
tokens[i].value = self.t_INTEGER_TYPE(result)
del tokens[i+1:j+1]
i += 1
tokens = self.expand_macros(tokens)
for i,t in enumerate(tokens):
if t.type == self.t_ID:
tokens[i] = copy.copy(t)
tokens[i].type = self.t_INTEGER
tokens[i].value = self.t_INTEGER_TYPE("0L")
elif t.type == self.t_INTEGER:
tokens[i] = copy.copy(t)
# Strip off any trailing suffixes
tokens[i].value = str(tokens[i].value)
while tokens[i].value[-1] not in "0123456789abcdefABCDEF":
tokens[i].value = tokens[i].value[:-1]
expr = "".join([str(x.value) for x in tokens])
expr = expr.replace("&&"," and ")
expr = expr.replace("||"," or ")
expr = expr.replace("!"," not ")
try:
result = eval(expr)
except Exception:
self.error(self.source,tokens[0].lineno,"Couldn't evaluate expression")
result = 0
return result
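# For example (an illustrative sketch, not part of the original module): for
# "#if defined(FOO) && 1", evalexpr() receives the tokens after "#if", replaces
# "defined(FOO)" with 1 or 0, rewrites "&&" as " and ", and then eval()s the
# resulting Python expression; the truth of the result decides whether the
# #if block is emitted.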
# ----------------------------------------------------------------------
# parsegen()
#
# Parse an input string.
# ----------------------------------------------------------------------
def parsegen(self,input,source=None):
# Replace trigraph sequences
t = trigraph(input)
lines = self.group_lines(t)
if not source:
source = ""
self.define("__FILE__ \"%s\"" % source)
self.source = source
chunk = []
enable = True
iftrigger = False
ifstack = []
for x in lines:
for i,tok in enumerate(x):
if tok.type not in self.t_WS: break
if tok.value == '#':
# Preprocessor directive
# insert necessary whitespace instead of eaten tokens
for tok in x:
if tok.type in self.t_WS and '\n' in tok.value:
chunk.append(tok)
dirtokens = self.tokenstrip(x[i+1:])
if dirtokens:
name = dirtokens[0].value
args = self.tokenstrip(dirtokens[1:])
else:
name = ""
args = []
if name == 'define':
if enable:
for tok in self.expand_macros(chunk):
yield tok
chunk = []
self.define(args)
elif name == 'include':
if enable:
for tok in self.expand_macros(chunk):
yield tok
chunk = []
oldfile = self.macros['__FILE__']
for tok in self.include(args):
yield tok
self.macros['__FILE__'] = oldfile
self.source = source
elif name == 'undef':
if enable:
for tok in self.expand_macros(chunk):
yield tok
chunk = []
self.undef(args)
elif name == 'ifdef':
ifstack.append((enable,iftrigger))
if enable:
if not args[0].value in self.macros:
enable = False
iftrigger = False
else:
iftrigger = True
elif name == 'ifndef':
ifstack.append((enable,iftrigger))
if enable:
if args[0].value in self.macros:
enable = False
iftrigger = False
else:
iftrigger = True
elif name == 'if':
ifstack.append((enable,iftrigger))
if enable:
result = self.evalexpr(args)
if not result:
enable = False
iftrigger = False
else:
iftrigger = True
elif name == 'elif':
if ifstack:
if ifstack[-1][0]: # We only pay attention if outer "if" allows this
if enable: # If already true, we flip enable False
enable = False
elif not iftrigger: # If False, but not triggered yet, we'll check expression
result = self.evalexpr(args)
if result:
enable = True
iftrigger = True
else:
self.error(self.source,dirtokens[0].lineno,"Misplaced #elif")
elif name == 'else':
if ifstack:
if ifstack[-1][0]:
if enable:
enable = False
elif not iftrigger:
enable = True
iftrigger = True
else:
self.error(self.source,dirtokens[0].lineno,"Misplaced #else")
elif name == 'endif':
if ifstack:
enable,iftrigger = ifstack.pop()
else:
self.error(self.source,dirtokens[0].lineno,"Misplaced #endif")
else:
# Unknown preprocessor directive
pass
else:
# Normal text
if enable:
chunk.extend(x)
for tok in self.expand_macros(chunk):
yield tok
chunk = []
# ----------------------------------------------------------------------
# include()
#
# Implementation of file-inclusion
# ----------------------------------------------------------------------
def include(self,tokens):
# Try to extract the filename and then process an include file
if not tokens:
return
if tokens:
if tokens[0].value != '<' and tokens[0].type != self.t_STRING:
tokens = self.expand_macros(tokens)
if tokens[0].value == '<':
# Include <...>
i = 1
while i < len(tokens):
if tokens[i].value == '>':
break
i += 1
else:
print("Malformed #include <...>")
return
filename = "".join([x.value for x in tokens[1:i]])
path = self.path + [""] + self.temp_path
elif tokens[0].type == self.t_STRING:
filename = tokens[0].value[1:-1]
path = self.temp_path + [""] + self.path
else:
print("Malformed #include statement")
return
for p in path:
iname = os.path.join(p,filename)
try:
data = open(iname,"r").read()
dname = os.path.dirname(iname)
if dname:
self.temp_path.insert(0,dname)
for tok in self.parsegen(data,filename):
yield tok
if dname:
del self.temp_path[0]
break
except IOError:
pass
else:
print("Couldn't find '%s'" % filename)
# ----------------------------------------------------------------------
# define()
#
# Define a new macro
# ----------------------------------------------------------------------
def define(self,tokens):
if isinstance(tokens,STRING_TYPES):
tokens = self.tokenize(tokens)
linetok = tokens
try:
name = linetok[0]
if len(linetok) > 1:
mtype = linetok[1]
else:
mtype = None
if not mtype:
m = Macro(name.value,[])
self.macros[name.value] = m
elif mtype.type in self.t_WS:
# A normal macro
m = Macro(name.value,self.tokenstrip(linetok[2:]))
self.macros[name.value] = m
elif mtype.value == '(':
# A macro with arguments
tokcount, args, positions = self.collect_args(linetok[1:])
variadic = False
for a in args:
if variadic:
print("No more arguments may follow a variadic argument")
break
astr = "".join([str(_i.value) for _i in a])
if astr == "...":
variadic = True
a[0].type = self.t_ID
a[0].value = '__VA_ARGS__'
variadic = True
del a[1:]
continue
elif astr[-3:] == "..." and a[0].type == self.t_ID:
variadic = True
del a[1:]
# If, for some reason, "." is part of the identifier, strip off the name for the purposes
# of macro expansion
if a[0].value[-3:] == '...':
a[0].value = a[0].value[:-3]
continue
if len(a) > 1 or a[0].type != self.t_ID:
print("Invalid macro argument")
break
else:
mvalue = self.tokenstrip(linetok[1+tokcount:])
i = 0
while i < len(mvalue):
if i+1 < len(mvalue):
if mvalue[i].type in self.t_WS and mvalue[i+1].value == '##':
del mvalue[i]
continue
elif mvalue[i].value == '##' and mvalue[i+1].type in self.t_WS:
del mvalue[i+1]
i += 1
m = Macro(name.value,mvalue,[x[0].value for x in args],variadic)
self.macro_prescan(m)
self.macros[name.value] = m
else:
print("Bad macro definition")
except LookupError:
print("Bad macro definition")
# ----------------------------------------------------------------------
# undef()
#
# Undefine a macro
# ----------------------------------------------------------------------
def undef(self,tokens):
id = tokens[0].value
try:
del self.macros[id]
except LookupError:
pass
# ----------------------------------------------------------------------
# parse()
#
# Parse input text.
# ----------------------------------------------------------------------
def parse(self,input,source=None,ignore={}):
self.ignore = ignore
self.parser = self.parsegen(input,source)
# ----------------------------------------------------------------------
# token()
#
# Method to return individual tokens
# ----------------------------------------------------------------------
def token(self):
try:
while True:
tok = next(self.parser)
if tok.type not in self.ignore: return tok
except StopIteration:
self.parser = None
return None
if __name__ == '__main__':
import ply.lex as lex
lexer = lex.lex()
# Run a preprocessor
import sys
f = open(sys.argv[1])
input = f.read()
p = Preprocessor(lexer)
p.parse(input,sys.argv[1])
while True:
tok = p.token()
if not tok: break
print(p.source, tok)
|
waynebhayesREPO_NAMESpArcFiRePATH_START.@SpArcFiRe_extracted@SpArcFiRe-master@scripts@SpArcFiRe-pyvenv@lib@python2.7@site-packages@astropy@extern@ply@cpp.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "saopicc/killMS",
"repo_path": "killMS_extracted/killMS-master/killMS/Predict/__init__.py",
"type": "Python"
}
|
#!/usr/bin/env python
"""
killMS, a package for calibration in radio interferometry.
Copyright (C) 2013-2017 Cyril Tasse, l'Observatoire de Paris,
SKA South Africa, Rhodes University
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
|
saopiccREPO_NAMEkillMSPATH_START.@killMS_extracted@killMS-master@killMS@Predict@__init__.py@.PATH_END.py
|
{
"filename": "test_PointLens.py",
"repo_name": "rpoleski/MulensModel",
"repo_path": "MulensModel_extracted/MulensModel-master/source/MulensModel/tests/test_PointLens.py",
"type": "Python"
}
|
import unittest
import numpy as np
import os
import MulensModel as mm
import fortran_files
from test_FitData import create_0939_parallax_model, SAMPLE_FILE_03, SAMPLE_FILE_03_EPH
SAMPLE_FILE = os.path.join(mm.DATA_PATH, 'unit_test_files', 'FSPL_test_1.dat')
PSPL_SAMPLE_DIR = os.path.join(mm.DATA_PATH, 'unit_test_files', 'fspl_derivs', 'test_PointLensClasses')
def get_file_params(filename):
"""Read in the model parameters used to create the file"""
with open(filename) as data_file:
lines = data_file.readlines()
ulens_params = lines[2].split()
model = mm.ModelParameters({'t_0': float(ulens_params[1]), 'u_0': float(ulens_params[2]),
't_E': float(ulens_params[3]), 'rho': float(ulens_params[4])})
return (model, float(ulens_params[5]))
def get_variables():
"""return a few variables used by 4 test functions below"""
if 'out' not in get_variables.__dict__:
names = ['Time', 'b_0', 'b_1', 'Mag_FS', 'Mag_LD', 'Mag']
data = np.genfromtxt(SAMPLE_FILE, names=names)
(parameters, gamma) = get_file_params(SAMPLE_FILE)
trajectory = mm.Trajectory(data['Time'], parameters)
get_variables.out = (data, gamma, trajectory)
return get_variables.out
def test_B_0_function():
"""test private _B_0_function"""
(data, _, trajectory) = get_variables()
point_lens = mm.FiniteSourceUniformGould94Magnification(
trajectory=trajectory)
test_b_0 = point_lens._B_0_function()
np.testing.assert_almost_equal(test_b_0, data['b_0'], decimal=5)
def test_B_1_function():
"""test private _B_1_function"""
(data, gamma, trajectory) = get_variables()
test_FSPL_LD = mm.FiniteSourceLDYoo04Magnification(
trajectory=trajectory, gamma=gamma)
test_b_1 = test_FSPL_LD._B_1_function()
np.testing.assert_almost_equal(test_b_1, data['b_1'], decimal=4)
def test_get_point_lens_finite_source_magnification():
"""test PLFS"""
(data, _, trajectory) = get_variables()
test_FSPL = mm.FiniteSourceUniformGould94Magnification(
trajectory=trajectory)
fspl_magnification = test_FSPL.get_magnification()
np.testing.assert_almost_equal(
fspl_magnification, data['Mag_FS'], decimal=5)
def test_get_point_lens_limb_darkening_magnification():
"""test PLFS+LD"""
(data, gamma, trajectory) = get_variables()
test_FSPL_LD = mm.FiniteSourceLDYoo04Magnification(
trajectory=trajectory, gamma=gamma)
fspl_magnification = test_FSPL_LD.get_magnification()
np.testing.assert_almost_equal(
fspl_magnification/data['Mag_LD'], 1., decimal=4)
def test_fspl_noLD():
"""
check if FSPL magnification is calculated properly
"""
t_0 = 2456789.012345
t_E = 23.4567
u_0 = 1e-4
rho = 1e-3
t_vec = np.array([-(rho**2-u_0**2)**0.5, 0., ((0.5*rho)**2-u_0**2)**0.5])
t_vec = t_vec * t_E + t_0
params = mm.ModelParameters(
{'t_0': t_0, 'u_0': u_0, 't_E': t_E, 'rho': rho})
trajectory = mm.Trajectory(t_vec, params)
mag_curve = mm.FiniteSourceUniformGould94Magnification(
trajectory=trajectory)
results = mag_curve.get_magnification()
u = np.array([rho, u_0, 0.5*rho])
pspl = (u**2 + 2.) / np.sqrt(u**2 * (u**2 + 4.))
expected = np.array([1.27323965, 0.19949906, 0.93421546])
# These values were calculated by Andy Gould (file b0b1.dat).
expected *= pspl
np.testing.assert_almost_equal(expected, results, decimal=4)
def test_get_d_u_d_params():
"""
Test that calculating derivatives with an ephemeris file is different from
without an ephemeris file.
"""
parameters = ['pi_E_N', 'pi_E_E']
model_with_par = create_0939_parallax_model()
data_ephm = mm.MulensData(
file_name=SAMPLE_FILE_03, ephemerides_file=SAMPLE_FILE_03_EPH)
parallax = {'earth_orbital': True,
'satellite': True,
'topocentric': True}
traj_ephm = mm.Trajectory(
data_ephm.time, parameters=model_with_par.parameters,
satellite_skycoord=data_ephm.satellite_skycoord,
coords=model_with_par.coords, parallax=parallax)
pl_ephm = mm.PointSourcePointLensMagnification(traj_ephm)
derivs_ephm = pl_ephm.get_d_u_d_params(parameters)
traj_no_ephm = mm.Trajectory(
data_ephm.time, parameters=model_with_par.parameters,
coords=model_with_par.coords, parallax=parallax)
pl_no_ephm = mm.PointSourcePointLensMagnification(traj_no_ephm)
derivs_no_ephm = pl_no_ephm.get_d_u_d_params(parameters)
for param in parameters:
ratio = derivs_ephm[param] / derivs_no_ephm[param]
assert (np.abs(ratio - 1.) > 0.001).all()
# Make sure every element of the PointLensMagnification classes are tested.
class TestPointSourcePointLensMagnification(unittest.TestCase):
def setUp(self):
self.sfit_files = fortran_files.read_sfit_files(PSPL_SAMPLE_DIR)
parameters = ['t_0', 'u_0', 't_E', 'rho']
self.parameters = mm.ModelParameters(
dict(zip(parameters, self.sfit_files['51'].a)))
self.gammas = self.sfit_files['51'].a[4:5] # Cludgy and inflexible.
self.trajectories = []
self._set_trajectories()
self.mag_objs = []
self._set_mag_objs()
def _set_trajectories(self):
for nob_indices in self.sfit_files['63'].sfit_nob_indices:
trajectory = mm.Trajectory(
self.sfit_files['63'].t[nob_indices], self.parameters)
self.trajectories.append(trajectory)
def _set_mag_objs(self):
for trajectory in self.trajectories:
mag_obj = mm.PointSourcePointLensMagnification(trajectory)
self.mag_objs.append(mag_obj)
def test_get_pspl_magnification(self):
for (nob_indices, mag_obj) in zip(
self.sfit_files['63'].sfit_nob_indices, self.mag_objs):
pspl_mag = mag_obj.get_pspl_magnification()
np.testing.assert_allclose(
pspl_mag, self.sfit_files['63'].amp[nob_indices], rtol=0.0001)
def test_get_magnification(self):
for (nob_indices, mag_obj) in zip(
self.sfit_files['63'].sfit_nob_indices, self.mag_objs):
mag = mag_obj.get_magnification()
np.testing.assert_allclose(
mag, self.sfit_files['63'].amp[nob_indices], rtol=0.0001)
def _get_factor_b1(self, nob_indices, gamma):
fspl_factor = self.sfit_files['63'].amp[nob_indices] * (
self.sfit_files['61'].db0[nob_indices] -
gamma * self.sfit_files['61'].db1[nob_indices])
fspl_factor /= self.sfit_files['51'].a[3] # rho
fspl_factor += self.sfit_files['62'].dAdu[nob_indices] * (
self.sfit_files['63'].b0[nob_indices] -
gamma * self.sfit_files['63'].b1[nob_indices])
return fspl_factor
def test_get_d_A_d_params(self):
"""
df/dparams = fs * dA/dparams (FSPL)
dA/dparams (PSPL) = d_A_d_u * d_u_d_params[key]
dA/dparams (FSPL) = d_u_d_params[key] * factor =
factor * dA/dparams(PSPL) / dA_du
dA_dparams(PSPL) = df/dparams * dA_du / fs / factor
"""
params = ['t_0', 'u_0', 't_E']
for (nob_indices, source_flux, gamma, mag_obj) in zip(
self.sfit_files['62'].sfit_nob_indices,
self.sfit_files['51'].source_fluxes,
self.gammas, self.mag_objs):
dA_dparam = mag_obj.get_d_A_d_params(params)
fspl_factor = self._get_factor_b1(nob_indices, gamma)
for j, param in enumerate(params):
short_param = param.replace('_', '')
sfit_df_dparam = self.sfit_files['62'].data[
'dfd{0}'.format(short_param)][nob_indices]
sfit_dA_dparam = (sfit_df_dparam *
self.sfit_files['62'].dAdu[nob_indices] /
source_flux / fspl_factor)
np.testing.assert_allclose(
dA_dparam[param], sfit_dA_dparam, rtol=0.015)
def test_get_d_u_d_params(self):
"""
PSPL
d_A_d_params[key] = d_A_d_u * d_u_d_params[key]
FSPL
if key == 'rho':
d_A_d_params[key] = self.get_d_A_d_rho()
else:
d_A_d_params[key] = d_u_d_params[key] * factor
sfit returns: FSPL:
61 dA/drho
62 df/dparams, dAdu
df/dparams = fs * dA/dparams (FSPL)
So, df/dparams / fs / factor = d_u_d_params
"""
params = ['t_0', 'u_0', 't_E']
for (nob_indices, source_flux, gamma, mag_obj) in zip(
self.sfit_files['62'].sfit_nob_indices,
self.sfit_files['51'].source_fluxes,
self.gammas, self.mag_objs):
du_dparam = mag_obj.get_d_u_d_params(params)
fspl_factor = self._get_factor_b1(nob_indices, gamma)
for j, param in enumerate(params):
short_param = param.replace('_', '')
sfit_df_dparam = self.sfit_files['62'].data[
'dfd{0}'.format(short_param)][nob_indices]
sfit_dA_dparam = sfit_df_dparam / source_flux / fspl_factor
np.testing.assert_allclose(
du_dparam[param], sfit_dA_dparam, rtol=0.015)
def test_get_d_A_d_u(self):
for (nob_indices, mag_obj) in zip(
self.sfit_files['62'].sfit_nob_indices, self.mag_objs):
dA_du = mag_obj.get_d_A_d_u()
np.testing.assert_allclose(
dA_du, self.sfit_files['62'].dAdu[nob_indices], rtol=0.015)
def test_pspl_magnification(self):
for (nob_indices, mag_obj) in zip(
self.sfit_files['63'].sfit_nob_indices, self.mag_objs):
np.testing.assert_allclose(
mag_obj.pspl_magnification,
self.sfit_files['63'].amp[nob_indices],
rtol=0.0001)
def test_magnification(self):
for (nob_indices, mag_obj) in zip(
self.sfit_files['63'].sfit_nob_indices, self.mag_objs):
with self.assertRaises(AttributeError):
_ = mag_obj.magnification
mag_obj.get_magnification()
np.testing.assert_allclose(
mag_obj.magnification,
self.sfit_files['63'].amp[nob_indices],
rtol=0.0001)
def test_u_(self):
for (nob_indices, mag_obj) in zip(
self.sfit_files['63'].sfit_nob_indices, self.mag_objs):
np.testing.assert_allclose(
mag_obj.u_, self.sfit_files['63'].x[nob_indices],
rtol=0.0001)
def test_u_2(self):
for (nob_indices, mag_obj) in zip(
self.sfit_files['63'].sfit_nob_indices, self.mag_objs):
np.testing.assert_allclose(
mag_obj.u_2, self.sfit_files['63'].x2[nob_indices],
rtol=0.0001)
class TestFiniteSourceUniformGould94Magnification(TestPointSourcePointLensMagnification):
def setUp(self):
TestPointSourcePointLensMagnification.setUp(self)
self.zs = []
for mag_obj in self.mag_objs:
z = mag_obj.u_ / mag_obj.trajectory.parameters.rho
self.zs.append(z)
self._indexes = []
self._indices_not_near_1 = []
self._indices_not_near_1_db = []
self.indices_mag_test = []
self._set_indices()
def _set_mag_objs(self):
for trajectory in self.trajectories:
mag_obj = mm.FiniteSourceUniformGould94Magnification(
trajectory=trajectory)
self.mag_objs.append(mag_obj)
def _set_indices(self):
z_break = 1.3
zs_1_margin = 0.001
for (zs, indices) in zip(
self.zs, self.sfit_files['63'].sfit_nob_indices):
# sfit uses different calculations for z < 0.001 and z > 10.
index_10 = (zs < 10.)
index_001 = (zs < 0.001)
# The sfit code is not accurate near 1.0.
not_near_1 = (np.abs(zs - 1.) > 0.03)
self.indices_mag_test.append(index_10 & ~index_001 & not_near_1)
index_large = (zs > z_break)
index_small = (zs <= z_break)
self._indexes.append([index_large, index_small])
# The sfit code is not accurate near 1.0.
near_1 = (np.abs(zs - 1.) > zs_1_margin)
self._indices_not_near_1.append(near_1)
near_1_db = (zs < 0.88) | (zs > 1.1)
self._indices_not_near_1_db.append(near_1_db)
def test_get_magnification(self):
for (nob_indices, mag_test_indices, gamma, mag_obj) in zip(
self.sfit_files['61'].sfit_nob_indices,
self.indices_mag_test, self.gammas, self.mag_objs):
mag = mag_obj.get_magnification()
sfit_mag = self.sfit_files['61'].mag[nob_indices][mag_test_indices]
b1_factor = (self.sfit_files['63'].amp[nob_indices][
mag_test_indices] *
self.sfit_files['63'].b1[nob_indices][
mag_test_indices] * gamma)
sfit_mag = sfit_mag + b1_factor
np.testing.assert_allclose(
mag[mag_test_indices], sfit_mag, rtol=0.005)
def test_magnification(self):
for (nob_indices, mag_test_indices, gamma, mag_obj) in zip(
self.sfit_files['61'].sfit_nob_indices,
self.indices_mag_test, self.gammas, self.mag_objs):
with self.assertRaises(AttributeError):
_ = mag_obj.magnification
sfit_mag = self.sfit_files['61'].mag[nob_indices][mag_test_indices]
b1_factor = (self.sfit_files['63'].amp[nob_indices][
mag_test_indices] *
self.sfit_files['63'].b1[nob_indices][
mag_test_indices] * gamma)
sfit_mag += b1_factor
mag_obj.get_magnification()
np.testing.assert_allclose(
mag_obj.magnification[mag_test_indices],
sfit_mag, rtol=0.005)
def _get_factor_b0(self, nob_indices):
fspl_factor_b0 = (self.sfit_files['63'].amp[nob_indices] *
self.sfit_files['61'].db0[nob_indices])
fspl_factor_b0 /= self.sfit_files['51'].a[3] # rho
fspl_factor_b0 += (self.sfit_files['62'].dAdu[nob_indices] *
self.sfit_files['63'].b0[nob_indices])
return fspl_factor_b0
def test_get_d_u_d_params(self):
super().test_get_d_u_d_params()
def test_get_d_A_d_u(self):
self._get_d_A_d_u_1()
self._get_d_A_d_u_2()
def _get_d_A_d_u_1(self):
"""
sfit returns: FSPL:
61 dA/drho
62 df/dparams, dAdu
A_US = A_PS(u) * b0(z)
dAdu_US = dA_PS(u) * b0(z) + A_PS(u) * db0(z)
dAdu_FS = (damp*(b0-gamma*b1)+amp*(db0-gamma*db1)/rho)
= dA*b0 - dA*gamma*b1 + A*db0/rho - A*gamma*db1/rho
= dAdu_US - dA*gamma*b1 - A*gamma*db1/rho
"""
rho = self.sfit_files['51'].a[3]
for (nob_indices, mag_test_indices, mag_obj, gamma) in zip(
self.sfit_files['62'].sfit_nob_indices,
self.indices_mag_test, self.mag_objs,
self.gammas):
dA_du = mag_obj.get_d_A_d_u()[mag_test_indices]
sfit_dA_du_US = (self.sfit_files['63'].b0[nob_indices][mag_test_indices] *
self.sfit_files['62'].dAdu[nob_indices][mag_test_indices])
sfit_dA_du_US += (self.sfit_files['63'].amp[nob_indices][mag_test_indices] *
self.sfit_files['61'].db0[nob_indices][mag_test_indices] /
rho)
np.testing.assert_allclose(dA_du, sfit_dA_du_US, rtol=0.015)
def _get_d_A_d_u_2(self):
"""
sfit returns: FSPL:
61 dA/drho
62 df/dparams, dAdu
A_US = A_PS(u) * b0(z)
dAdu_US = dA_PS(u) * b0(z) + A_PS(u) * db0(z)
dAdu_FS = (damp*(b0-gamma*b1)+amp*(db0-gamma*db1)/rho)
= dA*b0 - dA*gamma*b1 + A*db0/rho - A*gamma*db1/rho
= dAdu_US - dA*gamma*b1 - A*gamma*db1/rho
df_FS = fs * (dAdu_US - dA*gamma*b1 - A*gamma*db1/rho)
"""
rho = self.sfit_files['51'].a[3]
zip_ = zip(self.sfit_files['62'].sfit_nob_indices, self.sfit_files['51'].source_fluxes,
self.gammas, self.indices_mag_test, self.mag_objs)
for (nob_indices, source_flux, gamma, mag_test_indices, mag_obj) in zip_:
dA_du = mag_obj.get_d_A_d_u()[mag_test_indices]
b1_term = (self.sfit_files['62'].dAdu * self.sfit_files['63'].b1)[nob_indices][mag_test_indices]
b1_term += (self.sfit_files['63'].amp * self.sfit_files['61'].db1)[nob_indices][mag_test_indices] / rho
b1_term *= gamma
sfit_dA_du_US = self.sfit_files['62'].df[nob_indices][mag_test_indices] / source_flux + b1_term
np.testing.assert_allclose(dA_du, sfit_dA_du_US, rtol=0.015)
def test_get_d_A_d_params(self):
"""
df/dparams = fs * dA/dparams (FSPL)
dA/dparams (PSPL) = d_A_d_u * d_u_d_params[key]
dA/dparams (FSPL) = d_u_d_params[key] * factor
= factor * dA/dparams(PSPL) / dA_du
dA/dparams (b0) = d_u_d_params * factor_b0
dA/dparams (FSPL) = d_u_d_params * factor_b1
dA/dparams (b0) = dA/dparams (FSPL) * factor_b1 / factor_b0
= df/dparams * factor_b1 / factor_b0 / fs
b0:
factor = self.pspl_magnification * self.db0
factor /= self.trajectory.parameters.rho
factor += self.get_d_A_d_u() * self.b0
b0, b1:
factor = self.pspl_magnification * (self.db0 - self._gamma * self.db1)
factor /= self.trajectory.parameters.rho
factor += self.get_d_A_d_u() * (self.b0 - self._gamma * self.b1)
"""
params = ['t_0', 'u_0', 't_E']
for (nob_indices, source_flux, gamma, mag_test_indices, not_near_1,
mag_obj) in zip(
self.sfit_files['62'].sfit_nob_indices,
self.sfit_files['51'].source_fluxes,
self.gammas,
self.indices_mag_test, self._indices_not_near_1_db,
self.mag_objs):
dA_dparam = mag_obj.get_d_A_d_params(params)
fspl_factor_b1 = self._get_factor_b1(nob_indices, gamma)
fspl_factor_b0 = self._get_factor_b0(nob_indices)
for j, param in enumerate(params):
short_param = param.replace('_', '')
sfit_df_dparam = self.sfit_files['62'].data[
'dfd{0}'.format(short_param)][nob_indices]
sfit_dA_dparam = (sfit_df_dparam * fspl_factor_b0 /
fspl_factor_b1 / source_flux)
np.testing.assert_allclose(
dA_dparam[param][mag_test_indices & not_near_1],
sfit_dA_dparam[mag_test_indices & not_near_1], rtol=0.015)
def test_get_d_A_d_rho(self):
"""
d_A_d_rho = np.ones(len(self.trajectory.times))
d_A_d_rho *= self.pspl_magnification
d_A_d_rho *= -self.u_ / self.trajectory.parameters.rho**2
d_A_d_rho *= (self.db0 - self._gamma * self.db1)
dA_drho_b0 = db0 * dA_drho_b1 /(db0 - gamma*db1)
"""
for (nob_indices, source_flux, gamma, mag_test_indices,
mag_obj) in zip(
self.sfit_files['61'].sfit_nob_indices,
self.sfit_files['51'].source_fluxes, self.gammas,
self.indices_mag_test, self.mag_objs):
sfit_df_dparam = self.sfit_files['61'].data['dfdrho'][nob_indices]
factor = self.sfit_files['61'].data['db0'][nob_indices]
factor /= (self.sfit_files['61'].data['db0'][nob_indices] -
gamma * self.sfit_files['61'].data['db1'][nob_indices])
sfit_dA_drho = factor * sfit_df_dparam / source_flux
dAdrho = mag_obj.get_d_A_d_rho()
np.testing.assert_allclose(
dAdrho[mag_test_indices], sfit_dA_drho[mag_test_indices],
rtol=0.015)
def test_b0(self):
for (nob_indices, mag_test_indices, mag_obj) in zip(
self.sfit_files['63'].sfit_nob_indices,
self.indices_mag_test, self.mag_objs):
np.testing.assert_allclose(
mag_obj.b0[mag_test_indices],
self.sfit_files['63'].b0[nob_indices][mag_test_indices],
atol=0.0001)
def test_db0(self):
for (nob_indices, mag_test_indices, mag_obj) in zip(
self.sfit_files['61'].sfit_nob_indices,
self.indices_mag_test, self.mag_objs):
np.testing.assert_allclose(
mag_obj.db0[mag_test_indices],
self.sfit_files['61'].db0[nob_indices][mag_test_indices],
atol=0.005)
def test_z_(self):
for (nob_indices, mag_obj) in zip(
self.sfit_files['63'].sfit_nob_indices, self.mag_objs):
np.testing.assert_allclose(
mag_obj.z_,
self.sfit_files['63'].x[nob_indices] /
self.sfit_files['51'].a[3],
rtol=0.0001)
class TestFiniteSourceUniformGould94DirectMagnification(TestFiniteSourceUniformGould94Magnification):
def setUp(self):
TestFiniteSourceUniformGould94Magnification.setUp(self)
def _set_mag_objs(self):
for trajectory in self.trajectories:
mag_obj = mm.FiniteSourceUniformGould94Magnification(
trajectory=trajectory, direct=True)
self.mag_objs.append(mag_obj)
def test_get_d_A_d_u(self):
# derivatives of B_0 not implemented for direct method.
pass
def test_db0(self):
for mag_obj in self.mag_objs:
with self.assertRaises(NotImplementedError):
_ = mag_obj.db0
def test_get_d_A_d_params(self):
"""
df/dparams = fs * dA/dparams (FSPL)
"""
params = ['t_0', 'u_0', 't_E', 'rho']
for mag_obj in self.mag_objs:
with self.assertRaises(NotImplementedError):
mag_obj.get_d_A_d_params(params)
def test_get_d_A_d_rho(self):
for mag_obj in self.mag_objs:
with self.assertRaises(NotImplementedError):
mag_obj.get_d_A_d_rho()
class TestFiniteSourceLDYoo04Magnification(TestFiniteSourceUniformGould94Magnification):
def setUp(self):
TestFiniteSourceUniformGould94Magnification.setUp(self)
def _set_mag_objs(self):
for (trajectory, gamma) in zip(self.trajectories, self.gammas):
mag_obj = mm.FiniteSourceLDYoo04Magnification(
trajectory=trajectory, gamma=gamma)
self.mag_objs.append(mag_obj)
def test_get_magnification(self):
for (nob_indices, mag_test_indices, mag_obj) in zip(
self.sfit_files['61'].sfit_nob_indices,
self.indices_mag_test, self.mag_objs):
mag = mag_obj.get_magnification()
np.testing.assert_allclose(
mag[mag_test_indices],
self.sfit_files['61'].mag[nob_indices][mag_test_indices],
rtol=0.005)
def test_magnification(self):
for (nob_indices, mag_test_indices, mag_obj) in zip(
self.sfit_files['61'].sfit_nob_indices,
self.indices_mag_test, self.mag_objs):
with self.assertRaises(AttributeError):
_ = mag_obj.magnification
mag_obj.get_magnification()
np.testing.assert_allclose(
mag_obj.magnification[mag_test_indices],
self.sfit_files['61'].mag[nob_indices][mag_test_indices],
rtol=0.005)
def test_get_d_u_d_params(self):
super().test_get_d_u_d_params()
def test_get_d_A_d_u(self):
"""
Straight up reported in fort.62.
"""
for (nob_indices, source_flux, mag_test_indices, mag_obj) in zip(
self.sfit_files['62'].sfit_nob_indices,
self.sfit_files['51'].source_fluxes,
self.indices_mag_test, self.mag_objs):
dA_du = mag_obj.get_d_A_d_u()
sfit_dA_du = self.sfit_files['62'].df[nob_indices] / source_flux
np.testing.assert_allclose(
dA_du[mag_test_indices],
sfit_dA_du[mag_test_indices], rtol=0.015)
def test_get_d_A_d_params(self):
"""
df/dparams = fs * dA/dparams (FSPL)
"""
params = ['t_0', 'u_0', 't_E']
for (nob_indices, source_flux, gamma, mag_test_indices, mag_obj) in zip(
self.sfit_files['62'].sfit_nob_indices,
self.sfit_files['51'].source_fluxes,
self.gammas, self.indices_mag_test, self.mag_objs):
dA_dparam = mag_obj.get_d_A_d_params(params)
for j, param in enumerate(params):
short_param = param.replace('_', '')
sfit_df_dparam = self.sfit_files['62'].data[
'dfd{0}'.format(short_param)][nob_indices]
sfit_dA_dparam = sfit_df_dparam / source_flux
np.testing.assert_allclose(
dA_dparam[param][mag_test_indices],
sfit_dA_dparam[mag_test_indices], rtol=0.015)
def test_get_d_A_d_rho(self):
for (nob_indices, source_flux, mag_test_indices, mag_obj) in zip(
self.sfit_files['61'].sfit_nob_indices,
self.sfit_files['51'].source_fluxes,
self.indices_mag_test, self.mag_objs):
sfit_df_dparam = self.sfit_files['61'].data['dfdrho'][nob_indices]
sfit_dA_dparam = sfit_df_dparam / source_flux
dAdrho = mag_obj.get_d_A_d_rho()
np.testing.assert_allclose(
dAdrho[mag_test_indices], sfit_dA_dparam[mag_test_indices],
rtol=0.015)
def test_b1(self):
for (nob_indices, mag_test_indices, mag_obj) in zip(
self.sfit_files['63'].sfit_nob_indices,
self.indices_mag_test, self.mag_objs):
np.testing.assert_allclose(
mag_obj.b1[mag_test_indices],
self.sfit_files['63'].b1[nob_indices][mag_test_indices],
atol=0.0001)
def test_db1(self):
for (nob_indices, mag_test_indices, mag_obj) in zip(
self.sfit_files['61'].sfit_nob_indices,
self.indices_mag_test, self.mag_objs):
np.testing.assert_allclose(
mag_obj.db1[mag_test_indices],
self.sfit_files['61'].db1[nob_indices][mag_test_indices],
atol=0.001)
def test_gamma(self):
for (gamma, mag_obj) in zip(self.gammas, self.mag_objs):
np.testing.assert_almost_equal(gamma, mag_obj.gamma)
class TestFiniteSourceLDYoo04DirectMagnification(TestFiniteSourceLDYoo04Magnification):
def setUp(self):
TestFiniteSourceLDYoo04Magnification.setUp(self)
def _set_trajectories(self):
# Take only N_MAX indices from within t_0 +- 3*t_star to reduce runtime
n_max = 10
index_1 = self.sfit_files['63'].t > (
self.parameters.t_0 - 3. * self.parameters.t_star)
index_2 = self.sfit_files['63'].t < (
self.parameters.t_0 + 3. * self.parameters.t_star)
index = index_1 & index_2
for i, nob_indices in enumerate(self.sfit_files['63'].sfit_nob_indices):
new_nob_indices = nob_indices & index
if np.sum(new_nob_indices) > n_max:
n_skip = np.floor(np.sum(new_nob_indices) / n_max).astype(int)
mask = np.zeros(len(nob_indices), dtype=bool)
mask[::n_skip] = True
new_nob_indices = new_nob_indices & mask
for f_sixty in ['61', '62', '63']:
self.sfit_files[f_sixty].sfit_nob_indices[i] = new_nob_indices
TestFiniteSourceLDYoo04Magnification._set_trajectories(self)
def _set_mag_objs(self):
for (trajectory, gamma) in zip(self.trajectories, self.gammas):
mag_obj = mm.FiniteSourceLDYoo04Magnification(
trajectory=trajectory, gamma=gamma, direct=True)
self.mag_objs.append(mag_obj)
def test_get_d_A_d_u(self):
# derivatives of B_0 not implemented for direct method.
pass
def test_db0(self):
for mag_obj in self.mag_objs:
with self.assertRaises(NotImplementedError):
_ = mag_obj.db0
def test_db1(self):
for mag_obj in self.mag_objs:
with self.assertRaises(NotImplementedError):
_ = mag_obj.db1
def test_get_d_A_d_params(self):
"""
df/dparams = fs * dA/dparams (FSPL)
"""
params = ['t_0', 'u_0', 't_E', 'rho']
for mag_obj in self.mag_objs:
with self.assertRaises(NotImplementedError):
mag_obj.get_d_A_d_params(params)
def test_get_d_A_d_rho(self):
for mag_obj in self.mag_objs:
with self.assertRaises(NotImplementedError):
mag_obj.get_d_A_d_rho()
|
rpoleskiREPO_NAMEMulensModelPATH_START.@MulensModel_extracted@MulensModel-master@source@MulensModel@tests@test_PointLens.py@.PATH_END.py
|
{
"filename": "multiproc.py",
"repo_name": "dstndstn/astrometry.net",
"repo_path": "astrometry.net_extracted/astrometry.net-main/util/multiproc.py",
"type": "Python"
}
|
# This file is part of the Astrometry.net suite.
# Licensed under a 3-clause BSD style license - see LICENSE
from __future__ import print_function
import multiprocessing
class FakeAsyncResult(object):
def __init__(self, X):
self.X = X
def wait(self, *a):
pass
def get(self, *a):
return self.X
def ready(self):
return True
def successful(self):
return True
class funcwrapper(object):
def __init__(self, func):
self.func = func
def __call__(self, *X):
#print 'Trying to call', self.func
#print 'with args', X
try:
return self.func(*X)
except:
import traceback
print('Exception while calling your function:')
print(' params:', X)
print(' exception:')
traceback.print_exc()
raise
class memberfuncwrapper(object):
def __init__(self, obj, funcname):
self.obj = obj
self.funcname = funcname
def __call__(self, *X):
func = getattr(self.obj, self.funcname)
#print 'Trying to call', self.func
#print 'with args', X
try:
return func(*X)
except:
import traceback
print('Exception while calling your function:')
print(' object:', self.obj)
print(' member function:', self.funcname)
print(' ', func)
print(' params:', X)
print(' exception:')
traceback.print_exc()
raise
class multiproc(object):
def __init__(self, nthreads=1, init=None, initargs=[],
map_chunksize=1, pool=None, wrap_all=False):
self.wrap_all = wrap_all
if pool is not None:
self.pool = pool
self.applyfunc = self.pool.apply_async
else:
if nthreads == 1:
self.pool = None
# self.map = map
self.applyfunc = lambda f,a,k: f(*a, **k)
if init is not None:
init(*initargs)
else:
self.pool = multiprocessing.Pool(nthreads, init, initargs)
# self.map = self.pool.map
self.applyfunc = self.pool.apply_async
self.async_results = []
self.map_chunksize = map_chunksize
def map(self, f, args, chunksize=None, wrap=False):
cs = chunksize
if cs is None:
cs = self.map_chunksize
if self.pool:
if wrap or self.wrap_all:
f = funcwrapper(f)
#print 'pool.map: f', f
#print 'args', args
#print 'cs', cs
return self.pool.map(f, args, cs)
return list(map(f, args))
def map_async(self, func, iterable, wrap=False):
if self.pool is None:
return FakeAsyncResult(map(func, iterable))
if wrap or self.wrap_all:
return self.pool.map_async(funcwrapper(func), iterable)
return self.pool.map_async(func, iterable)
def imap(self, func, iterable, chunksize=None, wrap=False):
cs = chunksize
if cs is None:
cs = self.map_chunksize
if self.pool is None:
import itertools
if 'imap' in dir(itertools):
# py2
return itertools.imap(func, iterable)
else:
# py3
return map(func, iterable)
if wrap or self.wrap_all:
func = funcwrapper(func)
return self.pool.imap(func, iterable, chunksize=cs)
def imap_unordered(self, func, iterable, chunksize=None, wrap=False):
cs = chunksize
if cs is None:
cs = self.map_chunksize
if self.pool is None:
import itertools
if 'imap' in dir(itertools):
# py2
return itertools.imap(func, iterable)
else:
# py3
return map(func, iterable)
if wrap or self.wrap_all:
func = funcwrapper(func)
return self.pool.imap_unordered(func, iterable, chunksize=cs)
def apply(self, f, args, wrap=False, kwargs={}):
if self.pool is None:
return FakeAsyncResult(f(*args, **kwargs))
if wrap:
f = funcwrapper(f)
res = self.applyfunc(f, args, kwargs)
self.async_results.append(res)
return res
def waitforall(self):
print('Waiting for async results to finish...')
for r in self.async_results:
print(' waiting for', r)
r.wait()
print('all done')
self.async_results = []
def close(self):
if self.pool is not None:
self.pool.close()
self.pool = None
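# --- Illustrative usage sketch (added note, not part of the original module) ---
# multiproc wraps multiprocessing.Pool so that nthreads=1 falls back to the plain
# built-in map/apply, which keeps single-threaded debugging simple. Assuming the
# package is installed so this module is importable as astrometry.util.multiproc:
#
#   from astrometry.util.multiproc import multiproc
#
#   def square(x):          # must be picklable (defined at module top level)
#       return x * x
#
#   mp = multiproc(nthreads=4)
#   print(mp.map(square, range(10)))    # parallel map over the pool
#   res = mp.apply(square, (7,))        # asynchronous apply; returns an AsyncResult
#   mp.waitforall()                     # block until all apply() calls finish
#   print(res.get())
#   mp.close()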
|
dstndstnREPO_NAMEastrometry.netPATH_START.@astrometry.net_extracted@astrometry.net-main@util@multiproc.py@.PATH_END.py
|
{
"filename": "README.md",
"repo_name": "sfarrens/sf_deconvolve",
"repo_path": "sf_deconvolve_extracted/sf_deconvolve-master/README.md",
"type": "Markdown"
}
|
<a href="https://ascl.net/2212.010"><img src="https://img.shields.io/badge/ascl-2212.010-blue.svg?colorB=262255" alt="ascl:2212.010" /></a>
SF_DECONVOLVE
=============
> Author: **Samuel Farrens**
> Year: **2018**
> Version: **5.0**
> Email: [samuel.farrens@gmail.com](mailto:samuel.farrens@gmail.com)
> Website: [sfarrens.github.io](https://sfarrens.github.io/)
> Reference Paper: [arXiv:1703.02305](https://arxiv.org/abs/1703.02305)
Contents
------------
1. [Introduction](#intro_anchor)
1. [Installation](#install_anchor)
1. [Required Packages](#required_package)
1. [Execution](#exe_anchor)
1. [Input Format](#in_format)
1. [Running the executable script](#py_ex)
1. [Running the code in a Python session](#py_sesh)
1. [Example](#eg_anchor)
1. [Code Options](#opt_anchor)
1. [Troubleshooting](#trouble)
<a name="intro_anchor"></a>
## Introduction
This repository contains a Python code designed for PSF deconvolution and analysis.
The directory `lib` contains several primary functions and classes, but the majority of the optimisation and analysis tools are provided in **[sf_tools](https://sfarrens.github.io/sf_tools/)**.

<a name="install_anchor"></a>
## Installation
After downloading or cloning the repository simply run:
```bash
$ python setup.py install
```
<a name="required_package"></a>
### Required Packages
In order to run sf_deconvolve the following packages must be installed:
* **[Python](https://www.python.org/)**
[Tested with v 2.7.11 and 3.6.3]
* **[ModOpt](https://github.com/CEA-COSMIC/ModOpt)** [>=1.1.4]
* **[sf_tools](https://sfarrens.github.io/sf_tools/)** [>=2.0]
* The current implementation of wavelet transformations additionally requires the `mr_transform.cc` C++ script from the Sparse2D library **(https://github.com/CosmoStat/Sparse2D)**. These C++ scripts will need to be compiled in order to run. *Note:* The low-rank approximation method can be run purely in Python without the Sparse2D binaries.
<a name="exe_anchor"></a>
## Execution
The primary code is an executable script called ``sf_deconvolve.py`` which is designed to take an observed (*i.e.* with PSF effects and noise) stack of galaxy images and a known PSF, and attempt to reconstruct the original images. The input formats are Numpy binary files (.npy) and FITS image files (.fits).
<a name="in_format"></a>
### Input Format
The input files should have the following format:
- Input Images: This should be either a Numpy binary or a FITS file containing a 3D array of galaxy images. *e.g.* for a sample of 10 images, each with size 41x41, the shape of the array should be [10, 41, 41].
- Input PSF(s): This should be either a Numpy binary or a FITS file containing a 2D array (for a fixed PSF) or a 3D array (for a spatially varying PSF) of PSF images. For the spatially varying case the number of PSF images must match the number of corresponding galaxy images. *e.g.* For a sample of 10 images the code expects 10 PSFs.
See the files provided in the `examples` directory for reference.
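As a quick illustration (purely random placeholder data, shown only to demonstrate the expected shapes and file format), inputs like those described above can be written with Numpy:
```Python
>>> import numpy as np
>>> images = np.random.randn(10, 41, 41)  # 10 galaxy stamps of 41x41 pixels
>>> psfs = np.random.randn(10, 41, 41)    # one PSF per image (spatially varying case)
>>> np.save('INPUT_IMAGES.npy', images)
>>> np.save('PSF.npy', psfs)
```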
<a name="py_ex"></a>
### Running the executable script
The code can be run in a terminal (not in a Python session) as follows:
```bash
$ sf_deconvolve.py -i INPUT_IMAGES.npy -p PSF.npy -o OUTPUT_NAME
```
Where `INPUT_IMAGES.npy` denotes the Numpy binary file containing the stack of observed galaxy images, `PSF.npy` denotes the PSF corresponding to each galaxy image and `OUTPUT_NAME` specifies the output path and file name.
Alternatively, the code arguments can be stored in a configuration file (with any name) and the code can be run by providing the file name preceded by `@`.
```bash
$ sf_deconvolve.py @config.ini
```
An example configuration file is provided in the `examples` directory.
<a name="py_sesh"></a>
### Running the code in a Python session
The code can be run in an active Python session in two ways. For either approach first import `sf_deconvolve`:
```Python
>>> import sf_deconvolve
```
The first approach simply runs the full script where the command line arguments can be passed as a list of strings:
```Python
>>> sf_deconvolve.main(['-i', 'INPUT_IMAGES.npy', '-p', 'PSF.npy', '-o', 'OUTPUT_NAME'])
```
The second approach assumes that the user has already read the images and PSF(s) into memory and wishes to return the deconvolution results to memory:
```Python
>>> opts = vars(sf_deconvolve.get_opts(['-i', 'INPUT_IMAGES.npy', '-p', 'PSF.npy', '-o', 'OUTPUT_NAME']))
>>> primal_res, dual_res, psf_res = sf_deconvolve.run(INPUT_IMAGES, INPUT_PSFS, **opts)
```
Where `INPUT_IMAGES` and `INPUT_PSFS` are both Numpy arrays. The resulting deconvolved images will be saved to the variable `primal_res`.
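If the inputs are stored on disk as Numpy binaries, they can first be loaded into memory, for example:
```Python
>>> import numpy as np
>>> INPUT_IMAGES = np.load('INPUT_IMAGES.npy')
>>> INPUT_PSFS = np.load('PSF.npy')
```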
In both cases it is possible to read a predefined configuration file.
```Python
>>> opts = vars(sf_deconvolve.get_opts(['@config.ini']))
>>> primal_res, dual_res, psf_res = sf_deconvolve.run(INPUT_IMAGES, INPUT_PSFS, **opts)
```
<a name="eg_anchor"></a>
### Example
The following example can be run on the sample data provided in the ``example`` directory.
This example takes a sample of 100 galaxy images (with PSF effects and added noise) and the corresponding PSFs, and recovers the original images using low-rank approximation via Condat-Vu optimisation.
```bash
$ sf_deconvolve.py -i example_image_stack.npy -p example_psfs.npy -o example_output --mode lowr
```
The example can also be run using the configuration file provided.
The result will be two Numpy binary files called `example_output_primal.npy` and `example_output_dual.npy` corresponding to the primal and dual variables in the splitting algorithm. The reconstructed images will be in the `example_output_primal.npy` file.
The example can also be run with the FITS files provided.
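The deconvolved stack can then be inspected in a Python session, for example (assuming the default `npy` output format):
```Python
>>> import numpy as np
>>> primal = np.load('example_output_primal.npy')
>>> primal.shape  # one deconvolved image per input galaxy
```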
<a name="opt_anchor"></a>
### Code Options
#### Required Arguments
* **-i INPUT, --input INPUT:** Input data file name. File should be a Numpy binary containing a stack of noisy galaxy images with PSF effects (*i.e.* a 3D array).
* **-p PSF, --psf PSF:** PSF file name. File should be a Numpy binary containing either: (a) a single PSF (*i.e.* a 2D array for *fixed* format) or (b) a stack of PSFs corresponding to each of the galaxy images (*i.e.* a 3D array for *obj_var* format).
#### Optional Arguments
* **-h, --help:** Show the help message and exit.
* **-v, --version:** Show the program's version number and exit.
* **-q, --quiet:** Suppress verbose output for each iteration.
* **-o, --output:** Output file name. If not specified, output files will be placed in the input file path.
* **--output_format** Output file format [npy or fits].
*Initialisation:*
* **-k, --current_res**: Current deconvolution results file name (*i.e.* the file containing the primal results from a previous run).
* **--noise_est:** Initial estimate of the noise standard deviation in the observed galaxy images. If not specified this quantity is automatically calculated using the median absolute deviation of the input image(s).
*Optimisation:*
* **-m, --mode {all,sparse,lowr,grad}:** Option to specify the optimisation mode [all, sparse, lowr or grad]. *all* performs optimisation using both low-rank approximation and sparsity, *sparse* uses only sparsity, *lowr* uses only low-rank and *grad* uses only gradient descent. (default: lowr)
* **--opt_type {condat,fwbw,gfwbw}:** Option to specify the optimisation method to be implemented [condat, fwbw or gfwbw]. *condat* implements the Condat-Vu proximal splitting method, *fwbw* implements Forward-Backward splitting with FISTA speed-up and *gfwbw* implements the generalised Forward-Backward splitting method. (default: condat)
* **--n_iter:** Number of iterations. (default: 150)
* **--cost_window:** Window to measure cost function (*i.e.* interval of iterations for which cost should be calculated). (default: 1)
* **--convergence:** Convergence tolerance. (default: 0.0001)
* **--no_pos:** Option to turn off positivity constraint.
* **--grad_type:** Option to specify the type of gradient [psf_known, psf_unknown, none]. *psf_known* implements deconvolution with PSFs provided, *psf_unknown* simultaneously improves the PSF while performing the deconvolution, *none* implements deconvolution without gradient descent (for testing purposes only). (default: psf_known)
*Low-Rank Approximation:*
* **--lowr_thresh_factor:** Low rank threshold factor. (default: 1)
* **--lowr_type:** Type of low-rank regularisation [standard or ngole]. (default: standard)
* **--lowr_thresh_type:** Low rank threshold type [soft or hard]. (default: hard)
*Sparsity:*
* **--wavelet_type:** Type of Wavelet to be used (see [iSap Documentation](http://www.cosmostat.org/wp-content/uploads/2014/12/doc_iSAP.pdf)). (default: 1)
* **--wave_thresh_factor:** Wavelet threshold factor. (default: [3.0, 3.0, 4.0])
* **--n_reweights:** Number of reweightings. (default: 1)
*PSF Estimation*
* **--lambda_psf:** Regularisation control parameter for PSF estimation. (default: 1.0)
* **--beta_psf:** Gradient step for PSF estimation. (default: 1.0)
*Condat Algorithm:*
* **--relax:** Relaxation parameter (rho_n in Condat-Vu method). (default: 0.8)
* **--condat_sigma:** Condat proximal dual parameter. If the option is provided without any value, an appropriate value is calculated automatically. (default: 0.5)
* **--condat_tau:** Condat proximal primal parameter. If the option is provided without any value, an appropriate value is calculated automatically. (default: 0.5)
*Testing:*
* **-c, --clean_data:** Clean data file name.
* **-r, --random_seed:** Random seed. Use this option if the input data is a randomly selected subset (with known seed) of the full sample of clean data.
* **--true_psf:** True PSFs file name.
* **--kernel:** Standard deviation of pixels for Gaussian kernel. This option will multiply the deconvolution results by a Gaussian kernel.
* **--metric:** Metric to average errors [median or mean]. (default: median)
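As a purely illustrative combination of the options above (using the in-session `main` interface shown earlier, which accepts the same flags as the command line):
```Python
>>> import sf_deconvolve
>>> sf_deconvolve.main(['-i', 'example_image_stack.npy', '-p', 'example_psfs.npy',
...                     '-o', 'example_output', '--mode', 'lowr',
...                     '--opt_type', 'condat', '--n_iter', '300'])
```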
<a name="trouble"></a>
## Troubleshooting
* If you get the following error:
`ERROR: svd() got an unexpected keyword argument 'lapack_driver'`
Update your Numpy and Scipy installations
```bash
$ pip install --upgrade numpy
$ pip install --upgrade scipy
```
|
sfarrensREPO_NAMEsf_deconvolvePATH_START.@sf_deconvolve_extracted@sf_deconvolve-master@README.md@.PATH_END.py
|
{
"filename": "test_field.py",
"repo_name": "LSSTDESC/NaMaster",
"repo_path": "NaMaster_extracted/NaMaster-master/pymaster/tests/test_field.py",
"type": "Python"
}
|
import pytest
import numpy as np
import pymaster as nmt
import healpy as hp
import warnings
import sys
from .utils import normdiff
class FieldTester(object):
def __init__(self):
# This is to avoid showing an ugly warning that
# has nothing to do with pymaster
if (sys.version_info > (3, 1)):
warnings.simplefilter("ignore", ResourceWarning)
self.nside = 64
self.lmax = 3*self.nside-1
self.ntemp = 5
self.npix = int(hp.nside2npix(self.nside))
self.msk = np.ones(self.npix)
self.mps = np.zeros([3, self.npix])
self.tmp = np.zeros([self.ntemp, 3, self.npix])
self.beam = np.ones(self.lmax+1)
th, ph = hp.pix2ang(self.nside, np.arange(self.npix))
sth = np.sin(th)
cth = np.cos(th)
# Re(Y_22)
self.mps[0] = np.sqrt(15./2./np.pi)*sth**2*np.cos(2*ph)
# _2Y^E_20 + _2Y^B_30
self.mps[1] = -np.sqrt(15./2./np.pi)*sth**2/4.
self.mps[2] = -np.sqrt(105./2./np.pi)*cth*sth**2/2.
for i in range(self.ntemp):
# Re(Y_22)
self.tmp[i][0] = np.sqrt(15./2./np.pi)*sth**2*np.cos(2*ph)
# _2Y^E_20 + _2Y^B_30
self.tmp[i][1] = -np.sqrt(15./2./np.pi)*sth**2/4.
self.tmp[i][2] = -np.sqrt(105./2./np.pi)*cth*sth**2/2.
FT = FieldTester()
def test_field_compatibility():
map_ran = np.random.rand(FT.npix)*0.1+1
f0 = nmt.NmtField(FT.msk, [FT.mps[0]])
f1 = nmt.NmtField(FT.msk*map_ran, [map_ran])
assert f0.is_compatible(f1)
# Diff. lmax
f1 = nmt.NmtField(FT.msk*map_ran, [map_ran],
lmax=127)
assert not f0.is_compatible(f1)
# Diff. lmax_mask
f1 = nmt.NmtField(FT.msk*map_ran, [map_ran],
lmax_mask=127)
assert not f0.is_compatible(f1)
# Diff. nside
npix = hp.nside2npix(128)
msk = np.ones(npix)
mp = np.random.randn(npix)
f1 = nmt.NmtField(msk, [mp])
assert not f0.is_compatible(f1)
# Strictness
# Diff. nside but same lmax
f0 = nmt.NmtField(FT.msk, [FT.mps[0]], lmax=100, lmax_mask=100)
f1 = nmt.NmtField(msk, [mp], lmax=100, lmax_mask=100)
assert f0.is_compatible(f1, strict=False)
assert not f0.is_compatible(f1)
def test_field_get_mask():
nside = 32
npix = hp.nside2npix(nside)
mp = np.random.randn(1, npix)
msk = np.random.rand(npix)
f = nmt.NmtField(msk, mp, n_iter=0)
mskb = f.get_mask()
assert np.amax(np.fabs(mskb-msk)/np.std(msk)) < 1E-5
# Do the same with a big-endian mask
f = nmt.NmtField(msk.astype('>f8'), mp, n_iter=0)
mskb = f.get_mask()
assert np.amax(np.fabs(mskb-msk)/np.std(msk)) < 1E-5
def test_field_get_alms():
nside = 32
npix = hp.nside2npix(nside)
mp = np.random.randn(3, npix)
msk = np.ones(npix)
# Spin 0
f = nmt.NmtField(msk, [mp[0]], n_iter=0)
alm = f.get_alms()[0]
cl_tt_nmt = hp.alm2cl(alm)
# Spin 2
f = nmt.NmtField(msk, mp[1:], n_iter=0)
alm = f.get_alms()
cl_ee_nmt = hp.alm2cl(alm[0])
cl_bb_nmt = hp.alm2cl(alm[1])
cl_tt, cl_ee, cl_bb, cl_te, cl_eb, cl_tb = hp.anafast(mp, iter=0,
pol=True)
assert (np.all(np.fabs(cl_tt_nmt/cl_tt-1) < 1E-10))
assert (np.all(np.fabs(cl_ee_nmt[2:]/cl_ee[2:]-1) < 1E-10))
assert (np.all(np.fabs(cl_bb_nmt[2:]/cl_bb[2:]-1) < 1E-10))
def test_field_map_alm():
# Compare map-based alms with analytical input and make sure they
# are equal up to numerical accuracy.
nside = 32
npix = int(hp.nside2npix(nside))
lmax = 20
msk = np.ones(npix)
mps = np.zeros([3, npix])
th, ph = hp.pix2ang(nside, np.arange(npix))
sth = np.sin(th)
cth = np.cos(th)
# Re(Y_22)
mps[0] = np.sqrt(15./2./np.pi)*sth**2*np.cos(2*ph)
# _2Y^E_20 + _2Y^B_30
mps[1] = -np.sqrt(15./2./np.pi)*sth**2/4.
mps[2] = -np.sqrt(105./2./np.pi)*cth*sth**2/2.
# spin 0
f0_map = nmt.NmtField(msk, [mps[0]], lmax=lmax)
# spin 2
f2_map = nmt.NmtField(msk, [mps[1], mps[2]], lmax=lmax)
alms_map = np.array([f0_map.get_alms()[0],
f2_map.get_alms()[0],
f2_map.get_alms()[1]])
alms_in = np.zeros_like(alms_map)
alms_in[0, hp.Alm.getidx(lmax, 2, 2)] = 2.
alms_in[1, hp.Alm.getidx(lmax, 2, 0)] = 1.
alms_in[2, hp.Alm.getidx(lmax, 3, 0)] = 2.
for f_idx in range(3):
assert np.all(np.absolute(alms_map - alms_in)[f_idx] < 1.e-12)
def test_field_masked():
nside = 64
b = nmt.NmtBin.from_nside_linear(nside, 16)
msk = hp.read_map("test/benchmarks/msk.fits",
dtype=float)
mskb = msk > 0
mps = np.array(hp.read_map("test/benchmarks/mps.fits",
field=[0, 1, 2],
dtype=float))*mskb[None, :]
mps_msk = np.array([m * msk for m in mps])
f0 = nmt.NmtField(msk, [mps[0]])
f0_msk = nmt.NmtField(msk, [mps_msk[0]],
masked_on_input=True)
f2 = nmt.NmtField(msk, mps[1:])
f2_msk = nmt.NmtField(msk, mps_msk[1:],
masked_on_input=True)
w00 = nmt.NmtWorkspace.from_fields(f0, f0, b)
w02 = nmt.NmtWorkspace.from_fields(f0, f2, b)
w22 = nmt.NmtWorkspace.from_fields(f2, f2, b)
def mkcl(w, f, g):
return w.decouple_cell(nmt.compute_coupled_cell(f, g))
c00 = mkcl(w00, f0, f0).flatten()
c02 = mkcl(w02, f0, f2).flatten()
c22 = mkcl(w22, f2, f2).flatten()
c00_msk = mkcl(w00, f0_msk, f0_msk).flatten()
c02_msk = mkcl(w02, f0_msk, f2_msk).flatten()
c22_msk = mkcl(w22, f2_msk, f2_msk).flatten()
assert (np.all(np.fabs(c00-c00_msk) /
np.mean(c00) < 1E-10))
assert (np.all(np.fabs(c02-c02_msk) /
np.mean(c02) < 1E-10))
assert (np.all(np.fabs(c22-c22_msk) /
np.mean(c22) < 1E-10))
def test_field_masked_pure():
nside = 64
b = nmt.NmtBin.from_nside_linear(nside, 16)
msk = hp.read_map("test/benchmarks/msk.fits",
dtype=float)
mskb = msk > 0
mps = np.array(hp.read_map("test/benchmarks/mps.fits",
field=[1, 2],
dtype=float))*mskb[None, :]
mps_msk = np.array([m * msk for m in mps])
f2 = nmt.NmtField(msk, mps,
templates=[[FT.tmp[0][1]*mskb,
FT.tmp[0][2]*mskb]],
purify_b=True)
f2_msk = nmt.NmtField(msk, mps_msk,
templates=[[FT.tmp[0][1]*msk*mskb,
FT.tmp[0][2]*msk*mskb]],
masked_on_input=True,
purify_b=True)
w22 = nmt.NmtWorkspace.from_fields(f2, f2, b)
def mkcl(w, f, g):
return w.decouple_cell(nmt.compute_coupled_cell(f, g))
c22 = mkcl(w22, f2, f2).flatten()
c22_msk = mkcl(w22, f2_msk, f2_msk).flatten()
assert (np.all(np.fabs(c22-c22_msk) /
np.mean(c22) < 1E-10))
def test_field_alloc():
# No templates
f0 = nmt.NmtField(FT.msk, [FT.mps[0]],
beam=FT.beam)
f2 = nmt.NmtField(FT.msk, [FT.mps[1], FT.mps[2]],
beam=FT.beam)
f2p = nmt.NmtField(FT.msk, [FT.mps[1], FT.mps[2]],
beam=FT.beam,
purify_e=True, purify_b=True,
n_iter_mask=10)
assert (normdiff(f0.get_maps()[0],
FT.mps[0]*FT.msk) < 1E-10)
assert (normdiff(f2.get_maps()[0],
FT.mps[1]*FT.msk) < 1E-10)
assert (normdiff(f2.get_maps()[1],
FT.mps[2]*FT.msk) < 1E-10)
assert (1E-5*np.mean(np.fabs(f2p.get_maps()[0])) >
np.mean(np.fabs(f2p.get_maps()[0] -
FT.mps[1]*FT.msk)))
assert (1E-5*np.mean(np.fabs(f2p.get_maps()[1])) >
np.mean(np.fabs(f2p.get_maps()[1] -
FT.mps[2]*FT.msk)))
for f in [f0, f2, f2p]:
with pytest.raises(ValueError): # No templates
f.get_templates()
# With templates
f0 = nmt.NmtField(FT.msk, [FT.mps[0]],
templates=np.array([[t[0]] for t in FT.tmp]),
beam=FT.beam)
f2 = nmt.NmtField(FT.msk, [FT.mps[1], FT.mps[2]],
templates=np.array([[t[1], t[2]] for t in FT.tmp]),
beam=FT.beam)
# Map should be zero, since template = map
assert (normdiff(f0.get_maps()[0], 0*FT.msk) < 1E-10)
assert (normdiff(f2.get_maps()[0], 0*FT.msk) < 1E-10)
assert (normdiff(f2.get_maps()[1], 0*FT.msk) < 1E-10)
assert (len(f0.get_templates()) == 5)
assert (len(f2.get_templates()) == 5)
def test_field_lite():
# Lite field
fl = nmt.NmtField(FT.msk, [FT.mps[0]],
beam=FT.beam, lite=True)
# Empty field
with pytest.raises(ValueError): # No maps and no spin
fe = nmt.NmtField(FT.msk, None, beam=FT.beam)
fe = nmt.NmtField(FT.msk, None, beam=FT.beam, spin=1)
# Error checks
for f in [fl, fe]:
with pytest.raises(ValueError): # Query maps
f.get_maps()
with pytest.raises(ValueError): # Query templates
f.get_templates()
def test_field_error():
with pytest.raises(ValueError): # Incorrect mask size
nmt.NmtField(FT.msk[:15], FT.mps)
with pytest.raises(ValueError): # Incorrect map size
nmt.NmtField(FT.msk, [FT.mps[0, :15]])
with pytest.raises(ValueError): # Incorrect template size
nmt.NmtField(FT.msk, [FT.mps[0]],
templates=[[FT.tmp[0, 0, :15]]])
with pytest.raises(ValueError): # Passing 3 maps!
nmt.NmtField(FT.msk, FT.mps)
with pytest.raises(ValueError): # Passing 3 template maps!
nmt.NmtField(FT.msk, [FT.mps[1], FT.mps[2]],
templates=FT.tmp)
with pytest.raises(ValueError): # Passing crap as templates
nmt.NmtField(FT.msk, [FT.mps[1], FT.mps[2]],
templates=1)
with pytest.raises(ValueError): # Passing wrong beam
nmt.NmtField(FT.msk, [FT.mps[0]], beam=FT.beam[:30])
with pytest.raises(ValueError): # Passing crap as beam
nmt.NmtField(FT.msk, [FT.mps[0]], beam=1)
# Automatically assign spin = 0 for a single map
f = nmt.NmtField(FT.msk, [FT.mps[0]], n_iter=0)
assert (f.spin == 0)
# Automatically assign spin = 2 for 2 maps
f = nmt.NmtField(FT.msk, [FT.mps[1], FT.mps[2]], n_iter=0)
assert (f.spin == 2)
with pytest.raises(ValueError): # Spin=0 but 2 maps
f = nmt.NmtField(FT.msk, [FT.mps[1], FT.mps[2]],
spin=0, n_iter=0)
with pytest.raises(ValueError): # Spin=1 but 1 map
f = nmt.NmtField(FT.msk, [FT.mps[0]], spin=1, n_iter=0)
with pytest.raises(ValueError):
f = nmt.NmtField(FT.msk, [FT.mps[1], FT.mps[2]], spin=1,
purify_b=True, n_iter=0)
# lmax can't be zero
with pytest.raises(ValueError):
nmt.NmtField(FT.msk, [FT.mps[0]], lmax=0)
with pytest.raises(ValueError):
nmt.NmtField(FT.msk, [FT.mps[0]], lmax_mask=0)
|
LSSTDESCREPO_NAMENaMasterPATH_START.@NaMaster_extracted@NaMaster-master@pymaster@tests@test_field.py@.PATH_END.py
|
{
"filename": "mnist_test.py",
"repo_name": "afarahi/tatter",
"repo_path": "tatter_extracted/tatter-master/tatter/tests/mnist_test.py",
"type": "Python"
}
|
def mnist_digits_distance():
import numpy as np
from sklearn import datasets
import matplotlib.pyplot as plt
from tatter import two_sample_test
from sklearn.metrics import pairwise_distances
import os
plot_path = './tatter/tests/plots/'
if not os.path.exists(plot_path):
os.makedirs(plot_path)
digits = datasets.load_digits()
data = digits.data
target = digits.target
num = [data[target == i] for i in range(10)]
plt.figure(figsize=(16, 16))
for i in range(1, 9):
for j in range(8):
ax = plt.subplot(9, 8, i * 8 + j + 1)
if i == 1: ax.set_title('%i' % (j + 1), size=25, fontweight="bold")
if j == 0: ax.set_ylabel('%i' % (i + 1), size=25, fontweight="bold")
ax.set_yticklabels([])
if i != 8: ax.set_xticklabels([])
if i <= j:
plt.axis('off')
continue
sigma2 = np.median(pairwise_distances(num[i + 1], num[j + 1], metric='euclidean')) ** 2 * 2.0
mmd2u, mmd2u_null, p_value = two_sample_test(num[i+1], num[j+1], model='MMD',
kernel_function='rbf', gamma=1.0/sigma2,
iterations=500, verbose=True, n_jobs=4)
# print mmd2u
prob, bins, patches = plt.hist(mmd2u_null, range=[-0.005, 0.1], bins=5, density=True)
plt.plot(mmd2u, prob.max() / 10, 'wv', markersize=14, markeredgecolor='k',
markeredgewidth=2, label="$MMD^2_u$ p-value < %0.3f" % p_value)
plt.xlim([-0.01, 0.5])
if i == 8 and j == 3:
ax.set_xlabel('$MMD^2_u$ estimate', size=25)
# plt.legend(loc=1, numpoints=1, prop={'size':15})
plt.savefig(plot_path + 'MNIST-test.pdf', bbox_inches='tight')
plt.figure(figsize=(24, 2))
for i in range(9):
ax = plt.subplot(1, 9, i+1)
ax.set_title('%i' % (i + 1), size=25, fontweight="bold")
if i == 0: ax.set_ylabel('PDF', size=25)
ax.set_yticklabels([])
sigma2 = np.median(pairwise_distances(num[i + 1][::2], num[i + 1][1::2], metric='euclidean')) ** 2 * 2.0
mmd2u, mmd2u_null, p_value = two_sample_test(num[i+1][::2], num[i+1][1::2], model='MMD',
kernel_function='rbf', gamma=1.0/sigma2,
iterations=5000, verbose=True, n_jobs=4)
# print mmd2u
prob, bins, patches = plt.hist(mmd2u_null, range=[-0.005, 0.01], bins=50, density=True)
plt.plot(mmd2u, prob.max() / 10, 'wv', markersize=14, markeredgecolor='k',
markeredgewidth=2, label="$MMD^2_u$ p-value < %0.3f" % p_value)
plt.xlim([-0.005, 0.01])
if i == 4:
ax.set_xlabel('$MMD^2_u$ estimate', size=25)
# plt.legend(loc=1, numpoints=1, prop={'size':15})
ax.set_xticks([-0.005, 0, 0.005, 0.01 ])
ax.set_xticklabels([-0.005, '0', 0.005, ' ' ], size=14)
plt.savefig(plot_path + 'MNIST-test-2.pdf', bbox_inches='tight')
|
afarahiREPO_NAMEtatterPATH_START.@tatter_extracted@tatter-master@tatter@tests@mnist_test.py@.PATH_END.py
|
{
"filename": "_z.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/isosurface/caps/_z.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class ZValidator(_plotly_utils.basevalidators.CompoundValidator):
def __init__(self, plotly_name="z", parent_name="isosurface.caps", **kwargs):
super(ZValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop("data_class_str", "Z"),
data_docs=kwargs.pop(
"data_docs",
"""
fill
Sets the fill ratio of the `caps`. The default
fill value of the `caps` is 1 meaning that they
are entirely shaded. On the other hand Applying
a `fill` ratio less than one would allow the
creation of openings parallel to the edges.
show
Sets the fill ratio of the `slices`. The
default fill value of the z `slices` is 1
meaning that they are entirely shaded. On the
other hand Applying a `fill` ratio less than
one would allow the creation of openings
parallel to the edges.
""",
),
**kwargs,
)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@isosurface@caps@_z.py@.PATH_END.py
|
{
"filename": "errors.py",
"repo_name": "astropy/astropy",
"repo_path": "astropy_extracted/astropy-main/astropy/samp/errors.py",
"type": "Python"
}
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Defines custom errors and exceptions used in `astropy.samp`.
"""
import xmlrpc.client as xmlrpc
from astropy.utils.exceptions import AstropyUserWarning
__all__ = ["SAMPClientError", "SAMPHubError", "SAMPProxyError", "SAMPWarning"]
class SAMPWarning(AstropyUserWarning):
"""
SAMP-specific Astropy warning class.
"""
class SAMPHubError(Exception):
"""
SAMP Hub exception.
"""
class SAMPClientError(Exception):
"""
SAMP Client exceptions.
"""
class SAMPProxyError(xmlrpc.Fault):
"""
SAMP Proxy Hub exception.
"""
|
astropyREPO_NAMEastropyPATH_START.@astropy_extracted@astropy-main@astropy@samp@errors.py@.PATH_END.py
|
{
"filename": "FontFile.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/Pillow/py2/PIL/FontFile.py",
"type": "Python"
}
|
#
# The Python Imaging Library
# $Id$
#
# base class for raster font file parsers
#
# history:
# 1997-06-05 fl created
# 1997-08-19 fl restrict image width
#
# Copyright (c) 1997-1998 by Secret Labs AB
# Copyright (c) 1997-1998 by Fredrik Lundh
#
# See the README file for information on usage and redistribution.
#
from __future__ import print_function
import os
from . import Image, _binary
WIDTH = 800
def puti16(fp, values):
# write network order (big-endian) 16-bit sequence
for v in values:
if v < 0:
v += 65536
fp.write(_binary.o16be(v))
##
# Base class for raster font file handlers.
class FontFile(object):
bitmap = None
def __init__(self):
self.info = {}
self.glyph = [None] * 256
def __getitem__(self, ix):
return self.glyph[ix]
def compile(self):
"""Create metrics and bitmap"""
if self.bitmap:
return
# create bitmap large enough to hold all data
h = w = maxwidth = 0
lines = 1
for glyph in self:
if glyph:
d, dst, src, im = glyph
h = max(h, src[3] - src[1])
w = w + (src[2] - src[0])
if w > WIDTH:
lines += 1
w = src[2] - src[0]
maxwidth = max(maxwidth, w)
xsize = maxwidth
ysize = lines * h
if xsize == 0 and ysize == 0:
return ""
self.ysize = h
# paste glyphs into bitmap
self.bitmap = Image.new("1", (xsize, ysize))
self.metrics = [None] * 256
x = y = 0
for i in range(256):
glyph = self[i]
if glyph:
d, dst, src, im = glyph
xx = src[2] - src[0]
# yy = src[3] - src[1]
x0, y0 = x, y
x = x + xx
if x > WIDTH:
x, y = 0, y + h
x0, y0 = x, y
x = xx
s = src[0] + x0, src[1] + y0, src[2] + x0, src[3] + y0
self.bitmap.paste(im.crop(src), s)
self.metrics[i] = d, dst, s
def save(self, filename):
"""Save font"""
self.compile()
# font data
self.bitmap.save(os.path.splitext(filename)[0] + ".pbm", "PNG")
# font metrics
with open(os.path.splitext(filename)[0] + ".pil", "wb") as fp:
fp.write(b"PILfont\n")
fp.write((";;;;;;%d;\n" % self.ysize).encode("ascii")) # HACK!!!
fp.write(b"DATA\n")
for id in range(256):
m = self.metrics[id]
if not m:
puti16(fp, [0] * 10)
else:
puti16(fp, m[0] + m[1] + m[2])
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@Pillow@py2@PIL@FontFile.py@.PATH_END.py
|
{
"filename": "_showline.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/layout/xaxis/_showline.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class ShowlineValidator(_plotly_utils.basevalidators.BooleanValidator):
def __init__(self, plotly_name="showline", parent_name="layout.xaxis", **kwargs):
super(ShowlineValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "ticks+layoutstyle"),
**kwargs,
)
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@layout@xaxis@_showline.py@.PATH_END.py
|
{
"filename": "_angleref.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/scatterpolar/marker/_angleref.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class AnglerefValidator(_plotly_utils.basevalidators.EnumeratedValidator):
def __init__(
self, plotly_name="angleref", parent_name="scatterpolar.marker", **kwargs
):
super(AnglerefValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "plot"),
values=kwargs.pop("values", ["previous", "up"]),
**kwargs,
)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@scatterpolar@marker@_angleref.py@.PATH_END.py
|
{
"filename": "arima211_css_results.py",
"repo_name": "statsmodels/statsmodels",
"repo_path": "statsmodels_extracted/statsmodels-main/statsmodels/tsa/tests/results/arima211_css_results.py",
"type": "Python"
}
|
import numpy as np
from statsmodels.tools.tools import Bunch
llf = np.array([-240.29558272688])
nobs = np.array([202])
k = np.array([5])
k_exog = np.array([1])
sigma = np.array([.79494581155191])
chi2 = np.array([1213.6019521322])
df_model = np.array([3])
k_ar = np.array([2])
k_ma = np.array([1])
params = np.array([
.72428568600554,
1.1464248419014,
-.17024528879204,
-.87113675466923,
.63193884330392])
cov_params = np.array([
.31218565961764,
-.01618380799341,
.00226345462929,
.01386291798401,
-.0036338799176,
-.01618380799341,
.00705713030623,
-.00395404914463,
-.00685704952799,
-.00018629958479,
.00226345462929,
-.00395404914463,
.00255884492061,
.00363586332269,
.00039879711931,
.01386291798401,
-.00685704952799,
.00363586332269,
.00751765532203,
.00008982556101,
-.0036338799176,
-.00018629958479,
.00039879711931,
.00008982556101,
.00077550533053]).reshape(5, 5)
xb = np.array([
.72428566217422,
.72428566217422,
.56208884716034,
.53160965442657,
.45030161738396,
.45229381322861,
.38432359695435,
.40517011284828,
.36063131690025,
.30754271149635,
.32044330239296,
.29408219456673,
.27966624498367,
.29743707180023,
.25011941790581,
.27747189998627,
.24822402000427,
.23426930606365,
.27233305573463,
.23524768650532,
.26427435874939,
.21787133812904,
.22461311519146,
.22853142023087,
.24335558712482,
.22953669726849,
.25524401664734,
.22482520341873,
.26450532674789,
.31863233447075,
.27352628111839,
.33670437335968,
.25623551011086,
.28701293468475,
.315819054842,
.3238864839077,
.35844340920448,
.34399557113647,
.40348997712135,
.39373970031738,
.4022718667984,
.46476069092751,
.45762005448341,
.46842387318611,
.50536489486694,
.52051961421967,
.47866532206535,
.50378143787384,
.50863671302795,
.4302790760994,
.49568024277687,
.44652271270752,
.43774726986885,
.43010330200195,
.42344436049461,
.44517293572426,
.47460499405861,
.62086409330368,
.52550911903381,
.77532315254211,
.78466820716858,
.85438597202301,
.87056696414948,
1.0393311977386,
.99110960960388,
.85202795267105,
.91560190916061,
.89238166809082,
.88917690515518,
.72121334075928,
.84221452474594,
.8454754948616,
.82078683376312,
.95394861698151,
.84718400239944,
.839300096035,
.91501939296722,
.95743554830551,
1.0874761343002,
1.1326615810394,
1.1169674396515,
1.3300451040268,
1.4790810346603,
1.5027786493301,
1.7226468324661,
1.8395622968674,
1.5940405130386,
1.694568157196,
1.8241587877274,
1.7037791013718,
1.838702917099,
1.7334734201431,
1.4791669845581,
1.3007366657257,
1.7364456653595,
1.2694935798645,
.96595168113708,
1.1405370235443,
1.1328836679459,
1.1091921329498,
1.171138882637,
1.1465038061142,
1.0319484472275,
1.055313706398,
.93150246143341,
1.0844472646713,
.93333613872528,
.93137633800507,
1.0778160095215,
.38748729228973,
.77933365106583,
.75266307592392,
.88410103321075,
.94100385904312,
.91849637031555,
.96046274900436,
.92494148015976,
.98310285806656,
1.0272513628006,
1.0762135982513,
1.0743116140366,
1.254854798317,
1.1723403930664,
1.0479376316071,
1.3550333976746,
1.2255589962006,
1.2870025634766,
1.6643482446671,
1.3312928676605,
1.0657893419266,
1.1804157495499,
1.1335761547089,
1.137326002121,
1.1235628128052,
1.1115798950195,
1.1286649703979,
1.0989991426468,
1.0626485347748,
.96542054414749,
1.0419135093689,
.93033194541931,
.95628559589386,
1.027433514595,
.98328214883804,
1.0063992738724,
1.0645687580109,
.94354963302612,
.95077443122864,
1.0226324796677,
1.089217543602,
.97552293539047,
1.0441918373108,
1.052937746048,
.86785578727722,
.82579529285431,
.95432937145233,
.79897737503052,
.68320548534393,
.85365778207779,
.78336101770401,
.80072748661041,
.9089440703392,
.82500487565994,
.98515397310257,
.96745657920837,
1.0962044000626,
1.195325255394,
1.0824474096298,
1.2239117622375,
1.0142554044724,
1.0399018526077,
.80796521902084,
.7145761847496,
1.0631860494614,
.86374056339264,
.98086261749268,
1.0528303384781,
.86123734712601,
.80300676822662,
.96200370788574,
1.0364016294479,
.98456978797913,
1.1556725502014,
1.2025715112686,
1.0507286787033,
1.312912106514,
1.0682457685471,
2.0334177017212,
1.0775905847549,
1.2798084020615,
1.461397767067,
.72960823774338,
1.2498733997345,
1.466894865036,
1.286082983017,
1.3903408050537,
1.8483582735062,
1.4685434103012,
2.3107523918152,
.7711226940155,
-.31598940491676,
.68151205778122,
1.0212944746017])
y = np.array([
np.nan,
29.704284667969,
29.712087631226,
29.881610870361,
29.820302963257,
29.992294311523,
29.934322357178,
30.155170440674,
30.200632095337,
30.117542266846,
30.24044418335,
30.274082183838,
30.319667816162,
30.507436752319,
30.470119476318,
30.657470703125,
30.68822479248,
30.714269638062,
30.962333679199,
30.985248565674,
31.204275131226,
31.16787147522,
31.244613647461,
31.348531723022,
31.523355484009,
31.609535217285,
31.835243225098,
31.874824523926,
32.144504547119,
32.5986328125,
32.723526000977,
33.186702728271,
33.156238555908,
33.387012481689,
33.7158203125,
34.023887634277,
34.458442687988,
34.743995666504,
35.303489685059,
35.693740844727,
36.102272033691,
36.764759063721,
37.257617950439,
37.768424987793,
38.405364990234,
39.020519256592,
39.378665924072,
39.903781890869,
40.408638000488,
40.530277252197,
41.095680236816,
41.346523284912,
41.637748718262,
41.930103302002,
42.223442077637,
42.645172119141,
43.174606323242,
44.320865631104,
44.725509643555,
46.37532043457,
47.584667205811,
48.954383850098,
50.170566558838,
52.039329528809,
53.291107177734,
53.852027893066,
54.915603637695,
55.792385101318,
56.6891746521,
56.821212768555,
57.842212677002,
58.745475769043,
59.5207862854,
60.953948974609,
61.6471824646,
62.439296722412,
63.615020751953,
64.857437133789,
66.587478637695,
68.23265838623,
69.616966247559,
71.930046081543,
74.479080200195,
76.702774047852,
79.722648620605,
82.739562988281,
84.194038391113,
86.394561767578,
89.024154663086,
90.803779602051,
93.33869934082,
95.133476257324,
95.879165649414,
96.300735473633,
99.236442565918,
99.369491577148,
98.865951538086,
99.940536499023,
100.93288421631,
101.90919494629,
103.27114105225,
104.44651031494,
105.13195037842,
106.15531158447,
106.63150024414,
108.08444976807,
108.63333129883,
109.43137359619,
110.9778137207,
109.08748626709,
110.27933502197,
110.95265960693,
112.28410339355,
113.64099884033,
114.71849822998,
115.96046447754,
116.9249420166,
118.18309783936,
119.52725219727,
120.97621154785,
122.27430725098,
124.35485076904,
125.67234039307,
126.44793701172,
128.85502624512,
130.12554931641,
131.78700256348,
135.06434631348,
136.03129577637,
136.16580200195,
137.38041687012,
138.3335723877,
139.43733215332,
140.52355957031,
141.61158752441,
142.82865905762,
143.8990020752,
144.86265563965,
145.46542358398,
146.64192199707,
147.2303314209,
148.15628051758,
149.42742919922,
150.38327026367,
151.50639343262,
152.86457824707,
153.54354858398,
154.45077514648,
155.72262573242,
157.18922424316,
157.97552490234,
159.24418640137,
160.45292663574,
160.7678527832,
161.22578430176,
162.45433044434,
162.79898071289,
162.88320922852,
164.05364990234,
164.68334960938,
165.50071716309,
166.80894470215,
167.52500915527,
169.08515930176,
170.26745605469,
171.99620056152,
173.89532470703,
174.98243713379,
176.82391357422,
177.41424560547,
178.43989562988,
178.40797424316,
178.41456604004,
180.36318969727,
180.86373901367,
182.18086242676,
183.65283203125,
184.06123352051,
184.50300598145,
185.86199951172,
187.33641052246,
188.38456726074,
190.25567626953,
192.00257873535,
192.85073852539,
195.11291503906,
195.76824951172,
201.23341369629,
200.47758483887,
201.97981262207,
204.16139221191,
202.6296081543,
204.82388305664,
207.38688659668,
208.62408447266,
210.52333068848,
214.34335327148,
215.46553039551,
220.92074584961,
217.66012573242,
211.85800170898,
213.35252380371,
215.49029541016])
resid = np.array([
np.nan,
-.55428558588028,
-.36208805441856,
-.5116091966629,
-.28030154109001,
-.4422954916954,
-.18432281911373,
-.31516996026039,
-.39063200354576,
-.19754208624363,
-.26044383645058,
-.23408082127571,
-.10966806858778,
-.2874368429184,
-.09011957794428,
-.21747054159641,
-.20822501182556,
-.02426831051707,
-.21233357489109,
-.0452471524477,
-.25427412986755,
-.14787164330482,
-.12461274117231,
-.06853157281876,
-.14335711300373,
-.02953593060374,
-.18524432182312,
.00517434487119,
.13549427688122,
-.14863033592701,
.12647144496441,
-.28670132160187,
-.05623856931925,
.01299012638628,
-.01581981778145,
.07611121237278,
-.05844036862254,
.15600442886353,
-.00349225639366,
.0062618162483,
.19772660732269,
.03523930162191,
.04237993061543,
.13157841563225,
.09463357180357,
-.12051809579134,
.021334676072,
-.00378143391572,
-.30863979458809,
.06972090899944,
-.19567719101906,
-.14652347564697,
-.13774801790714,
-.13010406494141,
-.02344283089042,
.05482704937458,
.52539497613907,
-.12086410820484,
.87448859214783,
.42467761039734,
.51533102989197,
.34561482071877,
.82943379878998,
.2606680393219,
-.29110881686211,
.14797207713127,
-.01560037955642,
.00761602073908,
-.58917766809464,
.17878817021847,
.05778701230884,
-.04547626897693,
.47921240329742,
-.15394935011864,
-.0471847653389,
.26070219278336,
.28498136997223,
.64256292581558,
.51252233982086,
.2673399746418,
.9830310344696,
1.0699564218521,
.72091597318649,
1.2972244024277,
1.1773546934128,
-.13956540822983,
.50595796108246,
.80543184280396,
.07584273815155,
.6962223649025,
.06129856407642,
-.73347336053848,
-.87916851043701,
1.1992633342743,
-1.1364471912384,
-1.4694905281067,
-.0659501478076,
-.14053705334663,
-.13288362324238,
.19080325961113,
.02886573970318,
-.34650835394859,
-.03194846212864,
-.45531520247459,
.36850056052208,
-.38445034623146,
-.13333308696747,
.46862518787384,
-2.2778205871582,
.41251575946808,
-.07933671027422,
.4473415017128,
.4158943593502,
.1590022444725,
.28150060772896,
.03953726217151,
.27505549788475,
.31690016388893,
.37275013327599,
.22378182411194,
.82568991184235,
.14514668285847,
-.27233889698982,
1.052060842514,
.04496052488685,
.37444713711739,
1.6129913330078,
-.36434525251389,
-.93128365278244,
.03420155867934,
-.1804157346487,
-.03357006236911,
-.03733511269093,
-.02355666831136,
.08841699361801,
-.02865886501968,
-.09899909794331,
-.36265158653259,
.13458555936813,
-.34191656112671,
-.03033804148436,
.24371138215065,
-.02743346057832,
.1167239844799,
.29360374808311,
-.26456567645073,
-.04355576634407,
.24922250211239,
.37737664580345,
-.18922370672226,
.22447402775288,
.15580512583256,
-.55293774604797,
-.36785578727722,
.27421084046364,
-.45432937145233,
-.59898042678833,
.31679451465607,
-.1536608338356,
.01664204336703,
.39926943182945,
-.10894102603197,
.57500427961349,
.21484296023846,
.63253426551819,
.7037987112999,
.00467173522338,
.61756485700607,
-.4239239692688,
-.014255377464,
-.83988964557648,
-.70797437429428,
.88542991876602,
-.36318910121918,
.33625638484955,
.41914650797844,
-.4528394639492,
-.36123737692833,
.39699018001556,
.43800541758537,
.06358920782804,
.71544241905212,
.54432433843613,
-.20257151126862,
.94927132129669,
-.41291815042496,
3.4317541122437,
-1.8334206342697,
.22241242229939,
.72019159793854,
-2.2614006996155,
.94440299272537,
1.0961196422577,
-.04889564588666,
.50891524553299,
1.971658706665,
-.34635934233665,
3.1444630622864,
-4.0317454338074,
-5.4861345291138,
.81299871206284,
1.1164767742157,
.89470589160919])
yr = np.array([
np.nan,
-.55428558588028,
-.36208805441856,
-.5116091966629,
-.28030154109001,
-.4422954916954,
-.18432281911373,
-.31516996026039,
-.39063200354576,
-.19754208624363,
-.26044383645058,
-.23408082127571,
-.10966806858778,
-.2874368429184,
-.09011957794428,
-.21747054159641,
-.20822501182556,
-.02426831051707,
-.21233357489109,
-.0452471524477,
-.25427412986755,
-.14787164330482,
-.12461274117231,
-.06853157281876,
-.14335711300373,
-.02953593060374,
-.18524432182312,
.00517434487119,
.13549427688122,
-.14863033592701,
.12647144496441,
-.28670132160187,
-.05623856931925,
.01299012638628,
-.01581981778145,
.07611121237278,
-.05844036862254,
.15600442886353,
-.00349225639366,
.0062618162483,
.19772660732269,
.03523930162191,
.04237993061543,
.13157841563225,
.09463357180357,
-.12051809579134,
.021334676072,
-.00378143391572,
-.30863979458809,
.06972090899944,
-.19567719101906,
-.14652347564697,
-.13774801790714,
-.13010406494141,
-.02344283089042,
.05482704937458,
.52539497613907,
-.12086410820484,
.87448859214783,
.42467761039734,
.51533102989197,
.34561482071877,
.82943379878998,
.2606680393219,
-.29110881686211,
.14797207713127,
-.01560037955642,
.00761602073908,
-.58917766809464,
.17878817021847,
.05778701230884,
-.04547626897693,
.47921240329742,
-.15394935011864,
-.0471847653389,
.26070219278336,
.28498136997223,
.64256292581558,
.51252233982086,
.2673399746418,
.9830310344696,
1.0699564218521,
.72091597318649,
1.2972244024277,
1.1773546934128,
-.13956540822983,
.50595796108246,
.80543184280396,
.07584273815155,
.6962223649025,
.06129856407642,
-.73347336053848,
-.87916851043701,
1.1992633342743,
-1.1364471912384,
-1.4694905281067,
-.0659501478076,
-.14053705334663,
-.13288362324238,
.19080325961113,
.02886573970318,
-.34650835394859,
-.03194846212864,
-.45531520247459,
.36850056052208,
-.38445034623146,
-.13333308696747,
.46862518787384,
-2.2778205871582,
.41251575946808,
-.07933671027422,
.4473415017128,
.4158943593502,
.1590022444725,
.28150060772896,
.03953726217151,
.27505549788475,
.31690016388893,
.37275013327599,
.22378182411194,
.82568991184235,
.14514668285847,
-.27233889698982,
1.052060842514,
.04496052488685,
.37444713711739,
1.6129913330078,
-.36434525251389,
-.93128365278244,
.03420155867934,
-.1804157346487,
-.03357006236911,
-.03733511269093,
-.02355666831136,
.08841699361801,
-.02865886501968,
-.09899909794331,
-.36265158653259,
.13458555936813,
-.34191656112671,
-.03033804148436,
.24371138215065,
-.02743346057832,
.1167239844799,
.29360374808311,
-.26456567645073,
-.04355576634407,
.24922250211239,
.37737664580345,
-.18922370672226,
.22447402775288,
.15580512583256,
-.55293774604797,
-.36785578727722,
.27421084046364,
-.45432937145233,
-.59898042678833,
.31679451465607,
-.1536608338356,
.01664204336703,
.39926943182945,
-.10894102603197,
.57500427961349,
.21484296023846,
.63253426551819,
.7037987112999,
.00467173522338,
.61756485700607,
-.4239239692688,
-.014255377464,
-.83988964557648,
-.70797437429428,
.88542991876602,
-.36318910121918,
.33625638484955,
.41914650797844,
-.4528394639492,
-.36123737692833,
.39699018001556,
.43800541758537,
.06358920782804,
.71544241905212,
.54432433843613,
-.20257151126862,
.94927132129669,
-.41291815042496,
3.4317541122437,
-1.8334206342697,
.22241242229939,
.72019159793854,
-2.2614006996155,
.94440299272537,
1.0961196422577,
-.04889564588666,
.50891524553299,
1.971658706665,
-.34635934233665,
3.1444630622864,
-4.0317454338074,
-5.4861345291138,
.81299871206284,
1.1164767742157,
.89470589160919])
mse = np.array([
1.1115040779114,
.69814515113831,
.63478744029999,
.63409090042114,
.63356643915176,
.63317084312439,
.63287192583084,
.63264590501785,
.63247483968735,
.63234525918961,
.63224703073502,
.63217264413834,
.63211619853973,
.63207340240479,
.63204091787338,
.63201630115509,
.63199764490128,
.63198345899582,
.63197267055511,
.63196450471878,
.63195830583572,
.63195365667343,
.63195008039474,
.63194733858109,
.63194531202316,
.6319437623024,
.6319425702095,
.63194167613983,
.63194096088409,
.63194048404694,
.63194006681442,
.6319397687912,
.63193953037262,
.63193941116333,
.6319392323494,
.63193917274475,
.63193905353546,
.63193899393082,
.63193899393082,
.63193893432617,
.63193893432617,
.63193887472153,
.63193887472153,
.63193887472153,
.63193887472153,
.63193887472153,
.63193887472153,
.63193887472153,
.63193887472153,
.63193887472153,
.63193887472153,
.63193887472153,
.63193887472153,
.63193887472153,
.63193887472153,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688,
.63193881511688])
stdp = np.array([
.72428566217422,
.72428566217422,
.56208884716034,
.53160965442657,
.45030161738396,
.45229381322861,
.38432359695435,
.40517011284828,
.36063131690025,
.30754271149635,
.32044330239296,
.29408219456673,
.27966624498367,
.29743707180023,
.25011941790581,
.27747189998627,
.24822402000427,
.23426930606365,
.27233305573463,
.23524768650532,
.26427435874939,
.21787133812904,
.22461311519146,
.22853142023087,
.24335558712482,
.22953669726849,
.25524401664734,
.22482520341873,
.26450532674789,
.31863233447075,
.27352628111839,
.33670437335968,
.25623551011086,
.28701293468475,
.315819054842,
.3238864839077,
.35844340920448,
.34399557113647,
.40348997712135,
.39373970031738,
.4022718667984,
.46476069092751,
.45762005448341,
.46842387318611,
.50536489486694,
.52051961421967,
.47866532206535,
.50378143787384,
.50863671302795,
.4302790760994,
.49568024277687,
.44652271270752,
.43774726986885,
.43010330200195,
.42344436049461,
.44517293572426,
.47460499405861,
.62086409330368,
.52550911903381,
.77532315254211,
.78466820716858,
.85438597202301,
.87056696414948,
1.0393311977386,
.99110960960388,
.85202795267105,
.91560190916061,
.89238166809082,
.88917690515518,
.72121334075928,
.84221452474594,
.8454754948616,
.82078683376312,
.95394861698151,
.84718400239944,
.839300096035,
.91501939296722,
.95743554830551,
1.0874761343002,
1.1326615810394,
1.1169674396515,
1.3300451040268,
1.4790810346603,
1.5027786493301,
1.7226468324661,
1.8395622968674,
1.5940405130386,
1.694568157196,
1.8241587877274,
1.7037791013718,
1.838702917099,
1.7334734201431,
1.4791669845581,
1.3007366657257,
1.7364456653595,
1.2694935798645,
.96595168113708,
1.1405370235443,
1.1328836679459,
1.1091921329498,
1.171138882637,
1.1465038061142,
1.0319484472275,
1.055313706398,
.93150246143341,
1.0844472646713,
.93333613872528,
.93137633800507,
1.0778160095215,
.38748729228973,
.77933365106583,
.75266307592392,
.88410103321075,
.94100385904312,
.91849637031555,
.96046274900436,
.92494148015976,
.98310285806656,
1.0272513628006,
1.0762135982513,
1.0743116140366,
1.254854798317,
1.1723403930664,
1.0479376316071,
1.3550333976746,
1.2255589962006,
1.2870025634766,
1.6643482446671,
1.3312928676605,
1.0657893419266,
1.1804157495499,
1.1335761547089,
1.137326002121,
1.1235628128052,
1.1115798950195,
1.1286649703979,
1.0989991426468,
1.0626485347748,
.96542054414749,
1.0419135093689,
.93033194541931,
.95628559589386,
1.027433514595,
.98328214883804,
1.0063992738724,
1.0645687580109,
.94354963302612,
.95077443122864,
1.0226324796677,
1.089217543602,
.97552293539047,
1.0441918373108,
1.052937746048,
.86785578727722,
.82579529285431,
.95432937145233,
.79897737503052,
.68320548534393,
.85365778207779,
.78336101770401,
.80072748661041,
.9089440703392,
.82500487565994,
.98515397310257,
.96745657920837,
1.0962044000626,
1.195325255394,
1.0824474096298,
1.2239117622375,
1.0142554044724,
1.0399018526077,
.80796521902084,
.7145761847496,
1.0631860494614,
.86374056339264,
.98086261749268,
1.0528303384781,
.86123734712601,
.80300676822662,
.96200370788574,
1.0364016294479,
.98456978797913,
1.1556725502014,
1.2025715112686,
1.0507286787033,
1.312912106514,
1.0682457685471,
2.0334177017212,
1.0775905847549,
1.2798084020615,
1.461397767067,
.72960823774338,
1.2498733997345,
1.466894865036,
1.286082983017,
1.3903408050537,
1.8483582735062,
1.4685434103012,
2.3107523918152,
.7711226940155,
-.31598940491676,
.68151205778122,
1.0212944746017])
icstats = np.array([
202,
np.nan,
-240.29558272688,
5,
490.59116545376,
507.13250394077])
results = Bunch(
llf=llf,
nobs=nobs,
k=k,
k_exog=k_exog,
sigma=sigma,
chi2=chi2,
df_model=df_model,
k_ar=k_ar,
k_ma=k_ma,
params=params,
cov_params=cov_params,
xb=xb,
y=y,
resid=resid,
yr=yr,
mse=mse,
stdp=stdp,
icstats=icstats
)
|
statsmodelsREPO_NAMEstatsmodelsPATH_START.@statsmodels_extracted@statsmodels-main@statsmodels@tsa@tests@results@arima211_css_results.py@.PATH_END.py
|
{
"filename": "bug_report.md",
"repo_name": "handley-lab/lsbi",
"repo_path": "lsbi_extracted/lsbi-master/.github/ISSUE_TEMPLATE/bug_report.md",
"type": "Markdown"
}
|
---
name: Bug report
about: Create a report to help us improve
labels:
---
**Describe the bug**
A clear and concise description of what the bug is.
**To Reproduce**
Steps to reproduce the behaviour.
**Expected behavior**
A clear and concise description of what you expected to happen.
**Screenshots**
If applicable, add screenshots to help explain your problem.
**Additional context**
Add any other context about the problem here.
|
handley-labREPO_NAMElsbiPATH_START.@lsbi_extracted@lsbi-master@.github@ISSUE_TEMPLATE@bug_report.md@.PATH_END.py
|
{
"filename": "README.md",
"repo_name": "e-merlin/eMERLIN_CASA_pipeline",
"repo_path": "eMERLIN_CASA_pipeline_extracted/eMERLIN_CASA_pipeline-master/README.md",
"type": "Markdown"
}
|
1. [ Description ](#description)
1. [ Dependencies ](#dependencies)
1. [ Download ](#download)
1. [ Quick start ](#quickstart)
1. [ Usage ](#usage)
1. [ Additional information ](#information)
1. [ FAQ ](#faq)
<a name="description"></a>
## Description ##
The e-MERLIN CASA Pipeline (eMCP) is a Python pipeline working on top of [CASA](https://casa.nrao.edu/) to process and calibrate interferometric data from the [e-MERLIN](http://www.e-merlin.ac.uk/) array. Data information, statistics and assessment plots of calibration tables and visibilities are accessible through the pipeline weblog, which is updated in real time as the pipeline job progresses. The output is calibrated data and preliminary lookup images of the relevant fields. The pipeline can calibrate mixed-mode data that includes narrow-band, high-spectral-resolution spectral windows for spectral lines, as well as special observing modes such as pseudo-wideband observations. Currently no polarization calibration is performed.
<a name="dependencies"></a>
## Dependencies ##
- CASA v5.5+ (see https://casa.nrao.edu/)
- aoflagger v2.9+ (see https://sourceforge.net/projects/aoflagger/), only needed to calibrate L-band data.
<a name="download"></a>
## Download ##
If you have git installed, you can get the pipeline using:
`git clone https://github.com/e-merlin/eMERLIN_CASA_pipeline.git`
If you don't have git, you can download and unzip the files from [here](https://github.com/e-merlin/eMERLIN_CASA_pipeline/archive/master.zip).
To install aoflagger check out either A. Offringa's websites:
- aoflagger: https://sourceforge.net/projects/aoflagger/
- wsclean: https://sourceforge.net/projects/wsclean/ (not required for running the pipeline)
or (recommended) use the handy anaconda scripts to install the dependencies within a conda environment. To do this, follow the instructions in this repo: https://github.com/jradcliffe5/radio_conda_recipes
<a name="quickstart"></a>
## Quick start ##
If you have received calibrated data from the observatory and you want to refine the calibration, you can:
1. [Optionally] Modify `default_params.json` or add manual flags to `manual_avg.flags` with your desired values.
2. Run:
`casa -c eMERLIN_CASA_pipeline/eMERLIN_CASA_pipeline.py -r calibration`
<a name="usage"></a>
## Usage ##
Normal pipeline execution, when the file `inputs.ini` is in your working directory and you have extracted the pipeline:
`casa -c /path/to/pipeline/eMERLIN_CASA_pipeline.py`
To run the parallelized version using MPI in CASA you can use:
`mpicasa -n <num_cores> casa -c /path/to/pipeline/eMERLIN_CASA_pipeline.py`
**Optional arguments**
Names in capitals need to be set by the user:
```
-h, --help show this help message and exit
-i INPUTS_FILE
--inputs INPUTS_FILE
Inputs file to use. Default is inputs.ini
-r RUN_STEPS [RUN_STEPS ...]
--run-steps RUN_STEPS [RUN_STEPS ...]
Whitespace separated list of steps to run. Apart from
individual steps, it also accepts "all",
"pre_processing" and "calibration"
-s SKIP_STEPS [SKIP_STEPS ...]
--skip-steps SKIP_STEPS [SKIP_STEPS ...]
Whitespace separated list of steps to skip
-l
--list-steps Show list of available steps and exit
```
You can get the list of available steps with:
`casa -c eMERLIN_CASA_pipeline/eMERLIN_CASA_pipeline.py -l`
```
pre_processing
run_importfits
flag_aoflagger
flag_apriori
flag_manual
average
plot_data
save_flags
calibration
restore_flags
flag_manual_avg
init_models
bandpass
initial_gaincal
fluxscale
bandpass_final
gaincal_final
applycal_all
flag_target
plot_corrected
first_images
split_fields
```
Selection options are any combination of individual step names, `pre_processing`, `calibration`, or `all`.
**Examples of step selection**
You need to specify which steps of the pipeline to run. Some examples of how to choose steps:
1. Run all the calibration steps (ideal for observatory-processed data for which you want to tweak the calibration parameters). Includes all calibration steps (see list above):
`casa -c eMERLIN_CASA_pipeline/eMERLIN_CASA_pipeline.py -r calibration`
2. Run all pipeline steps (you will need the raw FITS-IDI files for the initial step):
`casa -c eMERLIN_CASA_pipeline/eMERLIN_CASA_pipeline.py -r all`
3. Run only the pre-processing steps (usually executed by the observatory. Otherwise you need the raw FITS-IDI files):
`casa -c eMERLIN_CASA_pipeline/eMERLIN_CASA_pipeline.py -r pre_processing`
4. Any combination of the steps above, for example:
`casa -c eMERLIN_CASA_pipeline/eMERLIN_CASA_pipeline.py -r plot_corrected first_images split_fields`
5. Run all calibration steps except plot_corrected:
`casa -c eMERLIN_CASA_pipeline/eMERLIN_CASA_pipeline.py -r calibration -s plot_corrected`
**Running the pipeline interactively from CASA**
To execute the pipeline from a running CASA instance you need to write in the CASA shell:
~~~~
run_in_casa = True
pipeline_path = '/path/to/pipeline_path/' # You need to define this variable explicitly
execfile(pipeline_path + 'eMERLIN_CASA_pipeline.py')
eMCP = run_pipeline(run_steps=['calibration'])
~~~~
Function `run_pipeline` parameters and defaults are: `run_pipeline(inputs_file='./inputs.ini', run_steps=[], skip_steps=[])`. Variables run_steps and skip_steps are python lists of steps as explained above.
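For example, a sketch of the interactive equivalent of the command-line call `-r calibration -s plot_corrected` shown above:
~~~~
eMCP = run_pipeline(run_steps=['calibration'], skip_steps=['plot_corrected'])
~~~~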
<a name="information"></a>
## Additional information ##
- [Documentation [online]](documentation/docs.md)
- [Wiki pages](https://github.com/e-merlin/eMERLIN_CASA_pipeline/wiki)
<a name="faq"></a>
## FAQ ##
**How do I open the weblog?**
The weblog consists of a series of HTML files. From the working directory you can open the file `./weblog/index.html` with your preferred web browser.
**How do I know what has been executed?**
You can visit the tab `Pipeline info` in the weblog, where you will find which steps were executed. You will also find a link to the Pipeline log, the CASA log and two files with all the parameters used during the data processing.
**I want to re-run the pipeline to improve the calibration, what do I change?**
There are two main blocks: pre-processing and calibration. Most probably you will only need to repeat the calibration part. Recommended course of action:
- Identify changes you want to include in the data reduction, like changing calibration parameters or adding manual flags.
- Add or edit file `manual_avg.flags` with your flag commands (follow the CASA syntax).
- Edit the file `inputs.ini` if you need to change the sources used or their intent.
- Edit the file `default_params.json` changing any parameter the pipeline is using, if needed.
- Run the calibration block of the pipeline with the command:
`casa -c ./eMERLIN_CASA_pipeline/eMERLIN_CASA_pipeline.py -r calibration`
**Which flag files does the pipeline accept and what is the right syntax?**
There are four different flag files accepted by the pipeline:
| Flag file | Used by step | Notes |
| ------------- |:-------------:| -----|
| observatory.flags | flag_apriori | Created by the observatory with antenna slewing or other major faults. Please do not edit it yourself. |
| manual.flags | flag_manual | This is meant to flag the unaveraged data set during the pre-processing stage |
| manual_avg.flags | flag_manual_avg | This is meant to flag the averaged data set during the calibration stage |
| manual_narrow.flags | flag_manual_avg | Use this to add flag commands for narrow-band spectral line data sets |
For the syntax needed by CASA, follow the [Basic Syntax Rules](https://casa.nrao.edu/casadocs/casa-5.5.0/global-task-list/task_flagdata/about) in the CASA flagdata documentation (end of the section). The main rules are:
1. Use only ONE white space to separate the parameters (no commas). Each key should only appear once on a given command line/string.
2. There is an implicit mode for each command, with the default being 'manual' if not given.
3. Comment lines can start with '#' and will be ignored. The parser used in flagdata will check each parameter name and type and exit with an error if the parameter is not a valid flagdata parameter or of a wrong type.
Example for e-MERLIN:
```
mode='manual' field='1331+305' antenna='' timerange='10:00:00~10:11:30'
mode='manual' field='' antenna='' timerange='' spw='0:0~30'
mode='manual' field='' antenna='Mk2' timerange='09:05:00~16:27:00'
mode='manual' field='1258-2219' antenna='' timerange='12:57:01~12:59:59'
mode='quack' field='1258-2219,1309-2322' quackinterval=24.
```
Example from the CASA docs:
```
scan='1~3' mode='manual'
# this line will be ignored
spw='9' mode='tfcrop' correlation='ABS_XX,YY' ntime=51.0
mode='extend' extendpols=True
scan='1~3,10~12' mode='quack' quackinterval=1.0
```
**How do I fill the source names in inputs.ini if I don't know which fields were observed?**
By default you should have all the information from the observatory. But if you only have the FITS-IDI and don't know the source names, you can run the first pipeline step alone `casa -c eMERLIN_CASA_pipeline/eMERLIN_CASA_pipeline.py -r run_importfits`. When the execution is finished, open the weblog and go to the tab `Observation Summary` where you will find the fields included in the MS and the listobs file with all the scans.
As a general rule, an observation will have 1331+3030 (3C286) as flux scale calibrator, 1407+2827 (OQ208) as bandpass calibrator and 0319+4130 (3C84) as bright ptcal calibrator. To distinguish between target and phasecal, you should look for alternating scans, and the target is usually the one with longer scans.
|
e-merlinREPO_NAMEeMERLIN_CASA_pipelinePATH_START.@eMERLIN_CASA_pipeline_extracted@eMERLIN_CASA_pipeline-master@README.md@.PATH_END.py
|
{
"filename": "_bgcolorsrc.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/image/hoverlabel/_bgcolorsrc.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class BgcolorsrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(
self, plotly_name="bgcolorsrc", parent_name="image.hoverlabel", **kwargs
):
super(BgcolorsrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
**kwargs,
)
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@image@hoverlabel@_bgcolorsrc.py@.PATH_END.py
|
{
"filename": "USAGE.md",
"repo_name": "christophmschaefer/miluphcuda",
"repo_path": "miluphcuda_extracted/miluphcuda-main/test_cases/gravity_merging/USAGE.md",
"type": "Markdown"
}
|
Gravity merging test case for miluphcuda
----------------------------------------
Christoph Burger
christoph.burger@uni-tuebingen.de
last updated: 15/Nov/2021
-----------------------------------------
This is a test case of "gently" merging spheres, in a mass range at the transition between the strength and gravity regimes.
The bodies have masses of 3e20 kg and 1e20 kg, respectively, and start from rest, directly next to each other.
The default settings are self-gravity + Collins strength model (including fragmentation with Grady-Kipp).
-----------------------------------------
**To run the test case:**
1. Compile miluphcuda with the `parameter.h` file from this directory.
Don't forget to also adapt the Makefile to your system.
2. Unpack `impact.0000.gz`.
3. Adapt the start script `run.sh` to your system (path to CUDA libs and to miluphcuda executable) and execute it.
4. Wait for the simulation to finish (75 output files).
The setup consists of ~50k SPH particles, with a runtime around 1h on most current GPUs (benchmarked on a GTX 970).
-----------------------------------------
**Check/visualize the results**
You can visualize the simulation for example with *Paraview*. Find the latest release at https://www.paraview.org/.
First, enter the simulation directory and run
utils/postprocessing/create_xdmf.py
which creates an xdmf file (*paraview.xdmf* by default). You can use the default cmd-line options,
so no need to set anything explicitly. Then start Paraview and either
* directly open the created paraview.xdmf and choose settings yourself
* load the prepared Paraview state in *results/paraview.pvsm* (*File -> Load State*), and select
the created paraview.xdmf file under *Choose File Names*
(note: if your Paraview version is not compatible with the state file version (check the first
few lines of paraview.pvsm), either try a closer Paraview version, or load paraview.xdmf directly)
* compare results (e.g., visualized with Paraview) to the animations in `results/`, which show
the interior structure of the colliding bodies (cut views) for different quantities.
* You can also visualize the workings of the Collins plasticity model by running `results/plot_plastic_yielding.sh`.
This produces plots for shear stress vs. pressure, including the respective yield limit curves.
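A minimal sketch of this post-processing workflow, assuming the directory layout described above (paths are placeholders for your own setup):
    cd /path/to/simulation_directory
    /path/to/miluphcuda/utils/postprocessing/create_xdmf.py    # writes paraview.xdmf with the default options
    paraview paraview.xdmf    # or load results/paraview.pvsm as a state and point it to paraview.xdmf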
|
christophmschaeferREPO_NAMEmiluphcudaPATH_START.@miluphcuda_extracted@miluphcuda-main@test_cases@gravity_merging@USAGE.md@.PATH_END.py
|
{
"filename": "_tick0.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/surface/colorbar/_tick0.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class Tick0Validator(_plotly_utils.basevalidators.AnyValidator):
def __init__(self, plotly_name="tick0", parent_name="surface.colorbar", **kwargs):
super(Tick0Validator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
implied_edits=kwargs.pop("implied_edits", {"tickmode": "linear"}),
**kwargs,
)
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@surface@colorbar@_tick0.py@.PATH_END.py
|
{
"filename": "test_multi_wavelength_greedy.py",
"repo_name": "rodluger/starry",
"repo_path": "starry_extracted/starry-master/tests/greedy/test_multi_wavelength_greedy.py",
"type": "Python"
}
|
# -*- coding: utf-8 -*-
"""
Test multi-wavelength maps.
"""
import starry
import pytest
import itertools
import numpy as np
# Params combinations
ydeg = [2]
udeg = [0]
nw = [5]
rv = [False, True]
reflected = [False, True]
params = list(itertools.product(ydeg, udeg, nw, rv, reflected))
# Reflected light + radial velocity is not implemented
params = [p for p in params if not (p[3] and p[4])]
@pytest.fixture(scope="class", params=params)
def map(request):
ydeg, udeg, nw, rv, reflected = request.param
map = starry.Map(ydeg=ydeg, udeg=udeg, nw=nw, reflected=reflected, rv=rv)
map.reflected = reflected
return map
class TestShapes:
"""Test the flux and intensity return value shapes."""
def test_flux(self, map):
assert map.flux().shape == (1, map.nw)
def test_flux_vector(self, map):
assert map.flux(xo=[0, 1, 2]).shape == (3, map.nw)
def test_intensity(self, map):
if map.reflected:
assert map.intensity().shape == (1, map.nw, 1)
else:
assert map.intensity().shape == (1, map.nw)
def test_intensity_vector(self, map):
if map.reflected:
assert map.intensity(lat=[0, 30, 60]).shape == (3, map.nw, 1)
else:
assert map.intensity(lat=[0, 30, 60]).shape == (3, map.nw)
def test_intensity_matrix(self, map):
if map.reflected:
assert map.intensity(lat=[0, 30, 60], xs=[1, 2]).shape == (
3,
map.nw,
2,
)
else:
pass
def test_rv(self, map):
if hasattr(map, "rv"):
assert map.rv().shape == (1, map.nw)
else:
pass
def test_rv_vector(self, map):
if hasattr(map, "rv"):
assert map.rv(xo=[0, 1, 2]).shape == (3, map.nw)
else:
pass
def test_render(self, map):
res = 100
        assert map.render(res=res).shape == (map.nw, res, res)
def test_amplitude():
"""Test the amplitude attribute of a multi-wavelength map."""
map = starry.Map(ydeg=1, nw=5)
assert np.allclose(map.amp, np.ones(5))
map.amp = 10.0
assert np.allclose(map.amp, 10.0 * np.ones(5))
|
rodlugerREPO_NAMEstarryPATH_START.@starry_extracted@starry-master@tests@greedy@test_multi_wavelength_greedy.py@.PATH_END.py
|
{
"filename": "controls.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/prompt-toolkit/py3/prompt_toolkit/layout/controls.py",
"type": "Python"
}
|
"""
User interface Controls for the layout.
"""
from __future__ import annotations
import time
from abc import ABCMeta, abstractmethod
from typing import TYPE_CHECKING, Callable, Hashable, Iterable, NamedTuple
from prompt_toolkit.application.current import get_app
from prompt_toolkit.buffer import Buffer
from prompt_toolkit.cache import SimpleCache
from prompt_toolkit.data_structures import Point
from prompt_toolkit.document import Document
from prompt_toolkit.filters import FilterOrBool, to_filter
from prompt_toolkit.formatted_text import (
AnyFormattedText,
StyleAndTextTuples,
to_formatted_text,
)
from prompt_toolkit.formatted_text.utils import (
fragment_list_to_text,
fragment_list_width,
split_lines,
)
from prompt_toolkit.lexers import Lexer, SimpleLexer
from prompt_toolkit.mouse_events import MouseButton, MouseEvent, MouseEventType
from prompt_toolkit.search import SearchState
from prompt_toolkit.selection import SelectionType
from prompt_toolkit.utils import get_cwidth
from .processors import (
DisplayMultipleCursors,
HighlightIncrementalSearchProcessor,
HighlightSearchProcessor,
HighlightSelectionProcessor,
Processor,
TransformationInput,
merge_processors,
)
if TYPE_CHECKING:
from prompt_toolkit.key_binding.key_bindings import (
KeyBindingsBase,
NotImplementedOrNone,
)
from prompt_toolkit.utils import Event
__all__ = [
"BufferControl",
"SearchBufferControl",
"DummyControl",
"FormattedTextControl",
"UIControl",
"UIContent",
]
GetLinePrefixCallable = Callable[[int, int], AnyFormattedText]
class UIControl(metaclass=ABCMeta):
"""
Base class for all user interface controls.
"""
def reset(self) -> None:
# Default reset. (Doesn't have to be implemented.)
pass
def preferred_width(self, max_available_width: int) -> int | None:
return None
def preferred_height(
self,
width: int,
max_available_height: int,
wrap_lines: bool,
get_line_prefix: GetLinePrefixCallable | None,
) -> int | None:
return None
def is_focusable(self) -> bool:
"""
Tell whether this user control is focusable.
"""
return False
@abstractmethod
def create_content(self, width: int, height: int) -> UIContent:
"""
Generate the content for this user control.
Returns a :class:`.UIContent` instance.
"""
def mouse_handler(self, mouse_event: MouseEvent) -> NotImplementedOrNone:
"""
Handle mouse events.
When `NotImplemented` is returned, it means that the given event is not
handled by the `UIControl` itself. The `Window` or key bindings can
decide to handle this event as scrolling or changing focus.
:param mouse_event: `MouseEvent` instance.
"""
return NotImplemented
def move_cursor_down(self) -> None:
"""
Request to move the cursor down.
This happens when scrolling down and the cursor is completely at the
top.
"""
def move_cursor_up(self) -> None:
"""
Request to move the cursor up.
"""
def get_key_bindings(self) -> KeyBindingsBase | None:
"""
The key bindings that are specific for this user control.
Return a :class:`.KeyBindings` object if some key bindings are
specified, or `None` otherwise.
"""
def get_invalidate_events(self) -> Iterable[Event[object]]:
"""
Return a list of `Event` objects. This can be a generator.
(The application collects all these events, in order to bind redraw
handlers to these events.)
"""
return []
class UIContent:
"""
Content generated by a user control. This content consists of a list of
lines.
:param get_line: Callable that takes a line number and returns the current
line. This is a list of (style_str, text) tuples.
:param line_count: The number of lines.
:param cursor_position: a :class:`.Point` for the cursor position.
:param menu_position: a :class:`.Point` for the menu position.
:param show_cursor: Make the cursor visible.
"""
def __init__(
self,
get_line: Callable[[int], StyleAndTextTuples] = (lambda i: []),
line_count: int = 0,
cursor_position: Point | None = None,
menu_position: Point | None = None,
show_cursor: bool = True,
):
self.get_line = get_line
self.line_count = line_count
self.cursor_position = cursor_position or Point(x=0, y=0)
self.menu_position = menu_position
self.show_cursor = show_cursor
# Cache for line heights. Maps cache key -> height
self._line_heights_cache: dict[Hashable, int] = {}
def __getitem__(self, lineno: int) -> StyleAndTextTuples:
"Make it iterable (iterate line by line)."
if lineno < self.line_count:
return self.get_line(lineno)
else:
raise IndexError
def get_height_for_line(
self,
lineno: int,
width: int,
get_line_prefix: GetLinePrefixCallable | None,
slice_stop: int | None = None,
) -> int:
"""
Return the height that a given line would need if it is rendered in a
space with the given width (using line wrapping).
:param get_line_prefix: None or a `Window.get_line_prefix` callable
that returns the prefix to be inserted before this line.
:param slice_stop: Wrap only "line[:slice_stop]" and return that
partial result. This is needed for scrolling the window correctly
when line wrapping.
:returns: The computed height.
"""
# Instead of using `get_line_prefix` as key, we use render_counter
# instead. This is more reliable, because this function could still be
# the same, while the content would change over time.
key = get_app().render_counter, lineno, width, slice_stop
try:
return self._line_heights_cache[key]
except KeyError:
if width == 0:
height = 10**8
else:
# Calculate line width first.
line = fragment_list_to_text(self.get_line(lineno))[:slice_stop]
text_width = get_cwidth(line)
if get_line_prefix:
# Add prefix width.
text_width += fragment_list_width(
to_formatted_text(get_line_prefix(lineno, 0))
)
# Slower path: compute path when there's a line prefix.
height = 1
# Keep wrapping as long as the line doesn't fit.
# Keep adding new prefixes for every wrapped line.
while text_width > width:
height += 1
text_width -= width
fragments2 = to_formatted_text(
get_line_prefix(lineno, height - 1)
)
prefix_width = get_cwidth(fragment_list_to_text(fragments2))
if prefix_width >= width: # Prefix doesn't fit.
height = 10**8
break
text_width += prefix_width
else:
# Fast path: compute height when there's no line prefix.
try:
quotient, remainder = divmod(text_width, width)
except ZeroDivisionError:
height = 10**8
else:
if remainder:
quotient += 1 # Like math.ceil.
height = max(1, quotient)
# Cache and return
self._line_heights_cache[key] = height
return height
class FormattedTextControl(UIControl):
"""
Control that displays formatted text. This can be either plain text, an
:class:`~prompt_toolkit.formatted_text.HTML` object an
:class:`~prompt_toolkit.formatted_text.ANSI` object, a list of ``(style_str,
text)`` tuples or a callable that takes no argument and returns one of
those, depending on how you prefer to do the formatting. See
``prompt_toolkit.layout.formatted_text`` for more information.
(It's mostly optimized for rather small widgets, like toolbars, menus, etc...)
When this UI control has the focus, the cursor will be shown in the upper
left corner of this control by default. There are two ways for specifying
the cursor position:
- Pass a `get_cursor_position` function which returns a `Point` instance
with the current cursor position.
- If the (formatted) text is passed as a list of ``(style, text)`` tuples
and there is one that looks like ``('[SetCursorPosition]', '')``, then
this will specify the cursor position.
Mouse support:
The list of fragments can also contain tuples of three items, looking like:
(style_str, text, handler). When mouse support is enabled and the user
clicks on this fragment, then the given handler is called. That handler
should accept two inputs: (Application, MouseEvent) and it should
either handle the event or return `NotImplemented` in case we want the
containing Window to handle this event.
:param focusable: `bool` or :class:`.Filter`: Tell whether this control is
focusable.
:param text: Text or formatted text to be displayed.
:param style: Style string applied to the content. (If you want to style
the whole :class:`~prompt_toolkit.layout.Window`, pass the style to the
:class:`~prompt_toolkit.layout.Window` instead.)
:param key_bindings: a :class:`.KeyBindings` object.
:param get_cursor_position: A callable that returns the cursor position as
a `Point` instance.
"""
def __init__(
self,
text: AnyFormattedText = "",
style: str = "",
focusable: FilterOrBool = False,
key_bindings: KeyBindingsBase | None = None,
show_cursor: bool = True,
modal: bool = False,
get_cursor_position: Callable[[], Point | None] | None = None,
) -> None:
self.text = text # No type check on 'text'. This is done dynamically.
self.style = style
self.focusable = to_filter(focusable)
# Key bindings.
self.key_bindings = key_bindings
self.show_cursor = show_cursor
self.modal = modal
self.get_cursor_position = get_cursor_position
#: Cache for the content.
self._content_cache: SimpleCache[Hashable, UIContent] = SimpleCache(maxsize=18)
self._fragment_cache: SimpleCache[int, StyleAndTextTuples] = SimpleCache(
maxsize=1
)
# Only cache one fragment list. We don't need the previous item.
# Render info for the mouse support.
self._fragments: StyleAndTextTuples | None = None
def reset(self) -> None:
self._fragments = None
def is_focusable(self) -> bool:
return self.focusable()
def __repr__(self) -> str:
return f"{self.__class__.__name__}({self.text!r})"
def _get_formatted_text_cached(self) -> StyleAndTextTuples:
"""
Get fragments, but only retrieve fragments once during one render run.
(This function is called several times during one rendering, because
we also need those for calculating the dimensions.)
"""
return self._fragment_cache.get(
get_app().render_counter, lambda: to_formatted_text(self.text, self.style)
)
def preferred_width(self, max_available_width: int) -> int:
"""
Return the preferred width for this control.
That is the width of the longest line.
"""
text = fragment_list_to_text(self._get_formatted_text_cached())
line_lengths = [get_cwidth(l) for l in text.split("\n")]
return max(line_lengths)
def preferred_height(
self,
width: int,
max_available_height: int,
wrap_lines: bool,
get_line_prefix: GetLinePrefixCallable | None,
) -> int | None:
"""
Return the preferred height for this control.
"""
content = self.create_content(width, None)
if wrap_lines:
height = 0
for i in range(content.line_count):
height += content.get_height_for_line(i, width, get_line_prefix)
if height >= max_available_height:
return max_available_height
return height
else:
return content.line_count
def create_content(self, width: int, height: int | None) -> UIContent:
# Get fragments
fragments_with_mouse_handlers = self._get_formatted_text_cached()
fragment_lines_with_mouse_handlers = list(
split_lines(fragments_with_mouse_handlers)
)
# Strip mouse handlers from fragments.
fragment_lines: list[StyleAndTextTuples] = [
[(item[0], item[1]) for item in line]
for line in fragment_lines_with_mouse_handlers
]
# Keep track of the fragments with mouse handler, for later use in
# `mouse_handler`.
self._fragments = fragments_with_mouse_handlers
# If there is a `[SetCursorPosition]` in the fragment list, set the
# cursor position here.
def get_cursor_position(
fragment: str = "[SetCursorPosition]",
) -> Point | None:
for y, line in enumerate(fragment_lines):
x = 0
for style_str, text, *_ in line:
if fragment in style_str:
return Point(x=x, y=y)
x += len(text)
return None
# If there is a `[SetMenuPosition]`, set the menu over here.
def get_menu_position() -> Point | None:
return get_cursor_position("[SetMenuPosition]")
cursor_position = (self.get_cursor_position or get_cursor_position)()
# Create content, or take it from the cache.
key = (tuple(fragments_with_mouse_handlers), width, cursor_position)
def get_content() -> UIContent:
return UIContent(
get_line=lambda i: fragment_lines[i],
line_count=len(fragment_lines),
show_cursor=self.show_cursor,
cursor_position=cursor_position,
menu_position=get_menu_position(),
)
return self._content_cache.get(key, get_content)
def mouse_handler(self, mouse_event: MouseEvent) -> NotImplementedOrNone:
"""
Handle mouse events.
        (When the fragment list contained mouse handlers and the user clicked on
        any of these, the matching handler is called. This handler can still
return `NotImplemented` in case we want the
:class:`~prompt_toolkit.layout.Window` to handle this particular
event.)
"""
if self._fragments:
# Read the generator.
fragments_for_line = list(split_lines(self._fragments))
try:
fragments = fragments_for_line[mouse_event.position.y]
except IndexError:
return NotImplemented
else:
# Find position in the fragment list.
xpos = mouse_event.position.x
# Find mouse handler for this character.
count = 0
for item in fragments:
count += len(item[1])
if count > xpos:
if len(item) >= 3:
# Handler found. Call it.
# (Handler can return NotImplemented, so return
# that result.)
handler = item[2]
return handler(mouse_event)
else:
break
# Otherwise, don't handle here.
return NotImplemented
def is_modal(self) -> bool:
return self.modal
def get_key_bindings(self) -> KeyBindingsBase | None:
return self.key_bindings
class DummyControl(UIControl):
"""
A dummy control object that doesn't paint any content.
Useful for filling a :class:`~prompt_toolkit.layout.Window`. (The
`fragment` and `char` attributes of the `Window` class can be used to
define the filling.)
"""
def create_content(self, width: int, height: int) -> UIContent:
def get_line(i: int) -> StyleAndTextTuples:
return []
return UIContent(get_line=get_line, line_count=100**100) # Something very big.
def is_focusable(self) -> bool:
return False
class _ProcessedLine(NamedTuple):
fragments: StyleAndTextTuples
source_to_display: Callable[[int], int]
display_to_source: Callable[[int], int]
class BufferControl(UIControl):
"""
Control for visualizing the content of a :class:`.Buffer`.
:param buffer: The :class:`.Buffer` object to be displayed.
:param input_processors: A list of
:class:`~prompt_toolkit.layout.processors.Processor` objects.
:param include_default_input_processors: When True, include the default
processors for highlighting of selection, search and displaying of
multiple cursors.
:param lexer: :class:`.Lexer` instance for syntax highlighting.
:param preview_search: `bool` or :class:`.Filter`: Show search while
typing. When this is `True`, probably you want to add a
``HighlightIncrementalSearchProcessor`` as well. Otherwise only the
cursor position will move, but the text won't be highlighted.
:param focusable: `bool` or :class:`.Filter`: Tell whether this control is focusable.
    :param focus_on_click: Focus this buffer when it's clicked, but not yet focused.
:param key_bindings: a :class:`.KeyBindings` object.
"""
def __init__(
self,
buffer: Buffer | None = None,
input_processors: list[Processor] | None = None,
include_default_input_processors: bool = True,
lexer: Lexer | None = None,
preview_search: FilterOrBool = False,
focusable: FilterOrBool = True,
search_buffer_control: (
None | SearchBufferControl | Callable[[], SearchBufferControl]
) = None,
menu_position: Callable[[], int | None] | None = None,
focus_on_click: FilterOrBool = False,
key_bindings: KeyBindingsBase | None = None,
):
self.input_processors = input_processors
self.include_default_input_processors = include_default_input_processors
self.default_input_processors = [
HighlightSearchProcessor(),
HighlightIncrementalSearchProcessor(),
HighlightSelectionProcessor(),
DisplayMultipleCursors(),
]
self.preview_search = to_filter(preview_search)
self.focusable = to_filter(focusable)
self.focus_on_click = to_filter(focus_on_click)
self.buffer = buffer or Buffer()
self.menu_position = menu_position
self.lexer = lexer or SimpleLexer()
self.key_bindings = key_bindings
self._search_buffer_control = search_buffer_control
#: Cache for the lexer.
#: Often, due to cursor movement, undo/redo and window resizing
#: operations, it happens that a short time, the same document has to be
#: lexed. This is a fairly easy way to cache such an expensive operation.
self._fragment_cache: SimpleCache[
Hashable, Callable[[int], StyleAndTextTuples]
] = SimpleCache(maxsize=8)
self._last_click_timestamp: float | None = None
self._last_get_processed_line: Callable[[int], _ProcessedLine] | None = None
def __repr__(self) -> str:
return f"<{self.__class__.__name__} buffer={self.buffer!r} at {id(self)!r}>"
@property
def search_buffer_control(self) -> SearchBufferControl | None:
result: SearchBufferControl | None
if callable(self._search_buffer_control):
result = self._search_buffer_control()
else:
result = self._search_buffer_control
assert result is None or isinstance(result, SearchBufferControl)
return result
@property
def search_buffer(self) -> Buffer | None:
control = self.search_buffer_control
if control is not None:
return control.buffer
return None
@property
def search_state(self) -> SearchState:
"""
Return the `SearchState` for searching this `BufferControl`. This is
always associated with the search control. If one search bar is used
for searching multiple `BufferControls`, then they share the same
`SearchState`.
"""
search_buffer_control = self.search_buffer_control
if search_buffer_control:
return search_buffer_control.searcher_search_state
else:
return SearchState()
def is_focusable(self) -> bool:
return self.focusable()
def preferred_width(self, max_available_width: int) -> int | None:
"""
This should return the preferred width.
Note: We don't specify a preferred width according to the content,
because it would be too expensive. Calculating the preferred
width can be done by calculating the longest line, but this would
require applying all the processors to each line. This is
unfeasible for a larger document, and doing it for small
documents only would result in inconsistent behavior.
"""
return None
def preferred_height(
self,
width: int,
max_available_height: int,
wrap_lines: bool,
get_line_prefix: GetLinePrefixCallable | None,
) -> int | None:
# Calculate the content height, if it was drawn on a screen with the
# given width.
height = 0
content = self.create_content(width, height=1) # Pass a dummy '1' as height.
# When line wrapping is off, the height should be equal to the amount
# of lines.
if not wrap_lines:
return content.line_count
# When the number of lines exceeds the max_available_height, just
# return max_available_height. No need to calculate anything.
if content.line_count >= max_available_height:
return max_available_height
for i in range(content.line_count):
height += content.get_height_for_line(i, width, get_line_prefix)
if height >= max_available_height:
return max_available_height
return height
def _get_formatted_text_for_line_func(
self, document: Document
) -> Callable[[int], StyleAndTextTuples]:
"""
Create a function that returns the fragments for a given line.
"""
# Cache using `document.text`.
def get_formatted_text_for_line() -> Callable[[int], StyleAndTextTuples]:
return self.lexer.lex_document(document)
key = (document.text, self.lexer.invalidation_hash())
return self._fragment_cache.get(key, get_formatted_text_for_line)
def _create_get_processed_line_func(
self, document: Document, width: int, height: int
) -> Callable[[int], _ProcessedLine]:
"""
Create a function that takes a line number of the current document and
returns a _ProcessedLine(processed_fragments, source_to_display, display_to_source)
tuple.
"""
# Merge all input processors together.
input_processors = self.input_processors or []
if self.include_default_input_processors:
input_processors = self.default_input_processors + input_processors
merged_processor = merge_processors(input_processors)
def transform(lineno: int, fragments: StyleAndTextTuples) -> _ProcessedLine:
"Transform the fragments for a given line number."
# Get cursor position at this line.
def source_to_display(i: int) -> int:
"""X position from the buffer to the x position in the
processed fragment list. By default, we start from the 'identity'
operation."""
return i
transformation = merged_processor.apply_transformation(
TransformationInput(
self, document, lineno, source_to_display, fragments, width, height
)
)
return _ProcessedLine(
transformation.fragments,
transformation.source_to_display,
transformation.display_to_source,
)
def create_func() -> Callable[[int], _ProcessedLine]:
get_line = self._get_formatted_text_for_line_func(document)
cache: dict[int, _ProcessedLine] = {}
def get_processed_line(i: int) -> _ProcessedLine:
try:
return cache[i]
except KeyError:
processed_line = transform(i, get_line(i))
cache[i] = processed_line
return processed_line
return get_processed_line
return create_func()
def create_content(
self, width: int, height: int, preview_search: bool = False
) -> UIContent:
"""
Create a UIContent.
"""
buffer = self.buffer
# Trigger history loading of the buffer. We do this during the
# rendering of the UI here, because it needs to happen when an
# `Application` with its event loop is running. During the rendering of
# the buffer control is the earliest place we can achieve this, where
# we're sure the right event loop is active, and don't require user
# interaction (like in a key binding).
buffer.load_history_if_not_yet_loaded()
# Get the document to be shown. If we are currently searching (the
# search buffer has focus, and the preview_search filter is enabled),
# then use the search document, which has possibly a different
# text/cursor position.)
search_control = self.search_buffer_control
preview_now = preview_search or bool(
# Only if this feature is enabled.
self.preview_search()
and
# And something was typed in the associated search field.
search_control
and search_control.buffer.text
and
# And we are searching in this control. (Many controls can point to
# the same search field, like in Pyvim.)
get_app().layout.search_target_buffer_control == self
)
if preview_now and search_control is not None:
ss = self.search_state
document = buffer.document_for_search(
SearchState(
text=search_control.buffer.text,
direction=ss.direction,
ignore_case=ss.ignore_case,
)
)
else:
document = buffer.document
get_processed_line = self._create_get_processed_line_func(
document, width, height
)
self._last_get_processed_line = get_processed_line
def translate_rowcol(row: int, col: int) -> Point:
"Return the content column for this coordinate."
return Point(x=get_processed_line(row).source_to_display(col), y=row)
def get_line(i: int) -> StyleAndTextTuples:
"Return the fragments for a given line number."
fragments = get_processed_line(i).fragments
# Add a space at the end, because that is a possible cursor
# position. (When inserting after the input.) We should do this on
# all the lines, not just the line containing the cursor. (Because
# otherwise, line wrapping/scrolling could change when moving the
# cursor around.)
fragments = fragments + [("", " ")]
return fragments
content = UIContent(
get_line=get_line,
line_count=document.line_count,
cursor_position=translate_rowcol(
document.cursor_position_row, document.cursor_position_col
),
)
# If there is an auto completion going on, use that start point for a
# pop-up menu position. (But only when this buffer has the focus --
# there is only one place for a menu, determined by the focused buffer.)
if get_app().layout.current_control == self:
menu_position = self.menu_position() if self.menu_position else None
if menu_position is not None:
assert isinstance(menu_position, int)
menu_row, menu_col = buffer.document.translate_index_to_position(
menu_position
)
content.menu_position = translate_rowcol(menu_row, menu_col)
elif buffer.complete_state:
# Position for completion menu.
# Note: We use 'min', because the original cursor position could be
# behind the input string when the actual completion is for
# some reason shorter than the text we had before. (A completion
# can change and shorten the input.)
menu_row, menu_col = buffer.document.translate_index_to_position(
min(
buffer.cursor_position,
buffer.complete_state.original_document.cursor_position,
)
)
content.menu_position = translate_rowcol(menu_row, menu_col)
else:
content.menu_position = None
return content
def mouse_handler(self, mouse_event: MouseEvent) -> NotImplementedOrNone:
"""
Mouse handler for this control.
"""
buffer = self.buffer
position = mouse_event.position
# Focus buffer when clicked.
if get_app().layout.current_control == self:
if self._last_get_processed_line:
processed_line = self._last_get_processed_line(position.y)
# Translate coordinates back to the cursor position of the
# original input.
xpos = processed_line.display_to_source(position.x)
index = buffer.document.translate_row_col_to_index(position.y, xpos)
# Set the cursor position.
if mouse_event.event_type == MouseEventType.MOUSE_DOWN:
buffer.exit_selection()
buffer.cursor_position = index
elif (
mouse_event.event_type == MouseEventType.MOUSE_MOVE
and mouse_event.button != MouseButton.NONE
):
# Click and drag to highlight a selection
if (
buffer.selection_state is None
and abs(buffer.cursor_position - index) > 0
):
buffer.start_selection(selection_type=SelectionType.CHARACTERS)
buffer.cursor_position = index
elif mouse_event.event_type == MouseEventType.MOUSE_UP:
# When the cursor was moved to another place, select the text.
# (The >1 is actually a small but acceptable workaround for
# selecting text in Vi navigation mode. In navigation mode,
# the cursor can never be after the text, so the cursor
# will be repositioned automatically.)
if abs(buffer.cursor_position - index) > 1:
if buffer.selection_state is None:
buffer.start_selection(
selection_type=SelectionType.CHARACTERS
)
buffer.cursor_position = index
# Select word around cursor on double click.
# Two MOUSE_UP events in a short timespan are considered a double click.
double_click = (
self._last_click_timestamp
and time.time() - self._last_click_timestamp < 0.3
)
self._last_click_timestamp = time.time()
if double_click:
start, end = buffer.document.find_boundaries_of_current_word()
buffer.cursor_position += start
buffer.start_selection(selection_type=SelectionType.CHARACTERS)
buffer.cursor_position += end - start
else:
# Don't handle scroll events here.
return NotImplemented
# Not focused, but focusing on click events.
else:
if (
self.focus_on_click()
and mouse_event.event_type == MouseEventType.MOUSE_UP
):
# Focus happens on mouseup. (If we did this on mousedown, the
# up event will be received at the point where this widget is
# focused and be handled anyway.)
get_app().layout.current_control = self
else:
return NotImplemented
return None
def move_cursor_down(self) -> None:
b = self.buffer
b.cursor_position += b.document.get_cursor_down_position()
def move_cursor_up(self) -> None:
b = self.buffer
b.cursor_position += b.document.get_cursor_up_position()
def get_key_bindings(self) -> KeyBindingsBase | None:
"""
When additional key bindings are given. Return these.
"""
return self.key_bindings
def get_invalidate_events(self) -> Iterable[Event[object]]:
"""
Return the Window invalidate events.
"""
# Whenever the buffer changes, the UI has to be updated.
yield self.buffer.on_text_changed
yield self.buffer.on_cursor_position_changed
yield self.buffer.on_completions_changed
yield self.buffer.on_suggestion_set
class SearchBufferControl(BufferControl):
"""
:class:`.BufferControl` which is used for searching another
:class:`.BufferControl`.
:param ignore_case: Search case insensitive.
"""
def __init__(
self,
buffer: Buffer | None = None,
input_processors: list[Processor] | None = None,
lexer: Lexer | None = None,
focus_on_click: FilterOrBool = False,
key_bindings: KeyBindingsBase | None = None,
ignore_case: FilterOrBool = False,
):
super().__init__(
buffer=buffer,
input_processors=input_processors,
lexer=lexer,
focus_on_click=focus_on_click,
key_bindings=key_bindings,
)
# If this BufferControl is used as a search field for one or more other
# BufferControls, then represents the search state.
self.searcher_search_state = SearchState(ignore_case=ignore_case)
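# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): one way to wire a
# FormattedTextControl with a fragment-level mouse handler into a
# Window/Layout/Application, as described in the FormattedTextControl
# docstring above. The imports come from prompt_toolkit's public API; the
# handler takes a single MouseEvent, matching how mouse_handler() invokes it
# in this module. The function is illustrative only and is never called here.
# ---------------------------------------------------------------------------
def _formatted_text_control_example() -> None:
    from prompt_toolkit.application import Application
    from prompt_toolkit.layout import Layout, Window

    def on_click(mouse_event: MouseEvent) -> None:
        # Returning None (rather than NotImplemented) marks the event as handled.
        print("fragment clicked")

    control = FormattedTextControl(
        text=[
            ("bold", "Click me", on_click),  # third tuple item = mouse handler
            ("", "  (plain text, no handler)"),
        ],
        focusable=True,
    )
    application = Application(layout=Layout(Window(control)), mouse_support=True)
    application.run()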
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@prompt-toolkit@py3@prompt_toolkit@layout@controls.py@.PATH_END.py
|
{
"filename": "load_model.py",
"repo_name": "vtardugno/TESS-CNN",
"repo_path": "TESS-CNN_extracted/TESS-CNN-main/load_model.py",
"type": "Python"
}
|
# -*- coding: utf-8 -*-
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import torch.nn.functional as F
import os
import torch
import torch.nn as nn
import time
from sklearn import manifold, datasets
from sklearn.metrics import precision_recall_fscore_support, roc_curve, confusion_matrix
start_time = time.time()
torch.autograd.set_detect_anomaly(True)
np.random.seed(410)
training_index = "1440chunks_21_everything_2"
# PREP AND LOAD DATA
def prep_data(all_x,all_y,split,tics):
#all_x = torch.load(x)
#all_y = torch.load(y)
#all_x = torch.reshape(all_x, (all_x.shape[0],all_x.shape[2],all_x.shape[1]))
data_x = all_x[:split,:,:]
data_x_val = all_x[split:,:,:]
data_y = all_y[:split,:]
data_y_val = all_y[split:,:]
tics_train = tics[:split]
tics_val = tics[split:]
planet_mask = np.where((data_y[:,0]==1))
planet_mask_val = np.where((data_y_val[:,0]==1))
planets_x = data_x[planet_mask]
planets_y = data_y[planet_mask]
planets_x_val = data_x_val[planet_mask_val]
planets_y_val = data_y_val[planet_mask_val]
planets_tics = tics_train[planet_mask]
planets_tics_val = tics_val[planet_mask_val]
eb_mask = np.where((data_y[:,1]==1))
eb_mask_val = np.where((data_y_val[:,1]==1))
eb_x = data_x[eb_mask]
eb_y = data_y[eb_mask]
eb_x_val = data_x_val[eb_mask_val]
eb_y_val = data_y_val[eb_mask_val]
eb_tics = tics_train[eb_mask]
eb_tics_val = tics_val[eb_mask_val]
other_mask = np.where((data_y[:,2]==1))
other_mask_val = np.where((data_y_val[:,2]==1))
other_x = data_x[other_mask]
other_y = data_y[other_mask]
other_x_val = data_x_val[other_mask_val]
other_y_val = data_y_val[other_mask_val]
other_tics = tics_train[other_mask]
other_tics_val = tics_val[other_mask_val]
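    # Oversample to balance the classes: planet chunks are repeated 8x and "other"
    # (false-positive) chunks 4x relative to the eclipsing-binary chunks, for both
    # the training and validation splits.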
data_x = torch.cat((planets_x,planets_x,planets_x,planets_x,planets_x,planets_x,planets_x,planets_x,eb_x,other_x,other_x,other_x,other_x),0)#nothing_x),0)
data_y = torch.cat((planets_y,planets_y,planets_y,planets_y,planets_y,planets_y,planets_y,planets_y,eb_y,other_y,other_y,other_y,other_y),0)#,nothing_y),0)
data_x_val = torch.cat((planets_x_val,planets_x_val,planets_x_val,planets_x_val,planets_x_val,planets_x_val,planets_x_val,planets_x_val,eb_x_val,other_x_val,other_x_val,other_x_val,other_x_val),0)#,nothing_x_val),0)
data_y_val = torch.cat((planets_y_val,planets_y_val,planets_y_val,planets_y_val,planets_y_val,planets_y_val,planets_y_val,planets_y_val,eb_y_val,other_y_val,other_y_val,other_y_val,other_y_val),0)#,nothing_y_val),0)
tics_train = torch.cat((planets_tics,planets_tics,planets_tics,planets_tics,planets_tics,planets_tics,planets_tics,planets_tics,eb_tics,other_tics,other_tics,other_tics,other_tics),0)#,nothing_y),0)
tics_val = torch.cat((planets_tics_val,planets_tics_val,planets_tics_val,planets_tics_val,planets_tics_val,planets_tics_val,planets_tics_val,planets_tics_val,eb_tics_val,other_tics_val,other_tics_val,other_tics_val,other_tics_val),0)#,nothing_x_val),0)
mask = np.array(range(len(data_x)))
mask_val = np.array(range(len(data_x_val)))
#np.random.shuffle(mask)
#np.random.shuffle(mask_val)
#data_x = data_x[mask]
#data_y = data_y[mask]
#data_x_val = data_x_val[mask_val]
#data_y_val = data_y_val[mask_val]
print("planets", planets_y.shape, planets_y_val.shape, "eb", eb_y.shape, eb_y_val.shape, "fp", other_y.shape, other_y_val.shape, "tics", tics_train.shape, tics_val.shape)#, "nothing", nothing_y.shape, nothing_y_val.shape)
return data_x, data_x_val, data_y[:,:-1], data_y_val[:,:-1], tics_train, tics_val
data1x = torch.load("data_x_chunks_1440_no_tics_filterbkg.pt")#[:,[0,1,2,3,7],:]
data2x = torch.load("data_x_chunks_1440_no_tics_filterbkg_2.pt")#[:,[0,1,2,3,7],:]
data3x = torch.load("data_x_chunks_1440_no_tics_filterbkg_3.pt")#[:,[0,1,2,3,7],:]
# data4x = torch.load("data_x_chunks_1440_no_tics.pt")
# data5x = torch.load("data_x_chunks_1440_no_tics_2.pt")
# data6x = torch.load("data_x_chunks_1440_no_tics_3.pt")
data1y = torch.load("data_y_chunks_1440_no_tics_filterbkg.pt")
data2y = torch.load("data_y_chunks_1440_no_tics_filterbkg_2.pt")
data3y = torch.load("data_y_chunks_1440_no_tics_filterbkg_3.pt")
id1 = torch.load("tic_ids_chunks_1440_no_tics_filterbkg.pt")
id2 = torch.load("tic_ids_chunks_1440_no_tics_filterbkg_2.pt")
id3 = torch.load("tic_ids_chunks_1440_no_tics_filterbkg_3.pt")
datax = torch.cat((data1x,data2x,data3x),0)
datay = torch.cat((data1y,data2y,data3y),0)
ids = torch.cat((id1,id2,id3),0)
#datax2 = torch.cat((data4x,data5x,data6x),0)
mask = np.array(range(len(datax)))
np.random.shuffle(mask)
datax = datax[mask]
datay = datay[mask]
ids = ids[mask]
#datax2 = datax2[mask]
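# Split at chunk index 225000: earlier (shuffled) chunks go to training, the rest to validation (see prep_data).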
data_x, data_x_val, data_y, data_y_val, tics_train, tics_val = prep_data(datax, datay ,225000,ids)
#data_x_2 , _, _, _, _, _ = prep_data(datax2, datay ,225000,ids)
mask = np.array(range(len(data_x)))
mask_val = np.array(range(len(data_x_val)))
np.random.shuffle(mask)
np.random.shuffle(mask_val)
data_x = data_x[mask]
data_y = data_y[mask]
data_x_val = data_x_val[mask_val]
data_y_val = data_y_val[mask_val]
tics_train = tics_train[mask]
tics_val = tics_val[mask_val]
#data_x_2 = data_x_2[mask]
print(data_x.shape,data_x_val.shape)
training_set = torch.utils.data.TensorDataset(data_x,data_y, tics_train)
training_generator = torch.utils.data.DataLoader(training_set, batch_size = 1024, shuffle=True)
validation_set = torch.utils.data.TensorDataset(data_x_val,data_y_val,tics_val)
validation_generator = torch.utils.data.DataLoader(validation_set,batch_size = 1024, shuffle=True)
print(data_x.shape,data_x_val.shape)
# LOAD MODEL
channels, n_out = 6,3
class Classifier(nn.Module):
def __init__(self, channels, n_out):
super(Classifier,self).__init__()
self.conv1 = nn.Conv1d(channels, 32, kernel_size=5, padding="same")
self.conv1_2 = nn.Conv1d(32, 32, kernel_size=5, padding="same")
self.pool1 = nn.MaxPool1d(2)
self.conv2 = nn.Conv1d(32, 64, kernel_size=5, padding="same")
self.conv2_2 = nn.Conv1d(64, 64, kernel_size=5, padding="same")
self.pool2 = nn.MaxPool1d(2)
self.conv3 = nn.Conv1d(64, 128, kernel_size=5, padding="same")
self.conv3_2 = nn.Conv1d(128, 128, kernel_size=5, padding="same")
self.pool3 = nn.MaxPool1d(2)
self.conv4 = nn.Conv1d(128,256, kernel_size=5, padding="same")
self.conv4_2 = nn.Conv1d(256,256, kernel_size=5, padding="same")
self.pool4 = nn.MaxPool1d(2)
self.conv5 = nn.Conv1d(256,128, kernel_size=5, padding="same") #new
self.conv6 = nn.Conv1d(128,64, kernel_size=5, padding="same") #new
self.pool5 = nn.MaxPool1d(2)#new
self.conv7 = nn.Conv1d(64,32, kernel_size=5, padding="same") #new
self.conv8 = nn.Conv1d(32,16, kernel_size=5, padding="same") #new
self.pool6 = nn.MaxPool1d(2)#new
#self.linear1 = nn.Linear(256, 128)
#self.linear2 = nn.Linear(128, 64)
#self.linear3 = nn.Linear(64, 32)
#self.linear4 = nn.Linear(32, 16)
self.linear5 = nn.Linear(16, n_out)
self.dropout = nn.Dropout(0.3)
def forward(self, x):
#print(x.shape)
x = F.relu(self.conv1(x))
x = F.relu(self.conv1_2(x))
x = self.dropout(x)
x = self.pool1(x)
#print(x.shape)
x = F.relu(self.conv2(x))
x = F.relu(self.conv2_2(x))
x = self.dropout(x)
x = self.pool2(x)
x = F.relu(self.conv3(x))
x = F.relu(self.conv3_2(x))
x = self.dropout(x)
x = self.pool3(x)
x = F.relu(self.conv4(x))
x = F.relu(self.conv4_2(x))
x = self.dropout(x)
x = self.pool4(x)
x = F.relu(self.conv5(x)) #new starts here
x = F.relu(self.conv6(x))
x = self.dropout(x)
x = self.pool5(x)
x = F.relu(self.conv7(x))
x = F.relu(self.conv8(x))
x = self.pool6(x) #new ends here
x, _ = x.max(dim=-1)
#print(x.shape)
#x = F.relu(self.linear1(x))
#x = self.dropout(x)
#x = F.relu(self.linear2(x))
#x = self.dropout(x)
#x = F.relu(self.linear3(x))
#x = self.dropout(x)
#print(x.shape)
#x = F.relu(self.linear4(x))
x = F.softmax(self.linear5(x),dim=1)
return x
net = Classifier(channels, n_out)
#net.cuda()
net.load_state_dict(torch.load(f'training{training_index}/cp_{training_index}.ckpt',map_location=torch.device('cpu')))
# PERFORMANCE EVALUATION
y_true = np.array([])
y_pred = np.array([])
x_batch = np.array([])
tic_ids = np.array([])
net.eval()
for local_batch, local_labels, tics in validation_generator:
# Transfer to GPU
#local_batch, local_labels = local_batch.cuda(), local_labels.cuda()
pred_y = net(local_batch)
pred_y = pred_y.cpu().detach().numpy()
true_y = local_labels.numpy()
if y_true.shape[0] == 0:
y_true = true_y
y_pred = pred_y
x_batch = local_batch[:,[0,1],:]
tic_ids = tics.numpy()
else:
y_true = np.append(y_true, true_y, axis=0)
y_pred = np.append(y_pred, pred_y, axis=0)
x_batch = np.append(x_batch, local_batch[:,[0,1],:], axis=0)
tic_ids = np.append(tic_ids, tics, axis=0)
print(confusion_matrix(np.argmax(y_true,axis=1), np.argmax(y_pred,axis=1)))
print(precision_recall_fscore_support(np.argmax(y_true,axis=1), np.argmax(y_pred,axis=1), average=None))
a, b, c = roc_curve(y_true[:,0], y_pred[:,0])
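# Note (added for clarity): roc_curve returns the false-positive rates (a), the
# true-positive rates (b) and the decision thresholds (c) for the planet class
# (column 0 of y_true/y_pred). `thr` below indexes into the threshold array, so
# c[thr] is the probability cutoff used as the operating point for the
# planet / non-planet split.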
thr = 3500
other = y_pred[y_pred[:,0]<c[thr]]
pc = y_pred[y_pred[:,0]>=c[thr]]
correct = 0
cont = 0
planet = 0
missed = 0
others = 0
for i in range(len(y_pred)):
if y_pred[i,0] < c[thr] and y_true[i,0] == 0:
correct = correct + 1
others = others + 1
elif y_pred[i,0] >= c[thr] and y_true[i,0] == 1:
correct = correct + 1
planet = planet + 1
elif y_pred[i,0] >= c[thr] and y_true[i,0] == 0:
cont = cont + 1
elif y_pred[i,0] < c[thr] and y_true[i,0] == 1:
missed = missed + 1
accuracy = planet/len(y_true[y_true[:,0]==1])
contamination = cont/(len(pc))
print(f"percent of planets found: {accuracy}, contamination: {contamination}, planets found: {planet}, planets missed: {missed}, false positives: {cont}")
print(f"% of FP and EB removed: {(others)*100/(len(y_true[y_true[:,0]==0]))}")
print(f"num removed: {others}")
fp_as_pc = []
fp_as_eb = []
fp_as_fp = []
fp_as_pc_tics = []
fp_as_eb_tics = []
for i in range(len(y_true)):
if np.argmax(y_true,axis=1)[i] == 2 and np.argmax(y_pred,axis=1)[i] == 0:
fp_as_pc.append(i)
fp_as_pc_tics.append(tic_ids[i])
if np.argmax(y_true,axis=1)[i] == 2 and np.argmax(y_pred,axis=1)[i] == 1:
fp_as_eb.append(i)
fp_as_eb_tics.append(tic_ids[i])
if np.argmax(y_true,axis=1)[i] == 2 and np.argmax(y_pred,axis=1)[i] == 2:
fp_as_fp.append(i)
np.savetxt("fp_as_pc_tics.csv",np.array(fp_as_pc_tics),delimiter=",")
np.savetxt("fp_as_eb_tics.csv",np.array(fp_as_eb_tics),delimiter=",")
def make_plot(cols, rows, size, label,title):
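    # Added description: draw a rows x cols grid of randomly chosen light curves from
    # the index list `label` (target flux on one figure, background flux on a second),
    # deleting each chosen index so that no light curve is plotted twice.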
fig, axs = plt.subplots(cols,rows, figsize=size)
fig.tight_layout(pad=-1.5)
fig.suptitle(title, fontsize=20,y=0.99)
plt.subplots_adjust(top=0.96,right=0.99)
fig2, axs2 = plt.subplots(cols,rows, figsize=size)
fig2.tight_layout(pad=-1.5)
fig2.suptitle(title + " - background", fontsize=20,y=0.99)
plt.subplots_adjust(top=0.96,right=0.99)
for row in range(rows):
for col in range(cols):
lc = np.random.choice(label,replace=False)
axs[row][col].plot(x_batch[lc,0,:],".",markersize=1,color="indigo",label=f"{tic_ids[lc][0]:.0f}") #magic
axs[row][col].tick_params(left=False,bottom=False,labelleft=False,labelbottom=False)
axs[row][col].legend()
#print(y_true[lc,2])
axs2[row][col].plot(x_batch[lc,1,:],".",markersize=0.6,color="tab:orange")
axs2[row][col].tick_params(left=False,bottom=False,labelleft=False,labelbottom=False)
label = np.delete(label,np.where(label==lc))
#axs[row][col].axis("off")
fig.savefig(title+".png")
fig2.savefig(title+"_bkg.png")
make_plot(8,8,(15,18),fp_as_eb,"FP as EB")
make_plot(8,8,(15,18),fp_as_pc,"FP as PC")
make_plot(8,8,(15,18),fp_as_fp,"FP as FP")
end_time = time.time()
elapsed_time = end_time - start_time
print("Elapsed time: ", elapsed_time)
fp_mask = np.where((data_y[:,2]==1))
def make_plot2(cols, rows, size, label,title):
label = label[0]
print(len(label))
fig, axs = plt.subplots(cols,rows, figsize=size)
fig.tight_layout(pad=-1.5)
fig.suptitle(title + " (no filter)", fontsize=20,y=0.99)
plt.subplots_adjust(top=0.96,right=0.99)
fig2, axs2 = plt.subplots(cols,rows, figsize=size)
fig2.tight_layout(pad=-1.5)
fig2.suptitle(title + " - bkg (no filter)", fontsize=20,y=0.99)
plt.subplots_adjust(top=0.96,right=0.99)
for row in range(rows):
for col in range(cols):
lc = np.random.choice(label,replace=False)
axs[row][col].plot(data_x[lc,0,:],".",markersize=1,color="indigo",label=f"{tics_train[lc,0]:.0f}") #magic
axs[row][col].tick_params(left=False,bottom=False,labelleft=False,labelbottom=False)
#print(y_true[lc,2])
axs2[row][col].plot(data_x[lc,1,:],".",markersize=0.6,color="tab:orange")
axs2[row][col].tick_params(left=False,bottom=False,labelleft=False,labelbottom=False)
label = np.delete(label,np.where(label==lc))
#axs[row][col].axis("off")
fig.savefig(title+".png")
fig2.savefig(title+"_bkg.png")
make_plot2(9,9,(15,18),fp_mask,"fps")
|
vtardugnoREPO_NAMETESS-CNNPATH_START.@TESS-CNN_extracted@TESS-CNN-main@load_model.py@.PATH_END.py
|
{
"filename": "plot_line_survey.py",
"repo_name": "radis/radis",
"repo_path": "radis_extracted/radis-master/examples/0_Database_handling/plot_line_survey.py",
"type": "Python"
}
|
# -*- coding: utf-8 -*-
"""
===========
Line Survey
===========
Plot details of every single line in a spectrum.
Uses the :py:meth:`~radis.spectrum.spectrum.Spectrum.line_survey` function.
"""
from radis import calc_spectrum
s = calc_spectrum(
wavenum_min=2380,
wavenum_max=2400,
mole_fraction=400e-6,
path_length=100, # cm
Tgas=1500,
molecule="CO2",
isotope=[1],
databank="hitran",
export_lines=True,
)
s.apply_slit(2, "nm")
s.line_survey(overlay="radiance", barwidth=0.01)
|
radisREPO_NAMEradisPATH_START.@radis_extracted@radis-master@examples@0_Database_handling@plot_line_survey.py@.PATH_END.py
|
{
"filename": "sklearn.py",
"repo_name": "hyperopt/hyperopt",
"repo_path": "hyperopt_extracted/hyperopt-master/hyperopt/sklearn.py",
"type": "Python"
}
|
"""Scikit-learn integration.
This class is based on :class:`sklearn.model_selection._search.BaseSearchCV` and
inspired by :class:`sklearn.model_selection._search_successive_halving.BaseSuccessiveHalving`.
"""
import numpy as np
from sklearn.model_selection._search import is_classifier
from sklearn.model_selection._search import BaseSearchCV
from sklearn.utils.multiclass import check_classification_targets, unique_labels
from sklearn.utils.validation import check_array
from hyperopt.base import STATUS_OK, Trials
from hyperopt.fmin import fmin
class HyperoptSearchCV(BaseSearchCV):
"""Hyper-parameter search with hyperopt.
Parameters
----------
estimator : estimator object
An object of that type is instantiated for each set of candidate parameters.
This is assumed to implement the ``scikit-learn`` estimator interface. The
estimator needs to provide a ``score`` method or ``scoring`` must be passed.
space : hyperopt.pyll.Apply node or "annotated"
The set of possible arguments to `fn` is the set of objects
that could be created with non-zero probability by drawing randomly
from this stochastic program involving hp_<xxx> nodes
(see `hyperopt.hp` and `hyperopt.pyll_utils`).
If set to "annotated", the space will be read from the type hints in ``fn``. Ex:
(`def fn(x: hp.uniform("x", -1, 1)): return x`)
max_evals : int
Allow up to this many function evaluations before returning.
trials : None or base.Trials (or subclass)
Storage for completed, ongoing, and scheduled evaluation points. If
None, then a temporary `base.Trials` instance will be created. If
a trials object, then that trials object will be affected by
side-effect of this call.
algo : search algorithm
This object, such as `hyperopt.rand.suggest` and
`hyperopt.tpe.suggest` provides logic for sequential search of the
hyperparameter space.
warm_start : bool, optional (default False)
When set to True, reuse the solution of the previous ``fit`` call and add
iterations to the trials object. Otherwise, reset the ``trials``. ``max_evals``
refers to the total number of iterations in the ``Trials`` object, so use ``set_params``
to increase the total number.
scoring : str or callable, optional (default None)
Strategy to evaluate the performance of the cross-validated model on the test set.
n_jobs : int, optional (default None)
Number of jobs to run in parallel. ``None`` means 1 unless in a :obj:`joblib.parallel_backend`
context. ``-1`` means using all processors.
refit : bool, optional (default True)
Refit an estimator using the best found parameters on the whole dataset.
cv : int, cross-validation generator or an iterable, optional (default None)
Determines the cross-validation splitting strategy.
verbose : int, optional (default 0)
Controls the verbosity.
pre_dispatch : int or str, optional (default "2*n_jobs")
Controls the number of jobs that get dispatched during parallel execution. Reducing this
number can be useful to avoid high memory usage.
random_state : int, RandomState instance or None, optional (default None)
Pseudo random number generator state used for random uniform sampling from lists
instead of ``scipy.stats`` distributions.
error_score : 'raise' or numeric, optional (default np.nan)
Value to assign to the score if an error occurs during fitting.
return_train_score : bool, optional (default False)
If ``False``, the ``cv_results_`` attribute will not include training scores.
Attributes
----------
trials_ : Trials
The trials object.
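    Examples
    --------
    A minimal usage sketch (illustrative only; the estimator, dataset, and
    search space below are placeholders, not part of this module):
    >>> from sklearn.datasets import load_iris
    >>> from sklearn.svm import SVC
    >>> from hyperopt import hp, tpe
    >>> X, y = load_iris(return_X_y=True)
    >>> space = {
    ...     "C": hp.lognormal("C", 0, 1.0),
    ...     "kernel": hp.choice("kernel", ["linear", "rbf"]),
    ... }
    >>> search = HyperoptSearchCV(
    ...     SVC(), space, max_evals=10, algo=tpe.suggest, random_state=0
    ... )
    >>> search = search.fit(X, y)
    >>> sorted(search.best_params_)
    ['C', 'kernel']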
"""
_required_parameters = ["estimator", "space", "max_evals"]
def __init__(
self,
estimator,
space,
max_evals,
trials=None,
algo=None,
warm_start=False,
scoring=None,
n_jobs=None,
refit=True,
cv=None,
verbose=0,
pre_dispatch="2*n_jobs",
random_state=None,
error_score=np.nan,
return_train_score=False,
):
"""Init method."""
super().__init__(
estimator=estimator,
scoring=scoring,
n_jobs=n_jobs,
refit=refit,
cv=cv,
verbose=verbose,
pre_dispatch=pre_dispatch,
error_score=error_score,
return_train_score=return_train_score,
)
self.space = space
self.max_evals = max_evals
self.trials = trials
self.algo = algo
self.warm_start = warm_start
self.random_state = random_state
def _check_input_parameters(self, X, y=None, groups=None):
"""Run input checks.
Based on a similar method in :class:`sklearn.model_selection.BaseSuccessiveHalving`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training vector, where `n_samples` is the number of samples and
`n_features` is the number of features.
y : array-like, shape (n_samples,) or (n_samples, n_output), optional
Target relative to X for classification or regression;
None for unsupervised learning.
groups : array-like of shape (n_samples,), default=None
Group labels for the samples used while splitting the dataset into
train/test set. Only used in conjunction with a "Group" CV
instance (e.g., :class:`~sklearn.model_selection.GroupKFold`).
Raises
------
ValueError
Raised if
* ``scoring`` is not a string or callable,
* ``y`` has less than two classes for a classification task,
* ``y`` contains complex data, or
* ``refit`` is not boolean.
"""
if self.scoring is not None and not (
isinstance(self.scoring, str) or callable(self.scoring)
):
raise ValueError(
"scoring parameter must be a string, "
"a callable or None. Multimetric scoring is not "
"supported."
)
check_array(X)
if is_classifier(self.estimator):
y = self._validate_data(X="no_validation", y=y)
check_classification_targets(y)
labels = unique_labels(y)
if len(labels) < 2:
raise ValueError(
"Classifier can't train when only one class is present."
)
if not isinstance(self.refit, bool):
raise ValueError(
f"refit is expected to be a boolean. Got {type(self.refit)} instead."
)
def fit(self, X, y=None, groups=None, **fit_params):
"""Run fit with all sets of parameters.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training vector, where `n_samples` is the number of samples and
`n_features` is the number of features.
y : array-like, shape (n_samples,) or (n_samples, n_output), optional
Target relative to X for classification or regression;
None for unsupervised learning.
groups : array-like of shape (n_samples,), default=None
Group labels for the samples used while splitting the dataset into
train/test set. Only used in conjunction with a "Group" CV
instance (e.g., :class:`~sklearn.model_selection.GroupKFold`).
**fit_params : dict of string -> object
Parameters passed to the ``fit`` method of the estimator.
Returns
-------
self : object
Instance of fitted estimator.
"""
self._check_input_parameters(
X=X,
y=y,
groups=groups,
)
super().fit(X, y=y, groups=groups, **fit_params)
return self
def _run_search(self, evaluate_candidates):
"""Run the ``hyperopt`` iterations.
Parameters
----------
evaluate_candidates : callable
Callable defined in :class:`sklearn.model_selection._search.BaseSearchCV`
that trains and scores the model across the cross-validation folds for the
given parameter space.
"""
def _evaluate(params):
results = evaluate_candidates([params])
return {
"loss": -results["mean_test_score"][-1],
"params": params,
"status": STATUS_OK,
}
if not self.warm_start:
self.trials_ = Trials()
else:
if not hasattr(self, "trials_"):
if self.trials is None:
self.trials_ = Trials()
else:
self.trials_ = self.trials
if isinstance(self.random_state, int):
seed = np.random.default_rng(self.random_state)
elif isinstance(self.random_state, np.random.Generator):
seed = self.random_state
elif self.random_state is None:
seed = None
else:
raise ValueError(
"Please supply a `numpy.random.Generator` or integer for `random_state`."
)
fmin(
_evaluate,
space=self.space,
algo=self.algo,
max_evals=self.max_evals,
rstate=seed,
trials=self.trials_,
)
|
hyperoptREPO_NAMEhyperoptPATH_START.@hyperopt_extracted@hyperopt-master@hyperopt@sklearn.py@.PATH_END.py
|
{
"filename": "tf_tag_sigdef_test.py",
"repo_name": "triton-inference-server/server",
"repo_path": "server_extracted/server-main/qa/L0_tf_tag_sigdef/tf_tag_sigdef_test.py",
"type": "Python"
}
|
#!/usr/bin/env python3
# Copyright 2021-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import sys
sys.path.append("../common")
import unittest
import numpy as np
import test_util as tu
import tritonhttpclient as httpclient
class TagSigdefTest(tu.TestResultCollector):
base_model_name = "sig_tag"
base_tag = "serve"
test_tag = "testTag"
base_sig_def = "serving_default"
test_sig_def = "testSigDef"
dims = 16
def _test_helper(self, modelVersion, tag, sig_def):
shape = [self.dims]
model_name = self.base_model_name + str(modelVersion)
# The multiplier is defined during model creation. See server/qa/common/gen_tag_sigdef.py
# for details
multiplier = modelVersion + 1
output_name = "OUTPUT"
triton_client = httpclient.InferenceServerClient("localhost:8000", verbose=True)
inputs = []
outputs = []
inputs.append(httpclient.InferInput("INPUT", shape, "FP32"))
input_data = np.ones(shape=shape).astype(np.float32)
inputs[0].set_data_from_numpy(input_data, binary_data=True)
outputs.append(httpclient.InferRequestedOutput(output_name, binary_data=True))
results = triton_client.infer(model_name, inputs, outputs=outputs)
output_data = results.as_numpy(output_name)
test_output = input_data * multiplier
self.assertTrue(np.isclose(output_data, test_output).all())
def test_default(self):
self._test_helper(0, self.base_tag, self.base_sig_def)
def test_sig_def(self):
self._test_helper(1, self.base_tag, self.test_sig_def)
def test_tag(self):
self._test_helper(2, self.test_tag, self.base_sig_def)
def test_tag_sig_def(self):
self._test_helper(3, self.test_tag, self.test_sig_def)
if __name__ == "__main__":
unittest.main()
|
triton-inference-serverREPO_NAMEserverPATH_START.@server_extracted@server-main@qa@L0_tf_tag_sigdef@tf_tag_sigdef_test.py@.PATH_END.py
|
{
"filename": "tfsa-2022-102.md",
"repo_name": "tensorflow/tensorflow",
"repo_path": "tensorflow_extracted/tensorflow-master/tensorflow/security/advisory/tfsa-2022-102.md",
"type": "Markdown"
}
|
## TFSA-2022-102: Segfault in `QuantizedInstanceNorm`
### CVE Number
CVE-2022-35970
### Impact
If `QuantizedInstanceNorm` is given `x_min` or `x_max` tensors of a nonzero rank, it results in a segfault that can be used to trigger a denial of service attack.
```python
import tensorflow as tf
output_range_given = False
given_y_min = 0
given_y_max = 0
variance_epsilon = 1e-05
min_separation = 0.001
x = tf.constant(88, shape=[1,4,4,32], dtype=tf.quint8)
x_min = tf.constant([], shape=[0], dtype=tf.float32)
x_max = tf.constant(0, shape=[], dtype=tf.float32)
tf.raw_ops.QuantizedInstanceNorm(x=x, x_min=x_min, x_max=x_max, output_range_given=output_range_given, given_y_min=given_y_min, given_y_max=given_y_max, variance_epsilon=variance_epsilon, min_separation=min_separation)
```
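For contrast, the sketch below (added for illustration, not part of the original advisory) shows the expected well-formed call: the quantization range inputs `x_min` and `x_max` are scalar (rank-0) tensors, so the call avoids the crashing code path.
```python
import tensorflow as tf

x = tf.constant(88, shape=[1, 4, 4, 32], dtype=tf.quint8)
# Both range tensors are rank-0 scalars, which is what the op expects.
x_min = tf.constant(0.0, shape=[], dtype=tf.float32)
x_max = tf.constant(255.0, shape=[], dtype=tf.float32)
tf.raw_ops.QuantizedInstanceNorm(
    x=x,
    x_min=x_min,
    x_max=x_max,
    output_range_given=False,
    given_y_min=0,
    given_y_max=0,
    variance_epsilon=1e-05,
    min_separation=0.001,
)
```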
### Patches
We have patched the issue in GitHub commit [785d67a78a1d533759fcd2f5e8d6ef778de849e0](https://github.com/tensorflow/tensorflow/commit/785d67a78a1d533759fcd2f5e8d6ef778de849e0).
The fix will be included in TensorFlow 2.10.0. We will also cherrypick this commit on TensorFlow 2.9.1, TensorFlow 2.8.1, and TensorFlow 2.7.2, as these are also affected and still in supported range.
### For more information
Please consult [our security guide](https://github.com/tensorflow/tensorflow/blob/master/SECURITY.md) for more information regarding the security model and how to contact us with issues and questions.
### Attribution
This vulnerability has been reported by Neophytos Christou, Secure Systems Labs, Brown University.
|
tensorflowREPO_NAMEtensorflowPATH_START.@tensorflow_extracted@tensorflow-master@tensorflow@security@advisory@tfsa-2022-102.md@.PATH_END.py
|
{
"filename": "core.py",
"repo_name": "astropy/astropy",
"repo_path": "astropy_extracted/astropy-main/astropy/time/core.py",
"type": "Python"
}
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
The astropy.time package provides functionality for manipulating times and
dates. Specific emphasis is placed on supporting time scales (e.g. UTC, TAI,
UT1) and time representations (e.g. JD, MJD, ISO 8601) that are used in
astronomy.
"""
from __future__ import annotations
import copy
import enum
import operator
import os
import threading
from collections import defaultdict
from datetime import UTC, date, datetime
from itertools import pairwise
from time import strftime
from typing import TYPE_CHECKING
from warnings import warn
from weakref import WeakValueDictionary
import erfa
import numpy as np
from astropy import constants as const
from astropy import units as u
from astropy.extern import _strptime
from astropy.units import UnitConversionError
from astropy.utils import lazyproperty
from astropy.utils.compat import COPY_IF_NEEDED
from astropy.utils.data_info import MixinInfo, data_info_factory
from astropy.utils.decorators import deprecated
from astropy.utils.exceptions import AstropyDeprecationWarning, AstropyWarning
from astropy.utils.masked import (
MaskableShapedLikeNDArray,
Masked,
combine_masks,
get_data_and_mask,
)
# Below, import TimeFromEpoch to avoid breaking code that followed the old
# example of making a custom timescale in the documentation.
from . import conf
from .formats import (
TIME_DELTA_FORMATS,
TIME_FORMATS,
TimeAstropyTime,
TimeDatetime,
TimeDeltaNumeric,
TimeFromEpoch, # noqa: F401
TimeJD,
TimeUnique,
)
from .time_helper.function_helpers import CUSTOM_FUNCTIONS, UNSUPPORTED_FUNCTIONS
from .utils import day_frac
if TYPE_CHECKING:
from astropy.coordinates import EarthLocation
__all__ = [
"STANDARD_TIME_SCALES",
"TIME_DELTA_SCALES",
"TIME_SCALES",
"OperandTypeError",
"ScaleValueError",
"Time",
"TimeBase",
"TimeDelta",
"TimeDeltaMissingUnitWarning",
"TimeInfo",
"TimeInfoBase",
"update_leap_seconds",
]
STANDARD_TIME_SCALES = ("tai", "tcb", "tcg", "tdb", "tt", "ut1", "utc")
LOCAL_SCALES = ("local",)
TIME_TYPES = {
scale: scales for scales in (STANDARD_TIME_SCALES, LOCAL_SCALES) for scale in scales
}
TIME_SCALES = STANDARD_TIME_SCALES + LOCAL_SCALES
MULTI_HOPS = {
("tai", "tcb"): ("tt", "tdb"),
("tai", "tcg"): ("tt",),
("tai", "ut1"): ("utc",),
("tai", "tdb"): ("tt",),
("tcb", "tcg"): ("tdb", "tt"),
("tcb", "tt"): ("tdb",),
("tcb", "ut1"): ("tdb", "tt", "tai", "utc"),
("tcb", "utc"): ("tdb", "tt", "tai"),
("tcg", "tdb"): ("tt",),
("tcg", "ut1"): ("tt", "tai", "utc"),
("tcg", "utc"): ("tt", "tai"),
("tdb", "ut1"): ("tt", "tai", "utc"),
("tdb", "utc"): ("tt", "tai"),
("tt", "ut1"): ("tai", "utc"),
("tt", "utc"): ("tai",),
}
GEOCENTRIC_SCALES = ("tai", "tt", "tcg")
BARYCENTRIC_SCALES = ("tcb", "tdb")
ROTATIONAL_SCALES = ("ut1",)
TIME_DELTA_TYPES = {
scale: scales
for scales in (
GEOCENTRIC_SCALES,
BARYCENTRIC_SCALES,
ROTATIONAL_SCALES,
LOCAL_SCALES,
)
for scale in scales
}
TIME_DELTA_SCALES = (
GEOCENTRIC_SCALES + BARYCENTRIC_SCALES + ROTATIONAL_SCALES + LOCAL_SCALES
)
# For time scale changes, we need L_G and L_B, which are stored in erfam.h as
# /* L_G = 1 - d(TT)/d(TCG) */
# define ERFA_ELG (6.969290134e-10)
# /* L_B = 1 - d(TDB)/d(TCB), and TDB (s) at TAI 1977/1/1.0 */
# define ERFA_ELB (1.550519768e-8)
# These are exposed in erfa as erfa.ELG and erfa.ELB.
# Implied: d(TT)/d(TCG) = 1-L_G
# and d(TCG)/d(TT) = 1/(1-L_G) = 1 + (1-(1-L_G))/(1-L_G) = 1 + L_G/(1-L_G)
# scale offsets as second = first + first * scale_offset[(first,second)]
SCALE_OFFSETS = {
("tt", "tai"): None,
("tai", "tt"): None,
("tcg", "tt"): -erfa.ELG,
("tt", "tcg"): erfa.ELG / (1.0 - erfa.ELG),
("tcg", "tai"): -erfa.ELG,
("tai", "tcg"): erfa.ELG / (1.0 - erfa.ELG),
("tcb", "tdb"): -erfa.ELB,
("tdb", "tcb"): erfa.ELB / (1.0 - erfa.ELB),
}
# triple-level dictionary, yay!
SIDEREAL_TIME_MODELS = {
"mean": {
"IAU2006": {"function": erfa.gmst06, "scales": ("ut1", "tt")},
"IAU2000": {"function": erfa.gmst00, "scales": ("ut1", "tt")},
"IAU1982": {"function": erfa.gmst82, "scales": ("ut1",), "include_tio": False},
},
"apparent": {
"IAU2006A": {"function": erfa.gst06a, "scales": ("ut1", "tt")},
"IAU2000A": {"function": erfa.gst00a, "scales": ("ut1", "tt")},
"IAU2000B": {"function": erfa.gst00b, "scales": ("ut1",)},
"IAU1994": {"function": erfa.gst94, "scales": ("ut1",), "include_tio": False},
},
}
class _LeapSecondsCheck(enum.Enum):
NOT_STARTED = 0 # No thread has reached the check
RUNNING = 1 # A thread is running update_leap_seconds (_LEAP_SECONDS_LOCK is held)
DONE = 2 # update_leap_seconds has completed
_LEAP_SECONDS_CHECK = _LeapSecondsCheck.NOT_STARTED
_LEAP_SECONDS_LOCK = threading.RLock()
def _compress_array_dims(arr):
"""Compress array by allowing at most 2 * edgeitems + 1 in each dimension.
Parameters
----------
arr : array-like
Array to compress.
Returns
-------
out : array-like
Compressed array.
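    Examples
    --------
    Illustrative only (assumes numpy's default print options, ``edgeitems=3``):
    >>> import numpy as np
    >>> _compress_array_dims(np.arange(100).reshape(10, 10)).shape
    (7, 7)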
"""
idxs = []
edgeitems = np.get_printoptions()["edgeitems"]
# Build up a list of index arrays for each dimension, allowing no more than
# 2 * edgeitems + 1 elements in each dimension.
for dim in range(arr.ndim):
if arr.shape[dim] > 2 * edgeitems:
# The middle [edgeitems] value does not matter as it gets replaced
# by ... in the output.
idxs.append(
np.concatenate(
[np.arange(edgeitems), [edgeitems], np.arange(-edgeitems, 0)]
)
)
else:
idxs.append(np.arange(arr.shape[dim]))
# Use the magic np.ix_ function to effectively treat each index array as a
# slicing operator.
idxs_ix = np.ix_(*idxs)
out = arr[idxs_ix]
return out
class TimeInfoBase(MixinInfo):
"""
Container for meta information like name, description, format. This is
required when the object is used as a mixin column within a table, but can
be used as a general way to store meta information.
This base class is common between TimeInfo and TimeDeltaInfo.
"""
attr_names = MixinInfo.attr_names | {"serialize_method"}
_supports_indexing = True
# The usual tuple of attributes needed for serialization is replaced
# by a property, since Time can be serialized different ways.
_represent_as_dict_extra_attrs = (
"format",
"scale",
"precision",
"in_subfmt",
"out_subfmt",
"location",
"_delta_ut1_utc",
"_delta_tdb_tt",
)
# When serializing, write out the `value` attribute using the column name.
_represent_as_dict_primary_data = "value"
mask_val = np.ma.masked
@property
def _represent_as_dict_attrs(self):
method = self.serialize_method[self._serialize_context]
if method == "formatted_value":
out = ("value",)
elif method == "jd1_jd2":
out = ("jd1", "jd2")
else:
raise ValueError("serialize method must be 'formatted_value' or 'jd1_jd2'")
return out + self._represent_as_dict_extra_attrs
def __init__(self, bound=False):
super().__init__(bound)
# If bound to a data object instance then create the dict of attributes
# which stores the info attribute values.
if bound:
# Specify how to serialize this object depending on context.
# If ``True`` for a context, then use formatted ``value`` attribute
# (e.g. the ISO time string). If ``False`` then use float jd1 and jd2.
self.serialize_method = {
"fits": "jd1_jd2",
"ecsv": "formatted_value",
"hdf5": "jd1_jd2",
"yaml": "jd1_jd2",
"parquet": "jd1_jd2",
None: "jd1_jd2",
}
def get_sortable_arrays(self):
"""
Return a list of arrays which can be lexically sorted to represent
the order of the parent column.
Returns
-------
arrays : list of ndarray
"""
parent = self._parent
jd_approx = parent.jd
jd_remainder = (parent - parent.__class__(jd_approx, format="jd")).jd
return [jd_approx, jd_remainder]
@property
def unit(self):
return None
info_summary_stats = staticmethod(
data_info_factory(
names=MixinInfo._stats,
funcs=[getattr(np, stat) for stat in MixinInfo._stats],
)
)
# When Time has mean, std, min, max methods:
# funcs = [lambda x: getattr(x, stat)() for stat in MixinInfo._stats]
def _construct_from_dict(self, map):
if "jd1" in map and "jd2" in map:
# Initialize as JD but revert to desired format and out_subfmt (if needed)
format = map.pop("format")
out_subfmt = map.pop("out_subfmt", None)
map["format"] = "jd"
map["val"] = map.pop("jd1")
map["val2"] = map.pop("jd2")
out = self._parent_cls(**map)
out.format = format
if out_subfmt is not None:
out.out_subfmt = out_subfmt
else:
map["val"] = map.pop("value")
out = self._parent_cls(**map)
return out
def new_like(self, cols, length, metadata_conflicts="warn", name=None):
"""
Return a new Time instance which is consistent with the input Time objects
``cols`` and has ``length`` rows.
This is intended for creating an empty Time instance whose elements can
be set in-place for table operations like join or vstack. It checks
that the input locations and attributes are consistent. This is used
when a Time object is used as a mixin column in an astropy Table.
Parameters
----------
cols : list
List of input columns (Time objects)
length : int
Length of the output column object
metadata_conflicts : str ('warn'|'error'|'silent')
How to handle metadata conflicts
name : str
Output column name
Returns
-------
col : Time (or subclass)
Empty instance of this class consistent with ``cols``
"""
# Get merged info attributes like shape, dtype, format, description, etc.
attrs = self.merge_cols_attributes(
cols, metadata_conflicts, name, ("meta", "description")
)
attrs.pop("dtype") # Not relevant for Time
col0 = cols[0]
# Check that location is consistent for all Time objects
for col in cols[1:]:
# This is the method used by __setitem__ to ensure that the right side
# has a consistent location (and coerce data if necessary, but that does
# not happen in this case since `col` is already a Time object). If this
# passes then any subsequent table operations via setitem will work.
try:
col0._make_value_equivalent(slice(None), col)
except ValueError:
raise ValueError("input columns have inconsistent locations")
# Make a new Time object with the desired shape and attributes
shape = (length,) + attrs.pop("shape")
jd2000 = 2451544.5 # Arbitrary JD value J2000.0 that will work with ERFA
jd1 = np.full(shape, jd2000, dtype="f8")
jd2 = np.zeros(shape, dtype="f8")
tm_attrs = {
attr: getattr(col0, attr) for attr in ("scale", "location", "precision")
}
out = self._parent_cls(jd1, jd2, format="jd", **tm_attrs)
out.format = col0.format
out.out_subfmt = col0.out_subfmt
out.in_subfmt = col0.in_subfmt
# Set remaining info attributes
for attr, value in attrs.items():
setattr(out.info, attr, value)
return out
class TimeInfo(TimeInfoBase):
"""
Container for meta information like name, description, format. This is
required when the object is used as a mixin column within a table, but can
be used as a general way to store meta information.
"""
def _represent_as_dict(self, attrs=None):
"""Get the values for the parent ``attrs`` and return as a dict.
By default, uses '_represent_as_dict_attrs'.
"""
map = super()._represent_as_dict(attrs=attrs)
# TODO: refactor these special cases into the TimeFormat classes?
# The datetime64 format requires special handling for ECSV (see #12840).
# The `value` has numpy dtype datetime64 but this is not an allowed
# datatype for ECSV. Instead convert to a string representation.
if (
self._serialize_context == "ecsv"
and map["format"] == "datetime64"
and "value" in map
):
map["value"] = map["value"].astype("U")
# The datetime format is serialized as ISO with no loss of precision.
if map["format"] == "datetime" and "value" in map:
map["value"] = np.vectorize(lambda x: x.isoformat())(map["value"])
return map
def _construct_from_dict(self, map):
# See comment above. May need to convert string back to datetime64.
# Note that _serialize_context is not set here so we just look for the
# string value directly.
if (
map["format"] == "datetime64"
and "value" in map
and map["value"].dtype.kind == "U"
):
map["value"] = map["value"].astype("datetime64")
# Convert back to datetime objects for datetime format.
if map["format"] == "datetime" and "value" in map:
from datetime import datetime
map["value"] = np.vectorize(datetime.fromisoformat)(map["value"])
delta_ut1_utc = map.pop("_delta_ut1_utc", None)
delta_tdb_tt = map.pop("_delta_tdb_tt", None)
out = super()._construct_from_dict(map)
if delta_ut1_utc is not None:
out._delta_ut1_utc = delta_ut1_utc
if delta_tdb_tt is not None:
out._delta_tdb_tt = delta_tdb_tt
return out
class TimeDeltaInfo(TimeInfoBase):
"""
Container for meta information like name, description, format. This is
required when the object is used as a mixin column within a table, but can
be used as a general way to store meta information.
"""
_represent_as_dict_extra_attrs = ("format", "scale")
def new_like(self, cols, length, metadata_conflicts="warn", name=None):
"""
Return a new TimeDelta instance which is consistent with the input Time objects
``cols`` and has ``length`` rows.
This is intended for creating an empty Time instance whose elements can
be set in-place for table operations like join or vstack. It checks
that the input locations and attributes are consistent. This is used
when a Time object is used as a mixin column in an astropy Table.
Parameters
----------
cols : list
List of input columns (Time objects)
length : int
Length of the output column object
metadata_conflicts : str ('warn'|'error'|'silent')
How to handle metadata conflicts
name : str
Output column name
Returns
-------
col : Time (or subclass)
Empty instance of this class consistent with ``cols``
"""
# Get merged info attributes like shape, dtype, format, description, etc.
attrs = self.merge_cols_attributes(
cols, metadata_conflicts, name, ("meta", "description")
)
attrs.pop("dtype") # Not relevant for Time
col0 = cols[0]
# Make a new Time object with the desired shape and attributes
shape = (length,) + attrs.pop("shape")
jd1 = np.zeros(shape, dtype="f8")
jd2 = np.zeros(shape, dtype="f8")
out = self._parent_cls(jd1, jd2, format="jd", scale=col0.scale)
out.format = col0.format
# Set remaining info attributes
for attr, value in attrs.items():
setattr(out.info, attr, value)
return out
class TimeBase(MaskableShapedLikeNDArray):
"""Base time class from which Time and TimeDelta inherit."""
# Make sure that reverse arithmetic (e.g., TimeDelta.__rmul__)
# gets called over the __mul__ of Numpy arrays.
__array_priority__ = 20000
# Declare that Time can be used as a Table column by defining the
# attribute where column attributes will be stored.
_astropy_column_attrs = None
def __getnewargs__(self):
return (self._time,)
def __getstate__(self):
# For pickling, we remove the cache from what's pickled
state = super().__getstate__().copy()
state.pop("_id_cache", None)
state.pop("cache", None)
return state
def _init_from_vals(
self,
val,
val2,
format,
scale,
copy,
precision=None,
in_subfmt=None,
out_subfmt=None,
):
"""
Set the internal _format, scale, and _time attrs from user
inputs. This handles coercion into the correct shapes and
some basic input validation.
"""
if in_subfmt is None:
in_subfmt = "*"
if out_subfmt is None:
out_subfmt = "*"
# Coerce val into an array
val = _make_array(val, copy)
# If val2 is not None, ensure consistency
if val2 is not None:
val2 = _make_array(val2, copy)
try:
np.broadcast(val, val2)
except ValueError:
raise ValueError(
"Input val and val2 have inconsistent shape; "
"they cannot be broadcast together."
)
if scale is not None:
if not (isinstance(scale, str) and scale.lower() in self.SCALES):
raise ScaleValueError(
f"Scale {scale!r} is not in the allowed scales "
f"{sorted(self.SCALES)}"
)
# If either of the input val, val2 are masked arrays then
# find the masked elements and fill them.
data1, mask1 = get_data_and_mask(val)
data2, mask2 = get_data_and_mask(val2)
mask = combine_masks([mask1, mask2])
# Parse / convert input values into internal jd1, jd2 based on format
self._time = self._get_time_fmt(
data1, data2, format, scale, precision, in_subfmt, out_subfmt, mask
)
self._format = self._time.name
# Hack from #9969 to allow passing the location value that has been
# collected by the TimeAstropyTime format class up to the Time level.
# TODO: find a nicer way.
if hasattr(self._time, "_location"):
self._location = self._time._location
del self._time._location
# If any inputs were masked then mask both jd1 and jd2 accordingly,
# using a shared mask. From above, ``mask`` must be either Python
# bool False or a bool ndarray with the correct shape.
if mask is not False and np.any(mask):
# Ensure that if the class is already masked, we do not lose it.
self._time.jd1 = Masked(self._time.jd1, copy=False)
self._time.jd1.mask |= mask
# Ensure we share the mask (it may have been broadcast).
self._time.jd2 = Masked(
self._time.jd2, mask=self._time.jd1.mask, copy=False
)
def _get_time_fmt(
self, val, val2, format, scale, precision, in_subfmt, out_subfmt, mask
):
"""
Given the supplied val, val2, format and scale try to instantiate
the corresponding TimeFormat class to convert the input values into
the internal jd1 and jd2.
If format is `None` and the input is a string-type or object array then
guess available formats and stop when one matches.
"""
if format is None:
# If val and val2 broadcasted shape is (0,) (i.e. empty array input) then we
# cannot guess format from the input values. But a quantity is fine (as
# long as it has time units, but that will be checked later).
empty_array = val.size == 0 and (val2 is None or val2.size == 0)
if not (isinstance(self, TimeDelta) and isinstance(val, u.Quantity)) and (
empty_array or np.all(mask)
):
raise ValueError(
"cannot guess format from input values with zero-size array"
" or all elements masked"
)
formats = [
(name, cls)
for name, cls in self.FORMATS.items()
if issubclass(cls, TimeUnique)
]
# AstropyTime is a pseudo-format that isn't in the TIME_FORMATS registry,
# but try to guess it at the end.
if isinstance(self, Time):
formats.append(("astropy_time", TimeAstropyTime))
elif not isinstance(format, str):
raise TypeError("format must be a string")
elif format.lower() not in self.FORMATS:
raise ValueError(
f"Format {format!r} is not one of the allowed formats "
f"{sorted(self.FORMATS)}"
)
else:
formats = [(format, self.FORMATS[format])]
masked = np.any(mask)
oval, oval2 = val, val2
problems = {}
for name, cls in formats:
try:
if masked:
val, val2 = cls._fill_masked_values(oval, oval2, mask, in_subfmt)
return cls(val, val2, scale, precision, in_subfmt, out_subfmt)
except UnitConversionError:
raise
except (ValueError, TypeError) as err:
# If ``format`` specified then there is only one possibility, so raise
# immediately and include the upstream exception message to make it
# easier for user to see what is wrong.
if len(formats) == 1:
raise ValueError(
f"Input values did not match the format class {format}:"
+ os.linesep
+ f"{err.__class__.__name__}: {err}"
) from err
else:
problems[name] = err
message = (
"Input values did not match any of the formats where the format "
"keyword is optional:\n"
) + "\n".join(f"- '{name}': {err}" for name, err in problems.items())
raise ValueError(message)
@property
def writeable(self):
return self._time.jd1.flags.writeable & self._time.jd2.flags.writeable
@writeable.setter
def writeable(self, value):
self._time.jd1.flags.writeable = value
self._time.jd2.flags.writeable = value
@property
def format(self):
"""
Get or set time format.
The format defines the way times are represented when accessed via the
``.value`` attribute. By default it is the same as the format used for
initializing the `Time` instance, but it can be set to any other value
that could be used for initialization. These can be listed with::
>>> list(Time.FORMATS)
['jd', 'mjd', 'decimalyear', 'unix', 'unix_tai', 'cxcsec', 'gps', 'plot_date',
'stardate', 'datetime', 'ymdhms', 'iso', 'isot', 'yday', 'datetime64',
'fits', 'byear', 'jyear', 'byear_str', 'jyear_str']
"""
return self._format
@format.setter
def format(self, format):
"""Set time format."""
if format not in self.FORMATS:
raise ValueError(f"format must be one of {list(self.FORMATS)}")
format_cls = self.FORMATS[format]
# Get the new TimeFormat object to contain time in new format. Possibly
# coerce in/out_subfmt to '*' (default) if existing subfmt values are
# not valid in the new format.
self._time = format_cls(
self._time.jd1,
self._time.jd2,
self._time._scale,
self.precision,
in_subfmt=format_cls._get_allowed_subfmt(self.in_subfmt),
out_subfmt=format_cls._get_allowed_subfmt(self.out_subfmt),
from_jd=True,
)
self._format = format
def to_string(self):
"""Output a string representation of the Time or TimeDelta object.
Similar to ``str(self.value)`` (which uses numpy array formatting) but
array values are evaluated only for the items that actually are output.
For large arrays this can be a substantial performance improvement.
Returns
-------
out : str
String representation of the time values.
"""
npo = np.get_printoptions()
if self.size < npo["threshold"]:
out = str(self.value)
else:
# Compress time object by allowing at most 2 * npo["edgeitems"] + 1
# in each dimension. Then force numpy to use "summary mode" of
# showing only the edge items by setting the size threshold to 0.
# TODO: use np.core.arrayprint._leading_trailing if we have support for
# np.concatenate. See #8610.
tm = _compress_array_dims(self)
with np.printoptions(threshold=0):
out = str(tm.value)
return out
def __repr__(self):
return (
f"<{type(self).__name__} object: scale='{self.scale}' "
f"format='{self.format}' value={self.to_string()}>"
)
def __str__(self):
return self.to_string()
def __hash__(self):
try:
loc = getattr(self, "location", None)
if loc is not None:
loc = loc.x.to_value(u.m), loc.y.to_value(u.m), loc.z.to_value(u.m)
return hash((self.jd1, self.jd2, self.scale, loc))
except TypeError:
if self.ndim != 0:
reason = "(must be scalar)"
elif self.masked:
reason = "(value is masked)"
else:
raise
raise TypeError(f"unhashable type: '{self.__class__.__name__}' {reason}")
@property
def location(self) -> EarthLocation | None:
return self._location
@location.setter
def location(self, value):
if hasattr(self, "_location"):
# since astropy 6.1.0
warn(
"Setting the location attribute post initialization will be "
"disallowed in a future version of Astropy. "
"Instead you should set the location when creating the Time object. "
"In the future, this will raise an AttributeError.",
category=FutureWarning,
stacklevel=2,
)
self._location = value
@property
def scale(self):
"""Time scale."""
return self._time.scale
def _set_scale(self, scale):
"""
This is the key routine that actually does time scale conversions.
This is not public and not connected to the read-only scale property.
"""
if scale == self.scale:
return
if scale not in self.SCALES:
raise ValueError(
f"Scale {scale!r} is not in the allowed scales {sorted(self.SCALES)}"
)
if scale == "utc" or self.scale == "utc":
# If doing a transform involving UTC then check that the leap
# seconds table is up to date.
_check_leapsec()
# Determine the chain of scale transformations to get from the current
# scale to the new scale. MULTI_HOPS contains a dict of all
# transformations (xforms) that require intermediate xforms.
# The MULTI_HOPS dict is keyed by (sys1, sys2) in alphabetical order.
xform = (self.scale, scale)
xform_sort = tuple(sorted(xform))
multi = MULTI_HOPS.get(xform_sort, ())
xforms = xform_sort[:1] + multi + xform_sort[-1:]
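        # Example (added for clarity): converting from 'tcb' to 'ut1' picks up
        # MULTI_HOPS[('tcb', 'ut1')] == ('tdb', 'tt', 'tai', 'utc'), so the full
        # chain is ('tcb', 'tdb', 'tt', 'tai', 'utc', 'ut1').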
# If we made the reverse xform then reverse it now.
if xform_sort != xform:
xforms = tuple(reversed(xforms))
# Transform the jd1,2 pairs through the chain of scale xforms.
jd1, jd2 = self._time.jd1, self._time.jd2
for sys1, sys2 in pairwise(xforms):
# Some xforms require an additional delta_ argument that is
# provided through Time methods. These values may be supplied by
# the user or computed based on available approximations. The
# get_delta_ methods are available for only one combination of
# sys1, sys2 though the property applies for both xform directions.
args = [jd1, jd2]
for sys12 in ((sys1, sys2), (sys2, sys1)):
dt_method = "_get_delta_{}_{}".format(*sys12)
try:
get_dt = getattr(self, dt_method)
except AttributeError:
pass
else:
args.append(get_dt(jd1, jd2))
break
conv_func = getattr(erfa, sys1 + sys2)
jd1, jd2 = conv_func(*args)
jd1, jd2 = day_frac(jd1, jd2)
self._time = self.FORMATS[self.format](
jd1,
jd2,
scale,
self.precision,
self.in_subfmt,
self.out_subfmt,
from_jd=True,
)
@property
def precision(self):
"""
Decimal precision when outputting seconds as floating point (int
value between 0 and 9 inclusive).
"""
return self._time.precision
@precision.setter
def precision(self, val):
del self.cache
self._time.precision = val
@property
def in_subfmt(self):
"""
Unix wildcard pattern to select subformats for parsing string input
times.
"""
return self._time.in_subfmt
@in_subfmt.setter
def in_subfmt(self, val):
self._time.in_subfmt = val
del self.cache
@property
def out_subfmt(self):
"""
Unix wildcard pattern to select subformats for outputting times.
"""
return self._time.out_subfmt
@out_subfmt.setter
def out_subfmt(self, val):
# Setting the out_subfmt property here does validation of ``val``
self._time.out_subfmt = val
del self.cache
@property
def shape(self):
"""The shape of the time instances.
Like `~numpy.ndarray.shape`, can be set to a new shape by assigning a
tuple. Note that if different instances share some but not all
underlying data, setting the shape of one instance can make the other
instance unusable. Hence, it is strongly recommended to get new,
reshaped instances with the ``reshape`` method.
Raises
------
ValueError
If the new shape has the wrong total number of elements.
AttributeError
If the shape of the ``jd1``, ``jd2``, ``location``,
``delta_ut1_utc``, or ``delta_tdb_tt`` attributes cannot be changed
without the arrays being copied. For these cases, use the
`Time.reshape` method (which copies any arrays that cannot be
reshaped in-place).
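        Examples
        --------
        Illustrative sketch:
        >>> import numpy as np
        >>> from astropy.time import Time
        >>> t = Time(50000.0 + np.arange(6.0), format="mjd")
        >>> t.shape = (2, 3)
        >>> t.shape
        (2, 3)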
"""
return self._time.jd1.shape
@shape.setter
def shape(self, shape):
del self.cache
# We have to keep track of arrays that were already reshaped,
# since we may have to return those to their original shape if a later
# shape-setting fails.
reshaped = []
oldshape = self.shape
# In-place reshape of data/attributes. Need to access _time.jd1/2 not
# self.jd1/2 because the latter are not guaranteed to be the actual
# data, and in fact should not be directly changeable from the public
# API.
for obj, attr in (
(self._time, "jd1"),
(self._time, "jd2"),
(self, "_delta_ut1_utc"),
(self, "_delta_tdb_tt"),
(self, "location"),
):
val = getattr(obj, attr, None)
if val is not None and val.size > 1:
try:
val.shape = shape
except Exception:
for val2 in reshaped:
val2.shape = oldshape
raise
else:
reshaped.append(val)
def _shaped_like_input(self, value):
if self.masked:
# Ensure the mask is independent.
value = conf._masked_cls(value, mask=self.mask.copy())
# For new-style, we do not treat masked scalars differently from arrays.
if isinstance(value, Masked):
return value
if self._time.jd1.shape:
if isinstance(value, np.ndarray):
return value
else:
raise TypeError(
f"JD is an array ({self._time.jd1!r}) but value is not ({value!r})"
)
else:
# zero-dimensional array, is it safe to unbox? The tricky comparison
# of the mask is for the case that value is structured; otherwise, we
# could just use np.ma.is_masked(value).
if (
isinstance(value, np.ndarray)
and not value.shape
and (
(mask := getattr(value, "mask", np.False_)) == np.zeros_like(mask)
).all()
):
if value.dtype.kind == "M":
# existing test doesn't want datetime64 converted
return value[()]
elif value.dtype.fields:
# Unpack but keep field names; .item() doesn't
# Still don't get python types in the fields
return value[()]
else:
return value.item()
else:
return value
@property
def jd1(self):
"""
First of the two doubles that internally store time value(s) in JD.
"""
return self._shaped_like_input(self._time.jd1)
@property
def jd2(self):
"""
Second of the two doubles that internally store time value(s) in JD.
"""
return self._shaped_like_input(self._time.jd2)
def to_value(self, format, subfmt="*"):
"""Get time values expressed in specified output format.
This method allows representing the ``Time`` object in the desired
output ``format`` and optional sub-format ``subfmt``. Available
built-in formats include ``jd``, ``mjd``, ``iso``, and so forth. Each
format can have its own sub-formats
For built-in numerical formats like ``jd`` or ``unix``, ``subfmt`` can
be one of 'float', 'long', 'decimal', 'str', or 'bytes'. Here, 'long'
uses ``numpy.longdouble`` for somewhat enhanced precision (with
the enhancement depending on platform), and 'decimal'
:class:`decimal.Decimal` for full precision. For 'str' and 'bytes', the
number of digits is also chosen such that time values are represented
accurately.
For built-in date-like string formats, one of 'date_hms', 'date_hm', or
'date' (or 'longdate_hms', etc., for 5-digit years in
`~astropy.time.TimeFITS`). For sub-formats including seconds, the
number of digits used for the fractional seconds is as set by
`~astropy.time.Time.precision`.
Parameters
----------
format : str
The format in which one wants the time values. Default: the current
format.
subfmt : str or None, optional
Value or wildcard pattern to select the sub-format in which the
values should be given. The default of '*' picks the first
available for a given format, i.e., 'float' or 'date_hms'.
If `None`, use the instance's ``out_subfmt``.
"""
# TODO: add a precision argument (but ensure it is keyword argument
# only, to make life easier for TimeDelta.to_value()).
if format not in self.FORMATS:
raise ValueError(f"format must be one of {list(self.FORMATS)}")
if subfmt is None:
if format == self.format:
subfmt = self.out_subfmt
else:
subfmt = self.FORMATS[format]._get_allowed_subfmt(self.out_subfmt)
cache = self.cache["format"]
key = format, subfmt, conf.masked_array_type
value = cache.get(key)
if value is None:
if format == self.format:
tm = self
else:
tm = self.replicate(format=format)
# Some TimeFormat subclasses may not be able to handle being passed
# an out_subfmt. This includes some core classes like
# TimeBesselianEpochString that do not have any allowed subfmts. But
# those do deal with `self.out_subfmt` internally, so if subfmt is
# the same, we do not pass it on.
kwargs = {}
if subfmt is not None and subfmt != tm.out_subfmt:
kwargs["out_subfmt"] = subfmt
try:
value = tm._time.to_value(parent=tm, **kwargs)
except TypeError as exc:
# Try validating subfmt, e.g. for formats like 'jyear_str' that
# do not implement out_subfmt in to_value() (because there are
# no allowed subformats). If subfmt is not valid this gives the
# same exception as would have occurred if the call to
# `to_value()` had succeeded.
tm._time._select_subfmts(subfmt)
# Subfmt was valid, so fall back to the original exception to see
# if it was lack of support for out_subfmt as a call arg.
if "unexpected keyword argument 'out_subfmt'" in str(exc):
raise ValueError(
f"to_value() method for format {format!r} does not "
"support passing a 'subfmt' argument"
) from None
else:
# Some unforeseen exception so raise.
raise
value = tm._shaped_like_input(value)
cache[key] = value
return value
@property
def value(self):
"""Time value(s) in current format."""
return self.to_value(self.format, None)
@property
def mask(self):
if "mask" not in self.cache:
mask = getattr(self._time.jd2, "mask", None)
if mask is None:
mask = np.broadcast_to(np.False_, self._time.jd2.shape)
else:
# Take a view of any existing mask, so we can set it to readonly.
mask = mask.view()
mask.flags.writeable = False
self.cache["mask"] = mask
return self.cache["mask"]
@property
def masked(self):
return isinstance(self._time.jd1, Masked)
def insert(self, obj, values, axis=0):
"""
Insert values before the given indices in the column and return
a new `~astropy.time.Time` or `~astropy.time.TimeDelta` object.
The values to be inserted must conform to the rules for in-place setting
of ``Time`` objects (see ``Get and set values`` in the ``Time``
documentation).
The API signature matches the ``np.insert`` API, but is more limited.
The specification of insert index ``obj`` must be a single integer,
and the ``axis`` must be ``0`` for simple row insertion before the
index.
Parameters
----------
obj : int
Integer index before which ``values`` is inserted.
values : array-like
Value(s) to insert. If the type of ``values`` is different
from that of this object, ``values`` is converted to the matching type.
axis : int, optional
Axis along which to insert ``values``. Default is 0, which is the
only allowed value and will insert a row.
Returns
-------
out : `~astropy.time.Time` subclass
New time object with inserted value(s)
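        Examples
        --------
        Illustrative sketch:
        >>> from astropy.time import Time
        >>> t = Time(["2020-01-01", "2020-01-03"])
        >>> t2 = t.insert(1, Time("2020-01-02"))
        >>> t2.shape
        (3,)
        >>> t2[1].iso
        '2020-01-02 00:00:00.000'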
"""
# Validate inputs: obj arg is integer, axis=0, self is not a scalar, and
# input index is in bounds.
try:
idx0 = operator.index(obj)
except TypeError:
raise TypeError("obj arg must be an integer")
if axis != 0:
raise ValueError("axis must be 0")
if not self.shape:
raise TypeError(
f"cannot insert into scalar {self.__class__.__name__} object"
)
if abs(idx0) > len(self):
raise IndexError(
f"index {idx0} is out of bounds for axis 0 with size {len(self)}"
)
# Turn negative index into positive
if idx0 < 0:
idx0 = len(self) + idx0
# For non-Time object, use numpy to help figure out the length. (Note annoying
# case of a string input that has a length which is not the length we want).
if not isinstance(values, self.__class__):
values = np.asarray(values)
n_values = len(values) if values.shape else 1
# Finally make the new object with the correct length and set values for the
# three sections, before insert, the insert, and after the insert.
out = self.__class__.info.new_like(
[self], len(self) + n_values, name=self.info.name
)
out._time.jd1[:idx0] = self._time.jd1[:idx0]
out._time.jd2[:idx0] = self._time.jd2[:idx0]
# This uses the Time setting machinery to coerce and validate as necessary.
out[idx0 : idx0 + n_values] = values
out._time.jd1[idx0 + n_values :] = self._time.jd1[idx0:]
out._time.jd2[idx0 + n_values :] = self._time.jd2[idx0:]
return out
def __setitem__(self, item, value):
if not self.writeable:
if self.shape:
raise ValueError(
f"{self.__class__.__name__} object is read-only. Make a "
'copy() or set "writeable" attribute to True.'
)
else:
raise ValueError(
f"scalar {self.__class__.__name__} object is read-only."
)
# Any use of setitem results in immediate cache invalidation
del self.cache
# Setting invalidates transform deltas
for attr in ("_delta_tdb_tt", "_delta_ut1_utc"):
if hasattr(self, attr):
delattr(self, attr)
if value is np.ma.masked or value is np.nan:
if not isinstance(self._time.jd2, Masked):
self._time.jd1 = Masked(self._time.jd1, copy=False)
self._time.jd2 = Masked(
self._time.jd2, mask=self._time.jd1.mask, copy=False
)
self._time.jd2.mask[item] = True
return
elif value is np.ma.nomask:
if isinstance(self._time.jd2, Masked):
self._time.jd2.mask[item] = False
return
value = self._make_value_equivalent(item, value)
# Finally directly set the jd1/2 values. Locations are known to match.
if self.scale is not None:
value = getattr(value, self.scale)
self._time.jd1[item] = value._time.jd1
self._time.jd2[item] = value._time.jd2
def isclose(self, other, atol=None):
"""Returns a boolean or boolean array where two Time objects are
element-wise equal within a time tolerance.
This evaluates the expression below::
abs(self - other) <= atol
Parameters
----------
other : `~astropy.time.Time`
Time object for comparison.
atol : `~astropy.units.Quantity` or `~astropy.time.TimeDelta`
Absolute tolerance for equality with units of time (e.g. ``u.s`` or
``u.day``). Default is two bits in the 128-bit JD time representation,
equivalent to about 40 picosecs.
"""
if atol is None:
# Note: use 2 bits instead of 1 bit based on experience in precision
# tests, since taking the difference with a UTC time means one has
# to do a scale change.
atol = 2 * np.finfo(float).eps * u.day
if not isinstance(atol, (u.Quantity, TimeDelta)):
raise TypeError(
"'atol' argument must be a Quantity or TimeDelta instance, got "
f"{atol.__class__.__name__} instead"
)
try:
# Separate these out so user sees where the problem is
dt = self - other
dt = abs(dt)
out = dt <= atol
except Exception as err:
raise TypeError(
"'other' argument must support subtraction with Time "
"and return a value that supports comparison with "
f"{atol.__class__.__name__}: {err}"
)
return out
def copy(self, format=None):
"""
Return a fully independent copy of the Time object, optionally changing
the format.
If ``format`` is supplied then the time format of the returned Time
object will be set accordingly, otherwise it will be unchanged from the
original.
In this method a full copy of the internal time arrays will be made.
The internal time arrays are normally not changeable by the user so in
most cases the ``replicate()`` method should be used.
Parameters
----------
format : str, optional
Time format of the copy.
Returns
-------
tm : Time object
Copy of this object
"""
return self._apply("copy", format=format)
def replicate(self, format=None, copy=False, cls=None):
"""
Return a replica of the Time object, optionally changing the format.
If ``format`` is supplied then the time format of the returned Time
object will be set accordingly, otherwise it will be unchanged from the
original.
If ``copy`` is set to `True` then a full copy of the internal time arrays
will be made. By default the replica will use a reference to the
original arrays when possible to save memory. The internal time arrays
are normally not changeable by the user so in most cases it should not
be necessary to set ``copy`` to `True`.
The convenience method copy() is available in which ``copy`` is `True`
by default.
Parameters
----------
format : str, optional
Time format of the replica.
copy : bool, optional
Return a true copy instead of using references where possible.
Returns
-------
tm : Time object
Replica of this object
"""
return self._apply("copy" if copy else "replicate", format=format, cls=cls)
def _apply(self, method, *args, format=None, cls=None, **kwargs):
"""Create a new time object, possibly applying a method to the arrays.
Parameters
----------
method : str or callable
If string, can be 'replicate' or the name of a relevant
`~numpy.ndarray` method. In the former case, a new time instance
with unchanged internal data is created, while in the latter the
method is applied to the internal ``jd1`` and ``jd2`` arrays, as
well as to possible ``location``, ``_delta_ut1_utc``, and
``_delta_tdb_tt`` arrays.
If a callable, it is directly applied to the above arrays.
Examples: 'copy', '__getitem__', 'reshape', `~numpy.broadcast_to`.
args : tuple
Any positional arguments for ``method``.
kwargs : dict
Any keyword arguments for ``method``. If the ``format`` keyword
argument is present, this will be used as the Time format of the
replica.
Examples
--------
Some ways this is used internally::
copy : ``_apply('copy')``
replicate : ``_apply('replicate')``
reshape : ``_apply('reshape', new_shape)``
index or slice : ``_apply('__getitem__', item)``
broadcast : ``_apply(np.broadcast_to, shape=new_shape)``
"""
new_format = self.format if format is None else format
if callable(method):
apply_method = lambda array: method(array, *args, **kwargs)
else:
if method == "replicate":
apply_method = None
else:
apply_method = operator.methodcaller(method, *args, **kwargs)
jd1, jd2 = self._time.jd1, self._time.jd2
if apply_method:
jd1 = apply_method(jd1)
jd2 = apply_method(jd2)
# Get a new instance of our class and set its attributes directly.
tm = super().__new__(cls or self.__class__)
tm._time = TimeJD(
jd1,
jd2,
self.scale,
precision=None,
in_subfmt="*",
out_subfmt="*",
from_jd=True,
)
# Optional ndarray attributes.
for attr in ("_delta_ut1_utc", "_delta_tdb_tt", "_location"):
try:
val = getattr(self, attr)
except AttributeError:
continue
if apply_method:
# Apply the method to any value arrays (though skip if there is
# only an array scalar and the method would return a view,
# since in that case nothing would change).
if getattr(val, "shape", ()):
val = apply_method(val)
elif method == "copy" or method == "flatten":
# flatten should copy also for a single element array, but
# we cannot use it directly for array scalars, since it
# always returns a one-dimensional array. So, just copy.
val = copy.copy(val)
setattr(tm, attr, val)
# Copy other 'info' attr only if it has actually been defined and the
# time object is not a scalar (issue #10688).
# See PR #3898 for further explanation and justification, along
# with Quantity.__array_finalize__
if "info" in self.__dict__:
tm.info = self.info
# Make the new internal _time object corresponding to the format
# in the copy. If the format is unchanged this process is lightweight
# and does not create any new arrays.
if new_format not in tm.FORMATS:
raise ValueError(f"format must be one of {list(tm.FORMATS)}")
NewFormat = tm.FORMATS[new_format]
tm._time = NewFormat(
tm._time.jd1,
tm._time.jd2,
tm._time._scale,
precision=self.precision,
in_subfmt=NewFormat._get_allowed_subfmt(self.in_subfmt),
out_subfmt=NewFormat._get_allowed_subfmt(self.out_subfmt),
from_jd=True,
)
tm._format = new_format
tm.SCALES = self.SCALES
# Finally, if we do not own our data, we link caches, so that
# those can be cleared as needed if any instance is written to.
if not (tm._time.jd1.base if tm.masked else tm._time.jd1).flags["OWNDATA"]:
tm._id_cache = self._id_cache
return tm
def __copy__(self):
"""
Overrides the default behavior of the `copy.copy` function in
the python stdlib to behave like `Time.copy`. Does *not* make a
copy of the JD arrays - only copies by reference.
"""
return self.replicate()
def __deepcopy__(self, memo):
"""
Overrides the default behavior of the `copy.deepcopy` function
in the python stdlib to behave like `Time.copy`. Does make a
copy of the JD arrays.
"""
return self.copy()
def _advanced_index(self, indices, axis=None, keepdims=False):
"""Turn argmin, argmax output into an advanced index.
Argmin, argmax output contains indices along a given axis in an array
shaped like the other dimensions. To use this to get values at the
correct location, a list is constructed in which the other axes are
indexed sequentially. If ``keepdims`` is ``True``, the net result is
the same as constructing an index grid with ``np.ogrid`` and then
replacing the ``axis`` item with ``indices``, with its shape expanded
at ``axis``. If ``keepdims`` is ``False``, the result is the same but
with the ``axis`` dimension removed from all list entries.
If ``axis`` is ``None``, this calls :func:`~numpy.unravel_index`.
Parameters
----------
indices : array
Output of argmin or argmax.
axis : int or None
axis along which argmin or argmax was used.
keepdims : bool
Whether to construct indices that keep or remove the axis along
which argmin or argmax was used. Default: ``False``.
Returns
-------
advanced_index : list of arrays
Suitable for use as an advanced index.
"""
if axis is None:
return np.unravel_index(indices, self.shape)
ndim = self.ndim
if axis < 0:
axis = axis + ndim
if keepdims and indices.ndim < self.ndim:
indices = np.expand_dims(indices, axis)
index = [
indices
if i == axis
else np.arange(s).reshape(
(1,) * (i if keepdims or i < axis else i - 1)
+ (s,)
+ (1,) * (ndim - i - (1 if keepdims or i > axis else 2))
)
for i, s in enumerate(self.shape)
]
return tuple(index)
def argmin(self, axis=None, out=None):
"""Return indices of the minimum values along the given axis.
This is similar to :meth:`~numpy.ndarray.argmin`, but adapted to ensure
that the full precision given by the two doubles ``jd1`` and ``jd2``
is used. See :func:`~numpy.argmin` for detailed documentation.
"""
# First get the minimum at normal precision.
jd1, jd2 = self._time.jd1, self._time.jd2
approx = np.min(jd1 + jd2, axis, keepdims=True)
# Approx is very close to the true minimum, and by subtracting it at
# full precision, all numbers near 0 can be represented correctly,
# so we can be sure we get the true minimum.
# The below is effectively what would be done for
# dt = (self - self.__class__(approx, format='jd')).jd
# which translates to:
# approx_jd1, approx_jd2 = day_frac(approx, 0.)
# dt = (self.jd1 - approx_jd1) + (self.jd2 - approx_jd2)
dt = (jd1 - approx) + jd2
return dt.argmin(axis, out)
def argmax(self, axis=None, out=None):
"""Return indices of the maximum values along the given axis.
This is similar to :meth:`~numpy.ndarray.argmax`, but adapted to ensure
that the full precision given by the two doubles ``jd1`` and ``jd2``
is used. See :func:`~numpy.argmax` for detailed documentation.
"""
# For procedure, see comment on argmin.
jd1, jd2 = self._time.jd1, self._time.jd2
approx = np.max(jd1 + jd2, axis, keepdims=True)
dt = (jd1 - approx) + jd2
return dt.argmax(axis, out)
def argsort(self, axis=-1, kind="stable"):
"""Returns the indices that would sort the time array.
This is similar to :meth:`~numpy.ndarray.argsort`, but adapted to ensure that
the full precision given by the two doubles ``jd1`` and ``jd2`` is used, and
that corresponding attributes are copied. Internally, it uses
:func:`~numpy.lexsort`, and hence no sort method can be chosen.
Parameters
----------
axis : int, optional
Axis along which to sort. Default is -1, which means sort along the last
axis.
kind : 'stable', optional
Sorting is done with :func:`~numpy.lexsort` so this argument is ignored, but
kept for compatibility with :func:`~numpy.argsort`. The sorting is stable,
meaning that the order of equal elements is preserved.
Returns
-------
indices : ndarray
An array of indices that sort the time array.
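Examples
--------
An illustrative usage (dates arbitrary)::
    >>> t = Time(['2001-03-01', '2001-01-01', '2001-02-01'])
    >>> t.argsort()
    array([1, 2, 0])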
"""
# For procedure, see comment on argmin.
jd1, jd2 = self._time.jd1, self._time.jd2
approx = jd1 + jd2
remainder = (jd1 - approx) + jd2
if axis is None:
return np.lexsort((remainder.ravel(), approx.ravel()))
else:
return np.lexsort(keys=(remainder, approx), axis=axis)
def min(self, axis=None, out=None, keepdims=False):
"""Minimum along a given axis.
This is similar to :meth:`~numpy.ndarray.min`, but adapted to ensure
that the full precision given by the two doubles ``jd1`` and ``jd2``
is used, and that corresponding attributes are copied.
Note that the ``out`` argument is present only for compatibility with
``np.min``; since `Time` instances are immutable, it is not possible
to have an actual ``out`` to store the result in.
"""
if out is not None:
raise ValueError(
"Since `Time` instances are immutable, ``out`` "
"cannot be set to anything but ``None``."
)
return self[self._advanced_index(self.argmin(axis), axis, keepdims)]
def max(self, axis=None, out=None, keepdims=False):
"""Maximum along a given axis.
This is similar to :meth:`~numpy.ndarray.max`, but adapted to ensure
that the full precision given by the two doubles ``jd1`` and ``jd2``
is used, and that corresponding attributes are copied.
Note that the ``out`` argument is present only for compatibility with
``np.max``; since `Time` instances are immutable, it is not possible
to have an actual ``out`` to store the result in.
"""
if out is not None:
raise ValueError(
"Since `Time` instances are immutable, ``out`` "
"cannot be set to anything but ``None``."
)
return self[self._advanced_index(self.argmax(axis), axis, keepdims)]
def _ptp_impl(self, axis=None, out=None, keepdims=False):
if out is not None:
raise ValueError(
"Since `Time` instances are immutable, ``out`` "
"cannot be set to anything but ``None``."
)
return self.max(axis, keepdims=keepdims) - self.min(axis, keepdims=keepdims)
def __array_function__(self, function, types, args, kwargs):
if function is np.ptp:
return self._ptp_impl(*args[1:], **kwargs)
else:
return super().__array_function__(function, types, args, kwargs)
@deprecated("7.0", alternative="np.ptp")
def ptp(self, axis=None, out=None, keepdims=False):
"""Peak to peak (maximum - minimum) along a given axis.
This method is similar to the :func:`numpy.ptp` function, but
adapted to ensure that the full precision given by the two doubles
``jd1`` and ``jd2`` is used.
Note that the ``out`` argument is present only for compatibility with
`~numpy.ptp`; since `Time` instances are immutable, it is not possible
to have an actual ``out`` to store the result in.
"""
return self._ptp_impl(axis, out, keepdims)
def sort(self, axis=-1):
"""Return a copy sorted along the specified axis.
This is similar to :meth:`~numpy.ndarray.sort`, but internally uses
indexing with :func:`~numpy.lexsort` to ensure that the full precision
given by the two doubles ``jd1`` and ``jd2`` is kept, and that
corresponding attributes are properly sorted and copied as well.
Parameters
----------
axis : int or None
Axis to be sorted. If ``None``, the flattened array is sorted.
By default, sort over the last axis.
"""
return self[self._advanced_index(self.argsort(axis), axis, keepdims=True)]
def mean(self, axis=None, dtype=None, out=None, keepdims=False, *, where=True):
"""Mean along a given axis.
This is similar to :meth:`~numpy.ndarray.mean`, but adapted to ensure
that the full precision given by the two doubles ``jd1`` and ``jd2`` is
used, and that corresponding attributes are copied.
Note that the ``out`` argument is present only for compatibility with
``np.mean``; since `Time` instances are immutable, it is not possible
to have an actual ``out`` to store the result in.
Similarly, the ``dtype`` argument is also present for compatibility
only; it has no meaning for `Time`.
Parameters
----------
axis : None or int or tuple of ints, optional
Axis or axes along which the means are computed. The default is to
compute the mean of the flattened array.
dtype : None
Only present for compatibility with :meth:`~numpy.ndarray.mean`,
must be `None`.
out : None
Only present for compatibility with :meth:`~numpy.ndarray.mean`,
must be `None`.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the input array.
where : array_like of bool, optional
Elements to include in the mean. See `~numpy.ufunc.reduce` for
details.
Returns
-------
m : Time
A new Time instance containing the mean values
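Examples
--------
A simple illustrative average (values arbitrary)::
    >>> t = Time([51544.0, 51546.0], format='mjd', scale='tai')
    >>> print(t.mean().mjd)
    51545.0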
"""
if dtype is not None:
raise ValueError("Cannot set ``dtype`` on `Time` instances")
if out is not None:
raise ValueError(
"Since `Time` instances are immutable, ``out`` "
"cannot be set to anything but ``None``."
)
where = where & ~self.mask
where_broadcasted = np.broadcast_to(where, self.shape)
kwargs = dict(
axis=axis,
keepdims=keepdims,
where=where,
)
divisor = np.sum(where_broadcasted, axis=axis, keepdims=keepdims)
if np.any(divisor == 0):
raise ValueError(
"Mean over zero elements is not supported as it would give an undefined"
" time;see issue https://github.com/astropy/astropy/issues/6509"
)
jd1, jd2 = day_frac(
val1=np.sum(np.ma.getdata(self.jd1), **kwargs),
val2=np.sum(np.ma.getdata(self.jd2), **kwargs),
divisor=divisor,
)
result = type(self)(
val=jd1,
val2=jd2,
format="jd",
scale=self.scale,
copy=COPY_IF_NEEDED,
)
result.format = self.format
return result
@lazyproperty
def _id_cache(self):
"""Cache of all instances that share underlying data.
Helps to ensure all cached data can be deleted if the
underlying data is changed.
"""
return WeakValueDictionary({id(self): self})
@_id_cache.setter
def _id_cache(self, _id_cache):
_id_cache[id(self)] = self
# lazyproperty will do the actual storing of the result.
@lazyproperty
def cache(self):
"""
Return the cache associated with this instance.
"""
return defaultdict(dict)
@cache.deleter
def cache(self):
for instance in self._id_cache.values():
instance.cache.clear()
def __getattr__(self, attr):
"""
Get dynamic attributes to output format or do timescale conversion.
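For example, ``t.tt`` converts to the TT scale and ``t.mjd`` returns the
values in MJD format (illustrative)::
    >>> t = Time('2020-01-01 00:00:00', scale='utc')
    >>> t.tt.scale
    'tt'
    >>> print(round(t.mjd, 1))
    58849.0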
"""
if attr in self.SCALES and self.scale is not None:
cache = self.cache["scale"]
if attr not in cache:
if attr == self.scale:
tm = self
else:
tm = self.replicate()
tm._set_scale(attr)
if tm.shape:
# Prevent future modification of cached array-like object
tm.writeable = False
cache[attr] = tm
return cache[attr]
elif attr in self.FORMATS:
return self.to_value(attr, subfmt=None)
elif attr in TIME_SCALES: # allowed ones done above (self.SCALES)
if self.scale is None:
raise ScaleValueError(
"Cannot convert TimeDelta with "
"undefined scale to any defined scale."
)
else:
raise ScaleValueError(
f"Cannot convert {self.__class__.__name__} with scale "
f"'{self.scale}' to scale '{attr}'"
)
else:
# Should raise AttributeError
return self.__getattribute__(attr)
def __dir__(self):
return sorted(set(super().__dir__()) | set(self.SCALES) | set(self.FORMATS))
def _match_shape(self, val):
"""
Ensure that `val` is matched to length of self. If val has length 1
then broadcast, otherwise cast to double and make sure shape matches.
"""
val = _make_array(val, copy=True) # be conservative and copy
if val.size > 1 and val.shape != self.shape:
try:
# check the value can be broadcast to the shape of self.
val = np.broadcast_to(val, self.shape, subok=True)
except Exception:
raise ValueError(
"Attribute shape must match or be broadcastable to that of "
"Time object. Typically, give either a single value or "
"one for each time."
)
return val
def _time_comparison(self, other, op):
"""If other is of same class as self, compare difference in self.scale.
Otherwise, return NotImplemented.
"""
if other.__class__ is not self.__class__:
try:
other = self.__class__(other, scale=self.scale)
except Exception:
# Let other have a go.
return NotImplemented
if (self.scale is not None and self.scale not in other.SCALES) or (
other.scale is not None and other.scale not in self.SCALES
):
# Other will also not be able to do it, so raise a TypeError
# immediately, allowing us to explain why it doesn't work.
raise TypeError(
f"Cannot compare {self.__class__.__name__} instances with "
f"scales '{self.scale}' and '{other.scale}'"
)
if self.scale is not None and other.scale is not None:
other = getattr(other, self.scale)
return op((self.jd1 - other.jd1) + (self.jd2 - other.jd2), 0.0)
def __lt__(self, other):
return self._time_comparison(other, operator.lt)
def __le__(self, other):
return self._time_comparison(other, operator.le)
def __eq__(self, other):
"""
If other is an incompatible object for comparison, return `False`.
Otherwise, return `True` if the time difference between self and
other is zero.
"""
return self._time_comparison(other, operator.eq)
def __ne__(self, other):
"""
If other is an incompatible object for comparison, return `True`.
Otherwise, return `False` if the time difference between self and
other is zero.
"""
return self._time_comparison(other, operator.ne)
def __gt__(self, other):
return self._time_comparison(other, operator.gt)
def __ge__(self, other):
return self._time_comparison(other, operator.ge)
class Time(TimeBase):
"""
Represent and manipulate times and dates for astronomy.
A `Time` object is initialized with one or more times in the ``val``
argument. The input times in ``val`` must conform to the specified
``format`` and must correspond to the specified time ``scale``. The
optional ``val2`` time input should be supplied only for numeric input
formats (e.g. JD) where very high precision (better than 64-bit precision)
is required.
The allowed values for ``format`` can be listed with::
>>> list(Time.FORMATS)
['jd', 'mjd', 'decimalyear', 'unix', 'unix_tai', 'cxcsec', 'gps', 'plot_date',
'stardate', 'datetime', 'ymdhms', 'iso', 'isot', 'yday', 'datetime64',
'fits', 'byear', 'jyear', 'byear_str', 'jyear_str']
See also: http://docs.astropy.org/en/stable/time/
Parameters
----------
val : sequence, ndarray, number, str, bytes, or `~astropy.time.Time` object
Value(s) to initialize the time or times. Bytes are decoded as ascii.
val2 : sequence, ndarray, or number; optional
Value(s) to initialize the time or times. Only used for numerical
input, to help preserve precision.
format : str, optional
Format of input value(s), specifying how to interpret them (e.g., ISO, JD, or
Unix time). By default, the same format will be used for output representation.
scale : str, optional
Time scale of input value(s), must be one of the following:
('tai', 'tcb', 'tcg', 'tdb', 'tt', 'ut1', 'utc')
precision : int, optional
Digits of precision in string representation of time
in_subfmt : str, optional
Unix glob to select subformats for parsing input times
out_subfmt : str, optional
Unix glob to select subformat for outputting times
location : `~astropy.coordinates.EarthLocation` or tuple, optional
If given as a tuple, it should be able to initialize an
EarthLocation instance, i.e., either contain 3 items with units of
length for geocentric coordinates, or contain a longitude, latitude,
and an optional height for geodetic coordinates.
Can be a single location, or one for each input time.
If not given, assumed to be the center of the Earth for time scale
transformations to and from the solar-system barycenter.
copy : bool, optional
Make a copy of the input values
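Examples
--------
A few illustrative ways to create a `Time` (values arbitrary)::
    >>> t = Time('2020-01-01T12:00:00', format='isot', scale='utc')
    >>> t = Time(2458849.5, format='jd', scale='tdb')
    >>> t = Time(['1999-01-01', '2010-01-01'])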
"""
SCALES = TIME_SCALES
"""List of time scales"""
FORMATS = TIME_FORMATS
"""Dict of time formats"""
def __new__(
cls,
val,
val2=None,
format=None,
scale=None,
precision=None,
in_subfmt=None,
out_subfmt=None,
location=None,
copy=False,
):
if isinstance(val, Time):
self = val.replicate(format=format, copy=copy, cls=cls)
else:
self = super().__new__(cls)
return self
def __init__(
self,
val,
val2=None,
format=None,
scale=None,
precision=None,
in_subfmt=None,
out_subfmt=None,
location=None,
copy=COPY_IF_NEEDED,
):
if location is not None:
from astropy.coordinates import EarthLocation
if isinstance(location, EarthLocation):
self._location = location
else:
self._location = EarthLocation(*location)
if self._location.size == 1:
self._location = self._location.squeeze()
elif not hasattr(self, "_location"):
self._location = None
if isinstance(val, Time):
# Update _time formatting parameters if explicitly specified
if precision is not None:
self._time.precision = precision
if in_subfmt is not None:
self._time.in_subfmt = in_subfmt
if out_subfmt is not None:
self._time.out_subfmt = out_subfmt
self.SCALES = TIME_TYPES[self.scale]
if scale is not None:
self._set_scale(scale)
else:
self._init_from_vals(
val, val2, format, scale, copy, precision, in_subfmt, out_subfmt
)
self.SCALES = TIME_TYPES[self.scale]
if self.location is not None and (
self.location.size > 1 and self.location.shape != self.shape
):
try:
# check the location can be broadcast to self's shape.
self._location = np.broadcast_to(self._location, self.shape, subok=True)
except Exception as err:
raise ValueError(
f"The location with shape {self.location.shape} cannot be "
f"broadcast against time with shape {self.shape}. "
"Typically, either give a single location or one for each time."
) from err
def _make_value_equivalent(self, item, value):
"""Coerce setitem value into an equivalent Time object."""
# If there is a vector location then broadcast to the Time shape
# and then select with ``item``
if self.location is not None and self.location.shape:
self_location = np.broadcast_to(self.location, self.shape, subok=True)[item]
else:
self_location = self.location
if isinstance(value, Time):
# Make sure locations are compatible. Location can be either None or
# a Location object.
if self_location is None and value.location is None:
match = True
elif (self_location is None and value.location is not None) or (
self_location is not None and value.location is None
):
match = False
else:
match = np.all(self_location == value.location)
if not match:
raise ValueError(
"cannot set to Time with different location: expected "
f"location={self_location} and got location={value.location}"
)
else:
try:
value = self.__class__(value, scale=self.scale, location=self_location)
except Exception:
try:
value = self.__class__(
value,
scale=self.scale,
format=self.format,
location=self_location,
)
except Exception as err:
raise ValueError(
f"cannot convert value to a compatible Time object: {err}"
)
return value
@classmethod
def now(cls):
"""
Creates a new object corresponding to the instant in time this
method is called.
.. note::
"Now" is determined using the `~datetime.datetime.now`
function, so its accuracy and precision is determined by that
function. Generally that means it is set by the accuracy of
your system clock. The timezone is set to UTC.
Returns
-------
nowtime : :class:`~astropy.time.Time`
A new `Time` object (or a subclass of `Time` if this is called from
such a subclass) at the current time.
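Examples
--------
An illustrative usage::
    >>> t = Time.now()
    >>> t.scale
    'utc'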
"""
# call `now` immediately to be sure it's ASAP
dtnow = datetime.now(tz=UTC)
return cls(val=dtnow, format="datetime", scale="utc")
info = TimeInfo()
@classmethod
def strptime(cls, time_string, format_string, **kwargs):
"""
Parse a string to a Time according to a format specification.
See `time.strptime` documentation for format specification.
>>> Time.strptime('2012-Jun-30 23:59:60', '%Y-%b-%d %H:%M:%S')
<Time object: scale='utc' format='isot' value=2012-06-30T23:59:60.000>
Parameters
----------
time_string : str, sequence, or ndarray
Objects containing time data of type string
format_string : str
String specifying format of time_string.
kwargs : dict
Any keyword arguments for ``Time``. If the ``format`` keyword
argument is present, this will be used as the Time format.
Returns
-------
time_obj : `~astropy.time.Time`
A new `~astropy.time.Time` object corresponding to the input
``time_string``.
"""
time_array = np.asarray(time_string)
if time_array.dtype.kind not in ("U", "S"):
raise TypeError(
"Expected type is string, a bytes-like object or a sequence "
f"of these. Got dtype '{time_array.dtype.kind}'"
)
to_string = (
str
if time_array.dtype.kind == "U"
else lambda x: str(x.item(), encoding="ascii")
)
iterator = np.nditer([time_array, None], op_dtypes=[time_array.dtype, "U30"])
for time, formatted in iterator:
tt, fraction = _strptime._strptime(to_string(time), format_string)
time_tuple = tt[:6] + (fraction,)
formatted[...] = "{:04}-{:02}-{:02}T{:02}:{:02}:{:02}.{:06}".format(
*time_tuple
)
format = kwargs.pop("format", None)
out = cls(*iterator.operands[1:], format="isot", **kwargs)
if format is not None:
out.format = format
return out
def strftime(self, format_spec):
"""
Convert Time to a string or a numpy.array of strings according to a
format specification.
See `time.strftime` documentation for format specification.
Parameters
----------
format_spec : str
Format definition of return string.
Returns
-------
formatted : str or numpy.array
String or numpy.array of strings formatted according to the given
format string.
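Examples
--------
An illustrative conversion (the format string is arbitrary)::
    >>> t = Time('2018-06-01 03:04:05.678', scale='utc')
    >>> t.strftime('%Y-%m-%d %H:%M:%S')
    '2018-06-01 03:04:05'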
"""
formatted_strings = []
for sk in self.replicate("iso")._time.str_kwargs():
date_tuple = date(sk["year"], sk["mon"], sk["day"]).timetuple()
datetime_tuple = (
sk["year"],
sk["mon"],
sk["day"],
sk["hour"],
sk["min"],
sk["sec"],
date_tuple[6],
date_tuple[7],
-1,
)
fmtd_str = format_spec
if "%f" in fmtd_str:
fmtd_str = fmtd_str.replace(
"%f",
"{frac:0{precision}}".format(
frac=sk["fracsec"], precision=self.precision
),
)
fmtd_str = strftime(fmtd_str, datetime_tuple)
formatted_strings.append(fmtd_str)
if self.isscalar:
return formatted_strings[0]
else:
return np.array(formatted_strings).reshape(self.shape)
def light_travel_time(
self, skycoord, kind="barycentric", location=None, ephemeris=None
):
"""Light travel time correction to the barycentre or heliocentre.
The frame transformations used to calculate the location of the solar
system barycentre and the heliocentre rely on the erfa routine epv00,
which is consistent with the JPL DE405 ephemeris to an accuracy of
11.2 km, corresponding to a light travel time of 4 microseconds.
The routine assumes the source(s) are at large distance, i.e., neglects
finite-distance effects.
Parameters
----------
skycoord : `~astropy.coordinates.SkyCoord`
The sky location to calculate the correction for.
kind : str, optional
``'barycentric'`` (default) or ``'heliocentric'``
location : `~astropy.coordinates.EarthLocation`, optional
The location of the observatory to calculate the correction for.
If no location is given, the ``location`` attribute of the Time
object is used
ephemeris : str, optional
Solar system ephemeris to use (e.g., 'builtin', 'jpl'). By default,
use the one set with ``astropy.coordinates.solar_system_ephemeris.set``.
For more information, see `~astropy.coordinates.solar_system_ephemeris`.
Returns
-------
time_offset : `~astropy.time.TimeDelta`
The time offset between the barycentre or heliocentre and Earth,
in TDB seconds. Should be added to the original time to get the
time in the Solar system barycentre or the heliocentre. The time
conversion to BJD will then include the relativistic correction as well.
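Examples
--------
An illustrative sketch (the target and site are arbitrary; this may need
internet access for the site registry and the source name lookup)::
    >>> from astropy.coordinates import EarthLocation, SkyCoord
    >>> site = EarthLocation.of_site('greenwich')  # doctest: +SKIP
    >>> t = Time('2019-04-25 00:00:00', scale='utc', location=site)  # doctest: +SKIP
    >>> target = SkyCoord.from_name('M31')  # doctest: +SKIP
    >>> ltt = t.light_travel_time(target)  # doctest: +SKIP
    >>> t_barycentre = t.tdb + ltt  # doctest: +SKIP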
"""
if kind.lower() not in ("barycentric", "heliocentric"):
raise ValueError(
"'kind' parameter must be one of 'heliocentric' or 'barycentric'"
)
if location is None:
if self.location is None:
raise ValueError(
"An EarthLocation needs to be set or passed in to calculate bary- "
"or heliocentric corrections"
)
location = self.location
from astropy.coordinates import (
GCRS,
HCRS,
ICRS,
CartesianRepresentation,
UnitSphericalRepresentation,
solar_system_ephemeris,
)
# ensure sky location is ICRS compatible
if not skycoord.is_transformable_to(ICRS()):
raise ValueError("Given skycoord is not transformable to the ICRS")
# get location of observatory in ITRS coordinates at this Time
try:
itrs = location.get_itrs(obstime=self)
except Exception:
raise ValueError(
"Supplied location does not have a valid `get_itrs` method"
)
with solar_system_ephemeris.set(ephemeris):
if kind.lower() == "heliocentric":
# convert to heliocentric coordinates, aligned with ICRS
cpos = itrs.transform_to(HCRS(obstime=self)).cartesian.xyz
else:
# first we need to convert to GCRS coordinates with the correct
# obstime, since ICRS coordinates have no frame time
gcrs_coo = itrs.transform_to(GCRS(obstime=self))
# convert to barycentric (BCRS) coordinates, aligned with ICRS
cpos = gcrs_coo.transform_to(ICRS()).cartesian.xyz
# get unit ICRS vector to star
spos = (
skycoord.icrs.represent_as(UnitSphericalRepresentation)
.represent_as(CartesianRepresentation)
.xyz
)
# Move X,Y,Z to last dimension, to enable possible broadcasting below.
cpos = np.rollaxis(cpos, 0, cpos.ndim)
spos = np.rollaxis(spos, 0, spos.ndim)
# calculate light travel time correction
tcor_val = (spos * cpos).sum(axis=-1) / const.c
return TimeDelta(tcor_val, scale="tdb")
def earth_rotation_angle(self, longitude=None):
"""Calculate local Earth rotation angle.
Parameters
----------
longitude : `~astropy.units.Quantity`, `~astropy.coordinates.EarthLocation`, str, or None; optional
The longitude on the Earth at which to compute the Earth rotation
angle (taken from a location as needed). If `None` (default), taken
from the ``location`` attribute of the Time instance. If the special
string 'tio', the result will be relative to the Terrestrial
Intermediate Origin (TIO) (i.e., the output of `~erfa.era00`).
Returns
-------
`~astropy.coordinates.Longitude`
Local Earth rotation angle with units of hourangle.
See Also
--------
astropy.time.Time.sidereal_time
References
----------
IAU 2006 NFA Glossary
(currently located at: https://syrte.obspm.fr/iauWGnfa/NFA_Glossary.html)
Notes
-----
The difference between apparent sidereal time and Earth rotation angle
is the equation of the origins, which is the angle between the Celestial
Intermediate Origin (CIO) and the equinox. Applying apparent sidereal
time to the hour angle yields the true apparent Right Ascension with
respect to the equinox, while applying the Earth rotation angle yields
the intermediate (CIRS) Right Ascension with respect to the CIO.
The result includes the TIO locator (s'), which positions the Terrestrial
Intermediate Origin on the equator of the Celestial Intermediate Pole (CIP)
and is rigorously corrected for polar motion
(except when ``longitude='tio'``).
"""
if isinstance(longitude, str) and longitude == "tio":
longitude = 0
include_tio = False
else:
include_tio = True
return self._sid_time_or_earth_rot_ang(
longitude=longitude,
function=erfa.era00,
scales=("ut1",),
include_tio=include_tio,
)
def sidereal_time(self, kind, longitude=None, model=None):
"""Calculate sidereal time.
Parameters
----------
kind : str
``'mean'`` or ``'apparent'``, i.e., accounting for precession
only, or also for nutation.
longitude : `~astropy.units.Quantity`, `~astropy.coordinates.EarthLocation`, str, or None; optional
The longitude on the Earth at which to compute the Earth rotation
angle (taken from a location as needed). If `None` (default), taken
from the ``location`` attribute of the Time instance. If the special
string 'greenwich' or 'tio', the result will be relative to longitude
0 for models before 2000, and relative to the Terrestrial Intermediate
Origin (TIO) for later ones (i.e., the output of the relevant ERFA
function that calculates greenwich sidereal time).
model : str or None; optional
Precession (and nutation) model to use. The available ones are:
- {0}: {1}
- {2}: {3}
If `None` (default), the last (most recent) one from the appropriate
list above is used.
Returns
-------
`~astropy.coordinates.Longitude`
Local sidereal time, with units of hourangle.
See Also
--------
astropy.time.Time.earth_rotation_angle
References
----------
IAU 2006 NFA Glossary
(currently located at: https://syrte.obspm.fr/iauWGnfa/NFA_Glossary.html)
Notes
-----
The difference between apparent sidereal time and Earth rotation angle
is the equation of the origins, which is the angle between the Celestial
Intermediate Origin (CIO) and the equinox. Applying apparent sidereal
time to the hour angle yields the true apparent Right Ascension with
respect to the equinox, while applying the Earth rotation angle yields
the intermediate (CIRS) Right Ascension with respect to the CIO.
For the IAU precession models from 2000 onwards, the result includes the
TIO locator (s'), which positions the Terrestrial Intermediate Origin on
the equator of the Celestial Intermediate Pole (CIP) and is rigorously
corrected for polar motion (except when ``longitude='tio'`` or ``'greenwich'``).
""" # (docstring is formatted below)
if kind.lower() not in SIDEREAL_TIME_MODELS:
raise ValueError(
"The kind of sidereal time has to be "
+ " or ".join(sorted(SIDEREAL_TIME_MODELS))
)
available_models = SIDEREAL_TIME_MODELS[kind.lower()]
if model is None:
model = sorted(available_models)[-1]
elif model.upper() not in available_models:
raise ValueError(
f"Model {model} not implemented for {kind} sidereal time; "
f"available models are {sorted(available_models)}"
)
model_kwargs = available_models[model.upper()]
if isinstance(longitude, str) and longitude in ("tio", "greenwich"):
longitude = 0
model_kwargs = model_kwargs.copy()
model_kwargs["include_tio"] = False
return self._sid_time_or_earth_rot_ang(longitude=longitude, **model_kwargs)
if isinstance(sidereal_time.__doc__, str):
sidereal_time.__doc__ = sidereal_time.__doc__.format(
"apparent",
sorted(SIDEREAL_TIME_MODELS["apparent"]),
"mean",
sorted(SIDEREAL_TIME_MODELS["mean"]),
)
def _sid_time_or_earth_rot_ang(self, longitude, function, scales, include_tio=True):
"""Calculate a local sidereal time or Earth rotation angle.
Parameters
----------
longitude : `~astropy.units.Quantity`, `~astropy.coordinates.EarthLocation`, str, or None; optional
The longitude on the Earth at which to compute the Earth rotation
angle (taken from a location as needed). If `None` (default), taken
from the ``location`` attribute of the Time instance.
function : callable
The ERFA function to use.
scales : tuple of str
The time scales that the function requires on input.
include_tio : bool, optional
Whether to include the TIO locator corrected for polar motion.
Should be `False` for pre-2000 IAU models. Default: `True`.
Returns
-------
`~astropy.coordinates.Longitude`
Local sidereal time or Earth rotation angle, with units of hourangle.
"""
from astropy.coordinates import EarthLocation, Longitude
from astropy.coordinates.builtin_frames.utils import get_polar_motion
from astropy.coordinates.matrix_utilities import rotation_matrix
if longitude is None:
if self.location is None:
raise ValueError(
"No longitude is given but the location for "
"the Time object is not set."
)
longitude = self.location.lon
elif isinstance(longitude, EarthLocation):
longitude = longitude.lon
else:
# Sanity check on input; default unit is degree.
longitude = Longitude(longitude, u.degree, copy=COPY_IF_NEEDED)
theta = self._call_erfa(function, scales)
if include_tio:
# TODO: this duplicates part of coordinates.erfa_astrom.ErfaAstrom.apio;
# maybe possible to factor out to one or the other.
sp = self._call_erfa(erfa.sp00, ("tt",))
xp, yp = get_polar_motion(self)
# Form the rotation matrix, CIRS to apparent [HA,Dec].
r = (
rotation_matrix(longitude, "z")
@ rotation_matrix(-yp, "x", unit=u.radian)
@ rotation_matrix(-xp, "y", unit=u.radian)
@ rotation_matrix(theta + sp, "z", unit=u.radian)
)
# Solve for angle.
angle = np.arctan2(r[..., 0, 1], r[..., 0, 0]) << u.radian
else:
angle = longitude + (theta << u.radian)
return Longitude(angle, u.hourangle)
def _call_erfa(self, function, scales):
# TODO: allow erfa functions to be used on Time with __array_ufunc__.
erfa_parameters = [
getattr(getattr(self, scale)._time, jd_part)
for scale in scales
for jd_part in ("jd1", "jd2")
]
result = function(*erfa_parameters)
if self.masked:
result[self.mask] = np.nan
return result
def get_delta_ut1_utc(self, iers_table=None, return_status=False):
"""Find UT1 - UTC differences by interpolating in IERS Table.
Parameters
----------
iers_table : `~astropy.utils.iers.IERS`, optional
Table containing UT1-UTC differences from IERS Bulletins A
and/or B. Default: `~astropy.utils.iers.earth_orientation_table`
(which in turn defaults to the combined version provided by
`~astropy.utils.iers.IERS_Auto`).
return_status : bool
Whether to return status values. If `False` (default), iers
raises `IndexError` if any time is out of the range
covered by the IERS table.
Returns
-------
ut1_utc : float or float array
UT1-UTC, interpolated in IERS Table
status : int or int array
Status values (if ``return_status=True``)::
``astropy.utils.iers.FROM_IERS_B``
``astropy.utils.iers.FROM_IERS_A``
``astropy.utils.iers.FROM_IERS_A_PREDICTION``
``astropy.utils.iers.TIME_BEFORE_IERS_RANGE``
``astropy.utils.iers.TIME_BEYOND_IERS_RANGE``
Notes
-----
In normal usage, UT1-UTC differences are calculated automatically
on the first instance ut1 is needed.
Examples
--------
To check in code whether any times are before the IERS table range::
>>> from astropy.utils.iers import TIME_BEFORE_IERS_RANGE
>>> t = Time(['1961-01-01', '2000-01-01'], scale='utc')
>>> delta, status = t.get_delta_ut1_utc(return_status=True) # doctest: +REMOTE_DATA
>>> status == TIME_BEFORE_IERS_RANGE # doctest: +REMOTE_DATA
array([ True, False]...)
"""
if iers_table is None:
from astropy.utils.iers import earth_orientation_table
iers_table = earth_orientation_table.get()
return iers_table.ut1_utc(self.utc, return_status=return_status)
# Property for ERFA DUT arg = UT1 - UTC
def _get_delta_ut1_utc(self, jd1=None, jd2=None):
"""
Get ERFA DUT arg = UT1 - UTC. This getter takes optional jd1 and
jd2 args because it gets called that way when converting time scales.
If delta_ut1_utc is not yet set, this will interpolate them from
the IERS table.
"""
# Sec. 4.3.1: the arg DUT is the quantity delta_UT1 = UT1 - UTC in
# seconds. It is obtained from tables published by the IERS.
if not hasattr(self, "_delta_ut1_utc"):
from astropy.utils.iers import earth_orientation_table
iers_table = earth_orientation_table.get()
# jd1, jd2 are normally set (see above), except if delta_ut1_utc
# is accessed directly; ensure we behave as expected for that case
if jd1 is None:
self_utc = self.utc
jd1, jd2 = self_utc._time.jd1, self_utc._time.jd2
scale = "utc"
else:
scale = self.scale
# interpolate UT1-UTC in IERS table
delta = iers_table.ut1_utc(jd1, jd2)
# if we interpolated using UT1 jds, we may be off by one
# second near leap seconds (and very slightly off elsewhere)
if scale == "ut1":
# calculate UTC using the offset we got; the ERFA routine
# is tolerant of leap seconds, so will do this right
jd1_utc, jd2_utc = erfa.ut1utc(jd1, jd2, delta.to_value(u.s))
# calculate a better estimate using the nearly correct UTC
delta = iers_table.ut1_utc(jd1_utc, jd2_utc)
self._set_delta_ut1_utc(delta)
return self._delta_ut1_utc
def _set_delta_ut1_utc(self, val):
del self.cache
if hasattr(val, "to"): # Matches Quantity but also TimeDelta.
val = val.to(u.second).value
val = self._match_shape(val)
self._delta_ut1_utc = val
# Note can't use @property because _get_delta_ut1_utc is explicitly
# called with the optional jd1 and jd2 args.
delta_ut1_utc = property(_get_delta_ut1_utc, _set_delta_ut1_utc)
"""UT1 - UTC time scale offset"""
# Property for ERFA DTR arg = TDB - TT
def _get_delta_tdb_tt(self, jd1=None, jd2=None):
if not hasattr(self, "_delta_tdb_tt"):
# If jd1 and jd2 are not provided (which is the case for property
# attribute access) then require that the time scale is TT or TDB.
# Otherwise the computations here are not correct.
if jd1 is None or jd2 is None:
if self.scale not in ("tt", "tdb"):
raise ValueError(
"Accessing the delta_tdb_tt attribute is only "
"possible for TT or TDB time scales"
)
else:
jd1 = self._time.jd1
jd2 = self._time.jd2
# First go from the current input time (which is either
# TDB or TT) to an approximate UT1. Since TT and TDB are
# pretty close (few msec?), assume TT. Similarly, since the
# UT1 terms are very small, use UTC instead of UT1.
njd1, njd2 = erfa.tttai(jd1, jd2)
njd1, njd2 = erfa.taiutc(njd1, njd2)
# subtract 0.5, so UT is fraction of the day from midnight
ut = day_frac(njd1 - 0.5, njd2)[1]
if self.location is None:
# Assume geocentric.
self._delta_tdb_tt = erfa.dtdb(jd1, jd2, ut, 0.0, 0.0, 0.0)
else:
location = self.location
# Geodetic params needed for d_tdb_tt()
lon = location.lon
rxy = np.hypot(location.x, location.y)
z = location.z
self._delta_tdb_tt = erfa.dtdb(
jd1,
jd2,
ut,
lon.to_value(u.radian),
rxy.to_value(u.km),
z.to_value(u.km),
)
return self._delta_tdb_tt
def _set_delta_tdb_tt(self, val):
del self.cache
if hasattr(val, "to"): # Matches Quantity but also TimeDelta.
val = val.to(u.second).value
val = self._match_shape(val)
self._delta_tdb_tt = val
# Note can't use @property because _get_delta_tdb_tt is explicitly
# called with the optional jd1 and jd2 args.
delta_tdb_tt = property(_get_delta_tdb_tt, _set_delta_tdb_tt)
"""TDB - TT time scale offset"""
def __sub__(self, other):
# T - Tdelta = T
# T - T = Tdelta
other_is_delta = not isinstance(other, Time)
if other_is_delta: # T - Tdelta
# Check other is really a TimeDelta or something that can initialize.
if not isinstance(other, TimeDelta):
try:
other = TimeDelta(other)
except Exception:
return NotImplemented
# we need a constant scale to calculate, which is guaranteed for
# TimeDelta, but not for Time (which can be UTC)
out = self.replicate()
if self.scale in other.SCALES:
if other.scale not in (out.scale, None):
other = getattr(other, out.scale)
else:
if other.scale is None:
out._set_scale("tai")
else:
if self.scale not in TIME_TYPES[other.scale]:
raise TypeError(
"Cannot subtract Time and TimeDelta instances "
f"with scales '{self.scale}' and '{other.scale}'"
)
out._set_scale(other.scale)
# remove attributes that are invalidated by changing time
for attr in ("_delta_ut1_utc", "_delta_tdb_tt"):
if hasattr(out, attr):
delattr(out, attr)
else: # T - T
# the scales should be compatible (e.g., cannot convert TDB to LOCAL)
if other.scale not in self.SCALES:
raise TypeError(
"Cannot subtract Time instances "
f"with scales '{self.scale}' and '{other.scale}'"
)
self_time = (
self._time if self.scale in TIME_DELTA_SCALES else self.tai._time
)
# set up TimeDelta, subtraction to be done shortly
out = TimeDelta(
self_time.jd1, self_time.jd2, format="jd", scale=self_time.scale
)
if other.scale != out.scale:
other = getattr(other, out.scale)
jd1 = out._time.jd1 - other._time.jd1
jd2 = out._time.jd2 - other._time.jd2
out._time.jd1, out._time.jd2 = day_frac(jd1, jd2)
if other_is_delta:
# Go back to left-side scale if needed
out._set_scale(self.scale)
return out
def __add__(self, other):
# T + Tdelta = T
# T + T = error
if isinstance(other, Time):
raise OperandTypeError(self, other, "+")
# Check other is really a TimeDelta or something that can initialize.
if not isinstance(other, TimeDelta):
try:
other = TimeDelta(other)
except Exception:
return NotImplemented
# ideally, we calculate in the scale of the Time item, since that is
# what we want the output in, but this may not be possible, since
# TimeDelta cannot be converted arbitrarily
out = self.replicate()
if self.scale in other.SCALES:
if other.scale not in (out.scale, None):
other = getattr(other, out.scale)
else:
if other.scale is None:
out._set_scale("tai")
else:
if self.scale not in TIME_TYPES[other.scale]:
raise TypeError(
"Cannot add Time and TimeDelta instances "
f"with scales '{self.scale}' and '{other.scale}'"
)
out._set_scale(other.scale)
# remove attributes that are invalidated by changing time
for attr in ("_delta_ut1_utc", "_delta_tdb_tt"):
if hasattr(out, attr):
delattr(out, attr)
jd1 = out._time.jd1 + other._time.jd1
jd2 = out._time.jd2 + other._time.jd2
out._time.jd1, out._time.jd2 = day_frac(jd1, jd2)
# Go back to left-side scale if needed
out._set_scale(self.scale)
return out
# Reverse addition is possible: <something-Tdelta-ish> + T
# but there is no case of <something> - T, so no __rsub__.
def __radd__(self, other):
return self.__add__(other)
def mean(self, axis=None, dtype=None, out=None, keepdims=False, *, where=True):
scale = self.scale
if scale == "utc":
self = self.tai
result = super().mean(
axis=axis, dtype=dtype, out=out, keepdims=keepdims, where=where
)
if scale == "utc":
result = result.utc
result.out_subfmt = self.out_subfmt
location = self.location
if self.location is not None:
if self.location.shape:
if axis is None:
axis_normalized = tuple(range(self.ndim))
elif isinstance(axis, int):
axis_normalized = (axis,)
else:
axis_normalized = axis
sl = [slice(None)] * self.location.ndim
for a in axis_normalized:
sl[a] = slice(0, 1)
if np.any(self.location != self.location[tuple(sl)]):
raise ValueError(
"`location` must be constant over the reduction axes."
)
if not keepdims:
for a in axis_normalized:
sl[a] = 0
location = self.location[tuple(sl)]
result._location = location
return result
def __array_function__(self, function, types, args, kwargs):
"""
Wrap numpy functions.
Parameters
----------
function : callable
Numpy function to wrap
types : iterable of classes
Classes that provide an ``__array_function__`` override. Can
in principle be used to interact with other classes. Below,
mostly passed on to `~numpy.ndarray`, which can only interact
with subclasses.
args : tuple
Positional arguments provided in the function call.
kwargs : dict
Keyword arguments provided in the function call.
"""
if function in CUSTOM_FUNCTIONS:
f = CUSTOM_FUNCTIONS[function]
return f(*args, **kwargs)
elif function in UNSUPPORTED_FUNCTIONS:
return NotImplemented
else:
return super().__array_function__(function, types, args, kwargs)
def to_datetime(self, timezone=None, leap_second_strict="raise"):
# TODO: this could likely go through to_value, as long as that
# had an **kwargs part that was just passed on to _time.
tm = self.replicate(format="datetime")
return tm._shaped_like_input(
tm._time.to_value(timezone, leap_second_strict=leap_second_strict)
)
to_datetime.__doc__ = TimeDatetime.to_value.__doc__
class TimeDeltaMissingUnitWarning(AstropyDeprecationWarning):
"""Warning for missing unit or format in TimeDelta."""
class TimeDelta(TimeBase):
"""
Represent the time difference between two times.
A TimeDelta object is initialized with one or more times in the ``val``
argument. The input times in ``val`` must conform to the specified
``format``. The optional ``val2`` time input should be supplied only for
numeric input formats (e.g. JD) where very high precision (better than
64-bit precision) is required.
The allowed values for ``format`` can be listed with::
>>> list(TimeDelta.FORMATS)
['sec', 'jd', 'datetime', 'quantity_str']
Note that for time differences, the scale can be among three groups:
geocentric ('tai', 'tt', 'tcg'), barycentric ('tcb', 'tdb'), and rotational
('ut1'). Within each of these, the scales for time differences are the
same. Conversion between geocentric and barycentric is possible, as there
is only a scale factor change, but one cannot convert to or from 'ut1', as
this requires knowledge of the actual times, not just their difference. For
a similar reason, 'utc' is not a valid scale for a time difference: a UTC
day is not always 86400 seconds.
For more information see:
- https://docs.astropy.org/en/stable/time/
- https://docs.astropy.org/en/stable/time/index.html#time-deltas
Parameters
----------
val : sequence, ndarray, number, `~astropy.units.Quantity` or `~astropy.time.TimeDelta` object
Value(s) to initialize the time difference(s). Any quantities will
be converted appropriately (with care taken to avoid rounding
errors for regular time units).
val2 : sequence, ndarray, number, or `~astropy.units.Quantity`; optional
Additional values, as needed to preserve precision.
format : str, optional
Format of input value(s). For numerical inputs without units,
"jd" is assumed and values are interpreted as days.
A deprecation warning is raised in this case. To avoid the warning,
either specify the format or add units to the input values.
scale : str, optional
Time scale of input value(s), must be one of the following values:
('tdb', 'tt', 'ut1', 'tcg', 'tcb', 'tai'). If not given (or
``None``), the scale is arbitrary; when added or subtracted from a
``Time`` instance, it will be used without conversion.
precision : int, optional
Digits of precision in string representation of time
in_subfmt : str, optional
Unix glob to select subformats for parsing input times
out_subfmt : str, optional
Unix glob to select subformat for outputting times
copy : bool, optional
Make a copy of the input values
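Examples
--------
Illustrative ways to create a `TimeDelta` (values arbitrary)::
    >>> import astropy.units as u
    >>> dt = TimeDelta(3600.0, format='sec')
    >>> dt = TimeDelta(0.125 * u.day)
    >>> dt = Time('2020-01-02') - Time('2020-01-01')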
"""
SCALES = TIME_DELTA_SCALES
"""List of time delta scales."""
FORMATS = TIME_DELTA_FORMATS
"""Dict of time delta formats."""
info = TimeDeltaInfo()
def __new__(
cls,
val,
val2=None,
format=None,
scale=None,
precision=None,
in_subfmt=None,
out_subfmt=None,
location=None,
copy=False,
):
if isinstance(val, TimeDelta):
self = val.replicate(format=format, copy=copy, cls=cls)
else:
self = super().__new__(cls)
return self
def __init__(
self,
val,
val2=None,
format=None,
scale=None,
*,
precision=None,
in_subfmt=None,
out_subfmt=None,
copy=COPY_IF_NEEDED,
):
if isinstance(val, TimeDelta):
if scale is not None:
self._set_scale(scale)
else:
self._init_from_vals(
val,
val2,
format,
scale,
copy,
precision=precision,
in_subfmt=in_subfmt,
out_subfmt=out_subfmt,
)
self._check_numeric_no_unit(val, format)
if scale is not None:
self.SCALES = TIME_DELTA_TYPES[scale]
def _check_numeric_no_unit(self, val, format):
if (
isinstance(self._time, TimeDeltaNumeric)
and getattr(val, "unit", None) is None
and format is None
):
warn(
"Numerical value without unit or explicit format passed to"
" TimeDelta, assuming days",
TimeDeltaMissingUnitWarning,
)
def replicate(self, *args, **kwargs):
out = super().replicate(*args, **kwargs)
out.SCALES = self.SCALES
return out
def to_datetime(self):
"""
Convert to ``datetime.timedelta`` object.
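An illustrative conversion (the value is arbitrary)::
    >>> dt = TimeDelta(2.5, format='jd')
    >>> td = dt.to_datetime()
    >>> td.days, td.seconds
    (2, 43200)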
"""
tm = self.replicate(format="datetime")
return tm._shaped_like_input(tm._time.value)
def _set_scale(self, scale):
"""
This is the key routine that actually does time scale conversions.
This is not public and not connected to the read-only scale property.
"""
if scale == self.scale:
return
if scale not in self.SCALES:
raise ValueError(
"Scale {scale!r} is not in the allowed scales {sorted(self.SCALES)}"
)
# For TimeDelta, there can only be a change in scale factor,
# which is written as time2 - time1 = scale_offset * time1
scale_offset = SCALE_OFFSETS[(self.scale, scale)]
if scale_offset is None:
self._time.scale = scale
else:
jd1, jd2 = self._time.jd1, self._time.jd2
offset1, offset2 = day_frac(jd1, jd2, factor=scale_offset)
self._time = self.FORMATS[self.format](
jd1 + offset1,
jd2 + offset2,
scale,
self.precision,
self.in_subfmt,
self.out_subfmt,
from_jd=True,
)
def _add_sub(self, other, op):
"""Perform common elements of addition / subtraction for two delta times."""
# If not a TimeDelta then see if it can be turned into a TimeDelta.
if not isinstance(other, TimeDelta):
try:
other = TimeDelta(other)
except Exception:
return NotImplemented
# the scales should be compatible (e.g., cannot convert TDB to TAI)
if (self.scale is not None and self.scale not in other.SCALES) or (
other.scale is not None and other.scale not in self.SCALES
):
raise TypeError(
"Cannot add TimeDelta instances with scales "
f"'{self.scale}' and '{other.scale}'"
)
# adjust the scale of other if the scale of self is set (or no scales)
if self.scale is not None or other.scale is None:
out = self.replicate()
if other.scale is not None:
other = getattr(other, self.scale)
else:
out = other.replicate()
jd1 = op(self._time.jd1, other._time.jd1)
jd2 = op(self._time.jd2, other._time.jd2)
out._time.jd1, out._time.jd2 = day_frac(jd1, jd2)
return out
def __add__(self, other):
# If other is a Time then use Time.__add__ to do the calculation.
if isinstance(other, Time):
return other.__add__(self)
return self._add_sub(other, operator.add)
def __sub__(self, other):
# TimeDelta - Time is an error
if isinstance(other, Time):
raise OperandTypeError(self, other, "-")
return self._add_sub(other, operator.sub)
def __radd__(self, other):
return self.__add__(other)
def __rsub__(self, other):
out = self.__sub__(other)
return -out
def __neg__(self):
"""Negation of a `TimeDelta` object."""
new = self.copy()
new._time.jd1 = -self._time.jd1
new._time.jd2 = -self._time.jd2
return new
def __abs__(self):
"""Absolute value of a `TimeDelta` object."""
jd1, jd2 = self._time.jd1, self._time.jd2
negative = jd1 + jd2 < 0
new = self.copy()
new._time.jd1 = np.where(negative, -jd1, jd1)
new._time.jd2 = np.where(negative, -jd2, jd2)
return new
def __mul__(self, other):
"""Multiplication of `TimeDelta` objects by numbers/arrays."""
# Check needed since otherwise the self.jd1 * other multiplication
# would enter here again (via __rmul__)
if isinstance(other, Time):
raise OperandTypeError(self, other, "*")
elif (isinstance(other, u.UnitBase) and other == u.dimensionless_unscaled) or (
isinstance(other, str) and other == ""
):
return self.copy()
# If other is something consistent with a dimensionless quantity
# (could just be a float or an array), then we can just multiply in.
try:
other = u.Quantity(other, u.dimensionless_unscaled, copy=COPY_IF_NEEDED)
except Exception:
# If not consistent with a dimensionless quantity, try downgrading
# self to a quantity and see if things work.
try:
return self.to(u.day) * other
except Exception:
# The various ways we could multiply all failed;
# returning NotImplemented to give other a final chance.
return NotImplemented
jd1, jd2 = day_frac(self.jd1, self.jd2, factor=other.value)
out = TimeDelta(jd1, jd2, format="jd", scale=self.scale)
if self.format != "jd":
out = out.replicate(format=self.format)
return out
def __rmul__(self, other):
"""Multiplication of numbers/arrays with `TimeDelta` objects."""
return self.__mul__(other)
def __truediv__(self, other):
"""Division of `TimeDelta` objects by numbers/arrays."""
# Cannot do __mul__(1./other) as that loses precision
if (isinstance(other, u.UnitBase) and other == u.dimensionless_unscaled) or (
isinstance(other, str) and other == ""
):
return self.copy()
# If other is something consistent with a dimensionless quantity
# (could just be a float or an array), then we can just divide in.
try:
other = u.Quantity(other, u.dimensionless_unscaled, copy=COPY_IF_NEEDED)
except Exception:
# If not consistent with a dimensionless quantity, try downgrading
# self to a quantity and see if things work.
try:
return self.to(u.day) / other
except Exception:
# The various ways we could divide all failed;
# returning NotImplemented to give other a final chance.
return NotImplemented
jd1, jd2 = day_frac(self.jd1, self.jd2, divisor=other.value)
out = TimeDelta(jd1, jd2, format="jd", scale=self.scale)
if self.format != "jd":
out = out.replicate(format=self.format)
return out
def __rtruediv__(self, other):
"""Division by `TimeDelta` objects of numbers/arrays."""
# Here, we do not have to worry about returning NotImplemented,
# since other has already had a chance to look at us.
return other / self.to(u.day)
def to(self, unit, equivalencies=[]):
"""
Convert to a quantity in the specified unit.
Parameters
----------
unit : unit-like
The unit to convert to.
equivalencies : list of tuple
A list of equivalence pairs to try if the units are not directly
convertible (see :ref:`astropy:unit_equivalencies`). If `None`, no
equivalencies will be applied at all, not even any set globally
or within a context.
Returns
-------
quantity : `~astropy.units.Quantity`
The quantity in the units specified.
See Also
--------
to_value : get the numerical value in a given unit.
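Examples
--------
An illustrative conversion (the unit choice is arbitrary)::
    >>> import astropy.units as u
    >>> dt = TimeDelta(0.5, format='jd')
    >>> dt.to(u.hr)
    <Quantity 12. hr>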
"""
return u.Quantity(self._time.jd1 + self._time.jd2, u.day).to(
unit, equivalencies=equivalencies
)
def to_value(self, *args, **kwargs):
"""Get time delta values expressed in specified output format or unit.
This method is flexible and handles both conversion to a specified
``TimeDelta`` format / sub-format AND conversion to a specified unit.
If positional argument(s) are provided then the first one is checked
to see if it is a valid ``TimeDelta`` format, and next it is checked
to see if it is a valid unit or unit string.
To convert to a ``TimeDelta`` format and optional sub-format the options
are::
tm = TimeDelta(1.0 * u.s)
tm.to_value('jd') # equivalent of tm.jd
tm.to_value('jd', 'decimal') # convert to 'jd' as a Decimal object
tm.to_value('jd', subfmt='decimal')
tm.to_value(format='jd', subfmt='decimal')
To convert to a unit with optional equivalencies, the options are::
tm.to_value('hr') # convert to u.hr (hours)
tm.to_value('hr', equivalencies=[])
tm.to_value(unit='hr', equivalencies=[])
The built-in `~astropy.time.TimeDelta` options for ``format`` are shown below::
>>> list(TimeDelta.FORMATS)
['sec', 'jd', 'datetime', 'quantity_str']
For the two numerical formats 'jd' and 'sec', the available ``subfmt``
options are: {'float', 'long', 'decimal', 'str', 'bytes'}. Here, 'long'
uses ``numpy.longdouble`` for somewhat enhanced precision (with the
enhancement depending on platform), and 'decimal' instances of
:class:`decimal.Decimal` for full precision. For the 'str' and 'bytes'
sub-formats, the number of digits is also chosen such that time values
are represented accurately. Default: as set by ``out_subfmt`` (which by
default picks the first available for a given format, i.e., 'float').
Parameters
----------
format : str, optional
The format in which one wants the `~astropy.time.TimeDelta` values.
Default: the current format.
subfmt : str, optional
Possible sub-format in which the values should be given. Default: as
set by ``out_subfmt`` (which by default picks the first available
for a given format, i.e., 'float' or 'date_hms').
unit : `~astropy.units.UnitBase` instance or str, optional
The unit in which the value should be given.
equivalencies : list of tuple
A list of equivalence pairs to try if the units are not directly
convertible (see :ref:`astropy:unit_equivalencies`). If `None`, no
equivalencies will be applied at all, not even any set globally or
within a context.
Returns
-------
value : ndarray or scalar
The value in the format or units specified.
See Also
--------
to : Convert to a `~astropy.units.Quantity` instance in a given unit.
value : The time value in the current format.
"""
if not (args or kwargs):
raise TypeError("to_value() missing required format or unit argument")
# Validate keyword arguments.
if kwargs:
allowed_kwargs = {"format", "subfmt", "unit", "equivalencies"}
if not set(kwargs).issubset(allowed_kwargs):
bad = (set(kwargs) - allowed_kwargs).pop()
raise TypeError(
f"{self.to_value.__qualname__}() got an unexpected keyword argument"
f" '{bad}'"
)
# Handle a valid format as first positional argument or keyword. This will also
# accept a subfmt keyword if supplied.
if "format" in kwargs or (
args != () and (args[0] is None or args[0] in self.FORMATS)
):
# Super-class will error with duplicate arguments, etc.
return super().to_value(*args, **kwargs)
# Handle subfmt keyword with no format and no args.
if "subfmt" in kwargs:
if args:
raise ValueError(
"cannot specify 'subfmt' and positional argument that is not a "
"valid format"
)
return super().to_value(self.format, **kwargs)
# At this point any positional argument must be a unit so try parsing as such.
# If it fails then give an informative exception.
# TODO: deprecate providing equivalencies as a positional argument. This is
# quite non-obvious in this context.
if args:
try:
unit = u.Unit(args[0])
except ValueError as exc:
raise ValueError(
"first argument is not one of the known "
f"formats ({list(self.FORMATS)}) and failed to parse as a unit."
) from exc
args = (unit,) + args[1:]
return u.Quantity(self._time.jd1 + self._time.jd2, u.day).to_value(
*args, **kwargs
)
def _make_value_equivalent(self, item, value):
"""Coerce setitem value into an equivalent TimeDelta object."""
if not isinstance(value, TimeDelta):
try:
value = self.__class__(value, scale=self.scale, format=self.format)
except Exception as err:
raise ValueError(
f"cannot convert value to a compatible TimeDelta object: {err}"
)
return value
def isclose(self, other, atol=None, rtol=0.0):
"""Returns a boolean or boolean array where two TimeDelta objects are
element-wise equal within a time tolerance.
This effectively evaluates the expression below::
abs(self - other) <= atol + rtol * abs(other)
Parameters
----------
other : `~astropy.units.Quantity` or `~astropy.time.TimeDelta`
Quantity or TimeDelta object for comparison.
atol : `~astropy.units.Quantity` or `~astropy.time.TimeDelta`
Absolute tolerance for equality with units of time (e.g. ``u.s`` or
``u.day``). Default is one bit in the 128-bit JD time representation,
equivalent to about 20 picosecs.
rtol : float
Relative tolerance for equality
"""
try:
other_day = other.to_value(u.day)
except Exception as err:
raise TypeError(f"'other' argument must support conversion to days: {err}")
if atol is None:
atol = np.finfo(float).eps * u.day
if not isinstance(atol, (u.Quantity, TimeDelta)):
raise TypeError(
"'atol' argument must be a Quantity or TimeDelta instance, got "
f"{atol.__class__.__name__} instead"
)
return np.isclose(
self.to_value(u.day), other_day, rtol=rtol, atol=atol.to_value(u.day)
)
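# A minimal usage sketch for isclose (illustrative only; the example values
# below are assumptions, not taken from the astropy test suite):
#
#     >>> from astropy.time import TimeDelta
#     >>> import astropy.units as u
#     >>> dt1 = TimeDelta(1.0 * u.s)
#     >>> dt2 = TimeDelta(1.0 * u.s + 5 * u.ns)
#     >>> bool(dt1.isclose(dt2, atol=10 * u.ns))  # |dt1 - dt2| <= atol + rtol * |dt2|
#     True
#     >>> bool(dt1.isclose(dt2, atol=1 * u.ns))
#     False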
class ScaleValueError(Exception):
pass
def _make_array(val, copy=COPY_IF_NEEDED):
"""
Take ``val`` and convert/reshape to an array. If ``copy`` is `True`
then copy input values.
Returns
-------
val : ndarray
Array version of ``val``.
"""
if isinstance(val, (tuple, list)) and len(val) > 0 and isinstance(val[0], Time):
dtype = object
else:
dtype = None
val = np.array(val, copy=copy, subok=True, dtype=dtype)
# Allow only float64, string or object arrays as input
# (object is for datetime, maybe add more specific test later?)
# This also ensures the right byteorder for float64 (closes #2942).
if val.dtype.kind == "f" and val.dtype.itemsize >= np.dtype(np.float64).itemsize:
pass
elif val.dtype.kind in "OSUMaV":
pass
else:
val = np.asanyarray(val, dtype=np.float64)
return val
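# Behaviour sketch for _make_array (illustrative only, not part of the public
# API): plain numeric input is coerced to float64, while string/object input
# is left as-is.
#
#     >>> _make_array([1, 2, 3]).dtype
#     dtype('float64')
#     >>> _make_array(np.array([1.0, 2.0], dtype=np.float32)).dtype
#     dtype('float64')
#     >>> _make_array(['2000-01-01', '2000-01-02']).dtype.kind
#     'U'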
class OperandTypeError(TypeError):
def __init__(self, left, right, op=None):
op_string = "" if op is None else f" for {op}"
super().__init__(
f"Unsupported operand type(s){op_string}: '{type(left).__name__}' "
f"and '{type(right).__name__}'"
)
def _check_leapsec():
global _LEAP_SECONDS_CHECK
if _LEAP_SECONDS_CHECK != _LeapSecondsCheck.DONE:
with _LEAP_SECONDS_LOCK:
# There are three ways we can get here:
# 1. First call (NOT_STARTED).
# 2. Re-entrant call (RUNNING). We skip the initialisation
# and don't worry about leap second errors.
# 3. Another thread which raced with the first call
# (RUNNING). The first thread has relinquished the
# lock to us, so initialization is complete.
if _LEAP_SECONDS_CHECK == _LeapSecondsCheck.NOT_STARTED:
_LEAP_SECONDS_CHECK = _LeapSecondsCheck.RUNNING
update_leap_seconds()
_LEAP_SECONDS_CHECK = _LeapSecondsCheck.DONE
def update_leap_seconds(files=None):
"""If the current ERFA leap second table is out of date, try to update it.
Uses `astropy.utils.iers.LeapSeconds.auto_open` to try to find an
up-to-date table. See that routine for the definition of "out of date".
In order to make it safe to call this any time, all exceptions are turned
    into warnings.
Parameters
----------
files : list of path-like, optional
        List of files/URLs to attempt to open. By default, uses the files defined by
`astropy.utils.iers.LeapSeconds.auto_open`, which includes the table
used by ERFA itself, so if that is up to date, nothing will happen.
Returns
-------
n_update : int
Number of items updated.
"""
try:
from astropy.utils import iers
table = iers.LeapSeconds.auto_open(files)
return erfa.leap_seconds.update(table)
except Exception as exc:
warn(
f"leap-second auto-update failed due to the following exception: {exc!r}",
AstropyWarning,
)
return 0
|
astropyREPO_NAMEastropyPATH_START.@astropy_extracted@astropy-main@astropy@time@core.py@.PATH_END.py
|
{
"filename": "medstddev.py",
"repo_name": "kevin218/Eureka",
"repo_path": "Eureka_extracted/Eureka-main/src/eureka/lib/medstddev.py",
"type": "Python"
}
|
import numpy as np
def medstddev(data, mask=None, medi=False, axis=0):
"""Compute the stddev with respect to the median.
    This is in contrast to the standard method of using the mean.
Parameters
----------
data : ndarray
        An array from which to calculate the median standard deviation.
mask : 1D ndarray; optional
Boolean mask indicating the bad values with True.
Same shape as data. Defaults to None.
medi : boolean; optional
If True return a tuple with (stddev, median) of data. Defaults
to False.
axis : int; optional
        The axis along which the median std deviation is calculated.
Defaults to 0.
Returns
-------
float
        The standard deviation.
float; optional
The median; only returned if medi==True.
Examples
--------
.. highlight:: python
.. code-block:: python
        >>> import numpy as np
        >>> import medstddev as m
>>> a = np.array([1,3,4,5,6,7,7])
>>> std, med = m.medstddev(a, medi=True)
        >>> print(np.median(a))
5.0
>>> print(med)
5.0
>>> print(std)
2.2360679775
>>> # use masks
>>> a = np.array([1,3,4,5,6,7,7])
>>> mask = np.array([False,False,False,True,True,True,True])
>>> std, med = m.medstddev(a, mask, medi=True)
>>> print(std)
1.58113883008
>>> print(med)
3.0
>>> # automatically mask invalid values
>>> a = np.array([np.nan, 1, 4, np.inf, 6])
>>> std, med = m.medstddev(a, medi=True)
>>> print(std, med)
(2.5495097567963922, 4.0)
>>> # critical cases:
>>> # only one value, return std = 0.0
>>> a = np.array([1, 4, 6])
>>> mask = np.array([True, True, False])
>>> std, med = m.medstddev(a, mask, medi=True)
>>> print(std, med)
(0.0, 6.0)
>>> # no good values, return std = nan, med = nan
>>> mask[-1] = True
>>> std, med = m.medstddev(a, mask, medi=True)
>>> print(std, med)
(nan, nan)
Notes
-----
    MEDSTDDEV calculates the median, subtracts it from each value of
X, then uses this residual to calculate the standard deviation.
The numerically-stable method for calculating the variance from
moment.pro doesn't work for the median standard deviation. It
only works for the mean, because by definition the residuals from
the mean add to zero.
History:
- 2005-01-18 statia
Written by Statia Luszcz.
- 2005-01-19 statia
Updated variance calculation according to algorithm in moment.pro,
added medi keyword.
- 2005-01-20 Joe Harrington, Cornell, jh@oobleck.astro.cornell.edu
Header update. Removed algorithm from moment.pro because it
doesn't work for the median. Added /double.
- 2010-11-05 patricio pcubillos@fulbrightmail.org
Converted to python, documented.
- 2022-04-11 Taylor James Bell
Efficiently using numpy axes
"""
# Default mask: only non-finite values are bad
if mask is None:
mask = ~np.isfinite(data)
# Apply the mask
data = np.ma.masked_where(mask, data)
# number of good values:
ngood = np.sum(~mask, axis=axis)
# calculate median of good values:
median = np.ma.median(data, axis=axis)
# residuals is data - median, masked values don't count:
residuals = data - median
    # calculate standard deviation:
with np.errstate(divide='ignore', invalid='ignore'):
std = np.ma.std(residuals, axis=axis, ddof=1)
# Convert masked arrays to just arrays
std = np.array(std)
median = np.array(median)
if std.shape == ():
# If just a single value, make sure using a shaped array
std = std.reshape(-1)
median = median.reshape(-1)
# critical case fixes:
if np.any(ngood == 0):
std[np.where(ngood == 0)] = np.nan
median[np.where(ngood == 0)] = np.nan
if np.any(ngood == 1):
std[np.where(ngood == 1)] = 0.
if len(std) == 1:
std = std[0]
median = median[0]
# return statement:
if medi:
return (std, median)
return std
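# A minimal column-wise usage sketch (illustrative only; the array below is a
# made-up example). With axis=0 (the default) each column gets its own median
# and median standard deviation:
#
#     >>> import numpy as np
#     >>> arr = np.array([[1., 2., 3.], [4., 5., 60.]])
#     >>> std, med = medstddev(arr, axis=0, medi=True)
#     >>> med.tolist()   # per-column medians
#     [2.5, 3.5, 31.5]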
|
kevin218REPO_NAMEEurekaPATH_START.@Eureka_extracted@Eureka-main@src@eureka@lib@medstddev.py@.PATH_END.py
|
{
"filename": "_title.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/graph_objs/scatterternary/marker/colorbar/_title.py",
"type": "Python"
}
|
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Title(_BaseTraceHierarchyType):
# class properties
# --------------------
_parent_path_str = "scatterternary.marker.colorbar"
_path_str = "scatterternary.marker.colorbar.title"
_valid_props = {"font", "side", "text"}
# font
# ----
@property
def font(self):
"""
Sets this color bar's title font.
The 'font' property is an instance of Font
that may be specified as:
- An instance of :class:`plotly.graph_objs.scatterternary.marker.colorbar.title.Font`
- A dict of string/value properties that will be passed
to the Font constructor
Supported dict properties:
color
family
HTML font family - the typeface that will be
applied by the web browser. The web browser
will only be able to apply a font if it is
available on the system which it operates.
Provide multiple font families, separated by
commas, to indicate the preference in which to
apply fonts if they aren't available on the
system. The Chart Studio Cloud (at
https://chart-studio.plotly.com or on-premise)
generates images on a server, where only a
select number of fonts are installed and
supported. These include "Arial", "Balto",
"Courier New", "Droid Sans", "Droid Serif",
"Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
lineposition
Sets the kind of decoration line(s) with text,
such as an "under", "over" or "through" as well
as combinations e.g. "under+over", etc.
shadow
Sets the shape and color of the shadow behind
text. "auto" places minimal shadow and applies
contrast text font color. See
https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional
options.
size
style
Sets whether a font should be styled with a
normal or italic face from its family.
textcase
Sets capitalization of text. It can be used to
make text appear in all-uppercase or all-
lowercase, or with each word capitalized.
variant
Sets the variant of the font.
weight
Sets the weight (or boldness) of the font.
Returns
-------
plotly.graph_objs.scatterternary.marker.colorbar.title.Font
"""
return self["font"]
@font.setter
def font(self, val):
self["font"] = val
# side
# ----
@property
def side(self):
"""
Determines the location of color bar's title with respect to
        the color bar. Defaults to "top" when `orientation` is "v" and
        defaults to "right" when `orientation` is "h".
The 'side' property is an enumeration that may be specified as:
- One of the following enumeration values:
['right', 'top', 'bottom']
Returns
-------
Any
"""
return self["side"]
@side.setter
def side(self, val):
self["side"] = val
# text
# ----
@property
def text(self):
"""
Sets the title of the color bar.
The 'text' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["text"]
@text.setter
def text(self, val):
self["text"] = val
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
font
Sets this color bar's title font.
side
Determines the location of color bar's title with
respect to the color bar. Defaults to "top" when
            `orientation` is "v" and defaults to "right" when
            `orientation` is "h".
text
Sets the title of the color bar.
"""
def __init__(self, arg=None, font=None, side=None, text=None, **kwargs):
"""
Construct a new Title object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.scatterternary
.marker.colorbar.Title`
font
Sets this color bar's title font.
side
Determines the location of color bar's title with
respect to the color bar. Defaults to "top" when
            `orientation` is "v" and defaults to "right" when
            `orientation` is "h".
text
Sets the title of the color bar.
Returns
-------
Title
"""
super(Title, self).__init__("title")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.scatterternary.marker.colorbar.Title
constructor must be a dict or
an instance of :class:`plotly.graph_objs.scatterternary.marker.colorbar.Title`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("font", None)
_v = font if font is not None else _v
if _v is not None:
self["font"] = _v
_v = arg.pop("side", None)
_v = side if side is not None else _v
if _v is not None:
self["side"] = _v
_v = arg.pop("text", None)
_v = text if text is not None else _v
if _v is not None:
self["text"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
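# A minimal usage sketch (illustrative only, not part of the generated module;
# the title text below is a made-up example):
#
#     >>> from plotly.graph_objs.scatterternary.marker.colorbar import Title
#     >>> t = Title(text="Abundance", side="top")
#     >>> t.to_plotly_json()
#     {'side': 'top', 'text': 'Abundance'}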
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@graph_objs@scatterternary@marker@colorbar@_title.py@.PATH_END.py
|
{
"filename": "yamlModelBuilder.py",
"repo_name": "xgds/xgds_core",
"repo_path": "xgds_core_extracted/xgds_core-master/xgds_core/importer/yamlModelBuilder.py",
"type": "Python"
}
|
#! /usr/bin/env python
# __BEGIN_LICENSE__
# Copyright (c) 2015, United States Government, as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All rights reserved.
#
# The xGDS platform is licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0.
#
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
# __END_LICENSE__
"""
Script to read yaml file, create django models and use migration to create database tables.
"""
import re
import sys
import subprocess
from collections import OrderedDict
import django
django.setup()
from django.conf import settings
from csvImporter import load_yaml
INDENT = ' '
def get_field_model_type(field_type):
"""
    Convert the simple field type string to a field model type, i.e.
float -> FloatField
iso8601 -> DateTimeField
unixtime_float_second -> DateTimeField
unixtime_int_microsecond -> DateTimeField
boolean -> BooleanField
These are then used to look up the database data type from the connection
:param field_type: incoming simple field type (string)
:return: the field model type
"""
if field_type == 'string':
return 'CharField'
if field_type in ['iso8601', 'unixtime_float_second', 'unixtime_int_microsecond']:
return 'DateTimeField'
if field_type == 'nullboolean':
return 'NullBooleanField'
return field_type.capitalize() + 'Field'
def create_field_code(field_name, field):
"""
Create the one line of code to define a field in a model
:param field_name: the name of the field
:param field: the dictionary defining the field
:return: Python code in a string
"""
# Default parameters for all model fields
params = {'blank': 'True',
'null': 'True'}
# Max length parameter for strings
if 'max_length' in field:
params['max_length'] = field['max_length']
elif field['type'] == 'string':
params['max_length'] = 256
# If there is a timestamp field make it a db_index and make it required
if 'timestamp' == field_name:
params['db_index'] = 'True'
params['blank'] = 'False'
params['null'] = 'False'
# build the string
result = '%s%s = models.%s(' % (INDENT, field_name, get_field_model_type(field['type']))
result += ', '.join(['%s=%s' % (pk, pv) for pk, pv in params.iteritems()])
result += ')\n'
return result
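# Illustration (hypothetical field definition, not from any real YAML file):
# a field named 'voltage' defined as {'type': 'float', 'units': 'V'} yields a
# line roughly like
#     voltage = models.FloatField(blank=True, null=True)
# while a field named 'timestamp' additionally gets db_index=True and
# blank=False, null=False.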
def create_channel_description(field_name, field):
"""
Create the channel description dictionary
:param field: the dictionary defining the field
:return: the string for construction of the channel description
"""
result = "xgds_timeseries.ChannelDescription("
if 'label' in field:
result += "'%s'" % field['label']
else:
result += "'%s'" % field_name.capitalize()
if 'units' in field:
result += ", units='%s'" % field['units']
if 'min' in field:
result += ", global_min=%f" % field['min']
if 'max' in field:
result += ", global_max=%f" % field['max']
if 'interval' in field:
result += ", interval=%f" % field['interval']
result += ")"
return result
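# Illustration (hypothetical field definition): a field 'voltage' with
# {'label': 'Voltage', 'units': 'V', 'min': 0, 'max': 5} produces the string
#     xgds_timeseries.ChannelDescription('Voltage', units='V', global_min=0.000000, global_max=5.000000)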
def create_model_code(config, yaml_file, model_name):
"""
Create the model code based on the config
:param config: The config file we are using (loaded from yaml)
:return: The python code to inject in yamlModels.py
"""
# add space
result = '\n'
# the class itself
superclass = 'models.Model'
if 'superclass' in config:
superclass = config['superclass']
result += 'class %s(%s):\n' % (model_name, superclass)
# add comment
result += '%s"""\n%sThis is an auto-generated Django model created from a\n' % (INDENT, INDENT)
result += '%sYAML specifications using %s\n' % (INDENT, sys.argv[0])
result += '%sand YAML file %s\n%s"""\n\n' % (INDENT, yaml_file, INDENT)
time_field = 'timestamp'
if 'time_field' in config:
time_field = config['time_field']
channel_descriptions = OrderedDict()
for field_name, field_info in config['fields'].iteritems():
skip = False
if 'skip' in field_info and field_info['skip']:
skip = True
if not skip:
result += create_field_code(field_name, field_info)
if field_name != time_field:
channel_descriptions[field_name] = create_channel_description(field_name, field_info)
# special case the foreign key to a flight, if required
if 'flight_required' in config and config['flight_required']:
result += "%sflight = models.ForeignKey('%s', on_delete=models.SET_NULL, blank=True, null=True)\n" % (INDENT, settings.XGDS_CORE_FLIGHT_MODEL)
# set the stateful flag if it is true, defaults to false
if 'stateful' in config and config['stateful']:
result += "%sstateful=True\n" % INDENT
# add the title, splitting out camelcase
result += '\n'
splits = re.sub('([a-z])([A-Z])', r'\1 \2', model_name).split()
title = ''
for s in splits:
title = title + ' ' + s
result += "%stitle = '%s'" % (INDENT, title[1:])
# add the channel descriptions
result += '\n'
result += "%schannel_descriptions = {\n" % INDENT
for key, value in channel_descriptions.iteritems():
result += "%s%s%s%s%s%s%s'%s': %s,\n" % (INDENT, INDENT, INDENT, INDENT, INDENT, INDENT, INDENT, key, value)
result += "%s%s%s%s%s%s%s}\n" % (INDENT, INDENT, INDENT, INDENT, INDENT, INDENT, INDENT)
# add the channel classmethod
result += '\n'
result += '%s@classmethod\n' % INDENT
result += '%sdef get_channel_names(cls):\n' % INDENT
result += '%s%sreturn [' % (INDENT, INDENT)
for key, value in channel_descriptions.iteritems():
result += "'%s', " % key
result +=']\n'
# add the custom time field name if need be
if time_field != 'timestamp':
result += '%s@classmethod\n' % INDENT
result += '%sdef get_time_field_name(cls):\n' % INDENT
result += "%s%sreturn '%s'\n" % (INDENT, INDENT, time_field)
# add the unicode
result += '\n'
result += '%sdef __unicode__(self):\n' % INDENT
strformat = '%s:'
strcontent = '(self.%s.isoformat()' % time_field
for key, value in channel_descriptions.iteritems():
strformat += ' %s'
strcontent += ', str(self.%s)' % key
strcontent += ')'
result += '%s%sreturn "%s" %% %s' % (INDENT, INDENT, strformat, strcontent)
# add another space
result += '\n'
return result
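# Illustration (hypothetical YAML config, not from any real project): for a
# config with class 'myapp.VoltageReading', the default 'timestamp' time field
# and a single float channel 'voltage', the generated code begins roughly like
#
#     class VoltageReading(models.Model):
#         timestamp = models.DateTimeField(db_index=True, blank=False, null=False)
#         voltage = models.FloatField(blank=True, null=True)
#         title = 'Voltage Reading'
#         channel_descriptions = {'voltage': xgds_timeseries.ChannelDescription('Voltage'), }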
def model_exists(app_name, model_name):
"""
Test if the model already exists
:param app_name:
:param model_name:
:return: True if it exists, False otherwise
"""
try:
exec('from %s.models import %s' % (app_name, model_name))
print 'The model already exists %s.%s' % (app_name, model_name)
return True
except ImportError:
return False
def main():
import optparse
parser = optparse.OptionParser('usage: %prog')
parser.add_option('-t', '--test',
action='store_true', default=False,
help='Run in test mode: find files and report them but do not process them')
parser.add_option('-m', '--migrate',
action='store_true', default=True,
help='Migrate the database: set to False if you do not want to do this')
opts, args = parser.parse_args()
# YAML files are specified on the command line
yaml_files = args
print args
if not yaml_files:
        print 'at least one yaml file must be given as an argument'
exit()
apps_needing_migration = set()
for yaml_file in yaml_files:
config = load_yaml(yaml_file)
split_name = config['class'].split('.')
app_name = split_name[0]
model_name = split_name[1]
if not opts.test:
if model_exists(app_name, model_name):
continue
model_code = create_model_code(config, yaml_file, model_name)
print model_code
if not opts.test:
# write to yamlModels.py
model_file_name = './apps/%s/yamlModels.py' % app_name
model_file = open(model_file_name, 'a')
model_file.write(model_code)
model_file.close()
print 'Updated %s' % model_file_name
# write to admin.py
admin_file_name = './apps/%s/admin.py' % app_name
admin_file = open(admin_file_name, 'a')
admin_file.write('admin.site.register(%s)\n' % model_name)
admin_file.close()
print 'Updated %s' % admin_file_name
# add to the set of apps needing migration
apps_needing_migration.add(app_name)
# do the migrations; since we've modified yamlModels.py we have to run this in a new process.
if not opts.test:
if apps_needing_migration:
if opts.migrate:
for app_name in apps_needing_migration:
print 'Making migrations for %s (be patient)' % app_name
subprocess.call(['./manage.py', 'makemigrations', app_name])
print 'Migrating'
subprocess.call(['./manage.py', 'migrate'])
if __name__ == '__main__':
main()
|
xgdsREPO_NAMExgds_corePATH_START.@xgds_core_extracted@xgds_core-master@xgds_core@importer@yamlModelBuilder.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/pointcloud/stream/__init__.py",
"type": "Python"
}
|
import sys
if sys.version_info < (3, 7):
from ._token import TokenValidator
from ._maxpoints import MaxpointsValidator
else:
from _plotly_utils.importers import relative_import
__all__, __getattr__, __dir__ = relative_import(
__name__, [], ["._token.TokenValidator", "._maxpoints.MaxpointsValidator"]
)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@pointcloud@stream@__init__.py@.PATH_END.py
|
{
"filename": "_variant.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/indicator/gauge/axis/tickfont/_variant.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class VariantValidator(_plotly_utils.basevalidators.EnumeratedValidator):
def __init__(
self,
plotly_name="variant",
parent_name="indicator.gauge.axis.tickfont",
**kwargs,
):
super(VariantValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "plot"),
values=kwargs.pop(
"values",
[
"normal",
"small-caps",
"all-small-caps",
"all-petite-caps",
"petite-caps",
"unicase",
],
),
**kwargs,
)
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@indicator@gauge@axis@tickfont@_variant.py@.PATH_END.py
|
{
"filename": "_variant.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/parcoords/labelfont/_variant.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class VariantValidator(_plotly_utils.basevalidators.EnumeratedValidator):
def __init__(
self, plotly_name="variant", parent_name="parcoords.labelfont", **kwargs
):
super(VariantValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "plot"),
values=kwargs.pop(
"values",
[
"normal",
"small-caps",
"all-small-caps",
"all-petite-caps",
"petite-caps",
"unicase",
],
),
**kwargs,
)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@parcoords@labelfont@_variant.py@.PATH_END.py
|
{
"filename": "almaqa2isg.py",
"repo_name": "lucatelli/morphen",
"repo_path": "morphen_extracted/morphen-main/analysis_scripts/almaqa2isg.py",
"type": "Python"
}
|
# ALMA Quality Assurance Software
# QA2 Imaging Script Generator
# D. Petry (ESO)
# A. Borkar (EU ARC Node, Czech Republic)
# G. Bendo (EU ARC Node, UK)
# $Id: almaqa2isg.py,v 1.24 2021/10/27 09:52:59 dpetry Exp $
#
"""
The ALMA QA2 Imaging Script Generator
"""
from __future__ import print_function
import os
import sys
import numpy as np
import glob
import re
import analysisUtils as aU
sfsdr = aU.stuffForScienceDataReduction()
try: # Python 3
from casatasks import importasdm
from casatasks import gencal
from casatasks import casalog
from casatools import table as tbtool
from casatools import ms as mstool
from casatools import msmetadata as msmdtool
from casatools import quanta as qatool
import builtins as exceptions
except ImportError: # Python 2
from taskinit import *
from importasdm_cli import importasdm_cli as importasdm
from gencal_cli import gencal_cli as gencal
import exceptions
def version(short=False):
"""
Returns the CVS revision number.
"""
myversion = "$Id: almaqa2isg.py,v 1.24 2021/10/27 09:52:59 dpetry Exp $"
if (short):
myversion = myversion.split()[2]
return myversion
def generateImagingScript(vis="", draft_threshold="", reqchanwidth=1, makecubes=True, docontsub=None, chanwidthtol=0.05,
restfreqs={}, additionalimg=['CALIBRATE_POLARIZATION', 'OBSERVE_CHECK_SOURCE'], spwmap=[],
perchanweightdensity=False):
"""
The ALMA QA2 Imaging Script Generator
vis - the MS(s) to image, can be a list.
If wildcards are used, a list is created automatically.
    draft_threshold - the cleaning threshold to use as an initial value in the cleaning (as a string e.g. "5 mJy")
reqchanwidth - the requested channel width (for cubes) as a string (e.g. "200MHz", "20km/s")
or as an integer which indicates units of original channels;
if == None, the draft_threshold is used for the agg. bw.
default: 1
makecubes - Boolean to indicate whether cubes should be made (set to False for continuum only)
default: True
docontsub - Boolean to indicate whether continuum subtraction should be done
(only relevant if makecubes==True)
default: True for standard polarisation, False for full polarisation data sets
chanwidthtol - the tolerance in units of channel widths to apply when deciding whether the
grids of corresponding SPWs from two different MSs are aligned
default: 0.05 (i.e. 5%)
restfreqs - the restfrequencies to be used for the restfreq parameter in the cube cleaning
in the format of a dictionary, e.g. {25: '230GHz', 27: '231GHz', ... } with
one entry for each science SPW (optional).
default: {} (empty dictionary; the restfreqs, if needed, will be extracted from
the SOURCE table or, if not available there, the central freqs will be used)
additionalimg - optional list of additional intents to be imaged.
For each field found with the given intent (give name omitting the "#ON_SOURCE"),
code for making an aggregate bandwidth image is going to be added.
Possible values: 'CALIBRATE_PHASE', 'CALIBRATE_BANDPASS', 'CALIBRATE_FLUX',
'OBSERVE_CHECK_SOURCE', 'CALIBRATE_POLARIZATION'
default: ['CALIBRATE_POLARIZATION', 'OBSERVE_CHECK_SOURCE'] (image check source(s) and/or polcal)
example: ['CALIBRATE_BANDPASS', 'CALIBRATE_PHASE']
spwmap - SPW IDs to be used in the final image names.
If a non-empty list is specified, it must contain at least as many
elements as there are science SPWs in the input MS. The given SPW IDs are then
used to replace in the image names the ones used in the MS,
e.g. if there are science spws [0,1,2,3] in the MS,
then setting spwmap=[19,21,23,25] will result in ID 19
being used instead of 0 in the image names, 21 instead of 1 etc.
default: [] (empty list) - use the SPW IDs as they are in the MS.
perchanweightdensity - the setting of the tclean parameter perchanweightdensity
for "briggs" weighting. Starting with CASA 6.2, "briggs" weighting will
only be used for mfs imaging. For cubes, the weighting option "briggsbwtaper"
will be used and perchanweightdensity will be forced to True.
default: False
Example:
import almaqa2isg as isg
isg.generateImagingScript('uid*.ms.split.cal', draft_threshold='0.1mJy', reqchanwidth='20km/s', spwmap=[17,19,21,23])
Discussion:
You can run the Imaging Script Generator (ISG) like so:
import almaqa2isg as isg
cd <the directory where the MS(s) are which you want to image, e.g. "calibrated">
isg.generateImagingScript(vis='uid___A00*.ms.split.cal', draft_threshold='0.1mJy', reqchanwidth='20km/s')
The vis parameter can be a single string or a list. If it is a list, each element must be the name of an
existing MS. If it is a single string, it can either be a single name of an existing MS or an expression
using wildcards "*" or "?" to specify a group of MSs. This will internally be converted to a list of MSs.
If makecubes is true (default), code for cleaning cubes for each science SPW is created.
If docontsub is true, code for continuum subtraction (separate for each science field) is created.
    This parameter is only active if makecubes is true.
draft_threshold specifies the threshold which is entered into the cleaning commands. It is related to
the required rms from the MOUS proposal. It is used as the cleaning threshold in cube
cleaning if makecubes is true (then the threshold for the agg. bw. image is left for editing by the analyst).
If makecubes==False, then draft_threshold is used as the cleaning threshold for the agg. bw. image.
reqchanwidth is the bandwidth on which draft_threshold is defined. It is used as the channel width for the cubes if
makecubes is true.
Like for the calibration script generator, the ISG is not meant to produce a script which one can run
blindly. We are not trying to reinvent the imaging pipeline. So, if different channel widths are required
for different SPWs, the analyst needs to edit the channel width (and the corresponding thresholds) for some
of the cubes. A general solution to permit different thresholds and widths for different SPWs was deemed too
complex for the moment.
NOTE that if there is more than one input MS and docontsub is True, then a concat step will be inserted such
that the uvcontsub commands can operate on the concatenated MS. If the SPWs are not yet reindexed to have
IDs starting at 0, uvcontsub will do this reindexing. The ISG will take this into account and use reindexed
SPW IDs when making the cubes but leave the old SPW ids in the image names (this is what archive needs).
    Furthermore, if continuum subtraction is requested, the ISG will check if the corresponding SPWs of the MSs
are shifted w.r.t. each other and if so, find the largest common grid for each SPW and generate mstransform code
to transform all science SPWs of all the MSs into these grids before concatenation.
At the end of the isg run, you obtain a file "scriptForImaging.py". On the terminal (in CASA) you will also
get messages which explain what you need to do to complete this script before you can run it. These messages
look, e.g., like this:
Script generation completed. Please find scriptForImaging.py in the current directory.
NOTE that you still need to edit the script:
- You need to edit the array "therestfreqs" to set the rest frequencies for the cubes of each SPW.
- You need to adjust the threshold for the aggregate bandwidth/continuum image(s).
- You need to edit the fitspw parameter in each uvcontsub command.
Iterative use for continuum identification
------------------------------------------
If cubes are to be created and a decision needs to be taken about whether contsub is necessary and how it
should be done, one generally can go two routes:
a) use plotms to generate amp vs. channel plots to identify lines and line-free regions
b) create image cubes for each spw once to see if they contain detectable spectral lines and identify
continuum channels.
When going route (b), it is recommended that the user runs the ISG twice, first with docontsub=False and then
with docontsub=True: With the first version, one can create some shallowly cleaned cubes to check for lines
and then move the first script version and the created images to a different directory and start again with
docontsub=True.
In order to make sure that a new run of the ISG does not destroy your previous version of the script, the
ISG prevents you from overwriting a pre-existing scriptForImaging.py
SPW Numbering
-------------
If you have used split or mstransform *with* reindexing to split out certain SPWs to obtain the input MS(s) to the ISG,
i.e. if your input MS(s) do *not* use the same SPW numbering as the original ASDM, you need to use the parameter
"spwmap" to get the archive-compliant numbering in the image names.
The spwmap parameter should contain an ordered list of the *original* SPW IDs of *all* the science SPWs
in the input MS(s). The input MSs, if there is more than one, need to all use the same numbering.
A typical example is [19,21,23,25]. You need to look at the original raw MS after importasdm to obtain this information.
Full Polarization support
-------------------------
The ISG will detect whether an input MS contains full polarisation data. In this case it will use stokes='IQUV' and
dconvolver='clarkstokes'. Furthermore, if the intent CALIBRATE_POLARISATION is present, it will by default make
the appropriate calibrator image and add operations to calculate polcal parameters.
"""
casalog.origin('generateImagingScript')
casalog.post( "QA2 Imaging Script Generator "+version())
print("QA2 Imaging Script Generator "+version())
scriptname = 'scriptForImaging.py'
myms = mstool()
mytb = tbtool()
mymsmd = msmdtool()
myqa = qatool()
if os.path.exists(scriptname):
print("ERROR: "+scriptname+" already exists. Please remove and try again.")
return False
# verify input
if(vis=="" or (type(vis)!=str and type(vis)!=list)):
print("ERROR: Invalid vis parameter. Must be non-empty string or list of strings.")
return False
try:
mydraft_threshold = myqa.quantity(draft_threshold)
if mydraft_threshold['value'] <= 0.:
print("ERROR: Invalid draft_threshold parameter. Must have positive value.")
return False
if not (mydraft_threshold['unit'] in ["Jy", "mJy", "uJy"]):
print("ERROR: Invalid draft_threshold parameter. Must have unit Jy, mJy, or uJy.")
return False
except:
print("ERROR: Invalid draft_threshold parameter. Must be string quantity, e.g. 25mJy")
return False
myreqchanwidth=None
if type(reqchanwidth)==int:
if reqchanwidth<=0:
print("ERROR: Invalid reqchanwidth parameter. Must be integer or string quantity")
return False
myreqchanwidth = {'unit': 'channel', 'value': reqchanwidth}
elif type(reqchanwidth)==str:
try:
myreqchanwidth = myqa.quantity(reqchanwidth)
if myreqchanwidth['value'] <= 0.:
print("ERROR: Invalid reqchanwidth parameter. Must have positive value.")
return False
if not (myreqchanwidth['unit'] in ["Hz", "kHz", "MHz", "GHz", "m/s", "km/s"]):
print("ERROR: Invalid reqchanwidth parameter. Must have unit Hz, kHz, MHz, GHz, m/s, or km/s.")
return False
except:
            print("ERROR: Invalid reqchanwidth parameter. Must be integer or string quantity, e.g. '20km/s'")
return False
elif reqchanwidth!=None:
        print("ERROR: Invalid reqchanwidth parameter. Must be integer or string quantity, e.g. '20km/s', or None")
return False
if not type(makecubes)==bool:
print("ERROR: Invalid makecubes parameter. Must be True or False.")
return False
if not type(docontsub)==bool and not docontsub==None:
print("ERROR: Invalid docontsub parameter. Must be True or False.")
return False
if not type(chanwidthtol)==float or chanwidthtol<0.:
print("ERROR: Invalid chanwidthtol parameter. Must be a float >= 0.")
return False
myviss = vis
if type(vis)==str:
if ('*' in vis) or ('?' in vis):
myviss = glob.glob(vis)
if len(myviss) == 0:
print("ERROR: Invalid vis parameter. No MSs found matching "+vis)
return False
else:
myviss = [vis]
# make sure the list is sorted chronologically (this is important for judging the need for mstransform before concat);
# at the same time, check the presence of the intent CALIBRATE_POLARIZATION
visspwsandtimes = []
addfullpolanalysis = False
if len(myviss)>1:
print("Sorting list of input MSs by observation time ...")
for myvis in myviss: # need to go over this even when len(myviss)==1 since we need the science SPWs
if type(myvis) != str:
print("ERROR: Invalid vis parameter. Must be non-empty string or list of strings.")
return False
try: # open MS as a test and at the same time extract first science obs time and science SPWs
mymsmd.open(myvis)
except:
print("ERROR: Invalid vis parameter. Could not open MS "+str(myvis))
return False
try:
onsourcetimes = mymsmd.timesforintent("OBSERVE_TARGET#ON_SOURCE")
thetargetintent = "OBSERVE_TARGET#ON_SOURCE"
if len(onsourcetimes) == 0:
print("ERROR: There is no data with intent OBSERVE_TARGET#ON_SOURCE in MS "+str(myvis))
myintents = mymsmd.intents()
for myintent in myintents:
if 'OBSERVE_TARGET' in myintent:
thetargetintent = myintent
print("Will try to continue using the times and spws for intent "+thetargetintent+" ...")
onsourcetimes = mymsmd.timesforintent(thetargetintent)
firstonsourcetime = onsourcetimes[0]
otos_spwids = list(mymsmd.spwsforintent(thetargetintent))
spwids = []
for myspw in otos_spwids:
if mymsmd.nchan(myspw)>4:
spwids.append(myspw)
if 'CALIBRATE_POLARIZATION#ON_SOURCE' in mymsmd.intents():
addfullpolanalysis = True # we will need to add the full pol analysis step at the end before the fitsexport
mymsmd.close()
spwids.sort() # should not be necessary but let's stay on the safe side
visspwsandtimes.append((firstonsourcetime, myvis, spwids))
except:
print("ERROR trying to determine on-source times and SPWs for MS "+str(myvis)+": "+str(sys.exc_info()))
return False
# now sort myviss by the times in visspwsandtimes, i.e. chronologically
sortedvisspwsandtimes = sorted(visspwsandtimes)
myviss = [myvis for i, myvis, myspws in sortedvisspwsandtimes]
# and make an accordingly sorted list of the spws
sciencespws = [myspws for i, myvis, myspws in sortedvisspwsandtimes]
hascorrected = []
print("Science SPWs in the input MS(s):")
for myvisspwsandtimes in sortedvisspwsandtimes:
myvis = myvisspwsandtimes[1]
print(" "+myvis+": "+str(myvisspwsandtimes[2]) )
try:
mytb.open(myvis)
mycols = mytb.colnames()
hascorrected.append("CORRECTED_DATA" in mycols)
mytb.close()
except:
print("ERROR when accessing MS "+str(myvis))
return False
for myspwids in sciencespws:
if not myspwids == sciencespws[0]:
print("ERROR: The input MSs have different science SPW IDs!")
return False
sciencespws = sciencespws[0]
if True in hascorrected:
if False in hascorrected:
            print("ERROR: inhomogeneous data columns in input MS: some have CORRECTED_DATA, some don't.")
return False
if not type(restfreqs) == dict:
print("ERROR: Invalid parameter restfreqs. Must be a dictionary, e.g. {25: '230GHz', 27: '231GHz', ...}")
return False
possibleadtlimg = ['CALIBRATE_PHASE',
'CALIBRATE_BANDPASS',
'CALIBRATE_FLUX',
'OBSERVE_CHECK_SOURCE',
'CALIBRATE_POLARIZATION']
if not type(additionalimg) == list:
print("ERROR: Invalid parameter additionalimg. Must be a list, e.g. ['CALIBRATE_BANDPASS', 'OBSERVE_CHECK_SOURCE']")
print("Possible elements are: ")
print(possibleadtlimg)
return False
else:
for myintent in additionalimg:
if not myintent in possibleadtlimg:
print("ERROR: Invalid parameter additionalimg. Possible elements are: ")
print(possibleadtlimg)
return False
if not type(spwmap) == list:
print("ERROR: invalid spwmap, must be list: "+str(spwmap))
return False
# check that spwmap covers all science SPWs
if len(spwmap) < len(sciencespws):
if spwmap == []:
myspwmap = sciencespws
else:
            print("ERROR: invalid spwmap, must cover all science SPWs in the MS.")
            return False
else:
myspwmap = spwmap
myspwmap.sort()
for myspw in myspwmap:
if not (type(myspw) == int and myspw>=0):
            print("ERROR: invalid spwmap, must only contain non-negative integers.")
return False
# determine default value of docontsub if not explicitly set
if docontsub == None:
if addfullpolanalysis:
docontsub = False
if makecubes:
print("\nBy default, no contsub for full pol. data.")
else:
docontsub = True
print("vis = "+str(myviss))
print("draft_threshold = "+str(mydraft_threshold))
print("reqchanwidth = "+str(myreqchanwidth))
print("makecubes = "+str(makecubes))
print("docontsub = "+str(docontsub))
print("chanwidthtol = "+str(chanwidthtol))
print("restfreqs = "+str(restfreqs))
print("additionalimg = "+str(additionalimg))
print("spwmap = "+str(spwmap))
print("perchanweightdensity = "+str(perchanweightdensity))
    print()
# parameters verified
# gather input parameters for getimgpars using findfields
representativems=0
targetfielddicts = findfields(myviss[representativems], thetargetintent, myspwmap)
if len(targetfielddicts)==0:
print("ERROR: Could not find any target fields in the given MSs.")
return False
discrepantfields=[]
if len(myviss)>1: # for the case of a mosaic and multiple input MSs, double-check that all MSs have same mosaic fields
reprmschanged=False
for mydict in targetfielddicts:
if mydict['ismosaic']:
print("Checking whether target field setup for mosaic field "+mydict['fieldname']+" agrees between input MSs ...")
for i in range(0,len(myviss)):
if i==representativems:
continue
otherdicts = findfields(myviss[i], thetargetintent, myspwmap)
for myotherdict in otherdicts:
if myotherdict['fieldname'] == mydict['fieldname']:
if not sorted(myotherdict['fieldids']) == sorted(mydict['fieldids']):
casalog.post("Not all input MSs have same field ids for field "+mydict['fieldname'], 'WARN')
casalog.post(myviss[representativems]+": "+str(sorted(mydict['fieldids'])), 'WARN')
casalog.post(myviss[i]+": "+str(sorted(myotherdict['fieldids'])), 'WARN')
if len(myotherdict['fieldids'])>len(mydict['fieldids']):
casalog.post("Switching to "+myviss[i]+" as representative MS. ", 'WARN')
mydict['fieldids'] = myotherdict['fieldids']
representativems = i
reprmschanged=True
                                print()
discrepantfields.append(mydict['fieldname'])
break # there can be no other dict for the same fieldname
# end if
# end for
# end for
# end if
# end for
if reprmschanged:
targetfielddicts = findfields(myviss[representativems], thetargetintent, myspwmap)
# initialise the output script file
scriptfile = open(scriptname, 'w')
mystepindent = " "
mystepdict = {}
haveshifts = False
shiftsHz = {}
shiftsChannels = {}
haverestfreqs = True
yourthresholdhere = False
# write necessary import statements to script
stext = "import os"
scriptfile.write("\n"+stext)
stext = "import sys"
scriptfile.write("\n"+stext+"\n")
# write global var defs to script
# thevis
stext = printstrarray(myviss, "thevis")
scriptfile.write("\n"+stext)
# therestfreqs
if makecubes:
# get the nominal restfreqs from the MS
try:
nomrestfreqs = aU.restFrequencies(myviss[0], showSpwFreq=True)
except:
casalog.post("ERROR: in call to aU.restFrequencies - "+str(sys.exc_info()), 'WARN')
casalog.post(" Your version of the analysisUtils may be broken or this is not ALMA data.", 'WARN')
casalog.post(" Will try to continue with user-provided or central freqs ...", 'WARN')
nomrestfreqs = []
stext = "# put restfreqs in this dictionary,\n# one for each SPW ID, e.g. {17: '350GHz', 19: '356GHz'}\n"
stext = stext+"therestfreqs = {"
idnt = len("therestfreqs = {")*" "
for myspw in targetfielddicts[0]['spwids']:
if myspw in restfreqs:
if type(restfreqs[myspw]) == str:
stext = stext+str(myspw)+": '"+restfreqs[myspw]+"',\n"+idnt
else:
print("ERROR: Invalid parameter restfreqs. Dictionary entries must be strings, e.g. {25: '230GHz', 27: '231GHz', ...}")
return False
else: # the user did not provide a restfreq; try to use nominal
if len(nomrestfreqs)>0 and len(nomrestfreqs[myspw]['frequency']) > 0:
print("No restfreq provided by user for spw "+str(myspw)+". Will use value from SOURCE table.")
stext = stext+str(myspw)+": '"+str(round(nomrestfreqs[myspw]['frequency'][0]/1E9,10))+"GHz',\n"+idnt
else:
print("No restfreq provided by user for spw "+str(myspw)+" and no value in SOURCE table. Will use central freq.")
mymsmd.open(myviss[0])
mycenterfreq = mymsmd.meanfreq(myspw)
mymsmd.close()
stext = stext+str(myspw)+": '"+str(round(mycenterfreq/1E9,10))+"GHz',\n"+idnt
haverestfreqs = False
stext = stext[0:len(stext)-len(",\n"+idnt)]
stext = stext+"}"
scriptfile.write(stext+"\n\n")
if(targetfielddicts[0]['isephem']) and aU.getCasaVersion() < '5.4.0':
print("ERROR: handling of ephemeris target objects not supported for CASA versions < 5.4.0 .")
return False
if docontsub:
if(targetfielddicts[0]['isephem'] and len(myviss)>1):
print("ERROR: continuum subtraction for ephemeris objects and multiple input MSs is not yet supported.")
print(" Use docontsub=False")
return False
haveshifts, shiftsHz, shiftsChannels = haveShiftedSPWs(myviss, targetfielddicts[0]['spwids'], chanwidthtol)
if haveshifts:
thevislsrk = []
for myvis in myviss:
for myspw in targetfielddicts[0]['spwids']:
thevislsrk.append(myvis+".lsrk.spw"+str(myspw))
stext = printstrarray(thevislsrk, "thevislsrk")
scriptfile.write(stext+"\n\n")
if spwmap != []: # non-trivial spwmap: assume that this is to get the SPW IDs right for the archive
stext = "# NOTE: the SPW IDs in the _image_names_ in this script use the numbering as in the original ASDM(s),\n"
stext = stext+"# not the numbering used in the MS(s) processed directly by this script.\n"
stext = stext+"# This is a requirement of the ALMA archive.\n"
scriptfile.write(stext+"\n")
# do contsub and concat if needed
contsubinfile=""
contsuboutfile=""
usereindexedspwids=False
usereindexedspwidsincontsub=False
usereindexedfieldids=False
if makecubes and docontsub:
if len(myviss)>1:
if haveshifts:
print("\nNOTE: The SPWs in the different input MSs are shifted by more than "+str(chanwidthtol)+" chanwidths w.r.t. each other.")
print(" Will add code to mstransform them before concatenation ...")
print(" (You can override this using ISG parameter 'chanwidthtol'.)")
thetnchan, thetwidth, thetstart = largestCommonSPWsLSRK(myviss, targetfielddicts[0]['spwids'])
for myspw in targetfielddicts[0]['spwids']:
print("New common grid for SPW "+str(myspw)+":")
print(" nchan = "+str(thetnchan[myspw])+", start ="+str(thetstart[myspw]/1E9)+" GHz, width = "+str(thetwidth[myspw]/1000.)+" kHz")
mypardict = {'taskname': 'mstransform',
'vis': 'VARmyvis',
'outputvis': "VARmyvis+'.lsrk.spw"+str(myspw)+"'",
'outframe': 'LSRK',
'spw': str(myspw),
'mode': 'frequency',
'nchan': thetnchan[myspw],
'width': str(thetwidth[myspw]/1000.)+'kHz',
'start': str(thetstart[myspw]/1E9)+'GHz',
'regridms': True,
'datacolumn': 'data',
'reindex': True}
usereindexedspwidsincontsub=True
if True in hascorrected:
mypardict['datacolumn'] = 'corrected'
stext = "for myvis in thevis:\n"
stext += mystepindent + "os.system('rm -rf '+myvis+'.lsrk.spw"+str(myspw)+"')\n"
stext += mystepindent + printtask(mypardict, mypardict['taskname'], mystepindent)
# write command
sfsdr.addReducScriptStep(scriptfile, mystepdict,
"Transform SPW "+str(myspw)+" for all MSs to common largest LSRK grid before concat",
stext, mystepindent)
print("\nCreating concat command for mstransformed MSs ...")
contsubinfile = 'concat.ms'
mypardict = {'taskname': 'concat',
'vis': 'VARlist(thevislsrk)',
'concatvis': contsubinfile}
#if not targetfielddicts[0]['ismosaic']:
if True:
mypardict['copypointing'] = False
stext = "os.system('rm -rf "+contsubinfile+"')\n"
stext += printtask(mypardict, mypardict['taskname'])
stext += "\n"
# write command
sfsdr.addReducScriptStep(scriptfile, mystepdict,
"Concatenation of mstransformed MSs",
stext, mystepindent)
else: # SPWs are already aligned within chanwidthtol
print("\nCreating concat command ...")
contsubinfile = 'concat.ms'
theshifts = []
for myshift in shiftsHz:
theshifts.append(shiftsHz[myshift])
theshifts.append(1.) # final tolerance should not be smaller than 1 Hz
myfreqtolHz = np.max(theshifts)
mypardict = {'taskname': 'concat',
'vis': 'VARthevis',
'concatvis': contsubinfile,
'freqtol': str(round(myfreqtolHz*1.01))+'Hz'} # increase tol by 1% to avoid borderline effects
if not targetfielddicts[0]['ismosaic']:
mypardict['copypointing'] = False
stext = printtask(mypardict, mypardict['taskname'])
stext += "\n"
# write command
sfsdr.addReducScriptStep(scriptfile, mystepdict,
"Concatenation",
stext, mystepindent)
else:
contsubinfile = myviss[0]
if targetfielddicts[0]['spwids'][0] > 11: # looks like the calibrated MS uses the original SPW IDs
# uvcontsub will renumber these to consecutive IDs starting at 0
usereindexedspwids=True
if targetfielddicts[0]['ismosaic']:
# since uvcontsub reindexes the fields, need to pay attention to the phase center
usereindexedfieldids=True
### Imaging ###
myimages = set([])
# create agg. bw. imaging commands for additional fields
theintent_extension = thetargetintent[14:] # in the case of "OBSERVE_TARGET#ON_SOURCE" this will be "#ON_SOURCE"
for myintent in additionalimg:
adtlfielddicts = findfields(myviss[representativems], myintent+theintent_extension, myspwmap)
if len(adtlfielddicts)==0:
print("\nNOTE: Could not find any fields with intent "+myintent+theintent_extension+" in the given MSs.\n")
continue
for adtldict in adtlfielddicts:
print("\nWorking on field "+str(adtldict['fieldname'])+"\n")
print(adtldict)
print("\nCreating aggregate bandwidth imaging command ...")
try:
mypardict = getimgpars(vis=myviss[representativems],
fieldname=str(adtldict['fieldname']),
intent=adtldict['intent'],
mode='mfs',
isMosaic=adtldict['ismosaic'],
isFullPol=adtldict['isfullpol'],
field=adtldict['fieldids'],
spw=adtldict['spwids'],
spwmap=adtldict['spwmap'])
except exceptions.KeyboardInterrupt:
print("ERROR: "+str(sys.exc_info()))
return False
except:
print("ERROR: "+str(sys.exc_info()))
casalog.post("ERROR: could not get imaging parameters. Will try to continue ...", 'WARN')
continue
mypardict['vis'] = 'VARthevis'
mypardict['threshold'] = '<your threshold here>'
yourthresholdhere = True
stext = "\nos.system('rm -rf "+mypardict['imagename']+"*')\n"
stext += printtask(mypardict, mypardict['taskname'])
stext += "\n"
# move to the standard names so the fits export picks up the right images
if mypardict['nterms'] > 1:
stext += "\nos.system('mv "+mypardict['imagename']+".image.tt0.pbcor "+mypardict['imagename']+".image.pbcor')\n"
stext += "os.system('ln -sf "+mypardict['imagename']+".image.pbcor "+mypardict['imagename']+".image.tt0.pbcor')\n"
stext += "os.system('mv "+mypardict['imagename']+".pb.tt0 "+mypardict['imagename']+".pb')\n"
stext += "os.system('ln -sf "+mypardict['imagename']+".pb "+mypardict['imagename']+".pb.tt0')\n"
# write command
sfsdr.addReducScriptStep(scriptfile, mystepdict,
"Agg. bandwidth image for non-science target "+adtldict['fieldname']+" (intent "+myintent+"), spws "+str(adtldict['spwids']),
stext, mystepindent)
# memorize image name for later fits export
myimages.add(mypardict['imagename'])
# call getimpars to obtain tclean parameters and write tclean commands for science target(s)
mightconstrainchannels=False
for tfdict in targetfielddicts:
print("\nWorking on field "+str(tfdict['fieldname'])+"\n")
print(tfdict)
mymode = 'mfs'
if docontsub:
print("\nCreating continuum imaging command ...\n")
mymode = 'cont'
else:
print("\nCreating aggregate bandwidth imaging command ...\n")
try:
mypardict = getimgpars(vis=myviss[representativems],
fieldname=str(tfdict['fieldname']),
intent=tfdict['intent'],
mode=mymode,
isEphem=tfdict['isephem'],
isMosaic=tfdict['ismosaic'],
isFullPol=tfdict['isfullpol'],
field=tfdict['fieldids'],
spw=tfdict['spwids'],
spwmap=tfdict['spwmap'])
except exceptions.KeyboardInterrupt:
print("ERROR: "+str(sys.exc_info()))
return False
except:
print("ERROR: "+str(sys.exc_info()))
casalog.post("ERROR: could not get imaging parameters. Will try to continue ...", 'WARN')
continue
mypardict['vis'] = 'VARthevis'
if myreqchanwidth==None or not makecubes:
mypardict['threshold'] = draft_threshold
stext = "\nos.system('rm -rf "+mypardict['imagename']+"*')\n"
stext += printtask(mypardict, mypardict['taskname'])
stext += "\n"
if docontsub:
stext += "# NOTE: enter the continuum channel selection in the spw parameter!\n"
# move the standard names so the fits export picks up the right images
if mypardict['nterms'] > 1:
stext += "\nos.system('mv "+mypardict['imagename']+".image.tt0.pbcor "+mypardict['imagename']+".image.pbcor')\n"
stext += "os.system('ln -sf "+mypardict['imagename']+".image.pbcor "+mypardict['imagename']+".image.tt0.pbcor')\n"
stext += "os.system('mv "+mypardict['imagename']+".pb.tt0 "+mypardict['imagename']+".pb')\n"
stext += "os.system('ln -sf "+mypardict['imagename']+".pb "+mypardict['imagename']+".pb.tt0')\n"
# write command
if docontsub:
sfsdr.addReducScriptStep(scriptfile, mystepdict,
"Continuum image for target "+tfdict['fieldname']+", spws "+str(tfdict['spwids']),
stext, mystepindent)
else:
sfsdr.addReducScriptStep(scriptfile, mystepdict,
"Agg. bandwidth image for target "+tfdict['fieldname']+", spws "+str(tfdict['spwids']),
stext, mystepindent)
# memorize image name for later fits export
myimages.add(mypardict['imagename'])
if makecubes:
if docontsub:
print("\nCreating uvcontsub command ...")
contsubspwids = tfdict['spwids']
if usereindexedspwidsincontsub:
contsubspwids = []
for myspw in tfdict['spwids']:
contsubspwids.append(tfdict['spwids'].index(myspw))
mypardict = getcontsubpars(contsubspwids, '<your channel selection here>')
mypardict['vis'] = contsubinfile
mypardict['field'] = str(tfdict['fieldname'])
contsuboutfile = contsubinfile+'_'+mypardict['field']+'.contsub'
stext = printtask(mypardict, mypardict['taskname'])
stext += "\nos.system('rm -rf "+contsuboutfile+"')\n"
stext += "os.system('mv "+contsubinfile+".contsub "+contsuboutfile+"')\n"
stext += "\n"
# write command
sfsdr.addReducScriptStep(scriptfile, mystepdict,
"Continuum subtraction for field "+mypardict['field'],
stext, mystepindent)
print("\nCreating cube imaging commands ...\n")
tfdict['spwids'].sort()
for myspw in tfdict['spwids']:
try:
mypardict = getimgpars(vis=myviss[representativems],
fieldname=str(tfdict['fieldname']),
intent=tfdict['intent'],
mode='cube',
isEphem=tfdict['isephem'],
isMosaic=tfdict['ismosaic'],
isFullPol=tfdict['isfullpol'],
field=tfdict['fieldids'],
spw=[myspw],
spwmap=[myspwmap[sciencespws.index(myspw)]])
except exceptions.KeyboardInterrupt:
print("ERROR: "+str(sys.exc_info()))
break
except:
print("ERROR: "+str(sys.exc_info()))
casalog.post("ERROR: could not get cube imaging parameters. Will try to continue ...", 'WARN')
continue
if "start" in mypardict: # this image has a large number of channels
mightconstrainchannels=True
if docontsub:
mypardict['vis']= contsuboutfile
if usereindexedspwids:
print("NOTE: using reindexed SPWs when imaging contsub cube but leaving original SPW in image name.")
mypardict['spw'] = str(tfdict['spwids'].index(myspw))
if usereindexedfieldids:
if tfdict['ismosaic'] and not tfdict['isephem']:
oldphasecenter = mypardict['phasecenter']
mypardict['phasecenter'] = tfdict['fieldids'].index(oldphasecenter)
# also: remove the comment from the field parameter
mypardict['field'] = str(tfdict['fieldname'])
else:
mypardict['vis'] = 'VARthevis'
if myreqchanwidth==None:
mypardict['threshold'] = "<estimate from requested continuum sensitiviy>"
yourthresholdhere = True
else:
mypardict['threshold'] = str(mydraft_threshold['value'])+mydraft_threshold['unit']
if mypardict['width'] == '':
if myreqchanwidth['unit'] == 'channel':
mypardict['width'] = myreqchanwidth['value']
else:
mypardict['width'] = str(myreqchanwidth['value'])+myreqchanwidth['unit']
mypardict['restfreq'] = "VARtherestfreqs["+str(myspw)+"]"
stext = "os.system('rm -rf "+mypardict['imagename']+"*')\n"
stext += printtask(mypardict, mypardict['taskname'])
stext += "\n"
# write command
sfsdr.addReducScriptStep(scriptfile, mystepdict,
"Cube for target "+tfdict['fieldname']+", spw "+str(myspw),
stext, mystepindent)
# memorize image name for later fits export
myimages.add(mypardict['imagename'])
# end for tfdict in targetfielddicts
# add full polarisation analysis step if needed
if addfullpolanalysis:
print("\nCreating full polarisation RMS analysis commands ...")
stext = printfullpolanalysis()
sfsdr.addReducScriptStep(scriptfile, mystepdict,
"Polarization rms analysis",
stext, mystepindent)
# add the polarisation angle and intensity images to the list to export
presentimages = myimages.copy()
for imname in presentimages:
if ("_sci" in imname or "_polleak" in imname) and ("mfs.IQUV" in imname or "cont,IQUV" in imname):
newname = imname.replace('IQUV', 'A')
myimages.add(newname)
newname = imname.replace('IQUV', 'P')
myimages.add(newname)
# final fits export step
print("\nCreating exportfits commands ...")
stext = ""
if spwmap != []: # non-trivial spwmap: assume that this is to get the SPW IDs right for the archive
stext = "# NOTE: the SPW IDs in the image names in this script use the numbering as in the original ASDM(s)\n\n"
myimageslist = list(myimages)
myimageslist.sort()
stext = stext + printstrarray(myimageslist, "myimages")
stext = stext + "for myimagebase in myimages:\n"
mypardict = { 'taskname': 'exportfits',
'imagename': "VARmyimagebase+'.image.pbcor'",
'fitsimage': "VARmyimagebase+'.pbcor.fits'",
'overwrite': True }
stext = stext + mystepindent + printtask(mypardict, mypardict['taskname'], mystepindent)
mypardict['imagename'] = "VARmyimagebase+'.pb'"
mypardict['fitsimage'] = "VARmyimagebase+'.pb.fits'"
stext = stext + mystepindent + "if os.path.exists(myimagebase+'.pb'):\n"
stext = stext + 2*mystepindent + printtask(mypardict, mypardict['taskname'], 2*mystepindent)
# write command
sfsdr.addReducScriptStep(scriptfile, mystepdict,
"Export images to FITS format",
stext, mystepindent)
# write script header including isg version
sfsdr.prependReducScriptHeader(scriptfile, mystepdict, "Created using "+version(), mystepindent)
scriptfile.close()
print("Script generation completed. Please find "+scriptname+" in the current directory.\n")
if len(discrepantfields)>0:
print()
print("For the following mosaic fields, the field ids did not agree between the input MSs:")
print(" "+str(discrepantfields))
print(" This may mean that the mosaic coverage was incomplete in some of the MSs. Please check.")
print()
if makecubes:
print("NOTE that you need still need to edit the script:")
if len(discrepantfields)>0:
print(" - for mosaic fields "+str(discrepantfields)+" you may need to adjust the phasecenter")
print(" since the discrepant field setup of the individual input MSs may have led to an")
print(" incorrect phase center choice. To set it explicitly use the coordinate syntax,")
print(" example: phasecenter = 'ICRS 05:02:02.3290 -069.33.45.547'")
if yourthresholdhere:
print(" - in some imaging commands, you need to set the threshold")
if not haverestfreqs:
print(" - You may need to edit the array \"therestfreqs\" to set the rest frequencies for the cubes of each SPW.")
print(" In at least some cases, you did not provide a value on the command line and none could be extracted")
print(" from the SOURCE table. In those cases, the center frequency was used (see log comments above).")
print(" - You need to adjust the threshold for the aggregate bandwidth/continuum image(s).")
if docontsub:
print(" - You need to edit the spw parameter in each continuum imaging command to select the continuum channels.")
print(" - You need to edit the fitspw parameter in each uvcontsub command.")
if mightconstrainchannels:
print(" - There are cubes with a potentially large number of channels. In order to speed up processing,")
print(" you might want to consider to set the start and nchan parameters to constrain them.")
print()
print("Generally: scrutinise the code for each step before you execute it!")
print(" There is no guarantee that the generator will always have done exactly the right thing.")
print("Report bugs and suggestions for improvement on SCOPS-5183.")
return True
def findfields(vis=None, intent='OBSERVE_TARGET#ON_SOURCE', spwmap=[]):
"""
Will investigate the MS and return a _list_ of dictionaries with the following outline,
one dictionary for each field name:
{'fieldname': value, # e.g. 'NGC253'
'intent': value, # e.g. 'OBSERVE_TARGET#ON_SOURCE'
'isfullpol': value, # True or False
'isephem': value, # True or False
'ismosaic': value, # True or False
'fieldids': value, # list of numerical field ids
'spwids': value, # list of numerical spw ids
'spwmap': value} # the spwmap for genImageName corresponding to spwids
"""
rval = []
print("findfields for intent "+str(intent))
if vis==None:
casalog.post("ERROR: findfields called with invalid vis parameter", 'WARN')
return rval
if type(intent)!=str:
casalog.post("ERROR: findfields called with invalid intent. Must be str.", 'WARN')
return rval
if type(spwmap)!=list:
casalog.post("ERROR: findfields called with invalid spwmap. Must be list.", 'WARN')
return rval
mymsmd = msmdtool()
mytb = tbtool()
try:
mytb.open(vis+'/FIELD')
ephemIDs = np.zeros(mytb.nrows()) - 1 # all ids == -1 by default
if ('EPHEMERIS_ID' in mytb.colnames()):
ephemIDs = mytb.getcol('EPHEMERIS_ID')
mytb.close()
mymsmd.open(vis)
theintents = mymsmd.intents()
if not intent in theintents:
casalog.post("findfields intent '"+intent+"' not present in MS", 'WARN')
mymsmd.close()
return rval
myfieldnames = list(mymsmd.fieldsforintent(intent, asnames=True))
myuniquefieldnames = set(myfieldnames)
for myfieldname in myuniquefieldnames:
# determine list of field ids which have the given intent and name
# at the same time check if an ephemeris is attached
fieldids = []
isephem = False
intendedfieldids = list(mymsmd.fieldsforintent(intent, asnames=False))
for myfieldid in list(mymsmd.fieldsforname(myfieldname)):
if myfieldid in intendedfieldids:
fieldids.append(myfieldid)
if ephemIDs[myfieldid] >= 0:
isephem = True
print("Field "+str(myfieldid)+" has an ephemeris attached.")
allspwids = list(mymsmd.spwsforfield(myfieldname))
intendedspwids = list(mymsmd.spwsforintent(intent))
spwids = []
scispwids = []
myspwmap = []
for myspw in intendedspwids:
if mymsmd.nchan(myspw)>4:
scispwids.append(myspw)
if (myspw in allspwids):
spwids.append(myspw)
if spwmap == []:
myspwmap = spwids
else: # find the corresponding original SPW ID in the input spwmap
for myspw in spwids:
try:
myspwmap.append(spwmap[scispwids.index(myspw)])
except:
print("ERROR: "+str(sys.exc_info()))
casalog.post(" findfields called with invalid spwmap: "+str(spwmap), 'WARN')
return []
# determine if full pol
isfullpol = False
for thespw in spwids:
ddids = mymsmd.datadescids(spw=thespw)
for ddid in ddids:
polid = mymsmd.polidfordatadesc(ddid)
corrtypes = list(mymsmd.corrtypesforpol(polid))
ctname = {'6':'RL', '7':'LR', '10':'XY', '11':'YX'}
for ct in [6,7,10,11]:
if corrtypes.count(ct)>0:
casalog.post("Found corr type "+ctname[str(ct)]+" for SPW "+str(thespw)+". This dataset is FULL POLARISATION",
'WARN')
isfullpol = True
break
if isfullpol:
break
if isfullpol:
break
# determine if this is a mosaic
ismosaic = (len(fieldids)>1)
fielddict = {'fieldname': myfieldname,
'intent': intent,
'isfullpol': isfullpol,
'isephem': isephem,
'ismosaic': ismosaic,
'fieldids': fieldids,
'spwids': spwids,
'spwmap': myspwmap
}
rval.append(fielddict)
except exceptions.KeyboardInterrupt:
print("ERROR: "+str(sys.exc_info()))
return []
except:
casalog.post("ERROR: in findfields"+str(sys.exc_info())+". Returned list may be incomplete.", 'WARN')
mymsmd.close()
return rval
mymsmd.close()
# to make this reproducible in different Python versions, sort the array
# of field dicts in rval by the first field ID:
fieldids = []
for mydicts in rval:
fieldids.append(mydicts['fieldids'][0])
sortedindices = np.argsort(fieldids)
sortedrval = []
for myidx in sortedindices:
sortedrval.append(rval[myidx])
return sortedrval
def getimgpars(vis='', fieldname='', intent='', mode='', isMosaic=False, isEphem=False, isFullPol=False, field=[], spw=[],
spwmap=[], perchanweightdensity=False):
"""
Generate image parameters to be added to the imaging script.
Take fieldnames and other relevant parameters from output of findfields
and generate a dictionary of all tclean parameters for each targetfield
Input parameters and values:
fieldname = name of the field source. e.g. 'NGC253'.
intent = Observation intent (same as intent in listobs). e.g. 'OBSERVE_TARGET#ON_SOURCE'.
mode = imaging mode. 'mfs' or 'cube'.
isMosaic = if the target is mosaic or not. Boolean, default: False.
isEphem = if the target has an ephemeris attached. Boolean, default: False.
isFullPol = if the MS contains full polarisation data. Boolean, default: False
field = field IDs to image. List of numerical field ids.
spw = spectral windows to image. List of numerical spw ids. only int or list of int.
If you want to image specific channels from the spws,
you have to edit the spw parameter of tclean manually.
spwmap = spwmap to be used in aU.genImageName
perchanweightdensity = the setting of the perchanweightdensity parameter of tclean (CASA 5.6+)
Output parameters and values:
vis = Input visibilities (.ms files)
imagename = Output image name. From aU.genImageName().
field = field IDs to image. List of numerical field ids.
intent = the intent selected in the input (only included if isMosaic==True)
spw = spectral windows to image. List of numerical spw ids. Int or Str.
deconvolver = 'hogbom' if intent == 'Target'; 'mtmfs' if intent == 'Calibration' or mode == mfs or cont and
fractional bandwidth >= 10%; 'clarkstokes' for full polarisation
imsize = Obtained from au.pickCellSize
cell = Obtained from au.pickCellSize
phasecenter = Obtained from au.pickCellSize
niter = default to 100
nchan = default: -1
weighting = 'briggsbwtaper' # for cubes, 'briggs' otherwise
robust = default to 0.5
nterms = default to 1; if the fractional bandwidth is >= 10%, it is set to 2
stokes = I
threshold = Threshold at which to stop cleaning. Equal to requested RMS. default=''.
To be edited by data analyst, or to be taken as input parameter for 'generateImagingScript'.
width = channel width for spectral line imaging, default=''.
To be edited by data analyst, or to be taken as input parameter for 'generateImagingScript'.
start = default to ''.
restfreq = Rest frequency of the output image. default=''.
To be edited by data analyst, or to be taken as input parameter for 'generateImagingScript'.
specmode = Spectral definition mode. = mode (mfs/cube)
gridder = Gridding options. 'standard' or 'mosaic'
pbcor = always True
outframe = default to LSRK.
taskname = which task should be used. Options = ['tclean', 'clean'], default = 'tclean'
All other tclean parameters left as default.
"""
mymsmd = msmdtool()
thevis = ''
thefieldname = ''
thefieldids = []
thespw = []
thespwmap = []
theintent = ''
theintents = ['OBSERVE_TARGET',
'CALIBRATE_PHASE',
'CALIBRATE_BANDPASS',
'CALIBRATE_FLUX',
'OBSERVE_CHECK_SOURCE',
'CALIBRATE_POLARIZATION'
]
themodes = ['mfs', 'cube', 'cont']
thespecmode = ''
thegridder = ''
thegridders = ['standard', 'mosaic']
theimsize = ''
thecellsize = ''
thephasecenter = ''
thethreshold = ''
thewidth = ''
therestfreq = ''
theniter = 100
thenchan = -1
theweighting = 'briggs' # may be modified later depending on thespecmode
therobust = 0.5
thenterms = 1
thestokes = 'I'
theimagenames = []
thedeconvolver = ''
theoutframe = 'LSRK'
thetaskname = 'tclean'
thetasknames = ['tclean', 'clean']
smallnumberofchannels=1000 # above this number, we will suggest to constrain the number of channels
# Get calibrated measurement sets
if vis=="" or type(vis)!=str:
raise Exception("Error: invalid vis parameter. Must be non-empty str.")
else:
thevis = vis
# Get target object name
if fieldname != '':
thefieldname = fieldname
else:
print("ERROR: fieldname is empty ")
raise Exception("ERROR: fieldname is empty ")
# Get field ids
if field != []:
thefieldids = field
else:
print("ERROR: field ids left blank ")
raise Exception("ERROR: field ids left blank ")
if type(isMosaic) != type(True):
print("ERROR: invalid isMosaic option "+str(isMosaic))
print(" Valid options are True, False")
raise Exception("ERROR: invalid isMosaic option ", isMosaic)
thefinalfield = ""
for myfield in thefieldids:
thefinalfield += str(myfield)+','
thefinalfield = thefinalfield[:len(thefinalfield)-1]
if isMosaic:
thefinalfield = "'"+thefieldname+"', # IDs from representative MS: '"+thefinalfield+"'"
else:
thefinalfield = thefieldname
# Get spectral window ids
if type(spw) != type(1) and type(spw) != type([]):
print("Error: wrong type for parameter spw "+str(spw))
print("Valid parameter types are integer or list of integers.")
raise Exception("Error: wrong type for parameter spw ", spw)
else:
thespw = spw
if type(thespw) != type([]):
thespw = [thespw]
sp = ""
for mysp in thespw:
sp += str(mysp)+','
sp = sp[:len(sp)-1]
if type(spwmap) != type([]) or len(spwmap)<len(thespw):
print("Error: invalid spwmap "+spw(spwmap))
raise Exception("Error: spwmap must be list at least as long as parameter spw ")
else:
thespwmap = spwmap
# Get specmode
if mode in themodes:
thespecmode = mode
else:
print("ERROR: invalid specmode "+str(mode))
print(" Valid entries are "+str(themodes))
raise Exception("ERROR: invalid targettype "+str(mode))
# decide on weighting, default is "briggs"
if thespecmode == 'cube' and aU.getCasaVersion() >= '6.2.0':
theweighting = 'briggsbwtaper'
# for mode mfs, determine fractional bandwidth
thefracbw = 0.
if (thespecmode=='mfs' or thespecmode=='cont'):
mymsmd.open(thevis)
myspwbws = mymsmd.bandwidths(thespw)
myfreqs = []
for mysp in thespw:
mycf = mymsmd.chanfreqs(mysp)
myfreqs.append(min(mycf))
myfreqs.append(max(mycf))
mymsmd.close()
mytotalbwhz = max(myfreqs) - min(myfreqs)
thefracbw = mytotalbwhz * 2./(min(myfreqs)+max(myfreqs))
print("SPWs "+str(thespw)+" span a total bandwidth of "+str(mytotalbwhz/1E9)+" GHz, fractional bandwidth "+str(thefracbw))
if thefracbw >= 0.1 and not isFullPol:
print("Will use mtmfs with nterms = 2.")
thenterms = 2
# Determine gridder
if isMosaic:
thegridder = 'mosaic'
else:
thegridder = 'standard'
# Check intent
# in case of full polarisation use clarkstokes instead of hogbom for cubes
found=False
for myintent in theintents:
if myintent in intent:
found=True
break
if found:
theintent = intent
else:
print("ERROR: invalid intent: "+str(intent))
print(" Valid entries start with one of the following strings: "+str(theintents))
raise Exception("ERROR: invalid intent "+str(intent))
thepbcor = True # always True since tclean produces both images anyway when pbcor is True
if 'OBSERVE_TARGET' in theintent:
thedeconvolver = 'hogbom'
if isFullPol:
thedeconvolver = 'clarkstokes'
thestokes = 'IQUV'
elif thenterms==2:
thedeconvolver = 'mtmfs'
thetargettype = 'sci'
elif 'CALIBRATE_POLARIZATION' in theintent:
if isFullPol:
thedeconvolver = 'clarkstokes'
thestokes = 'IQUV'
else:
raise Exception("ERROR: found intent CALIBRATED_POLARIZATION but data is not full polarization.")
thenterms=1
thetargettype = 'polleak'
elif 'CALIBRATE_PHASE' in theintent:
thedeconvolver = 'hogbom'
if isFullPol:
thedeconvolver = 'clarkstokes'
thestokes = 'IQUV'
elif thenterms==2:
thedeconvolver = 'mtmfs'
thetargettype = 'ph'
elif 'CALIBRATE_BANDPASS' in theintent:
thedeconvolver = 'hogbom'
if isFullPol:
thedeconvolver = 'clarkstokes'
thestokes = 'IQUV'
elif thenterms==2:
thedeconvolver = 'mtmfs'
thetargettype = 'bp'
elif 'CALIBRATE_FLUX' in theintent:
thedeconvolver = 'hogbom'
thetargettype = 'amp'
if isFullPol:
thedeconvolver = 'clarkstokes'
thestokes = 'IQUV'
elif thenterms==2:
thedeconvolver = 'mtmfs'
elif 'OBSERVE_CHECK_SOURCE' in theintent:
thedeconvolver = 'hogbom'
if isFullPol:
thedeconvolver = 'clarkstokes'
thestokes = 'IQUV'
elif thenterms==2:
thedeconvolver = 'mtmfs'
thetargettype = 'chk'
else:
raise Exception("ERROR: internal problem: invalid intent "+theintent)
# Get the imagename
thestokes = 'I'
if isFullPol:
thestokes = 'IQUV'
theimagename = aU.genImageName(vis=thevis,
spw=thespw,
field=thefieldname,
imtype=thespecmode,
targettype=thetargettype,
stokes=thestokes,
spwmap=thespwmap)
# Get optimum cell size, image size, and central field
mypblevel=0.2 # the default of aU.pickCellSize
if isMosaic:
mypblevel=0.1 # to make sure the mosaic size is large enough
try:
thecellsize, theimsize, thephasecenter = aU.pickCellSize(vis=thevis,
#spw=int(thespw[0]), # not giving spw will use mean freq of all
intent=theintent,
imsize=True,
cellstring=True,
pblevel=mypblevel,
sourcename=thefieldname,
verbose=False)
except:
casalog.post("ERROR: in call to aU.pickCellSize - "+str(sys.exc_info()), 'WARN')
casalog.post(" Your version of the analysisUtils may be broken.", 'WARN')
raise Exception("ERROR: call to aU.pickCellSize failed.")
# special treatment for ephemeris objects
if isEphem:
thephasecenter = 'TRACKFIELD'
theoutframe = ''
if thespecmode == 'cube':
thespecmode = 'cubesource'
# Write all parameters in tclean format to dictionary
out_dict = {'vis': thevis,
'imagename': theimagename,
'field': thefinalfield,
'spw': sp,
'imsize': theimsize,
'cell': thecellsize,
'deconvolver': thedeconvolver,
'gridder': thegridder,
'threshold': thethreshold,
'niter': theniter,
'weighting': theweighting,
'pbcor': thepbcor,
'specmode': thespecmode,
'robust': therobust,
'stokes': thestokes,
'outframe': theoutframe,
'mask': '',
'interactive': True,
'taskname': thetaskname}
if isMosaic or isEphem:
out_dict['phasecenter'] = thephasecenter
if isMosaic:
out_dict['intent'] = theintent
if thespecmode=='cube' or thespecmode=='cubesource':
if aU.getCasaVersion() >= '6.2.0': # weighting option briggsbwtaper only became available in 6.2
if out_dict['weighting'] == 'briggsbwtaper':
out_dict['perchanweightdensity'] = True
elif out_dict['weighting'] == 'briggs':
out_dict['perchanweightdensity'] = perchanweightdensity
elif aU.getCasaVersion() > '5.5.0': # parameter perchanweightdensity only became available in 5.6
if out_dict['weighting'] == 'briggs':
out_dict['perchanweightdensity'] = perchanweightdensity
out_dict['width'] = thewidth
out_dict['nchan'] = thenchan
out_dict['restfreq'] = therestfreq
mymsmd.open(thevis)
for mysp in thespw:
mynchan = mymsmd.nchan(mysp)
if mynchan>smallnumberofchannels: # in case the number of channels is high, provide the start and nchan parameters
out_dict['start']= "'', # native number of channels > "+str(smallnumberofchannels)+" (not taking into account width parameter)"
out_dict['nchan']= "-1, # use ms.cvelfreqs to check; possibly constrain start and nchan to speed up processing"
break
elif mynchan==128: # a TDM SPW
out_dict['width'] = 1
out_dict['start']= "'', # this is a TDM SPW, you might want to set start and nchan"
out_dict['nchan']= "-1, # to avoid the flagged edge channels"
mymsmd.close()
else:
out_dict['nterms'] = thenterms
return out_dict
def getcontsubpars(spws='', fitspw=''):
sp = ""
for mysp in spws:
sp += str(mysp)+','
sp = sp[:len(sp)-1]
out_dict = { 'taskname': 'uvcontsub',
'vis': 'calibrated.ms',
'spw': sp,
'fitspw': fitspw,
'excludechans': False,
'combine': '',
'solint': 'int',
'fitorder': 1,
'want_cont': False}
return out_dict
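# Example of the dictionary returned by getcontsubpars above (illustrative fitspw value):
#   getcontsubpars([17, 19], '17:5~60;70~120') ->
#   {'taskname': 'uvcontsub', 'vis': 'calibrated.ms', 'spw': '17,19',
#    'fitspw': '17:5~60;70~120', 'excludechans': False, 'combine': '',
#    'solint': 'int', 'fitorder': 1, 'want_cont': False}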
def printtask(pardict=None, taskstr="", addidnt=""):
"""
Will take a dictionary generated by getimgpars and print the necessary lines of Python code
into a string which is then returned.
Will return a string containing "ERROR" if unsuccessful.
NOTE: if a string value is meant as a _variable_ name, it needs to be prepended with VAR, e.g. VARthevis
"""
if type(pardict) != dict:
return "ERROR"
thekeys = list(pardict.keys())
if len(thekeys)==0:
return "ERROR"
if ("taskname" in thekeys):
thekeys.remove("taskname")
if taskstr=="":
taskstr = pardict("taskname")
if type(taskstr)!=str or taskstr=="":
return "ERROR"
# predefined order for particular tasks
parameterorder = {'tclean': ['vis',
'imagename',
'field',
'intent',
'phasecenter',
'stokes',
'spw',
'outframe',
'restfreq',
'specmode',
'nterms',
'imsize',
'cell',
'deconvolver',
'niter',
'weighting',
'robust',
'mask',
'gridder',
'pbcor',
'threshold',
'width',
'start',
'nchan',
'interactive'],
'exportfits': ['imagename',
'fitsimage',
'overwrite'],
'uvcontsub': ['vis',
'field',
'spw',
'fitspw',
'fitorder',
'solint',
'combine',
'excludechans',
'want_cont'],
'mstransform': ['vis',
'outputvis',
'outframe',
'spw',
'mode',
'nchan',
'width',
'start',
'regridms',
'datacolumn']
}
rval = taskstr+"("
firstkey=''
# determine order in which to print the parameters
if taskstr in parameterorder: # we have a predefined order for this task
originalkeys = thekeys
thekeys = []
for mykey in parameterorder[taskstr]:
if mykey in originalkeys:
thekeys.append(mykey)
originalkeys.remove(mykey)
originalkeys.sort()
thekeys += originalkeys # parameters without predefined position are appended
firstkey = thekeys[0]
else: # use general rules for the order
thekeys.sort()
if 'vis' in thekeys: # if present, print vis first
firstkey = 'vis'
else:
firstkey = thekeys[0]
thekeys.remove(firstkey)
rval += firstkey + " = "
thevalue = pardict[firstkey]
if type(thevalue) == str:
if thevalue[:3] == "VAR": # this is meant as a variable name
rval += thevalue[3:] + ",\n"
elif ", " in thevalue: # this is a string with comma already contained
rval += thevalue + "\n"
else:
rval += "'"+thevalue+"',\n"
else:
rval += str(thevalue) + ",\n"
indnt = addidnt + (len(taskstr)+1)*" "
thevalue=''
for thekey in thekeys:
rval += indnt + thekey + " = "
thevalue = pardict[thekey]
if type(thevalue) == str:
if thevalue[:3] == "VAR": # this is meant as a variable name
rval += thevalue[3:] + ",\n"
elif ", " in thevalue: # this is a string with comma already contained
rval += thevalue + "\n"
else:
rval += "'"+thevalue+"',\n"
else:
rval += str(thevalue) + ",\n"
if (type(thevalue) == str) and (", " in thevalue):
rval = rval[0:len(rval)-1] # remove last "\n"
else:
rval = rval[0:len(rval)-2] # remove last ",\n"
rval += "\n" + addidnt + len(taskstr)*" " + " )\n"
return rval
def printstrarray(myarray, myname, startchar = '[', endchar = ']'):
"""
Returns string containing the Python code lines to assign the list myarray
to a variable named myname.
"""
rval = myname+" = "+startchar
idnt = len(rval)*" "
for myelement in myarray:
if not type(myelement) == str:
casalog.post('ERROR: printstrarray can only print arrays of strings', 'SEVERE')
return ''
rval = rval+"'"+myelement+"',\n"+idnt
rval = rval[0:len(rval)-len(",\n"+idnt)]
rval = rval+endchar+"\n\n"
return rval
def haveShiftedSPWs(viss=[], spwids=[], freqtolchanwid=0.1):
"""
Return True if the SPWs given by spwids in the MSs given by viss
do _not_ all have identical grids, otherwise False.
As second return value, the maximum discrepancy (Hz) is given for each SPW.
And as third return value, the same discrepancy in units of channel widths.
"""
mymsmd = msmdtool()
rval=False
rval2 = {}
rval3 = {}
for myspw in spwids:
rval2[myspw] = 0
rval3[myspw] = 0
if len(viss)<=1:
return rval, rval2, rval3
thegrids = []
thechanwidth = []
for i in range(0, len(viss)):
thegrids.append({})
thechanwidth.append({})
try:
mymsmd.open(viss[i])
for myspw in spwids:
thegrids[i][myspw] = mymsmd.chanfreqs(myspw)
thechanwidth[i][myspw] = mymsmd.chanwidths(myspw)[0]
mymsmd.close()
except:
print("ERROR: "+str(sys.exc_info()))
casalog.post('ERROR when accessing '+viss[i], 'SEVERE')
raise
for i in range(1, len(viss)):
for myspw in spwids:
freqtolhz = np.abs(thechanwidth[i][myspw] * freqtolchanwid)
maxdiscr = np.max(np.abs(thegrids[i][myspw] - thegrids[0][myspw]))
if maxdiscr> rval2[myspw]:
rval2[myspw] = maxdiscr
rval3[myspw] = maxdiscr/np.abs(thechanwidth[0][myspw])
if not np.allclose(thegrids[i][myspw], thegrids[0][myspw], 0., freqtolhz, True):
rval=True
print("Grid of SPW "+str(myspw)+" in vis "+str(i)+" differs from that in vis 0 by > "+str(freqtolchanwid)+" * chanwidth = "+str(freqtolhz)+\
" Hz, max discr. == "+str(maxdiscr)+" Hz == "+str(rval3[myspw])+" channelwidths")
else:
print("Grid of SPW "+str(myspw)+" in vis "+str(i)+" differs from that in vis 0 by at most "+str(maxdiscr)+" Hz =="+str(rval3[myspw])+" channelwidths")
return rval, rval2, rval3
def largestCommonSPWsLSRK(viss=[], spwids=[], fieldid=0):
"""
Returns three dictionaries which give the nchan, width, and start values
for the common grids for each science SPW.
"""
rval_nchan = {}
rval_width = {}
rval_start = {}
if viss==[] or spwids==[]:
casalog.post('ERROR: called largestCommonSPWsLSRK with empty input', 'SEVERE')
raise
myms = mstool()
lsrkgrids = []
lsrkchanwidth = []
for i in range(0, len(viss)):
print("Original LSRK (!) grids for "+str(viss[i]))
lsrkgrids.append({})
lsrkchanwidth.append({})
try:
myms.open(viss[i])
for myspw in spwids:
lsrkgrids[i][myspw] = myms.cvelfreqs(spwids=[myspw],
fieldids=[fieldid],
outframe='LSRK')
thelen = len(lsrkgrids[i][myspw])
if not thelen>1:
casalog.post('ERROR: SPW '+str(myspw)+' only has one channel. Cannot determine LSRK channel width.', 'SEVERE')
raise
lsrkchanwidth[i][myspw] = np.max([np.ceil(np.abs(lsrkgrids[i][myspw][1] - lsrkgrids[i][myspw][0])*1.00002),
np.ceil(np.abs(lsrkgrids[i][myspw][thelen-1] - lsrkgrids[i][myspw][thelen-2])*1.00002)
])
print(str(myspw)+": nchan = "+str(thelen)+", start = "+str(np.min(lsrkgrids[i][myspw])/1E9)+" GHz, width = "+str(lsrkchanwidth[i][myspw]/1000.)+" kHz")
myms.close()
except:
print("ERROR: "+str(sys.exc_info()))
casalog.post('ERROR when accessing '+viss[i], 'SEVERE')
raise
for myspw in spwids:
minupperedge = np.max(lsrkgrids[0][myspw]) # start value for search
maxloweredge = np.min(lsrkgrids[0][myspw]) # start value for search
maxwidth = lsrkchanwidth[0][myspw] # start value for search
for i in range(1, len(lsrkgrids)):
if np.max(lsrkgrids[i][myspw])<minupperedge:
minupperedge = np.max(lsrkgrids[i][myspw])
if np.min(lsrkgrids[i][myspw])>maxloweredge:
maxloweredge = np.min(lsrkgrids[i][myspw])
if lsrkchanwidth[i][myspw]>maxwidth:
maxwidth = lsrkchanwidth[i][myspw]
maxloweredge += maxwidth*0.1 # increase by one tenth channel width to avoid rounding errors
rval_width[myspw] = maxwidth
rval_start[myspw] = maxloweredge
if maxwidth>0.:
rval_nchan[myspw] = int(np.floor((minupperedge - maxloweredge)/maxwidth))
else:
casalog.post('ERROR: found zero channel width for spw '+str(myspw), 'SEVERE')
raise exceptions.ArithmeticError
return rval_nchan, rval_width, rval_start
def printfullpolanalysis():
"""
Create the text for the full polarisation RMS analysis step
"""
rval = "import glob\n"
rval += "polaCutoff = 100 # in sigmas of the residual RMS.\n"
rval += "imagescicontIQUV = glob.glob('*_sci.*.cont.IQUV.manual.image.pbcor')\n"
rval += "imagescimfsIQUV = glob.glob('*_sci.*.mfs.IQUV.manual.image.pbcor')\n"
rval += "imagepolcalIQUV = glob.glob('*_polleak.*.mfs.IQUV.manual.image.pbcor')\n"
rval += "stokescomps_todo = ['I','Q','U','V']\n"
rval += "\n"
rval += "print(' Putting I,Q,U,V components from IQUV images (sci cont, sci mfs, and polcal) into separate images ...')\n"
rval += "sep_stokesc_files = []\n"
rval += "for inimage in imagescicontIQUV+imagescimfsIQUV+imagepolcalIQUV:\n"
rval += " print(' '+inimage)\n"
rval += " for stokescomp in stokescomps_todo:\n"
rval += " outimage = inimage.replace('.IQUV.', '.'+stokescomp+'.')\n"
rval += " os.system('rm -rf '+outimage)\n"
rval += " immath(imagename=inimage, outfile=outimage, expr='IM0', stokes=stokescomp)\n"
rval += " sep_stokesc_files.append(outimage)\n"
rval += "\n"
rval += "print(' Computing pol fluxes and errors on polcal continuum images ...')\n"
rval += "results = {}\n"
rval += "for inimageIQUV in imagepolcalIQUV:\n"
rval += " results[inimageIQUV] = {}\n"
rval += " res = {}\n"
rval += " for stokescomp, stokeslbl in enumerate(stokescomps_todo):\n"
rval += " inimage = inimageIQUV.replace('.IQUV.', '.'+stokeslbl+'.')\n"
rval += " print(' Fitting Gaussian to '+inimage)\n"
rval += " myheader = imhead(inimage, mode='list')\n"
rval += " rasize=myheader['shape'][0]\n"
rval += " decsize=myheader['shape'][1]\n"
rval += " mybox=str(rasize*0.35)+', '+str(decsize*0.35)+', '+str(rasize*0.65)+', '+str(decsize*0.65)\n"
rval += " res[stokeslbl] = {}\n"
rval += " resout=imfit(imagename = inimage, box=mybox)\n"
rval += " if not resout['converged'][0]:\n"
rval += " print(' ERROR: Cannot fit gaussian in image {}'.format(inimage))\n"
rval += " print(' Maybe extended emission or image too noisy? You will need to deal with this image manually.')\n"
rval += " casalog.post( 'ERROR: Cannot fit gaussian in image '+inimage, 'WARN')\n"
rval += " res[stokeslbl]['flux']=0\n"
rval += " res[stokeslbl]['error']=0\n"
rval += " else:\n"
rval += " res[stokeslbl]['flux']=resout['results']['component0']['flux']['value'][stokescomp]\n"
rval += " res[stokeslbl]['error']=resout['results']['component0']['flux']['error'][stokescomp]\n"
rval += "\n"
rval += " fluxQ, fluxU = res['Q']['flux'], res['U']['flux']\n"
rval += " errorQ, errorU = res['Q']['error'], res['U']['error']\n"
rval += " results[inimageIQUV]['fluxPI'] = sqrt(fluxQ**2+fluxU**2)\n"
rval += " fluxPI = results[inimageIQUV]['fluxPI']\n"
rval += " if fluxPI == 0.0:\n"
rval += " print('Warning: fluxPI==0.0')\n"
rval += " fluxPI = -1.0 # In lieu of NaN\n"
rval += " results[inimageIQUV]['errorPI'] = sqrt( (fluxQ*errorQ)**2 + (fluxU*errorU)**2 ) / fluxPI\n"
rval += " errorPI = results[inimageIQUV]['errorPI']\n"
rval += " results[inimageIQUV]['polAngle'] = 0.5 * degrees( atan2(fluxU,fluxQ) )\n"
rval += " results[inimageIQUV]['errPA'] = 0.5 * degrees( errorPI / fluxPI )\n"
rval += "\n"
rval += "print(' Computing image RMS ...')\n"
rval += "imagecontresidual = glob.glob('*_sci*.cont.IQUV.manual.residual')\n"
rval += "imagemfsresidual = glob.glob('*_sci*.mfs.IQUV.manual.residual')\n"
rval += "imagepolcalresidual = glob.glob('*_polleak*.mfs.IQUV.manual.residual')\n"
rval += "for inimage in imagecontresidual+imagemfsresidual+imagepolcalresidual:\n"
rval += " print(' '+inimage)\n"
rval += " calstat=imstat(imagename=inimage, axes=[0,1])\n"
rval += " rms = calstat['rms']\n"
rval += " results[inimage] = {}\n"
rval += " results[inimage]['rms'] = rms\n"
rval += " results[inimage]['prms'] = (rms[1]**2. + rms[2]**2.)**0.5\n"
rval += "print(results)"
rval += "\n"
rval += "print(' Creating polarization I and A images ...')\n"
rval += "\n"
rval += "for inimage in imagescicontIQUV+imagescimfsIQUV+imagepolcalIQUV:\n"
rval += " print(' '+inimage)\n"
rval += " poliimage = inimage.replace('.IQUV.', '.P.')\n"
rval += " os.system('rm -rf '+poliimage)\n"
rval += " immath(outfile=poliimage,\n"
rval += " mode='poli',\n"
rval += " imagename = inimage,\n"
rval += " sigma='0.0Jy/beam')\n"
rval += "\n"
rval += " polaimage = inimage.replace('.IQUV.', '.A.')\n"
rval += " os.system('rm -rf '+polaimage)\n"
rval += " rmsimage = inimage.replace('.image.pbcor', '.residual')\n"
rval += " prms=results[rmsimage]['prms']\n"
rval += " immath(outfile=polaimage,\n"
rval += " mode='pola',\n"
rval += " imagename = inimage,\n"
rval += " polithresh='%.8fJy/beam'%(5.0*prms))\n"
rval += "\n"
rval += "print(' Saving results to file pol_results.out ...')\n"
rval += "with open('pol_results.out', 'w') as f:\n"
rval += " stdout_orig = sys.stdout\n"
rval += " sys.stdout = f\n"
rval += " print(results)\n"
rval += " sys.stdout = stdout_orig\n"
rval += "\n"
return rval
|
lucatelliREPO_NAMEmorphenPATH_START.@morphen_extracted@morphen-main@analysis_scripts@almaqa2isg.py@.PATH_END.py
|
{
"filename": "axislines.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/matplotlib/py2/mpl_toolkits/axisartist/axislines.py",
"type": "Python"
}
|
"""
Axislines includes modified implementation of the Axes class. The
biggest difference is that the artists responsible for drawing the axis spine,
ticks, ticklabels and axis labels are separated out from mpl's Axis
class. Originally, this change was motivated to support curvilinear
grid. Here are a few reasons that I came up with a new axes class:
* "top" and "bottom" x-axis (or "left" and "right" y-axis) can have
different ticks (tick locations and labels). This is not possible
with the current mpl, although some twin axes trick can help.
* Curvilinear grid.
* angled ticks.
In the new axes class, xaxis and yaxis are set to not visible by
default, and a new set of artists (AxisArtist) is defined to draw the axis
line, ticks, ticklabels and axis label. The Axes.axis attribute serves as
a dictionary of these artists, i.e., ax.axis["left"] is an AxisArtist
instance responsible for drawing the left y-axis. The default Axes.axis contains
"bottom", "left", "top" and "right".
AxisArtist can be considered as a container artist and
has the following child artists which will draw ticks, labels, etc.
* line
* major_ticks, major_ticklabels
* minor_ticks, minor_ticklabels
* offsetText
* label
Note that these are separate artists from the Axis class of the
original mpl, thus most tick-related commands in the original mpl
won't work, although some effort has been made to support them. For example,
color and markerwidth of the ax.axis["bottom"].major_ticks will follow
those of Axes.xaxis unless explicitly specified.
In addition to AxisArtist, the Axes will have a *gridlines* attribute,
which obviously draws grid lines. The gridlines need to be separated
from the axis as some gridlines can never pass any axis.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import warnings
import numpy as np
from matplotlib import rcParams
import matplotlib.artist as martist
import matplotlib.axes as maxes
from matplotlib.path import Path
from matplotlib.transforms import Bbox
from .axisline_style import AxislineStyle
from .axis_artist import AxisArtist, GridlinesCollection
class AxisArtistHelper(object):
"""
AxisArtistHelper should define
following method with given APIs. Note that the first axes argument
will be axes attribute of the caller artist.::
# LINE (spinal line?)
def get_line(self, axes):
# path : Path
return path
def get_line_transform(self, axes):
# ...
# trans : transform
return trans
# LABEL
def get_label_pos(self, axes):
# x, y : position
return (x, y), trans
def get_label_offset_transform(self, \
axes,
pad_points, fontprops, renderer,
bboxes,
):
# va : vertical alignment
# ha : horizontal alignment
# a : angle
return trans, va, ha, a
# TICK
def get_tick_transform(self, axes):
return trans
def get_tick_iterators(self, axes):
# iter : iterable object that yields (c, angle, l) where
# c, angle, l is position, tick angle, and label
return iter_major, iter_minor
"""
class _Base(object):
"""
Base class for axis helper.
"""
def __init__(self):
"""
"""
self.delta1, self.delta2 = 0.00001, 0.00001
def update_lim(self, axes):
pass
class Fixed(_Base):
"""
Helper class for a fixed (in the axes coordinate) axis.
"""
_default_passthru_pt = dict(left=(0, 0),
right=(1, 0),
bottom=(0, 0),
top=(0, 1))
def __init__(self,
loc, nth_coord=None,
):
"""
nth_coord = along which coordinate value varies
in 2d, nth_coord = 0 -> x axis, nth_coord = 1 -> y axis
"""
self._loc = loc
if loc not in ["left", "right", "bottom", "top"]:
raise ValueError("%s" % loc)
if nth_coord is None:
if loc in ["left", "right"]:
nth_coord = 1
elif loc in ["bottom", "top"]:
nth_coord = 0
self.nth_coord = nth_coord
super(AxisArtistHelper.Fixed, self).__init__()
self.passthru_pt = self._default_passthru_pt[loc]
_verts = np.array([[0., 0.],
[1., 1.]])
fixed_coord = 1-nth_coord
_verts[:,fixed_coord] = self.passthru_pt[fixed_coord]
# axis line in transAxes
self._path = Path(_verts)
def get_nth_coord(self):
return self.nth_coord
# LINE
def get_line(self, axes):
return self._path
def get_line_transform(self, axes):
return axes.transAxes
# LABEL
def get_axislabel_transform(self, axes):
return axes.transAxes
def get_axislabel_pos_angle(self, axes):
"""
label reference position in transAxes.
get_label_transform() returns a transform of (transAxes+offset)
"""
loc = self._loc
pos, angle_tangent = dict(left=((0., 0.5), 90),
right=((1., 0.5), 90),
bottom=((0.5, 0.), 0),
top=((0.5, 1.), 0))[loc]
return pos, angle_tangent
# TICK
def get_tick_transform(self, axes):
trans_tick = [axes.get_xaxis_transform(),
axes.get_yaxis_transform()][self.nth_coord]
return trans_tick
class Floating(_Base):
def __init__(self, nth_coord,
value):
self.nth_coord = nth_coord
self._value = value
super(AxisArtistHelper.Floating,
self).__init__()
def get_nth_coord(self):
return self.nth_coord
def get_line(self, axes):
raise RuntimeError("get_line method should be defined by the derived class")
class AxisArtistHelperRectlinear(object):
class Fixed(AxisArtistHelper.Fixed):
def __init__(self, axes, loc, nth_coord=None):
"""
nth_coord = along which coordinate value varies
in 2d, nth_coord = 0 -> x axis, nth_coord = 1 -> y axis
"""
super(AxisArtistHelperRectlinear.Fixed, self).__init__(
loc, nth_coord)
self.axis = [axes.xaxis, axes.yaxis][self.nth_coord]
# TICK
def get_tick_iterators(self, axes):
"""tick_loc, tick_angle, tick_label"""
loc = self._loc
if loc in ["bottom", "top"]:
angle_normal, angle_tangent = 90, 0
else:
angle_normal, angle_tangent = 0, 90
major = self.axis.major
majorLocs = major.locator()
major.formatter.set_locs(majorLocs)
majorLabels = [major.formatter(val, i) for i, val in enumerate(majorLocs)]
minor = self.axis.minor
minorLocs = minor.locator()
minor.formatter.set_locs(minorLocs)
minorLabels = [minor.formatter(val, i) for i, val in enumerate(minorLocs)]
trans_tick = self.get_tick_transform(axes)
tr2ax = trans_tick + axes.transAxes.inverted()
def _f(locs, labels):
for x, l in zip(locs, labels):
c = list(self.passthru_pt) # copy
c[self.nth_coord] = x
# check if the tick point is inside axes
c2 = tr2ax.transform_point(c)
#delta=0.00001
if 0. -self.delta1<= c2[self.nth_coord] <= 1.+self.delta2:
yield c, angle_normal, angle_tangent, l
return _f(majorLocs, majorLabels), _f(minorLocs, minorLabels)
class Floating(AxisArtistHelper.Floating):
def __init__(self, axes, nth_coord,
passingthrough_point, axis_direction="bottom"):
super(AxisArtistHelperRectlinear.Floating, self).__init__(
nth_coord, passingthrough_point)
self._axis_direction = axis_direction
self.axis = [axes.xaxis, axes.yaxis][self.nth_coord]
def get_line(self, axes):
_verts = np.array([[0., 0.],
[1., 1.]])
fixed_coord = 1-self.nth_coord
trans_passingthrough_point = axes.transData + axes.transAxes.inverted()
p = trans_passingthrough_point.transform_point([self._value,
self._value])
_verts[:,fixed_coord] = p[fixed_coord]
return Path(_verts)
def get_line_transform(self, axes):
return axes.transAxes
def get_axislabel_transform(self, axes):
return axes.transAxes
def get_axislabel_pos_angle(self, axes):
"""
label reference position in transAxes.
get_label_transform() returns a transform of (transAxes+offset)
"""
loc = self._axis_direction
#angle = dict(left=0,
# right=0,
# bottom=.5*np.pi,
# top=.5*np.pi)[loc]
if self.nth_coord == 0:
angle = 0
else:
angle = 90
_verts = [0.5, 0.5]
fixed_coord = 1-self.nth_coord
trans_passingthrough_point = axes.transData + axes.transAxes.inverted()
p = trans_passingthrough_point.transform_point([self._value,
self._value])
_verts[fixed_coord] = p[fixed_coord]
if not (0. <= _verts[fixed_coord] <= 1.):
return None, None
else:
return _verts, angle
def get_tick_transform(self, axes):
return axes.transData
def get_tick_iterators(self, axes):
"""tick_loc, tick_angle, tick_label"""
loc = self._axis_direction
if loc in ["bottom", "top"]:
angle_normal, angle_tangent = 90, 0
else:
angle_normal, angle_tangent = 0, 90
if self.nth_coord == 0:
angle_normal, angle_tangent = 90, 0
else:
angle_normal, angle_tangent = 0, 90
#angle = 90 - 90 * self.nth_coord
major = self.axis.major
majorLocs = major.locator()
major.formatter.set_locs(majorLocs)
majorLabels = [major.formatter(val, i) for i, val in enumerate(majorLocs)]
minor = self.axis.minor
minorLocs = minor.locator()
minor.formatter.set_locs(minorLocs)
minorLabels = [minor.formatter(val, i) for i, val in enumerate(minorLocs)]
tr2ax = axes.transData + axes.transAxes.inverted()
def _f(locs, labels):
for x, l in zip(locs, labels):
c = [self._value, self._value]
c[self.nth_coord] = x
c1, c2 = tr2ax.transform_point(c)
if 0. <= c1 <= 1. and 0. <= c2 <= 1.:
if 0. - self.delta1 <= [c1, c2][self.nth_coord] <= 1. + self.delta2:
yield c, angle_normal, angle_tangent, l
return _f(majorLocs, majorLabels), _f(minorLocs, minorLabels)
class GridHelperBase(object):
def __init__(self):
self._force_update = True
self._old_limits = None
super(GridHelperBase, self).__init__()
def update_lim(self, axes):
x1, x2 = axes.get_xlim()
y1, y2 = axes.get_ylim()
if self._force_update or self._old_limits != (x1, x2, y1, y2):
self._update(x1, x2, y1, y2)
self._force_update = False
self._old_limits = (x1, x2, y1, y2)
def _update(self, x1, x2, y1, y2):
pass
def invalidate(self):
self._force_update = True
def valid(self):
return not self._force_update
def get_gridlines(self, which, axis):
"""
Return list of grid lines as a list of paths (list of points).
*which* : "major" or "minor"
*axis* : "both", "x" or "y"
"""
return []
def new_gridlines(self, ax):
"""
Create and return a new GridlineCollection instance.
*which* : "major" or "minor"
*axis* : "both", "x" or "y"
"""
gridlines = GridlinesCollection(None, transform=ax.transData,
colors=rcParams['grid.color'],
linestyles=rcParams['grid.linestyle'],
linewidths=rcParams['grid.linewidth'])
ax._set_artist_props(gridlines)
gridlines.set_grid_helper(self)
ax.axes._set_artist_props(gridlines)
# gridlines.set_clip_path(self.axes.patch)
# set_clip_path need to be deferred after Axes.cla is completed.
# It is done inside the cla.
return gridlines
class GridHelperRectlinear(GridHelperBase):
def __init__(self, axes):
super(GridHelperRectlinear, self).__init__()
self.axes = axes
def new_fixed_axis(self, loc,
nth_coord=None,
axis_direction=None,
offset=None,
axes=None,
):
if axes is None:
warnings.warn("'new_fixed_axis' explicitly requires the axes keyword.")
axes = self.axes
_helper = AxisArtistHelperRectlinear.Fixed(axes, loc, nth_coord)
if axis_direction is None:
axis_direction = loc
axisline = AxisArtist(axes, _helper, offset=offset,
axis_direction=axis_direction,
)
return axisline
def new_floating_axis(self, nth_coord, value,
axis_direction="bottom",
axes=None,
):
if axes is None:
warnings.warn(
"'new_floating_axis' explicitly requires the axes keyword.")
axes = self.axes
passthrough_point = (value, value)
transform = axes.transData
_helper = AxisArtistHelperRectlinear.Floating(
axes, nth_coord, value, axis_direction)
axisline = AxisArtist(axes, _helper)
axisline.line.set_clip_on(True)
axisline.line.set_clip_box(axisline.axes.bbox)
return axisline
def get_gridlines(self, which="major", axis="both"):
"""
return list of gridline coordinates in data coordinates.
*which* : "major" or "minor"
*axis* : "both", "x" or "y"
"""
gridlines = []
if axis in ["both", "x"]:
locs = []
y1, y2 = self.axes.get_ylim()
#if self.axes.xaxis._gridOnMajor:
if which in ["both", "major"]:
locs.extend(self.axes.xaxis.major.locator())
#if self.axes.xaxis._gridOnMinor:
if which in ["both", "minor"]:
locs.extend(self.axes.xaxis.minor.locator())
for x in locs:
gridlines.append([[x, x], [y1, y2]])
if axis in ["both", "y"]:
x1, x2 = self.axes.get_xlim()
locs = []
if self.axes.yaxis._gridOnMajor:
#if which in ["both", "major"]:
locs.extend(self.axes.yaxis.major.locator())
if self.axes.yaxis._gridOnMinor:
#if which in ["both", "minor"]:
locs.extend(self.axes.yaxis.minor.locator())
for y in locs:
gridlines.append([[x1, x2], [y, y]])
return gridlines
class SimpleChainedObjects(object):
def __init__(self, objects):
self._objects = objects
def __getattr__(self, k):
_a = SimpleChainedObjects([getattr(a, k) for a in self._objects])
return _a
def __call__(self, *kl, **kwargs):
for m in self._objects:
m(*kl, **kwargs)
class Axes(maxes.Axes):
class AxisDict(dict):
def __init__(self, axes):
self.axes = axes
super(Axes.AxisDict, self).__init__()
def __getitem__(self, k):
if isinstance(k, tuple):
r = SimpleChainedObjects([dict.__getitem__(self, k1) for k1 in k])
return r
elif isinstance(k, slice):
if k.start == None and k.stop == None and k.step == None:
r = SimpleChainedObjects(list(six.itervalues(self)))
return r
else:
raise ValueError("Unsupported slice")
else:
return dict.__getitem__(self, k)
def __call__(self, *v, **kwargs):
return maxes.Axes.axis(self.axes, *v, **kwargs)
def __init__(self, *kl, **kw):
helper = kw.pop("grid_helper", None)
self._axisline_on = True
if helper:
self._grid_helper = helper
else:
self._grid_helper = GridHelperRectlinear(self)
super(Axes, self).__init__(*kl, **kw)
self.toggle_axisline(True)
def toggle_axisline(self, b=None):
if b is None:
b = not self._axisline_on
if b:
self._axisline_on = True
for s in self.spines.values():
s.set_visible(False)
self.xaxis.set_visible(False)
self.yaxis.set_visible(False)
else:
self._axisline_on = False
for s in self.spines.values():
s.set_visible(True)
self.xaxis.set_visible(True)
self.yaxis.set_visible(True)
def _init_axis(self):
super(Axes, self)._init_axis()
def _init_axis_artists(self, axes=None):
if axes is None:
axes = self
self._axislines = self.AxisDict(self)
new_fixed_axis = self.get_grid_helper().new_fixed_axis
for loc in ["bottom", "top", "left", "right"]:
self._axislines[loc] = new_fixed_axis(loc=loc, axes=axes,
axis_direction=loc)
for axisline in [self._axislines["top"], self._axislines["right"]]:
axisline.label.set_visible(False)
axisline.major_ticklabels.set_visible(False)
axisline.minor_ticklabels.set_visible(False)
@property
def axis(self):
return self._axislines
def new_gridlines(self, grid_helper=None):
"""
Create and return a new GridlineCollection instance.
*which* : "major" or "minor"
*axis* : "both", "x" or "y"
"""
if grid_helper is None:
grid_helper = self.get_grid_helper()
gridlines = grid_helper.new_gridlines(self)
return gridlines
def _init_gridlines(self, grid_helper=None):
# It is done inside the cla.
gridlines = self.new_gridlines(grid_helper)
self.gridlines = gridlines
def cla(self):
# gridlines need to be created before cla() since cla calls grid()
self._init_gridlines()
super(Axes, self).cla()
# the clip_path should be set after Axes.cla() since that's
# when a patch is created.
self.gridlines.set_clip_path(self.axes.patch)
self._init_axis_artists()
def get_grid_helper(self):
return self._grid_helper
def grid(self, b=None, which='major', axis="both", **kwargs):
"""
Toggle the gridlines, and optionally set the properties of the lines.
"""
# there are some discrepancies between the behavior of grid in
# axes_grid and the original mpl's grid, because axes_grid
# explicitly sets the visibility of the gridlines.
super(Axes, self).grid(b, which=which, axis=axis, **kwargs)
if not self._axisline_on:
return
if b is None:
if self.axes.xaxis._gridOnMinor or self.axes.xaxis._gridOnMajor or \
self.axes.yaxis._gridOnMinor or self.axes.yaxis._gridOnMajor:
b=True
else:
b=False
self.gridlines.set_which(which)
self.gridlines.set_axis(axis)
self.gridlines.set_visible(b)
if len(kwargs):
martist.setp(self.gridlines, **kwargs)
def get_children(self):
if self._axisline_on:
children = list(six.itervalues(self._axislines)) + [self.gridlines]
else:
children = []
children.extend(super(Axes, self).get_children())
return children
def invalidate_grid_helper(self):
self._grid_helper.invalidate()
def new_fixed_axis(self, loc, offset=None):
gh = self.get_grid_helper()
axis = gh.new_fixed_axis(loc,
nth_coord=None,
axis_direction=None,
offset=offset,
axes=self,
)
return axis
def new_floating_axis(self, nth_coord, value,
axis_direction="bottom",
):
gh = self.get_grid_helper()
axis = gh.new_floating_axis(nth_coord, value,
axis_direction=axis_direction,
axes=self)
return axis
def draw(self, renderer, inframe=False):
if not self._axisline_on:
super(Axes, self).draw(renderer, inframe)
return
orig_artists = self.artists
self.artists = self.artists + list(self._axislines.values()) + [self.gridlines]
super(Axes, self).draw(renderer, inframe)
self.artists = orig_artists
def get_tightbbox(self, renderer, call_axes_locator=True):
bb0 = super(Axes, self).get_tightbbox(renderer, call_axes_locator)
if not self._axisline_on:
return bb0
bb = [bb0]
for axisline in list(six.itervalues(self._axislines)):
if not axisline.get_visible():
continue
bb.append(axisline.get_tightbbox(renderer))
# if axisline.label.get_visible():
# bb.append(axisline.label.get_window_extent(renderer))
# if axisline.major_ticklabels.get_visible():
# bb.extend(axisline.major_ticklabels.get_window_extents(renderer))
# if axisline.minor_ticklabels.get_visible():
# bb.extend(axisline.minor_ticklabels.get_window_extents(renderer))
# if axisline.major_ticklabels.get_visible() or \
# axisline.minor_ticklabels.get_visible():
# bb.append(axisline.offsetText.get_window_extent(renderer))
#bb.extend([c.get_window_extent(renderer) for c in artists \
# if c.get_visible()])
_bbox = Bbox.union([b for b in bb if b and (b.width!=0 or b.height!=0)])
return _bbox
Subplot = maxes.subplot_class_factory(Axes)
class AxesZero(Axes):
def __init__(self, *kl, **kw):
super(AxesZero, self).__init__(*kl, **kw)
def _init_axis_artists(self):
super(AxesZero, self)._init_axis_artists()
new_floating_axis = self._grid_helper.new_floating_axis
xaxis_zero = new_floating_axis(nth_coord=0,
value=0.,
axis_direction="bottom",
axes=self)
xaxis_zero.line.set_clip_path(self.patch)
xaxis_zero.set_visible(False)
self._axislines["xzero"] = xaxis_zero
yaxis_zero = new_floating_axis(nth_coord=1,
value=0.,
axis_direction="left",
axes=self)
yaxis_zero.line.set_clip_path(self.patch)
yaxis_zero.set_visible(False)
self._axislines["yzero"] = yaxis_zero
SubplotZero = maxes.subplot_class_factory(AxesZero)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@matplotlib@py2@mpl_toolkits@axisartist@axislines.py@.PATH_END.py
|
{
"filename": "codediff_test.py",
"repo_name": "google/flax",
"repo_path": "flax_extracted/flax-main/docs_nnx/_ext/codediff_test.py",
"type": "Python"
}
|
# Copyright 2024 The Flax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for codediff Sphinx extension."""
from absl.testing import parameterized
from codediff import CodeDiffParser
class CodeDiffTest(parameterized.TestCase):
def test_parse(self):
input_text = r"""@jax.jit #!
def get_initial_params(key): #!
init_val = jnp.ones((1, 28, 28, 1), jnp.float32)
initial_params = CNN().init(key, init_val)['params']
extra_line
return initial_params
---
@jax.pmap #!
def get_initial_params(key):
init_val = jnp.ones((1, 28, 28, 1), jnp.float32)
initial_params = CNN().init(key, init_val)['params']
return initial_params"""
expected_table = """.. tab-set::\n \n .. tab-item:: Single device\n \n .. code-block:: python\n :emphasize-lines: 1,2\n \n @jax.jit\n def get_initial_params(key):\n init_val = jnp.ones((1, 28, 28, 1), jnp.float32)\n initial_params = CNN().init(key, init_val)['params']\n extra_line\n return initial_params\n \n .. tab-item:: Ensembling on multiple devices\n \n .. code-block:: python\n :emphasize-lines: 1\n \n @jax.pmap\n def get_initial_params(key):\n init_val = jnp.ones((1, 28, 28, 1), jnp.float32)\n initial_params = CNN().init(key, init_val)['params']\n return initial_params"""
expected_testcodes = [
r"""@jax.jit #!
def get_initial_params(key): #!
init_val = jnp.ones((1, 28, 28, 1), jnp.float32)
initial_params = CNN().init(key, init_val)['params']
extra_line
return initial_params
""",
r"""@jax.pmap #!
def get_initial_params(key):
init_val = jnp.ones((1, 28, 28, 1), jnp.float32)
initial_params = CNN().init(key, init_val)['params']
return initial_params""",
]
title_left = 'Single device'
title_right = 'Ensembling on multiple devices'
actual_table, actual_testcodes = CodeDiffParser().parse(
lines=input_text.split('\n'),
title=f'{title_left}, {title_right}',
)
actual_table = '\n'.join(actual_table)
actual_testcodes = ['\n'.join(testcode) for testcode, _ in actual_testcodes]
self.assertEqual(expected_table, actual_table)
self.assertEqual(expected_testcodes[0], actual_testcodes[0])
self.assertEqual(expected_testcodes[1], actual_testcodes[1])
@parameterized.parameters(
{
'input_text': r"""x = 1
---
x = 2
""",
'title': 'Tab 0, Tab1, Tab2',
'groups': None,
'error_msg': 'Expected 2 code separator\\(s\\) for 3 tab\\(s\\), but got 1 code separator\\(s\\) instead.',
},
{
'input_text': r"""x = 1
---
x = 2
---
x = 3
---
x = 4
""",
'title': 'Tab 0, Tab1, Tab2',
'groups': None,
'error_msg': 'Expected 2 code separator\\(s\\) for 3 tab\\(s\\), but got 3 code separator\\(s\\) instead.',
},
{
'input_text': r"""x = 1
---
x = 2
---
x = 3
""",
'title': 'Tab 0, Tab1, Tab2',
'groups': 'tab0, tab2',
'error_msg': 'Expected 3 group assignment\\(s\\) for 3 tab\\(s\\), but got 2 group assignment\\(s\\) instead.',
},
{
'input_text': r"""x = 1
---
x = 2
---
x = 3
""",
'title': 'Tab 0, Tab1, Tab2',
'groups': 'tab0, tab1, tab2, tab3',
'error_msg': 'Expected 3 group assignment\\(s\\) for 3 tab\\(s\\), but got 4 group assignment\\(s\\) instead.',
},
)
def test_parse_errors(self, input_text, title, groups, error_msg):
with self.assertRaisesRegex(ValueError, error_msg):
_, _ = CodeDiffParser().parse(
lines=input_text.split('\n'),
title=title,
groups=groups,
)
|
googleREPO_NAMEflaxPATH_START.@flax_extracted@flax-main@docs_nnx@_ext@codediff_test.py@.PATH_END.py
|
{
"filename": "_hovertemplate.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/bar/_hovertemplate.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class HovertemplateValidator(_plotly_utils.basevalidators.StringValidator):
def __init__(self, plotly_name="hovertemplate", parent_name="bar", **kwargs):
super(HovertemplateValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
array_ok=kwargs.pop("array_ok", True),
edit_type=kwargs.pop("edit_type", "none"),
**kwargs,
)
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@bar@_hovertemplate.py@.PATH_END.py
|
{
"filename": "pastecat_runner.py",
"repo_name": "CosmoStat/shapepipe",
"repo_path": "shapepipe_extracted/shapepipe-master/shapepipe/modules/pastecat_runner.py",
"type": "Python"
}
|
"""PASTECAT RUNNER.
Module runner for ``pastecat``.
:Author: Martin Kilbinger <martin.kilbinger@cea.fr>, Axel Guinot
"""
from shapepipe.modules.module_decorator import module_runner
from shapepipe.modules.pastecat_package.pastecat import PasteCat
@module_runner(
version='1.1',
input_module='sextractor_runner',
file_pattern='tile_sexcat',
file_ext='.fits',
depends=['numpy', 'astropy'],
run_method='parallel',
)
def paste_cat_runner(
input_file_list,
run_dirs,
file_number_string,
config,
module_config_sec,
w_log,
):
"""Define The Paste Catalogue Runner."""
# Get config options
if config.has_option(module_config_sec, 'CHECK_COL_NAME'):
check_col_name = config.get(module_config_sec, 'CHECK_COL_NAME')
else:
check_col_name = None
if config.has_option(module_config_sec, 'HDU'):
tmp = config.getlist(module_config_sec, 'HDU')
hdu_no = [int(idx) for idx in tmp]
if len(hdu_no) != len(input_file_list):
raise IndexError(
'Different lengths for input file list '
+ f'({len(input_file_list)}) and HDU ({len(hdu_no)}).'
)
else:
hdu_no = None
if config.has_option(module_config_sec, 'PREFIX'):
prefix = config.get(
module_config_sec,
'PREFIX'
)
else:
prefix = 'cat_pasted'
if config.has_option(module_config_sec, 'EXT_NAME'):
ext_name_list = config.getlist(module_config_sec, 'EXT_NAME')
if len(ext_name_list) != len(input_file_list):
raise ValueError(
f'Input file list length ({len(input_file_list)}) '
+ f'and EXT_NAME list ({len(ext_name_list)}) '
+ 'need to be equal.'
)
else:
ext_name_list = None
# Set file extension
file_ext = 'fits'
# Set output path
output_path = (
f'{run_dirs["output"]}/{prefix}'
+ f'{file_number_string}.{file_ext}'
)
# Create paste cat class instance
paste_cat_inst = PasteCat(
input_file_list,
output_path,
w_log,
ext_name=ext_name_list,
check_col_name=check_col_name,
hdu_no=hdu_no
)
# Run processing
paste_cat_inst.process()
# No return objects
return None, None
|
CosmoStatREPO_NAMEshapepipePATH_START.@shapepipe_extracted@shapepipe-master@shapepipe@modules@pastecat_runner.py@.PATH_END.py
|
{
"filename": "_font.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/graph_objs/sankey/hoverlabel/_font.py",
"type": "Python"
}
|
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Font(_BaseTraceHierarchyType):
# class properties
# --------------------
_parent_path_str = "sankey.hoverlabel"
_path_str = "sankey.hoverlabel.font"
_valid_props = {"color", "colorsrc", "family", "familysrc", "size", "sizesrc"}
# color
# -----
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
# colorsrc
# --------
@property
def colorsrc(self):
"""
Sets the source reference on Chart Studio Cloud for color .
The 'colorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["colorsrc"]
@colorsrc.setter
def colorsrc(self, val):
self["colorsrc"] = val
# family
# ------
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser will only be able to apply a font if
it is available on the system which it operates. Provide
multiple font families, separated by commas, to indicate the
preference in which to apply fonts if they aren't available on
the system. The Chart Studio Cloud (at https://chart-
studio.plotly.com or on-premise) generates images on a server,
where only a select number of fonts are installed and
supported. These include "Arial", "Balto", "Courier New",
"Droid Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
One", "Old Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
The 'family' property is a string and must be specified as:
- A non-empty string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self["family"]
@family.setter
def family(self, val):
self["family"] = val
# familysrc
# ---------
@property
def familysrc(self):
"""
Sets the source reference on Chart Studio Cloud for family .
The 'familysrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["familysrc"]
@familysrc.setter
def familysrc(self, val):
self["familysrc"] = val
# size
# ----
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|float|numpy.ndarray
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
# sizesrc
# -------
@property
def sizesrc(self):
"""
Sets the source reference on Chart Studio Cloud for size .
The 'sizesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["sizesrc"]
@sizesrc.setter
def sizesrc(self, val):
self["sizesrc"] = val
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
color
colorsrc
Sets the source reference on Chart Studio Cloud for
color .
family
HTML font family - the typeface that will be applied by
the web browser. The web browser will only be able to
apply a font if it is available on the system which it
operates. Provide multiple font families, separated by
commas, to indicate the preference in which to apply
fonts if they aren't available on the system. The Chart
Studio Cloud (at https://chart-studio.plotly.com or on-
premise) generates images on a server, where only a
select number of fonts are installed and supported.
These include "Arial", "Balto", "Courier New", "Droid
Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
One", "Old Standard TT", "Open Sans", "Overpass", "PT
Sans Narrow", "Raleway", "Times New Roman".
familysrc
Sets the source reference on Chart Studio Cloud for
family .
size
sizesrc
Sets the source reference on Chart Studio Cloud for
size .
"""
def __init__(
self,
arg=None,
color=None,
colorsrc=None,
family=None,
familysrc=None,
size=None,
sizesrc=None,
**kwargs
):
"""
Construct a new Font object
Sets the font used in hover labels.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.sankey.hoverlabel.Font`
color
colorsrc
Sets the source reference on Chart Studio Cloud for
color .
family
HTML font family - the typeface that will be applied by
the web browser. The web browser will only be able to
apply a font if it is available on the system which it
operates. Provide multiple font families, separated by
commas, to indicate the preference in which to apply
fonts if they aren't available on the system. The Chart
Studio Cloud (at https://chart-studio.plotly.com or on-
premise) generates images on a server, where only a
select number of fonts are installed and supported.
These include "Arial", "Balto", "Courier New", "Droid
Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
One", "Old Standard TT", "Open Sans", "Overpass", "PT
Sans Narrow", "Raleway", "Times New Roman".
familysrc
Sets the source reference on Chart Studio Cloud for
family .
size
sizesrc
Sets the source reference on Chart Studio Cloud for
size .
Returns
-------
Font
"""
super(Font, self).__init__("font")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.sankey.hoverlabel.Font
constructor must be a dict or
an instance of :class:`plotly.graph_objs.sankey.hoverlabel.Font`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("color", None)
_v = color if color is not None else _v
if _v is not None:
self["color"] = _v
_v = arg.pop("colorsrc", None)
_v = colorsrc if colorsrc is not None else _v
if _v is not None:
self["colorsrc"] = _v
_v = arg.pop("family", None)
_v = family if family is not None else _v
if _v is not None:
self["family"] = _v
_v = arg.pop("familysrc", None)
_v = familysrc if familysrc is not None else _v
if _v is not None:
self["familysrc"] = _v
_v = arg.pop("size", None)
_v = size if size is not None else _v
if _v is not None:
self["size"] = _v
_v = arg.pop("sizesrc", None)
_v = sizesrc if sizesrc is not None else _v
if _v is not None:
self["sizesrc"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@graph_objs@sankey@hoverlabel@_font.py@.PATH_END.py
|
{
"filename": "_bgcolor.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/indicator/gauge/_bgcolor.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class BgcolorValidator(_plotly_utils.basevalidators.ColorValidator):
def __init__(self, plotly_name="bgcolor", parent_name="indicator.gauge", **kwargs):
super(BgcolorValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "plot"),
**kwargs,
)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@indicator@gauge@_bgcolor.py@.PATH_END.py
|
{
"filename": "exampleHRO.py",
"repo_name": "solerjuan/magnetar",
"repo_path": "magnetar_extracted/magnetar-master/examples/exampleHRO.py",
"type": "Python"
}
|
# Example of the histogram of orientated gradients (HRO) method
# applied to the column density and magnetic field orientation observations toward the Taurus molecular cloud
# See Planck Collaboration XXXV. A&A, 586 (2016) A138
#
# This file is part of Magnetar
#
# Copyright (C) 2013-2023 Juan Diego Soler
import sys
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
sys.path.append('../')
from hro import *
from bvisual import *
from astropy.wcs import WCS
from reproject import reproject_interp
import os
import imageio
indir='../data/'
prefix='Taurusfwhm10'
# Loading the Taurus log(NH) map and the Stokes Q and U maps from Planck at 353 GHz
hdu=fits.open(indir+'Taurusfwhm5_logNHmap.fits')
logNHmap=hdu[0].data
hdrREF=hdu[0].header
hdu.close()
hdu=fits.open(indir+prefix+'_Qmap.fits')
Qmap=hdu[0].data
hdu.close()
hdu=fits.open(indir+prefix+'_Umap.fits')
Umap=hdu[0].data
hdu.close()
# Planck beam size
fwhm=10.0 # arcmin
# Setting the size of the derivative kernel
sz=np.shape(logNHmap)
deltab=60.*hdrREF['CDELT2'] # in arcmin
ksz=10. # arcmin
pxksz=ksz/deltab
# Setting the number of column density bins
steps=25 # number of histograms
# Setting the statistical weights to account for the beam sampling
weights=np.ones(sz)*(deltab/ksz)**2
# Simple mask definition
mask=np.ones_like(logNHmap)
mask[(logNHmap < 21.2)]=0.
# HRO calculation
outputhro = hroLITE(logNHmap, Qmap, -Umap, steps=20, minI=np.nanmin(logNHmap), w=weights, mask=mask, ksz=pxksz)
# Map of relative orientation angles
fig = plt.figure(figsize=(10.0,10.0))
plt.rc('font', size=12)
ax1=plt.subplot(111, projection=WCS(hdrREF))
im=ax1.imshow(np.abs(np.rad2deg(outputhro['Amap'])), origin='lower', interpolation='none', cmap='jet')
ax1.contour(logNHmap, origin='lower', levels=[np.mean(logNHmap)+1.0*np.std(logNHmap),np.mean(logNHmap)+2.0*np.std(logNHmap)], colors='black', linewidths=2.0)
ax1.coords[0].set_axislabel(r'$l$')
ax1.coords[1].set_axislabel(r'$b$')
cbar=fig.colorbar(im, ax=ax1, fraction=0.046, pad=0.04)
cbar.ax.set_title(r'$\phi$ [deg]')
plt.show()
# Setting the center of the column density bins
isteps=outputhro['csteps']
icentres=0.5*(isteps[0:np.size(isteps)-1]+isteps[1:np.size(isteps)])
# Reproducing top-left panel of Figure 7 in Planck Collaboration XXXV. A&A, 586 (2016) A138
fig = plt.figure(figsize=(10.0,5.0))
plt.rc('font', size=12)
ax1=plt.subplot(111)
ax1.plot(icentres, outputhro['xi'], color='orange')
ax1.axhline(y=0, color='grey', alpha=0.5, linestyle='dashed')
ax1.tick_params(axis='y', labelrotation=90)
ax1.set_xlabel(r'log($N_{\rm H}/$cm$^{2}$)')
ax1.set_ylabel(r'$\xi$')
plt.show()
|
solerjuanREPO_NAMEmagnetarPATH_START.@magnetar_extracted@magnetar-master@examples@exampleHRO.py@.PATH_END.py
|
{
"filename": "google_palm.py",
"repo_name": "langchain-ai/langchain",
"repo_path": "langchain_extracted/langchain-master/libs/langchain/langchain/llms/google_palm.py",
"type": "Python"
}
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.llms import GooglePalm
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {"GooglePalm": "langchain_community.llms"}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"GooglePalm",
]
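# Usage sketch (hypothetical, for illustration only): accessing the deprecated
# name from this module goes through ``__getattr__`` above, which delegates to
# the importer created by ``create_importer`` to emit a deprecation warning and
# forward the import to ``langchain_community`` when it is installed, e.g.:
#
#     from langchain.llms.google_palm import GooglePalm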
|
langchain-aiREPO_NAMElangchainPATH_START.@langchain_extracted@langchain-master@libs@langchain@langchain@llms@google_palm.py@.PATH_END.py
|
{
"filename": "_font.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/streamtube/hoverlabel/_font.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class FontValidator(_plotly_utils.basevalidators.CompoundValidator):
def __init__(
self, plotly_name="font", parent_name="streamtube.hoverlabel", **kwargs
):
super(FontValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop("data_class_str", "Font"),
data_docs=kwargs.pop(
"data_docs",
"""
color
colorsrc
Sets the source reference on Chart Studio Cloud
for color .
family
HTML font family - the typeface that will be
applied by the web browser. The web browser
will only be able to apply a font if it is
available on the system which it operates.
Provide multiple font families, separated by
commas, to indicate the preference in which to
apply fonts if they aren't available on the
system. The Chart Studio Cloud (at
https://chart-studio.plotly.com or on-premise)
generates images on a server, where only a
select number of fonts are installed and
supported. These include "Arial", "Balto",
"Courier New", "Droid Sans",, "Droid Serif",
"Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
familysrc
Sets the source reference on Chart Studio Cloud
for family .
size
sizesrc
Sets the source reference on Chart Studio Cloud
for size .
""",
),
**kwargs
)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@streamtube@hoverlabel@_font.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "exobssim/rapoc-public",
"repo_path": "rapoc-public_extracted/rapoc-public-master/rapoc/models/utils/__init__.py",
"type": "Python"
}
|
exobssimREPO_NAMErapoc-publicPATH_START.@rapoc-public_extracted@rapoc-public-master@rapoc@models@utils@__init__.py@.PATH_END.py
|
|
{
"filename": "__init__.py",
"repo_name": "steven-murray/mrpy",
"repo_path": "mrpy_extracted/mrpy-master/mrpy/__init__.py",
"type": "Python"
}
|
__version__ = "1.1.0"
from mrpy.base.stats import TGGD
from mrpy.base.core import MRP, dndm
from mrpy.extra.physical_dependence import mrp_b13
|
steven-murrayREPO_NAMEmrpyPATH_START.@mrpy_extracted@mrpy-master@mrpy@__init__.py@.PATH_END.py
|
{
"filename": "1_create_config_file.py",
"repo_name": "nu-radio/NuRadioMC",
"repo_path": "NuRadioMC_extracted/NuRadioMC-master/NuRadioReco/examples/cr_efficiency_analysis/1_create_config_file.py",
"type": "Python"
}
|
import numpy as np
import helper_cr_eff as hcr
import json
import os
from NuRadioReco.utilities import units
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--output_path', type=str, nargs='?', default=os.path.dirname(__file__),
help='Path to save output, most likely the path to the cr_efficiency_analysis directory')
parser.add_argument('--detector_file', type=str, nargs='?',
default='LPDA_Southpole.json',
help='detector file with one antenna and the geographic location, change triggered channels accordingly')
parser.add_argument('--target_global_trigger_rate', type=float, nargs='?', default=100,
help='trigger rate for all channels in Hz')
parser.add_argument('--trigger_name', type=str, nargs='?', default='high_low',
help='name of the trigger, high_low, envelope or power_integration')
parser.add_argument('--default_station', type=int, nargs='?', default=101,
help='default station id')
parser.add_argument('--trace_samples', type=int, nargs='?', default=1024,
help='elements in the array of one trace')
parser.add_argument('--sampling_rate', type=int, nargs='?', default=1,
help='sampling rate in GHz')
parser.add_argument('--triggered_channels', type=np.ndarray, nargs='?', default=np.array([1]),
help='channel on which the trigger is applied')
parser.add_argument('--total_number_triggered_channels', type=int, nargs='?', default=4,
help='number of channels that trigger.')
parser.add_argument('--number_coincidences', type=int, nargs='?', default=2,
help='number of coincidences of true triggers within one station of the detector')
parser.add_argument('--coinc_window', type=int, nargs='?', default=80,
help='coincidence window within which the number of coincidences has to occur, in ns')
parser.add_argument('--int_window', type=float, nargs='?', default=10,
help='integration time window [ns] for power_integration trigger')
parser.add_argument('--passband_low', type=int, nargs='?', default=80,
help='lower bound of the passband used for the trigger in MHz')
parser.add_argument('--passband_high', type=int, nargs='?', default=180,
help='higher bound of the passband used for the trigger in MHz')
parser.add_argument('--order_trigger', type=int, nargs='?', default=10,
help='order of the filter used in the trigger')
parser.add_argument('--Tnoise', type=int, nargs='?', default=300,
help='Temperature of thermal noise in K')
parser.add_argument('--T_noise_min_freq', type=int, nargs='?', default=50,
help='min freq of thermal noise in MHz')
parser.add_argument('--T_noise_max_freq', type=int, nargs='?', default=800,
help='max freq of thermal noise in MHz')
parser.add_argument('--galactic_noise_n_side', type=int, nargs='?', default=4,
help='The n_side parameter of the healpix map. Has to be a power of 2, basically the resolution')
parser.add_argument('--galactic_noise_interpolation_frequencies_start', type=int, nargs='?', default=10,
help='start frequency the galactic noise is interpolated over in MHz')
parser.add_argument('--galactic_noise_interpolation_frequencies_stop', type=int, nargs='?', default=1100,
help='stop frequency the galactic noise is interpolated over in MHz')
parser.add_argument('--galactic_noise_interpolation_frequencies_step', type=int, nargs='?', default=100,
help='frequency steps the galactic noise is interpolated over in MHz')
parser.add_argument('--n_random_phase', type=int, nargs='?', default=10,
help='to save computing time, one galactic noise amplitude is reused '
'n_random_phase times, each time a random phase is added')
parser.add_argument('--threshold_start', type=float, nargs='?',
help='value of the first tested threshold in Volt')
parser.add_argument('--threshold_step', type=float, nargs='?',
help='value of the threshold step in Volt')
parser.add_argument('--station_time', type=str, nargs='?', default='2021-01-01T00:00:00',
help='station time for calculation of galactic noise')
parser.add_argument('--station_time_random', type=bool, nargs='?', default=True,
help='choose if the station time should be random or not')
parser.add_argument('--hardware_response', type=bool, nargs='?', default=True,
help='choose if the hardware response (amp) should be True or False')
parser.add_argument('--iterations_per_job', type=int, nargs='?', default=200,
help='number of iterations performed per job')
parser.add_argument('--number_of_allowed_trigger', type=int, nargs='?', default=3,
help='The number of iterations is calculated to yield a trigger rate')
args = parser.parse_args()
target_global_trigger_rate = args.target_global_trigger_rate * units.Hz
passband_low = args.passband_low * units.megahertz
passband_high = args.passband_high * units.megahertz
passband_trigger = np.array([passband_low, passband_high])
sampling_rate = args.sampling_rate * units.gigahertz
coinc_window = args.coinc_window * units.ns
int_window = args.int_window * units.ns
Tnoise = args.Tnoise * units.kelvin
T_noise_min_freq = args.T_noise_min_freq * units.megahertz
T_noise_max_freq = args.T_noise_max_freq * units.megahertz
galactic_noise_interpolation_frequencies_start = args.galactic_noise_interpolation_frequencies_start * units.MHz
galactic_noise_interpolation_frequencies_stop = args.galactic_noise_interpolation_frequencies_stop * units.MHz
galactic_noise_interpolation_frequencies_step = args.galactic_noise_interpolation_frequencies_step * units.MHz
trace_length = args.trace_samples / sampling_rate
target_single_trigger_rate = hcr.get_single_channel_trigger_rate(
target_global_trigger_rate, args.total_number_triggered_channels, args.number_coincidences, coinc_window)
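# One allowed noise trigger is expected every 1 / (trace_length * rate) simulated traces.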
n_iteration_for_one_allowed_trigger = (trace_length * target_single_trigger_rate) ** -1
n_iterations = int(n_iteration_for_one_allowed_trigger * args.number_of_allowed_trigger / args.n_random_phase)
resolution = (n_iteration_for_one_allowed_trigger * args.number_of_allowed_trigger * trace_length) ** -1
number_of_jobs = n_iterations / args.iterations_per_job
number_of_jobs = int(np.ceil(number_of_jobs))
Vrms_thermal_noise = hcr.calculate_thermal_noise_Vrms(Tnoise, T_noise_max_freq, T_noise_min_freq)
if args.threshold_start is None:
if args.trigger_name == "power_integration":
raise Exception('Please set threshold start value manually for the power integration trigger')
else:
if args.hardware_response:
threshold_start = 1e3 * Vrms_thermal_noise
elif not args.hardware_response:
threshold_start = 1.8 * Vrms_thermal_noise
else:
threshold_start = args.threshold_start * units.volt
if args.threshold_step is None:
if args.trigger_name == "power_integration":
raise Exception('Please set threshold step value manually for the power integration trigger')
else:
if args.hardware_response:
threshold_step = 1e-3 * units.volt
elif not args.hardware_response:
threshold_step = 1e-6 * units.volt
else:
threshold_step = args.threshold_step * units.volt
dic = {'T_noise': Tnoise, 'Vrms_thermal_noise': Vrms_thermal_noise, 'n_iterations_total': n_iterations,
'number_of_allowed_trigger': args.number_of_allowed_trigger, 'iterations_per_job': args.iterations_per_job,
'number_of_jobs': number_of_jobs, 'target_single_trigger_rate': target_single_trigger_rate,
'target_global_trigger_rate': target_global_trigger_rate, 'resolution': resolution,
'trigger_name': args.trigger_name, 'passband_trigger': passband_trigger,
'total_number_triggered_channels': args.total_number_triggered_channels,
'number_coincidences': args.number_coincidences, 'triggered_channels': args.triggered_channels,
'coinc_window': coinc_window, 'int_window': int_window, 'order_trigger': args.order_trigger, 'detector_file': args.detector_file,
'default_station': args.default_station, 'trace_samples': args.trace_samples, 'sampling_rate': sampling_rate,
'trace_length': trace_length, 'T_noise_min_freq': T_noise_min_freq, 'T_noise_max_freq': T_noise_max_freq,
'galactic_noise_n_side': args.galactic_noise_n_side,
'galactic_noise_interpolation_frequencies_start': galactic_noise_interpolation_frequencies_start,
'galactic_noise_interpolation_frequencies_stop': galactic_noise_interpolation_frequencies_stop,
'galactic_noise_interpolation_frequencies_step': galactic_noise_interpolation_frequencies_step,
'station_time': args.station_time, 'station_time_random': args.station_time_random,
'hardware_response': args.hardware_response, 'n_random_phase': args.n_random_phase,
'threshold_start': threshold_start, 'threshold_step': threshold_step}
os.makedirs(os.path.join(args.output_path, 'config/ntr'), exist_ok=True)
output_file = f'config/ntr/config_{args.trigger_name}_trigger_rate_{target_global_trigger_rate/units.Hz:.0f}Hz_coinc_{args.number_coincidences}of{args.total_number_triggered_channels}.json'
abs_path_output_file = os.path.normpath(os.path.join(args.output_path, output_file))
with open(abs_path_output_file, 'w') as outfile:
json.dump(dic, outfile, cls=hcr.NumpyEncoder, indent=4, sort_keys=True)
|
nu-radioREPO_NAMENuRadioMCPATH_START.@NuRadioMC_extracted@NuRadioMC-master@NuRadioReco@examples@cr_efficiency_analysis@1_create_config_file.py@.PATH_END.py
|
{
"filename": "test_scalar_ctors.py",
"repo_name": "numpy/numpy",
"repo_path": "numpy_extracted/numpy-main/numpy/_core/tests/test_scalar_ctors.py",
"type": "Python"
}
|
"""
Test the scalar constructors, which also do type-coercion
"""
import pytest
import numpy as np
from numpy.testing import (
assert_equal, assert_almost_equal, assert_warns,
)
class TestFromString:
def test_floating(self):
# Ticket #640, floats from string
fsingle = np.single('1.234')
fdouble = np.double('1.234')
flongdouble = np.longdouble('1.234')
assert_almost_equal(fsingle, 1.234)
assert_almost_equal(fdouble, 1.234)
assert_almost_equal(flongdouble, 1.234)
def test_floating_overflow(self):
""" Strings containing an unrepresentable float overflow """
fhalf = np.half('1e10000')
assert_equal(fhalf, np.inf)
fsingle = np.single('1e10000')
assert_equal(fsingle, np.inf)
fdouble = np.double('1e10000')
assert_equal(fdouble, np.inf)
flongdouble = assert_warns(RuntimeWarning, np.longdouble, '1e10000')
assert_equal(flongdouble, np.inf)
fhalf = np.half('-1e10000')
assert_equal(fhalf, -np.inf)
fsingle = np.single('-1e10000')
assert_equal(fsingle, -np.inf)
fdouble = np.double('-1e10000')
assert_equal(fdouble, -np.inf)
flongdouble = assert_warns(RuntimeWarning, np.longdouble, '-1e10000')
assert_equal(flongdouble, -np.inf)
class TestExtraArgs:
def test_superclass(self):
# try both positional and keyword arguments
s = np.str_(b'\\x61', encoding='unicode-escape')
assert s == 'a'
s = np.str_(b'\\x61', 'unicode-escape')
assert s == 'a'
# previously this would return '\\xx'
with pytest.raises(UnicodeDecodeError):
np.str_(b'\\xx', encoding='unicode-escape')
with pytest.raises(UnicodeDecodeError):
np.str_(b'\\xx', 'unicode-escape')
# superclass fails, but numpy succeeds
assert np.bytes_(-2) == b'-2'
def test_datetime(self):
dt = np.datetime64('2000-01', ('M', 2))
assert np.datetime_data(dt) == ('M', 2)
with pytest.raises(TypeError):
np.datetime64('2000', garbage=True)
def test_bool(self):
with pytest.raises(TypeError):
np.bool(False, garbage=True)
def test_void(self):
with pytest.raises(TypeError):
np.void(b'test', garbage=True)
class TestFromInt:
def test_intp(self):
# Ticket #99
assert_equal(1024, np.intp(1024))
def test_uint64_from_negative(self):
with pytest.raises(OverflowError):
np.uint64(-2)
int_types = [np.byte, np.short, np.intc, np.long, np.longlong]
uint_types = [np.ubyte, np.ushort, np.uintc, np.ulong, np.ulonglong]
float_types = [np.half, np.single, np.double, np.longdouble]
cfloat_types = [np.csingle, np.cdouble, np.clongdouble]
class TestArrayFromScalar:
""" gh-15467 and gh-19125 """
def _do_test(self, t1, t2, arg=2):
if arg is None:
x = t1()
elif isinstance(arg, tuple):
if t1 is np.clongdouble:
pytest.xfail("creating a clongdouble from real and "
"imaginary parts isn't supported")
x = t1(*arg)
else:
x = t1(arg)
arr = np.array(x, dtype=t2)
# type should be preserved exactly
if t2 is None:
assert arr.dtype.type is t1
else:
assert arr.dtype.type is t2
@pytest.mark.parametrize('t1', int_types + uint_types)
@pytest.mark.parametrize('t2', int_types + uint_types + [None])
def test_integers(self, t1, t2):
return self._do_test(t1, t2)
@pytest.mark.parametrize('t1', float_types)
@pytest.mark.parametrize('t2', float_types + [None])
def test_reals(self, t1, t2):
return self._do_test(t1, t2)
@pytest.mark.parametrize('t1', cfloat_types)
@pytest.mark.parametrize('t2', cfloat_types + [None])
@pytest.mark.parametrize('arg', [2, 1 + 3j, (1, 2), None])
def test_complex(self, t1, t2, arg):
self._do_test(t1, t2, arg)
@pytest.mark.parametrize('t', cfloat_types)
def test_complex_errors(self, t):
with pytest.raises(TypeError):
t(1j, 1j)
with pytest.raises(TypeError):
t(1, None)
with pytest.raises(TypeError):
t(None, 1)
@pytest.mark.parametrize("length",
[5, np.int8(5), np.array(5, dtype=np.uint16)])
def test_void_via_length(length):
res = np.void(length)
assert type(res) is np.void
assert res.item() == b"\0" * 5
assert res.dtype == "V5"
@pytest.mark.parametrize("bytes_",
[b"spam", np.array(567.)])
def test_void_from_byteslike(bytes_):
res = np.void(bytes_)
expected = bytes(bytes_)
assert type(res) is np.void
assert res.item() == expected
# Passing dtype can extend it (this is how filling works)
res = np.void(bytes_, dtype="V100")
assert type(res) is np.void
assert res.item()[:len(expected)] == expected
assert res.item()[len(expected):] == b"\0" * (res.nbytes - len(expected))
# As well as shorten:
res = np.void(bytes_, dtype="V4")
assert type(res) is np.void
assert res.item() == expected[:4]
def test_void_arraylike_trumps_byteslike():
# The memoryview is converted as an array-like of shape (18,)
# rather than a single bytes-like of that length.
m = memoryview(b"just one mintleaf?")
res = np.void(m)
assert type(res) is np.ndarray
assert res.dtype == "V1"
assert res.shape == (18,)
def test_void_dtype_arg():
# Basic test for the dtype argument (positional and keyword)
res = np.void((1, 2), dtype="i,i")
assert res.item() == (1, 2)
res = np.void((2, 3), "i,i")
assert res.item() == (2, 3)
@pytest.mark.parametrize("data",
[5, np.int8(5), np.array(5, dtype=np.uint16)])
def test_void_from_integer_with_dtype(data):
# The "length" meaning is ignored, rather data is used:
res = np.void(data, dtype="i,i")
assert type(res) is np.void
assert res.dtype == "i,i"
assert res["f0"] == 5 and res["f1"] == 5
def test_void_from_structure():
dtype = np.dtype([('s', [('f', 'f8'), ('u', 'U1')]), ('i', 'i2')])
data = np.array(((1., 'a'), 2), dtype=dtype)
res = np.void(data[()], dtype=dtype)
assert type(res) is np.void
assert res.dtype == dtype
assert res == data[()]
def test_void_bad_dtype():
with pytest.raises(TypeError,
match="void: descr must be a `void.*int64"):
np.void(4, dtype="i8")
# Subarray dtype (with shape `(4,)` is rejected):
with pytest.raises(TypeError,
match=r"void: descr must be a `void.*\(4,\)"):
np.void(4, dtype="4i")
|
numpyREPO_NAMEnumpyPATH_START.@numpy_extracted@numpy-main@numpy@_core@tests@test_scalar_ctors.py@.PATH_END.py
|
{
"filename": "04_metrics.ipynb",
"repo_name": "vortex-exoplanet/VIP",
"repo_path": "VIP_extracted/VIP-master/docs/source/tutorials/04_metrics.ipynb",
"type": "Jupyter Notebook"
}
|
# 4. Metrics
> Authors: *Valentin Christiaens* and *Carlos Alberto Gomez Gonzalez*
> Suitable for VIP *v1.0.0* onwards
> Last update: *06/06/2024*
**Table of contents**
* [4.1. Loading ADI data](#4.1.-Loading-ADI-data)
* [4.2. Signal-to-noise ratio and significance](#4.2.-Signal-to-noise-ratio-and-significance)
- [4.2.1. S/N map](#4.2.1.-S/N-map)
- [4.2.2. Significance](#4.2.2.-Significance)
- [4.2.3. STIM map](#4.2.3.-STIM-map)
* [4.3. Automatic detection function](#4.3.-Automatic-detection-function)
* [4.4. Throughput and contrast curves](#4.4.-Throughput-and-contrast-curves)
- [4.4.1. Throughput](#4.4.1.-Throughput)
- [4.4.2. Contrast curves](#4.4.2.-Contrast-curves)
* [4.5. Completeness curves and maps](#4.5.-Completeness-curves-and-maps)
- [4.5.1. Completeness curves](#4.5.1.-Completeness-curves)
- [4.5.2. Completeness maps](#4.5.2.-Completeness-maps)
* [4.6. Applefy contrast curves and grids](#4.6.-Applefy-contrast-curves-and-grids)
- [4.6.1. Applefy contrast curves](#4.6.1.-Applefy-contrast-curves)
- [4.6.2. Applefy contrast grids](#4.6.2.-Applefy-contrast-grids)
- [4.6.2.1. Visualizing the contrast grids](#4.6.2.1.-Visualizing-the-contrast-grids)
- [4.6.2.2. Visualizing the thresholded contrast curves](#4.6.2.2.-Visualizing-the-thresholded-contrast-curves)
This tutorial shows:
- how to compute the S/N ratio of a given companion candidate;
- how to calculate the significance of a detection;
- how to compute S/N ratio maps and STIM maps;
- how to use the automatic point-source detection function;
- how to compute throughput and contrast curves;
- how to compute robust contrast curves and contrast grids with Applefy (**requires the installation of `Applefy`** -- details [here](https://applefy.readthedocs.io/en/latest/index.html#)).
-----------
<div class="alert alert-block alert-info">
<b>Note: </b>
<a class="anchor" id="war1"></a>
A number of routines in the `metrics` subpackage have been implemented for compatibility with **multiprocessing**, in order to optimally harness the power of machines equipped with multiple CPUs. Any function where the `nproc` parameter is available in its call (or which internally calls a `psfsub` function, such as the contrast curve function) can be run in multi-processing, with the value of `nproc` setting the requested number of CPUs to use. Instead of an integer, one can set `nproc=None` to use half of all available CPUs. For optimal results in multiprocessing, set the following environment parameters **BEFORE** launching your Jupyter notebook:
`export MKL_NUM_THREADS=1`
`export NUMEXPR_NUM_THREADS=1`
`export OMP_NUM_THREADS=1`
</div>
-----------
Let's first import a couple of external packages needed in this tutorial:
```python
%matplotlib inline
from hciplot import plot_frames, plot_cubes
from matplotlib import pyplot as plt
import matplotlib.gridspec as gridspec
from multiprocessing import cpu_count
import numpy as np
import os
from packaging import version
from scipy import interpolate
# Seaborn is only necessary to plot Applefy contrast grid results.
# It is not a mandatory requirement of VIP (only one cell will not compile in this notebook).
try:
import seaborn as sns
no_sns=False
except:
no_sns=True
```
In the following box we check that your version of VIP passes the requirements to run this notebook:
```python
import vip_hci as vip
vvip = vip.__version__
print("VIP version: ", vvip)
if version.parse(vvip) <= version.parse("1.0.3"):
msg = "Please upgrade your version of VIP"
msg+= "It should be striclty above 1.0.3 to run this notebook."
raise ValueError(msg)
```
VIP version: 1.6.1
## 4.1. Loading ADI data
In the 'dataset' folder of the `VIP_extras` repository you can find a toy ADI (Angular Differential Imaging) cube and a NACO point spread function (PSF) to demonstrate the capabilities of ``VIP``. This is an L'-band VLT/NACO dataset of beta Pictoris published in [Absil et al. (2013)](https://ui.adsabs.harvard.edu/abs/2013A%26A...559L..12A/abstract) obtained using the Annular Groove Phase Mask (AGPM) Vortex coronagraph. The sequence has been heavily sub-sampled temporally to make it smaller. The frames were also cropped to the central 101x101 area. In case you want to plug in your own cube, just change the paths in the following cells.
More info on this dataset, and on opening and visualizing fits files with VIP in general, is available in Tutorial `1. Quick start`.
Let's load the data:
```python
from vip_hci.fits import open_fits
from astropy.utils.data import download_file
url_d = "https://github.com/vortex-exoplanet/VIP_extras/raw/master/datasets"
f1 = download_file("{}/naco_betapic_cube_cen.fits".format(url_d), cache=True)
f2 = download_file("{}/naco_betapic_psf.fits".format(url_d), cache=True)
f3 = download_file("{}/naco_betapic_derot_angles.fits".format(url_d), cache=True)
# alternatively, for local files simply provide their full or relative path. E.g.:
#f1 = '../datasets/naco_betapic_cube_cen.fits'
#f2 = '../datasets/naco_betapic_psf.fits'
#f3 = '../datasets/naco_betapic_derot_angles.fits'
cube = open_fits(f1)
psf = open_fits(f2)
angs = open_fits(f3)
```
FITS HDU-0 data successfully loaded. Data shape: (61, 101, 101)
FITS HDU-0 data successfully loaded. Data shape: (39, 39)
FITS HDU-0 data successfully loaded. Data shape: (61,)
Let's fit the PSF with a 2D Gaussian to infer the FWHM, the flux in a 1-FWHM size aperture, and get a flux-normalized PSF:
```python
%matplotlib inline
from vip_hci.fm import normalize_psf
psfn, flux, fwhm_naco = normalize_psf(psf, size=19, debug=True, full_output=True)
```

FWHM_y = 4.926059872957138
FWHM_x = 4.675778895005929
centroid y = 9.010992107833063
centroid x = 9.01917912265807
centroid y subim = 9.010992107833063
centroid x subim = 9.01917912265807
amplitude = 0.10032285220380603
theta = -38.446187060503874
Mean FWHM: 4.801
Flux in 1xFWHM aperture: 1.307
```python
print(fwhm_naco)
```
4.800919383981533
Let's visualize the flux-normalized PSF:
```python
plot_frames(psfn, grid=True, size_factor=4)
```

Let's finally define the pixel scale for NACO (L' band), which we get from a dictionary stored in the `config` subpackage:
```python
from vip_hci.config import VLT_NACO
pxscale_naco = VLT_NACO['plsc']
print(pxscale_naco, "arcsec/px")
```
0.02719 arcsec/px
## 4.2. Signal-to-noise ratio and significance
### 4.2.1. S/N map
When testing different stellar PSF modeling and subtraction algorithms, one may end up with a point-like source in the final post-processing images. How can its signal-to-noise ratio be assessed?
By default we adopt the definition of S/N given in [Mawet et al. (2014)](https://ui.adsabs.harvard.edu/abs/2014ApJ...792...97M/abstract), which uses a two-sample t-test for the problem of planet detection. Student (*t*) statistics are relevant for hypothesis testing in the presence of a small sample, which is the case for high-contrast imaging of point sources at small angles. Since the structure of speckle noise varies radially, only a limited sample of pixels, and independent apertures encompassing them, is available for noise estimation at each radius.
The main idea is to test the flux of a given speckle or planet candidate against the flux measured in independent apertures (resolution elements) at the same radial separation from the center:
$$S/N \equiv \frac{\overline{x}_1 - \overline{x}_2}{s_2\sqrt{1+\frac{1}{n_2}}},$$
where $\overline{x}_1$ is the flux of the tested resolution element (blue dot in the figure below), $\overline{x}_2$ and $s_2$ are the mean and empirical standard deviation of the fluxes of the noise resolution elements (red dots in the figure below) and $n_2$ the number of such noise resolution elements.
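For concreteness, this definition is easy to evaluate once the aperture fluxes have been measured. Below is a minimal, illustrative sketch (not the actual `VIP` implementation; the `snr_student` name and the input format are ours) that assumes the aperture fluxes have already been extracted, with the first entry being the tested aperture:

```python
import numpy as np

def snr_student(fluxes):
    """Two-sample t-test S/N (Mawet et al. 2014) from aperture fluxes.

    ``fluxes[0]`` is the flux of the tested resolution element; the remaining
    entries are the noise apertures at the same radial separation.
    """
    x1 = fluxes[0]
    noise = np.asarray(fluxes[1:], dtype=float)
    n2 = noise.size
    x2_bar = noise.mean()
    s2 = noise.std(ddof=1)  # empirical standard deviation of the noise apertures
    return (x1 - x2_bar) / (s2 * np.sqrt(1 + 1 / n2))

# e.g. snr_student([520., 30., -12., 45., 8., -25., 17.])
```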
Let's illustrate this process on a PCA post-processed image which contains a companion candidate:
```python
from vip_hci.psfsub import pca
pca_img = pca(cube, angs, ncomp=6, verbose=False, imlib='vip-fft', interpolation=None)
```
```python
%matplotlib inline
plot_frames(pca_img, grid=True)
```

Let's define the coordinates of a test point source in the image:
```python
xy_b = (58.8,35.3)
```
The `snr` function in the `metrics` module can be used to estimate the S/N ratio of the point source:
```python
from vip_hci.metrics import snr
snr1 = snr(pca_img, source_xy=xy_b, fwhm=fwhm_naco, plot=True)
snr1
```

7.193723081700503
The S/N is relatively high, however we see that two of the test apertures are overlapping with the two negative side lobes (which is the typical signature for a point-source imaged with ADI). The `exclude_negative_lobes` option can be set to avoid considering apertures right next to the test aperture, which can make a significant difference in terms of S/N ratio in some cases:
```python
snr1 = snr(pca_img, source_xy=xy_b, fwhm=fwhm_naco, plot=True, exclude_negative_lobes=True)
snr1
```

14.27577030847377
One has to be careful with the `exclude_negative_lobes` option though, as it decreases the number of test apertures, hence increases the small-angle penalty factor.
In the ``metrics`` subpackage we have also implemented a function for S/N map generation, by computing the S/N for each pixel of a 2D array. It has a parameter `nproc` for exploiting multi-CPU systems which by default is *None*, meaning that it will use half the number of available CPUs in the system.
```python
from vip_hci.metrics import snrmap
snrmap_1 = snrmap(pca_img, fwhm=fwhm_naco, plot=True, nproc=None)
```
――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――
Starting time: 2024-07-04 15:31:23
――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――

S/N map created using 5 processes
Running time: 0:00:02.908474
――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――
In case we really need the S/N map of a big frame (which can take quite some time to compute depending on your hardware), a good alternative is to set `approximated=True`. This uses an approximated S/N definition that yields results close to the definition mentioned earlier.
```python
snrmap_f = snrmap(pca_img, fwhm=fwhm_naco, plot=True, approximated=True)
```
――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――
Starting time: 2024-07-04 15:31:26
――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――

S/N map created using 5 processes
Running time: 0:00:00.297805
――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――
<font color=orange> **Question 4.1**: In the presence of azimuthally extended disc signals in the field of view, how do you expect the S/N ratio of authentic sources to behave? Would the classical definition above still be reliable, or would it underestimate/overestimate the true S/N ratio of authentic circumstellar signals? </font>
### 4.2.2. Significance
Due to the common confusion between S/N ratio and significance, we have included a routine to convert a (Student) S/N ratio (as defined above) into a Gaussian "sigma" significance, which is the most common metric used to assess significance in signal detection theory.
The conversion is based on matching the false positive fraction (FPF; e.g. $3 \times 10^{-7}$ for a $5\sigma$ detection threshold), or equivalently the confidence level CL = 1-FPF.
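To make the conversion explicit, here is a small, illustrative sketch of the underlying idea (assumptions: the number of resolution elements at radius `rad` is taken as `2*pi*rad/fwhm`, and the `snr_to_gaussian_sigma` name is ours; the actual `significance` routine should be preferred in practice):

```python
import numpy as np
from scipy import stats

def snr_to_gaussian_sigma(snr_val, rad, fwhm):
    """Convert a Student-t S/N at separation ``rad`` into a Gaussian sigma."""
    n_res = int(np.floor(2 * np.pi * rad / fwhm))  # resolution elements at this radius
    dof = n_res - 2                                # noise apertures minus one
    fpf = stats.t.sf(snr_val, df=dof)              # false positive fraction
    return stats.norm.isf(fpf)                     # Gaussian quantile matching that FPF

# e.g. snr_to_gaussian_sigma(14.3, rad=17.1, fwhm=fwhm_naco)
```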
As an example, let's consider a second tentative source (arguably point-like) in our Beta Pic dataset:
```python
xy_test = (55.6, 59.7)
snr2 = snr(pca_img, source_xy=xy_test, fwhm=fwhm_naco, plot=True, exclude_negative_lobes=True)
print(snr2)
```

3.067524998435319
The first point source (as shown in Sec. 4.2.1.) has a very high S/N of ~14.3.
The second point-like source has a S/N ~3.1.
What is the significance level that each blob is an authentic point source detection (i.e. not a residual speckle emerging randomly as part of the residual intensity distribution)?
The ``significance`` routine in the `metrics` module takes into account both the S/N ratio and the radial separation of the point source (the FWHM is also required to express that radial separation in terms of FWHM), for conversion to significance in terms of Gaussian statistics.
```python
from vip_hci.metrics import significance
from vip_hci.var import frame_center
cy, cx = frame_center(cube[0])
rad1 = np.sqrt((cy-xy_b[1])**2+(cx-xy_b[0])**2)
rad2 = np.sqrt((cy-xy_test[1])**2+(cx-xy_test[0])**2)
sig1 = significance(snr1, rad1, fwhm_naco, student_to_gauss=True)
sig2 = significance(snr2, rad2, fwhm_naco, student_to_gauss=True)
msg = "The point-like signal with S/N={:.1f} at r = {:.1f}px ({:.1f} FWHM radius) corresponds to a {:.1f}-sigma detection (Gaussian statistics)."
print(msg.format(snr1, rad1, rad1/fwhm_naco, sig1))
print(msg.format(snr2, rad2, rad2/fwhm_naco, sig2))
```
At a separation of 17.1 px (3.6 FWHM), S/N = 14.3 corresponds to a 6.9-sigma detection in terms of Gaussian false alarm probability.
At a separation of 11.2 px (2.3 FWHM), S/N = 3.1 corresponds to a 2.6-sigma detection in terms of Gaussian false alarm probability.
The point-like signal with S/N=14.3 at r = 17.1px (3.6 FWHM radius) corresponds to a 6.9-sigma detection (Gaussian statistics).
The point-like signal with S/N=3.1 at r = 11.2px (2.3 FWHM radius) corresponds to a 2.6-sigma detection (Gaussian statistics).
The first point-like signal appears to be an authentic circumstellar point source (i.e. a physical companion or background object) given that its confidence level is equivalent to that of a $6.9\sigma$ detection (Gaussian sigma). For the second blob, the confidence level is equivalent to that of a $2.6\sigma$ detection (Gaussian statistics) - i.e. it is not a significant detection considering common conventions (3-sigma or 5-sigma threshold).
<font color=orange>**Question 4.2**: How can one disentangle a physically bound companion (planet, brown dwarf or stellar binary) from a background object?</font>
### 4.2.3. STIM map
In the presence of extended disc signals, an alternative to the S/N map is to use the Standardized Trajectory Intensity Map (STIM; [Pairet et al. 2019](https://ui.adsabs.harvard.edu/abs/2019MNRAS.487.2262P/abstract)). A comparison between S/N and STIM maps for disc signals in the system of PDS 70 can be found in [Christiaens et al. (2019a)](https://ui.adsabs.harvard.edu/abs/2019MNRAS.486.5819C/abstract).
The STIM map is defined by:
$${\rm STIM}(x,y) \equiv \frac{\mu_z (x,y)}{\sigma_z (x,y)},$$
where $\mu_z$ and $\sigma_z$ are the temporal mean and standard deviation of the *derotated residual cube* across the temporal dimension, respectively. It is calculated for each pixel (x,y).
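In other words, for a derotated residual cube of shape `(n_frames, ny, nx)`, the STIM map is simply the pixel-wise temporal mean divided by the pixel-wise temporal standard deviation. A minimal NumPy sketch of that idea is shown below (the `vip_hci.metrics.stim_map` function used afterwards is what we actually rely on; `stim_map_sketch` is just an illustrative name):

```python
import numpy as np

def stim_map_sketch(cube_der):
    """Pixel-wise STIM map from a derotated residual cube of shape (t, y, x)."""
    mu = np.mean(cube_der, axis=0)
    sigma = np.std(cube_der, axis=0)
    stim = np.zeros_like(mu)
    nonzero = sigma > 0
    stim[nonzero] = mu[nonzero] / sigma[nonzero]  # avoid division by zero
    return stim
```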
Let's calculate it for our Beta Pictoris L'-band test dataset. For that, let's first run PCA with `full_output=True` to also return the residual cube and derotated residual cube:
```python
from vip_hci.metrics import stim_map
pca_img, _,_, pca_res, pca_res_der = pca(cube, angs, ncomp=15, verbose=False, full_output=True,
imlib='skimage', interpolation='biquartic')
stimmap = stim_map(pca_res_der)  # avoid shadowing the imported stim_map function
```
In order to assess which signals are significant in the STIM map, Pairet et al. (2019) recommend normalizing the STIM map by the maximum intensity found in the inverse STIM map, which is obtained in the same way as the STIM map but using opposite angles for the derotation of the residual cube. Starting from VIP v1.6.0, this can be done with the function `normalized_stim_map`. For previous versions, this involves the use of `inverse_stim_map`:
```python
from vip_hci.metrics import inverse_stim_map
inv_stim_map = inverse_stim_map(pca_res, angs)
if version.parse(vvip) < version.parse("1.6.0"):
norm_stim_map = stimmap/np.nanmax(inv_stim_map)
else:
from vip_hci.metrics import normalized_stim_map
norm_stim_map = normalized_stim_map(pca_res, angs)
```
Using the exact opposite values of derotation angles leads to a similar structure for the residual speckle noise, while authentic signals are not expected to sum up constructively. Therefore the maximum value in the inverse STIM map gives a good estimate of the maximum STIM intensity that one would expect from noise alone.
```python
plot_frames((stimmap, inv_stim_map, norm_stim_map), grid=True,
label=('STIM map', 'inv. STIM map', 'norm. STIM map'))
```

Any value larger than unity in the normalized STIM map is therefore expected to be significant. This can be visualized by thresholding the normalized STIM map.
```python
thr_stim_map = norm_stim_map.copy()
thr_stim_map[np.where(thr_stim_map<1)]=0
```
Beta Pic b is considered significant with the above criterion, although no extended disc structure is detected:
```python
plot_frames((pca_img, thr_stim_map), grid=True,
label=('PCA image (npc=15)', 'thresholded norm. STIM map'))
```

<div class="alert alert-block alert-info">
<b>Note: </b>
<a class="anchor" id="war1"></a>
In the presence of azimuthally extended disc signals, the above criterion may still be too conservative to capture all disc signals present in the image - as these signals may also show up in the inverse STIM map and even dominate the maximum inverse STIM map values. Differential imaging strategies relying on other sources of *diversity* are required to identify azimuthally extended signals (e.g. RDI or PDI).
</div>
<font color=green> **Answer 4.1:** When test apertures include disc signal, the mean flux $\bar{x}_2$ will be higher and/or the noise level will be overestimated (if the extended signal does not cover all apertures), hence leading in both cases to an underestimated S/N ratio of authentic disc signals. The above definition is therefore only relevant for point source detection.</font>
<font color=green> **Answer 4.2:** With enough follow-up observations one can monitor the relative astrometry of the companion candidate with respect to the star, and compare it to the proper motion of the star (typically known to good precision thanks to Gaia for nearby stars). A background object (much further away) is expected to have a significantly different (typically much smaller) proper motion than the target. In the absence of follow-up observations, a first statistical argument can be made based on the separation of the companion candidate and the number density of background objects in that area of the sky - this can be estimated with `VIP`'s `vip.stats.bkg_star_proba` routine combined with the help of a Galactic model (e.g. TRILEGAL or the Besançon model). </font>
## 4.3. Automatic detection function
The `frame_report` routine provides some basic statistics about any post-processed image, including the position and SNR of any candidate in the image. Let's try it on the median-ADI image obtained on our toy dataset:
```python
from vip_hci.psfsub import median_sub
fr_adi = median_sub(cube, angs)
```
――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――
Starting time: 2024-07-04 15:31:35
――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――
Median psf reference subtracted
Done derotating and combining
Running time: 0:00:02.839838
――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――
```python
from vip_hci.metrics import frame_report
frame_report(fr_adi, fwhm=fwhm_naco)
```
――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――
Starting time: 2024-07-04 15:31:38
――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――
S/N map created using 5 processes
Running time: 0:00:03.086918
――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――
――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――
Coords of Max px (X,Y) = 55.0, 54.0
Flux in a centered 1xFWHM circular aperture = 470.077
Central pixel S/N = 3.561
――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――
Inside a centered 1xFWHM circular aperture:
Mean S/N (shifting the aperture center) = 1.667
Max S/N (shifting the aperture center) = 3.561
stddev S/N (shifting the aperture center) = 1.012
――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――
((55, 54), [470.07687891671645], [3.5612234828789724], 1.6674225229822748)
Let's try the `detection` function in the `metrics` module. This is a computer vision blob-detection method based on the Laplacian of Gaussian filter (http://scikit-image.org/docs/dev/api/skimage.feature.html#skimage.feature.blob_log).
Provided a post-processed frame, the FWHM in pixels and a PSF (i.e. what a planet should look like), this function can return the position of point-like sources in the image.
Take a look at the help/docstring for a detailed explanation of the function. Depending on the `mode` the results can be different. A S/N minimum criterion can be provided with `snr_thresh`.
Let's try it on the median-ADI and annular PCA images obtained on the Beta Pic b dataset:
```python
from vip_hci.metrics import detection
detection(fr_adi, fwhm=fwhm_naco, psf=psfn, debug=False, mode='log', snr_thresh=5,
bkg_sigma=5, matched_filter=False)
```
Blobs found: 3
ycen xcen
------ ------
46.130 43.396
35.555 58.654
57.920 47.674
――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――
X,Y = (43.4,46.1)
S/N constraint NOT fulfilled (S/N = 1.626)
――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――
X,Y = (58.7,35.6)
S/N constraint NOT fulfilled (S/N = 2.552)
――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――
X,Y = (47.7,57.9)
S/N constraint NOT fulfilled (S/N = 0.797)
――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――

(array([], dtype=float64), array([], dtype=float64))
Planet *b* is highlighted, but with a rather small S/N (~2.6). We note that a number of other, much less convincing blobs are also highlighted. Let's try the frame obtained with annular PCA:
```python
from vip_hci.psfsub import pca_annular
fr_pca = pca_annular(cube, angs, ncomp=7)
detection(fr_pca, fwhm=fwhm_naco, psf=psfn, bkg_sigma=5, debug=False, mode='log',
snr_thresh=5, plot=True, verbose=True)
```
――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――
Starting time: 2024-07-04 15:31:41
――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――
N annuli = 12, FWHM = 4.000
PCA per annulus (or annular sectors):
Ann 1 PA thresh: 11.42 Ann center: 2 N segments: 1
Done PCA with lapack for current annulus
Running time: 0:00:00.022058
――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――
Ann 2 PA thresh: 6.94 Ann center: 6 N segments: 1
Done PCA with lapack for current annulus
Running time: 0:00:00.058704
――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――
Ann 3 PA thresh: 6.04 Ann center: 10 N segments: 1
Done PCA with lapack for current annulus
Running time: 0:00:00.107226
――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――
Ann 4 PA thresh: 5.65 Ann center: 14 N segments: 1
Done PCA with lapack for current annulus
Running time: 0:00:00.171667
――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――
Ann 5 PA thresh: 5.44 Ann center: 18 N segments: 1
Done PCA with lapack for current annulus
Running time: 0:00:00.252905
――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――
Ann 6 PA thresh: 5.30 Ann center: 22 N segments: 1
Done PCA with lapack for current annulus
Running time: 0:00:00.343990
――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――
Ann 7 PA thresh: 5.21 Ann center: 26 N segments: 1
Done PCA with lapack for current annulus
Running time: 0:00:00.453074
――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――
Ann 8 PA thresh: 5.14 Ann center: 30 N segments: 1
Done PCA with lapack for current annulus
Running time: 0:00:00.578874
――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――
Ann 9 PA thresh: 5.08 Ann center: 34 N segments: 1
Done PCA with lapack for current annulus
Running time: 0:00:00.717235
――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――
Ann 10 PA thresh: 5.04 Ann center: 38 N segments: 1
Done PCA with lapack for current annulus
Running time: 0:00:00.876368
――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――
Ann 11 PA thresh: 5.01 Ann center: 42 N segments: 1
Done PCA with lapack for current annulus
Running time: 0:00:01.048566
――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――
Ann 12 PA thresh: 5.09 Ann center: 45 N segments: 1
Done PCA with lapack for current annulus
Running time: 0:00:01.234632
――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――
Done derotating and combining.
Running time: 0:00:03.935503
――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――
Blobs found: 4
ycen xcen
------ ------
35.703 58.491
58.856 55.039
45.943 55.871
44.344 48.461
――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――
X,Y = (58.5,35.7)
――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――
Coords of chosen px (X,Y) = 58.5, 35.7
Flux in a centered 1xFWHM circular aperture = 375.747
Central pixel S/N = 9.561
――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――
Inside a centered 1xFWHM circular aperture:
Mean S/N (shifting the aperture center) = 8.255
Max S/N (shifting the aperture center) = 9.809
stddev S/N (shifting the aperture center) = 1.326
――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――
X,Y = (55.0,58.9)
S/N constraint NOT fulfilled (S/N = 3.253)
ββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ
X,Y = (55.9,45.9)
S/N constraint NOT fulfilled (S/N = 1.476)
ββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ
X,Y = (48.5,44.3)
S/N constraint NOT fulfilled (S/N = 2.083)
ββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ

(array([35.70251528]), array([58.49135008]))
We see that only one point-like source stands out above the SNR=5 threshold (set with `snr_thresh`): beta Pic b.
We see from these tests that one should take the results obtained by this automatic detection algorithm with a pinch of salt (see e.g. the signals at the edge of the inner mask). It is unlikely to detect point sources better than what can be inferred from visual inspection by human eyes. Much more advanced machine learning techniques should be used to infer the presence of companions that cannot be detected from visual inspection of the final image, and with a low false positive fraction (see e.g. [Gomez Gonzalez et al. 2018](https://ui.adsabs.harvard.edu/abs/2018A%26A...613A..71G/abstract) or [Cantero et al. 2023](https://ui.adsabs.harvard.edu/abs/2023A%26A...680A..86C/abstract)).
## 4.4. Throughput and contrast curves
### 4.4.1. Throughput
`VIP` allows one to measure the throughput of its algorithms by injecting fake companions. The throughput quantifies how much the algorithm subtracts or biases the signal of companions, as a function of the distance from the center (high throughput = low amount of self-subtraction). Let's assess it for our toy datacube. The relevant function is `throughput` in the `metrics` module.
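Conceptually, the throughput at a given separation is just the ratio between the flux recovered for an injected fake companion after post-processing and the flux that was injected. A minimal sketch of that bookkeeping (illustrative only, not the actual `throughput` implementation):
```python
import numpy as np

def throughput_ratio(recovered_fluxes, injected_fluxes):
    """Fraction of the injected companion flux surviving post-processing.

    1.0 means no self-subtraction; 0.0 means the companion is fully removed.
    Both inputs are arrays over radial separation.
    """
    return np.asarray(recovered_fluxes) / np.asarray(injected_fluxes)
```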
In order to measure reliable throughput and contrast curves, one first needs to subtract all real companions from the data.
We'll see in `Tutorial 5.` how to obtain reliable estimates of the parameters of directly imaged companions (radial separation, azimuth and flux). Let's assume we have already inferred these parameters. Removing the companion from the cube is then only a matter of calling the `cube_planet_free` function in the `fm` module:
```python
from vip_hci.fm import cube_planet_free
r_b = 0.452/pxscale_naco # Absil et al. (2013)
theta_b = 211.2+90 # Absil et al. (2013)
f_b = 648.2
cube_emp = cube_planet_free([(r_b, theta_b, f_b)], cube, angs, psfn=psfn)
```
<div class="alert alert-block alert-info">
<b>Note: </b>
<a class="anchor" id="war1"></a>
The convention in VIP is to measure angles from the positive x axis (i.e. as trigonometric angles), as in most other Python packages. This implies adding 90 degrees to position angles measured following the common astronomical convention (East of North).
</div>
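As a quick illustration of this convention, using the position angle of beta Pic b quoted above:
```python
# Position angle measured East of North (astronomical convention)...
pa_east_of_north = 211.2            # beta Pic b, Absil et al. (2013)
# ...converted to the trigonometric convention expected by VIP:
theta_vip = (pa_east_of_north + 90) % 360
print(theta_vip)                    # 301.2
```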
Let's double check the planet was efficiently removed:
```python
pca_emp = pca(cube_emp, angs, ncomp=12, verbose=False)
plot_frames(pca_emp, axis=False)
```

Let's now compute the throughput with the empty cube:
```python
from vip_hci.metrics import throughput
res_thr = throughput(cube_emp, angs, psfn, fwhm_naco, ncomp=15,
algo=pca, nbranch=1, full_output=True)
```
ββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ
Starting time: 2024-07-04 15:31:48
ββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ
Cube without fake companions processed with pca
Running time: 0:00:02.546033
ββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ
Measured annulus-wise noise in resulting frame
Running time: 0:00:02.588970
ββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ
Flux in 1xFWHM aperture: 1.000
Fake companions injected in branch 1 (pattern 1/3)
Running time: 0:00:02.713670
ββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ
Cube with fake companions processed with pca
Measuring its annulus-wise throughput
Running time: 0:00:05.353404
ββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ
Fake companions injected in branch 1 (pattern 2/3)
Running time: 0:00:05.463844
ββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ
Cube with fake companions processed with pca
Measuring its annulus-wise throughput
Running time: 0:00:08.036481
ββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ
Fake companions injected in branch 1 (pattern 3/3)
Running time: 0:00:08.161390
ββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ
Cube with fake companions processed with pca
Measuring its annulus-wise throughput
Running time: 0:00:10.702490
ββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ
Finished measuring the throughput in 1 branches
Running time: 0:00:10.713547
ββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ
```python
plt.figure(figsize=(6,3))
plt.plot(res_thr[3], res_thr[0][0,:], 'o-', alpha=0.5)
plt.ylabel('throughput')
plt.xlabel('separation in pixels')
```
Text(0.5, 0, 'separation in pixels')

<font color=orange> **Question 4.3**: Why does the throughput increase with radial separation?</font>
Let's compare this with the annular PCA result:
```python
res_thr2 = throughput(cube_emp, angs, psfn, fwhm_naco, algo=pca_annular, nbranch=1, verbose=False,
full_output=True, ncomp=10, radius_int=int(fwhm_naco),
delta_rot=0.5, asize=fwhm_naco)
```
```python
plt.figure(figsize=(6,3))
plt.plot(res_thr[3], res_thr[0][0,:], 'o-', label='PCA', alpha=0.5)
plt.plot(res_thr2[3], res_thr2[0][0,:], 'o-', label='annular PCA', alpha=0.5)
plt.ylabel('throughput')
plt.xlabel('separation in pixels')
_ = plt.legend(loc='best')
```

We clearly see the gain in throughput obtained by applying a parallactic angle rejection in our annular PCA processing. For a sequence with more field rotation, the `delta_rot` value could be increased to improve the throughput even further. Note that the drop to zero at the largest separations is due to the concentric annuli not fully covering the edges of the field.
<font color=green> **Answer 4.3**: There is more linear motion radially outward in the rotating field (e.g. ~60 deg of rotation corresponds to ~5 px of azimuthal motion for a hypothetical planet at 5 px separation, but to ~31 px of motion at 30 px separation). As a result, a putative planet located further out is comparatively less captured in the principal components used to build the stellar PSF model, hence suffers less self-subtraction (i.e. enjoys a higher throughput) after model subtraction. </font>
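To put rough numbers on this (a small illustrative calculation, not part of the original tutorial code):
```python
import numpy as np

rot = np.deg2rad(60)              # total field rotation of the sequence (illustrative)
for sep in (5, 30):               # separations in pixels
    arc = sep * rot               # azimuthal path traced by a planet at that radius
    print(f"separation {sep} px -> ~{arc:.0f} px of azimuthal motion")
# separation 5 px -> ~5 px of azimuthal motion
# separation 30 px -> ~31 px of azimuthal motion
```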
### 4.4.2. Contrast curves
Now let's see how to generate 5-sigma contrast curves for ADI data using the `contrast_curve` function.
The contrast curve shows the achieved contrast (i.e. how much fainter than the star a companion can be detected) as a function of radius, for a given datacube post-processed with a given algorithm.
The contrast curve takes into account both the noise level in the final image and the algorithmic throughput (previous subsection). The noise level is used to infer the signal required for the S/N to achieve a 5-sigma significance at each radial separation, as calculated in [Section 4.2.2](#4.2.2.-Significance). Note that `sigma` is an input parameter such that the contrast curve can also be calculated for e.g. 1 or 3 $\sigma$. Let's set it to 5:
```python
nsig=5
```
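For intuition, the quantity plotted in a contrast curve is, at each separation, essentially the noise level scaled by the significance threshold and corrected for the algorithmic throughput and the stellar flux. A rough sketch of that bookkeeping (illustrative only; the actual `contrast_curve` additionally applies the small-sample Student-t correction at small separations):
```python
def naive_contrast(noise_r, throughput_r, starphot, nsig=5):
    """Rough nsig-sigma contrast estimate at one separation (illustrative only).

    noise_r:      annulus-wise noise in the post-processed frame (aperture-flux units)
    throughput_r: algorithmic throughput at that separation (between 0 and 1)
    starphot:     stellar flux measured in the same aperture and units
    """
    return nsig * noise_r / (throughput_r * starphot)
```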
Among the other parameters of the `contrast_curve` function, ``algo`` accepts any function in ``VIP`` for model PSF subtraction, and optional parameters of the chosen `algo` can also be passed directly as keyword arguments of `contrast_curve`. The parameter `starphot` sets the flux of the star. The latter was obtained from the non-coronagraphic PSF, before normalization and after rescaling to the integration time used in the coronagraphic observations:
```python
starphot = 764939.6
```
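For reference, a value of this kind can be derived along the following lines. This is only a sketch: `psf_raw` (the non-normalized, non-coronagraphic PSF frame), `dit_psf` and `dit_science` are assumed, dataset-specific names that are not defined in this tutorial:
```python
import numpy as np
from photutils.aperture import CircularAperture, aperture_photometry

# Measure the stellar flux in a 1-FWHM-diameter aperture on the raw PSF frame,
# then rescale it to the integration time of the coronagraphic observations.
cy, cx = np.array(psf_raw.shape) // 2
aper = CircularAperture((cx, cy), r=fwhm_naco / 2)
flux_psf = float(aperture_photometry(psf_raw, aper)['aperture_sum'][0])
starphot_est = flux_psf * dit_science / dit_psf
```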
In the example below, we'll first calculate the contrast curve obtained with full-frame PCA, and then with PCA in concentric annuli with a PA threshold.
```python
from vip_hci.metrics import contrast_curve
cc_1 = contrast_curve(cube_emp, angs, psfn, fwhm=fwhm_naco, pxscale=pxscale_naco, starphot=starphot,
sigma=nsig, nbranch=3, algo=pca, ncomp=9, debug=True)
```
ββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ
Starting time: 2024-07-04 15:32:14
ββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ
ALGO : pca, FWHM = 4.800919383981533, # BRANCHES = 3, SIGMA = 5, STARPHOT = 764939.6
Finished the throughput calculation
Running time: 0:00:27.490398
ββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ





```python
cc_2 = contrast_curve(cube_emp, angs, psfn, fwhm=fwhm_naco, pxscale=pxscale_naco, starphot=starphot,
sigma=nsig, nbranch=1, delta_rot=0.5, algo=pca_annular, ncomp=8,
radius_int=int(fwhm_naco), asize=fwhm_naco, debug=True)
```
ββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ
Starting time: 2024-07-04 15:32:43
ββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ
ALGO : pca_annular, FWHM = 4.800919383981533, # BRANCHES = 1, SIGMA = 5, STARPHOT = 764939.6
Finished the throughput calculation
Running time: 0:00:15.859550
ββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ





<font color=orange> **Question 4.4**: Considering the hard-coded planet parameters and stellar flux provided above, where would Beta Pic b sit on this contrast curve?</font>
<font color=orange> **Question 4.5**: If a companion is present in the datacube, how do you expect it to affect the contrast curve? In other words what would happen if `cube` was provided to `contrast_curve` instead of `cube_emp`? </font>
<font color=green> **Answer 4.4**: As illustrated below. </font>
```python
plt.figure(figsize=(8,5))
plt.plot(cc_1['distance']*pxscale_naco,
-2.5*np.log10(cc_1['sensitivity_student']),
'r-', label='{}-sigma contrast (PCA)'.format(nsig), alpha=0.5)
plt.plot(cc_2['distance']*pxscale_naco,
-2.5*np.log10(cc_2['sensitivity_student']),
'b-', label='{}-sigma contrast (annular PCA)'.format(nsig), alpha=0.5)
r_b = 16.583
plt.plot(r_b*pxscale_naco,
-2.5*np.log10(700./starphot), 'gs', label='Beta Pic b')
plt.gca().invert_yaxis()
plt.ylabel('Contrast (mag)')
plt.xlabel('Separation (arcsec)')
_ = plt.legend(loc='best')
plt.show()
```

<font color=green> **Answer 4.5**: It would artificially increase the contrast values (i.e. degrade the apparent sensitivity) at the radial separation of the companion, creating a bump in the curve, since the companion signal would be treated as extra noise. Illustration below: </font>
```python
cc_not_emp = contrast_curve(cube, angs, psfn, fwhm=fwhm_naco, pxscale=pxscale_naco, starphot=starphot,
sigma=nsig, nbranch=1, delta_rot=0.5, algo=pca_annular, ncomp=8,
radius_int=int(fwhm_naco), asize=fwhm_naco, debug=True)
```
ββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ
Starting time: 2024-07-04 15:33:00
ββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ
ALGO : pca_annular, FWHM = 4.800919383981533, # BRANCHES = 1, SIGMA = 5, STARPHOT = 764939.6
Finished the throughput calculation
Running time: 0:00:15.342830
ββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ





## 4.5. Completeness curves and maps
### 4.5.1. Completeness curves
`VIP` allows the calculation of completeness curves, as implemented in [Dahlqvist et al. (2021)](https://ui.adsabs.harvard.edu/abs/2021A%26A...646A..49D/abstract) following the framework introduced in [Jensen-Clem et al. (2018)](https://ui.adsabs.harvard.edu/abs/2018AJ....155...32J/abstract).
These are contrast curves corresponding to a given completeness level (e.g. 95%) and one false positive in the full frame. In other words, the 95% completeness curve corresponds to the contrast at which a given companion would be recovered 95% of the time (19/20 true positives).
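As a quick illustration of what the completeness level means in practice (assuming 20 injections per contrast level, as used for the completeness map further below):
```python
import numpy as np

n_fc = 20                           # fake-companion injections per contrast level
completeness = 0.95
min_recoveries = int(np.ceil(completeness * n_fc))
print(min_recoveries)               # 19, i.e. 19/20 true positives
```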
Setting `an_dist` to None produces a completeness curve spanning, in radius, from 2 FWHM to half the size of the provided cube images minus half the size of the PSF frame, with a step of 5 pixels.
```python
# let's first crop the PSF image
from vip_hci.preproc import frame_crop
crop_sz = 15
if psfn.shape[-1]>crop_sz:
psfn = frame_crop(psfn, crop_sz)
```
New shape: (15, 15)
```python
from vip_hci.metrics import completeness_curve
an_dist, comp_curve = completeness_curve(cube_emp, angs, psfn, fwhm_naco, pca, an_dist=np.arange(10,40,5),
pxscale=pxscale_naco, ini_contrast = 1e-3/np.arange(10,40,5), starphot=starphot,
plot=True, nproc=None, algo_dict={'ncomp':5, 'imlib':'opencv'})
```
Calculating initial SNR map with no injected companion...
*** Calculating contrast at r = 10 ***
Found contrast level for first TP detection: 0.00050625
Found lower and upper bounds of sought contrast: [0.0017085937499999998, 0.0025628906249999996]
=> found final contrast for 95.0% completeness: 0.00196171875
*** Calculating contrast at r = 15 ***
Found contrast level for first TP detection: 0.00022500000000000002
Found lower and upper bounds of sought contrast: [0.00050625, 0.000759375]
=> found final contrast for 95.0% completeness: 0.0005695312499999999
*** Calculating contrast at r = 20 ***
Found contrast level for first TP detection: 0.00011250000000000001
Found lower and upper bounds of sought contrast: [0.00016875, 0.000253125]
=> found final contrast for 95.0% completeness: 0.00023625
*** Calculating contrast at r = 25 ***
Found contrast level for first TP detection: 4e-05
Found lower and upper bounds of sought contrast: [9.000000000000002e-05, 0.00013500000000000003]
=> found final contrast for 95.0% completeness: 0.00010253376000000003
*** Calculating contrast at r = 30 ***
Found contrast level for first TP detection: 5e-05
Found lower and upper bounds of sought contrast: [0.00011250000000000001, 0.00016875]
=> found final contrast for 95.0% completeness: 0.00014135112762451174
*** Calculating contrast at r = 35 ***
Found contrast level for first TP detection: 6.428571428571429e-05
Found lower and upper bounds of sought contrast: [9.642857142857143e-05, 0.00014464285714285715]
=> found final contrast for 95.0% completeness: 0.00012245344201820667

### 4.5.2. Completeness maps
One can also compute completeness maps, considering the dependency of the contrast on both radius and completeness. Since this is a very slow process (even slower than the completeness curve), we will limit it to only one of the radii tested for the completeness curves, and set the initial contrast (`ini_contrast`) to the output of the `completeness_curve` function at that radius:
```python
n_fc = 20
test_an_dist = 25 # tested radial distance
# initial contrast set to value found in completeness curve
idx = np.where(an_dist == test_an_dist)[0]
ini_contrast = comp_curve[idx]
```
```python
from vip_hci.metrics import completeness_map
an_dist, comp_levels, contrasts = completeness_map(cube_emp, angs, psfn, fwhm_naco, pca, an_dist=[test_an_dist],
n_fc=n_fc, ini_contrast=ini_contrast, starphot=starphot,
nproc=None, algo_dict={'ncomp':3, 'imlib':'opencv'})
```
Starting annulus 25
Lower bound (5%) found: 1.3686604804687503e-05
Upper bound (95%) found: 0.00020026125000000006
Data point 0.15 found. Still 13 data point(s) missing
Data point 0.25 found. Still 12 data point(s) missing
Data point 0.3 found. Still 10 data point(s) missing
Data point 0.4 found. Still 9 data point(s) missing
Data point 0.5 found. Still 8 data point(s) missing
Data point 0.55 found. Still 7 data point(s) missing
Data point 0.6 found. Still 6 data point(s) missing
Data point 0.7 found. Still 5 data point(s) missing
Data point 0.75 found. Still 4 data point(s) missing
Data point 0.8 found. Still 2 data point(s) missing
Data point 0.85 found. Still 1 data point(s) missing
Let's plot the achieved contrast as a function of the assumed completeness ratio, at a radial separation of 25 px:
```python
fig = plt.figure(figsize = [5,5])
plt.plot(comp_levels, contrasts[0,:])
plt.xlabel("Completeness")
plt.ylabel("Contrast at {} px separation".format(test_an_dist))
plt.show()
```

## 4.6. Applefy contrast curves and grids
[Applefy](https://applefy.readthedocs.io/en/latest/index.html#) enables the calculation of robust detection limits, be it contrast curves or contrast grids, for different assumptions on the residual noise in the image (see details in [Bonse et al. 2023](https://ui.adsabs.harvard.edu/abs/2023AJ....166...71B/abstract)). The package is not a mandatory requirement of VIP, but it is interoperable with the `pca` algorithm (the only compatible option as of 31 August 2023; more algorithms may become compatible later). Below are some example usages to calculate robust contrast curves and contrast grids if you have Applefy installed (see installation instructions [here](https://applefy.readthedocs.io/en/latest/01_getting_started.html#additional-options)).
```python
try:
import applefy
applefy_installed=True
except:
applefy_installed=False
```
Let's first define an Applefy `Contrast` instance for our dataset (using the cube from which the planet was already removed), and a checkpoint directory where intermediate files will be saved (which also enables faster re-calculation of the contrast curves later):
```python
checkpoint_dir = "../datasets/TMP/cc/"
```
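If this directory does not exist yet on your machine, it needs to be created first (the same is done explicitly further below for the contrast-grid example):
```python
import os
os.makedirs(checkpoint_dir, exist_ok=True)
```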
```python
if applefy_installed:
from applefy.detections.contrast import Contrast
contrast_instance = Contrast(science_sequence=cube_emp, psf_template=psfn, parang_rad=angs,
psf_fwhm_radius=fwhm_naco/2, dit_psf_template=1., dit_science=starphot,
scaling_factor=1., # A factor to account e.g. for ND filters
checkpoint_dir=checkpoint_dir)
```
<div class="alert alert-block alert-info">
<b>Note: </b>
<a class="anchor" id="war1"></a>
Since the PSF is already normalized to an integrated flux of unity within 1 FWHM, we can simply set the ratio between `dit_science` and `dit_psf_template` to the integrated stellar flux `starphot`.
</div>
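In other words, a fake companion injected at flux ratio $c$ ends up with an absolute flux of roughly $c \times$ `starphot` in the science frames. A quick sanity check of that bookkeeping, with an illustrative flux ratio:
```python
flux_ratio_example = 1e-3            # injected planet-to-star flux ratio (illustrative)
dit_psf_template = 1.0
dit_science = starphot               # = 764939.6 for this dataset
injected_flux = flux_ratio_example * dit_science / dit_psf_template
print(injected_flux)                 # ~764.9 counts, i.e. flux ratio x starphot
```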
### 4.6.1. Applefy contrast curves
Let's now define the brightness and number of fake planets to be injected:
```python
flux_ratio_mag = 7 # magnitude difference of injected fake planets
num_fake_planets = 6 # number of injected fake planets azimuthally
```
```python
if applefy_installed:
from applefy.utils import flux_ratio2mag, mag2flux_ratio
flux_ratio = mag2flux_ratio(flux_ratio_mag)
print("{} fake planets will be injected azimuthally at {} flux ratio ({:.1f} mag difference)".format(num_fake_planets,
flux_ratio,
flux_ratio_mag))
```
6 fake planets will be injected azimuthally at 0.001584893192461114 flux ratio (7.0 mag difference)
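For reference, `mag2flux_ratio` and `flux_ratio2mag` implement the standard magnitude relation, which is easy to verify against the printout above:
```python
import numpy as np

dmag = 7.0
print(10 ** (-dmag / 2.5))                       # 0.001584893192461114
print(-2.5 * np.log10(1.584893192461114e-03))    # 7.0
```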
```python
if applefy_installed:
contrast_instance.design_fake_planet_experiments(flux_ratios=flux_ratio,
num_planets=num_fake_planets,
overwrite=True)
```
Let's now run the fake planet experiment. For this we consider a grid of tested numbers of principal components:
```python
if applefy_installed:
components = [1, 4, 8, 13, 19, 25, 32] # can also be a tuple of 3 elements with a given step
from applefy.wrappers.vip import MultiComponentPCAvip
algorithm_function = MultiComponentPCAvip(num_pcas=components, kwarg={'verbose':False})
contrast_instance.run_fake_planet_experiments(algorithm_function=algorithm_function,
num_parallel=cpu_count()//2)
```
Running fake planet experiments...
100%|ββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ| 61/61 [02:59<00:00, 2.95s/it]
[DONE]
Select the aperture photometry mode (choice between spaced pixels "FS" or aperture sums "ASS"):
```python
if applefy_installed:
from applefy.utils.photometry import AperturePhotometryMode
# Use apertures pixel values
photometry_mode_planet = AperturePhotometryMode(
"ASS", # or "AS"
psf_fwhm_radius=fwhm_naco/2,
search_area=0.5)
photometry_mode_noise = AperturePhotometryMode(
"AS",
psf_fwhm_radius=fwhm_naco/2)
```
```python
if applefy_installed:
contrast_instance.prepare_contrast_results(
photometry_mode_planet=photometry_mode_planet,
photometry_mode_noise=photometry_mode_noise)
```
Select the statistical test to be used:
```python
if applefy_installed:
from applefy.statistics import TTest, gaussian_sigma_2_fpf, fpf_2_gaussian_sigma, LaplaceBootstrapTest
```
```python
if applefy_installed:
statistical_test = TTest()
```
Finally, let's calculate the contrast curves (see all options in the [Applefy tutorial](https://applefy.readthedocs.io/en/latest/02_user_documentation/01_contrast_curves.html))
```python
if applefy_installed:
contrast_curves, contrast_errors = contrast_instance.compute_analytic_contrast_curves(
statistical_test=statistical_test,
confidence_level_fpf=gaussian_sigma_2_fpf(5),
num_rot_iter=20,
pixel_scale=pxscale_naco)
```
Computing contrast curve for PCA (001 components)
100%|ββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ| 10/10 [00:02<00:00, 3.80it/s]
Computing contrast curve for PCA (004 components)
100%|ββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ| 10/10 [00:02<00:00, 3.73it/s]
Computing contrast curve for PCA (008 components)
100%|ββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ| 10/10 [00:02<00:00, 3.82it/s]
Computing contrast curve for PCA (013 components)
100%|ββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ| 10/10 [00:02<00:00, 3.77it/s]
Computing contrast curve for PCA (019 components)
100%|ββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ| 10/10 [00:02<00:00, 3.71it/s]
Computing contrast curve for PCA (025 components)
100%|ββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ| 10/10 [00:02<00:00, 3.73it/s]
Computing contrast curve for PCA (032 components)
100%|ββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ| 10/10 [00:02<00:00, 3.78it/s]
Let's examine the output:
```python
if applefy_installed:
contrast_curves
```
Let's now plot the contrast curves:
```python
if applefy_installed:
# compute the overall best contrast curve
overall_best = np.min(contrast_curves.values, axis=1)
```
```python
if applefy_installed:
# get the error bars of the the overall best contrast curve
best_idx = np.argmin(contrast_curves.values, axis=1)
best_contrast_errors = contrast_errors.values[np.arange(len(best_idx)), best_idx]
```
```python
if no_sns:
colors = ['b','c','g','y','orange','r','m','k']
else:
# nicer plotting if seaborn installed
colors = sns.color_palette("rocket_r",
n_colors=len(contrast_curves.columns))
colors.append('b')
```
```python
if applefy_installed:
separations_arcsec = contrast_curves.reset_index(level=0).index
separations_FWHM = contrast_curves.reset_index(level=1).index
```
```python
if applefy_installed:
# 1.) Create Plot Layout
fig = plt.figure(constrained_layout=False, figsize=(12, 8))
gs0 = fig.add_gridspec(1, 1)
axis_contrast_curvse = fig.add_subplot(gs0[0, 0])
# ---------------------- Create the Plot --------------------
i = 0 # color picker
for tmp_model in contrast_curves.columns:
num_components = int(tmp_model[5:9])
tmp_flux_ratios = contrast_curves.reset_index(
level=0)[tmp_model].values
tmp_errors = contrast_errors.reset_index(
level=0)[tmp_model].values
axis_contrast_curvse.plot(
separations_arcsec,
tmp_flux_ratios,
color = colors[i],
label=num_components)
axis_contrast_curvse.fill_between(
separations_arcsec,
tmp_flux_ratios + tmp_errors,
tmp_flux_ratios - tmp_errors,
color = colors[i],
alpha=0.5)
i+=1
axis_contrast_curvse.set_yscale("log")
# ------------ Plot the overall best -------------------------
axis_contrast_curvse.plot(
separations_arcsec,
overall_best,
color = colors[i],
lw=3,
ls="--",
label="Best")
# ------------- Double axis and limits -----------------------
lim_mag_y = (12.5, 6)
lim_arcsec_x = (0.1, 1.3)
sep_lambda_arcse = interpolate.interp1d(
separations_arcsec,
separations_FWHM,
fill_value='extrapolate')
axis_contrast_curvse_mag = axis_contrast_curvse.twinx()
axis_contrast_curvse_mag.plot(
separations_arcsec,
flux_ratio2mag(tmp_flux_ratios),
alpha=0.)
axis_contrast_curvse_mag.invert_yaxis()
axis_contrast_curvse_lambda = axis_contrast_curvse.twiny()
axis_contrast_curvse_lambda.plot(
separations_FWHM,
tmp_flux_ratios,
alpha=0.)
axis_contrast_curvse.grid(which='both')
axis_contrast_curvse_mag.set_ylim(*lim_mag_y)
axis_contrast_curvse.set_ylim(
mag2flux_ratio(lim_mag_y[0]),
mag2flux_ratio(lim_mag_y[1]))
axis_contrast_curvse.set_xlim(
*lim_arcsec_x)
axis_contrast_curvse_mag.set_xlim(
*lim_arcsec_x)
axis_contrast_curvse_lambda.set_xlim(
*sep_lambda_arcse(lim_arcsec_x))
# ----------- Labels and fontsizes --------------------------
axis_contrast_curvse.set_xlabel(
r"Separation [arcsec]", size=16)
axis_contrast_curvse_lambda.set_xlabel(
r"Separation [FWHM]", size=16)
axis_contrast_curvse.set_ylabel(
r"Planet-to-star flux ratio", size=16)
axis_contrast_curvse_mag.set_ylabel(
r"$\Delta$ Magnitude", size=16)
axis_contrast_curvse.tick_params(
axis='both', which='major', labelsize=14)
axis_contrast_curvse_lambda.tick_params(
axis='both', which='major', labelsize=14)
axis_contrast_curvse_mag.tick_params(
axis='both', which='major', labelsize=14)
axis_contrast_curvse_mag.set_title(
r"$5 \sigma_{\mathcal{N}}$ Contrast Curves",
fontsize=18, fontweight="bold", y=1.1)
# --------------------------- Legend -----------------------
handles, labels = axis_contrast_curvse.\
get_legend_handles_labels()
leg1 = fig.legend(handles, labels,
bbox_to_anchor=(0.12, -0.08),
fontsize=14,
title="# PCA components",
loc='lower left', ncol=8)
_=plt.setp(leg1.get_title(),fontsize=14)
```

Now let's plot the optimal number of principal components as a function of separation:
```python
if applefy_installed:
plt.figure(figsize=(12, 8))
plt.plot(separations_arcsec,
np.array(components)[np.argmin(
contrast_curves.values,
axis=1)],)
plt.title(r"Best number of PCA components",
fontsize=18, fontweight="bold", y=1.1)
plt.tick_params(axis='both', which='major', labelsize=14)
plt.xlabel("Separation [arcsec]", fontsize=16)
plt.ylabel("Number of PCA components", fontsize=16)
plt.grid()
ax2 = plt.twiny()
ax2.plot(separations_FWHM,
np.array(components)[
np.argmin(contrast_curves.values, axis=1)],)
ax2.set_xlabel("Separation [FWHM]", fontsize=16)
ax2.tick_params(axis='both', which='major', labelsize=14)
```

### 4.6.2. Applefy contrast grids
Again, let's start by defining a contrast instance, along with a checkpoint directory:
```python
checkpoint_dir = "../datasets/TMP/cg/"
```
```python
if applefy_installed:
if not os.path.isdir(checkpoint_dir):
os.makedirs(checkpoint_dir)
contrast_instance = Contrast(science_sequence=cube_emp, psf_template=psfn, parang_rad=angs,
psf_fwhm_radius=fwhm_naco/2, dit_psf_template=1., dit_science=starphot,
scaling_factor=1., # A factor to account e.g. for ND filters
checkpoint_dir=checkpoint_dir)
```
Let's define the brightness and number of fake planets to be injected - this time we'll consider a range of contrasts:
```python
flux_ratios_mag = np.linspace(7.5, 12, 10) # magnitude differences of injected fake planets
num_fake_planets = 3 # number of injected fake planets azimuthally
```
```python
if applefy_installed:
from applefy.utils import flux_ratio2mag, mag2flux_ratio
flux_ratios = mag2flux_ratio(flux_ratios_mag)
print("{} fake planets will be injected azimuthally at {} flux ratios ({} mag difference)".format(num_fake_planets,
flux_ratios,
flux_ratios_mag))
```
3 fake planets will be injected azimuthally at [1.00000000e-03 6.30957344e-04 3.98107171e-04 2.51188643e-04
1.58489319e-04 1.00000000e-04 6.30957344e-05 3.98107171e-05
2.51188643e-05 1.58489319e-05] flux ratios ([ 7.5 8. 8.5 9. 9.5 10. 10.5 11. 11.5 12. ] mag difference)
```python
if applefy_installed:
contrast_instance.design_fake_planet_experiments(flux_ratios=flux_ratios,
num_planets=num_fake_planets,
overwrite=True)
```
Let's now run the fake planet experiment. For this we consider a grid of tested numbers of principal components:
```python
if applefy_installed:
from applefy.wrappers.vip import MultiComponentPCAvip
algorithm_function = MultiComponentPCAvip(num_pcas=components, kwarg={'verbose':False})
contrast_instance.run_fake_planet_experiments(algorithm_function=algorithm_function,
num_parallel=cpu_count()//2)
```
Running fake planet experiments...
100%|ββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ| 301/301 [15:30<00:00, 3.09s/it]
[DONE]
Select the aperture photometry mode (choice between spaced pixels "FS" or aperture sums "ASS"):
```python
if applefy_installed:
from applefy.utils.photometry import AperturePhotometryMode
# Use apertures pixel values
photometry_mode_planet = AperturePhotometryMode(
"ASS", # or "AS"
psf_fwhm_radius=fwhm_naco/2,
search_area=0.5)
photometry_mode_noise = AperturePhotometryMode(
"AS",
psf_fwhm_radius=fwhm_naco/2)
```
```python
if applefy_installed:
contrast_instance.prepare_contrast_results(
photometry_mode_planet=photometry_mode_planet,
photometry_mode_noise=photometry_mode_noise)
```
Select the statistical test to be used:
```python
if applefy_installed:
from applefy.statistics import TTest, gaussian_sigma_2_fpf, fpf_2_gaussian_sigma, LaplaceBootstrapTest
```
```python
if applefy_installed:
statistical_test = TTest()
```
Let's then calculate the contrast grids (i.e. the FPF expressed in Gaussian $\sigma$ for different numbers of principal components and different injected contrasts; see all options in the [Applefy tutorial](https://applefy.readthedocs.io/en/latest/02_user_documentation/02_contrast_grid.html))
```python
if applefy_installed:
contrast_curves_grid, contrast_grids = contrast_instance.compute_contrast_grids(
statistical_test=statistical_test,
num_cores=cpu_count()//2,
confidence_level_fpf=gaussian_sigma_2_fpf(5),
num_rot_iter=20,
safety_margin=1.0,
pixel_scale=pxscale_naco)
```
Computing contrast grid for PCA (001 components)
Computing contrast grid with multiprocessing:
....................................................................................................[DONE]
Computing contrast grid for PCA (004 components)
Computing contrast grid with multiprocessing:
....................................................................................................[DONE]
Computing contrast grid for PCA (008 components)
Computing contrast grid with multiprocessing:
....................................................................................................[DONE]
Computing contrast grid for PCA (013 components)
Computing contrast grid with multiprocessing:
....................................................................................................[DONE]
Computing contrast grid for PCA (019 components)
Computing contrast grid with multiprocessing:
....................................................................................................[DONE]
Computing contrast grid for PCA (025 components)
Computing contrast grid with multiprocessing:
....................................................................................................[DONE]
Computing contrast grid for PCA (032 components)
Computing contrast grid with multiprocessing:
....................................................................................................[DONE]
Two outputs are obtained: **contrast grids** (in the form of a dictionary, one grid for each number of PCs) and **contrast curves** obtained by thresholding the contrast grids (in the form of a Pandas table).
#### 4.6.2.1. Visualizing the contrast grids
```python
if applefy_installed:
print(contrast_curves_grid)
```
PCA (001 components) \
separation [$FWHM$] separation [arcsec]
1.0 0.130537 inf
2.0 0.261074 inf
3.0 0.391611 0.000317
4.0 0.522148 0.000205
5.0 0.652685 0.000160
6.0 0.783222 0.000112
7.0 0.913759 0.000086
8.0 1.044296 0.000050
9.0 1.174833 0.000031
10.0 1.305370 0.000025
PCA (004 components) \
separation [$FWHM$] separation [arcsec]
1.0 0.130537 inf
2.0 0.261074 0.000757
3.0 0.391611 0.000167
4.0 0.522148 0.000115
5.0 0.652685 0.000085
6.0 0.783222 0.000060
7.0 0.913759 0.000051
8.0 1.044296 0.000044
9.0 1.174833 0.000026
10.0 1.305370 0.000027
PCA (008 components) \
separation [$FWHM$] separation [arcsec]
1.0 0.130537 inf
2.0 0.261074 0.000611
3.0 0.391611 0.000161
4.0 0.522148 0.000103
5.0 0.652685 0.000093
6.0 0.783222 0.000061
7.0 0.913759 0.000047
8.0 1.044296 0.000034
9.0 1.174833 0.000023
10.0 1.305370 0.000026
PCA (013 components) \
separation [$FWHM$] separation [arcsec]
1.0 0.130537 inf
2.0 0.261074 0.000524
3.0 0.391611 0.000148
4.0 0.522148 0.000086
5.0 0.652685 0.000074
6.0 0.783222 0.000052
7.0 0.913759 0.000041
8.0 1.044296 0.000040
9.0 1.174833 0.000037
10.0 1.305370 0.000030
PCA (019 components) \
separation [$FWHM$] separation [arcsec]
1.0 0.130537 inf
2.0 0.261074 0.000565
3.0 0.391611 0.000120
4.0 0.522148 0.000070
5.0 0.652685 0.000067
6.0 0.783222 0.000049
7.0 0.913759 0.000038
8.0 1.044296 0.000035
9.0 1.174833 0.000026
10.0 1.305370 0.000029
PCA (025 components) \
separation [$FWHM$] separation [arcsec]
1.0 0.130537 inf
2.0 0.261074 0.000954
3.0 0.391611 0.000108
4.0 0.522148 0.000056
5.0 0.652685 0.000063
6.0 0.783222 0.000047
7.0 0.913759 0.000030
8.0 1.044296 0.000027
9.0 1.174833 0.000021
10.0 1.305370 0.000022
PCA (032 components)
separation [$FWHM$] separation [arcsec]
1.0 0.130537 inf
2.0 0.261074 inf
3.0 0.391611 0.000113
4.0 0.522148 0.000061
5.0 0.652685 0.000048
6.0 0.783222 0.000049
7.0 0.913759 0.000033
8.0 1.044296 0.000032
9.0 1.174833 0.000023
10.0 1.305370 0.000023
Let's check the results for 19 principal components:
```python
ex_pc = 19
```
```python
if applefy_installed:
example_grid = contrast_grids["PCA ({:03d} components)".format(ex_pc)]
# convert FPF to Gaussian Sigma
example_grid = example_grid.map(fpf_2_gaussian_sigma)
# convert flux_ratio to mag
example_grid.index = flux_ratio2mag(example_grid.index)
print(example_grid)
```
separation [FWHM] 1.0 2.0 3.0 4.0 5.0 \
flux_ratio
7.5 2.008370 5.394783 7.666906 9.483116 10.824543
8.0 2.062938 5.114021 7.368064 9.142008 10.514931
8.5 1.941543 4.635351 7.089383 8.675803 9.914510
9.0 1.877863 3.933987 6.378800 7.970985 8.895517
9.5 1.696185 3.065047 5.516982 7.075826 7.736946
10.0 1.305535 2.459888 4.664974 5.892618 6.279798
10.5 1.006822 1.969059 3.814828 4.732727 4.793840
11.0 0.835541 1.583005 3.104087 3.614169 3.347586
11.5 0.715274 1.312736 2.550214 2.695277 2.247183
12.0 0.644244 1.137645 2.142353 2.001203 1.440163
separation [FWHM] 6.0 7.0 8.0 9.0 10.0
flux_ratio
7.5 11.656312 12.590658 13.544075 14.641647 15.542328
8.0 11.256106 12.057257 13.028396 14.184473 15.096176
8.5 10.735447 11.499449 12.528246 13.502048 14.495459
9.0 9.881211 10.786152 11.771061 12.671839 13.661081
9.5 8.760552 9.767830 10.583830 11.624414 12.414664
10.0 7.313060 8.310939 9.226578 10.156350 10.616768
10.5 5.818723 6.785923 7.532526 8.282083 8.496921
11.0 4.325298 5.134021 5.488361 6.555087 6.320718
11.5 3.143441 3.624079 3.720914 4.880726 4.480109
12.0 2.208254 2.491451 2.312835 3.502819 3.011140
**_Quick Note_**: The plotting code below relies on `seaborn`, which is not a mandatory dependency of VIP. If you wish to plot the contrast grid results in a nice way, as below, you will need to `pip install seaborn`.
```python
def plot_contrast_grid(
contrast_grid_axis,
colorbar_axis,
contrast_grid):
c_bar_kargs = dict(
orientation = "vertical",
label = r"Confidence [$\sigma_{\mathcal{N}}$]")
heat = sns.heatmap(
contrast_grid,
vmax=2, vmin=7,
annot=True,
cmap="YlGnBu",
ax=contrast_grid_axis,
cbar_ax=colorbar_axis,
cbar_kws=c_bar_kargs)
ylabels = ['{:.1f}'.format(float(x.get_text()))
for x in heat.get_yticklabels()]
_=heat.set_yticklabels(ylabels)
xlabels = ['{:.1f}'.format(float(x.get_text()))
for x in heat.get_xticklabels()]
_=heat.set_xticklabels(xlabels)
```
```python
if applefy_installed:
if not no_sns:
fig = plt.figure(figsize=(8, 4))
gs0 = fig.add_gridspec(1, 1)
gs0.update(wspace=0.0, hspace=0.2)
gs1 = gridspec.GridSpecFromSubplotSpec(
1, 2, subplot_spec = gs0[0],
wspace=0.05, width_ratios=[1, 0.03])
# All axis we need
contrast_ax = fig.add_subplot(gs1[0])
colorbar_ax = fig.add_subplot(gs1[1])
# Plot the contrast grid
plot_contrast_grid(
contrast_grid_axis=contrast_ax,
colorbar_axis=colorbar_ax,
contrast_grid=example_grid)
colorbar_ax.yaxis.label.set_size(14)
contrast_ax.set_ylabel(
"Contrast - $c = f_p / f_*$ - [mag]", size=14)
contrast_ax.set_xlabel(
r"Separation [FWHM]", size=14)
contrast_ax.set_title(
"Contrast Grid with {} PCA components".format(ex_pc),
fontsize=16,
fontweight="bold",
y=1.03)
contrast_ax.tick_params(
axis='both',
which='major',
labelsize=12)
# Save the figure
fig.patch.set_facecolor('white')
else:
print("To be able to plot the contrast grid results, you will need seaborn installed (e.g. pip install seaborn).")
```
To be able to plot the contrast grid results, you will need seaborn installed (e.g. pip install seaborn).
#### 4.6.2.2. Visualizing the thresholded contrast curves
```python
if applefy_installed:
# compute the overall best contrast curve
overall_best = np.min(contrast_curves_grid.values, axis=1)
```
```python
# Find one color for each number of PCA components used.
if applefy_installed:
if no_sns:
colors = ['b','c','g','y','orange','r','m','k']
else:
# nicer plotting if seaborn installed
colors = sns.color_palette("rocket_r",
n_colors=len(contrast_curves.columns))
colors
```
```python
if applefy_installed:
separations_arcsec = contrast_curves_grid.reset_index(level=0).index
separations_FWHM = contrast_curves_grid.reset_index(level=1).index
```
```python
if applefy_installed:
# 1.) Create Plot Layout
fig = plt.figure(constrained_layout=False, figsize=(12, 8))
gs0 = fig.add_gridspec(1, 1)
axis_contrast_curvse = fig.add_subplot(gs0[0, 0])
# ---------------------- Create the Plot --------------------
i = 0 # color picker
for tmp_model in contrast_curves_grid.columns:
num_components = int(tmp_model[6:9])
tmp_flux_ratios = contrast_curves_grid.reset_index(
level=0)[tmp_model].values
axis_contrast_curvse.plot(separations_arcsec,
tmp_flux_ratios,
color = colors[i],
label=num_components)
i+=1
axis_contrast_curvse.set_yscale("log")
# ------------ Plot the overall best -------------------------
axis_contrast_curvse.plot(
separations_arcsec,
overall_best,
color = colors[-1],
lw=3,
ls="--",
label="Best")
# ------------- Double axis and limits -----------------------
lim_mag_y = (12.5, 8)
lim_arcsec_x = (0.15, 1.3)
sep_lambda_arcse = interpolate.interp1d(
separations_arcsec,
separations_FWHM,
fill_value='extrapolate')
axis_contrast_curvse_mag = axis_contrast_curvse.twinx()
axis_contrast_curvse_mag.plot(
separations_arcsec,
flux_ratio2mag(tmp_flux_ratios),
alpha=0.)
axis_contrast_curvse_mag.invert_yaxis()
axis_contrast_curvse_lambda = axis_contrast_curvse.twiny()
axis_contrast_curvse_lambda.plot(
separations_FWHM,
tmp_flux_ratios,
alpha=0.)
axis_contrast_curvse.grid(which='both')
axis_contrast_curvse_mag.set_ylim(*lim_mag_y)
axis_contrast_curvse.set_ylim(
mag2flux_ratio(lim_mag_y[0]),
mag2flux_ratio(lim_mag_y[1]))
axis_contrast_curvse.set_xlim(
*lim_arcsec_x)
axis_contrast_curvse_mag.set_xlim(
*lim_arcsec_x)
axis_contrast_curvse_lambda.set_xlim(
*sep_lambda_arcse(lim_arcsec_x))
# ----------- Labels and fontsizes --------------------------
axis_contrast_curvse.set_xlabel(
r"Separation [arcsec]", size=16)
axis_contrast_curvse_lambda.set_xlabel(
r"Separation [FWHM]", size=16)
axis_contrast_curvse.set_ylabel(
r"Planet-to-star flux ratio", size=16)
axis_contrast_curvse_mag.set_ylabel(
r"$\Delta$ Magnitude", size=16)
axis_contrast_curvse.tick_params(
axis='both', which='major', labelsize=14)
axis_contrast_curvse_lambda.tick_params(
axis='both', which='major', labelsize=14)
axis_contrast_curvse_mag.tick_params(
axis='both', which='major', labelsize=14)
axis_contrast_curvse_mag.set_title(
r"$5 \sigma_{\mathcal{N}}$ Contrast Curves",
fontsize=18, fontweight="bold", y=1.1)
# --------------------------- Legend -----------------------
handles, labels = axis_contrast_curvse.\
get_legend_handles_labels()
leg1 = fig.legend(handles, labels,
bbox_to_anchor=(0.22, -0.08),
fontsize=14,
title="# PCA components",
loc='lower left', ncol=8)
_=plt.setp(leg1.get_title(),fontsize=14)
```

As can be seen, the number of PCs has a clear effect on the achieved contrast.
Let's finally determine the optimal number of principal components as a function of separation, reusing the `components` list defined earlier:
```python
if applefy_installed:
plt.figure(figsize=(12, 8))
plt.plot(separations_arcsec,
np.array(components)[np.nanargmin(
contrast_curves_grid,
axis=1)],)
plt.title(r"Best number of PCA components",
fontsize=18, fontweight="bold", y=1.1)
plt.tick_params(axis='both', which='major', labelsize=14)
plt.xlabel("Separation [arcsec]", fontsize=16)
plt.ylabel("Number of PCA components", fontsize=16)
plt.grid()
ax2 = plt.twiny()
ax2.plot(separations_FWHM,
np.array(components)[
np.nanargmin(contrast_curves_grid, axis=1)],)
ax2.set_xlabel("Separation [FWHM]", fontsize=16)
ax2.tick_params(axis='both', which='major', labelsize=14)
```

|
vortex-exoplanetREPO_NAMEVIPPATH_START.@VIP_extracted@VIP-master@docs@source@tutorials@04_metrics.ipynb@.PATH_END.py
|
{
"filename": "data.md",
"repo_name": "dmentipl/plonk",
"repo_path": "plonk_extracted/plonk-main/docs/source/api/data.md",
"type": "Markdown"
}
|
# Data
```{eval-rst}
.. currentmodule:: plonk
```
SPH snapshot files are represented by the {class}`Snap` class. This object
contains a properties dictionary, particle arrays, which are lazily loaded from
file, sink arrays, and additional metadata. There are methods for accessing
arrays, sub-sets of particles, plotting, finding particle neighbours, etc.
{class}`Simulation` is an aggregation of the {class}`Snap` and pandas
{class}`DataFrame <pandas:pandas.DataFrame>`s to encapsulate all data within a single SPH simulation. In addition,
you can load auxiliary SPH simulation data, such as globally-averaged time
series data as pandas {class}`DataFrame <pandas:pandas.DataFrame>`s.
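A minimal usage sketch, assuming a Phantom HDF5 snapshot named `disc_00030.h5` (the file name and array key are illustrative):
```python
import plonk

snap = plonk.load_snap('disc_00030.h5')   # particle arrays are lazily loaded
print(snap.properties['time'])            # snapshot metadata
rho = snap['density']                     # a particle array, read on first access
```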
```{toctree}
data.snap
data.simulation
```
|
dmentiplREPO_NAMEplonkPATH_START.@plonk_extracted@plonk-main@docs@source@api@data.md@.PATH_END.py
|
{
"filename": "data_structures.py",
"repo_name": "yt-project/yt",
"repo_path": "yt_extracted/yt-main/yt/frontends/eagle/data_structures.py",
"type": "Python"
}
|
import numpy as np
import yt.units
from yt.fields.field_info_container import FieldInfoContainer
from yt.frontends.gadget.data_structures import GadgetHDF5Dataset
from yt.frontends.owls.fields import OWLSFieldInfo
from yt.utilities.on_demand_imports import _h5py as h5py
from .fields import EagleNetworkFieldInfo
class EagleDataset(GadgetHDF5Dataset):
_load_requirements = ["h5py"]
_particle_mass_name = "Mass"
_field_info_class: type[FieldInfoContainer] = OWLSFieldInfo
_time_readin_ = "Time"
def _parse_parameter_file(self):
# read values from header
hvals = self._get_hvals()
self.parameters = hvals
# set features common to OWLS and Eagle
self._set_owls_eagle()
# Set time from analytic solution for flat LCDM universe
a = hvals["ExpansionFactor"]
H0 = hvals["H(z)"] / hvals["E(z)"]
a_eq = (self.omega_matter / self.omega_lambda) ** (1.0 / 3)
t1 = 2.0 / (3.0 * np.sqrt(self.omega_lambda))
t2 = (a / a_eq) ** (3.0 / 2)
t3 = np.sqrt(1.0 + (a / a_eq) ** 3)
t = t1 * np.log(t2 + t3) / H0
self.current_time = t * yt.units.s
def _set_code_unit_attributes(self):
self._set_owls_eagle_units()
@classmethod
def _is_valid(cls, filename: str, *args, **kwargs) -> bool:
if cls._missing_load_requirements():
return False
need_groups = [
"Config",
"Constants",
"HashTable",
"Header",
"Parameters",
"RuntimePars",
"Units",
]
veto_groups = [
"SUBFIND",
"PartType0/ChemistryAbundances",
"PartType0/ChemicalAbundances",
]
valid = True
try:
fileh = h5py.File(filename, mode="r")
for ng in need_groups:
if ng not in fileh["/"]:
valid = False
for vg in veto_groups:
if vg in fileh["/"]:
valid = False
fileh.close()
except Exception:
valid = False
pass
return valid
class EagleNetworkDataset(EagleDataset):
_load_requirements = ["h5py"]
_particle_mass_name = "Mass"
_field_info_class = EagleNetworkFieldInfo
_time_readin = "Time"
@classmethod
def _is_valid(cls, filename: str, *args, **kwargs) -> bool:
if cls._missing_load_requirements():
return False
try:
fileh = h5py.File(filename, mode="r")
if (
"Constants" in fileh["/"].keys()
and "Header" in fileh["/"].keys()
and "SUBFIND" not in fileh["/"].keys()
and (
"ChemistryAbundances" in fileh["PartType0"].keys()
or "ChemicalAbundances" in fileh["PartType0"].keys()
)
):
fileh.close()
return True
fileh.close()
except Exception:
pass
return False
|
yt-projectREPO_NAMEytPATH_START.@yt_extracted@yt-main@yt@frontends@eagle@data_structures.py@.PATH_END.py
|
{
"filename": "micro_benchmarks.py",
"repo_name": "tensorflow/tensorflow",
"repo_path": "tensorflow_extracted/tensorflow-master/tensorflow/python/ops/numpy_ops/integration_test/benchmarks/micro_benchmarks.py",
"type": "Python"
}
|
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Micro benchmark.
bazel run -c opt --config=cuda \
//third_party/tensorflow/python/ops/numpy_ops/integration_test/benchmarks:micro_benchmarks -- \
--number=100 --repeat=100 \
--benchmark_filter=.
"""
import gc
import time
from absl import flags
from absl import logging
import numpy as np # pylint: disable=unused-import
import tensorflow.compat.v2 as tf
from tensorflow.python.ops.numpy_ops.integration_test.benchmarks import numpy_mlp
from tensorflow.python.ops.numpy_ops.integration_test.benchmarks import tf_numpy_mlp
FLAGS = flags.FLAGS
# Used instead of "import tensorflow(dot)experimental.numpy as tfnp" due to
# copybara issues.
tfnp = tf.experimental.numpy
flags.DEFINE_integer('repeat', 100, '#Measurements per benchmark.')
flags.DEFINE_integer('number', 100, '#Runs per a measure.')
class MicroBenchmarks(tf.test.Benchmark):
"""Main micro benchmark class."""
def _benchmark_and_report(
self,
name,
fn,
repeat=None,
number=None):
"""Run fn repeat * number times, report time, and return fastest time."""
# Can't make these default above since the flags may not have been parsed
# at module import time.
repeat = repeat or int(FLAGS.repeat)
number = number or int(FLAGS.number)
# Warmup
fn()
times = []
for _ in range(repeat):
gc.disable()
start = time.time()
for _ in range(number):
fn()
times.append(time.time() - start)
gc.enable()
gc.collect()
# Regular benchmark to report numbers.
fastest_time_us = min(times) * 1e6 / number
total_time = sum(times)
self.report_benchmark(name=name,
wall_time=total_time,
extras={'fastest_time_us': fastest_time_us})
return fastest_time_us
def benchmark_tf_np_mlp_inference_batch_1_cpu(self):
with tf.device('/CPU:0'):
model = tf_numpy_mlp.MLP()
x = tfnp.ones(shape=(1, 10)).astype(np.float32)
self._benchmark_and_report(self._get_name(), lambda: model.inference(x))
def benchmark_tf_np_tf_function_mlp_inference_batch_1_cpu(self):
with tf.device('/CPU:0'):
model = tf_numpy_mlp.MLP()
x = tfnp.ones(shape=(1, 10)).astype(np.float32)
self._benchmark_and_report(
self._get_name(), tf.function(lambda: model.inference(x)))
def benchmark_numpy_mlp_inference_batch_1_cpu(self):
model = numpy_mlp.MLP()
x = np.random.uniform(size=(1, 10)).astype(np.float32, copy=False)
self._benchmark_and_report(self._get_name(), lambda: model.inference(x))
def _benchmark_np_and_tf_np(self, name, op, args, repeat=None): # pylint: disable=redefined-builtin
fn = getattr(np, op)
assert fn is not None
np_time = self._benchmark_and_report(
'{}_numpy'.format(name), lambda: fn(*args), repeat=repeat)
fn = getattr(tfnp, op)
assert fn is not None
with tf.device('CPU:0'):
tf_time = self._benchmark_and_report(
'{}_tfnp_cpu'.format(name), lambda: fn(*args), repeat=repeat)
return np_time, tf_time
def _print_times(self, op, sizes, times):
# For easy reporting.
print('For np.{}:'.format(op))
print('{:<15} {:>11} {:>11}'.format('Size', 'NP time', 'TF NP Time'))
for size, (np_time, tf_time) in zip(sizes, times):
print('{:<15} {:>10.5}us {:>10.5}us'.format(
str(size), np_time, tf_time))
print()
def _benchmark_np_and_tf_np_unary(self, op):
sizes = [(100,), (10000,), (1000000,)]
repeats = [FLAGS.repeat] * 2 + [10]
times = []
for size, repeat in zip(sizes, repeats):
x = np.random.uniform(size=size).astype(np.float32, copy=False)
name = '{}_{}'.format(self._get_name(), size)
times.append(self._benchmark_np_and_tf_np(name, op, (x,), repeat))
self._print_times(op, sizes, times)
def benchmark_count_nonzero(self):
self._benchmark_np_and_tf_np_unary('count_nonzero')
def benchmark_log(self):
self._benchmark_np_and_tf_np_unary('log')
def benchmark_exp(self):
self._benchmark_np_and_tf_np_unary('exp')
def benchmark_tanh(self):
self._benchmark_np_and_tf_np_unary('tanh')
def benchmark_matmul(self):
sizes = [(2, 2), (10, 10), (100, 100), (200, 200), (1000, 1000)]
# Override repeat flag since this can be very slow.
repeats = [FLAGS.repeat] * 3 + [50, 10]
times = []
for size, repeat in zip(sizes, repeats):
x = np.random.uniform(size=size).astype(np.float32, copy=False)
name = '{}_{}'.format(self._get_name(), size)
times.append(
self._benchmark_np_and_tf_np(name, 'matmul', (x, x), repeat=repeat))
self._print_times('matmul', sizes, times)
if __name__ == '__main__':
logging.set_verbosity(logging.WARNING)
tf.enable_v2_behavior()
tf.test.main()
|
tensorflowREPO_NAMEtensorflowPATH_START.@tensorflow_extracted@tensorflow-master@tensorflow@python@ops@numpy_ops@integration_test@benchmarks@micro_benchmarks.py@.PATH_END.py
|
{
"filename": "bug_report.md",
"repo_name": "fritz-marshal/fritz",
"repo_path": "fritz_extracted/fritz-main/.github/ISSUE_TEMPLATE/bug_report.md",
"type": "Markdown"
}
|
---
name: Bug report
about: Create a report to help us improve
title: ''
labels: bug
assignees: ''
---
Please fill out relevant sections below; remove those that are unused.
**Describe the bug**
A clear and concise description of what the bug is.
**To Reproduce**
Steps to reproduce the behavior:
1. Go to '...'
2. Click on '....'
3. Scroll down to '....'
4. See error
**Expected behavior**
A clear and concise description of what you expected to happen.
**Screenshots**
If applicable, add screenshots to help explain your problem.
**Platform information:**
- Fritz version: (find this in the [About](https://fritz.science/about) page)
- Interface
- [ ] I am using the API
- [ ] I am browsing on a desktop
- [ ] I am browsing on a phone
- [ ] I am browsing on a tablet
**Additional context**
Add any other context about the problem here.
|
fritz-marshalREPO_NAMEfritzPATH_START.@fritz_extracted@fritz-main@.github@ISSUE_TEMPLATE@bug_report.md@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "rometsch/fargocpt",
"repo_path": "fargocpt_extracted/fargocpt-master/python_module/fargocpt/__init__.py",
"type": "Python"
}
|
from .run import run
try:
from .data import Loader
except ImportError:
pass
try:
from .overview import Overview
except ImportError:
pass
|
rometschREPO_NAMEfargocptPATH_START.@fargocpt_extracted@fargocpt-master@python_module@fargocpt@__init__.py@.PATH_END.py
|
{
"filename": "test_simulations_gs_fcoll.py",
"repo_name": "mirochaj/ares",
"repo_path": "ares_extracted/ares-main/tests/test_simulations_gs_fcoll.py",
"type": "Python"
}
|
"""
test_gs_backcompat.py
Author: Jordan Mirocha
Affiliation: UCLA
Created on: Wed Apr 4 09:37:25 PDT 2018
Description: Make sure changes in input parameters result in changes in
the signal! Also a test of backward compatibility.
"""
import ares
import numpy as np
def test():
oldp = ['fstar', 'fX', 'Tmin', 'Nion', 'Nlw']
newp = ['pop_fstar{0}', 'pop_rad_yield{1}', 'pop_Tmin{0}',
'pop_rad_yield{2}', 'pop_rad_yield{0}']
oldv = [(0.05, 0.2), (0.1, 1.), (1e3, 1e4), (1e3, 1e4), (1e3, 1e4)]
newv = [(0.05, 0.2), (2.6e38, 2.6e39), (1e3, 1e4), (1e3, 1e4), (1e3, 1e4)]
pars = {'old': oldp, 'new': newp}
vals = {'old': oldv, 'new': newv}
kw = ares.util.ParameterBundle('speed:careless')
for h, approach in enumerate(['new', 'old']):
ax = None
for i, par in enumerate(pars[approach]):
data = []
for val in vals[approach][i]:
p = {par:val}
p.update(kw)
sim = ares.simulations.Global21cm(**p)
sim.run()
#ax, zax = sim.GlobalSignature(ax=ax)
data.append((sim.history['z'], sim.history['dTb']))
for j in range(len(data) - 1):
z1, T1 = data[j]
z2, T2 = data[j+1]
# In this case, the sims were definitely different, since the
# only way to change the number of redshift points is through
# real-time timestep adjustment (driven by sources)
if T1.size != T2.size:
continue
neq = np.not_equal(T1, T2)
assert np.any(neq), "Changes in par={} had no effect!".format(par)
if __name__ == '__main__':
test()
|
mirochajREPO_NAMEaresPATH_START.@ares_extracted@ares-main@tests@test_simulations_gs_fcoll.py@.PATH_END.py
|
{
"filename": "test_util.py",
"repo_name": "casacore/python-casacore",
"repo_path": "python-casacore_extracted/python-casacore-master/tests/test_util.py",
"type": "Python"
}
|
import unittest
from pyrap.util import substitute
def f1(arg):
a = 3
s = substitute('subs as $a $arg', locals=locals())
print(a, arg, s)
class TestUtil(unittest.TestCase):
def test_util(self):
a = 1
b = 2
p = "$((a+b)*(a+b))"
s = substitute(p, locals=locals())
print("a=%d, b=%d, %s => %s" % (a, b, p, s))
f1(23)
f1('xyz')
def test_substitute(self):
a = 2
b = 3
c = "xyz"
d1 = True
s1 = (1, 2, 3)
s2 = ['ab', 'cde', 'f', 'ghij']
self.assertTrue(substitute('$a $b $c $d1') == '2 3 "xyz" T')
self.assertTrue(substitute('$(a) $(b) $(c) $(d1)') == '2 3 "xyz" T')
self.assertTrue(substitute('$b $0 $a "$a" $b') == '3 $0 2 "$a" 3')
self.assertTrue(substitute('$(a+b)') == '5')
self.assertTrue(substitute('$((a+b)*(a+b))') == '25')
self.assertTrue(substitute('$((a+b)*(a+c))') == '$((a+b)*(a+c))')
self.assertTrue(substitute('"$(a+b)"') == '"$(a+b)"')
self.assertTrue(substitute('\\$(a+b) \\\\$a \\$a') ==
'\\$(a+b) \\\\2 \\$a')
self.assertTrue(substitute('$(a+b)+$a') == '5+2')
self.assertTrue(substitute('$((a+b)+a)') == '7')
self.assertTrue(substitute('$((a+b)*(a+b))') == '25')
self.assertTrue(substitute('$(len("ab cd( de"))') == '9')
self.assertTrue(substitute(
' $s1 $s2 ') == ' [1,2,3] ["ab","cde","f","ghij"] ')
|
casacoreREPO_NAMEpython-casacorePATH_START.@python-casacore_extracted@python-casacore-master@tests@test_util.py@.PATH_END.py
|