metadata
dict | text
stringlengths 0
40.6M
| id
stringlengths 14
255
|
|---|---|---|
{
"filename": "scConvolution.py",
"repo_name": "akleroy/phangs_imaging_scripts",
"repo_path": "phangs_imaging_scripts_extracted/phangs_imaging_scripts-master/phangsPipeline/scConvolution.py",
"type": "Python"
}
|
import logging
import numpy as np
import astropy.units as u
from astropy.io import fits
from astropy.convolution import Box1DKernel
from astropy.convolution import convolve, convolve_fft
from radio_beam import Beam
from spectral_cube import SpectralCube, LazyMask, Projection
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
def coverage_collapser(coveragecube,
                       coverage2dfile=None,
                       overwrite=False):
    """Collapse a 3-d coverage cube to a 2-d coverage map and write it.

    The cube is summed along the spectral (first) axis and divided by
    the channel count, so each map pixel holds the fraction of channels
    with coverage. DATAMIN/DATAMAX are stamped into the output header.

    Parameters
    ----------
    coveragecube : SpectralCube
        Coverage cube (values ~0/1 per pixel per channel).
    coverage2dfile : str
        FITS file name for the collapsed map.
    overwrite : bool
        Passed through to writeto.
    """
    nchan = coveragecube.shape[0]
    collapsed = coveragecube.sum(axis=0)
    frac_map = np.array(collapsed, dtype=np.float32) / nchan
    # Record the data range in the (shared) 2-d header before writing.
    hdr = collapsed.header
    hdr['DATAMIN'] = np.nanmin(frac_map)
    hdr['DATAMAX'] = np.nanmax(frac_map)
    fits.PrimaryHDU(frac_map, hdr).writeto(coverage2dfile,
                                           overwrite=overwrite)
def smooth_cube(
        incube=None,
        outfile=None,
        angular_resolution=None,
        linear_resolution=None,
        distance=None,
        velocity_resolution=None,
        nan_treatment='interpolate',  # can also be 'fill'
        tol=None,
        make_coverage_cube=False,
        collapse_coverage=False,
        coveragefile=None,
        coverage2dfile=None,
        dtype=np.float32,
        overwrite=True
        ):
    """
    Smooth an input cube to coarser angular or spectral resolution.

    This lightly wraps spectral_cube and some of the error checking is
    left to that package.

    Parameters
    ----------
    incube : SpectralCube or str
        Cube to smooth, as an object or a FITS file name. A 2-d FITS
        image is also accepted and handled as a Projection.
    outfile : str, optional
        If set, write the smoothed data to this FITS file.
    angular_resolution : Quantity or str, optional
        Target (round) beam FWHM. Mutually exclusive with
        linear_resolution.
    linear_resolution : Quantity or str, optional
        Target physical resolution; requires distance.
    distance : Quantity or str, optional
        Distance used to convert linear_resolution to an angle.
    velocity_resolution : Quantity or str, optional
        Width for a boxcar spectral smooth. The data are not
        downsampled and the spectral header is not updated.
    nan_treatment : str
        Passed to the convolution ('interpolate' or 'fill').
    tol : float, optional
        Fractional tolerance. When the target beam is within tol of the
        original beam, we just copy.
    make_coverage_cube : bool
        Also track coverage: original (finite) coverage starts at 1.0
        and the output shows the fraction of finite pixels.
    collapse_coverage : bool
        Also write a 2-d collapsed coverage map (cube input only).
    coveragefile, coverage2dfile : str, optional
        Output files for the coverage products.
    dtype : numpy dtype
        Data type for the written output.
    overwrite : bool
        Overwrite existing output files.

    Returns
    -------
    The smoothed SpectralCube/Projection, or None on error.
    """

    # &%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%
    # Error checking
    # &%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%

    # Require a valid cube or map input
    twod = False
    if isinstance(incube, SpectralCube):
        cube = incube
    elif isinstance(incube, str):
        hdulist = fits.open(incube)
        if hdulist[0].header['NAXIS'] == 2:
            cube = Projection.from_hdu(hdulist)
            twod = True
        else:
            cube = SpectralCube.read(incube)
    else:
        # Bug fix: previously execution fell through after this error and
        # crashed below with a NameError on "cube". Bail out explicitly.
        logger.error("Input must be a SpectralCube object or a filename.")
        return(None)

    # Allow huge operations. If the speed or segfaults become a huge
    # problem, we will adjust our strategy here.
    cube.allow_huge_operations = True

    # Check that only one target scale is set
    if (angular_resolution is not None) and (linear_resolution is not None):
        # Bug fix: logger.error was previously given two positional
        # arguments; logging treats the second as a %-format argument and
        # garbles the message. Use one concatenated string.
        logger.error('Only one of angular_resolution or '
                     'linear_resolution can be set')
        return(None)

    # Work out the target angular resolution
    if angular_resolution is not None:
        if type(angular_resolution) is str:
            angular_resolution = u.Quantity(angular_resolution)

    if linear_resolution is not None:
        if distance is None:
            logger.error('Convolution to linear resolution requires a distance.')
            return(None)
        if type(distance) is str:
            distance = u.Quantity(distance)
        if type(linear_resolution) is str:
            linear_resolution = u.Quantity(linear_resolution)
        # Small-angle conversion from physical scale to angle.
        angular_resolution = (linear_resolution / distance * u.rad).to(u.arcsec)
        dist_mpc_val = float(distance.to(u.pc).value) / 1e6
        cube._header.append(('DIST_MPC', dist_mpc_val, 'Used in convolution'))

    if tol is None:
        tol = 0.0

    # &%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%
    # Convolution to coarser beam
    # &%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%

    if angular_resolution is not None:
        logger.info("... convolving from beam: "+str(cube.beam))
        target_beam = Beam(major=angular_resolution,
                           minor=angular_resolution,
                           pa=0 * u.deg)
        logger.info("... convolving to beam: "+str(target_beam))

        new_major = float(target_beam.major.to(u.arcsec).value)
        old_major = float(cube.beam.major.to(u.arcsec).value)
        delta = (new_major-old_major)/old_major
        logger.info("... fractional change: "+str(delta))

        # NOTE(review): the coverage product is only constructed inside
        # this angular-resolution branch, so make_coverage_cube combined
        # with *only* velocity_resolution would hit an undefined name
        # below — confirm intended usage.
        if make_coverage_cube:
            if twod:
                coverage = Projection(np.isfinite(hdulist[0].data)*1.0,
                                      wcs=cube.wcs.celestial,
                                      header=cube.header,
                                      beam=cube.beam)
            else:
                coverage = SpectralCube(
                    np.isfinite(cube.unmasked_data[:])*1.0,
                    wcs=cube.wcs,
                    header=cube.header,
                    meta={'BUNIT': ' ', 'BTYPE': 'Coverage'})
                coverage = \
                    coverage.with_mask(LazyMask(np.isfinite, cube=coverage))

            # Allow huge operations. If the speed or segfaults become a huge
            # problem, we will adjust our strategy here.
            coverage.allow_huge_operations = True

        if delta > tol:
            logger.info("... proceeding with convolution.")
            if twod:
                cube = cube.convolve_to(target_beam,
                                        nan_treatment=nan_treatment,
                                        allow_huge=True)
            else:
                cube = cube.convolve_to(target_beam,
                                        nan_treatment=nan_treatment)
            if make_coverage_cube:
                if twod:
                    coverage = coverage.convolve_to(target_beam,
                                                    nan_treatment=nan_treatment,
                                                    allow_huge=True)
                else:
                    coverage = coverage.convolve_to(target_beam,
                                                    nan_treatment=nan_treatment)

        if np.abs(delta) < tol:
            logger.info("... current resolution meets tolerance.")

        if delta < -1.0*tol:
            logger.info("... resolution cannot be matched. Returning")
            return(None)

    # &%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%
    # Spectral convolution
    # &%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%

    # This is only a boxcar smooth right now and does not downsample
    # or update the header.

    if velocity_resolution is not None and twod == False:
        if type(velocity_resolution) is str:
            velocity_resolution = u.Quantity(velocity_resolution)

        # Bug fix: the original called scdr.channel_width(cube), but no
        # "scdr" module is imported in this file (NameError at runtime).
        # Derive the channel width from the spectral axis, assuming
        # uniform spacing -- TODO confirm against scDerivativeRoutines.
        spec_axis = cube.spectral_axis
        dv = np.abs(spec_axis[1] - spec_axis[0])
        nChan = (velocity_resolution / dv).to(u.dimensionless_unscaled).value
        if nChan > 1:
            cube = cube.spectral_smooth(Box1DKernel(nChan))
            if make_coverage_cube:
                coverage = coverage.spectral_smooth(Box1DKernel(nChan))

    # &%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%
    # Write or return as requested
    # &%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%

    if outfile is not None:
        # cube.write(outfile, overwrite=overwrite)
        hdu = fits.PrimaryHDU(np.array(cube.filled_data[:], dtype=dtype),
                              header=cube.header)
        hdu.writeto(outfile, overwrite=overwrite)
        if make_coverage_cube:
            if coveragefile is not None:
                hdu = fits.PrimaryHDU(
                    np.array(coverage.filled_data[:], dtype=dtype),
                    header=coverage.header)
                hdu.writeto(coveragefile, overwrite=overwrite)
                if collapse_coverage and twod == False:
                    # Derive a default 2-d output name from the cube name.
                    if coveragefile and not coverage2dfile:
                        coverage2dfile = coveragefile.replace('.fits', '2d.fits')
                    coverage_collapser(coverage,
                                       coverage2dfile=coverage2dfile,
                                       overwrite=overwrite)
                # coverage.write(coveragefile, overwrite=overwrite)

    return(cube)
|
akleroyREPO_NAMEphangs_imaging_scriptsPATH_START.@phangs_imaging_scripts_extracted@phangs_imaging_scripts-master@phangsPipeline@scConvolution.py@.PATH_END.py
|
{
"filename": "visitdepth_batch.py",
"repo_name": "lsst/rubin_sim",
"repo_path": "rubin_sim_extracted/rubin_sim-main/rubin_sim/maf/batches/visitdepth_batch.py",
"type": "Python"
}
|
"""Sets of metrics to look at general sky coverage -
nvisits/coadded depth/Teff.
"""
__all__ = ("nvisitsM5Maps", "tEffMetrics", "nvisitsPerNight", "nvisitsPerSubset")
import copy
import numpy as np
import rubin_sim.maf.metric_bundles as mb
import rubin_sim.maf.metrics as metrics
import rubin_sim.maf.slicers as slicers
import rubin_sim.maf.stackers as stackers
import rubin_sim.maf.utils as mafUtils
from .col_map_dict import col_map_dict
from .common import filter_list, standard_summary
def nvisitsM5Maps(
    colmap=None,
    runName="opsim",
    extraSql=None,
    extraInfoLabel=None,
    slicer=None,
    runLength=10.0,
):
    """Generate maps of the number of visits and coadded depth
    (with and without dust extinction) in all bands and per filter.

    Parameters
    ----------
    colmap : `dict`, optional
        A dictionary with a mapping of column names.
    runName : `str`, optional
        The name of the simulated survey.
    extraSql : `str`, optional
        Additional constraint to add to any sql constraints.
    extraInfoLabel : `str`, optional
        Additional info_label to add before any below (i.e. "WFD").
    slicer : `rubin_sim.maf.slicer` or None, optional
        Optionally, use something other than an nside=64 healpix slicer
    runLength : `float`, optional
        Length of the simulated survey, for scaling values for the plot limits.

    Returns
    -------
    metric_bundleDict : `dict` of `maf.MetricBundle`
    """
    if colmap is None:
        colmap = col_map_dict()
    bundleList = []

    # Display subgroup defaults to "All visits" when no label is given.
    subgroup = extraInfoLabel
    if subgroup is None:
        subgroup = "All visits"

    raCol = colmap["ra"]
    decCol = colmap["dec"]
    degrees = colmap["raDecDeg"]
    # Set up basic all and per filter sql constraints.
    filterlist, colors, orders, sqls, info_label = filter_list(
        all=True, extra_sql=extraSql, extra_info_label=extraInfoLabel
    )

    # Set up some values to make nicer looking plots.
    benchmarkVals = mafUtils.scale_benchmarks(runLength, benchmark="design")
    # Check that nvisits is not set to zero (for very short run length).
    for f in benchmarkVals["nvisits"]:
        if benchmarkVals["nvisits"][f] == 0:
            print("Updating benchmark nvisits value in %s to be nonzero" % (f))
            benchmarkVals["nvisits"][f] = 1
    benchmarkVals["coaddedDepth"] = mafUtils.calc_coadded_depth(
        benchmarkVals["nvisits"], benchmarkVals["singleVisitDepth"]
    )
    # Scale the n_visit ranges for the runLength.
    # These per-filter plot ranges are tuned for a 10-year run and are
    # scaled linearly below for other run lengths.
    nvisitsRange = {
        "u": [20, 80],
        "g": [50, 150],
        "r": [100, 250],
        "i": [100, 250],
        "z": [100, 300],
        "y": [100, 300],
        "all": [700, 1200],
    }
    scale = runLength / 10.0
    for f in nvisitsRange:
        for i in [0, 1]:
            nvisitsRange[f][i] = int(np.floor(nvisitsRange[f][i] * scale))

    # Generate Nvisit maps in all and per filters
    displayDict = {"group": "Nvisits Maps", "subgroup": subgroup}
    metric = metrics.CountMetric(colmap["mjd"], metric_name="NVisits", units="")
    if slicer is None:
        slicer = slicers.HealpixSlicer(nside=64, lat_col=decCol, lon_col=raCol, lat_lon_deg=degrees)
        # The dust-extincted maps need a slicer that does NOT cache.
        slicerDust = slicers.HealpixSlicer(
            nside=64,
            lat_col=decCol,
            lon_col=raCol,
            lat_lon_deg=degrees,
            use_cache=False,
        )
    else:
        # If there is already a slicer set up, ensure we have one for dust
        # which is NOT using cache.
        slicerDust = copy.deepcopy(slicer)
        slicerDust.use_cache = False

    for f in filterlist:
        sql = sqls[f]
        displayDict["caption"] = f"Number of visits per healpix in {info_label[f]}."
        displayDict["order"] = orders[f]
        # Coarser histogram bins for the combined "all" counts.
        bin_size = 2
        if f == "all":
            bin_size = 5
        plotDict = {
            "x_min": nvisitsRange[f][0],
            "x_max": nvisitsRange[f][1],
            "color_min": nvisitsRange[f][0],
            "color_max": nvisitsRange[f][1],
            "bin_size": bin_size,
            "color": colors[f],
        }
        bundle = mb.MetricBundle(
            metric,
            slicer,
            sql,
            info_label=info_label[f],
            display_dict=displayDict,
            plot_dict=plotDict,
            summary_metrics=standard_summary(),
        )
        bundleList.append(bundle)

    # Generate Coadded depth maps per filter
    displayDict = {"group": "Coadded M5 Maps", "subgroup": subgroup}
    metric = metrics.Coaddm5Metric(m5_col=colmap["fiveSigmaDepth"], metric_name="CoaddM5")
    for f in filterlist:
        # Skip "all" for coadded depth.
        if f == "all":
            continue
        sql = sqls[f]
        displayDict["caption"] = f"Coadded depth per healpix in {info_label[f]}."
        displayDict["caption"] += " More positive numbers indicate fainter limiting magnitudes."
        displayDict["order"] = orders[f]
        plotDict = {
            "percentile_clip": 98,
            "color": colors[f],
        }
        bundle = mb.MetricBundle(
            metric,
            slicer,
            sql,
            info_label=info_label[f],
            display_dict=displayDict,
            plot_dict=plotDict,
            summary_metrics=standard_summary(),
        )
        bundleList.append(bundle)

    # Add Coadded depth maps per filter WITH extragalactic extinction added
    displayDict = {"group": "Extragalactic Coadded M5 Maps", "subgroup": subgroup}
    metric = metrics.ExgalM5(m5_col=colmap["fiveSigmaDepth"], metric_name="Exgal_CoaddM5")
    for f in filterlist:
        # Skip "all" for coadded depth.
        if f == "all":
            continue
        sql = sqls[f]
        displayDict["caption"] = (
            "Coadded depth per healpix for extragalactic purposes "
            "(i.e. combined with dust extinction maps), "
            f"in {info_label[f]}."
        )
        displayDict["caption"] += " More positive numbers indicate fainter limiting magnitudes."
        displayDict["order"] = orders[f]
        plotDict = {
            "percentile_clip": 90,
            "color": colors[f],
        }
        # Uses the non-caching slicer (dust maps vary per pixel).
        bundle = mb.MetricBundle(
            metric,
            slicerDust,
            sql,
            info_label=info_label[f],
            display_dict=displayDict,
            plot_dict=plotDict,
            summary_metrics=standard_summary(),
        )
        bundleList.append(bundle)

    # Set the run_name for all bundles and return the bundleDict.
    for b in bundleList:
        b.set_run_name(runName)
    return mb.make_bundles_dict_from_list(bundleList)
def tEffMetrics(
    colmap=None,
    runName="opsim",
    extraSql=None,
    extraInfoLabel=None,
    slicer=None,
):
    """Generate a series of Teff metrics.
    Teff total, per night, and sky maps (all and per filter).

    Parameters
    ----------
    colmap : `dict`, optional
        A dictionary with a mapping of column names.
    runName : `str`, optional
        The name of the simulated survey.
    extraSql : `str`, optional
        Additional constraint to add to any sql constraints.
    extraInfoLabel : `str`, optional
        Additional info_label to add before any below (i.e. "WFD").
    slicer : `rubin_sim.maf.BaseSlicer` or None, optional
        Optionally, use something other than an nside=64 healpix slicer

    Returns
    -------
    metric_bundleDict : `dict` of `maf.MetricBundle`
    """
    if colmap is None:
        colmap = col_map_dict()
    bundleList = []

    # Display subgroup defaults to "All visits" when no label is given.
    subgroup = extraInfoLabel
    if subgroup is None:
        subgroup = "All visits"

    raCol = colmap["ra"]
    decCol = colmap["dec"]
    degrees = colmap["raDecDeg"]
    # Keep the caller's slicer (if any) for the sky maps; the summary
    # bundles below rebind `slicer` to UniSlicers.
    if slicer is not None:
        skyslicer = slicer
    else:
        skyslicer = slicers.HealpixSlicer(nside=64, lat_col=decCol, lon_col=raCol, lat_lon_deg=degrees)

    # Set up basic all and per filter sql constraints.
    filterlist, colors, orders, sqls, info_label = filter_list(
        all=True, extra_sql=extraSql, extra_info_label=extraInfoLabel
    )
    if info_label["all"] is None:
        info_label["all"] = "All visits"

    # Total Teff and normalized Teff.
    displayDict = {"group": "T_eff Summary", "subgroup": subgroup}
    displayDict["caption"] = "Total effective time of the survey (see Teff metric)."
    displayDict["order"] = 0
    metric = metrics.SumMetric(col="t_eff", metric_name="Total Teff")
    slicer = slicers.UniSlicer()
    bundle = mb.MetricBundle(
        metric,
        slicer,
        constraint=sqls["all"],
        display_dict=displayDict,
        info_label=info_label["all"],
    )
    bundleList.append(bundle)

    displayDict["caption"] = "Normalized total effective time of the survey (see Teff metric)."
    displayDict["order"] = 1
    metric = metrics.MeanMetric(col="t_eff", metric_name="Normalized Teff")
    # The stacker adds the (normalized) t_eff column the metric reads.
    normalized_teff_stacker = stackers.TeffStacker(normed=True)
    slicer = slicers.UniSlicer()
    bundle = mb.MetricBundle(
        metric,
        slicer,
        constraint=sqls["all"],
        stacker_list=[normalized_teff_stacker],
        display_dict=displayDict,
        info_label=info_label["all"],
    )
    bundleList.append(bundle)

    # Generate Teff maps in all and per filters
    displayDict = {"group": "T_eff Maps", "subgroup": subgroup}
    metric = metrics.MeanMetric(col="t_eff", metric_name="Normalized Teff")
    normalized_teff_stacker = stackers.TeffStacker(normed=True)
    for f in filterlist:
        displayDict["caption"] = "Normalized effective time of the survey, for %s" % info_label[f]
        displayDict["order"] = orders[f]
        plotDict = {"color": colors[f]}
        bundle = mb.MetricBundle(
            metric,
            skyslicer,
            sqls[f],
            stacker_list=[normalized_teff_stacker],
            info_label=info_label[f],
            display_dict=displayDict,
            plot_dict=plotDict,
            summary_metrics=standard_summary(),
        )
        bundleList.append(bundle)

    # Set the run_name for all bundles and return the bundleDict.
    for b in bundleList:
        b.set_run_name(runName)
    return mb.make_bundles_dict_from_list(bundleList)
def nvisitsPerNight(
    colmap=None,
    runName="opsim",
    binNights=1,
    extraSql=None,
    extraInfoLabel=None,
    subgroup=None,
):
    """Count the number of visits per night through the survey.

    Parameters
    ----------
    colmap : `dict` or None, optional
        A dictionary with a mapping of column names.
    runName : `str`, optional
        The name of the simulated survey. Default is "opsim".
    binNights : `int`, optional
        Number of nights to count in each bin.
    extraSql : `str` or None, optional
        Additional constraint to add to any sql constraints.
    extraInfoLabel : `str` or None, optional
        Additional info_label to add before any below (i.e. "WFD").
    subgroup : `str` or None, optional
        Use this for the 'subgroup' in the display_dict, instead of info_label.

    Returns
    -------
    metric_bundleDict : `dict` of `maf.MetricBundle`
    """
    if colmap is None:
        colmap = col_map_dict()

    # Resolve the display subgroup: explicit subgroup wins, then the
    # extra info label, then a generic fallback.
    if subgroup is None:
        subgroup = "All visits" if extraInfoLabel is None else extraInfoLabel

    # Caption/info label: prefer the info label, then the sql, then a default.
    if extraInfoLabel is not None:
        infoCaption = extraInfoLabel
    elif extraSql is not None:
        infoCaption = extraSql
    else:
        infoCaption = "all visits"

    displayDict = {
        "group": "Nvisits Per Night",
        "subgroup": subgroup,
        "caption": "Number of visits per night for %s." % (infoCaption),
        "order": 0,
    }
    nightly_counts = mb.MetricBundle(
        metrics.CountMetric(colmap["mjd"], metric_name="Nvisits"),
        slicers.OneDSlicer(slice_col_name=colmap["night"], bin_size=binNights),
        extraSql,
        info_label=infoCaption,
        display_dict=displayDict,
        summary_metrics=standard_summary(),
    )
    nightly_counts.set_run_name(runName)
    return mb.make_bundles_dict_from_list([nightly_counts])
def nvisitsPerSubset(
    colmap=None,
    runName="opsim",
    binNights=1,
    constraint=None,
    footprintConstraint=None,
    extraInfoLabel=None,
):
    """Look at the distribution of a given sql constraint or
    footprint constraint's visits, total number and distribution over time
    (# per night), if possible.

    Parameters
    ----------
    colmap : `dict` or None, optional
        A dictionary with a mapping of column names.
    runName : `str`, optional
        The name of the simulated survey.
    binNights : `int`, optional
        Number of nights to count in each bin.
    constraint : `str` or None, optional
        SQL constraint to add to all metrics.
        This would be the way to select only a given "Note".
    footprintConstraint : `np.ndarray` or None, optional
        Footprint to look for visits within
        (and then identify via WFDlabelStacker).
        The footprint = a full length healpix array, filled with 0/1 values.
    extraInfoLabel : `str` or None, optional
        Additional info_label to add before any below (i.e. "WFD").

    Returns
    -------
    metric_bundleDict : `dict` of `rubin_sim.maf.MetricBundle`
    """
    if colmap is None:
        colmap = col_map_dict()
    bdict = {}
    bundleList = []

    if footprintConstraint is None:
        if extraInfoLabel is None and constraint is not None:
            # Bug fix: the original did `extraInfoLabel += " %s" % constraint`
            # here, which always raised TypeError because extraInfoLabel is
            # None on this branch. Use the constraint itself as the label.
            extraInfoLabel = "%s" % constraint
        # Nvisits per night, this constraint.
        bdict.update(
            nvisitsPerNight(
                colmap=colmap,
                runName=runName,
                binNights=binNights,
                extraSql=constraint,
                extraInfoLabel=extraInfoLabel,
            )
        )
        # Nvisits total, this constraint.
        metric = metrics.CountMetric(colmap["mjd"], metric_name="Nvisits")
        slicer = slicers.UniSlicer()
        displayDict = {
            "group": "Nvisit Summary",
            "subgroup": extraInfoLabel,
        }
        displayDict["caption"] = f"Total number of visits for {extraInfoLabel}."
        bundle = mb.MetricBundle(
            metric,
            slicer,
            constraint,
            info_label=extraInfoLabel,
            display_dict=displayDict,
        )
        bundleList.append(bundle)

    # Or count the total number of visits that contribute
    # towards a given footprint
    if footprintConstraint is not None:
        # Set up a stacker to use this footprint to label visits
        if extraInfoLabel is None:
            extraInfoLabel = "Footprint"
        footprintStacker = stackers.WFDlabelStacker(
            footprint=footprintConstraint,
            fp_threshold=0.4,
            area_id_name=extraInfoLabel,
            exclude_dd=True,
        )
        metric = metrics.CountSubsetMetric(
            col="area_id", subset=extraInfoLabel, units="#", metric_name="Nvisits"
        )
        slicer = slicers.UniSlicer()
        displayDict = {
            "group": "Nvisit Summary",
            "subgroup": extraInfoLabel,
            "caption": f"Visits within footprint {extraInfoLabel}.",
        }
        bundle = mb.MetricBundle(
            metric,
            slicer,
            constraint,
            stacker_list=[footprintStacker],
            info_label=extraInfoLabel,
            display_dict=displayDict,
        )
        bundleList.append(bundle)

    for b in bundleList:
        b.set_run_name(runName)
    bdict.update(mb.make_bundles_dict_from_list(bundleList))
    return bdict
|
lsstREPO_NAMErubin_simPATH_START.@rubin_sim_extracted@rubin_sim-main@rubin_sim@maf@batches@visitdepth_batch.py@.PATH_END.py
|
{
"filename": "plots.py",
"repo_name": "RuthAngus/flicker",
"repo_path": "flicker_extracted/flicker-master/code/plots.py",
"type": "Python"
}
|
import numpy as np
import matplotlib.pyplot as plt
import triangle
import scipy.interpolate as spi
import h5py
import sys
from noisy_plane import model1
from model import load_data
def interp(xold, yold, xnew, s):
    """Evaluate a smoothing B-spline fit of (xold, yold) at xnew.

    s is the smoothing factor handed straight to scipy's splrep;
    s=0 forces interpolation through the input points.
    """
    spline = spi.splrep(xold, yold, s=s)
    return spi.splev(xnew, spline, der=0)
def fit_straight_line(x, y, yerr):
    """Weighted least-squares fit of y = c0 + c1*x.

    Returns [intercept, slope].

    Bug fix: the original built C = diag(yerr**2) and used C directly in
    the normal equations, i.e. it weighted by the covariance instead of
    its inverse, giving the *noisiest* points the most weight. The
    correct chi-square solution uses inverse-variance weights
    (A^T C^-1 A) c = A^T C^-1 y. For uniform yerr the two agree, which is
    why the bug could go unnoticed.
    """
    AT = np.vstack((np.ones_like(x), x))
    # Diagonal inverse covariance: Cinv[i, i] = 1 / yerr[i]**2.
    Cinv = np.eye(len(y)) / yerr**2
    ATCA = np.dot(np.dot(AT, Cinv), AT.T)
    ATCy = np.dot(np.dot(AT, Cinv), y)
    return np.linalg.solve(ATCA, ATCy)
def make_flicker_plot(x, xerr, y, yerr, samples, whichx, fname, ndraws,
                      fractional=False, extra=False):
    """Plot density (rho) or logg against flicker with posterior draws.

    x, xerr, y, yerr : data points and their uncertainties (log10 space).
    samples : MCMC samples, shape (nparams+1, nsamples); the last row is
        used as log-likelihoods to select the best sample.
    whichx : "rho" or "logg" -- selects which panel/relation to plot.
    fname : model flavor; "simple" unpacks 3 parameters, otherwise 4.
    ndraws : number of posterior draws used to shade credible regions.
    fractional, extra : accepted for call compatibility; not used in
        this function -- TODO confirm.

    Saves the figure to "new_rho" or "new_logg".
    """
    # use highest likelihood samples
    lls = samples[:, -1]
    m = lls == max(lls)
    if fname == "simple":
        beta, alpha, tau = [samples[i, m]
                            for i in range(np.shape(samples)[0]-1)]
    else:
        beta, alpha, tau, f = [samples[i, m]
                               for i in range(np.shape(samples)[0]-1)]
    # sigma = abs(tau)**.5
    sigma = tau
    if whichx == "rho":
        pars = ["alpha", "beta", "rho", "gamma"]
        alpha -= 3
    if whichx == "logg":
        pars = ["delta", "epsilon", "g", "zeta"]

    # Print LaTeX table rows (value with 16/50/84 percentile ranges).
    # NOTE(review): ap, tp and fp all draw from samples[1, :] while bp
    # uses samples[0, :]; tp and fp look like they should use rows 2 and
    # 3 -- confirm the intended columns before relying on these numbers.
    print "parameters:"
    ap = np.percentile(samples[1, :], [16, 50, 84])
    print "$\\%s$ & %s$_{-%s}^{+%s}$ & \\" \
        % (pars[0], np.round(alpha[0], 3),
           np.round(ap[1], 2)-np.round(ap[0], 2),
           np.round(ap[2], 2)-np.round(ap[1], 2))
    bp = np.percentile(samples[0, :], [16, 50, 84])
    print "$\\%s$ & %s$_{-%s}^{+%s}$ & \\" \
        % (pars[1], np.round(beta[0], 3),
           np.round(bp[1], 2)-np.round(bp[0], 2),
           np.round(bp[2], 2)-np.round(bp[1], 2))
    tp = np.percentile(samples[1, :], [16, 50, 84])
    print "$\\sigma_%s$ & %s$_{-%s}^{+%s}$ & \\" \
        % (pars[2], np.round(tau[0], 3),
           np.round(tp[1], 2)-np.round(tp[0], 2),
           np.round(tp[2], 2)-np.round(tp[1], 2))
    if fname != "simple":
        fp = np.percentile(samples[1, :], [16, 50, 84])
        print "$\\%s$ & %s$_{-%s}^{+%s}$ & \\" \
            % (pars[3], np.round(f[0], 3),
               np.round(fp[1], 2)-np.round(fp[0], 2),
               np.round(fp[2], 2)-np.round(fp[1], 2))

    # draw samples from post
    b_samp = np.random.choice(samples[0, :], ndraws)
    a_samp = np.random.choice(samples[1, :], ndraws)
    s_samp = np.random.choice(samples[2, :], ndraws)
    f_samp = np.random.choice(samples[3, :], ndraws)

    # rho plot
    plt.clf()
    xs = np.linspace(.7, 2.5, 100)
    if whichx == "rho":
        plt.ylabel("$\log_{10}(\\rho_{\star}[\mathrm{g~cm}^{-3}])$")
        col = "#FF33CC"
        plt.text(1.4, .5, "$\log_{10} (\\rho_{\star}) \sim \mathcal{N} \
            \\left(\\alpha + \\beta \log_{10}(F_8), \
            \\sigma=\sqrt{\\sigma_{\\rho}^2+\\gamma F_8}\\right)$")
        plt.text(1.95, .24, "$\\alpha = %.3f$" % (alpha-3))
        plt.text(1.95, .09, "$\\beta = %.3f$" % beta)
        plt.text(1.95, -.06, "$\\sigma_{\\rho} = %.3f$" % sigma)
        plt.text(1.95, -.21, "$\\gamma = %.3f$" % f)
        plt.ylim(-2, 1)
        # plot line draws
        lines = []
        for i in range(ndraws):
            ys = model1([b_samp[i], a_samp[i]], xs)
            y3 = ys - 3
            # line = ys + (np.random.randn(1)*np.median(s_samp)**2 + \
            #     np.random.randn(1)*np.median(f_samp)*xs)**.5 - 3
            line = ys + (np.random.randn(1)*np.median(sigma)**2 + \
                np.random.randn(1)*np.median(f)*xs)**.5 - 3
            # plt.plot(xs, line, col, alpha=.05)
            # Keep only draws with no NaNs (negative sqrt arguments).
            if len(line[np.isfinite(line)])==len(xs):
                lines.append(line)
        quantiles = np.percentile(lines, [2, 16, 84, 98], axis=0)
        plt.fill_between(xs, quantiles[1], quantiles[2], color=col,
                         alpha=.4)
        plt.fill_between(xs, quantiles[0], quantiles[3], color=col,
                         alpha=.4)
        # plot best fit and data
        ym = model1([np.median(b_samp), np.median(a_samp)], xs)
        plt.plot(xs, ym - 3, ".2", linewidth=1)
        plt.xlim(min(xs), max(xs))
        # NOTE(review): yerr=xerr below looks like a typo for yerr=yerr
        # -- confirm before changing.
        plt.errorbar(x, y - 3, xerr=xerr, yerr=xerr, fmt="k.", capsize=0,
                     alpha=.5, ecolor=".5", mec=".2")
        plt.savefig("new_rho")

    # logg plot
    elif whichx == "logg":
        plt.ylim(3, 5)
        col = "#0066CC"
        plt.text(1.5, 4.7, "$\log(g) \sim \mathcal{N} \
            \\left(\\delta + \\epsilon \log_{10}(\\rho_{\star}), \
            \\sigma=\sqrt{\\sigma_g^2 + \zeta F_8}\\right)$")
        plt.text(1.95, 4.52, "$\\delta = %.3f$" % alpha)
        plt.text(1.95, 4.42, "$\\epsilon = %.3f$" % beta)
        plt.text(1.95, 4.32, "$\\sigma_g = %.3f$" % sigma)
        plt.text(1.95, 4.22, "$\\zeta = %.3f$" % f)
        plt.ylabel("$\log_{10}(g [\mathrm{cm~s}^{-2}])$")
        # plot line draws
        # lines = np.zeros((ndraws, len(xs)))
        lines = []
        for i in range(ndraws):
            ys = model1([b_samp[i], a_samp[i]], xs)
            # Only draw when the variance term is positive everywhere.
            if np.all(tau[0]**2+f[0]*xs) > 0:
                line = ys + np.random.randn(1)*(tau**2 + f*xs)**.5
                # plt.plot(xs, line, col, alpha=.05)
                # print len(line[np.isfinite(line)])
                if len(line[np.isfinite(line)])==len(xs):
                    # print len(line[np.isfinite(line)])
                    lines.append(line)
        plt.xlim(min(xs), max(xs))
        # print np.shape(lines)
        # plot regions
        quantiles = np.percentile(lines, [2, 16, 84, 98], axis=0)
        # print np.shape(quantiles)
        # assert 0
        # print quantiles
        plt.fill_between(xs, quantiles[1], quantiles[2], color=col,
                         alpha=.4)
        plt.fill_between(xs, quantiles[0], quantiles[3], color=col,
                         alpha=.4)
        # plt.fill_between(xs, quantiles[1], quantiles[2], color=col,
        #                  alpha=.4)
        # plt.fill_between(xs, quantiles[0], quantiles[3], color=col,
        #                  alpha=.2)
        # plot best fit and data
        ym = model1([np.median(b_samp), np.median(a_samp)], xs)
        plt.plot(xs, ym, ".2", linewidth=1)
        plt.errorbar(x, y, xerr=xerr, yerr=yerr, fmt="k.", capsize=0,
                     alpha=.5, ecolor=".5", mec=".2")
        plt.savefig("new_logg")
def make_inverse_flicker_plot(x, xerr, y, yerr, samples, whichx, fname, ndraws,
                              fractional=False, extra=False):
    """Plot rho or logg vs flicker with simple Gaussian scatter models.

    samples : MCMC samples, shape (nparams[+1], nsamples); columns with
        a negative first parameter are kept.
    whichx : "rho" or "logg" -- selects the panel drawn.
    fname : model flavor ("f", "simple", "test", ...) controlling how
        the scatter draws s_samp are built.
    ndraws : number of posterior draws for the shaded regions.
    extra : if True, unpack a 4th parameter f and add an F8-dependent
        scatter term to the draws.

    Saves "../figs/<whichx>_vs_flicker_<fname>.pdf",
    "flicker_inv_<whichx>_<fname>" and "bastien_figureS2".
    NOTE: mutates x in place (x -= 3) for the final figure.
    """
    # fit straight line
    lim = 200
    a1 = fit_straight_line(x[:lim], y[:lim], yerr[:lim])
    a2 = fit_straight_line(x[lim:], y[lim:], yerr[lim:])
    assert np.shape(samples)[0] < np.shape(samples)[1], \
        "samples is wrong shape"
    # Keep only samples with a negative slope parameter.
    m = samples[0, :] < 0
    samples = samples[:, m]
    # use highest likelihood samples
    # NOTE(review): samples[:, -1] selects the last *column* (one
    # sample's parameters), not a likelihood row -- confirm this is the
    # intended selection.
    lls = samples[:, -1]
    m = lls == max(lls)
    if extra:
        beta, alpha, tau, f = \
            [samples[i, m] for i in range(np.shape(samples)[0]-1)]
    else:
        beta, alpha, tau = \
            [samples[i, m] for i in range(np.shape(samples)[0]-1)]
    sigma = abs(tau)**.5
    if fname == "simple":
        sigma = tau
    pars = [beta, alpha, sigma]
    # # take medians
    # results = np.median(samples, axis=1)
    # print 'results = ', results
    # beta, alpha, tau = results[:3]
    # if extra:
    #     print np.shape(results), "shape"
    #     beta, alpha, tau, f = results[:4]
    print alpha, beta, tau, sigma

    # Random draws of the slope/intercept/scatter parameters.
    b_samp = np.random.choice(samples[0, :], ndraws)
    a_samp = np.random.choice(samples[1, :], ndraws)
    t_samp = np.random.choice(samples[2, :], ndraws)
    if fname == "f":
        s_samp = (abs(t_samp)**.5 - 1) * np.random.randn(ndraws) + 1
    if fname == "simple":
        s_samp = t_samp
    if fname == "test":
        s_samp = (abs(t_samp)**.5) * np.random.randn(ndraws)
    if extra:
        s_samp = (abs(t_samp)**.5 - 1) * np.random.randn(ndraws) + 1
        f_samp = np.random.choice(samples[3, :], ndraws)

    plt.clf()
    xs = np.linspace(1., 2.4, 100)
    if whichx == "rho":
        plt.ylabel("$\log_{10}(\\rho_{\star}[\mathrm{g~cm}^{-3}])$")
        col = "#FF33CC"
        plt.text(1.55, .5, "$\log_{10} (\\rho_{\star}) \sim \mathcal{N} \
            (\\alpha + \\beta \log_{10}(F_8), \sigma_{\\rho})$")
        plt.text(1.95, .22, "$\\alpha = %.3f$" % (alpha-3))
        plt.text(1.95, .07, "$\\beta = %.3f$" % beta)
        plt.text(1.95, -.08, "$\\sigma_{\\rho} = %.3f$" % sigma)
        plt.ylim(-2, 1)
        lines = []
        ym = model1([np.median(b_samp), np.median(a_samp)], xs)
        for i in range(ndraws):
            ys = model1([b_samp[i], a_samp[i]], xs)
            y3 = ys - 3
            # if fractional:
            #     plt.plot(xs, ym + ym * np.random.randn(1)*sigma - 3 ,
            #              "b", alpha=.05)
            # lines.append(ys-3 + ys * (np.random.randn(1)*s_samp[i] - 1))
            # lines.append(ym + ym * np.random.randn(1)*(sigma-1) - 3)
            if extra:
                line = y3 + np.random.randn(1)*np.median(s_samp-1) + \
                    np.random.randn(1)*np.median(f_samp)*xs
                plt.plot(xs, line, col, alpha=.05)
                lines.append(line)
            else:
                # plt.plot(xs, ys + s_samp[i] - 3, col, alpha=.05)
                # plt.plot(xs, ys + np.random.randn(1)*s_samp[i] - 3, col,
                #          alpha=.05)
                lines.append(ys + np.random.randn(1)*s_samp[i] - 3) #FIXME: opt
        plt.plot(xs, model1([np.median(b_samp), np.median(a_samp)], xs)-3, ".2", linewidth=1)
        # NOTE(review): yerr=xerr below looks like a typo for yerr=yerr
        # -- confirm before changing.
        plt.errorbar(x, y-3, xerr=xerr, yerr=xerr, fmt="k.", capsize=0,
                     alpha=.5, ecolor=".5", mec=".2")
        quantiles = np.percentile(lines, [2, 16, 84, 98], axis=0)
        plt.fill_between(xs, quantiles[1], quantiles[2], color=col,
                         alpha=.4)
        plt.fill_between(xs, quantiles[0], quantiles[3], color=col,
                         alpha=.2)
        # ys = model1(pars, xs)
        # if fractional:
        #     plt.plot(xs, ys + ys * sigma - 3 , "k--")
        #     plt.plot(xs, ys - ys * sigma - 3, "k--")
        # elif extra:
        #     plt.plot(xs, ys + f * ys + sigma - 3 , "k--")
        #     plt.plot(xs, ys - f * ys + sigma - 3, "k--")
        # else:
        #     plt.plot(xs, ys + sigma - 3 , "k--")
        #     plt.plot(xs, ys - sigma - 3, "k--")
    elif whichx == "logg":
        plt.ylim(3, 5)
        col = "#0066CC"
        plt.ylabel("$\log_{10}(g [\mathrm{cm~s}^{-2}])$")
        plt.text(1.6, 4.7, "$\log(g) \sim \mathcal{N} \
            (\\gamma + \\delta \log_{10}(F_8), \\sigma_g)$")
        plt.text(1.95, 4.52, "$\\gamma = %.3f$" % alpha)
        plt.text(1.95, 4.42, "$\\delta = %.3f$" % beta)
        plt.text(1.95, 4.32, "$\\sigma_g = %.3f$" % sigma)
        lines = []
        ym = model1([np.median(b_samp), np.median(a_samp)], xs)
        for i in range(ndraws):
            ys = model1([b_samp[i], a_samp[i]], xs)
            # if fractional:
            #     plt.plot(xs, ym + ym * np.random.randn(1)*sigma - 3 ,
            #              "b", alpha=.05)
            # lines.append(ys + ys*np.random.randn(1)*s_samp[i])
            if extra:
                plt.plot(xs, ys + np.random.randn(1)*np.median(s_samp-1) +
                         np.random.randn(1)*np.median(f_samp)*xs, col, alpha=.05)
                # plt.plot(xs, ys + np.median(f_samp-1)*np.random.randn(1) * \
                #          ys + np.median(s_samp-1)*np.random.randn(1), col, alpha=.05)
                # plt.plot(xs, ys + np.median(f_samp)*np.random.randn(1)*ys
                #          + np.median(s_samp-1)*np.random.randn(1),
                #          col, alpha=.05)
                # lines.append(ys + ys*np.median(f_samp)*np.random.randn(1))
                lines.append(ys + ys*np.median(f_samp)*np.random.randn(1)
                             + np.random.randn(1)*np.median(s_samp-1))
            else:
                # plt.plot(xs, ys + s_samp[i], col, alpha=.05)
                # plt.plot(xs, ys + np.random.randn(1)*s_samp[i], col, alpha=.05)
                lines.append(ys + np.random.randn(1)*s_samp[i])
        plt.plot(xs, model1(pars, xs), ".2", linewidth=1)
        plt.errorbar(x, y, xerr=xerr, yerr=yerr, fmt="k.", capsize=0,
                     alpha=.5, ecolor=".5", mec=".2")
        ys = model1(pars, xs)
        quantiles = np.percentile(lines, [2, 16, 84, 98], axis=0)
        plt.fill_between(xs, quantiles[1], quantiles[2], color=col,
                         alpha=.4)
        plt.fill_between(xs, quantiles[0], quantiles[3], color=col,
                         alpha=.2)
        # if fractional:
        #     plt.plot(xs, ys + ys*sigma, "k--")
        #     plt.plot(xs, ys - ys*sigma, "k--")
        # elif extra:
        #     plt.plot(xs, ys + (f * ys + sigma), "k--")
        #     plt.plot(xs, ys - (f * ys + sigma), "k--")
        # else:
        #     plt.plot(xs, ys + sigma, "k--")
        #     plt.plot(xs, ys - sigma, "k--")

    # Linear credible band from the first two sample rows.
    # NOTE(review): samples[:, :2] takes the first two *columns*; the
    # matching shape for np.vander(xs, 2) suggests samples[:2, :].T was
    # intended -- confirm.
    A = np.vander(xs, 2)
    lines = np.dot(samples[:, :2], A.T)
    quantiles = np.percentile(lines, [16, 84], axis=0)
    plt.fill_between(xs, quantiles[0], quantiles[1], color="#8d44ad",
                     alpha=.5)
    plt.subplots_adjust(bottom=.1)
    plt.xlim(1, 2.4)
    plt.xlabel("$\log_{10}\mathrm{(F}_8~\mathrm{[ppm]})$")
    print "..figs/%s_vs_flicker_%s.pdf" % (whichx, fname)
    plt.savefig("../figs/%s_vs_flicker_%s.pdf" % (whichx, fname))
    plt.savefig("flicker_inv_%s_%s" % (whichx, fname))

    # Reproduce Bastien et al. figure S2 (empirical cubic relation).
    plt.clf()
    plt.ylim(4.6, 3.2)
    x -= 3
    xs = np.linspace(min(x), max(x), 100)
    plt.plot(10**x, y, "ko")
    ys = 1.15136-3.59637*xs-1.40002*xs**2-.22993*xs**3
    plt.plot(10**xs, ys, "m")
    plt.savefig("bastien_figureS2")
if __name__ == "__main__":
    # Global matplotlib styling for the figures below.
    plotpar = {'axes.labelsize': 18,
               'text.fontsize': 26,
               'legend.fontsize': 18,
               'xtick.labelsize': 18,
               'ytick.labelsize': 18,
               'text.usetex': True}
    plt.rcParams.update(plotpar)

    whichx = str(sys.argv[1])  # should be either "rho" or "logg"
    fname = str(sys.argv[2])  # mixture, f_extra, f, test, simple

    # x, y, xerr, yerr = load_data(whichx, bigdata=True)
    x, y, xerr, yerr = load_data(whichx, bigdata=False)

    # load chains
    with h5py.File("%s_samples_%s.h5" % (whichx, fname), "r") as f:
        samples = f["samples"][...]
    samples = samples.T

    fractional, extra = False, False
    if fname == "f":
        fractional = True
    elif fname in ("f_extra", "short"):
        # Bug fix: the original condition was
        # `fname == "f_extra" or "short"`, which is always truthy, so
        # every fname other than "f" silently enabled `extra`.
        extra = True

    make_flicker_plot(x, xerr, y, yerr, samples, whichx, fname, 10000,
                      fractional=fractional, extra=extra)
    # make_inverse_flicker_plot(x, xerr, y, yerr, samples, whichx, fname, 1000,
    #                           fractional=fractional, extra=extra)
|
RuthAngusREPO_NAMEflickerPATH_START.@flicker_extracted@flicker-master@code@plots.py@.PATH_END.py
|
{
"filename": "stochastic_optics.py",
"repo_name": "achael/eht-imaging",
"repo_path": "eht-imaging_extracted/eht-imaging-main/ehtim/scattering/stochastic_optics.py",
"type": "Python"
}
|
# Michael Johnson, 2/15/2017
# See http://adsabs.harvard.edu/abs/2016ApJ...833...74J for details about this module
from __future__ import print_function
from builtins import range
from builtins import object
import numpy as np
import scipy.signal
import scipy.special as sps
import scipy.integrate as integrate
from scipy.optimize import minimize
import matplotlib.pyplot as plt
import ehtim.image as image
import ehtim.movie as movie
import ehtim.obsdata as obsdata
from ehtim.observing.obs_helpers import *
from ehtim.const_def import * #Note: C is m/s rather than cm/s.
from multiprocessing import cpu_count
from multiprocessing import Pool
import math
import cmath
################################################################################
# The class ScatteringModel encompasses a generic scattering model, determined by the power spectrum Q and phase structure function Dphi
################################################################################
class ScatteringModel(object):
"""A scattering model based on a thin-screen approximation.
Models include:
('von_Mises', 'boxcar', 'dipole'): These scattering models are motivated by observations of Sgr A*.
Each gives a Gaussian at long wavelengths that matches the model defined
by {theta_maj_mas_ref, theta_min_mas_ref, POS_ANG} at the reference wavelength wavelength_reference_cm
with a lambda^2 scaling. The source sizes {theta_maj, theta_min} are the image FWHM in milliarcseconds
at the reference wavelength. Note that this may not match the ensemble-average kernel at the reference wavelength,
if the reference wavelength is short enough to be beyond the lambda^2 regime!
This model also includes an inner and outer scale and will thus transition to scattering with scatt_alpha at shorter wavelengths
Note: This model *requires* a finite inner scale
'power-law': This scattering model gives a pure power law at all wavelengths. There is no inner scale, but there can be an outer scale.
The ensemble-average image is given by {theta_maj_mas_ref, theta_min_mas_ref, POS_ANG} at the reference wavelength wavelength_reference_cm.
The ensemble-average image size is proportional to wavelength^(1+2/scatt_alpha) = wavelength^(11/5) for Kolmogorov
Attributes:
model (string): The type of scattering model (determined by the power spectrum of phase fluctuations).
scatt_alpha (float): The power-law index of the phase fluctuations (Kolmogorov is 5/3).
observer_screen_distance (float): The distance from the observer to the scattering screen in cm.
source_screen_distance (float): The distance from the source to the scattering screen in cm.
theta_maj_mas_ref (float): FWHM in mas of the major axis angular broadening at the specified reference wavelength.
theta_min_mas_ref (float): FWHM in mas of the minor axis angular broadening at the specified reference wavelength.
POS_ANG (float): The position angle of the major axis of the scattering.
wavelength_reference_cm (float): The reference wavelength for the scattering model in cm.
r_in (float): The inner scale of the scattering screen in cm.
r_out (float): The outer scale of the scattering screen in cm.
rF (function): The Fresnel scale of the scattering screen at the specific wavelength.
"""
def __init__(self, model = 'dipole', scatt_alpha = 1.38, observer_screen_distance = 2.82 * 3.086e21, source_screen_distance = 5.53 * 3.086e21, theta_maj_mas_ref = 1.380, theta_min_mas_ref = 0.703, POS_ANG = 81.9, wavelength_reference_cm = 1.0, r_in = 800e5, r_out = 1e20):
    """To initialize the scattering model, specify:

    Attributes:
        model (string): The type of scattering model (determined by the power spectrum of phase fluctuations). Options are 'von_Mises', 'boxcar', 'dipole', and 'power-law'
        scatt_alpha (float): The power-law index of the phase fluctuations (Kolmogorov is 5/3).
        observer_screen_distance (float): The distance from the observer to the scattering screen in cm.
        source_screen_distance (float): The distance from the source to the scattering screen in cm.
        theta_maj_mas_ref (float): FWHM in mas of the major axis angular broadening at the specified reference wavelength.
        theta_min_mas_ref (float): FWHM in mas of the minor axis angular broadening at the specified reference wavelength.
        POS_ANG (float): The position angle of the major axis of the scattering.
        wavelength_reference_cm (float): The reference wavelength for the scattering model in cm.
        r_in (float): The inner scale of the scattering screen in cm.
        r_out (float): The outer scale of the scattering screen in cm.
    """
    self.model = model
    self.POS_ANG = POS_ANG #Major axis position angle [degrees, east of north]
    self.observer_screen_distance = observer_screen_distance #cm
    self.source_screen_distance = source_screen_distance #cm
    # Effective magnification of the screen
    M = observer_screen_distance/source_screen_distance
    self.wavelength_reference = wavelength_reference_cm #Reference wavelength [cm]
    self.r_in = r_in #inner scale [cm]
    self.r_out = r_out #outer scale [cm]
    self.scatt_alpha = scatt_alpha
    # Conversion factor between an image FWHM and the structure-function scale
    FWHM_fac = (2.0 * np.log(2.0))**0.5/np.pi
    # Normalization of the power spectrum Q, fixed by the reference-wavelength image size
    self.Qbar = 2.0/sps.gamma((2.0 - self.scatt_alpha)/2.0) * (self.r_in**2*(1.0 + M)/(FWHM_fac*(self.wavelength_reference/(2.0*np.pi))**2) )**2 * ( (theta_maj_mas_ref**2 + theta_min_mas_ref**2)*(1.0/1000.0/3600.0*np.pi/180.0)**2)
    self.C_scatt_0 = (self.wavelength_reference/(2.0*np.pi))**2 * self.Qbar*sps.gamma(1.0 - self.scatt_alpha/2.0)/(8.0*np.pi**2*self.r_in**2)
    A = theta_maj_mas_ref/theta_min_mas_ref # Anisotropy, >=1, as lambda->infinity
    # Major-axis angle in radians, measured east of north -> standard orientation
    self.phi0 = (90 - self.POS_ANG) * np.pi/180.0
    # Parameters for the approximate phase structure function
    theta_maj_rad_ref = theta_maj_mas_ref/1000.0/3600.0*np.pi/180.0
    theta_min_rad_ref = theta_min_mas_ref/1000.0/3600.0*np.pi/180.0
    self.Amaj_0 = ( self.r_in*(1.0 + M) * theta_maj_rad_ref/(FWHM_fac * (self.wavelength_reference/(2.0*np.pi)) * 2.0*np.pi ))**2
    self.Amin_0 = ( self.r_in*(1.0 + M) * theta_min_rad_ref/(FWHM_fac * (self.wavelength_reference/(2.0*np.pi)) * 2.0*np.pi ))**2
    # Solve for the angular-concentration parameter kzeta that reproduces the
    # requested anisotropy A under each angular model.
    # NOTE: minimize(...).x is a length-1 ndarray, so self.kzeta is an array.
    if model == 'von_Mises':
        def avM_Anisotropy(kzeta):
            return np.abs( (kzeta*sps.i0(kzeta)/sps.i1(kzeta) - 1.0)**0.5 - A )
        self.kzeta = minimize(avM_Anisotropy, A**2, method='nelder-mead', options={'xtol': 1e-8, 'disp': False}).x
        self.P_phi_prefac = 1.0/(2.0*np.pi*sps.i0(self.kzeta))
    elif model == 'boxcar':
        def boxcar_Anisotropy(kzeta):
            return np.abs( np.sin(np.pi/(1.0 + kzeta))/(np.pi/(1.0 + kzeta)) - (theta_maj_mas_ref**2 - theta_min_mas_ref**2)/(theta_maj_mas_ref**2 + theta_min_mas_ref**2) )
        self.kzeta = minimize(boxcar_Anisotropy, A, method='nelder-mead', options={'xtol': 1e-8, 'disp': False}).x
        self.P_phi_prefac = (1.0 + self.kzeta)/(2.0*np.pi)
    elif model == 'dipole':
        def dipole_Anisotropy(kzeta):
            return np.abs( sps.hyp2f1((self.scatt_alpha + 2.0)/2.0, 0.5, 2.0, -kzeta)/sps.hyp2f1((self.scatt_alpha + 2.0)/2.0, 1.5, 2.0, -kzeta) - A**2 )
        self.kzeta = minimize(dipole_Anisotropy, A, method='nelder-mead', options={'xtol': 1e-8, 'disp': False}).x
        self.P_phi_prefac = 1.0/(2.0*np.pi*sps.hyp2f1((self.scatt_alpha + 2.0)/2.0, 0.5, 1.0, -self.kzeta))
    else:
        # NOTE(review): 'power-law' is documented on the class but has no branch
        # here; self.kzeta/P_phi_prefac stay unset in that case — confirm intent.
        print("Scattering Model Not Recognized!")
    # More parameters for the approximate phase structure function:
    # angular averages of |cos|^alpha and |sin|^alpha weighted by P_phi
    int_maj = integrate.quad(lambda phi_q: np.abs( np.cos( self.phi0 - phi_q ) )**self.scatt_alpha * self.P_phi(phi_q), 0, 2.0*np.pi, limit=250)[0]
    int_min = integrate.quad(lambda phi_q: np.abs( np.sin( self.phi0 - phi_q ) )**self.scatt_alpha * self.P_phi(phi_q), 0, 2.0*np.pi, limit=250)[0]
    B_prefac = self.C_scatt_0 * 2.0**(2.0 - self.scatt_alpha) * np.pi**0.5/(self.scatt_alpha * sps.gamma((self.scatt_alpha + 1.0)/2.0))
    self.Bmaj_0 = B_prefac*int_maj
    self.Bmin_0 = B_prefac*int_min
    #Check normalization:
    #print("Checking Normalization:",integrate.quad(lambda phi_q: self.P_phi(phi_q), 0, 2.0*np.pi)[0])
    return
def P_phi(self, phi):
if self.model == 'von_Mises':
return self.P_phi_prefac * np.cosh(self.kzeta*np.cos(phi - self.phi0))
elif self.model == 'boxcar':
return self.P_phi_prefac * (1.0 - ((np.pi/(2.0*(1.0 + self.kzeta)) < (phi - self.phi0) % np.pi) & ((phi - self.phi0) % np.pi < np.pi - np.pi/(2.0*(1.0 + self.kzeta)))))
elif self.model == 'dipole':
return self.P_phi_prefac * (1.0 + self.kzeta*np.sin(phi - self.phi0)**2)**(-(self.scatt_alpha + 2.0)/2.0)
def rF(self, wavelength):
"""Returns the Fresnel scale [cm] of the scattering screen at the specified wavelength [cm].
Args:
wavelength (float): The desired wavelength [cm]
Returns:
rF (float): The Fresnel scale [cm]
"""
return (self.source_screen_distance*self.observer_screen_distance/(self.source_screen_distance + self.observer_screen_distance)*wavelength/(2.0*np.pi))**0.5
def Mag(self):
"""Returns the effective magnification the scattering screen: (observer-screen distance)/(source-screen distance).
Returns:
M (float): The effective magnification of the scattering screen.
"""
return self.observer_screen_distance/self.source_screen_distance
def dDphi_dz(self, r, phi, phi_q, wavelength):
"""differential contribution to the phase structure function
"""
return 4.0 * (wavelength/self.wavelength_reference)**2 * self.C_scatt_0/self.scatt_alpha * (sps.hyp1f1(-self.scatt_alpha/2.0, 0.5, -r**2/(4.0*self.r_in**2)*np.cos(phi - phi_q)**2) - 1.0)
def Dphi_exact(self, x, y, wavelength_cm):
r = (x**2 + y**2)**0.5
phi = np.arctan2(y, x)
return integrate.quad(lambda phi_q: self.dDphi_dz(r, phi, phi_q, wavelength_cm)*self.P_phi(phi_q), 0, 2.0*np.pi)[0]
def Dmaj(self, r, wavelength_cm):
return (wavelength_cm/self.wavelength_reference)**2 * self.Bmaj_0 * (2.0 * self.Amaj_0/(self.scatt_alpha * self.Bmaj_0))**(-self.scatt_alpha/(2.0 - self.scatt_alpha)) * ((1.0 + (2.0*self.Amaj_0/(self.scatt_alpha * self.Bmaj_0))**(2.0/(2.0 - self.scatt_alpha)) * (r/self.r_in)**2 )**(self.scatt_alpha/2.0) - 1.0)
def Dmin(self, r, wavelength_cm):
return (wavelength_cm/self.wavelength_reference)**2 * self.Bmin_0 * (2.0 * self.Amin_0/(self.scatt_alpha * self.Bmin_0))**(-self.scatt_alpha/(2.0 - self.scatt_alpha)) * ((1.0 + (2.0*self.Amin_0/(self.scatt_alpha * self.Bmin_0))**(2.0/(2.0 - self.scatt_alpha)) * (r/self.r_in)**2 )**(self.scatt_alpha/2.0) - 1.0)
def Dphi_approx(self, x, y, wavelength_cm):
r = (x**2 + y**2)**0.5
phi = np.arctan2(y, x)
Dmaj_eval = self.Dmaj(r, wavelength_cm)
Dmin_eval = self.Dmin(r, wavelength_cm)
return (Dmaj_eval + Dmin_eval)/2.0 + (Dmaj_eval - Dmin_eval)/2.0*np.cos(2.0*(phi - self.phi0))
def Q(self, qx, qy):
"""Computes the power spectrum of the scattering model at a wavenumber {qx,qy} (in 1/cm).
The power spectrum is part of what defines the scattering model (along with Dphi).
Q(qx,qy) is independent of the observing wavelength.
Args:
qx (float): x coordinate of the wavenumber in 1/cm.
qy (float): y coordinate of the wavenumber in 1/cm.
Returns:
(float): The power spectrum Q(qx,qy)
"""
q = (qx**2 + qy**2)**0.5 + 1e-12/self.r_in #Add a small offset to avoid division by zero
phi_q = np.arctan2(qy, qx)
return self.Qbar * (q*self.r_in)**(-(self.scatt_alpha + 2.0)) * np.exp(-(q * self.r_in)**2) * self.P_phi(phi_q)
def sqrtQ_Matrix(self, Reference_Image, Vx_km_per_s=50.0, Vy_km_per_s=0.0, t_hr=0.0):
    """Computes the square root of the power spectrum on a discrete grid.

    Because translation of the screen is done most conveniently in Fourier
    space, a screen translation can also be included.

    Args:
        Reference_Image (Image): Reference image to determine image and pixel dimensions and wavelength.
        Vx_km_per_s (float): Velocity of the scattering screen in the x direction (toward East) in km/s.
        Vy_km_per_s (float): Velocity of the scattering screen in the y direction (toward North) in km/s.
        t_hr (float): The current time of the scattering in hours.

    Returns:
        sqrtQ (2D complex ndarray): The square root of the power spectrum of the screen with an additional phase for rotation of the screen.
    """
    #Derived parameters
    FOV = Reference_Image.psize * Reference_Image.xdim * self.observer_screen_distance #Field of view, in cm, at the scattering screen
    N = Reference_Image.xdim
    dq = 2.0*np.pi/FOV #this is the spacing in wavenumber
    # Screen displacement after t_hr, expressed in screen pixels (km/s -> cm/s)
    screen_x_offset_pixels = (Vx_km_per_s * 1.e5) * (t_hr*3600.0) / (FOV/float(N))
    screen_y_offset_pixels = (Vy_km_per_s * 1.e5) * (t_hr*3600.0) / (FOV/float(N))
    # Integer wavenumber grid in FFT ordering; d=1.0/N makes fftfreq return integers
    s, t = np.meshgrid(np.fft.fftfreq(N, d=1.0/N), np.fft.fftfreq(N, d=1.0/N))
    # sqrt(Q) with a linear Fourier phase ramp that translates the screen
    sqrtQ = np.sqrt(self.Q(dq*s, dq*t)) * np.exp(2.0*np.pi*1j*(s*screen_x_offset_pixels +
                                                               t*screen_y_offset_pixels)/float(N))
    sqrtQ[0][0] = 0.0 #A DC offset doesn't affect scattering
    return sqrtQ
def Ensemble_Average_Kernel(self, Reference_Image, wavelength_cm = None, use_approximate_form=True):
    """The ensemble-average convolution kernel for images; returns a 2D array
    corresponding to the image dimensions of the reference image.

    Args:
        Reference_Image (Image): Reference image to determine image and pixel dimensions and wavelength.
        wavelength_cm (float): The observing wavelength for the scattering kernel in cm.
            If unspecified, this will default to the wavelength of the Reference image.
        use_approximate_form (bool): If True, use the (vectorizable) approximate phase
            structure function; otherwise evaluate the exact form point by point.

    Returns:
        ker (2D ndarray): The ensemble-average scattering kernel in the image domain,
            normalized to unit sum.
    """
    # BUG FIX: was `wavelength_cm == None`; identity comparison is correct and
    # avoids elementwise-comparison surprises if an array is ever passed.
    if wavelength_cm is None:
        wavelength_cm = C/Reference_Image.rf*100.0 #Observing wavelength [cm]
    uvlist = np.fft.fftfreq(Reference_Image.xdim)/Reference_Image.psize # assume square kernel. FIXME: create ulist and vlist, and construct u_grid and v_grid with the correct dimension
    if use_approximate_form:
        # Approximate form is numpy-vectorized: evaluate on the full {u,v} grid at once
        u_grid, v_grid = np.meshgrid(uvlist, uvlist)
        ker_uv = self.Ensemble_Average_Kernel_Visibility(u_grid, v_grid, wavelength_cm, use_approximate_form=use_approximate_form)
    else:
        # Exact form requires a numerical integral per {u,v} point
        ker_uv = np.array([[self.Ensemble_Average_Kernel_Visibility(u, v, wavelength_cm, use_approximate_form=use_approximate_form) for u in uvlist] for v in uvlist])
    ker = np.real(np.fft.fftshift(np.fft.fft2(ker_uv)))
    ker = ker / np.sum(ker) # normalize to 1
    return ker
def Ensemble_Average_Kernel_Visibility(self, u, v, wavelength_cm, use_approximate_form=True):
"""The ensemble-average multiplicative scattering kernel for visibilities at a particular {u,v} coordinate
Args:
u (float): u baseline coordinate (dimensionless)
v (float): v baseline coordinate (dimensionless)
wavelength_cm (float): The observing wavelength for the scattering kernel in cm.
Returns:
float: The ensemble-average kernel at the specified {u,v} point and observing wavelength.
"""
if use_approximate_form == True:
return np.exp(-0.5*self.Dphi_approx(u*wavelength_cm/(1.0+self.Mag()), v*wavelength_cm/(1.0+self.Mag()), wavelength_cm))
else:
return np.exp(-0.5*self.Dphi_exact(u*wavelength_cm/(1.0+self.Mag()), v*wavelength_cm/(1.0+self.Mag()), wavelength_cm))
def Ensemble_Average_Blur(self, im, wavelength_cm = None, ker = None, use_approximate_form=True):
    """Blurs an input Image with the ensemble-average scattering kernel.

    Args:
        im (Image): The unscattered image.
        wavelength_cm (float): The observing wavelength for the scattering kernel in cm. If unspecified, this will default to the wavelength of the input image.
        ker (2D ndarray): The user can optionally pass a pre-computed ensemble-average blurring kernel.
        use_approximate_form (bool): Use the approximate phase structure function when computing the kernel.

    Returns:
        out (Image): The ensemble-average scattered image.
    """
    # Inputs an unscattered image and an ensemble-average blurring kernel (2D array); returns the ensemble-average image
    # The pre-computed kernel can optionally be specified (ker)
    if wavelength_cm == None:
        wavelength_cm = C/im.rf*100.0 #Observing wavelength [cm]
    if ker is None:
        ker = self.Ensemble_Average_Kernel(im, wavelength_cm, use_approximate_form)
    # Convolve the Stokes I plane, then build the output image at the
    # frequency corresponding to wavelength_cm
    Iim = Wrapped_Convolve((im.imvec).reshape(im.ydim, im.xdim), ker)
    out = image.Image(Iim, im.psize, im.ra, im.dec, rf=C/(wavelength_cm/100.0), source=im.source, mjd=im.mjd, pulse=im.pulse)
    # Linear polarization (Q, U) is convolved with the same kernel, if present
    if len(im.qvec):
        Qim = Wrapped_Convolve((im.qvec).reshape(im.ydim, im.xdim), ker)
        Uim = Wrapped_Convolve((im.uvec).reshape(im.ydim, im.xdim), ker)
        out.add_qu(Qim, Uim)
    # Circular polarization (V) likewise
    if len(im.vvec):
        Vim = Wrapped_Convolve((im.vvec).reshape(im.ydim, im.xdim), ker)
        out.add_v(Vim)
    return out
def Deblur_obs(self, obs, use_approximate_form=True):
    """Deblurs the observation obs by dividing visibilities by the
    ensemble-average scattering kernel. See Fish et al. (2014): arXiv:1409.4690.

    Args:
        obs (Obsdata): The observervation data (including scattering).

    Returns:
        obsdeblur (Obsdata): The deblurred observation.
    """
    # Work on a copy of the observation's data table so obs itself is untouched
    datatable = (obs.copy()).data
    u = datatable['u']
    v = datatable['v']
    wavelength_cm = C/obs.rf*100.0
    # Every Stokes visibility and its uncertainty is divided by the same
    # kernel value at each {u,v} point
    fields = ['vis', 'qvis', 'uvis', 'vvis', 'sigma', 'qsigma', 'usigma', 'vsigma']
    for i in range(len(datatable)):
        ker = self.Ensemble_Average_Kernel_Visibility(u[i], v[i], wavelength_cm=wavelength_cm, use_approximate_form=use_approximate_form)
        for field in fields:
            datatable[field][i] = datatable[field][i] / ker
    # Rebuild an Obsdata object carrying over all calibration flags
    obsdeblur = obsdata.Obsdata(obs.ra, obs.dec, obs.rf, obs.bw, datatable, obs.tarr, source=obs.source, mjd=obs.mjd,
                                ampcal=obs.ampcal, phasecal=obs.phasecal, opacitycal=obs.opacitycal, dcal=obs.dcal, frcal=obs.frcal)
    return obsdeblur
def MakePhaseScreen(self, EpsilonScreen, Reference_Image, obs_frequency_Hz=0.0, Vx_km_per_s=50.0, Vy_km_per_s=0.0, t_hr=0.0, sqrtQ_init=None):
    """Create a refractive phase screen from standardized Fourier components (the EpsilonScreen).

    All lengths should be specified in centimeters.
    If the observing frequency (obs_frequency_Hz) is not specified, then it will be taken to be equal to the frequency of the Reference_Image.
    Note: an odd image dimension is required!

    Args:
        EpsilonScreen (2D ndarray): Optionally, the scattering screen can be specified. If none is given, a random one will be generated.
        Reference_Image (Image): The reference image.
        obs_frequency_Hz (float): The observing frequency, in Hz. By default, it will be taken to be equal to the frequency of the Unscattered_Image.
        Vx_km_per_s (float): Velocity of the scattering screen in the x direction (toward East) in km/s.
        Vy_km_per_s (float): Velocity of the scattering screen in the y direction (toward North) in km/s.
        t_hr (float): The current time of the scattering in hours.
        sqrtQ_init (2D ndarray): The user can optionally pass a precomputed array of the square root of the power spectrum.

    Returns:
        phi_Image (Image): The phase screen.
    """
    #Observing wavelength
    if obs_frequency_Hz == 0.0:
        obs_frequency_Hz = Reference_Image.rf
    wavelength = C/obs_frequency_Hz*100.0 #Observing wavelength [cm]
    wavelengthbar = wavelength/(2.0*np.pi) #lambda/(2pi) [cm]
    #Derived parameters
    FOV = Reference_Image.psize * Reference_Image.xdim * self.observer_screen_distance #Field of view, in cm, at the scattering screen
    rF = self.rF(wavelength)  # Fresnel scale [cm]; NOTE(review): not used below — confirm intent
    Nx = EpsilonScreen.shape[1]
    Ny = EpsilonScreen.shape[0]
    # if Nx%2 == 0:
    #     print("The image dimension should really be odd...")
    #Now we'll calculate the power spectrum for each pixel in Fourier space
    # Screen displacement after t_hr, in screen pixels (note: both use Nx)
    screen_x_offset_pixels = (Vx_km_per_s*1.e5) * (t_hr*3600.0) / (FOV/float(Nx))
    screen_y_offset_pixels = (Vy_km_per_s*1.e5) * (t_hr*3600.0) / (FOV/float(Nx))
    if sqrtQ_init is None:
        sqrtQ = self.sqrtQ_Matrix(Reference_Image, Vx_km_per_s=Vx_km_per_s, Vy_km_per_s=Vy_km_per_s, t_hr=t_hr)
    else:
        #If a matrix for sqrtQ_init is passed, we still potentially need to rotate it
        if screen_x_offset_pixels != 0.0 or screen_y_offset_pixels != 0.0:
            s, t = np.meshgrid(np.fft.fftfreq(Nx, d=1.0/Nx), np.fft.fftfreq(Ny, d=1.0/Ny))
            # Apply a Fourier phase ramp to translate the precomputed screen
            sqrtQ = sqrtQ_init * np.exp(2.0*np.pi*1j*(s*screen_x_offset_pixels +
                                                      t*screen_y_offset_pixels)/float(Nx))
        else:
            sqrtQ = sqrtQ_init
    #Now calculate the phase screen
    phi = np.real(wavelengthbar/FOV*EpsilonScreen.shape[0]*EpsilonScreen.shape[1]*np.fft.ifft2(sqrtQ*EpsilonScreen))
    phi_Image = image.Image(phi, Reference_Image.psize, Reference_Image.ra, Reference_Image.dec, rf=Reference_Image.rf, source=Reference_Image.source, mjd=Reference_Image.mjd)
    return phi_Image
def Scatter2(self, args, kwargs):
    """Call self.Scatter with expanded args and kwargs.

    Helper for multiprocessing.Pool.starmap in Scatter_Movie, which supplies
    each work item as an (args, kwargs) pair.
    """
    return self.Scatter(*args, **kwargs)
def Scatter(self, Unscattered_Image, Epsilon_Screen=np.array([]), obs_frequency_Hz=0.0, Vx_km_per_s=50.0, Vy_km_per_s=0.0, t_hr=0.0, ea_ker=None, sqrtQ=None, Linearized_Approximation=False, DisplayImage=False, Force_Positivity=False, use_approximate_form=True):
    """Scatter an image using the specified epsilon screen.

    All lengths should be specified in centimeters.
    If the observing frequency (obs_frequency_Hz) is not specified, then it will be taken to be equal to the frequency of the Unscattered_Image.
    Note: an odd image dimension is required!

    Args:
        Unscattered_Image (Image): The unscattered image.
        Epsilon_Screen (2D ndarray): Optionally, the scattering screen can be specified. If none is given, a random one will be generated.
        obs_frequency_Hz (float): The observing frequency, in Hz. By default, it will be taken to be equal to the frequency of the Unscattered_Image.
        Vx_km_per_s (float): Velocity of the scattering screen in the x direction (toward East) in km/s.
        Vy_km_per_s (float): Velocity of the scattering screen in the y direction (toward North) in km/s.
        t_hr (float): The current time of the scattering in hours.
        ea_ker (2D ndarray): The user can optionally pass a precomputed array of the ensemble-average blurring kernel.
        sqrtQ (2D ndarray): The user can optionally pass a precomputed array of the square root of the power spectrum.
        Linearized_Approximation (bool): If True, uses a linearized approximation for the scattering (Eq. 10 of Johnson & Narayan 2016). If False, uses Eq. 9 of that paper.
        DisplayImage (bool): If True, show a plot of the unscattered, ensemble-average, and scattered images as well as the phase screen.
        Force_Positivity (bool): If True, eliminates negative flux from the scattered image from the linearized approximation.

    Returns:
        AI_Image (Image): The scattered image.
    """
    #Observing wavelength
    if obs_frequency_Hz == 0.0:
        obs_frequency_Hz = Unscattered_Image.rf
    wavelength = C/obs_frequency_Hz*100.0 #Observing wavelength [cm]
    wavelengthbar = wavelength/(2.0*np.pi) #lambda/(2pi) [cm]
    #Derived parameters
    FOV = Unscattered_Image.psize * Unscattered_Image.xdim * self.observer_screen_distance #Field of view, in cm, at the scattering screen
    rF = self.rF(wavelength)
    Nx = Unscattered_Image.xdim
    Ny = Unscattered_Image.ydim
    #First we need to calculate the ensemble-average image by blurring the unscattered image with the correct kernel
    EA_Image = self.Ensemble_Average_Blur(Unscattered_Image, wavelength, ker = ea_ker, use_approximate_form=use_approximate_form)
    # If no epsilon screen is specified, then generate a random realization
    if Epsilon_Screen.shape[0] == 0:
        Epsilon_Screen = MakeEpsilonScreen(Nx, Ny)
    #We'll now calculate the phase screen.
    phi_Image = self.MakePhaseScreen(Epsilon_Screen, Unscattered_Image, obs_frequency_Hz, Vx_km_per_s=Vx_km_per_s, Vy_km_per_s=Vy_km_per_s, t_hr=t_hr, sqrtQ_init=sqrtQ)
    phi = phi_Image.imvec.reshape(Ny,Nx)
    #Next, we need the gradient of the ensemble-average image
    phi_Gradient = Wrapped_Gradient(phi/(FOV/Nx))
    #The gradient signs don't actually matter, but let's make them match intuition (i.e., right to left, bottom to top)
    phi_Gradient_x = -phi_Gradient[1]
    phi_Gradient_y = -phi_Gradient[0]
    if Linearized_Approximation == True: #Use Equation 10 of Johnson & Narayan (2016)
        #Calculate the gradient of the ensemble-average image
        EA_Gradient = Wrapped_Gradient((EA_Image.imvec/(FOV/Nx)).reshape(EA_Image.ydim, EA_Image.xdim))
        #The gradient signs don't actually matter, but let's make them match intuition (i.e., right to left, bottom to top)
        EA_Gradient_x = -EA_Gradient[1]
        EA_Gradient_y = -EA_Gradient[0]
        #Now we can patch together the average image
        AI = (EA_Image.imvec).reshape(Ny,Nx) + rF**2.0 * ( EA_Gradient_x*phi_Gradient_x + EA_Gradient_y*phi_Gradient_y )
        if len(Unscattered_Image.qvec):
            # Scatter the Q image
            EA_Gradient = Wrapped_Gradient((EA_Image.qvec/(FOV/Nx)).reshape(EA_Image.ydim, EA_Image.xdim))
            EA_Gradient_x = -EA_Gradient[1]
            EA_Gradient_y = -EA_Gradient[0]
            AI_Q = (EA_Image.qvec).reshape(Ny,Nx) + rF**2.0 * ( EA_Gradient_x*phi_Gradient_x + EA_Gradient_y*phi_Gradient_y )
            # Scatter the U image
            EA_Gradient = Wrapped_Gradient((EA_Image.uvec/(FOV/Nx)).reshape(EA_Image.ydim, EA_Image.xdim))
            EA_Gradient_x = -EA_Gradient[1]
            EA_Gradient_y = -EA_Gradient[0]
            AI_U = (EA_Image.uvec).reshape(Ny,Nx) + rF**2.0 * ( EA_Gradient_x*phi_Gradient_x + EA_Gradient_y*phi_Gradient_y )
        if len(Unscattered_Image.vvec):
            # Scatter the V image
            EA_Gradient = Wrapped_Gradient((EA_Image.vvec/(FOV/Nx)).reshape(EA_Image.ydim, EA_Image.xdim))
            EA_Gradient_x = -EA_Gradient[1]
            EA_Gradient_y = -EA_Gradient[0]
            AI_V = (EA_Image.vvec).reshape(Ny,Nx) + rF**2.0 * ( EA_Gradient_x*phi_Gradient_x + EA_Gradient_y*phi_Gradient_y )
    else: #Use Equation 9 of Johnson & Narayan (2016)
        EA_im = (EA_Image.imvec).reshape(Ny,Nx)
        AI = np.copy((EA_Image.imvec).reshape(Ny,Nx))
        if len(Unscattered_Image.qvec):
            AI_Q = np.copy((EA_Image.imvec).reshape(Ny,Nx))
            AI_U = np.copy((EA_Image.imvec).reshape(Ny,Nx))
            EA_im_Q = (EA_Image.qvec).reshape(Ny,Nx)
            EA_im_U = (EA_Image.uvec).reshape(Ny,Nx)
        if len(Unscattered_Image.vvec):
            AI_V = np.copy((EA_Image.imvec).reshape(Ny,Nx))
            EA_im_V = (EA_Image.vvec).reshape(Ny,Nx)
        # Each output pixel samples the ensemble-average image at the
        # position displaced by the refractive gradient (periodic wrap via %)
        for rx in range(Nx):
            for ry in range(Ny):
                # Annoyingly, the signs here must be negative to match the other approximation. I'm not sure which is correct, but it really shouldn't matter anyway because -phi has the same power spectrum as phi. However, getting the *relative* sign for the x- and y-directions correct is important.
                rxp = int(np.round(rx - rF**2.0 * phi_Gradient_x[ry,rx]/self.observer_screen_distance/Unscattered_Image.psize))%Nx
                ryp = int(np.round(ry - rF**2.0 * phi_Gradient_y[ry,rx]/self.observer_screen_distance/Unscattered_Image.psize))%Ny
                AI[ry,rx] = EA_im[ryp,rxp]
                if len(Unscattered_Image.qvec):
                    AI_Q[ry,rx] = EA_im_Q[ryp,rxp]
                    AI_U[ry,rx] = EA_im_U[ryp,rxp]
                if len(Unscattered_Image.vvec):
                    AI_V[ry,rx] = EA_im_V[ryp,rxp]
    #Optional: eliminate negative flux
    if Force_Positivity == True:
        AI = abs(AI)
    #Make it into a proper image format
    AI_Image = image.Image(AI, EA_Image.psize, EA_Image.ra, EA_Image.dec, rf=EA_Image.rf, source=EA_Image.source, mjd=EA_Image.mjd)
    if len(Unscattered_Image.qvec):
        AI_Image.add_qu(AI_Q, AI_U)
    if len(Unscattered_Image.vvec):
        AI_Image.add_v(AI_V)
    if DisplayImage:
        plot_scatt(Unscattered_Image.imvec, EA_Image.imvec, AI_Image.imvec, phi_Image.imvec, Unscattered_Image, 0, 0, ipynb=False)
    return AI_Image
def Scatter_Movie(self, Unscattered_Movie, Epsilon_Screen=np.array([]), obs_frequency_Hz=0.0, Vx_km_per_s=50.0, Vy_km_per_s=0.0, framedur_sec=None, N_frames = None, ea_ker=None, sqrtQ=None, Linearized_Approximation=False, Force_Positivity=False, Return_Image_List=False, processes=0):
    """Scatter a movie using the specified epsilon screen. The movie can either be a movie object, an image list, or a static image.

    If scattering a list of images or static image, the frame duration in seconds (framedur_sec) must be specified.
    If scattering a static image, the total number of frames must be specified (N_frames).
    All lengths should be specified in centimeters.
    If the observing frequency (obs_frequency_Hz) is not specified, then it will be taken to be equal to the frequency of the Unscattered_Movie.
    Note: an odd image dimension is required!

    Args:
        Unscattered_Movie: This can be a movie object, an image list, or a static image
        Epsilon_Screen (2D ndarray): Optionally, the scattering screen can be specified. If none is given, a random one will be generated.
        obs_frequency_Hz (float): The observing frequency, in Hz. By default, it will be taken to be equal to the frequency of the Unscattered_Movie.
        Vx_km_per_s (float): Velocity of the scattering screen in the x direction (toward East) in km/s.
        Vy_km_per_s (float): Velocity of the scattering screen in the y direction (toward North) in km/s.
        framedur_sec (float): Duration of each frame, in seconds. Only needed if Unscattered_Movie is not a movie object.
        N_frames (int): Total number of frames. Only needed if Unscattered_Movie is a static image object.
        ea_ker (2D ndarray): The user can optionally pass a precomputed array of the ensemble-average blurring kernel.
        sqrtQ (2D ndarray): The user can optionally pass a precomputed array of the square root of the power spectrum.
        Linearized_Approximation (bool): If True, uses a linearized approximation for the scattering (Eq. 10 of Johnson & Narayan 2016). If False, uses Eq. 9 of that paper.
        Force_Positivity (bool): If True, eliminates negative flux from the scattered image from the linearized approximation.
        Return_Image_List (bool): If True, returns a list of the scattered frames. If False, returns a movie object.
        processes (int): Number of cores to use in multiprocessing. Default value (0) means no multiprocessing. Uses all available cores if processes < 0.

    Returns:
        Scattered_Movie: Either a movie object or a list of images, depending on the flag Return_Image_List.
    """
    print("Warning!! assuming a constant frame duration, but Movie objects now support unequally spaced frames!")
    # Validate the input-type / parameter combinations
    if type(Unscattered_Movie) != movie.Movie and framedur_sec is None:
        print("If scattering a list of images or static image, the framedur must be specified!")
        return
    if type(Unscattered_Movie) == image.Image and N_frames is None:
        print("If scattering a static image, the total number of frames must be specified (N_frames)!")
        return
    # time list in hr
    if hasattr(Unscattered_Movie, 'times'):
        tlist_hr = Unscattered_Movie.times
    else:
        tlist_hr = [framedur_sec/3600.0*j for j in range(N_frames)]
    # Extract common frame metadata for each of the three accepted input types
    if type(Unscattered_Movie) == movie.Movie:
        N = Unscattered_Movie.xdim
        N_frames = len(Unscattered_Movie.frames)
        psize = Unscattered_Movie.psize
        ra = Unscattered_Movie.ra
        dec = Unscattered_Movie.dec
        rf = Unscattered_Movie.rf
        pulse=Unscattered_Movie.pulse
        source=Unscattered_Movie.source
        mjd=Unscattered_Movie.mjd
        start_hr=Unscattered_Movie.start_hr
        has_pol = len(Unscattered_Movie.qframes)
        has_circ_pol = len(Unscattered_Movie.vframes)
    elif type(Unscattered_Movie) == list:
        N = Unscattered_Movie[0].xdim
        N_frames = len(Unscattered_Movie)
        psize = Unscattered_Movie[0].psize
        ra = Unscattered_Movie[0].ra
        dec = Unscattered_Movie[0].dec
        rf = Unscattered_Movie[0].rf
        pulse=Unscattered_Movie[0].pulse
        source=Unscattered_Movie[0].source
        mjd=Unscattered_Movie[0].mjd
        start_hr=0.0
        has_pol = len(Unscattered_Movie[0].qvec)
        has_circ_pol = len(Unscattered_Movie[0].vvec)
    else:
        N = Unscattered_Movie.xdim
        psize = Unscattered_Movie.psize
        ra = Unscattered_Movie.ra
        dec = Unscattered_Movie.dec
        rf = Unscattered_Movie.rf
        pulse=Unscattered_Movie.pulse
        source=Unscattered_Movie.source
        mjd=Unscattered_Movie.mjd
        start_hr=0.0
        has_pol = len(Unscattered_Movie.qvec)
        has_circ_pol = len(Unscattered_Movie.vvec)
    def get_frame(j):
        # Return frame j as an Image regardless of the input container type
        if type(Unscattered_Movie) == movie.Movie:
            im = image.Image(Unscattered_Movie.frames[j].reshape((N,N)), psize=psize, ra=ra, dec=dec, rf=rf, pulse=pulse, source=source, mjd=mjd)
            if len(Unscattered_Movie.qframes) > 0:
                im.add_qu(Unscattered_Movie.qframes[j].reshape((N,N)), Unscattered_Movie.uframes[j].reshape((N,N)))
            if len(Unscattered_Movie.vframes) > 0:
                im.add_v(Unscattered_Movie.vframes[j].reshape((N,N)))
            return im
        elif type(Unscattered_Movie) == list:
            return Unscattered_Movie[j]
        else:
            # Static image: every frame is the same image
            return Unscattered_Movie
    #If it isn't specified, calculate the matrix sqrtQ for efficiency
    if sqrtQ is None:
        sqrtQ = self.sqrtQ_Matrix(get_frame(0))
    # If no epsilon screen is specified, then generate a random realization
    if Epsilon_Screen.shape[0] == 0:
        Epsilon_Screen = MakeEpsilonScreen(N, N)
    # possibly parallelize
    if processes < 0:
        processes = cpu_count()
    processes = min(processes, N_frames)
    # generate scattered images
    if processes > 0:
        # Fan out over a pool: Scatter2 unpacks each (args, kwargs) work item
        pool = Pool(processes=processes)
        args = [
            (
                [get_frame(j), Epsilon_Screen],
                dict(obs_frequency_Hz = obs_frequency_Hz, Vx_km_per_s = Vx_km_per_s, Vy_km_per_s = Vy_km_per_s, t_hr=tlist_hr[j], sqrtQ=sqrtQ, Linearized_Approximation=Linearized_Approximation, Force_Positivity=Force_Positivity)
            ) for j in range(N_frames)
        ]
        scattered_im_List = pool.starmap(self.Scatter2, args)
        pool.close()
        pool.join()
    else:
        scattered_im_List = [self.Scatter(get_frame(j), Epsilon_Screen, obs_frequency_Hz = obs_frequency_Hz, Vx_km_per_s = Vx_km_per_s, Vy_km_per_s = Vy_km_per_s, t_hr=tlist_hr[j], ea_ker=ea_ker, sqrtQ=sqrtQ, Linearized_Approximation=Linearized_Approximation, Force_Positivity=Force_Positivity) for j in range(N_frames)]
    if Return_Image_List == True:
        return scattered_im_List
    # Assemble the scattered frames back into a Movie object
    Scattered_Movie = movie.Movie( [im.imvec.reshape((im.xdim,im.ydim)) for im in scattered_im_List],
                                   times=tlist_hr, psize = psize, ra = ra, dec = dec, rf=rf, pulse=pulse, source=source, mjd=mjd)
    if has_pol:
        Scattered_Movie_Q = [im.qvec.reshape((im.xdim,im.ydim)) for im in scattered_im_List]
        Scattered_Movie_U = [im.uvec.reshape((im.xdim,im.ydim)) for im in scattered_im_List]
        Scattered_Movie.add_qu(Scattered_Movie_Q, Scattered_Movie_U)
    if has_circ_pol:
        Scattered_Movie_V = [im.vvec.reshape((im.xdim,im.ydim)) for im in scattered_im_List]
        Scattered_Movie.add_v(Scattered_Movie_V)
    return Scattered_Movie
################################################################################
# These are helper functions
################################################################################
def Wrapped_Convolve(sig, ker):
    """Convolve *sig* with *ker* using periodic (wrapped) boundary conditions.

    Both inputs are square N x N arrays. The signal is tiled by wrapping one
    full period on every side, the kernel is zero-padded to the same size,
    and the central N x N window of the FFT convolution is returned.
    """
    N = sig.shape[0]
    tiled_sig = np.pad(sig, ((N, N), (N, N)), 'wrap')
    padded_ker = np.pad(ker, ((N, N), (N, N)), 'constant')
    full = scipy.signal.fftconvolve(tiled_sig, padded_ker, mode='same')
    return full[N:(2 * N), N:(2 * N)]
def Wrapped_Gradient(M):
    """Gradient of a 2D array *M* with periodic boundary conditions.

    The array is wrapped by one pixel on every edge before differencing,
    so the returned gradients are periodic. Returns the pair (Gx, Gy) of
    gradients along axis 0 and axis 1, trimmed back to M's shape.
    """
    padded = np.pad(M, ((1, 1), (1, 1)), 'wrap')
    grad_axis0, grad_axis1 = np.gradient(padded)
    return (grad_axis0[1:-1, 1:-1], grad_axis1[1:-1, 1:-1])
def MakeEpsilonScreenFromList(EpsilonList, N):
    """Assemble an N x N complex Fourier-domain screen from a flat list.

    EpsilonList holds the real parts of the independent Fourier coefficients
    followed by their matching imaginary parts (the DC element is excluded).
    Conjugation symmetry epsilon[-k] = conj(epsilon[k]) is imposed so that
    the inverse FFT of the returned screen is real.

    Args:
        EpsilonList: flat sequence of N^2 - 1 real coefficients.
        N (int): linear size of the (square) screen.

    Returns:
        (N, N) complex numpy ndarray with Hermitian (conjugate) symmetry.
    """
    # np.complex was deprecated in NumPy 1.20 and removed in 1.24; the
    # builtin `complex` yields the same complex128 dtype.
    epsilon = np.zeros((N, N), dtype=complex)

    #If N is odd: there are (N^2-1)/2 real elements followed by their corresponding (N^2-1)/2 imaginary elements
    #If N is even: there are (N^2+2)/2 of each, although 3 of these must be purely real, also giving a total of N^2-1 degrees of freedom
    #This is because of conjugation symmetry in Fourier space to ensure a real Fourier transform

    # Offset from a coefficient's real part to its imaginary part in the list.
    N_re = (N*N-1)//2 # FIXME: check logic if N is even
    i = 0

    #The first (N-1)/2 are the top row
    for x in range(1,(N+1)//2): # FIXME: check logic if N is even
        epsilon[0][x] = EpsilonList[i] + 1j * EpsilonList[i+N_re]
        epsilon[0][N-x] = np.conjugate(epsilon[0][x])
        i=i+1

    #The next N(N-1)/2 are filling the next N rows
    for y in range(1,(N+1)//2): # FIXME: check logic if N is even
        for x in range(N):
            epsilon[y][x] = EpsilonList[i] + 1j * EpsilonList[i+N_re]
            # Mirror index (N - x, N - y), with N folded back to 0.
            x2 = N - x
            y2 = N - y
            if x2 == N:
                x2 = 0
            if y2 == N:
                y2 = 0
            epsilon[y2][x2] = np.conjugate(epsilon[y][x])
            i=i+1

    return epsilon
def MakeEpsilonScreen(Nx, Ny, rngseed = 0):
    """Create a standardized Fourier representation of a scattering screen

    Args:
        Nx (int): Number of pixels in the x direction
        Ny (int): Number of pixels in the y direction
        rngseed (int): Seed for the random number generator

    Returns:
        epsilon: A 2D numpy ndarray.
    """

    # Seed only on request; rngseed == 0 leaves the global RNG state alone.
    if rngseed != 0:
        np.random.seed( rngseed )

    # Complex Gaussian draws: real and imaginary parts each have variance
    # 1/2, so each complex coefficient has unit variance overall.
    epsilon = np.random.normal(loc=0.0, scale=1.0/math.sqrt(2), size=(Ny,Nx)) + 1j * np.random.normal(loc=0.0, scale=1.0/math.sqrt(2), size=(Ny,Nx))

    # The zero frequency doesn't affect scattering
    epsilon[0][0] = 0.0

    #Now let's ensure that it has the necessary conjugation symmetry
    # Nyquist-frequency elements (present only for even dimensions) are their
    # own conjugate mirror, so they must be purely real.
    if Nx%2 == 0:
        epsilon[0][Nx//2] = np.real(epsilon[0][Nx//2])
    if Ny%2 == 0:
        epsilon[Ny//2][0] = np.real(epsilon[Ny//2][0])
    if Nx%2 == 0 and Ny%2 == 0:
        epsilon[Ny//2][Nx//2] = np.real(epsilon[Ny//2][Nx//2])

    # Impose Hermitian symmetry epsilon[-ky, -kx] = conj(epsilon[ky, kx])
    # so the inverse FFT of the screen is real-valued. Indices N map back
    # to 0 because of periodicity.
    for x in range(Nx):
        if x > (Nx-1)//2:
            epsilon[0][x] = np.conjugate(epsilon[0][Nx-x])
        for y in range((Ny-1)//2, Ny):
            x2 = Nx - x
            y2 = Ny - y
            if x2 == Nx:
                x2 = 0
            if y2 == Ny:
                y2 = 0
            epsilon[y][x] = np.conjugate(epsilon[y2][x2])

    return epsilon
##################################################################################################
# Plotting Functions
##################################################################################################
def plot_scatt(im_unscatt, im_ea, im_scatt, im_phase, Prior, nit, chi2, ipynb=False):
    """Display a 4-panel comparison of the scattering products.

    Panels: unscattered image, ensemble-average image, average (scattered)
    image, and the scattering phase screen.

    Args:
        im_unscatt: flattened unscattered image vector.
        im_ea: flattened ensemble-average image vector.
        im_scatt: flattened average (scattered) image vector.
        im_phase: flattened phase-screen vector.
        Prior: image object supplying xdim, ydim and psize for axis labels.
        nit (int): iteration number shown in the figure title.
        chi2 (float): chi-squared shown in the title when positive.
        ipynb (bool): unused; kept for interface compatibility.
    """

    def _panel(position, imvec, title, vmin=0):
        # One imshow panel with relative RA/Dec axes in micro-arcseconds.
        plt.subplot(position)
        plt.imshow(imvec.reshape(Prior.ydim, Prior.xdim),
                   cmap=plt.get_cmap('afmhot'), interpolation='gaussian', vmin=vmin)
        xticks = ticks(Prior.xdim, Prior.psize/RADPERAS/1e-6)
        yticks = ticks(Prior.ydim, Prior.psize/RADPERAS/1e-6)
        plt.xticks(xticks[0], xticks[1])
        plt.yticks(yticks[0], yticks[1])
        # Raw strings keep the LaTeX backslashes without escape warnings.
        plt.xlabel(r'Relative RA ($\mu$as)')
        plt.ylabel(r'Relative Dec ($\mu$as)')
        plt.title(title)

    # Create figure and title
    plt.ion()
    plt.clf()
    if chi2 > 0.0:
        plt.suptitle(r"step: %i $\chi^2$: %f " % (nit, chi2), fontsize=20)

    _panel(141, im_unscatt, 'Unscattered')
    _panel(142, im_ea, 'Ensemble Average')
    _panel(143, im_scatt, 'Average Image')
    # The phase screen is signed, so no lower colour bound is imposed.
    _panel(144, im_phase, 'Phase Screen', vmin=None)

    # Display
    plt.draw()
|
achaelREPO_NAMEeht-imagingPATH_START.@eht-imaging_extracted@eht-imaging-main@ehtim@scattering@stochastic_optics.py@.PATH_END.py
|
{
"filename": "_font.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/heatmapgl/hoverlabel/_font.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class FontValidator(_plotly_utils.basevalidators.CompoundValidator):
    """Compound validator for the ``heatmapgl.hoverlabel.font`` property.

    Auto-generated from the plotly schema; ``data_docs`` below documents the
    attributes of the nested Font object at runtime.
    """

    def __init__(
        self, plotly_name="font", parent_name="heatmapgl.hoverlabel", **kwargs
    ):
        # kwargs.pop allows subclasses/callers to override the generated
        # defaults while forwarding everything else to the base validator.
        super(FontValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            data_class_str=kwargs.pop("data_class_str", "Font"),
            data_docs=kwargs.pop(
                "data_docs",
                """
color
colorsrc
Sets the source reference on Chart Studio Cloud
for `color`.
family
HTML font family - the typeface that will be
applied by the web browser. The web browser
will only be able to apply a font if it is
available on the system which it operates.
Provide multiple font families, separated by
commas, to indicate the preference in which to
apply fonts if they aren't available on the
system. The Chart Studio Cloud (at
https://chart-studio.plotly.com or on-premise)
generates images on a server, where only a
select number of fonts are installed and
supported. These include "Arial", "Balto",
"Courier New", "Droid Sans", "Droid Serif",
"Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
familysrc
Sets the source reference on Chart Studio Cloud
for `family`.
lineposition
Sets the kind of decoration line(s) with text,
such as an "under", "over" or "through" as well
as combinations e.g. "under+over", etc.
linepositionsrc
Sets the source reference on Chart Studio Cloud
for `lineposition`.
shadow
Sets the shape and color of the shadow behind
text. "auto" places minimal shadow and applies
contrast text font color. See
https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional
options.
shadowsrc
Sets the source reference on Chart Studio Cloud
for `shadow`.
size
sizesrc
Sets the source reference on Chart Studio Cloud
for `size`.
style
Sets whether a font should be styled with a
normal or italic face from its family.
stylesrc
Sets the source reference on Chart Studio Cloud
for `style`.
textcase
Sets capitalization of text. It can be used to
make text appear in all-uppercase or all-
lowercase, or with each word capitalized.
textcasesrc
Sets the source reference on Chart Studio Cloud
for `textcase`.
variant
Sets the variant of the font.
variantsrc
Sets the source reference on Chart Studio Cloud
for `variant`.
weight
Sets the weight (or boldness) of the font.
weightsrc
Sets the source reference on Chart Studio Cloud
for `weight`.
""",
            ),
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@heatmapgl@hoverlabel@_font.py@.PATH_END.py
|
{
"filename": "__version__.py",
"repo_name": "danielkoll/PyRADS-shortwave",
"repo_path": "PyRADS-shortwave_extracted/PyRADS-shortwave-master/pyDISORT-master/build/lib/disort/__version__.py",
"type": "Python"
}
|
# Package version string for the pyDISORT build.
__version__ = '0.0.1'
|
danielkollREPO_NAMEPyRADS-shortwavePATH_START.@PyRADS-shortwave_extracted@PyRADS-shortwave-master@pyDISORT-master@build@lib@disort@__version__.py@.PATH_END.py
|
{
"filename": "_gridwidth.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/layout/ternary/baxis/_gridwidth.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class GridwidthValidator(_plotly_utils.basevalidators.NumberValidator):
    """Number validator for ``layout.ternary.baxis.gridwidth``.

    Auto-generated from the plotly schema; enforces a non-negative width
    and marks changes as plot-level edits.
    """

    def __init__(
        self, plotly_name="gridwidth", parent_name="layout.ternary.baxis", **kwargs
    ):
        super(GridwidthValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=kwargs.pop("edit_type", "plot"),
            min=kwargs.pop("min", 0),
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@layout@ternary@baxis@_gridwidth.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "jpcoles/glass",
"repo_path": "glass_extracted/glass-master/glass/solvers/rwalk/__init__.py",
"type": "Python"
}
|
jpcolesREPO_NAMEglassPATH_START.@glass_extracted@glass-master@glass@solvers@rwalk@__init__.py@.PATH_END.py
|
|
{
"filename": "edenai.py",
"repo_name": "langchain-ai/langchain",
"repo_path": "langchain_extracted/langchain-master/libs/community/langchain_community/embeddings/edenai.py",
"type": "Python"
}
|
from typing import Any, Dict, List, Optional
from langchain_core.embeddings import Embeddings
from langchain_core.utils import convert_to_secret_str, get_from_dict_or_env, pre_init
from pydantic import (
BaseModel,
ConfigDict,
Field,
SecretStr,
)
from langchain_community.utilities.requests import Requests
class EdenAiEmbeddings(BaseModel, Embeddings):
    """EdenAI embedding.

    environment variable ``EDENAI_API_KEY`` set with your API key, or pass
    it as a named parameter.
    """

    # Resolved from the EDENAI_API_KEY env var by validate_environment
    # when not passed explicitly.
    edenai_api_key: Optional[SecretStr] = Field(None, description="EdenAI API Token")

    provider: str = "openai"
    """embedding provider to use (eg: openai,google etc.)"""

    model: Optional[str] = None
    """
    model name for above provider (eg: 'gpt-3.5-turbo-instruct' for openai)
    available models are shown on https://docs.edenai.co/ under 'available providers'
    """

    # Reject unknown fields so typos in config surface immediately.
    model_config = ConfigDict(
        extra="forbid",
    )

    @pre_init
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate that api key exists in environment."""
        values["edenai_api_key"] = convert_to_secret_str(
            get_from_dict_or_env(values, "edenai_api_key", "EDENAI_API_KEY")
        )
        return values

    @staticmethod
    def get_user_agent() -> str:
        """Return the User-Agent value identifying this langchain version."""
        from langchain_community import __version__

        return f"langchain/{__version__}"

    def _generate_embeddings(self, texts: List[str]) -> List[List[float]]:
        """Compute embeddings using EdenAi api.

        Raises:
            ValueError: on 4xx responses (invalid payload).
            Exception: on 5xx responses, unexpected status codes, or a
                provider-level "fail" status inside a 200 response body.
        """
        url = "https://api.edenai.run/v2/text/embeddings"
        headers = {
            "accept": "application/json",
            "content-type": "application/json",
            "authorization": f"Bearer {self.edenai_api_key.get_secret_value()}",  # type: ignore[union-attr]
            "User-Agent": self.get_user_agent(),
        }
        payload: Dict[str, Any] = {"texts": texts, "providers": self.provider}
        # Optional per-provider model override delivered via "settings".
        if self.model is not None:
            payload["settings"] = {self.provider: self.model}
        request = Requests(headers=headers)
        response = request.post(url=url, data=payload)
        # Map HTTP status ranges to error types before parsing the body.
        if response.status_code >= 500:
            raise Exception(f"EdenAI Server: Error {response.status_code}")
        elif response.status_code >= 400:
            raise ValueError(f"EdenAI received an invalid payload: {response.text}")
        elif response.status_code != 200:
            raise Exception(
                f"EdenAI returned an unexpected response with status "
                f"{response.status_code}: {response.text}"
            )
        temp = response.json()
        # A 200 response can still report a provider-level failure.
        provider_response = temp[self.provider]
        if provider_response.get("status") == "fail":
            err_msg = provider_response.get("error", {}).get("message")
            raise Exception(err_msg)
        embeddings = []
        for embed_item in temp[self.provider]["items"]:
            embedding = embed_item["embedding"]
            embeddings.append(embedding)
        return embeddings

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """Embed a list of documents using EdenAI.

        Args:
            texts: The list of texts to embed.

        Returns:
            List of embeddings, one for each text.
        """
        return self._generate_embeddings(texts)

    def embed_query(self, text: str) -> List[float]:
        """Embed a query using EdenAI.

        Args:
            text: The text to embed.

        Returns:
            Embeddings for the text.
        """
        return self._generate_embeddings([text])[0]
|
langchain-aiREPO_NAMElangchainPATH_START.@langchain_extracted@langchain-master@libs@community@langchain_community@embeddings@edenai.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/graph_objs/violin/legendgrouptitle/__init__.py",
"type": "Python"
}
|
import sys
from typing import TYPE_CHECKING
if sys.version_info < (3, 7) or TYPE_CHECKING:
from ._font import Font
else:
from _plotly_utils.importers import relative_import
__all__, __getattr__, __dir__ = relative_import(__name__, [], ["._font.Font"])
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@graph_objs@violin@legendgrouptitle@__init__.py@.PATH_END.py
|
{
"filename": "_base.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/scikit-learn/py3/sklearn/mixture/_base.py",
"type": "Python"
}
|
"""Base class for mixture models."""
# Author: Wei Xue <xuewei4d@gmail.com>
# Modified by Thierry Guillemot <thierry.guillemot.work@gmail.com>
# License: BSD 3 clause
import warnings
from abc import ABCMeta, abstractmethod
from numbers import Integral, Real
from time import time
import numpy as np
from scipy.special import logsumexp
from .. import cluster
from ..base import BaseEstimator, DensityMixin, _fit_context
from ..cluster import kmeans_plusplus
from ..exceptions import ConvergenceWarning
from ..utils import check_random_state
from ..utils._param_validation import Interval, StrOptions
from ..utils.validation import check_is_fitted
def _check_shape(param, param_shape, name):
"""Validate the shape of the input parameter 'param'.
Parameters
----------
param : array
param_shape : tuple
name : str
"""
param = np.array(param)
if param.shape != param_shape:
raise ValueError(
"The parameter '%s' should have the shape of %s, but got %s"
% (name, param_shape, param.shape)
)
class BaseMixture(DensityMixin, BaseEstimator, metaclass=ABCMeta):
    """Base class for mixture models.

    This abstract class specifies an interface for all mixture classes and
    provides basic common methods for mixture models.
    """

    # Declarative constraints consumed by scikit-learn's parameter validation.
    _parameter_constraints: dict = {
        "n_components": [Interval(Integral, 1, None, closed="left")],
        "tol": [Interval(Real, 0.0, None, closed="left")],
        "reg_covar": [Interval(Real, 0.0, None, closed="left")],
        "max_iter": [Interval(Integral, 0, None, closed="left")],
        "n_init": [Interval(Integral, 1, None, closed="left")],
        "init_params": [
            StrOptions({"kmeans", "random", "random_from_data", "k-means++"})
        ],
        "random_state": ["random_state"],
        "warm_start": ["boolean"],
        "verbose": ["verbose"],
        "verbose_interval": [Interval(Integral, 1, None, closed="left")],
    }

    def __init__(
        self,
        n_components,
        tol,
        reg_covar,
        max_iter,
        n_init,
        init_params,
        random_state,
        warm_start,
        verbose,
        verbose_interval,
    ):
        self.n_components = n_components
        self.tol = tol
        self.reg_covar = reg_covar
        self.max_iter = max_iter
        self.n_init = n_init
        self.init_params = init_params
        self.random_state = random_state
        self.warm_start = warm_start
        self.verbose = verbose
        self.verbose_interval = verbose_interval

    @abstractmethod
    def _check_parameters(self, X):
        """Check initial parameters of the derived class.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
        """
        pass

    def _initialize_parameters(self, X, random_state):
        """Initialize the model parameters.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)

        random_state : RandomState
            A random number generator instance that controls the random seed
            used for the method chosen to initialize the parameters.
        """
        n_samples, _ = X.shape

        # Build an (n_samples, n_components) responsibility matrix according
        # to the chosen initialization strategy, then hand it to the
        # subclass-specific _initialize.
        if self.init_params == "kmeans":
            resp = np.zeros((n_samples, self.n_components))
            label = (
                cluster.KMeans(
                    n_clusters=self.n_components, n_init=1, random_state=random_state
                )
                .fit(X)
                .labels_
            )
            resp[np.arange(n_samples), label] = 1
        elif self.init_params == "random":
            resp = random_state.uniform(size=(n_samples, self.n_components))
            resp /= resp.sum(axis=1)[:, np.newaxis]
        elif self.init_params == "random_from_data":
            resp = np.zeros((n_samples, self.n_components))
            indices = random_state.choice(
                n_samples, size=self.n_components, replace=False
            )
            resp[indices, np.arange(self.n_components)] = 1
        elif self.init_params == "k-means++":
            resp = np.zeros((n_samples, self.n_components))
            _, indices = kmeans_plusplus(
                X,
                self.n_components,
                random_state=random_state,
            )
            resp[indices, np.arange(self.n_components)] = 1

        self._initialize(X, resp)

    @abstractmethod
    def _initialize(self, X, resp):
        """Initialize the model parameters of the derived class.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)

        resp : array-like of shape (n_samples, n_components)
        """
        pass

    def fit(self, X, y=None):
        """Estimate model parameters with the EM algorithm.

        The method fits the model ``n_init`` times and sets the parameters with
        which the model has the largest likelihood or lower bound. Within each
        trial, the method iterates between E-step and M-step for ``max_iter``
        times until the change of likelihood or lower bound is less than
        ``tol``, otherwise, a ``ConvergenceWarning`` is raised.
        If ``warm_start`` is ``True``, then ``n_init`` is ignored and a single
        initialization is performed upon the first call. Upon consecutive
        calls, training starts where it left off.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            List of n_features-dimensional data points. Each row
            corresponds to a single data point.

        y : Ignored
            Not used, present for API consistency by convention.

        Returns
        -------
        self : object
            The fitted mixture.
        """
        # parameters are validated in fit_predict
        self.fit_predict(X, y)
        return self

    @_fit_context(prefer_skip_nested_validation=True)
    def fit_predict(self, X, y=None):
        """Estimate model parameters using X and predict the labels for X.

        The method fits the model n_init times and sets the parameters with
        which the model has the largest likelihood or lower bound. Within each
        trial, the method iterates between E-step and M-step for `max_iter`
        times until the change of likelihood or lower bound is less than
        `tol`, otherwise, a :class:`~sklearn.exceptions.ConvergenceWarning` is
        raised. After fitting, it predicts the most probable label for the
        input data points.

        .. versionadded:: 0.20

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            List of n_features-dimensional data points. Each row
            corresponds to a single data point.

        y : Ignored
            Not used, present for API consistency by convention.

        Returns
        -------
        labels : array, shape (n_samples,)
            Component labels.
        """
        X = self._validate_data(X, dtype=[np.float64, np.float32], ensure_min_samples=2)
        if X.shape[0] < self.n_components:
            raise ValueError(
                "Expected n_samples >= n_components "
                f"but got n_components = {self.n_components}, "
                f"n_samples = {X.shape[0]}"
            )
        self._check_parameters(X)

        # if we enable warm_start, we will have a unique initialisation
        do_init = not (self.warm_start and hasattr(self, "converged_"))
        n_init = self.n_init if do_init else 1

        max_lower_bound = -np.inf
        self.converged_ = False

        random_state = check_random_state(self.random_state)

        n_samples, _ = X.shape
        for init in range(n_init):
            self._print_verbose_msg_init_beg(init)

            if do_init:
                self._initialize_parameters(X, random_state)

            lower_bound = -np.inf if do_init else self.lower_bound_

            if self.max_iter == 0:
                best_params = self._get_parameters()
                best_n_iter = 0
            else:
                # EM loop: alternate E- and M-steps until the lower bound
                # change falls below tol or max_iter is exhausted.
                for n_iter in range(1, self.max_iter + 1):
                    prev_lower_bound = lower_bound

                    log_prob_norm, log_resp = self._e_step(X)
                    self._m_step(X, log_resp)
                    lower_bound = self._compute_lower_bound(log_resp, log_prob_norm)

                    change = lower_bound - prev_lower_bound
                    self._print_verbose_msg_iter_end(n_iter, change)

                    if abs(change) < self.tol:
                        self.converged_ = True
                        break

                self._print_verbose_msg_init_end(lower_bound)

                # Keep the parameters of the best-scoring initialization.
                if lower_bound > max_lower_bound or max_lower_bound == -np.inf:
                    max_lower_bound = lower_bound
                    best_params = self._get_parameters()
                    best_n_iter = n_iter

        # Should only warn about convergence if max_iter > 0, otherwise
        # the user is assumed to have used 0-iters initialization
        # to get the initial means.
        if not self.converged_ and self.max_iter > 0:
            warnings.warn(
                "Initialization %d did not converge. "
                "Try different init parameters, "
                "or increase max_iter, tol "
                "or check for degenerate data." % (init + 1),
                ConvergenceWarning,
            )

        self._set_parameters(best_params)
        self.n_iter_ = best_n_iter
        self.lower_bound_ = max_lower_bound

        # Always do a final e-step to guarantee that the labels returned by
        # fit_predict(X) are always consistent with fit(X).predict(X)
        # for any value of max_iter and tol (and any random_state).
        _, log_resp = self._e_step(X)

        return log_resp.argmax(axis=1)

    def _e_step(self, X):
        """E step.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)

        Returns
        -------
        log_prob_norm : float
            Mean of the logarithms of the probabilities of each sample in X

        log_responsibility : array, shape (n_samples, n_components)
            Logarithm of the posterior probabilities (or responsibilities) of
            the point of each sample in X.
        """
        log_prob_norm, log_resp = self._estimate_log_prob_resp(X)
        return np.mean(log_prob_norm), log_resp

    @abstractmethod
    def _m_step(self, X, log_resp):
        """M step.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)

        log_resp : array-like of shape (n_samples, n_components)
            Logarithm of the posterior probabilities (or responsibilities) of
            the point of each sample in X.
        """
        pass

    @abstractmethod
    def _get_parameters(self):
        pass

    @abstractmethod
    def _set_parameters(self, params):
        pass

    def score_samples(self, X):
        """Compute the log-likelihood of each sample.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            List of n_features-dimensional data points. Each row
            corresponds to a single data point.

        Returns
        -------
        log_prob : array, shape (n_samples,)
            Log-likelihood of each sample in `X` under the current model.
        """
        check_is_fitted(self)
        X = self._validate_data(X, reset=False)

        return logsumexp(self._estimate_weighted_log_prob(X), axis=1)

    def score(self, X, y=None):
        """Compute the per-sample average log-likelihood of the given data X.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_dimensions)
            List of n_features-dimensional data points. Each row
            corresponds to a single data point.

        y : Ignored
            Not used, present for API consistency by convention.

        Returns
        -------
        log_likelihood : float
            Log-likelihood of `X` under the Gaussian mixture model.
        """
        return self.score_samples(X).mean()

    def predict(self, X):
        """Predict the labels for the data samples in X using trained model.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            List of n_features-dimensional data points. Each row
            corresponds to a single data point.

        Returns
        -------
        labels : array, shape (n_samples,)
            Component labels.
        """
        check_is_fitted(self)
        X = self._validate_data(X, reset=False)
        return self._estimate_weighted_log_prob(X).argmax(axis=1)

    def predict_proba(self, X):
        """Evaluate the components' density for each sample.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            List of n_features-dimensional data points. Each row
            corresponds to a single data point.

        Returns
        -------
        resp : array, shape (n_samples, n_components)
            Density of each Gaussian component for each sample in X.
        """
        check_is_fitted(self)
        X = self._validate_data(X, reset=False)
        _, log_resp = self._estimate_log_prob_resp(X)
        return np.exp(log_resp)

    def sample(self, n_samples=1):
        """Generate random samples from the fitted Gaussian distribution.

        Parameters
        ----------
        n_samples : int, default=1
            Number of samples to generate.

        Returns
        -------
        X : array, shape (n_samples, n_features)
            Randomly generated sample.

        y : array, shape (nsamples,)
            Component labels.
        """
        check_is_fitted(self)

        if n_samples < 1:
            # Report the offending value (n_samples), not n_components.
            raise ValueError(
                "Invalid value for 'n_samples': %d . The sampling requires at "
                "least one sample." % (n_samples)
            )

        _, n_features = self.means_.shape
        rng = check_random_state(self.random_state)
        # Draw the number of samples per component from the mixture weights.
        n_samples_comp = rng.multinomial(n_samples, self.weights_)

        if self.covariance_type == "full":
            X = np.vstack(
                [
                    rng.multivariate_normal(mean, covariance, int(sample))
                    for (mean, covariance, sample) in zip(
                        self.means_, self.covariances_, n_samples_comp
                    )
                ]
            )
        elif self.covariance_type == "tied":
            X = np.vstack(
                [
                    rng.multivariate_normal(mean, self.covariances_, int(sample))
                    for (mean, sample) in zip(self.means_, n_samples_comp)
                ]
            )
        else:
            # Diagonal/spherical: scale standard normals per dimension.
            X = np.vstack(
                [
                    mean
                    + rng.standard_normal(size=(sample, n_features))
                    * np.sqrt(covariance)
                    for (mean, covariance, sample) in zip(
                        self.means_, self.covariances_, n_samples_comp
                    )
                ]
            )

        y = np.concatenate(
            [np.full(sample, j, dtype=int) for j, sample in enumerate(n_samples_comp)]
        )

        return (X, y)

    def _estimate_weighted_log_prob(self, X):
        """Estimate the weighted log-probabilities, log P(X | Z) + log weights.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)

        Returns
        -------
        weighted_log_prob : array, shape (n_samples, n_component)
        """
        return self._estimate_log_prob(X) + self._estimate_log_weights()

    @abstractmethod
    def _estimate_log_weights(self):
        """Estimate log-weights in EM algorithm, E[ log pi ] in VB algorithm.

        Returns
        -------
        log_weight : array, shape (n_components, )
        """
        pass

    @abstractmethod
    def _estimate_log_prob(self, X):
        """Estimate the log-probabilities log P(X | Z).

        Compute the log-probabilities per each component for each sample.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)

        Returns
        -------
        log_prob : array, shape (n_samples, n_component)
        """
        pass

    def _estimate_log_prob_resp(self, X):
        """Estimate log probabilities and responsibilities for each sample.

        Compute the log probabilities, weighted log probabilities per
        component and responsibilities for each sample in X with respect to
        the current state of the model.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)

        Returns
        -------
        log_prob_norm : array, shape (n_samples,)
            log p(X)

        log_responsibilities : array, shape (n_samples, n_components)
            logarithm of the responsibilities
        """
        weighted_log_prob = self._estimate_weighted_log_prob(X)
        log_prob_norm = logsumexp(weighted_log_prob, axis=1)
        with np.errstate(under="ignore"):
            # ignore underflow
            log_resp = weighted_log_prob - log_prob_norm[:, np.newaxis]
        return log_prob_norm, log_resp

    def _print_verbose_msg_init_beg(self, n_init):
        """Print verbose message on initialization."""
        if self.verbose == 1:
            print("Initialization %d" % n_init)
        elif self.verbose >= 2:
            print("Initialization %d" % n_init)
            self._init_prev_time = time()
            self._iter_prev_time = self._init_prev_time

    def _print_verbose_msg_iter_end(self, n_iter, diff_ll):
        """Print verbose message at the end of an EM iteration."""
        if n_iter % self.verbose_interval == 0:
            if self.verbose == 1:
                print("  Iteration %d" % n_iter)
            elif self.verbose >= 2:
                cur_time = time()
                print(
                    "  Iteration %d\t time lapse %.5fs\t ll change %.5f"
                    % (n_iter, cur_time - self._iter_prev_time, diff_ll)
                )
                self._iter_prev_time = cur_time

    def _print_verbose_msg_init_end(self, ll):
        """Print verbose message on the end of iteration."""
        if self.verbose == 1:
            print("Initialization converged: %s" % self.converged_)
        elif self.verbose >= 2:
            print(
                "Initialization converged: %s\t time lapse %.5fs\t ll %.5f"
                % (self.converged_, time() - self._init_prev_time, ll)
            )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@scikit-learn@py3@sklearn@mixture@_base.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "riogroup/SORA",
"repo_path": "SORA_extracted/SORA-master/sora/lightcurve/__init__.py",
"type": "Python"
}
|
# Public API of the lightcurve subpackage: re-export the core LightCurve
# class and utility helpers; __all__ restricts `import *` to LightCurve.
from .core import *
from .utils import *

__all__ = ['LightCurve']
|
riogroupREPO_NAMESORAPATH_START.@SORA_extracted@SORA-master@sora@lightcurve@__init__.py@.PATH_END.py
|
{
"filename": "Web-Redirection-Service.md",
"repo_name": "crossbario/crossbar",
"repo_path": "crossbar_extracted/crossbar-master/docs-old/pages/administration/web-service/Web-Redirection-Service.md",
"type": "Markdown"
}
|
title: Web Redirection Service
toc: [Documentation, Administration, Web Services, Web Redirection Service]
# Web Redirection Service
## Configuration
To configure a Web Redirection Service, attach a dictionary element to a path in your [Web transport](Web Transport and Services):
option | description
---|---
**`type`** | must be `"redirect"`
**`url`** | the HTTP(S) URL where to redirect to, e.g. `"http://somehost:8080/something"`.
## Example
Here is how you define a **Web Transport** that redirects HTTP (and WebSocket) on port 80 to secure HTTPS (and secure WebSocket) on port 443:
```javascript
{
"type": "web",
"endpoint": {
"type": "tcp",
"port": 80
},
"paths": {
"/": {
"type": "redirect",
"url": "https://example.com"
}
}
}
```
> The example above assumes that the host's name is **example.com**
The single parameter to the *Redirection* service is `url`, which can take different forms:
* `../foobar` (relative)
* `/download` (absolute)
* `https://example.com` (fully qualified)
You can also redirect *subpaths* on a **Web Transport**:
```javascript
{
"type": "web",
"endpoint": {
"type": "tcp",
"port": 80
},
"paths": {
"/": {
"type": "static",
"directory": ".."
},
"ws": {
"type": "websocket",
"url": "ws://localhost:8080/ws"
},
"tavendo": {
"type": "redirect",
"url": "http://somewhere.com/to/something"
}
}
}
```
---
|
crossbarioREPO_NAMEcrossbarPATH_START.@crossbar_extracted@crossbar-master@docs-old@pages@administration@web-service@Web-Redirection-Service.md@.PATH_END.py
|
{
"filename": "test_utils.py",
"repo_name": "galsci/pysm",
"repo_path": "pysm_extracted/pysm-main/tests/test_utils.py",
"type": "Python"
}
|
from urllib.error import URLError
import numpy as np
try:
from numpy import trapezoid
except ImportError:
from numpy import trapz as trapezoid
import pixell.enmap
import pytest
from astropy.io import fits
from astropy.tests.helper import assert_quantity_allclose
import pysm3
import pysm3.units as u
from pysm3 import utils
def test_get_relevant_frequencies():
    """Check that the bracketing sample frequencies around a band are selected."""
    available = [10, 11, 14, 16, 20]
    cases = [
        ((11, 14), [11, 14]),
        ((11.5, 14), [11, 14]),
        ((11.5, 13.9), [11, 14]),
        ((11, 14.1), [11, 14, 16]),
        ((10, 10.1), [10, 11]),
        ((10, 19), available),
        ((15, 19), [14, 16, 20]),
    ]
    for (low, high), expected in cases:
        assert utils.get_relevant_frequencies(available, low, high) == expected
def test_has_polarization():
    """IQU-shaped inputs are flagged as polarized; I-only inputs are not."""
    check = pysm3.utils.has_polarization
    single = np.empty(12)
    polarized = [
        np.empty((3, 12)),
        np.empty((4, 3, 12)),
        (single, single, single),
        [(single, single, single), (single, single, single)],
    ]
    unpolarized = [np.empty((1, 12)), single, np.empty((4, 1, 12))]
    for maps in polarized:
        assert check(maps)
    for maps in unpolarized:
        assert not check(maps)
def test_bandpass_unit_conversion():
    """Bandpass-integrated CMB converted from RJ to thermodynamic units
    should recover the input lensed-CMB map."""
    nside = 32
    freqs = np.array([250, 300, 350]) * u.GHz
    weights = np.ones(len(freqs))
    sky = pysm3.Sky(nside=nside, preset_strings=["c2"])
    conversion = pysm3.utils.bandpass_unit_conversion(freqs, weights, u.uK_CMB)
    CMB_thermo_int = sky.get_emission(freqs, weights) * conversion
    expected_map = pysm3.read_map(
        "pysm_2/lensed_cmb.fits", field=(0, 1), nside=nside, unit=u.uK_CMB
    )
    # Compare I and Q (the fields read above).
    for pol in (0, 1):
        assert_quantity_allclose(expected_map[pol], CMB_thermo_int[pol], rtol=1e-4)
def test_bandpass_integration_tophat():
    """A normalized tophat bandpass must leave a constant map unchanged."""
    sky_map = np.ones(12, dtype=np.double)
    integrated = np.zeros_like(sky_map)
    freqs = utils.check_freq_input([99, 100, 101] * u.GHz)
    # weights=None means a flat (tophat) bandpass.
    weights = utils.normalize_weights(freqs, None)
    for index in range(len(freqs)):
        utils.trapz_step_inplace(freqs, weights, index, sky_map, integrated)
    np.testing.assert_allclose(sky_map, integrated)
@pytest.mark.parametrize("freq_spacing", ["uniform", "non-uniform"])
def test_trapz(freq_spacing):
    """Accumulating with trapz_step_inplace must equal a direct trapezoid rule."""
    freqs = [99, 100, 101] * u.GHz
    if freq_spacing == "non-uniform":
        freqs[-1] += 30 * u.GHz
    samples = np.array([1, 1.5, 1.2], dtype=np.double)
    accumulated = np.zeros(1, dtype=np.double)
    freqs = utils.check_freq_input(freqs)
    weights = utils.normalize_weights(freqs, [0.3, 1, 0.3])
    for step in range(len(freqs)):
        utils.trapz_step_inplace(
            freqs, weights, step, samples[step : step + 1], accumulated
        )
    np.testing.assert_allclose(trapezoid(weights * samples, freqs), accumulated)
def test_bandpass_integration_weights():
    """Normalized non-trivial weights must also preserve a constant map."""
    sky_map = np.ones(12, dtype=np.double)
    integrated = np.zeros_like(sky_map)
    freqs = utils.check_freq_input([99, 100, 101] * u.GHz)
    weights = utils.normalize_weights(freqs, [0.3, 1, 0.3])
    for index in range(len(freqs)):
        utils.trapz_step_inplace(freqs, weights, index, sky_map, integrated)
    np.testing.assert_allclose(sky_map, integrated)
def test_remotedata(tmp_path, monkeypatch):
    """RemoteData resolves a bare filename via the PYSM_LOCAL_DATA env var."""
    local_data = tmp_path / "data"
    local_data.mkdir()
    target = local_data / "testfile.txt"
    target.touch()
    monkeypatch.setenv("PYSM_LOCAL_DATA", str(local_data))
    assert pysm3.utils.RemoteData().get("testfile.txt") == str(target)
def test_remotedata_globalpath(tmp_path):
    """An absolute path is returned unchanged by RemoteData.get."""
    local_data = tmp_path / "data"
    local_data.mkdir()
    target = local_data / "testfile.txt"
    target.touch()
    assert pysm3.utils.RemoteData().get(str(target)) == str(target)
@pytest.fixture
def test_fits_file(tmp_path):
    """Write a small three-column binary FITS table and return its path."""
    path = tmp_path / "sub"
    columns = [
        fits.Column(name=name, array=np.array(values), format="K")
        for name, values in (("a", [1, 2]), ("b", [4, 5]), ("c", [7, 8]))
    ]
    fits.BinTableHDU.from_columns(columns).writeto(path)
    return path
def test_add_metadata(test_fits_file):
    """A single unit string is applied to every table column."""
    pysm3.utils.add_metadata(
        [test_fits_file, test_fits_file],
        field=1,
        coord="G",
        unit="uK_RJ",
        ref_freq="353 GHz",
    )
    with fits.open(test_fits_file) as hdus:
        header = hdus[1].header
        assert header["COORDSYS"] == "G"
        assert header["REF_FREQ"] == "353 GHz"
        for column in (1, 2, 3):
            assert header[f"TUNIT{column}"] == "uK_RJ"
def test_add_metadata_different_units(test_fits_file):
    """A list of units is applied column by column."""
    pysm3.utils.add_metadata(
        [test_fits_file],
        field=1,
        coord="G",
        unit=["uK_RJ", "mK_RJ", "K_CMB"],
        ref_freq="353 GHz",
    )
    with fits.open(test_fits_file) as hdus:
        header = hdus[1].header
        assert header["COORDSYS"] == "G"
        assert header["REF_FREQ"] == "353 GHz"
        for column, unit in zip((1, 2, 3), ("uK_RJ", "mK_RJ", "K_CMB")):
            assert header[f"TUNIT{column}"] == unit
def test_data_raise():
    """Requesting a file that exists neither locally nor remotely raises URLError."""
    with pytest.raises(URLError):
        pysm3.utils.RemoteData().get("doesntexist.txt")
class ReturnsCar:
    """Minimal emission-model stub used to exercise utils.wrap_wcs."""

    def __init__(self, wcs):
        # WCS to attach to the emission; None means "plain Quantity".
        self.wcs = wcs

    def get_emission(self):
        return utils.wrap_wcs(np.ones(12) * u.uK_RJ, self.wcs)
def test_wrap_wcs_no_wcs():
    """Without a WCS, wrap_wcs returns a plain Quantity with no .wcs attribute."""
    emission = ReturnsCar(wcs=None).get_emission()
    assert emission.unit == u.uK_RJ
    assert not hasattr(emission, "wcs")
def test_wrap_wcs_with_wcs():
    """With a full-sky CAR geometry WCS, wrap_wcs attaches it to the result."""
    _, wcs = pixell.enmap.fullsky_geometry(
        (10 * u.deg).to_value(u.rad),
        dims=(3,),
        variant="fejer1",
    )
    assert hasattr(ReturnsCar(wcs=wcs).get_emission(), "wcs")
|
galsciREPO_NAMEpysmPATH_START.@pysm_extracted@pysm-main@tests@test_utils.py@.PATH_END.py
|
{
"filename": "test_nan.py",
"repo_name": "transientskp/tkp",
"repo_path": "tkp_extracted/tkp-master/tests/test_quality/test_nan.py",
"type": "Python"
}
|
import unittest
import numpy as np
from tkp.quality.nan import contains_nan
class TestNan(unittest.TestCase):
    """Exercise tkp.quality.nan.contains_nan on clean and NaN-bearing arrays."""

    def test_valid(self):
        # A finite array must not be reported as containing NaN.
        self.assertFalse(contains_nan(np.array([1, 2, 3.0])))

    def test_invalid(self):
        # An array with a NaN entry is flagged as truthy.
        self.assertTrue(contains_nan(np.array([1, 2, np.nan])))

    def test_errorstring(self):
        # On detection the function returns an error-message string.
        outcome = contains_nan(np.array([1, 2, np.nan]))
        self.assertTrue(type(outcome) == str)
|
transientskpREPO_NAMEtkpPATH_START.@tkp_extracted@tkp-master@tests@test_quality@test_nan.py@.PATH_END.py
|
{
"filename": "train_segmentor.py",
"repo_name": "ESA-Datalabs/XAMI-model",
"repo_path": "XAMI-model_extracted/XAMI-model-main/xami_model/train/train_segmentor.py",
"type": "Python"
}
|
import os
import sys
import yaml
import torch
import numpy as np
import json
import cv2
import albumentations as A
import matplotlib.pyplot as plt
from datetime import datetime
from pycocotools import mask as maskUtils
from torch.utils.data import DataLoader
from segment_anything.utils.transforms import ResizeLongestSide
from xami_model.dataset import dataset_utils, load_dataset
from xami_model.model_predictor import xami, predictor_utils
from xami_model.mobile_sam.mobile_sam import sam_model_registry, SamPredictor
# Reproducibility: fix the NumPy/PyTorch RNG seeds and force deterministic
# (non-benchmarking) cuDNN kernel selection.
import torch.backends.cudnn as cudnn

seed = 0
np.random.seed(seed)
torch.manual_seed(seed)
cudnn.deterministic = True
cudnn.benchmark = False
def read_config(config_path):
    """Load a YAML configuration file and return its parsed contents.

    Parameters
    ----------
    config_path : str
        Path to the YAML file.

    Returns
    -------
    The object produced by ``yaml.safe_load`` (typically a dict).
    """
    with open(config_path) as stream:
        return yaml.safe_load(stream)
def main(config):
    """Fine-tune the (Mobile)SAM mask decoder on the XAMI dataset.

    Parameters
    ----------
    config : dict
        Parsed YAML configuration; see the key lookups below for the
        expected entries (device settings, optimizer hyperparameters,
        dataset directories, checkpoint paths, ...).

    Side effects: creates a fresh working directory, optionally logs to
    Weights & Biases, and writes ``sam_best.pth`` / ``sam_last.pth``.
    """
    # Environment setup
    os.environ['CUDA_VISIBLE_DEVICES'] = config['cuda_visible_devices']
    # Load configuration parameters
    kfold_iter = config['kfold_iter']
    device_id = int(config['device_id'])
    lr = float(config['learning_rate'])
    wd = float(config['weight_decay'])
    wandb_track = config['wandb_track']
    num_epochs = int(config['num_epochs'])
    use_lr_initial_decay = config['use_lr_initial_decay']
    n_epochs_stop = int(config['n_epochs_stop'])
    use_CR = config['use_CR']
    work_dir = config['work_dir']
    input_dir = config['input_dir']
    # The batch size before applying augmentations. The effective batch size is batch_size * (#augmentations + 1)
    # If the effective batch_size is bigger than 8, the model may run into OOM errors due to allocation of memory
    batch_size = int(config['initial_batch_size'])
    mobile_sam_checkpoint = config['mobile_sam_checkpoint']
    model_type = config['model_type']
    the_time = datetime.now()
    # Create working directory
    work_dir = predictor_utils.get_next_directory_name(work_dir)
    os.makedirs(work_dir)
    print(f"Working directory: {work_dir}")
    # Setup device
    device = f"cuda:{device_id}" if torch.cuda.is_available() else "cpu"
    print(f"Device: {device}")
    # Load dataset: COCO-style annotation JSONs sit next to the images.
    train_dir = os.path.join(input_dir, 'train/')
    valid_dir = os.path.join(input_dir, 'valid/')
    json_train_path = os.path.join(train_dir, '_annotations.coco.json')
    json_valid_path = os.path.join(valid_dir, '_annotations.coco.json')
    with open(json_train_path) as f1, open(json_valid_path) as f2:
        train_data_in = json.load(f1)
        valid_data_in = json.load(f2)
    training_image_paths = [os.path.join(train_dir, image['file_name']) for image in train_data_in['images']]
    val_image_paths = [os.path.join(valid_dir, image['file_name']) for image in valid_data_in['images']]
    train_data = dataset_utils.load_json(json_train_path)
    valid_data = dataset_utils.load_json(json_valid_path)
    # Ground-truth masks, bounding boxes and class labels per split.
    train_gt_masks, train_bboxes, train_classes, train_class_categories = dataset_utils.get_coords_and_masks_from_json(
        train_dir, train_data)
    val_gt_masks, val_bboxes, val_classes, val_class_categories = dataset_utils.get_coords_and_masks_from_json(
        valid_dir, valid_data)
    # Initialize model
    model = sam_model_registry[model_type](checkpoint=mobile_sam_checkpoint)
    model.to(device)
    predictor = SamPredictor(model)
    xami_model_instance = xami.XAMI(model, device, predictor, apply_segm_CR=use_CR)
    if wandb_track:
        import wandb
        wandb.login()
        run = wandb.init(project="sam", name=f"sam_{kfold_iter}_{the_time}")
        wandb.watch(xami_model_instance.model, log='all', log_graph=True)
    # Prepare data loaders
    transform = ResizeLongestSide(xami_model_instance.model.image_encoder.img_size)
    train_set = load_dataset.ImageDataset(training_image_paths, xami_model_instance.model, transform, device)
    val_set = load_dataset.ImageDataset(val_image_paths, xami_model_instance.model, transform, device)
    train_dataloader = DataLoader(train_set, batch_size=batch_size, shuffle=True)
    val_dataloader = DataLoader(val_set, batch_size=batch_size, shuffle=False)
    # Optimizer: only the mask decoder is trainable; everything else is frozen.
    for name, param in xami_model_instance.model.named_parameters():
        param.requires_grad = 'mask_decoder' in name
    parameters_to_optimize = [param for param in xami_model_instance.model.parameters() if param.requires_grad]
    optimizer = torch.optim.AdamW(parameters_to_optimize, lr=lr, weight_decay=wd)
    # Scheduler: optional linear decay from lr to final_lr over total_steps.
    scheduler = None
    if use_lr_initial_decay:
        initial_lr = lr
        final_lr = float(config['final_lr'])
        total_steps = config['total_steps']
        lr_decrement = (initial_lr - final_lr) / total_steps
        def lr_lambda(current_step):
            # LambdaLR expects a multiplicative factor relative to initial_lr.
            if current_step < total_steps:
                return 1 - current_step * lr_decrement / initial_lr
            return final_lr / initial_lr
        scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda)
    # Training loop
    train_losses = []
    valid_losses = []
    best_valid_loss = float('inf')
    combined_augmentations = A.Compose([
        # Geometric transformations
        A.Flip(p=0.5),
        A.RandomRotate90(p=0.5),
        A.RandomSizedCrop((492, 492), 512, 512, p=0.6),
        # Noise and blur transformations
        A.GaussianBlur(blur_limit=(3, 7), p=0.7),
        A.GaussNoise(var_limit=(10.0, 50.0), p=0.6),
        A.ISONoise(p=0.5),
    ], bbox_params={'format': 'coco', 'label_fields': ['category_id']}, p=1)
    cr_transforms = [combined_augmentations]
    print(f"🚀 Training {xami_model_instance.model.__class__.__name__} with {len(training_image_paths)} training images and {len(val_image_paths)} validation images.")
    print(f"🚀 Training for {num_epochs} epochs with effective batch size {batch_size * (len(cr_transforms) + 1)} and learning rate {lr}.")
    # NOTE(review): final_lr and total_steps are only bound when
    # use_lr_initial_decay is True; this print raises NameError otherwise.
    print(f"🚀 Initial learning rate: {lr}. Final learning rate: {final_lr} after {total_steps} steps. Weight decay: {wd}.")
    print(f"🚀 Using learning rate initial decay scheduler: {use_lr_initial_decay}. ")
    print(f"🚀 Early stopping after {n_epochs_stop} epochs without improvement.")
    print(f"🚀 Training started.\n")
    iou_eval_thresholds = [0.5, 0.75, 0.9]
    for epoch in range(num_epochs):
        # Train
        xami_model_instance.model.train()
        epoch_loss, _, _, _ = xami_model_instance.train_validate_step(
            train_dataloader,
            train_dir,
            train_gt_masks,
            train_bboxes,
            optimizer,
            mode='train',
            cr_transforms=cr_transforms,
            scheduler=scheduler)
        train_losses.append(epoch_loss)
        # Validate (no gradients; no augmentations, no scheduler stepping)
        xami_model_instance.model.eval()
        with torch.no_grad():
            epoch_val_loss, all_image_ids, all_gt_masks, all_pred_masks = xami_model_instance.train_validate_step(
                val_dataloader,
                valid_dir,
                val_gt_masks,
                val_bboxes,
                optimizer,
                mode='validate',
                cr_transforms=[],
                scheduler=None)
            valid_losses.append(epoch_val_loss)
            # Segmentation metrics at several IoU thresholds.
            p_metric_name, p_means, p_stds = predictor_utils.compute_scores('precision', all_pred_masks, all_gt_masks, iou_eval_thresholds)
            r_metric_name, r_means, r_stds = predictor_utils.compute_scores('recall', all_pred_masks, all_gt_masks, iou_eval_thresholds)
            f_metric_name, f_means, f_stds = predictor_utils.compute_scores('f1_score', all_pred_masks, all_gt_masks, iou_eval_thresholds)
            a_metric_name, a_means, a_stds = predictor_utils.compute_scores('accuracy', all_pred_masks, all_gt_masks, iou_eval_thresholds)
            print('Precision', p_means, 'Recall', r_means, 'F1-score', f_means, 'Accuracy', a_means)
        # Logging
        if wandb_track:
            wandb.log({'epoch training loss': epoch_loss, 'epoch validation loss': epoch_val_loss})
            wandb.log({'Precision': p_means, 'Recall': r_means, 'F1-score': f_means, 'Accuracy': a_means})
        print(f'EPOCH: {epoch}. Training loss: {epoch_loss}')
        print(f'EPOCH: {epoch}. Validation loss: {epoch_val_loss}.')
        # Early-stopping bookkeeping. best_valid_loss starts at +inf, so the
        # first epoch always takes this branch and initializes best_epoch,
        # best_model and epochs_no_improve.
        if epoch_val_loss < best_valid_loss:
            best_valid_loss = epoch_val_loss
            best_epoch = epoch
            best_model = xami_model_instance.model
            epochs_no_improve = 0
        else:
            epochs_no_improve += 1
            if epochs_no_improve == n_epochs_stop:
                print("Early stopping initiated.")
                break
    # NOTE(review): best_epoch/best_model are unbound if num_epochs == 0.
    print(f"Best epoch: {best_epoch}. Best validation loss: {best_valid_loss}.\n")
    torch.save(best_model.state_dict(), f'{work_dir}/sam_best.pth')
    torch.save(xami_model_instance.model.state_dict(), f'{work_dir}/sam_last.pth')
    if wandb_track:
        wandb.run.summary["batch_size"] = batch_size * (len(cr_transforms) + 1)
        wandb.run.summary["best_epoch"] = best_epoch
        wandb.run.summary["best_valid_loss"] = best_valid_loss
        wandb.run.summary["num_epochs"] = num_epochs
        wandb.run.summary["learning rate"] = lr
        wandb.run.summary["weight_decay"] = wd
        wandb.run.summary["# train_dataloader"] = len(train_dataloader)
        wandb.run.summary["# val_dataloader"] = len(val_dataloader)
        wandb.run.summary["checkpoint"] = mobile_sam_checkpoint
        run.finish()
if __name__ == "__main__":
    # Expect exactly one CLI argument: the path to the YAML config.
    if len(sys.argv) != 2:
        print("Usage: python train_yolo_sam.py <path_to_config.yaml>")
        sys.exit(1)
    main(read_config(sys.argv[1]))
|
ESA-DatalabsREPO_NAMEXAMI-modelPATH_START.@XAMI-model_extracted@XAMI-model-main@xami_model@train@train_segmentor.py@.PATH_END.py
|
{
"filename": "_variant.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/choroplethmap/hoverlabel/font/_variant.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class VariantValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    """Validator for the ``variant`` property of
    ``choroplethmap.hoverlabel.font``."""

    def __init__(
        self,
        plotly_name="variant",
        parent_name="choroplethmap.hoverlabel.font",
        **kwargs,
    ):
        # Default set of accepted font-variant keywords; callers may
        # override any of these defaults through **kwargs.
        default_values = [
            "normal",
            "small-caps",
            "all-small-caps",
            "all-petite-caps",
            "petite-caps",
            "unicase",
        ]
        super(VariantValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            array_ok=kwargs.pop("array_ok", True),
            edit_type=kwargs.pop("edit_type", "none"),
            values=kwargs.pop("values", default_values),
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@choroplethmap@hoverlabel@font@_variant.py@.PATH_END.py
|
{
"filename": "make_particles_file.py",
"repo_name": "AMReX-Astro/Castro",
"repo_path": "Castro_extracted/Castro-main/Exec/unit_tests/particles_test/make_particles_file.py",
"type": "Python"
}
|
"""
This script creates a particle file for the test problem.
Note it's not very clever - if you change problo and probhi in the inputs file,
you'll need to change them here as well.
"""
import numpy as np
outfile_name = "particle_file"
# number of particles
n_particles = 20
# copy these from the inputs file
problo = np.array([0, 0])
probhi = np.array([1, 1])
# maximum distance from center
max_R = np.max(0.5 * (probhi - problo))
dr_part = max_R / n_particles
dtheta_part = 2 * np.pi / n_particles
xs = np.zeros((n_particles, 2))
theta = np.linspace(0, n_particles, num=n_particles,
endpoint=False) * dtheta_part
r = (np.linspace(0, n_particles, num=n_particles, endpoint=False) + 0.5) * dr_part
xs[:, 0] = r * np.cos(theta)
xs[:, 1] = r * np.sin(theta)
xs[:, :] += 0.5 * (problo + probhi)[np.newaxis, :]
with open(outfile_name, 'w') as outfile:
outfile.write("{}\n".format(n_particles))
for pos in xs:
outfile.write("{} {}\n".format(pos[0], pos[1]))
|
AMReX-AstroREPO_NAMECastroPATH_START.@Castro_extracted@Castro-main@Exec@unit_tests@particles_test@make_particles_file.py@.PATH_END.py
|
{
"filename": "_xanchor.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/parcoords/line/colorbar/_xanchor.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class XanchorValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    """Validator for the ``xanchor`` property of
    ``parcoords.line.colorbar``."""

    def __init__(
        self, plotly_name="xanchor", parent_name="parcoords.line.colorbar", **kwargs
    ):
        # Pull overridable defaults out of kwargs before delegating.
        edit_type = kwargs.pop("edit_type", "colorbars")
        role = kwargs.pop("role", "style")
        values = kwargs.pop("values", ["left", "center", "right"])
        super(XanchorValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            role=role,
            values=values,
            **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@parcoords@line@colorbar@_xanchor.py@.PATH_END.py
|
{
"filename": "log.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/ipython/py2/IPython/utils/log.py",
"type": "Python"
}
|
# Deprecation shim: the contents of IPython.utils.log moved to
# ``traitlets.log``.  Importing this module warns and re-exports everything
# from the new location for backwards compatibility.
from __future__ import absolute_import
from warnings import warn
warn("IPython.utils.log has moved to traitlets.log")
from traitlets.log import *
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@ipython@py2@IPython@utils@log.py@.PATH_END.py
|
{
"filename": "_variantsrc.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/scattergl/textfont/_variantsrc.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class VariantsrcValidator(_plotly_utils.basevalidators.SrcValidator):
    """Validator for the ``variantsrc`` property of
    ``scattergl.textfont``."""

    def __init__(
        self, plotly_name="variantsrc", parent_name="scattergl.textfont", **kwargs
    ):
        # edit_type default is overridable via kwargs.
        edit_type = kwargs.pop("edit_type", "none")
        super(VariantsrcValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@scattergl@textfont@_variantsrc.py@.PATH_END.py
|
{
"filename": "_nticks.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/layout/ternary/caxis/_nticks.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class NticksValidator(_plotly_utils.basevalidators.IntegerValidator):
    """Validator for the integer ``nticks`` property of
    ``layout.ternary.caxis``."""

    def __init__(
        self, plotly_name="nticks", parent_name="layout.ternary.caxis", **kwargs
    ):
        # Overridable defaults: replot on change, at least one tick.
        edit_type = kwargs.pop("edit_type", "plot")
        minimum = kwargs.pop("min", 1)
        super(NticksValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            min=minimum,
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@layout@ternary@caxis@_nticks.py@.PATH_END.py
|
{
"filename": "polyutils.py",
"repo_name": "waynebhayes/SpArcFiRe",
"repo_path": "SpArcFiRe_extracted/SpArcFiRe-master/scripts/SpArcFiRe-pyvenv/lib/python2.7/site-packages/numpy/polynomial/polyutils.py",
"type": "Python"
}
|
"""
Utility classes and functions for the polynomial modules.
This module provides: error and warning objects; a polynomial base class;
and some routines used in both the `polynomial` and `chebyshev` modules.
Error objects
-------------
.. autosummary::
:toctree: generated/
PolyError base class for this sub-package's errors.
PolyDomainError raised when domains are mismatched.
Warning objects
---------------
.. autosummary::
:toctree: generated/
RankWarning raised in least-squares fit for rank-deficient matrix.
Base class
----------
.. autosummary::
:toctree: generated/
PolyBase Obsolete base class for the polynomial classes. Do not use.
Functions
---------
.. autosummary::
:toctree: generated/
as_series convert list of array_likes into 1-D arrays of common type.
trimseq remove trailing zeros.
trimcoef remove small trailing coefficients.
getdomain return the domain appropriate for a given set of abscissae.
mapdomain maps points between domains.
mapparms parameters of the linear map between domains.
"""
from __future__ import division, absolute_import, print_function
import numpy as np
__all__ = [
'RankWarning', 'PolyError', 'PolyDomainError', 'as_series', 'trimseq',
'trimcoef', 'getdomain', 'mapdomain', 'mapparms', 'PolyBase']
#
# Warnings and Exceptions
#
class RankWarning(UserWarning):
    """Warning raised by least-squares fitters (e.g. ``chebfit``)
    when the design matrix is rank deficient."""
class PolyError(Exception):
    """Root of the exception hierarchy used by the polynomial modules."""
class PolyDomainError(PolyError):
    """Raised by the generic Poly class when a binary operation is given
    Poly objects whose domains do not match."""
#
# Base class for all polynomial types
#
class PolyBase(object):
    """
    Obsolete common base class for the polynomial types.

    Deprecated since numpy 1.9.0; use the abstract ABCPolyBase class
    instead (which requires several virtual methods to be implemented).
    """
#
# Helper functions to convert inputs to 1-D arrays
#
def trimseq(seq):
    """Remove trailing zeros from a sequence of Poly series coefficients.

    Parameters
    ----------
    seq : sequence
        Sequence of Poly series coefficients. This routine fails for
        empty sequences.

    Returns
    -------
    series : sequence
        Subsequence with trailing zeros removed. If the resulting
        sequence would be empty, the first element alone is kept. The
        returned sequence may or may not be a view.

    Notes
    -----
    Do not lose the type info if the sequence contains unknown objects.
    """
    if len(seq) == 0:
        return seq
    # Walk backwards past trailing zeros, never dropping element 0.
    last = len(seq) - 1
    while last > 0 and seq[last] == 0:
        last -= 1
    return seq[:last + 1]
def as_series(alist, trim=True):
    """
    Return argument as a list of 1-d arrays.

    The returned list contains array(s) of dtype double, complex double, or
    object.  A 1-d argument of shape ``(N,)`` is parsed into ``N`` arrays of
    size one; a 2-d argument of shape ``(M,N)`` is parsed into ``M`` arrays
    of size ``N`` (i.e., is "parsed by row"); and a higher dimensional array
    raises a ValueError if it is not first reshaped into either a 1-d or 2-d
    array.

    Parameters
    ----------
    alist : array_like
        A 1- or 2-d array_like
    trim : boolean, optional
        When True, trailing zeros are removed from the inputs.
        When False, the inputs are passed through intact.

    Returns
    -------
    [a1, a2,...] : list of 1-D arrays
        A copy of the input data as a list of 1-d arrays.

    Raises
    ------
    ValueError
        Raised when `as_series` cannot convert its input to 1-d arrays, or
        at least one of the resulting arrays is empty.
    """
    # Fix: the historical ``np.array(a, ndmin=1, copy=0)`` breaks under
    # NumPy 2.0, where copy=False (and its alias 0) means "never copy" and
    # raises ValueError whenever a copy is required (e.g. for list input).
    # asarray + atleast_1d keeps the original no-forced-copy behavior.
    arrays = [np.atleast_1d(np.asarray(a)) for a in alist]
    if min([a.size for a in arrays]) == 0:
        raise ValueError("Coefficient array is empty")
    if any([a.ndim != 1 for a in arrays]):
        raise ValueError("Coefficient array is not 1-d")
    if trim:
        arrays = [trimseq(a) for a in arrays]
    if any([a.dtype == np.dtype(object) for a in arrays]):
        # Mixed object/non-object inputs: promote everything to object
        # dtype so the caller gets a homogeneous list.
        ret = []
        for a in arrays:
            if a.dtype != np.dtype(object):
                tmp = np.empty(len(a), dtype=np.dtype(object))
                tmp[:] = a[:]
                ret.append(tmp)
            else:
                ret.append(a.copy())
    else:
        try:
            dtype = np.common_type(*arrays)
        except Exception:
            raise ValueError("Coefficient arrays have no common type")
        # copy=True (the default) replaces the old integer flag ``copy=1``.
        ret = [np.array(a, dtype=dtype) for a in arrays]
    return ret
def trimcoef(c, tol=0):
    """
    Remove "small" trailing coefficients from a polynomial.

    "Small" means "small in absolute value"; "trailing" means the highest
    order coefficient(s), e.g. in ``[0, 1, 1, 0, 0]`` (representing
    ``0 + x + x**2 + 0*x**3 + 0*x**4``) both the 3rd and 4th order
    coefficients would be trimmed.

    Parameters
    ----------
    c : array_like
        1-d array of coefficients, ordered from lowest order to highest.
    tol : number, optional
        Trailing (highest order) elements with absolute value less than
        or equal to `tol` (default zero) are removed.

    Returns
    -------
    trimmed : ndarray
        1-d array with the trailing small coefficients removed.  If the
        result would be empty, a single zero is returned instead.

    Raises
    ------
    ValueError
        If `tol` < 0.

    See Also
    --------
    trimseq
    """
    if tol < 0:
        raise ValueError("tol must be non-negative")
    [c] = as_series([c])
    # Indices of all coefficients that survive the tolerance test.
    keep = np.nonzero(np.abs(c) > tol)[0]
    if keep.size == 0:
        # Everything was trimmed: return one zero of the right dtype.
        return c[:1]*0
    return c[:keep[-1] + 1].copy()
def getdomain(x):
    """
    Return a domain suitable for given abscissae.

    Parameters
    ----------
    x : array_like
        1-d array of abscissae whose domain will be determined.

    Returns
    -------
    domain : ndarray
        1-d array of two values.  For complex input, the lower-left and
        upper-right corners of the smallest axis-aligned rectangle in the
        complex plane containing the points `x`; for real input, the two
        ends of the smallest interval containing the points `x`.

    See Also
    --------
    mapparms, mapdomain
    """
    [x] = as_series([x], trim=False)
    if x.dtype.char in np.typecodes['Complex']:
        # Bounding box in the complex plane.
        re_part, im_part = x.real, x.imag
        low = complex(re_part.min(), im_part.min())
        high = complex(re_part.max(), im_part.max())
        return np.array((low, high))
    return np.array((x.min(), x.max()))
def mapparms(old, new):
    """
    Linear map parameters between domains.

    Return the parameters of the linear map ``offset + scale*x`` that maps
    `old` to `new` such that ``old[i] -> new[i]``, ``i = 0, 1``.

    Parameters
    ----------
    old, new : array_like
        Domains. Each domain must (successfully) convert to a 1-d array
        containing precisely two values.

    Returns
    -------
    offset, scale : scalars
        The map ``L(x) = offset + scale*x`` maps the first domain to the
        second.

    See Also
    --------
    getdomain, mapdomain

    Notes
    -----
    Also works for complex numbers: it can compute the parameters mapping
    any line in the complex plane to any other line therein.
    """
    span_old = old[1] - old[0]
    span_new = new[1] - new[0]
    # Offset chosen so that both endpoints map exactly.
    off = (old[1]*new[0] - old[0]*new[1])/span_old
    scl = span_new/span_old
    return off, scl
def mapdomain(x, old, new):
    """
    Apply linear map to input points.

    The linear map ``offset + scale*x`` that maps the domain `old` to the
    domain `new` is applied to the points `x`.

    Parameters
    ----------
    x : array_like
        Points to be mapped. If `x` is a subtype of ndarray the subtype
        will be preserved.
    old, new : array_like
        The two domains that determine the map. Each must (successfully)
        convert to 1-d arrays containing precisely two values.

    Returns
    -------
    x_out : ndarray
        Array of points of the same shape as `x`, after application of the
        linear map between the two domains.

    See Also
    --------
    getdomain, mapparms

    Notes
    -----
    Effectively computes ``new[0] + m*(x - old[0])`` with
    ``m = (new[1]-new[0])/(old[1]-old[0])``.  Also works for complex
    numbers, mapping any line in the complex plane to any other line.
    """
    # asanyarray preserves ndarray subclasses, as documented above.
    x = np.asanyarray(x)
    off, scl = mapparms(old, new)
    return scl*x + off
|
waynebhayesREPO_NAMESpArcFiRePATH_START.@SpArcFiRe_extracted@SpArcFiRe-master@scripts@SpArcFiRe-pyvenv@lib@python2.7@site-packages@numpy@polynomial@polyutils.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "POSYDON-code/POSYDON",
"repo_path": "POSYDON_extracted/POSYDON-main/posydon/grids/__init__.py",
"type": "Python"
}
|
POSYDON-codeREPO_NAMEPOSYDONPATH_START.@POSYDON_extracted@POSYDON-main@posydon@grids@__init__.py@.PATH_END.py
|
|
{
"filename": "area_expansion.py",
"repo_name": "rice-solar-physics/ebtelPlusPlus",
"repo_path": "ebtelPlusPlus_extracted/ebtelPlusPlus-main/examples/area_expansion.py",
"type": "Python"
}
|
"""
The Effect of Cross-sectional Area Expansion
============================================
In this example, we demonstrate the effect of expanding cross-sectional area on the time-evolution
of the temperature, density, and pressure. We will reproduce Figure 7 of :cite:t:`cargill_static_2022`.
"""
import astropy.units as u
import matplotlib.pyplot as plt
from astropy.visualization import quantity_support

import ebtelplusplus
from ebtelplusplus.models import HeatingModel, PhysicsModel, TriangularHeatingEvent

# Let matplotlib plot astropy Quantity objects with their units.
quantity_support()
##############################################################################
# In `ebtelplusplus`, cross-sectional area expansion is defined through two ratios:
# the ratio between the cross-sectional area averaged over the transition
# region (TR) to the cross-sectional area averaged over the corona
# (:math:`A_{TR}/A_C`) and the ratio between the cross-sectional area at the
# TR-corona boundary and the cross-sectional area averaged over the corona
# (:math:`A_0/A_C`). An additional third parameter, :math:`L_{TR}/L`, the ratio between the
# length of the TR and the loop half-length, controls the thickness of the TR.
#
# We will explore the effect of three different expansion profiles: no expansion,
# gradual expansion from the TR through the corona, and rapid expansion in the
# corona.
#
# We start by defining our simple single-pulse heating model that we will use in
# all three cases.
# Note that we will use a heating partition of :math:`1/2` because we will assume
# a single fluid model in this case to be consistent with :cite:t:`cargill_static_2022`.
heating = HeatingModel(background=3.5e-5*u.Unit('erg cm-3 s-1'),
                       partition=0.5,
                       events=[TriangularHeatingEvent(0*u.s, 200*u.s, 0.1*u.Unit('erg cm-3 s-1'))])
##############################################################################
# Next, we will set up our three expansion models following
# :cite:t:`cargill_static_2022`.
# In all cases except the no expansion case, we set :math:`L_{TR}/L_C=0.15` to
# model a TR with a small, but finite thickness.
no_expansion = PhysicsModel(force_single_fluid=True)
gradual_expansion = PhysicsModel(force_single_fluid=True,
                                 loop_length_ratio_tr_total=0.15,
                                 area_ratio_tr_corona=1/3,
                                 area_ratio_0_corona=2/3)
coronal_expansion = PhysicsModel(force_single_fluid=True,
                                 loop_length_ratio_tr_total=0.15,
                                 area_ratio_tr_corona=1/3,
                                 area_ratio_0_corona=1/3)
##############################################################################
# Now, run each simulation for a loop with a half length of 45 Mm for a total
# simulation time of 5500 s.
loop_length = 45 * u.Mm
total_time = 5500 * u.s
r_no_expansion = ebtelplusplus.run(total_time, loop_length, heating, physics=no_expansion)
r_gradual_expansion = ebtelplusplus.run(total_time, loop_length, heating, physics=gradual_expansion)
r_coronal_expansion = ebtelplusplus.run(total_time, loop_length, heating, physics=coronal_expansion)
##############################################################################
# Finally, let's visualize our results in the manner of Figure 7 of
# :cite:t:`cargill_static_2022`.
# Mosaic layout: T = temperature, N = density, P = total pressure,
# O = density vs. temperature (phase-space) panel.
fig, axes = plt.subplot_mosaic(
    """
    TN
    PO
    """,
    figsize=(8,8),
    layout='constrained',
)
for result, model in [(r_no_expansion, no_expansion),
                      (r_gradual_expansion, gradual_expansion),
                      (r_coronal_expansion, coronal_expansion)]:
    # Label each curve by its two area ratios so the three cases are distinguishable.
    label = f'$A_{{TR}}/A_C={model.area_ratio_tr_corona:.2f},A_0/A_C={model.area_ratio_0_corona:.2f}$'
    axes['T'].plot(result.time, result.electron_temperature.to('MK'), label=label)
    axes['N'].plot(result.time, result.density)
    # Total pressure is the sum of the electron and ion contributions.
    axes['P'].plot(result.time, result.electron_pressure+result.ion_pressure)
    axes['O'].plot(result.electron_temperature, result.density)
axes['T'].legend(frameon=False,loc=1)
# Time-series panels share the full simulated time range on the x-axis.
for ax in ['T','N','P']:
    axes[ax].set_xlim(0,5500)
axes['T'].set_ylim(0,15)
axes['N'].set_ylim(0,6e9)
axes['P'].set_ylim(0,8)
# The phase-space panel spans several decades, so use log-log axes.
axes['O'].set_xlim(1e5,2e7)
axes['O'].set_ylim(7e7, 1e10)
axes['O'].set_xscale('log')
axes['O'].set_yscale('log')
|
rice-solar-physicsREPO_NAMEebtelPlusPlusPATH_START.@ebtelPlusPlus_extracted@ebtelPlusPlus-main@examples@area_expansion.py@.PATH_END.py
|
{
"filename": "test_upstash_redis.py",
"repo_name": "langchain-ai/langchain",
"repo_path": "langchain_extracted/langchain-master/libs/community/tests/integration_tests/storage/test_upstash_redis.py",
"type": "Python"
}
|
"""Implement integration tests for Redis storage."""
from __future__ import annotations
from typing import TYPE_CHECKING
import pytest
from langchain_community.storage.upstash_redis import UpstashRedisByteStore
if TYPE_CHECKING:
from upstash_redis import Redis
# Skip the entire module when the optional upstash_redis package is absent.
pytest.importorskip("upstash_redis")

# Placeholder credentials: substitute a real Upstash REST URL/token to run
# these integration tests against a live database.
URL = "<UPSTASH_REDIS_REST_URL>"
TOKEN = "<UPSTASH_REDIS_REST_TOKEN>"
@pytest.fixture
def redis_client() -> Redis:
    """Yield redis client."""
    from upstash_redis import Redis

    # WARNING: this fixture wipes the target database before handing it out.
    conn = Redis(url=URL, token=TOKEN)
    try:
        conn.ping()
    except Exception:
        pytest.skip("Ping request failed. Verify that credentials are correct.")
    conn.flushdb()
    return conn
def test_mget(redis_client: Redis) -> None:
    """Values seeded via the raw client are readable through the store as bytes."""
    store = UpstashRedisByteStore(client=redis_client, ttl=None)
    redis_client.mset({"key1": "value1", "key2": "value2"})

    fetched = store.mget(["key1", "key2"])

    assert fetched == [b"value1", b"value2"]
def test_mset(redis_client: Redis) -> None:
    """Values written through the store are visible to the raw client."""
    store = UpstashRedisByteStore(client=redis_client, ttl=None)
    store.mset([("key1", b"value1"), ("key2", b"value2")])

    assert redis_client.mget("key1", "key2") == ["value1", "value2"]
def test_mdelete(redis_client: Redis) -> None:
    """Test that deletion works as expected."""
    store = UpstashRedisByteStore(client=redis_client, ttl=None)
    targets = ["key1", "key2"]
    redis_client.mset({"key1": "value1", "key2": "value2"})

    store.mdelete(targets)

    # Deleted keys read back as missing through the raw client.
    assert redis_client.mget(*targets) == [None, None]
def test_yield_keys(redis_client: Redis) -> None:
    """yield_keys returns logical key names, optionally filtered by a glob prefix."""
    store = UpstashRedisByteStore(client=redis_client, ttl=None)
    # Fixed: key1 was seeded with "value2" — a copy-paste slip; the assertions
    # below only inspect keys, but keep the data consistent with sibling tests.
    redis_client.mset({"key1": "value1", "key2": "value2"})
    assert sorted(store.yield_keys()) == ["key1", "key2"]
    assert sorted(store.yield_keys(prefix="key*")) == ["key1", "key2"]
    assert sorted(store.yield_keys(prefix="lang*")) == []
def test_namespace(redis_client: Redis) -> None:
    # A namespaced store physically prefixes keys with "<namespace>/" in Redis
    # while the store API keeps exposing the bare logical key names.
    store = UpstashRedisByteStore(client=redis_client, ttl=None, namespace="meow")
    key_value_pairs = [("key1", b"value1"), ("key2", b"value2")]
    store.mset(key_value_pairs)

    # Enumerate the raw keyspace with SCAN: the first call yields an initial
    # cursor plus a first batch; keep scanning until the cursor wraps to 0.
    cursor, all_keys = redis_client.scan(0)
    while cursor != 0:
        cursor, keys = redis_client.scan(cursor)
        if len(keys) != 0:
            all_keys.extend(keys)

    # Raw keys carry the "meow/" namespace prefix.
    assert sorted(all_keys) == [
        "meow/key1",
        "meow/key2",
    ]

    # Deleting via the store uses the logical (unprefixed) key name.
    store.mdelete(["key1"])
    cursor, all_keys = redis_client.scan(0, match="*")
    while cursor != 0:
        cursor, keys = redis_client.scan(cursor, match="*")
        if len(keys) != 0:
            all_keys.extend(keys)

    assert sorted(all_keys) == [
        "meow/key2",
    ]

    # The store-level view stays unprefixed, and prefix filters apply to the
    # logical names, not the raw namespaced ones.
    assert list(store.yield_keys()) == ["key2"]
    assert list(store.yield_keys(prefix="key*")) == ["key2"]
    assert list(store.yield_keys(prefix="key1")) == []
|
langchain-aiREPO_NAMElangchainPATH_START.@langchain_extracted@langchain-master@libs@community@tests@integration_tests@storage@test_upstash_redis.py@.PATH_END.py
|
{
"filename": "filter_Gaussian.py",
"repo_name": "Ntsikelelo-Charles/Fringe_rate_filters",
"repo_path": "Fringe_rate_filters_extracted/Fringe_rate_filters-main/Mutual_coupling_fringe_rate_filters/filter_Gaussian.py",
"type": "Python"
}
|
import matplotlib.pyplot as plt
import matplotlib
import numpy as np
from pyuvdata import UVData
import hera_cal as hc
import uvtools as uvt
import hera_pspec as hp
import copy
from scipy import integrate
from scipy import optimize
def gauss(x, amp, loc, scale):
    """Evaluate an unnormalised Gaussian amp * exp(-(x - loc)**2 / (2*scale**2))."""
    exponent = -0.5 * (x - loc) ** 2 / scale ** 2
    return amp * np.exp(exponent)
def chisq(x0, x, y):
    """Sum of squared residuals between data ``y`` and the model ``gauss(x, *x0)``."""
    residuals = gauss(x, *x0) - y
    return np.sum(np.abs(residuals) ** 2)
def gauss_fit(x0, x, y, method='powell'):
    """Least-squares fit of a Gaussian to (x, y) starting from ``x0``.

    Returns the scipy ``OptimizeResult`` and the model evaluated at the
    best-fit parameters.
    """
    result = optimize.minimize(chisq, x0, args=(x, y), method=method)
    fitted = gauss(x, *result.x)
    return result, fitted
# Fringe-rate filter pipeline: for each simulation mode, fit a Gaussian to the
# fringe-rate spectrum of every baseline and notch out the fitted main lobe
# with a DAYENU filter, for both the model and raw (noisy) visibilities.
path="/net/sinatra/vault-ike/ntsikelelo/Simulated_data_files/UVH5_files/"
print("Main lobe filtering the data")
# mode_array=np.array(["high"])
mode_array=np.array(["low","high"])
for i in range(len(mode_array)):
    mode=mode_array[i]

    # Load the model visibilities twice: once as an FRFilter directly and once
    # via UVData so the baselines can be conjugated before filtering.
    Model_complete_file = path+"Corrected_redundant_array_Model_zeroth_order_visibilities_2h_"+mode+"_baseline_selected.uvh5"
    Model_complete = hc.frf.FRFilter(Model_complete_file)
    uvd = UVData()
    uvd.read(Model_complete_file)
    freqs = Model_complete.freqs/1e6
    times = (Model_complete.times-Model_complete .times.min()) * 24 * 3600 # seconds
    uvd.conjugate_bls()
    F = hc.frf.FRFilter(uvd)

    # Same loading pattern for the raw (noise-containing) data.
    raw_file = path+"Raw_data_non_redundant_with_noise_"+mode+"_2h.uvh5"
    raw = hc.frf.FRFilter(raw_file)
    uvd = UVData()
    uvd.read(raw_file)
    uvd.conjugate_bls()
    F_raw = hc.frf.FRFilter(uvd)

    # DAYENU suppression factor inside the filtered band.
    filter_factor = [1e-8]
    print("filter factor = "+str(filter_factor))

    # FFT along time to obtain the fringe-rate spectrum used for the fits;
    # the two masks pick out the positive and negative fringe-rate lobes (mHz).
    F.fft_data(ax='time', window='blackman', overwrite=True, ifft=True)
    fr_select = (0< F.frates) & (F.frates < 5)
    fr_select_negative=(-5 < F.frates) & (F.frates<0)

    # Precomputed linear map from E-W baseline length to fringe rate:
    # fringe_value = m * bl_len_EW below.  NOTE(review): 'c' (the intercept) is
    # loaded but never used — presumably intentional; confirm.
    m=np.load("/net/jake/home/ntsikelelo/Simulated_data_files/m_slope_filter.npy")
    c=np.load("/net/jake/home/ntsikelelo/Simulated_data_files/c_intercept_filter.npy")

    # Initial guess [amp, loc, scale] for the Gaussian fit; loc is overwritten
    # per baseline with the predicted fringe rate.
    x0 = np.array([1e-3, 2.0, 0.3])
    x_negative = F.frates[fr_select_negative]
    x = F.frates[fr_select]
    antpos = F.antpos
    filter_fun=np.zeros(F.frates.shape)
    filt_data_complete = copy.deepcopy(F.data)
    filt_data_raw=copy.deepcopy(F_raw.data)

    # Per-baseline bookkeeping saved at the end of each mode iteration.
    E_W_bls_list={}
    fringe_value_list={}
    sigma_list={}
    for k in filt_data_raw:
        # East-West projection of the baseline vector sets the expected
        # fringe rate via the precomputed slope m.
        blvec = (antpos[k[1]] - antpos[k[0]])
        bl_len_EW = blvec[0]
        fringe_value=m*bl_len_EW
        if fringe_value > 0.5:
            # Fit the positive-fringe-rate lobe of the time-averaged spectrum.
            y = np.abs(F.dfft[k]).mean(1)
            x0[1]=fringe_value
            fit, ypred = gauss_fit(x0, x, y[fr_select], method='powell')
            # make the filter
            gmean, gsigma = fit.x[1:]
            # Convert mHz -> Hz; filter width is 2 sigma of the fitted lobe.
            filter_center = -gmean * 1e-3
            filter_half_width = np.abs(gsigma) * 2 * 1e-3
            # print(filter_center,filter_half_width)
            fringe_value_list[k]=filter_center
            sigma_list[k]=gsigma
            E_W_bls_list[k]=bl_len_EW
            # Build the DAYENU notch and subtract the filtered component.
            C = uvt.dspec.dayenu_mat_inv(times, filter_center, filter_half_width, filter_factor, no_regularization=False)
            R = np.linalg.pinv(C, rcond=1e-10)
            filt_data_complete[k] = filt_data_complete[k] - R @ filt_data_complete[k]
            filt_data_raw[k] = filt_data_raw[k] - R @ filt_data_raw[k]
        if fringe_value < -0.5:
            # Mirror of the branch above for baselines with negative
            # predicted fringe rates.
            y = np.abs(F.dfft[k]).mean(1)
            x0[1]=fringe_value
            fit, ypred = gauss_fit(x0, x_negative, y[fr_select_negative], method='powell')
            # make the filter
            gmean, gsigma = fit.x[1:]
            filter_center = -gmean * 1e-3
            filter_half_width = np.abs(gsigma) * 2 * 1e-3
            fringe_value_list[k]=filter_center
            sigma_list[k]=gsigma
            E_W_bls_list[k]=bl_len_EW
            C = uvt.dspec.dayenu_mat_inv(times, filter_center, filter_half_width, filter_factor, no_regularization=False)
            R = np.linalg.pinv(C, rcond=1e-10)
            filt_data_complete[k] = filt_data_complete[k] - R @ filt_data_complete[k]
            filt_data_raw[k] = filt_data_raw[k] - R @ filt_data_raw[k]
        # NOTE(review): baselines with -0.5 <= fringe_value <= 0.5 are left
        # unfiltered; the disabled branch below would have notched them at
        # zero fringe rate instead.
        # else:
        #     filter_center =0
        #     filter_half_width = 0.25 * 1e-3
        #     C = uvt.dspec.dayenu_mat_inv(times, filter_center, filter_half_width, filter_factor, no_regularization=False)
        #     R = np.linalg.pinv(C, rcond=1e-10)
        #     filt_data_complete[k] = R @ filt_data_complete[k]
        #     filt_data_raw[k] = R @ filt_data_raw[k]

    # Persist the per-baseline fit metadata and the filtered visibilities.
    np.save("/home/ntsikelelo/non_redundancy_sim/sigma_gaussian_"+mode+".npy",np.array(sigma_list))
    np.save("/home/ntsikelelo/non_redundancy_sim/fringe_value_gaussian_"+mode+".npy",np.array(fringe_value_list))
    np.save("/home/ntsikelelo/non_redundancy_sim/E_W_baseline_gaussian_"+mode+".npy",np.array(E_W_bls_list))
    F.write_data(filt_data_complete,path+"Model_complete_filtered_Gaussian_non_redundant_"+mode+"_2h.uvh5",overwrite=True)
    F_raw.write_data(filt_data_raw,path+"Raw_data_filtered_Gaussian_non_redundant_with_noise_"+mode+"_2h.uvh5",overwrite=True)
|
Ntsikelelo-CharlesREPO_NAMEFringe_rate_filtersPATH_START.@Fringe_rate_filters_extracted@Fringe_rate_filters-main@Mutual_coupling_fringe_rate_filters@filter_Gaussian.py@.PATH_END.py
|
{
"filename": "ecsv.py",
"repo_name": "astropy/astropy",
"repo_path": "astropy_extracted/astropy-main/astropy/cosmology/_io/ecsv.py",
"type": "Python"
}
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""|Cosmology| <-> ECSV I/O, using |Cosmology.read| and |Cosmology.write|.
This module provides functions to write/read a |Cosmology| object to/from an ECSV file.
The functions are registered with ``readwrite_registry`` under the format name
"ascii.ecsv".
We assume the following setup:
>>> from pathlib import Path
>>> from tempfile import TemporaryDirectory
>>> temp_dir = TemporaryDirectory()
To see reading a Cosmology from an ECSV file, we first write a Cosmology to an ECSV
file:
>>> from astropy.cosmology import Cosmology, Planck18
>>> file = Path(temp_dir.name) / "file.ecsv"
>>> Planck18.write(file)
>>> with open(file) as f: print(f.read())
# %ECSV 1.0
# ---
# datatype:
# - {name: name, datatype: string}
# - {name: H0, unit: km / (Mpc s), datatype: float64, description: Hubble ...}
...
# meta: !!omap
# - {Oc0: 0.2607}
...
# schema: astropy-2.0
name H0 Om0 Tcmb0 Neff m_nu Ob0
Planck18 67.66 0.30966 2.7255 3.046 [0.0,0.0,0.06] 0.04897
<BLANKLINE>
Now we can read the Cosmology from the ECSV file, constructing a new cosmological
instance identical to the ``Planck18`` cosmology from which it was generated.
>>> cosmo = Cosmology.read(file)
>>> print(cosmo)
FlatLambdaCDM(name="Planck18", H0=67.66 km / (Mpc s), Om0=0.30966,
Tcmb0=2.7255 K, Neff=3.046, m_nu=[0. 0. 0.06] eV, Ob0=0.04897)
>>> cosmo == Planck18
True
If a file already exists, attempting to write will raise an error unless
``overwrite=True``.
>>> Planck18.write(file, overwrite=True)
By default the cosmology class is written to the Table metadata. This can be changed to
a column of the table using the ``cosmology_in_meta`` keyword argument.
>>> file = Path(temp_dir.name) / "file2.ecsv"
>>> Planck18.write(file, cosmology_in_meta=False)
>>> with open(file) as f: print(f.read())
# %ECSV 1.0
# ---
# datatype:
# - {name: cosmology, datatype: string}
# - {name: name, datatype: string}
...
# meta: !!omap
# - {Oc0: 0.2607}
...
# schema: astropy-2.0
cosmology name H0 Om0 Tcmb0 Neff m_nu Ob0
FlatLambdaCDM Planck18 67.66 0.30966 2.7255 3.046 [0.0,0.0,0.06] 0.04897
<BLANKLINE>
The ``cosmology`` information (column or metadata) may be omitted if the cosmology class
(or its string name) is passed as the ``cosmology`` keyword argument to
|Cosmology.read|. Alternatively, specific cosmology classes can be used to parse the
data.
>>> from astropy.cosmology import FlatLambdaCDM
>>> print(FlatLambdaCDM.read(file))
FlatLambdaCDM(name="Planck18", H0=67.66 km / (Mpc s), Om0=0.30966,
Tcmb0=2.7255 K, Neff=3.046, m_nu=[0. 0. 0.06] eV, Ob0=0.04897)
When using a specific cosmology class, the class' default parameter values are used to
fill in any missing information.
For files with multiple rows of cosmological parameters, the ``index`` argument is
needed to select the correct row. The index can be an integer for the row number or, if
the table is indexed by a column, the value of that column. If the table is not indexed
and ``index`` is a string, the "name" column is used as the indexing column.
Here is an example where ``index`` is needed and can be either an integer (for the row
number) or the name of one of the cosmologies, e.g. 'Planck15'.
>>> from astropy.cosmology import Planck13, Planck15, Planck18
>>> from astropy.table import vstack
>>> cts = vstack([c.to_format("astropy.table")
... for c in (Planck13, Planck15, Planck18)],
... metadata_conflicts='silent')
>>> file = Path(temp_dir.name) / "file3.ecsv"
>>> cts.write(file)
>>> with open(file) as f: print(f.read())
# %ECSV 1.0
# ---
# datatype:
# - {name: name, datatype: string}
...
# meta: !!omap
# - {Oc0: 0.2607}
...
# schema: astropy-2.0
name H0 Om0 Tcmb0 Neff m_nu Ob0
Planck13 67.77 0.30712 2.7255 3.046 [0.0,0.0,0.06] 0.048252
Planck15 67.74 0.3075 2.7255 3.046 [0.0,0.0,0.06] 0.0486
Planck18 67.66 0.30966 2.7255 3.046 [0.0,0.0,0.06] 0.04897
>>> cosmo = Cosmology.read(file, index="Planck15", format="ascii.ecsv")
>>> cosmo == Planck15
True
Fields of the table in the file can be renamed to match the
`~astropy.cosmology.Cosmology` class' signature using the ``rename`` argument. This is
useful when the file's column names do not match the class' parameter names.
>>> file = Path(temp_dir.name) / "file4.ecsv"
>>> Planck18.write(file, rename={"H0": "Hubble"})
>>> with open(file) as f: print(f.read())
# %ECSV 1.0
# ---
# datatype:
# - {name: name, datatype: string}
...
# meta: !!omap
# - {Oc0: 0.2607}
...
# schema: astropy-2.0
name Hubble Om0 Tcmb0 Neff m_nu Ob0
Planck18 67.66 0.30966 2.7255 3.046 [0.0,0.0,0.06] 0.04897
>>> cosmo = Cosmology.read(file, rename={"Hubble": "H0"})
>>> cosmo == Planck18
True
By default :class:`~astropy.cosmology.Cosmology` instances are written using
`~astropy.table.QTable` as an intermediate representation (for details see
|Cosmology.to_format|, with ``format="astropy.table"``). The `~astropy.table.Table` type
can be changed using the ``cls`` keyword argument.
>>> from astropy.table import Table
>>> file = Path(temp_dir.name) / "file5.ecsv"
>>> Planck18.write(file, cls=Table)
For most use cases, the default ``cls`` of :class:`~astropy.table.QTable` is recommended
and will be largely indistinguishable from other table types, as the ECSV format is
agnostic to the table type. An example of a difference that might necessitate using a
different table type is if a different ECSV schema is desired.
Additional keyword arguments are passed to ``QTable.read`` and ``QTable.write``.
.. testcleanup::
>>> temp_dir.cleanup()
"""
from __future__ import annotations
from typing import TYPE_CHECKING, Any, TypeVar
import astropy.cosmology.units as cu
import astropy.units as u
from astropy.cosmology.connect import readwrite_registry
from astropy.cosmology.core import Cosmology
from astropy.table import QTable
from .table import from_table, to_table
if TYPE_CHECKING:
from collections.abc import Mapping
from astropy.cosmology._typing import _CosmoT
from astropy.io.typing import PathLike, ReadableFileLike, WriteableFileLike
from astropy.table import Table
_TableT = TypeVar("_TableT", "Table")
def read_ecsv(
    filename: PathLike | ReadableFileLike[Table],
    index: int | str | None = None,
    *,
    move_to_meta: bool = False,
    cosmology: str | type[_CosmoT] | None = None,
    rename: Mapping[str, str] | None = None,
    **kwargs: Any,
) -> _CosmoT:
    r"""Read a `~astropy.cosmology.Cosmology` from an ECSV file.

    Parameters
    ----------
    filename : path-like or file-like
        From where to read the Cosmology.
    index : int, str, or None, optional
        Needed to select the row in tables with multiple rows. ``index`` can be
        an integer for the row number or, if the table is indexed by a column,
        the value of that column. If the table is not indexed and ``index`` is
        a string, the "name" column is used as the indexing column.
    move_to_meta : bool (optional, keyword-only)
        Whether to move keyword arguments that are not in the Cosmology class'
        signature to the Cosmology's metadata. Only applied if the Cosmology
        does NOT have a keyword-only argument (e.g. ``**kwargs``). Moved
        arguments are merged with existing metadata, preferring the specified
        metadata on conflict (e.g. for ``Cosmology(meta={'key':10}, key=42)``,
        ``Cosmology.meta`` will be ``{'key': 10}``).
    cosmology : str or type or None (optional, keyword-only)
        The cosmology class (or string name thereof) used to construct the
        instance. The class also supplies default values for any
        non-mandatory arguments missing from the table.
    rename : dict or None (optional, keyword-only)
        A dictionary mapping column names in the table to fields of the
        `~astropy.cosmology.Cosmology` class.
    **kwargs
        Passed to ``QTable.read``.

    Returns
    -------
    `~astropy.cosmology.Cosmology` subclass instance
    """
    # Force the ECSV reader regardless of any caller-supplied format.
    read_kwargs = {**kwargs, "format": "ascii.ecsv"}

    # Enable cosmology units (e.g. redshift) while parsing so that unit-ful
    # columns deserialize correctly.
    with u.add_enabled_units(cu):
        table = QTable.read(filename, **read_kwargs)

    # Construct the cosmology instance from the parsed table.
    return from_table(
        table,
        index=index,
        move_to_meta=move_to_meta,
        cosmology=cosmology,
        rename=rename,
    )
def write_ecsv(
    cosmology: Cosmology,
    file: PathLike | WriteableFileLike[_TableT],
    *,
    overwrite: bool = False,
    cls: type[_TableT] = QTable,
    cosmology_in_meta: bool = True,
    rename: Mapping[str, str] | None = None,
    **kwargs: Any,
) -> None:
    """Serialize the cosmology into an ECSV file.

    Parameters
    ----------
    cosmology : `~astropy.cosmology.Cosmology`
        The cosmology instance to serialize.
    file : path-like or file-like
        Location to save the serialized cosmology.
    overwrite : bool (optional, keyword-only)
        Whether to overwrite the file, if it exists.
    cls : type (optional, keyword-only)
        Astropy :class:`~astropy.table.Table` (sub)class to use as the
        intermediate representation. Default is :class:`~astropy.table.QTable`.
    cosmology_in_meta : bool (optional, keyword-only)
        Whether to put the cosmology class in the Table metadata (if `True`,
        default) or as the first column (if `False`).
    rename : Mapping[str, str] or None (optional keyword-only)
        A mapping of field names on the `~astropy.cosmology.Cosmology` to
        column names of the table.
    **kwargs
        Passed to ``cls.write``.

    Raises
    ------
    TypeError
        If kwarg (optional) 'cls' is not a subclass of `astropy.table.Table`.
    """
    # Convert to the intermediate table form; `to_table` handles the
    # cls validation, metadata placement, and column renaming.
    cosmo_table = to_table(
        cosmology, cls=cls, cosmology_in_meta=cosmology_in_meta, rename=rename
    )
    # Force the ECSV writer regardless of any caller-supplied format.
    write_kwargs = {**kwargs, "format": "ascii.ecsv"}
    cosmo_table.write(file, overwrite=overwrite, **write_kwargs)
def ecsv_identify(
origin: object, filepath: str | None, *args: object, **kwargs: object
) -> bool:
"""Identify if object uses the Table format.
Returns
-------
bool
"""
return filepath is not None and filepath.endswith(".ecsv")
# ===================================================================
# Register

# Hook the reader/writer/identifier into the Cosmology I/O registry so that
# ``Cosmology.read``/``Cosmology.write`` accept ``format="ascii.ecsv"`` (and
# auto-detect ``.ecsv`` paths via the identifier).
readwrite_registry.register_reader("ascii.ecsv", Cosmology, read_ecsv)
readwrite_registry.register_writer("ascii.ecsv", Cosmology, write_ecsv)
readwrite_registry.register_identifier("ascii.ecsv", Cosmology, ecsv_identify)
|
astropyREPO_NAMEastropyPATH_START.@astropy_extracted@astropy-main@astropy@cosmology@_io@ecsv.py@.PATH_END.py
|
{
"filename": "core.py",
"repo_name": "dfm/corner.py",
"repo_path": "corner.py_extracted/corner.py-main/src/corner/core.py",
"type": "Python"
}
|
# -*- coding: utf-8 -*-
__all__ = [
"corner_impl",
"hist2d",
"quantile",
"overplot_lines",
"overplot_points",
]
import copy
import logging
import matplotlib
import numpy as np
from matplotlib import pyplot as pl
from matplotlib.colors import LinearSegmentedColormap, colorConverter
from matplotlib.ticker import (
LogFormatterMathtext,
LogLocator,
MaxNLocator,
NullLocator,
ScalarFormatter,
)
# SciPy is an optional dependency; ``gaussian_filter`` backs the smoothing
# options. When SciPy is absent, leave the name bound to None so callers can
# detect the missing feature instead of crashing at import time.
try:
    from scipy.ndimage import gaussian_filter
except ImportError:
    gaussian_filter = None
def corner_impl(
    xs,
    bins=20,
    range=None,
    axes_scale="linear",
    weights=None,
    color=None,
    hist_bin_factor=1,
    smooth=None,
    smooth1d=None,
    labels=None,
    label_kwargs=None,
    titles=None,
    show_titles=False,
    title_fmt=".2f",
    title_kwargs=None,
    truths=None,
    truth_color="#4682b4",
    scale_hist=False,
    quantiles=None,
    title_quantiles=None,
    verbose=False,
    fig=None,
    max_n_ticks=5,
    top_ticks=False,
    use_math_text=False,
    reverse=False,
    labelpad=0.0,
    hist_kwargs=None,
    **hist2d_kwargs,
):
    """Build a corner (pairwise-marginal) plot of the samples *xs*.

    Draws a K x K grid of axes: 1-D histograms of each dimension on the
    diagonal and 2-D histograms (via :func:`hist2d`) below the diagonal,
    returning the matplotlib figure.  NOTE: the parameter ``range`` shadows
    the builtin inside this function; here it is the per-dimension plot
    extent (or a fraction interpreted as a central quantile interval).
    """
    # Normalize optional arguments to empty containers.
    if quantiles is None:
        quantiles = []
    if title_kwargs is None:
        title_kwargs = dict()
    if label_kwargs is None:
        label_kwargs = dict()
    # If no separate titles are set, copy the axis labels
    if titles is None:
        titles = labels
    # deal with title quantiles so they match quantiles unless desired otherwise
    if title_quantiles is None:
        if len(quantiles) > 0:
            title_quantiles = quantiles
        else:
            # a default for when quantiles not supplied.
            title_quantiles = [0.16, 0.5, 0.84]
    if show_titles and len(title_quantiles) != 3:
        raise ValueError(
            "'title_quantiles' must contain exactly three values; "
            "pass a length-3 list or array using the 'title_quantiles' argument"
        )
    # Deal with 1D sample lists.
    xs = _parse_input(xs)
    assert xs.shape[0] <= xs.shape[1], (
        "I don't believe that you want more " "dimensions than samples!"
    )
    # Parse the weight array.
    if weights is not None:
        weights = np.asarray(weights)
        if weights.ndim != 1:
            raise ValueError("Weights must be 1-D")
        if xs.shape[1] != weights.shape[0]:
            raise ValueError("Lengths of weights must match number of samples")
    # Some magic numbers for pretty axis layout.
    K = len(xs)
    factor = 2.0  # size of one side of one panel
    if reverse:
        lbdim = 0.2 * factor  # size of left/bottom margin
        trdim = 0.5 * factor  # size of top/right margin
    else:
        lbdim = 0.5 * factor  # size of left/bottom margin
        trdim = 0.2 * factor  # size of top/right margin
    whspace = 0.05  # w/hspace size
    plotdim = factor * K + factor * (K - 1.0) * whspace
    dim = lbdim + plotdim + trdim
    # Make axes_scale into a list if necessary, otherwise check length
    if isinstance(axes_scale, str):
        axes_scale = [axes_scale] * K
    else:
        assert (
            len(axes_scale) == K
        ), "'axes_scale' should contain as many elements as data dimensions"
    # Create a new figure if one wasn't provided.
    new_fig = True
    if fig is None:
        fig, axes = pl.subplots(K, K, figsize=(dim, dim))
    else:
        axes, new_fig = _get_fig_axes(fig, K)
    # Format the figure.
    lb = lbdim / dim
    tr = (lbdim + plotdim) / dim
    fig.subplots_adjust(
        left=lb, bottom=lb, right=tr, top=tr, wspace=whspace, hspace=whspace
    )
    # Parse the parameter ranges.
    # force_range stays False when ranges are derived from the data; a
    # user-supplied range forces the axis limits even on a reused figure.
    force_range = False
    if range is None:
        if "extents" in hist2d_kwargs:
            logging.warning(
                "Deprecated keyword argument 'extents'. "
                "Use 'range' instead."
            )
            range = hist2d_kwargs.pop("extents")
        else:
            range = [[x.min(), x.max()] for x in xs]
            # Check for parameters that never change.
            m = np.array([e[0] == e[1] for e in range], dtype=bool)
            if np.any(m):
                raise ValueError(
                    (
                        "It looks like the parameter(s) in "
                        "column(s) {0} have no dynamic range. "
                        "Please provide a `range` argument."
                    ).format(
                        ", ".join(map("{0}".format, np.arange(len(m))[m]))
                    )
                )
    else:
        force_range = True
        # If any of the extents are percentiles, convert them to ranges.
        # Also make sure it's a normal list.
        range = list(range)
        for i, _ in enumerate(range):
            try:
                emin, emax = range[i]
            except TypeError:
                # A scalar entry is a central fraction, e.g. 0.95 keeps the
                # central 95% of the (weighted) samples.
                q = [0.5 - 0.5 * range[i], 0.5 + 0.5 * range[i]]
                range[i] = quantile(xs[i], q, weights=weights)
        if len(range) != xs.shape[0]:
            raise ValueError("Dimension mismatch between samples and range")
    # Parse the bin specifications.
    try:
        bins = [int(bins) for _ in range]
    except TypeError:
        if len(bins) != len(range):
            raise ValueError("Dimension mismatch between bins and range")
    try:
        hist_bin_factor = [float(hist_bin_factor) for _ in range]
    except TypeError:
        if len(hist_bin_factor) != len(range):
            raise ValueError(
                "Dimension mismatch between hist_bin_factor and " "range"
            )
    # Set up the default plotting arguments.
    if color is None:
        color = matplotlib.rcParams["ytick.color"]
    # Set up the default histogram keywords.
    if hist_kwargs is None:
        hist_kwargs = dict()
    hist_kwargs["color"] = hist_kwargs.get("color", color)
    if smooth1d is None:
        hist_kwargs["histtype"] = hist_kwargs.get("histtype", "step")
    # Main loop: row i draws the diagonal histogram of dimension i and the
    # 2-D panels of dimension i against every earlier dimension j < i.
    for i, x in enumerate(xs):
        # Deal with masked arrays.
        if hasattr(x, "compressed"):
            x = x.compressed()
        if np.shape(xs)[0] == 1:
            ax = (
                axes if not isinstance(axes, np.ndarray) else axes.flatten()[0]
            )
        else:
            if reverse:
                ax = axes[K - i - 1, K - i - 1]
            else:
                ax = axes[i, i]
        # Plot the histograms.
        n_bins_1d = int(max(1, np.round(hist_bin_factor[i] * bins[i])))
        if axes_scale[i] == "linear":
            bins_1d = np.linspace(min(range[i]), max(range[i]), n_bins_1d + 1)
        elif axes_scale[i] == "log":
            bins_1d = np.logspace(
                np.log10(min(range[i])), np.log10(max(range[i])), n_bins_1d + 1
            )
        else:
            raise ValueError(
                "Scale "
                + axes_scale[i]
                + "for dimension "
                + str(i)
                + "not supported. Use 'linear' or 'log'"
            )
        if smooth1d is None:
            n, _, _ = ax.hist(x, bins=bins_1d, weights=weights, **hist_kwargs)
        else:
            if gaussian_filter is None:
                raise ImportError("Please install scipy for smoothing")
            # Smoothed 1-D histogram drawn as a step-like line.
            n, _ = np.histogram(x, bins=bins_1d, weights=weights)
            n = gaussian_filter(n, smooth1d)
            x0 = np.array(list(zip(bins_1d[:-1], bins_1d[1:]))).flatten()
            y0 = np.array(list(zip(n, n))).flatten()
            ax.plot(x0, y0, **hist_kwargs)
        # Plot quantiles if wanted.
        if len(quantiles) > 0:
            qvalues = quantile(x, quantiles, weights=weights)
            for q in qvalues:
                ax.axvline(q, ls="dashed", color=color)
            if verbose:
                print("Quantiles:")
                print([item for item in zip(quantiles, qvalues)])
        if show_titles:
            title = None
            if title_fmt is not None:
                # Compute the quantiles for the title. This might redo
                # unneeded computation but who cares.
                q_lo, q_mid, q_hi = quantile(
                    x, title_quantiles, weights=weights
                )
                q_m, q_p = q_mid - q_lo, q_hi - q_mid
                # Format the quantile display.
                fmt = "{{0:{0}}}".format(title_fmt).format
                title = r"${{{0}}}_{{-{1}}}^{{+{2}}}$"
                title = title.format(fmt(q_mid), fmt(q_m), fmt(q_p))
                # Add in the column name if it's given.
                if titles is not None:
                    title = "{0} = {1}".format(titles[i], title)
            elif titles is not None:
                title = "{0}".format(titles[i])
            if title is not None:
                if reverse:
                    # In reverse mode the "title" goes below the axis, so
                    # translate matplotlib's title 'pad' to xlabel 'labelpad'.
                    if "pad" in title_kwargs.keys():
                        title_kwargs_new = copy.copy(title_kwargs)
                        del title_kwargs_new["pad"]
                        title_kwargs_new["labelpad"] = title_kwargs["pad"]
                    else:
                        title_kwargs_new = title_kwargs
                    ax.set_xlabel(title, **title_kwargs_new)
                else:
                    ax.set_title(title, **title_kwargs)
        # Set up the axes.
        _set_xlim(force_range, new_fig, ax, range[i])
        ax.set_xscale(axes_scale[i])
        if scale_hist:
            maxn = np.max(n)
            _set_ylim(force_range, new_fig, ax, [-0.1 * maxn, 1.1 * maxn])
        else:
            _set_ylim(force_range, new_fig, ax, [0, 1.1 * np.max(n)])
        ax.set_yticklabels([])
        if max_n_ticks == 0:
            ax.xaxis.set_major_locator(NullLocator())
            ax.yaxis.set_major_locator(NullLocator())
        else:
            if axes_scale[i] == "linear":
                ax.xaxis.set_major_locator(
                    MaxNLocator(max_n_ticks, prune="lower")
                )
            elif axes_scale[i] == "log":
                ax.xaxis.set_major_locator(LogLocator(numticks=max_n_ticks))
            ax.yaxis.set_major_locator(NullLocator())
        if i < K - 1:
            if top_ticks:
                ax.xaxis.set_ticks_position("top")
                [l.set_rotation(45) for l in ax.get_xticklabels()]
                [l.set_rotation(45) for l in ax.get_xticklabels(minor=True)]
            else:
                ax.set_xticklabels([])
                ax.set_xticklabels([], minor=True)
        else:
            if reverse:
                ax.xaxis.tick_top()
            [l.set_rotation(45) for l in ax.get_xticklabels()]
            [l.set_rotation(45) for l in ax.get_xticklabels(minor=True)]
            if labels is not None:
                if reverse:
                    # Bottom row in reverse mode: label goes on top, so
                    # translate 'labelpad' to the title 'pad' keyword.
                    if "labelpad" in label_kwargs.keys():
                        label_kwargs_new = copy.copy(label_kwargs)
                        del label_kwargs_new["labelpad"]
                        label_kwargs_new["pad"] = label_kwargs["labelpad"]
                    else:
                        label_kwargs_new = label_kwargs
                    ax.set_title(
                        labels[i],
                        position=(0.5, 1.3 + labelpad),
                        **label_kwargs_new,
                    )
                else:
                    ax.set_xlabel(labels[i], **label_kwargs)
                    ax.xaxis.set_label_coords(0.5, -0.3 - labelpad)
            # use MathText for axes ticks
            if axes_scale[i] == "linear":
                ax.xaxis.set_major_formatter(
                    ScalarFormatter(useMathText=use_math_text)
                )
            elif axes_scale[i] == "log":
                ax.xaxis.set_major_formatter(LogFormatterMathtext())
        # Off-diagonal panels for this row.
        for j, y in enumerate(xs):
            if np.shape(xs)[0] == 1:
                ax = axes
            else:
                if reverse:
                    ax = axes[K - i - 1, K - j - 1]
                else:
                    ax = axes[i, j]
            if j > i:
                # Upper triangle stays empty.
                ax.set_frame_on(False)
                ax.set_xticks([])
                ax.set_yticks([])
                continue
            elif j == i:
                continue
            # Deal with masked arrays.
            if hasattr(y, "compressed"):
                y = y.compressed()
            hist2d(
                y,
                x,
                ax=ax,
                range=[range[j], range[i]],
                axes_scale=[axes_scale[j], axes_scale[i]],
                weights=weights,
                color=color,
                smooth=smooth,
                bins=[bins[j], bins[i]],
                new_fig=new_fig,
                force_range=force_range,
                **hist2d_kwargs,
            )
            if max_n_ticks == 0:
                ax.xaxis.set_major_locator(NullLocator())
                ax.yaxis.set_major_locator(NullLocator())
            else:
                if axes_scale[j] == "linear":
                    ax.xaxis.set_major_locator(
                        MaxNLocator(max_n_ticks, prune="lower")
                    )
                elif axes_scale[j] == "log":
                    ax.xaxis.set_major_locator(
                        LogLocator(numticks=max_n_ticks)
                    )
                if axes_scale[i] == "linear":
                    ax.yaxis.set_major_locator(
                        MaxNLocator(max_n_ticks, prune="lower")
                    )
                elif axes_scale[i] == "log":
                    ax.yaxis.set_major_locator(
                        LogLocator(numticks=max_n_ticks)
                    )
            if i < K - 1:
                ax.set_xticklabels([])
                ax.set_xticklabels([], minor=True)
            else:
                if reverse:
                    ax.xaxis.tick_top()
                [l.set_rotation(45) for l in ax.get_xticklabels()]
                [l.set_rotation(45) for l in ax.get_xticklabels(minor=True)]
                if labels is not None:
                    ax.set_xlabel(labels[j], **label_kwargs)
                    if reverse:
                        ax.xaxis.set_label_coords(0.5, 1.4 + labelpad)
                    else:
                        ax.xaxis.set_label_coords(0.5, -0.3 - labelpad)
                # use MathText for axes ticks
                if axes_scale[j] == "linear":
                    ax.xaxis.set_major_formatter(
                        ScalarFormatter(useMathText=use_math_text)
                    )
                elif axes_scale[j] == "log":
                    ax.xaxis.set_major_formatter(LogFormatterMathtext())
            if j > 0:
                ax.set_yticklabels([])
                ax.set_yticklabels([], minor=True)
            else:
                if reverse:
                    ax.yaxis.tick_right()
                [l.set_rotation(45) for l in ax.get_yticklabels()]
                [l.set_rotation(45) for l in ax.get_yticklabels(minor=True)]
                if labels is not None:
                    if reverse:
                        ax.set_ylabel(labels[i], rotation=-90, **label_kwargs)
                        ax.yaxis.set_label_coords(1.3 + labelpad, 0.5)
                    else:
                        ax.set_ylabel(labels[i], **label_kwargs)
                        ax.yaxis.set_label_coords(-0.3 - labelpad, 0.5)
                # use MathText for axes ticks
                if axes_scale[i] == "linear":
                    ax.yaxis.set_major_formatter(
                        ScalarFormatter(useMathText=use_math_text)
                    )
                elif axes_scale[i] == "log":
                    ax.yaxis.set_major_formatter(LogFormatterMathtext())
    # Overlay the "true" values across all panels, if given.
    if truths is not None:
        overplot_lines(fig, truths, reverse=reverse, color=truth_color)
        overplot_points(
            fig,
            [[np.nan if t is None else t for t in truths]],
            reverse=reverse,
            marker="s",
            color=truth_color,
        )
    return fig
def quantile(x, q, weights=None):
"""
Compute sample quantiles with support for weighted samples.
Note
----
When ``weights`` is ``None``, this method simply calls numpy's percentile
function with the values of ``q`` multiplied by 100.
Parameters
----------
x : array_like[nsamples,]
The samples.
q : array_like[nquantiles,]
The list of quantiles to compute. These should all be in the range
``[0, 1]``.
weights : Optional[array_like[nsamples,]]
An optional weight corresponding to each sample. These
Returns
-------
quantiles : array_like[nquantiles,]
The sample quantiles computed at ``q``.
Raises
------
ValueError
For invalid quantiles; ``q`` not in ``[0, 1]`` or dimension mismatch
between ``x`` and ``weights``.
"""
x = np.atleast_1d(x)
q = np.atleast_1d(q)
if np.any(q < 0.0) or np.any(q > 1.0):
raise ValueError("Quantiles must be between 0 and 1")
if weights is None:
return np.percentile(x, list(100.0 * q))
else:
weights = np.atleast_1d(weights)
if len(x) != len(weights):
raise ValueError("Dimension mismatch: len(weights) != len(x)")
idx = np.argsort(x)
sw = weights[idx]
cdf = np.cumsum(sw)[:-1]
cdf /= cdf[-1]
cdf = np.append(0, cdf)
return np.interp(q, cdf, x[idx]).tolist()
def hist2d(
    x,
    y,
    bins=20,
    range=None,
    axes_scale=["linear", "linear"],
    weights=None,
    levels=None,
    smooth=None,
    ax=None,
    color=None,
    quiet=False,
    plot_datapoints=True,
    plot_density=True,
    plot_contours=True,
    no_fill_contours=False,
    fill_contours=False,
    contour_kwargs=None,
    contourf_kwargs=None,
    data_kwargs=None,
    pcolor_kwargs=None,
    new_fig=True,
    force_range=False,
    **kwargs,
):
    """
    Plot a 2-D histogram of samples.
    Parameters
    ----------
    x : array_like[nsamples,]
        The samples.
    y : array_like[nsamples,]
        The samples.
    bins : int or [int, int]
        The number of bins in each dimension.
    range : Optional[[(float, float), (float, float)]]
        The plot extents in x and y; defaults to the data extents.
    weights : Optional[array_like[nsamples,]]
        Per-sample weights passed to ``numpy.histogram2d``.
    smooth : Optional[float]
        Gaussian smoothing width (in bins) applied to the 2-D histogram;
        requires scipy.
    axes_scale : iterable (2,)
        Scale (``"linear"``, ``"log"``) to use for each dimension.
    quiet : bool
        If true, suppress warnings for small datasets.
    levels : array_like
        The contour levels to draw.
        If None, (0.5, 1, 1.5, 2)-sigma equivalent contours are drawn,
        i.e., containing 11.8%, 39.3%, 67.5% and 86.4% of the samples.
        See https://corner.readthedocs.io/en/latest/pages/sigmas/
    ax : matplotlib.Axes
        A axes instance on which to add the 2-D histogram.
    plot_datapoints : bool
        Draw the individual data points.
    plot_density : bool
        Draw the density colormap.
    plot_contours : bool
        Draw the contours.
    no_fill_contours : bool
        Add no filling at all to the contours (unlike setting
        ``fill_contours=False``, which still adds a white fill at the densest
        points).
    fill_contours : bool
        Fill the contours.
    contour_kwargs : dict
        Any additional keyword arguments to pass to the `contour` method.
    contourf_kwargs : dict
        Any additional keyword arguments to pass to the `contourf` method.
    data_kwargs : dict
        Any additional keyword arguments to pass to the `plot` method when
        adding the individual data points.
    pcolor_kwargs : dict
        Any additional keyword arguments to pass to the `pcolor` method when
        adding the density colormap.
    """
    if ax is None:
        ax = pl.gca()
    # Set the default range based on the data range if not provided.
    if range is None:
        if "extent" in kwargs:
            logging.warning(
                "Deprecated keyword argument 'extent'. Use 'range' instead."
            )
            range = kwargs["extent"]
        else:
            range = [[x.min(), x.max()], [y.min(), y.max()]]
    # Set up the default plotting arguments.
    if color is None:
        color = matplotlib.rcParams["ytick.color"]
    # Choose the default "sigma" contour levels.
    if levels is None:
        levels = 1.0 - np.exp(-0.5 * np.arange(0.5, 2.1, 0.5) ** 2)
    # This is the base color of the axis (background color)
    base_color = ax.get_facecolor()
    # This is the color map for the density plot, over-plotted to indicate the
    # density of the points near the center.
    density_cmap = LinearSegmentedColormap.from_list(
        "density_cmap", [color, colorConverter.to_rgba(base_color, alpha=0.0)]
    )
    # This color map is used to hide the points at the high density areas.
    base_cmap = LinearSegmentedColormap.from_list(
        "base_cmap", [base_color, base_color], N=2
    )
    # This "color map" is the list of colors for the contour levels if the
    # contours are filled.
    rgba_color = colorConverter.to_rgba(color)
    contour_cmap = [list(rgba_color) for l in levels] + [rgba_color]
    for i, l in enumerate(levels):
        # Fade the alpha channel towards the outermost contour.
        contour_cmap[i][-1] *= float(i) / (len(levels) + 1)
    # Parse the bin specifications.
    try:
        bins = [int(bins) for _ in range]
    except TypeError:
        if len(bins) != len(range):
            raise ValueError("Dimension mismatch between bins and range")
    # We'll make the 2D histogram to directly estimate the density.
    bins_2d = []
    if axes_scale[0] == "linear":
        bins_2d.append(np.linspace(min(range[0]), max(range[0]), bins[0] + 1))
    elif axes_scale[0] == "log":
        bins_2d.append(
            np.logspace(
                np.log10(min(range[0])),
                np.log10(max(range[0])),
                bins[0] + 1,
            )
        )
    if axes_scale[1] == "linear":
        bins_2d.append(np.linspace(min(range[1]), max(range[1]), bins[1] + 1))
    elif axes_scale[1] == "log":
        bins_2d.append(
            np.logspace(
                np.log10(min(range[1])),
                np.log10(max(range[1])),
                bins[1] + 1,
            )
        )
    try:
        H, X, Y = np.histogram2d(
            x.flatten(),
            y.flatten(),
            bins=bins_2d,
            weights=weights,
        )
    except ValueError:
        raise ValueError(
            "It looks like at least one of your sample columns "
            "have no dynamic range. You could try using the "
            "'range' argument."
        )
    if H.sum() == 0:
        raise ValueError(
            "It looks like the provided 'range' is not valid "
            "or the sample is empty."
        )
    if smooth is not None:
        if gaussian_filter is None:
            raise ImportError("Please install scipy for smoothing")
        H = gaussian_filter(H, smooth)
    if plot_contours or plot_density:
        # Compute the density levels.
        # Sort bin counts in descending order and find, for each requested
        # mass fraction v0, the count threshold enclosing that fraction.
        Hflat = H.flatten()
        inds = np.argsort(Hflat)[::-1]
        Hflat = Hflat[inds]
        sm = np.cumsum(Hflat)
        sm /= sm[-1]
        V = np.empty(len(levels))
        for i, v0 in enumerate(levels):
            try:
                V[i] = Hflat[sm <= v0][-1]
            except IndexError:
                V[i] = Hflat[0]
        V.sort()
        m = np.diff(V) == 0
        if np.any(m) and not quiet:
            logging.warning("Too few points to create valid contours")
        # Perturb duplicate thresholds slightly so contour levels are
        # strictly increasing (matplotlib requires distinct levels).
        while np.any(m):
            V[np.where(m)[0][0]] *= 1.0 - 1e-4
            m = np.diff(V) == 0
        V.sort()
        # Compute the bin centers.
        X1, Y1 = 0.5 * (X[1:] + X[:-1]), 0.5 * (Y[1:] + Y[:-1])
        # Extend the array for the sake of the contours at the plot edges.
        H2 = H.min() + np.zeros((H.shape[0] + 4, H.shape[1] + 4))
        H2[2:-2, 2:-2] = H
        H2[2:-2, 1] = H[:, 0]
        H2[2:-2, -2] = H[:, -1]
        H2[1, 2:-2] = H[0]
        H2[-2, 2:-2] = H[-1]
        H2[1, 1] = H[0, 0]
        H2[1, -2] = H[0, -1]
        H2[-2, 1] = H[-1, 0]
        H2[-2, -2] = H[-1, -1]
        X2 = np.concatenate(
            [
                X1[0] + np.array([-2, -1]) * np.diff(X1[:2]),
                X1,
                X1[-1] + np.array([1, 2]) * np.diff(X1[-2:]),
            ]
        )
        Y2 = np.concatenate(
            [
                Y1[0] + np.array([-2, -1]) * np.diff(Y1[:2]),
                Y1,
                Y1[-1] + np.array([1, 2]) * np.diff(Y1[-2:]),
            ]
        )
    if plot_datapoints:
        if data_kwargs is None:
            data_kwargs = dict()
        data_kwargs["color"] = data_kwargs.get("color", color)
        data_kwargs["ms"] = data_kwargs.get("ms", 2.0)
        data_kwargs["mec"] = data_kwargs.get("mec", "none")
        data_kwargs["alpha"] = data_kwargs.get("alpha", 0.1)
        ax.plot(x, y, "o", zorder=-1, rasterized=True, **data_kwargs)
    # Plot the base fill to hide the densest data points.
    if (plot_contours or plot_density) and not no_fill_contours:
        ax.contourf(
            X2,
            Y2,
            H2.T,
            [V.min(), H.max()],
            cmap=base_cmap,
            antialiased=False,
        )
    if plot_contours and fill_contours:
        if contourf_kwargs is None:
            contourf_kwargs = dict()
        contourf_kwargs["colors"] = contourf_kwargs.get("colors", contour_cmap)
        contourf_kwargs["antialiased"] = contourf_kwargs.get(
            "antialiased", False
        )
        ax.contourf(
            X2,
            Y2,
            H2.T,
            np.concatenate([[0], V, [H.max() * (1 + 1e-4)]]),
            **contourf_kwargs,
        )
    # Plot the density map. This can't be plotted at the same time as the
    # contour fills.
    elif plot_density:
        if pcolor_kwargs is None:
            pcolor_kwargs = dict()
        ax.pcolor(X, Y, H.max() - H.T, cmap=density_cmap, **pcolor_kwargs)
    # Plot the contour edge colors.
    if plot_contours:
        if contour_kwargs is None:
            contour_kwargs = dict()
        contour_kwargs["colors"] = contour_kwargs.get("colors", color)
        ax.contour(X2, Y2, H2.T, V, **contour_kwargs)
    _set_xlim(force_range, new_fig, ax, range[0])
    _set_ylim(force_range, new_fig, ax, range[1])
    ax.set_xscale(axes_scale[0])
    ax.set_yscale(axes_scale[1])
def overplot_lines(fig, xs, reverse=False, **kwargs):
    """
    Overplot lines on a figure generated by ``corner.corner``
    Parameters
    ----------
    fig : Figure
        The figure generated by a call to :func:`corner.corner`.
    xs : array_like[ndim]
        The values where the lines should be plotted. This must have ``ndim``
        entries, where ``ndim`` is compatible with the :func:`corner.corner`
        call that originally generated the figure. The entries can optionally
        be ``None`` to omit the line in that axis.
    reverse: bool
        A boolean flag that should be set to 'True' if the corner plot itself
        was plotted with 'reverse=True'.
    **kwargs
        Any remaining keyword arguments are passed to the ``ax.axvline``
        method.
    """
    K = len(xs)
    axes, _ = _get_fig_axes(fig, K)
    # A single indexer handles both the normal and the reversed layout.
    if reverse:
        panel = lambda r, c: axes[K - r - 1, K - c - 1]
    else:
        panel = lambda r, c: axes[r, c]
    for k1 in range(K):
        if xs[k1] is not None:
            panel(k1, k1).axvline(xs[k1], **kwargs)
        for k2 in range(k1 + 1, K):
            if xs[k1] is not None:
                panel(k2, k1).axvline(xs[k1], **kwargs)
            if xs[k2] is not None:
                panel(k2, k1).axhline(xs[k2], **kwargs)
def overplot_points(fig, xs, reverse=False, **kwargs):
    """
    Overplot points on a figure generated by ``corner.corner``
    Parameters
    ----------
    fig : Figure
        The figure generated by a call to :func:`corner.corner`.
    xs : array_like[nsamples, ndim]
        The coordinates of the points to be plotted. This must have an ``ndim``
        that is compatible with the :func:`corner.corner` call that originally
        generated the figure.
    reverse: bool
        A boolean flag that should be set to 'True' if the corner plot itself
        was plotted with 'reverse=True'.
    **kwargs
        Any remaining keyword arguments are passed to the ``ax.plot``
        method.
    """
    # Default to disconnected point markers unless the caller overrides.
    kwargs.setdefault("marker", ".")
    kwargs.setdefault("linestyle", "none")
    xs = _parse_input(xs)
    K = len(xs)
    axes, _ = _get_fig_axes(fig, K)
    for k1 in range(K):
        if reverse:
            for k2 in range(k1):
                axes[K - k1 - 1, K - k2 - 1].plot(xs[k2], xs[k1], **kwargs)
        else:
            for k2 in range(k1 + 1, K):
                axes[k2, k1].plot(xs[k1], xs[k2], **kwargs)
def _parse_input(xs):
xs = np.atleast_1d(xs)
if len(xs.shape) == 1:
xs = np.atleast_2d(xs)
else:
assert len(xs.shape) == 2, "The input sample array must be 1- or 2-D."
xs = xs.T
return xs
def _get_fig_axes(fig, K):
if not fig.axes:
return fig.subplots(K, K), True
try:
axarr = np.array(fig.axes).reshape((K, K))
return axarr.item() if axarr.size == 1 else axarr.squeeze(), False
except ValueError:
raise ValueError(
(
"Provided figure has {0} axes, but data has "
"dimensions K={1}"
).format(len(fig.axes), K)
)
def _set_xlim(force, new_fig, ax, new_xlim):
if force or new_fig:
return ax.set_xlim(new_xlim)
xlim = ax.get_xlim()
return ax.set_xlim([min(xlim[0], new_xlim[0]), max(xlim[1], new_xlim[1])])
def _set_ylim(force, new_fig, ax, new_ylim):
if force or new_fig:
return ax.set_ylim(new_ylim)
ylim = ax.get_ylim()
return ax.set_ylim([min(ylim[0], new_ylim[0]), max(ylim[1], new_ylim[1])])
|
dfmREPO_NAMEcorner.pyPATH_START.@corner.py_extracted@corner.py-main@src@corner@core.py@.PATH_END.py
|
{
"filename": "util.py",
"repo_name": "juliotux/astropop",
"repo_path": "astropop_extracted/astropop-main/astropop/framedata/util.py",
"type": "Python"
}
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Utilities for loading data as FrameData."""
import os
import numpy as np
from astropy.io import fits
from astropy.units import Quantity
from astropy.nddata import CCDData
from .framedata import FrameData
from ._compat import _extract_ccddata, _extract_fits, imhdus
__all__ = ['check_framedata', 'read_framedata']
# Keyword arguments that must be routed to the FITS extractor
# (``_extract_fits``) rather than to the FrameData constructor.
_fits_kwargs = ['hdu', 'unit', 'hdu_uncertainty',
                'hdu_mask', 'unit_key']
def read_framedata(obj, copy=False, **kwargs):
    """Read an object to a FrameData container.
    Parameters
    ----------
    obj: any compatible, see notes
        Object that will be read into the FrameData.
    copy: bool (optional)
        If the object is already a FrameData, return a copy instead of the
        original one.
        Default: False
    Returns
    -------
    frame: `FrameData`
        The resulting FrameData object.
    Raises
    ------
    TypeError
        If ``obj`` is of a type that cannot be converted to FrameData.
    Notes
    -----
    - If obj is a string or `~pathlib.Path`, it will be interpreted as a file.
      File types will be checked. Just FITS format supported now.
    - If obj is `~astropy.io.fits.HDUList`, `~astropy.io.fits.HDUList` or
      `~astropy.nddata.CCDData`, they will be properly translated to
      `FrameData`.
    - If numbers or `~astropop.math.physical.QFloat`,
      `~astropy.units.Quantity`, they will be translated to a `FrameData`
      without metadata.
    """
    if isinstance(obj, FrameData):
        # Already the right container; copy only when explicitly requested.
        if copy:
            obj = obj.copy(**kwargs)
    elif isinstance(obj, CCDData):
        obj = FrameData(**_extract_ccddata(obj), **kwargs)
    elif isinstance(obj, (str, bytes, os.PathLike, fits.HDUList, *imhdus)):
        # separate kwargs to be sent to extractors
        fits_kwargs = {}
        for k in _fits_kwargs:
            if k in kwargs.keys():
                fits_kwargs[k] = kwargs.pop(k)
        obj = FrameData(**_extract_fits(obj, **fits_kwargs), **kwargs)
    elif isinstance(obj, Quantity):
        obj = FrameData(obj.value, unit=obj.unit, **kwargs)
    elif isinstance(obj, np.ndarray):
        obj = FrameData(obj, **kwargs)
    elif obj.__class__.__name__ == "QFloat":
        # Checked by class name because importing QFloat here would create a
        # cyclic dependency that breaks the code.
        obj = FrameData(obj.nominal, unit=obj.unit,
                        uncertainty=obj.uncertainty, **kwargs)
    else:
        raise TypeError(f'Object {obj} is not compatible with FrameData.')
    return obj
# Backwards-compatible alias: ``check_framedata`` is the historical name
# for ``read_framedata``.
check_framedata = read_framedata
|
juliotuxREPO_NAMEastropopPATH_START.@astropop_extracted@astropop-main@astropop@framedata@util.py@.PATH_END.py
|
{
"filename": "compute_second_order_aperture_mass_correlations_MS_subsampled.py",
"repo_name": "sheydenreich/threepoint",
"repo_path": "threepoint_extracted/threepoint-main/python_scripts/old/compute_second_order_aperture_mass_correlations_MS_subsampled.py",
"type": "Python"
}
|
from matplotlib import use
from file_loader import get_millennium_downsampled
from utility import aperture_mass_computer,extract_second_order_aperture_masses
import numpy as np
import sys
from tqdm import tqdm
import multiprocessing.managers
from multiprocessing import Pool
from astropy.io import fits
import os
import matplotlib.pyplot as plt
# Number of galaxies drawn from each line of sight (used in output filenames).
Ngal_subsample=288000
# Custom multiprocessing manager used to share a numpy array between workers.
class MyManager(multiprocessing.managers.BaseManager):
    pass
MyManager.register('np_zeros', np.zeros, multiprocessing.managers.ArrayProxy)
# Toggle between the multiprocessing pool and the simple serial loop below.
process_parallel=False
startpath = '/home/laila/OneDrive/1_Work/5_Projects/02_3ptStatistics/Map3_Covariances/MS/' # '/vol/euclid6/euclid6_ssd/sven/threepoint_with_laila/Map3_Covariances/MS/'
def compute_aperture_masses_of_field(los,theta_ap_array,save_map=None,use_polynomial_filter=False):
    """Compute second-order aperture-mass statistics for one line of sight.

    Loads the downsampled Millennium shear catalog for line of sight ``los``
    and forwards it to ``extract_second_order_aperture_masses`` for each
    aperture radius in ``theta_ap_array``.
    """
    # Field side length; presumably 4 degrees expressed in arcmin -- TODO confirm units.
    fieldsize = 4.*60
    npix = 4096
    Xs, Ys, shears1, shears2 = get_millennium_downsampled(los, Ngal_subsample)
    # Combine the two shear components into one complex field.
    shear=shears1+1.0j*shears2
    result = extract_second_order_aperture_masses(Xs, Ys,shear, npix,theta_ap_array,fieldsize,compute_mcross=False,save_map=save_map,use_polynomial_filter=use_polynomial_filter)
    return result
def compute_aperture_masses_of_field_kernel(kwargs):
    # Worker entry point for the multiprocessing pool: unpack the argument
    # list and write this realisation's result column into the shared array.
    result, los, theta_ap_array, save_map, use_polynomial_filter, realisation = kwargs
    map2=compute_aperture_masses_of_field(los, theta_ap_array, save_map=save_map, use_polynomial_filter=use_polynomial_filter)
    result[:,realisation]=map2
def compute_all_aperture_masses(all_los,savepath,aperture_masses = [1.17,2.34,4.69,9.37],n_processes = 64,use_polynomial_filter=False):
    """Compute <Map^2> for every line of sight and write the results to disk.

    Parameters
    ----------
    all_los : sequence of int
        Line-of-sight indices to process (must be indexable).
    savepath : str
        Directory prefix (including trailing slash) for the output files.
    aperture_masses : list of float
        Aperture radii at which the statistic is evaluated.  NOTE: the
        mutable default is shared between calls; it is only read here.
    n_processes : int
        Worker count for the pool (parallel branch only).
    use_polynomial_filter : bool
        Forwarded to the aperture-mass computation.
    """
    n_files = len(all_los)
    n_thetas=len(aperture_masses)
    if(process_parallel):
        m = MyManager()
        m.start()
        # Shared (n_thetas, n_files) array; workers fill one column each.
        results=m.np_zeros((n_thetas, n_files))
        with Pool(processes=n_processes) as p:
            args=[[results, all_los[i], aperture_masses, None, use_polynomial_filter, i] for i in range(n_files)]
            for i in tqdm(p.imap_unordered(compute_aperture_masses_of_field_kernel, args), total=n_files):
                pass
        # BUG FIX: the f-string prefix was missing, so the output file was
        # literally named 'map_squared_ngal_{Ngal_subsample}'.
        np.savetxt(savepath+f'map_squared_ngal_{Ngal_subsample}',results)
    else:
        # Serial fallback: one output file per line of sight.
        for los in all_los:
            print(f"Processing {los}")
            map2=compute_aperture_masses_of_field(los, aperture_masses, save_map=None, use_polynomial_filter=use_polynomial_filter)
            np.savetxt(savepath+f"map_squared_{los}_ngal_{Ngal_subsample}.dat", map2)
if(__name__=='__main__'):
    # Process all 64 Millennium lines of sight.
    all_los = range(64)
    # if not 'SLICS' in dirpath:
    # dir_end_path = dirpath.split('/')[-1]
    savepath = startpath + 'map_squared_our_thetas'
    print('Writing summary statistics to ',savepath)
    if not os.path.exists(savepath):
        os.makedirs(savepath)
    # NOTE(review): these aperture radii [2, 4, 8, 16] override the function
    # default [1.17, 2.34, 4.69, 9.37] -- confirm this is intended.
    compute_all_aperture_masses(all_los,savepath+'/',n_processes=10,aperture_masses = [2,4,8,16])
|
sheydenreichREPO_NAMEthreepointPATH_START.@threepoint_extracted@threepoint-main@python_scripts@old@compute_second_order_aperture_mass_correlations_MS_subsampled.py@.PATH_END.py
|
{
"filename": "source_candidate.py",
"repo_name": "epfl-radio-astro/LiSA",
"repo_path": "LiSA_extracted/LiSA-main/modules/util/source_candidate.py",
"type": "Python"
}
|
############################################################
# Helper class to store source candidate information
############################################################
class SourceCandidate:
    """Container for one source candidate at voxel ``(i, j, k)``.

    ``i`` and ``j`` are the spatial pixel indices, ``k`` the central
    frequency-channel index (derived from ``kslice`` when not given),
    ``sig`` the detection significance and ``kslice`` the channel range
    over which the candidate was detected.
    """
    def __init__(self, i, j, k = None, kslice = None, sig = None):
        self.i = i
        self.j = j
        self.k = k
        self.sig = sig
        self.kslice = kslice
        # default setting for central frequency index
        # (fixed to identity checks: comparing a slice with != None is fragile)
        if kslice is not None:
            if k is None: self.k = int(kslice.start *0.5 + kslice.stop*0.5)
    def matching(self, truth_source, margin = 10):
        """Return True if the truth source's channel index falls inside this
        candidate's channel range, widened by ``margin`` on each side."""
        return truth_source.z() >= self.kslice.start-margin and truth_source.z() <= self.kslice.stop + margin
    def __str__(self):
        return "Source candidate at i = {0}, j = {1}, k = {2}, significance = {3}".format(self.i,self.j,self.k, self.sig)
    def to_line(self):
        """Serialize as one whitespace-separated line for the candidate files."""
        return "{0} {1} {2} {3}\n".format(self.i,self.j,self.k, self.sig)
############################################################
# Source grouping helper class and methods
############################################################
class SourceGroup:
    """A cluster of adjacent source-candidate voxels.

    Each source is a list ``[x, y, z, sig]``.  The group tracks the member
    list, the most significant member (``_max``) and the per-axis bounding
    box (``_range_min`` / ``_range_max``).
    """
    def __init__(self, source):
        self._source_list = [source]
        self._max = source
        # Two independent copies of the spatial coordinates (sig excluded).
        self._range_min = source[:-1]
        self._range_max = source[:-1]
    def __iadd__(self,other):
        """Add a source, updating the bounding box and the maximum.

        BUG FIX: ``__iadd__`` must return ``self``; previously it returned
        ``None``, so ``group += source`` rebound ``group`` to ``None``.
        """
        self._source_list.append(other)
        for i in range(3):
            if other[i] < self._range_min[i]:
                self._range_min[i] = other[i]
            if other[i] > self._range_max[i]:
                self._range_max[i] = other[i]
        if other[-1] > self._max[-1]:
            self._max = other
        return self
    def merge(self, other):
        """Absorb every source of *other* that is not already a member."""
        for s in other._source_list:
            if s not in self._source_list:
                self.__iadd__(s)
    def __str__(self):
        return "{0}-{1}-{2}; {3}-{4}-{5}; {6}-{7}-{8};".format(self._range_min[0], self._max[0], self._range_max[0],
                                                               self._range_min[1], self._max[1], self._range_max[1],
                                                               self._range_min[2], self._max[2], self._range_max[2])
    # check if a source is adjancent to any other source in the group
    def source_is_adjacent(self, other, dvox = 10):
        x0, y0, z0, sig0 = other
        for x, y, z, sig in self._source_list:
            # Twice the tolerance along the spectral (z) axis.
            if abs(x-x0) <= dvox and abs(y-y0) <= dvox and abs(z-z0) <= dvox*2:
                return True
        return False
    def overlaps(self, other):
        """Return True if the bounding boxes overlap along every axis."""
        groups_overlap = True
        for i in range(3):
            overlapping_range = range(max(self._range_min[i], other._range_min[i]), min(self._range_max[i], other._range_max[i])+1)
            groups_overlap &= len(overlapping_range) > 0
        return groups_overlap
    @property
    def center(self):
        # The group "center" is the most significant member.
        return self._max
    @property
    def symmetryx(self):
        side1 = self.center[0] - self._range_min[0]
        side2 = self._range_max[0] - self.center[0]
        return side1 - side2
    @property
    def symmetryy(self):
        side1 = self.center[1] - self._range_min[1]
        side2 = self._range_max[1] - self.center[1]
        return side1 - side2
    @property
    def symmetryz(self):
        side1 = self.center[2] - self._range_min[2]
        side2 = self._range_max[2] - self.center[2]
        return side1 - side2
    @property
    def size(self):
        return len(self._source_list)
    @property
    def lenx(self):
        return self._range_max[0] - self._range_min[0]
    @property
    def leny(self):
        return self._range_max[1] - self._range_min[1]
    @property
    def lenz(self):
        return self._range_max[2] - self._range_min[2]
def print_overlaps(groups):
    """Print each group followed by every other group whose bounding box
    overlaps with it (diagnostic helper)."""
    for current in groups:
        print(current)
        for candidate in groups:
            # != falls back to identity here (SourceGroup defines no __eq__).
            if current != candidate and current.overlaps(candidate):
                print("  overlapping:", candidate)
def prune_candidates(infile, outfile, k):
    """Copy candidates from *infile* to *outfile*, dropping every candidate
    whose channel index z equals *k* (e.g. a known bad channel)."""
    kept = []
    with open(infile, 'r') as fin:
        for row in fin:
            i, j, z, significance = row.split()
            if int(z) != k:
                kept.append([int(i), int(j), int(z), float(significance)])
    with open(outfile, 'w') as fout:
        for cand in kept:
            fout.write("{0} {1} {2} {3}\n".format(*cand))
def count_neighboring_groups(x1, y1, others, d = 30):
    """Count points in *others* within projected distance *d* of (x1, y1).

    The point identical to (x1, y1) itself is excluded from the count.
    BUG FIX: this used ``math.sqrt`` but ``math`` is never imported in this
    module, which raised NameError at runtime; the distance is now computed
    with plain arithmetic.
    """
    neighbors = 0
    for x2, y2 in others:
        if x1 == x2 and y1 == y2: continue
        dcel = ((x1 - x2)**2 + (y1 - y2)**2) ** 0.5
        if dcel < d: neighbors += 1
    return neighbors
def remove_suspicious_candidates(infile, outfile, dvox = 30, max_neighbors = 3):
    """Filter a candidate list, dropping sources with too many (x, y) neighbours.

    Sources that have more than *max_neighbors* other candidates within a
    projected distance of *dvox* in the (x, y) plane are treated as suspicious
    (e.g. an imaging artefact producing a column of detections) and removed.

    Parameters
    ----------
    infile : str
        Path to a text file with one ``x y z sig`` candidate per line.
    outfile : str
        Path the surviving candidates are written to, same format.
    dvox : int
        Neighbourhood radius, in the (x, y) plane only.
    max_neighbors : int
        Largest neighbour count a candidate may have and still be kept.
    """
    sources = []
    with open(infile, 'r') as f:
        for line in f:
            x, y, z, sig = line.split()
            sources.append([ int(x), int(y), int(z), float(sig)])
    # Build the (x, y) position list once instead of once per candidate
    # (the original rebuilt it inside the loop); the scan itself stays
    # O(n^2), which is fine for the candidate-list sizes involved.
    positions = [(s[0], s[1]) for s in sources]
    keep_sources = []
    for s1 in sources:
        n_line_neighbors = count_neighboring_groups(s1[0], s1[1], positions, d = dvox)
        if n_line_neighbors <= max_neighbors: keep_sources.append(s1)
    with open(outfile, 'w') as f:
        for s in keep_sources:
            f.write( "{0} {1} {2} {3}\n".format(*s))
def make_source_groups(infile, dvox, min_sig_threshold = -999):
    """Read candidates from *infile* and cluster them into SourceGroup objects.

    Candidates below *min_sig_threshold* are skipped; the rest are attached to
    every existing group they are adjacent to (within *dvox*), and overlapping
    groups are merged afterwards.
    """
    groups = []
    # assign each source to groups
    nsources = 0
    with open(infile, 'r') as f:
        for line in f:
            x, y, z, sig = line.split()
            source = [ int(x), int(y), int(z), float(sig)]
            if source[-1] < min_sig_threshold: continue
            nsources += 1
            found_group = False
            for g in groups:
                if g.source_is_adjacent(source, dvox):
                    # NOTE(review): no break here, so a source adjacent to
                    # several groups is added to each of them; the merge pass
                    # below and the size assertion suggest this is intended --
                    # confirm SourceGroup.merge de-duplicates members,
                    # otherwise the assert at the end can fire.
                    g += source
                    found_group = True
            if not found_group:
                groups.append( SourceGroup(source))
    # merge overlapping groups
    for i in range(len(groups)):
        for j in range(len(groups)):
            if groups[i] == None or groups[j] == None or groups[i] == groups[j]: continue
            if groups[i].overlaps(groups[j]):
                groups[i].merge(groups[j])
                # mark the absorbed group for removal
                groups[j] = None
    groups = [g for g in groups if g != None]
    # make sure that we kept all sources
    assert nsources == sum([g.size for g in groups])
    return groups
def merge_source_candidates(infile, outfile, dvox = 10, min_group_size = 1, min_sig_threshold = -999, passing_sig_threshold = -999, verbose = False):
    """Cluster candidates and write one line per surviving group center.

    A group survives when it contains at least *min_group_size* sources and,
    when *passing_sig_threshold* is non-negative, its central significance
    also exceeds that threshold.
    """
    def _passes(group):
        # Size gate first, then the optional significance gate.
        if group.size < min_group_size:
            return False
        if passing_sig_threshold < 0:
            return True
        return group.center[-1] > passing_sig_threshold

    candidates = make_source_groups(infile, dvox, min_sig_threshold)
    survivors = [g for g in candidates if g != None and _passes(g)]
    with open(outfile, 'w') as f:
        for g in survivors:
            if verbose:
                print("Group with {0} sources centered at ({1}, {2}, {3}, sig = {4})".format(g.size, *g.center))
            f.write( "{0} {1} {2} {3}\n".format(*g.center))
#print("#############")
#print_overlaps(groups)
|
epfl-radio-astroREPO_NAMELiSAPATH_START.@LiSA_extracted@LiSA-main@modules@util@source_candidate.py@.PATH_END.py
|
{
"filename": "test_parfile_writing_format.py",
"repo_name": "nanograv/PINT",
"repo_path": "PINT_extracted/PINT-master/tests/test_parfile_writing_format.py",
"type": "Python"
}
|
import os
import pytest
from io import StringIO
import pytest
from hypothesis import given
from hypothesis.strategies import sampled_from
from pint.models import get_model, get_model_and_toas
from pint import fitter
from pinttestdata import datadir
def test_SWM():
    """SWM should be present in PINT output, not in TEMPO/TEMPO2 output."""
    m = get_model(os.path.join(datadir, "B1855+09_NANOGrav_9yv1.gls.par"))
    assert (
        "SWM" in m.as_parfile()
        and "SWM" not in m.as_parfile(format="tempo")
        and "SWM" not in m.as_parfile(format="tempo2")
    )


def test_CHI2():
    """CHI2 should be present after a fit, in every output format.

    NOTE(review): the original docstring claimed CHI2 is absent from
    TEMPO/TEMPO2 output, but the assertions below require it in all formats.
    """
    m, t = get_model_and_toas(
        os.path.join(datadir, "NGC6440E.par"), os.path.join(datadir, "NGC6440E.tim")
    )
    f = fitter.WLSFitter(toas=t, model=m)
    f.fit_toas()
    assert "CHI2" in f.model.as_parfile()
    assert "CHI2" in f.model.as_parfile(format="tempo2")
    assert "CHI2" in f.model.as_parfile(format="tempo")


def test_T2CMETHOD():
    """T2CMETHOD should be commented out only in TEMPO2 output."""
    m = get_model(os.path.join(datadir, "B1855+09_NANOGrav_dfg+12_TAI.par"))
    # PINT format: line present, not commented.
    for l in m.as_parfile().split("\n"):
        if "T2CMETHOD" in l:
            assert not (l.startswith("#"))
    # TEMPO format: also not commented.
    for l in m.as_parfile(format="tempo").split("\n"):
        if "T2CMETHOD" in l:
            assert not (l.startswith("#"))
    # TEMPO2 format: must be commented out.
    for l in m.as_parfile(format="tempo2").split("\n"):
        if "T2CMETHOD" in l:
            assert l.startswith("#")


def test_MODE1():
    """'MODE 1' should be the first line of TEMPO2 output only."""
    m = get_model(os.path.join(datadir, "B1855+09_NANOGrav_dfg+12_TAI.par"))
    assert (
        not (m.as_parfile(include_info=False).startswith("MODE 1"))
        and not (m.as_parfile(include_info=False, format="tempo").startswith("MODE 1"))
        and (m.as_parfile(include_info=False, format="tempo2").startswith("MODE 1"))
    )


def test_STIGMA():
    """STIGMA should get renamed to VARSIGMA for TEMPO/TEMPO2."""
    m = get_model(os.path.join(datadir, "J0613-0200_NANOGrav_9yv1_ELL1H_STIG.gls.par"))
    assert (
        "STIGMA" in m.as_parfile()
        and "VARSIGMA" not in m.as_parfile()
        and "STIGMA" not in m.as_parfile(format="tempo")
        and "VARSIGMA" in m.as_parfile(format="tempo")
        and "STIGMA" not in m.as_parfile(format="tempo2")
        and "VARSIGMA" in m.as_parfile(format="tempo2")
    )


def test_A1DOT():
    """A1DOT should get renamed to XDOT for TEMPO/TEMPO2."""
    m = get_model(os.path.join(datadir, "J1600-3053_test.par"))
    assert (
        "A1DOT" in m.as_parfile()
        and "XDOT" not in m.as_parfile()
        and "A1DOT" not in m.as_parfile(format="tempo")
        and "XDOT" in m.as_parfile(format="tempo")
        and "A1DOT" not in m.as_parfile(format="tempo2")
        and "XDOT" in m.as_parfile(format="tempo2")
    )


def test_ECL():
    """ECL should be IERS2003 for TEMPO2, IERS2010 otherwise."""
    m = get_model(os.path.join(datadir, "J0613-0200_NANOGrav_9yv1.gls.par"))
    for l in m.as_parfile().split("\n"):
        if "ECL" in l:
            assert l.split()[-1] == "IERS2010"
    for l in m.as_parfile(format="tempo").split("\n"):
        if "ECL" in l:
            assert l.split()[-1] == "IERS2010"
    for l in m.as_parfile(format="tempo2").split("\n"):
        if "ECL" in l:
            assert l.split()[-1] == "IERS2003"


def test_DMDATA_N():
    """DMDATA should be written as an integer (0 here) for TEMPO/TEMPO2."""
    m = get_model(os.path.join(datadir, "J0030+0451_post.par"))
    for l in m.as_parfile(format="tempo").split("\n"):
        if "DMDATA" in l:
            # this should be a 0
            dmdata = int(l.split()[-1])
            assert dmdata == 0


def test_DMDATA_Y():
    """DMDATA should be written as an integer (1 here) for TEMPO/TEMPO2."""
    m = get_model(os.path.join(datadir, "B1855+09_NANOGrav_12yv3.wb.gls.par"))
    for l in m.as_parfile(format="tempo").split("\n"):
        if "DMDATA" in l:
            # this should be a 1
            dmdata = int(l.split()[-1])
            assert dmdata == 1


def test_formats():
    """Requesting an unknown parfile format should raise ValueError."""
    m = get_model(os.path.join(datadir, "B1855+09_NANOGrav_9yv1.gls.par"))
    with pytest.raises(ValueError):
        s = m.as_parfile(format="nottempo")
def test_EFAC():
    """EFAC should become T2EFAC in TEMPO/TEMPO2 output."""
    model = get_model(
        StringIO(
            """
PSRJ J1234+5678
ELAT 0
ELONG 0
DM 10
F0 1
PEPOCH 58000
EFAC mjd 57000 58000 2
"""
        )
    )
    assert any(
        l.startswith("EFAC") for l in model.as_parfile(format="pint").split("\n")
    )
    assert not any(
        l.startswith("T2EFAC") for l in model.as_parfile(format="pint").split("\n")
    )
    assert not any(
        l.startswith("EFAC") for l in model.as_parfile(format="tempo").split("\n")
    )
    assert any(
        l.startswith("T2EFAC") for l in model.as_parfile(format="tempo").split("\n")
    )
    assert not any(
        l.startswith("EFAC") for l in model.as_parfile(format="tempo2").split("\n")
    )
    assert any(
        l.startswith("T2EFAC") for l in model.as_parfile(format="tempo2").split("\n")
    )


def test_EQUAD():
    """EQUAD should become T2EQUAD in TEMPO/TEMPO2 output."""
    model = get_model(
        StringIO(
            """
PSRJ J1234+5678
ELAT 0
ELONG 0
DM 10
F0 1
PEPOCH 58000
EQUAD mjd 57000 58000 2
"""
        )
    )
    assert any(
        l.startswith("EQUAD") for l in model.as_parfile(format="pint").split("\n")
    )
    assert not any(
        l.startswith("T2EQUAD") for l in model.as_parfile(format="pint").split("\n")
    )
    assert not any(
        l.startswith("EQUAD") for l in model.as_parfile(format="tempo").split("\n")
    )
    assert any(
        l.startswith("T2EQUAD") for l in model.as_parfile(format="tempo").split("\n")
    )
    assert not any(
        l.startswith("EQUAD") for l in model.as_parfile(format="tempo2").split("\n")
    )
    assert any(
        l.startswith("T2EQUAD") for l in model.as_parfile(format="tempo2").split("\n")
    )


def test_DM001_vs_DM1_pint_tempo_read_write():
    """Ensure all parfile formats write out DMn the way it was read in. May want to change later."""
    model = get_model(
        StringIO(
            """
PSRJ J1234+5678
ELAT 0
ELONG 0
DM 10
F0 1
PEPOCH 58000
DM001 0 1 0
DM2 0 1 0
DM0010 0 1 0
"""
        )
    )
    # The zero-padded input forms apparently map to DM1/DM10 internally --
    # see the model alias handling; the padded spellings are preserved on write.
    assert "DM1" in model.params
    assert "DM10" in model.params
    assert any(
        l.startswith("DM0010") for l in model.as_parfile(format="tempo").split("\n")
    )
    assert any(
        l.startswith("DM0010") for l in model.as_parfile(format="tempo2").split("\n")
    )
    assert any(
        l.startswith("DM0010") for l in model.as_parfile(format="pint").split("\n")
    )
    assert any(
        l.startswith("DM2") for l in model.as_parfile(format="tempo").split("\n")
    )
    assert any(
        l.startswith("DM2") for l in model.as_parfile(format="tempo2").split("\n")
    )
    assert any(l.startswith("DM2") for l in model.as_parfile(format="pint").split("\n"))
def test_NHARMS():
    """NHARMS should be written as an integer for TEMPO even if stored as a float."""
    model = get_model(
        StringIO(
            """
PSR J1802-2124
EPHEM DE440
CLOCK TT(BIPM2019)
UNITS TDB
START 57682.9164254267206481
FINISH 58945.5403619002087731
DILATEFREQ N
DMDATA N
ELONG 270.486526147922007 1 0.00000012082526951556
ELAT 2.037373400788321 1 0.00000314425302204301
PMELONG -1.826189745727688 1 0.4777024218759009
PMELAT -2.9704295220972257 1 12.55798968405999
PX 0.5286751048529988 1 1.2501655070063251
ECL IERS2010
POSEPOCH 58314.0000000000000000
F0 79.0664240384491762 1 2.7971394286742147137e-12
F1 -4.55647355155766584e-16 1 2.5152536793477603086e-19
PEPOCH 58314.0000000000000000
CORRECT_TROPOSPHERE Y
PLANET_SHAPIRO Y
NE_SW 0.0
SWM 0.0
DM 149.60232712555309087
DM1 0.0
DMEPOCH 58314.0000000000000000
BINARY ELL1H
PB 0.6988892433285496519 1 2.306950233730408576e-11
A1 3.718865580010016 1 5.10696441177697e-07
TASC 58314.1068677136653058 1 1.02392354114256444625e-08
EPS1 4.117234830314865184e-06 1 2.8610710701285065444e-07
EPS2 1.9600104585898805536e-06 1 2.9783944064651076693e-07
H3 2.4037642640660087724e-06 1 3.31127323135470378e-07
H4 1.7027113002634860964e-06 1 2.88248121675800648e-07
NHARMS 7.0
FD1 2.2042642599772845e-05 1 1.2254082126867467e-06
TZRMJD 58133.7623761140993056
TZRSITE GB
TZRFRQ 1455.314
"""
        )
    )
    for line in model.as_parfile(format="tempo").split("\n"):
        if line.startswith("NHARMS"):
            d = line.split()
            # it should be an integer
            assert "." not in d[-1]
# Par files considered safe for the round-trip test below; hypothesis samples
# from this list. A duplicated "B1855+09_NANOGrav_dfg+12_TAI.par" entry was
# removed: it only skewed sampling toward that file without adding coverage.
sensible_pars = [
    "B1855+09_NANOGrav_9yv1.gls.par",
    "NGC6440E.par",
    "B1855+09_NANOGrav_dfg+12_TAI.par",
    "J0613-0200_NANOGrav_9yv1.gls.par",
]
@given(sampled_from(sensible_pars), sampled_from(["pint", "tempo", "tempo2"]))
def test_roundtrip(par, format):
    """Writing a model in any supported format and re-reading it must succeed."""
    m = get_model(os.path.join(datadir, par))
    m2 = get_model(StringIO(m.as_parfile(format=format)))
    # FIXME: check some things aren't changed
    # for p in m.params:
    #     assert getattr(m, p).value == getattr(m2, p).value
|
nanogravREPO_NAMEPINTPATH_START.@PINT_extracted@PINT-master@tests@test_parfile_writing_format.py@.PATH_END.py
|
{
"filename": "example_mcmc.py",
"repo_name": "NumCosmo/NumCosmo",
"repo_path": "NumCosmo_extracted/NumCosmo-master/examples/pydata_simple/example_mcmc.py",
"type": "Python"
}
|
#!/usr/bin/env python
#
# example_mcmc.py
#
# Mon May 22 16:00:00 2023
# Copyright 2023 Sandro Dias Pinto Vitenti
# <vitenti@uel.br>
#
# example_mcmc.py
# Copyright (C) 2023 Sandro Dias Pinto Vitenti <vitenti@uel.br>
#
# numcosmo is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# numcosmo is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
"""Example of running a MCMC analysis on a SLine model."""
import sys
import os.path
from typing import Union
from py_sline_model import PySLineModel
from py_sline_data import PySLineData
from py_sline_gauss import PySLineGauss
from numcosmo_py import Ncm
#
# Initialize the NumCosmo library objects. This must be called before
# any other library function is used.
#
Ncm.cfg_init()
def run_mcmc() -> None:
    """Example of running an MCMC analysis on a SLine model."""
    #
    # Instantiate a new SLine model object and set
    # some values for its parameters.
    #
    slm = PySLineModel()
    slm.props.alpha = 0.9
    slm.props.a = 0.1
    #
    # New model-set object including slm, with its parameters
    # set as free.
    #
    mset = Ncm.MSet.empty_new()
    mset.set(slm)
    mset.param_set_all_ftype(Ncm.ParamType.FREE)
    mset.prepare_fparam_map()
    #
    # Create a new serialization object and load
    # the data file produced by example_create_data.py.
    #
    sld: Union[PySLineData, PySLineGauss]
    data_file = "example_data.obj"
    ser = Ncm.Serialize.new(Ncm.SerializeOpt.NONE)
    if not os.path.exists(data_file):
        print("data file does not exists, run example_create_data.py first.")
        sys.exit(-1)
    else:
        data = ser.from_binfile(data_file)
        assert isinstance(data, (PySLineData, PySLineGauss))
        sld = data
    #
    # New data set object with sld added.
    #
    dset = Ncm.Dataset.new()
    dset.append_data(sld)
    #
    # New likelihood object using dset.
    #
    lh = Ncm.Likelihood.new(dset)
    #
    # Creating a Fit object of type NLOPT using the fitting algorithm ln-neldermead to
    # fit the Modelset mset using the Likelihood lh and using a numerical
    # differentiation algorithm (NUMDIFF_FORWARD) to obtain the gradient (if needed).
    #
    fit = Ncm.Fit.factory(
        Ncm.FitType.NLOPT, "ln-neldermead", lh, mset, Ncm.FitGradType.NUMDIFF_FORWARD
    )
    #
    # Running the fitter printing messages.
    #
    fit.run(Ncm.FitRunMsgs.SIMPLE)
    #
    # Printing fitting information.
    #
    fit.log_info()
    #
    # Calculating the parameters covariance using numerical differentiation.
    #
    fit.numdiff_m2lnL_covar()
    #
    # Printing the covariance matrix.
    #
    fit.log_covar()
    #
    # New Gaussian transition kernel to be used in the MCMC algorithm.
    # It was created with size 0 (number of parameters), but once
    # added to the MCMC object the correct size is assigned.
    #
    gtkern = Ncm.MSetTransKernGauss.new(0)
    mcmc = Ncm.FitMCMC.new(fit, gtkern, Ncm.FitRunMsgs.SIMPLE)
    #
    # Getting the Fisher matrix calculated above, scaling it by
    # multiplying by 2 and setting it as the covariance matrix
    # of the transition kernel.
    #
    cov = fit.peek_state().peek_covar().dup()
    cov.scale(2.0)
    gtkern.set_cov(cov)
    #
    # Using `example_mcmc_out.fits' as the catalog file; if there
    # is already data in it, the sampler continues from where it stopped.
    #
    mcmc.set_data_file("example_mcmc_out.fits")
    #
    # Running the mcmc: it will first calculate 1000 points, after that
    # it will estimate the error in the parameter means. Using the current
    # errors the algorithm tries to calculate how many extra steps are
    # necessary to obtain the required error `10^-3' in every parameter,
    # and it will run such extra steps. It will repeat this procedure
    # until it attains the required error in every parameter.
    #
    #
    mcmc.start_run()
    mcmc.run_lre(1000, 1.0e-3)
    mcmc.end_run()
    #
    # Calculate the parameter means and covariance, set them into
    # the fit object, and then print.
    #
    mcmc.mean_covar()
    fit.log_covar()


if __name__ == "__main__":
    run_mcmc()
|
NumCosmoREPO_NAMENumCosmoPATH_START.@NumCosmo_extracted@NumCosmo-master@examples@pydata_simple@example_mcmc.py@.PATH_END.py
|
{
"filename": "toyproblem presentation.ipynb",
"repo_name": "bencebeky/spotrod",
"repo_path": "spotrod_extracted/spotrod-master/toyproblem/toyproblem presentation.ipynb",
"type": "Jupyter Notebook"
}
|
# Bence Béky: Using the numpy C API
## Problem
```
from IPython.display import Image;
Image(filename="ipython.png")
```
```
r = numpy.linspace(0.0, 1.0, 1000);
p = 0.1;
z = 0.5;
```
## Python implementations
```
def circleangleloop(r, p, z):
# If the circle arc of radius r is disjoint from the circular disk
# of radius p, then the angle is zero.
answer = numpy.zeros_like(r);
for i in xrange(r.shape[0]):
# If the planet entirely covers the circle, the half central angle is pi.
if (r[i] < p-z):
answer[i] = numpy.pi;
# If the triangle inequalities hold between z, r, and p,
# then we have partial overlap.
# If alpha is the half central angle in the triangle with sides r, p, and z,
# with p opposite the angle, then p^2 = r^2 + z^2 - 2 rz cos(alpha)
elif (r[i] < p+z) & (z < p+r[i]):
answer[i] = numpy.arccos((r[i]*r[i]+z*z-p*p)/(2*z*r[i]));
return answer;
pyplot.plot(r, circleangleloop(r, p, z), "r-");
```
```
def circleanglemask(r, p, z):
inside = (r < p-z);
intersect = (r < p+z) & (z < r+p) & numpy.logical_not(inside);
answer = numpy.zeros_like(r);
answer[inside] = numpy.pi;
answer[intersect] = numpy.arccos((numpy.power(r[intersect],2)+z*z-p*p)/(2*z*r[intersect]));
return answer;
pyplot.plot(r, circleanglemask(r, p, z), "r-");
```
```
def circleanglesorted(r, p, z):
answer = numpy.empty_like(r);
n = len(r);
if (p > z):
# Planet covers center of star.
a, b = numpy.searchsorted(r, [p-z, p+z], side="right");
answer[:a] = numpy.pi;
answer[a:b] = numpy.arccos((r[a:b]*r[a:b]+z*z-p*p)/(2*z*r[a:b]));
answer[b:] = 0.0;
else:
# Planet does not cover center of star.
a, b = numpy.searchsorted(r, [z-p, z+p], side="right");
answer[:a] = 0.0;
answer[a:b] = numpy.arccos((r[a:b]*r[a:b]+z*z-p*p)/(2*z*r[a:b]));
answer[b:] = 0.0;
return answer;
pyplot.plot(r, circleanglesorted(r, p, z), "r-");
```
```
from timeit import timeit;
n = 500;
r = ", numpy; r = numpy.linspace(0.0, 1.0, 1000)";
arg = "(r, 0.1, 0.5)";
time1 = timeit(stmt="toypython.circleangleloop" + arg, setup="import toypython" + r, number=n);
time2 = timeit(stmt="toypython.circleanglemask" + arg, setup="import toypython" + r, number=n);
time3 = timeit(stmt="toypython.circleanglesorted" + arg, setup="import toypython" + r, number=n);
print("Python loop: {0:5.3f} ms.".format(1000*time1/n));
print("Python mask: {0:5.3f} ms.".format(1000*time2/n));
print("Python sorted: {0:5.3f} ms.".format(1000*time3/n));
```
## C implementations with the numpy API
### toyc.c
#include <math.h>
void circleangleloop(double *r, double p, double z, int n, double *answer) {
/* If the circle arc of radius r is disjoint from the circular disk
of radius p, then the angle is zero. */
int i;
double ri;
for(i=0; i<n; i++) {
ri = *(r+i);
// If the planet entirely covers the circle, the half central angle is pi.
if (ri <= p-z)
*(answer+i) = M_PI;
// If the triangle inequalities hold between z, r, and p, use law of cosines.
else if ((ri < p+z) && (ri > z-p))
*(answer+i) = acos((ri*ri+z*z-p*p)/(2*z*ri));
else
*(answer+i) = 0;
}
return;
}
### toyc-wrapper.c
#include <Python.h>
#include <numpy/arrayobject.h>
#include "toyc.h"
/* Docstrings */
static char module_docstring[] = " This module is a fast C implementation of a toy problem.";
static char circleangleloop_docstring[] =
" circleangleloop(r, p, z)\n"
" Calculate half central angle of the arc of circle of radius r\n"
" that is inside a circle of radius p with separation of centers z.";
/* Function wrappers for external use */
static PyObject *circleangleloop_wrapper(PyObject*, PyObject*, PyObject*);
/* Module specification */
static PyMethodDef module_methods[] = {
{"circleangleloop", (PyCFunction)circleangleloop_wrapper, METH_VARARGS | METH_KEYWORDS, circleangleloop_docstring},
{NULL, NULL, 0, NULL}
};
/* Initialize the module */
PyMODINIT_FUNC inittoyc(void) {
PyObject *m = Py_InitModule3("toyc", module_methods, module_docstring);
if (m == NULL)
return;
/* Load numpy functionality. */
import_array();
}
/* Wrapper function for circleangleloop. */
static PyObject *circleangleloop_wrapper(PyObject *self, PyObject *args, PyObject *kwds) {
/* Input arguments. */
double p, z;
PyObject *r_obj;
// Keywords.
static char *kwlist[] = {"r", "p", "z", NULL};
/* Parse the input tuple */
if (!PyArg_ParseTupleAndKeywords(args, kwds, "Odd", kwlist, &r_obj, &p, &z))
return NULL;
/* Interpret the input object as a numpy array. */
PyObject *r_array = PyArray_FROM_OTF(r_obj, NPY_DOUBLE, NPY_IN_ARRAY);
/* If that didn't work, or the resulting array does not have the correct
* number of dimensions or type, then abort. */
if (r_array == NULL || PyArray_NDIM(r_array) != 1 || PyArray_TYPE(r_array) != PyArray_DOUBLE) {
PyErr_SetString(PyExc_ValueError, "r cannot be converted to a suitable array.");
return NULL;
}
/* Read out dimensions and data pointers. */
int n = (int)PyArray_DIM(r_array, 0);
double *r_data = (double*)PyArray_DATA(r_array);
/* Create answer numpy array, let Python allocate memory.
Do not allocate memory manually and then use PyArray_FromDimsAndData! */
PyArrayObject *answer_array = (PyArrayObject*)PyArray_FromDims(1, &n, NPY_DOUBLE);
// Evaluate the model
circleangleloop(r_data, p, z, n, (double*)PyArray_DATA(answer_array));
/* Clean up. */
Py_DECREF(r_array);
// Return.
return PyArray_Return(answer_array);
}
### toyc-setup.py
from distutils.core import setup, Extension;
import numpy.distutils.misc_util;
c_ext = Extension("toyc", ["toyc-wrapper.c", "toyc.c"], extra_compile_args=['-Ofast']);
setup(ext_modules=[c_ext], include_dirs=numpy.distutils.misc_util.get_numpy_include_dirs());
```
from timeit import timeit;
n = 500;
r = ", numpy; r = numpy.linspace(0.0, 1.0, 1000)";
arg = "(r, 0.1, 0.5)";
time1 = timeit(stmt="toypython.circleangleloop" + arg, setup="import toypython" + r, number=n);
time2 = timeit(stmt="toypython.circleanglemask" + arg, setup="import toypython" + r, number=n);
time3 = timeit(stmt="toypython.circleanglesorted" + arg, setup="import toypython" + r, number=n);
time4 = timeit(stmt="toycython.circleangleloop" + arg, setup="import toycython" + r, number=n);
time5 = timeit(stmt="toycython.circleanglemask" + arg, setup="import toycython" + r, number=n);
time6 = timeit(stmt="toycython.circleanglesorted" + arg, setup="import toycython" + r, number=n);
time7 = timeit(stmt="toyc.circleangleloop" + arg, setup="import toyc" + r, number=n);
time8 = timeit(stmt="toyc.circleanglesorted" + arg, setup="import toyc" + r, number=n);
print("Python loop: {0:5.3f} ms.".format(1000*time1/n));
print("Python mask: {0:5.3f} ms.".format(1000*time2/n));
print("Python sorted: {0:5.3f} ms.".format(1000*time3/n));
print("Cython loop: {0:5.3f} ms.".format(1000*time4/n));
print("Cython mask: {0:5.3f} ms.".format(1000*time5/n));
print("Cython sorted: {0:5.3f} ms.".format(1000*time6/n));
print("C loop: {0:5.3f} ms.".format(1000*time7/n));
print("C sorted: {0:5.3f} ms.".format(1000*time8/n));
```
```
```
|
bencebekyREPO_NAMEspotrodPATH_START.@spotrod_extracted@spotrod-master@toyproblem@toyproblem presentation.ipynb@.PATH_END.py
|
{
"filename": "helper.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/libs/onnx/onnx/helper.py",
"type": "Python"
}
|
# Copyright (c) ONNX Project Contributors
#
# SPDX-License-Identifier: Apache-2.0
# pylint: disable=C0302,R0912
import collections.abc
import numbers
import struct
from cmath import isnan
from typing import (
Any,
Callable,
Dict,
KeysView,
List,
Optional,
Sequence,
Tuple,
TypeVar,
Union,
cast,
)
import google.protobuf.message
import numpy as np
from onnx import (
IR_VERSION,
AttributeProto,
FunctionProto,
GraphProto,
MapProto,
ModelProto,
NodeProto,
OperatorSetIdProto,
OptionalProto,
SequenceProto,
SparseTensorProto,
TensorProto,
TensorShapeProto,
TrainingInfoProto,
TypeProto,
ValueInfoProto,
defs,
mapping,
)
# Type aliases for the release/version bookkeeping below.
VersionRowType = Union[Tuple[str, int, int, int], Tuple[str, int, int, int, int]]
VersionTableType = List[VersionRowType]
AssignmentBindingType = List[Tuple[str, str]]

# This is a copy of the documented version in https://github.com/onnx/onnx/blob/main/docs/Versioning.md#released-versions
# Both must be updated whenever a new version of ONNX is released.
VERSION_TABLE: VersionTableType = [
    # Release-version, IR version, ai.onnx version, ai.onnx.ml version, (optional) ai.onnx.training version
    ("1.0", 3, 1, 1),
    ("1.1", 3, 5, 1),
    ("1.1.2", 3, 6, 1),
    ("1.2", 3, 7, 1),
    ("1.3", 3, 8, 1),
    ("1.4.1", 4, 9, 1),
    ("1.5.0", 5, 10, 1),
    ("1.6.0", 6, 11, 2),
    ("1.7.0", 7, 12, 2, 1),
    ("1.8.0", 7, 13, 2, 1),
    ("1.8.1", 7, 13, 2, 1),
    ("1.9.0", 7, 14, 2, 1),
    ("1.10.0", 8, 15, 2, 1),
    ("1.10.1", 8, 15, 2, 1),
    ("1.10.2", 8, 15, 2, 1),
    ("1.11.0", 8, 16, 3, 1),
    ("1.12.0", 8, 17, 3, 1),
    ("1.13.0", 8, 18, 3, 1),
    ("1.13.1", 8, 18, 3, 1),
    ("1.14.0", 9, 19, 3, 1),
    ("1.14.1", 9, 19, 3, 1),
]

# Maps (opset domain, opset version) -> minimum IR version supporting it.
VersionMapType = Dict[Tuple[str, int], int]
def create_op_set_id_version_map(table: VersionTableType) -> VersionMapType:
    """Build a map from (opset-domain, opset-version) to IR version from the table.

    The first row that mentions a given (domain, version) pair wins, so the
    earliest (smallest) IR version is recorded for each pair; training
    versions are mirrored under the preview domain name.
    """
    domains = ["ai.onnx", "ai.onnx.ml", "ai.onnx.training"]
    result: VersionMapType = {}
    for row in table:
        ir_version = row[1]
        for domain, opset_version in zip(domains, row[2:]):
            key = (domain, opset_version)
            if key not in result:
                result[key] = ir_version
                if domain == "ai.onnx.training":
                    result["ai.onnx.preview.training", opset_version] = ir_version
    return result
# Precomputed (domain, opset version) -> minimum IR version lookup table,
# used by find_min_ir_version_for().
OP_SET_ID_VERSION_MAP = create_op_set_id_version_map(VERSION_TABLE)
def find_min_ir_version_for(
    opsetidlist: List[OperatorSetIdProto], ignore_unknown: bool = False
) -> int:
    """Given a list of opset ids, determine the minimum IR version required.

    Arguments:
        opsetidlist (List[OperatorSetIdProto]): The list of OperatorSetIdProto
        ignore_unknown (bool): If True, an unknown domain contributes the
            default minimum version instead of raising.

    Returns:
        The minimum IR version required (integer)

    Raises:
        ValueError: if an opset is unknown and *ignore_unknown* is False.
    """
    default_min_version = 3
    if not opsetidlist:
        # No opsets specified: fall back to the default minimum.
        return default_min_version
    required = None
    for opset_id in opsetidlist:
        # An empty domain means the default "ai.onnx" domain.
        key = (opset_id.domain if opset_id.domain else "ai.onnx", opset_id.version)
        if key in OP_SET_ID_VERSION_MAP:
            candidate = OP_SET_ID_VERSION_MAP[key]
        elif ignore_unknown:
            candidate = default_min_version
        else:
            raise ValueError("Unsupported opset-version.")
        required = candidate if required is None else max(required, candidate)
    return required
def make_node(
    op_type: str,
    inputs: Sequence[str],
    outputs: Sequence[str],
    name: Optional[str] = None,
    doc_string: Optional[str] = None,
    domain: Optional[str] = None,
    **kwargs: Any,
) -> NodeProto:
    """Construct a NodeProto.

    Arguments:
        op_type (string): The name of the operator to construct
        inputs (list of string): list of input names
        outputs (list of string): list of output names
        name (string, default None): optional unique identifier for NodeProto
        doc_string (string, default None): optional documentation string for NodeProto
        domain (string, default None): optional domain for NodeProto.
            If it's None, we will just use default domain (which is empty)
        **kwargs (dict): the attributes of the node. The acceptable values
            are documented in :func:`make_attribute`.

    Returns:
        NodeProto
    """
    node = NodeProto()
    node.op_type = op_type
    node.input.extend(inputs)
    node.output.extend(outputs)
    if name:
        node.name = name
    if doc_string:
        node.doc_string = doc_string
    if domain is not None:
        node.domain = domain
    if kwargs:
        # Attributes are emitted in sorted key order; None values mean
        # "attribute not set" and are skipped.
        attributes = (
            make_attribute(key, value)
            for key, value in sorted(kwargs.items())
            if value is not None
        )
        node.attribute.extend(attributes)
    return node
def make_operatorsetid(
    domain: str,
    version: int,
) -> OperatorSetIdProto:
    """Construct an OperatorSetIdProto.

    Arguments:
        domain (string): The domain of the operator set id
        version (integer): Version of operator set id

    Returns:
        OperatorSetIdProto
    """
    # This was an exact duplicate of make_opsetid() below; delegate to it so
    # the two public names cannot drift apart.
    return make_opsetid(domain, version)
def make_graph(
    nodes: Sequence[NodeProto],
    name: str,
    inputs: Sequence[ValueInfoProto],
    outputs: Sequence[ValueInfoProto],
    initializer: Optional[Sequence[TensorProto]] = None,
    doc_string: Optional[str] = None,
    value_info: Optional[Sequence[ValueInfoProto]] = None,
    sparse_initializer: Optional[Sequence[SparseTensorProto]] = None,
) -> GraphProto:
    """Construct a GraphProto.

    Arguments:
        nodes: list of NodeProto
        name (string): graph name
        inputs: list of ValueInfoProto
        outputs: list of ValueInfoProto
        initializer: list of TensorProto
        doc_string (string): graph documentation
        value_info: list of ValueInfoProto
        sparse_initializer: list of SparseTensorProto

    Returns:
        GraphProto
    """
    graph = GraphProto()
    graph.node.extend(nodes)
    graph.name = name
    graph.input.extend(inputs)
    graph.output.extend(outputs)
    # None defaults mean "no entries"; extend with empty sequences instead.
    graph.initializer.extend(initializer if initializer is not None else [])
    graph.sparse_initializer.extend(
        sparse_initializer if sparse_initializer is not None else []
    )
    graph.value_info.extend(value_info if value_info is not None else [])
    if doc_string:
        graph.doc_string = doc_string
    return graph
def make_opsetid(domain: str, version: int) -> OperatorSetIdProto:
    """Construct an OperatorSetIdProto.

    Arguments:
        domain (string): The domain of the operator set id
        version (integer): Version of operator set id

    Returns:
        OperatorSetIdProto
    """
    # NOTE(review): make_operatorsetid() above builds the identical proto;
    # both names are kept for API compatibility.
    opsetid = OperatorSetIdProto()
    opsetid.domain = domain
    opsetid.version = version
    return opsetid
def make_function(
    domain: str,
    fname: str,
    inputs: Sequence[str],
    outputs: Sequence[str],
    nodes: Sequence[NodeProto],
    opset_imports: Sequence[OperatorSetIdProto],
    attributes: Optional[Sequence[str]] = None,
    attribute_protos: Optional[Sequence[AttributeProto]] = None,
    doc_string: Optional[str] = None,
) -> FunctionProto:
    """Construct a FunctionProto.

    Arguments:
        domain (string): The domain of the function
        fname (string): The function name
        inputs: list of input names
        outputs: list of output names
        nodes: list of NodeProto making up the function body
        opset_imports: list of OperatorSetIdProto the function depends on
        attributes: list of attribute names declared by the function
        attribute_protos: list of AttributeProto carrying default values
        doc_string (string): function documentation

    Returns:
        FunctionProto
    """
    if attributes is None:
        attributes = []
    if attribute_protos is None:
        attribute_protos = []
    f = FunctionProto()
    f.domain = domain
    f.name = fname
    f.input.extend(inputs)
    f.output.extend(outputs)
    f.node.extend(nodes)
    f.opset_import.extend(opset_imports)
    f.attribute.extend(attributes)
    f.attribute_proto.extend(attribute_protos)
    if doc_string:
        f.doc_string = doc_string
    return f
def make_model(graph: GraphProto, **kwargs: Any) -> ModelProto:
    """Construct a ModelProto.

    Arguments:
        graph (GraphProto): *make_graph* returns
        **kwargs: any attribute to add to the returned instance

    Returns:
        ModelProto
    """
    model = ModelProto()
    # Record the IR version this model is generated from.
    model.ir_version = IR_VERSION
    model.graph.CopyFrom(graph)

    opset_imports: Optional[Sequence[OperatorSetIdProto]] = kwargs.pop(
        "opset_imports", None
    )  # type: ignore
    if opset_imports is None:
        # No explicit imports: default to the current onnx opset.
        default_import = model.opset_import.add()
        default_import.version = defs.onnx_opset_version()
    else:
        model.opset_import.extend(opset_imports)

    functions: Optional[Sequence[FunctionProto]] = kwargs.pop("functions", None)  # type: ignore
    if functions is not None:
        model.functions.extend(functions)

    # TODO: Does this work with repeated fields?
    for field_name, field_value in kwargs.items():
        setattr(model, field_name, field_value)
    return model
# An extension of make_model that infers an IR_VERSION for the model,
# if not specified, using a best-effort-basis.
def make_model_gen_version(graph: GraphProto, **kwargs: Any) -> ModelProto:
    """Construct a ModelProto, inferring ``ir_version`` when not supplied.

    The inferred value is the minimum IR version able to represent the
    ``opset_imports`` passed in *kwargs* (best effort).
    """
    ir_version_field = "ir_version"
    if ir_version_field not in kwargs:
        # kwargs.get avoids the double lookup of the previous
        # "kwargs[k] if k in kwargs else []" form.
        imports = kwargs.get("opset_imports", [])
        kwargs[ir_version_field] = find_min_ir_version_for(imports)
    return make_model(graph, **kwargs)
def set_model_props(model: ModelProto, dict_value: Dict[str, str]) -> None:
    """Replace *model*'s metadata_props with the key/value pairs of *dict_value*.

    Existing metadata entries are cleared first, so repeated calls are
    idempotent.
    """
    del model.metadata_props[:]
    for k, v in dict_value.items():
        entry = model.metadata_props.add()
        entry.key = k
        entry.value = v
def split_complex_to_pairs(ca: Sequence[np.complex64]) -> Sequence[int]:
    """Flatten complex values to [real0, imag0, real1, imag1, ...].

    Replaces the original index-arithmetic comprehension with the direct
    equivalent. The return annotation is kept for API compatibility, but note
    the elements are the real/imaginary parts of the inputs, not ints.
    """
    pairs: list = []
    for c in ca:
        pairs.append(c.real)
        pairs.append(c.imag)
    return pairs
def float32_to_bfloat16(fval: float, truncate: bool = False) -> int:
    """Convert a float32 value to a bfloat16, returned as an int bit pattern.

    By default the conversion rounds to nearest-even and maps any NaN to a
    canonical quiet NaN. Setting ``truncate`` to True enables a simpler
    conversion that just drops the 2 least significant bytes of the
    significand; in that mode an error of up to 1 bit may be introduced and
    preservation of NaN values is not guaranteed.
    """
    bits = int.from_bytes(struct.pack("<f", fval), "little")
    if truncate:
        return bits >> 16
    if isnan(fval):
        # NaN requires at least 1 significand bit set:
        # sign=0, exp=all-ones, sig=0b1000000.
        return 0x7FC0
    # Round-to-nearest-even on the dropped 16 bits: bias by 0x7FFF plus the
    # LSB of the kept part, then shift the low bytes away.
    round_bias = ((bits >> 16) & 1) + 0x7FFF
    return (bits + round_bias) >> 16
def float32_to_float8e4m3(  # pylint: disable=too-many-statements
    fval: float,
    scale: float = 1.0,
    fn: bool = True,
    uz: bool = False,
    saturate: bool = True,
) -> int:
    """
    Convert a float32 value to a float8, e4m3 (as int).
    :param fval: float to convert
    :param scale: scale, divide *fval* by *scale* before casting it
    :param fn: no infinite values
    :param uz: no negative zero
    :param saturate: if True, any value out of range included inf becomes the maximum value,
        otherwise, it becomes NaN. The description of operator Cast fully describes the
        differences.
    :return: converted float
    See :ref:`onnx-detail-float8` for technical details.
    """
    if not fn:
        # only the "finite" e4m3 variants are defined; fn=False is unsupported
        raise NotImplementedError(
            "float32_to_float8e4m3 not implemented with fn=False."
        )
    x = fval / scale
    # reinterpret the scaled float32 as its raw little-endian bit pattern
    b = int.from_bytes(struct.pack("<f", np.float32(x)), "little")
    ret = (b & 0x80000000) >> 24  # sign
    if uz:
        # e4m3fnuz layout: no negative zero, single NaN encoding 0x80
        if (b & 0x7FC00000) == 0x7FC00000:
            # quiet NaN input -> the unique NaN code
            return 0x80
        if np.isinf(x):
            if saturate:
                return ret | 0x7F
            return 0x80
        e = (b & 0x7F800000) >> 23  # exponent
        m = b & 0x007FFFFF  # mantissa
        if e != 0:
            if e < 116:
                # magnitude underflows to (signed) zero
                pass
            elif e < 117:
                # smallest subnormal
                ret |= 1
                if (m >> 23) & 1:
                    # rounding
                    ret += 1
            elif e < 120:  # 127 - 8 + 1
                # subnormal range
                d = 119 - e
                ret |= 1 << (2 - d)
                ret |= m >> (21 + d)
                if (m >> (20 + d)) & 1:
                    # rounding
                    ret += 1
            elif e < 135:  # 127 + 8
                # normal range: re-bias exponent and keep top mantissa bits
                ex = e - 119  # 127 - 8
                if ex == 0:
                    ret |= 0x4
                    ret |= m >> 21
                else:
                    ret |= ex << 3
                    ret |= m >> 20
                if m & 0x80000:
                    if (ret & 0x7F) < 0x7F:
                        # rounding
                        ret += 1
                    elif not saturate:
                        return 0x80
            elif saturate:
                ret |= 0x7F  # 01111110
            else:
                ret = 0x80
        elif m == 0:
            # -0
            ret = 0
        return int(ret)
    else:
        # e4m3fn layout: NaN is 0x7F (with sign), 0x80 is negative zero
        if (b & 0x7FC00000) == 0x7FC00000:
            return 0x7F | ret
        if np.isinf(x):
            if saturate:
                return ret | 126
            return 0x7F | ret
        e = (b & 0x7F800000) >> 23  # exponent
        m = b & 0x007FFFFF  # mantissa
        if e != 0:
            if e < 117:
                # magnitude underflows to (signed) zero
                pass
            elif e < 118:
                # smallest subnormal
                ret |= 1
                if (m >> 23) & 1:
                    # rounding
                    ret += 1
            elif e < 121:  # 127 - 7 + 1
                # subnormal range
                d = 120 - e
                ret |= 1 << (2 - d)
                ret |= m >> (21 + d)
                if (m >> (20 + d)) & 1:
                    # rounding
                    ret += 1
            elif e < 136:  # 127 + 8 + 1
                # normal range
                ex = e - 120  # 127 - 7
                if ex == 0:
                    ret |= 0x4
                    ret |= m >> 21
                else:
                    ret |= ex << 3
                    ret |= m >> 20
                    if (ret & 0x7F) == 0x7F:
                        # avoid producing the NaN pattern from exponent/mantissa bits
                        ret &= 0xFE
                if (m & 0x80000) and (
                    (m & 0x100000) or (m & 0x7C000)
                ):  # round to nearest even
                    if (ret & 0x7F) < 0x7E:
                        # rounding
                        ret += 1
                    elif not saturate:
                        ret |= 0x7F
            elif saturate:
                ret |= 126  # 01111110
            else:
                ret |= 0x7F
        return int(ret)
def float32_to_float8e5m2(  # pylint: disable=too-many-statements
    fval: float,
    scale: float = 1.0,
    fn: bool = False,
    uz: bool = False,
    saturate: bool = True,
) -> int:
    """
    Convert a float32 value to a float8, e5m2 (as int).
    :param fval: float to convert
    :param scale: scale, divide *fval* by *scale* before casting it
    :param fn: no infinite values
    :param uz: no negative zero
    :param saturate: if True, any value out of range included inf becomes the maximum value,
        otherwise, it becomes NaN. The description of operator Cast fully describes the
        differences.
    :return: converted float
    """
    x = fval / scale
    # reinterpret the scaled float32 as its raw little-endian bit pattern
    b = int.from_bytes(struct.pack("<f", np.float32(x)), "little")
    ret = (b & 0x80000000) >> 24  # sign
    if fn and uz:
        # e5m2fnuz layout: no infinities, no negative zero, NaN is 0x80
        if (b & 0x7FC00000) == 0x7FC00000:
            return 0x80
        if (b & 0x7FFFFFFF) == 0x7F800000:
            # inf
            if saturate:
                return ret | 0x7F
            return 0x80
        e = (b & 0x7F800000) >> 23  # exponent
        m = b & 0x007FFFFF  # mantissa
        if e != 0:
            if e < 109:
                # magnitude underflows to (signed) zero
                pass
            elif e < 110:
                # smallest subnormal
                ret |= 1
                if (m >> 23) & 1:
                    # rounding
                    # may be unused
                    ret += 1
            elif e < 112:  # 127 - 16 + 1
                # subnormal range
                d = 111 - e
                ret |= 1 << (1 - d)
                ret |= m >> (22 + d)
                if (m >> (21 + d)) & 1:
                    # rounding
                    ret += 1
            elif e < 143:  # 127 + 15 + 1
                # normal range: re-bias exponent, keep top 2 mantissa bits
                ex = e - 111  # 127 - 16
                ret |= ex << 2
                ret |= m >> 21
                if m & 0x100000:
                    if (ret & 0x7F) < 0x7F:
                        # rounding
                        ret += 1
                    elif not saturate:
                        ret = 0x80
            elif e == 255 and m == 0:  # inf
                ret = 0x80
            elif saturate:
                ret |= 0x7F  # last possible number
            else:
                ret = 0x80
        elif m == 0:
            # -0
            ret = 0
        return int(ret)
    elif not fn and not uz:
        # standard e5m2: has infinities (0x7C) and NaNs (0x7D..0x7F)
        if (b & 0x7FC00000) == 0x7FC00000:
            return 0x7F | ret
        if np.isinf(x):
            if saturate:
                return 0x7B | ret
            return 0x7C | ret
        e = (b & 0x7F800000) >> 23  # exponent
        m = b & 0x007FFFFF  # mantissa
        if e != 0:
            if e < 110:
                # magnitude underflows to (signed) zero
                pass
            elif e < 111:
                # smallest subnormal
                ret |= 1
                if (m >> 23) & 1:
                    # rounding
                    # may be unused
                    ret += 1
            elif e < 113:  # 127 - 15 + 1
                # subnormal range
                d = 112 - e
                ret |= 1 << (1 - d)
                ret |= m >> (22 + d)
                if (m >> (21 + d)) & 1:
                    # rounding
                    ret += 1
            elif e < 143:  # 127 + 15 + 1
                # normal range
                ex = e - 112  # 127 - 15
                ret |= ex << 2
                ret |= m >> 21
                if (m & 0x100000) and (
                    (m & 0xFFFFF) or (m & 0x200000)
                ):  # round to nearest even
                    if (ret & 0x7F) < 0x7B:
                        # rounding
                        ret += 1
                    elif saturate:
                        ret |= 0x7B
                    else:
                        ret |= 0x7C
            elif saturate:
                ret |= 0x7B
            else:
                ret |= 0x7C
        return int(ret)
    else:
        raise NotImplementedError("fn and uz must be both False or True.")
def make_tensor(
    name: str, data_type: int, dims: Sequence[int], vals: Any, raw: bool = False
) -> TensorProto:
    """
    Make a TensorProto with specified arguments. If raw is False, this
    function will choose the corresponding proto field to store the
    values based on data_type. If raw is True, use "raw_data" proto
    field to store the values, and values should be of type bytes in
    this case.
    Arguments:
        name (string): tensor name
        data_type (int): a value such as onnx.TensorProto.FLOAT
        dims (List[int]): shape
        vals: values
        raw (bool): if True, vals contains the serialized content of the tensor,
            otherwise, vals should be a list of values of the type defined by *data_type*
    Returns:
        TensorProto
    Raises:
        TypeError: if raw storage is requested for a string tensor.
        ValueError: if the number of values does not match the shape.
    """
    tensor = TensorProto()
    tensor.data_type = data_type
    tensor.name = name
    if data_type == TensorProto.STRING and raw:
        raise TypeError("Can not use raw_data to store string type.")
    np_dtype = tensor_dtype_to_np_dtype(data_type)
    # Check number of vals specified equals tensor size
    expected_size = 1
    if raw:
        # NumPy doesn't have BFLOAT16. TENSOR_TYPE_TO_NP_TYPE maps it to float32,
        # which has the wrong itemsize.
        if data_type == TensorProto.BFLOAT16:
            expected_size = 2
        elif data_type in (
            TensorProto.FLOAT8E4M3FN,
            TensorProto.FLOAT8E4M3FNUZ,
            TensorProto.FLOAT8E5M2,
            TensorProto.FLOAT8E5M2FNUZ,
        ):
            # every float8 variant is stored one byte per element
            expected_size = 1
        else:
            expected_size = np_dtype.itemsize
    if (
        type(vals) is np.ndarray  # pylint: disable=unidiomatic-typecheck
        and len(vals.shape) > 1
    ):
        # multi-dimensional arrays are serialized as a flat value list
        vals = vals.flatten()
    for d in dims:
        expected_size *= d
    if len(vals) != expected_size:
        raise ValueError(
            f"Number of values does not match tensor's size. Expected {expected_size}, but it is {len(vals)}. "
        )
    if raw:
        tensor.raw_data = vals
    else:
        if data_type in (TensorProto.COMPLEX64, TensorProto.COMPLEX128):
            # complex values are stored as interleaved real/imag pairs
            vals = split_complex_to_pairs(vals)
        elif data_type == TensorProto.FLOAT16:
            # float16 is stored as its raw uint16 bit pattern
            vals = (
                np.array(vals).astype(np_dtype).view(dtype=np.uint16).flatten().tolist()
            )
        elif data_type in (
            TensorProto.BFLOAT16,
            TensorProto.FLOAT8E4M3FN,
            TensorProto.FLOAT8E4M3FNUZ,
            TensorProto.FLOAT8E5M2,
            TensorProto.FLOAT8E5M2FNUZ,
        ):
            # narrow float types are converted element-wise to their bit patterns
            fcast = {
                TensorProto.BFLOAT16: float32_to_bfloat16,
                TensorProto.FLOAT8E4M3FN: float32_to_float8e4m3,
                TensorProto.FLOAT8E4M3FNUZ: lambda *args: float32_to_float8e4m3(  # type: ignore[misc]
                    *args, uz=True
                ),
                TensorProto.FLOAT8E5M2: float32_to_float8e5m2,
                TensorProto.FLOAT8E5M2FNUZ: lambda *args: float32_to_float8e5m2(  # type: ignore[misc]
                    *args, fn=True, uz=True
                ),
            }[
                data_type  # type: ignore[index]
            ]
            vals = list(
                map(  # type: ignore[call-overload]
                    fcast,
                    np.array(vals).astype(np_dtype).flatten().tolist(),
                )
            )
        elif data_type == TensorProto.BOOL:
            vals = np.array(vals).astype(int)
        field = tensor_dtype_to_field(data_type)
        getattr(tensor, field).extend(vals)
    tensor.dims.extend(dims)
    return tensor
def make_sparse_tensor(
    values: TensorProto, indices: TensorProto, dims: Sequence[int]
) -> SparseTensorProto:
    """Construct a SparseTensorProto.
    Arguments:
        values (TensorProto): the non-zero values
        indices (TensorProto): the locations of those values
        dims: the dense shape of the sparse tensor
    Returns:
        SparseTensorProto
    """
    proto = SparseTensorProto()
    proto.values.CopyFrom(values)
    proto.indices.CopyFrom(indices)
    proto.dims.extend(dims)
    return proto
def make_sequence(
    name: str,
    elem_type: SequenceProto.DataType,
    values: Sequence[Any],
) -> SequenceProto:
    """
    Make a Sequence with specified value arguments.
    """
    seq = SequenceProto()
    seq.name = name
    seq.elem_type = elem_type
    if elem_type == SequenceProto.UNDEFINED:
        return seq
    if elem_type == SequenceProto.TENSOR:
        target = seq.tensor_values
    elif elem_type == SequenceProto.SPARSE_TENSOR:
        target = seq.sparse_tensor_values  # type: ignore[assignment]
    elif elem_type == SequenceProto.SEQUENCE:
        target = seq.sequence_values  # type: ignore[assignment]
    elif elem_type == SequenceProto.MAP:
        target = seq.map_values  # type: ignore[assignment]
    elif elem_type == OptionalProto.OPTIONAL:
        # NOTE(review): this compares a SequenceProto elem_type against
        # OptionalProto.OPTIONAL — presumably the enum values line up; verify.
        target = seq.optional_values  # type: ignore[assignment]
    else:
        raise TypeError("The element type in the input sequence is not supported.")
    target.extend(values)
    return seq
def make_map(
    name: str, key_type: int, keys: List[Any], values: SequenceProto
) -> MapProto:
    """
    Make a Map with specified key-value pair arguments.
    Criteria for conversion:
    - Keys and Values must have the same number of elements
    - Every key in keys must be of the same type
    - Every value in values must be of the same type
    Raises:
        TypeError: if *key_type* is neither TensorProto.STRING nor one of the
            integer tensor types (previously such keys were silently dropped).
    """
    map_proto = MapProto()
    valid_key_int_types = [
        TensorProto.INT8,
        TensorProto.INT16,
        TensorProto.INT32,
        TensorProto.INT64,
        TensorProto.UINT8,
        TensorProto.UINT16,
        TensorProto.UINT32,
        TensorProto.UINT64,
    ]
    map_proto.name = name
    map_proto.key_type = key_type
    if key_type == TensorProto.STRING:
        map_proto.string_keys.extend(keys)
    elif key_type in valid_key_int_types:
        map_proto.keys.extend(keys)
    else:
        # Fail loudly instead of building a map whose keys were dropped.
        raise TypeError(
            f"Map key type {key_type} is not TensorProto.STRING or a valid integer type."
        )
    map_proto.values.CopyFrom(values)
    return map_proto
def make_optional(
    name: str,
    elem_type: OptionalProto.DataType,
    value: Optional[Any],
) -> OptionalProto:
    """
    Make an Optional with specified value arguments.
    """
    opt = OptionalProto()
    opt.name = name
    opt.elem_type = elem_type
    if elem_type == OptionalProto.UNDEFINED:
        return opt
    if elem_type == OptionalProto.TENSOR:
        target = opt.tensor_value
    elif elem_type == OptionalProto.SPARSE_TENSOR:
        target = opt.sparse_tensor_value  # type: ignore[assignment]
    elif elem_type == OptionalProto.SEQUENCE:
        target = opt.sequence_value  # type: ignore[assignment]
    elif elem_type == OptionalProto.MAP:
        target = opt.map_value  # type: ignore[assignment]
    elif elem_type == OptionalProto.OPTIONAL:
        target = opt.optional_value  # type: ignore[assignment]
    else:
        raise TypeError("The element type in the input optional is not supported.")
    target.CopyFrom(value)  # type: ignore[arg-type]
    return opt
def _to_bytes(value: Union[str, bytes]) -> bytes:
"""Coerce a string (or bytes) value into UTF-8 bytes."""
return value if isinstance(value, bytes) else value.encode("utf-8")
def make_attribute(  # pylint: disable=too-many-statements
    key: str, value: Any, doc_string: Optional[str] = None
) -> AttributeProto:
    """Makes an AttributeProto based on the value type.

    The attribute's type tag is inferred from the Python type of *value*:
    scalars (int/float/str/bytes/proto messages) map to singular fields,
    iterables map to the corresponding repeated fields.

    Raises:
        ValueError: if the element types of an iterable value are mixed.
        TypeError: if *value* is of an unsupported type.
    """
    attr = AttributeProto()
    attr.name = key
    if doc_string:
        attr.doc_string = doc_string
    # Singular cases
    # NOTE: Integral is tested before Real because every Integral is a Real.
    if isinstance(value, numbers.Integral):
        attr.i = int(value)
        attr.type = AttributeProto.INT
    elif isinstance(value, numbers.Real):
        attr.f = float(value)
        attr.type = AttributeProto.FLOAT
    elif isinstance(value, (str, bytes)):
        # Encode strings into utf-8
        attr.s = _to_bytes(value)
        attr.type = AttributeProto.STRING
    elif isinstance(value, TensorProto):
        attr.t.CopyFrom(value)
        attr.type = AttributeProto.TENSOR
    elif isinstance(value, SparseTensorProto):
        attr.sparse_tensor.CopyFrom(value)
        attr.type = AttributeProto.SPARSE_TENSOR
    elif isinstance(value, GraphProto):
        attr.g.CopyFrom(value)
        attr.type = AttributeProto.GRAPH
    elif isinstance(value, TypeProto):
        attr.tp.CopyFrom(value)
        attr.type = AttributeProto.TYPE_PROTO
    # Iterable cases
    elif isinstance(value, collections.abc.Iterable):
        value = list(value)
        types = {type(v) for v in value}
        # every element must share a compatible type for one repeated field
        if all(issubclass(t, numbers.Integral) for t in types):
            attr.ints.extend(value)
            attr.type = AttributeProto.INTS
        elif all(issubclass(t, numbers.Real) for t in types):
            attr.floats.extend(value)
            attr.type = AttributeProto.FLOATS
        elif all(issubclass(t, (str, bytes)) for t in types):
            attr.strings.extend(_to_bytes(v) for v in value)
            attr.type = AttributeProto.STRINGS
        elif all(issubclass(t, TensorProto) for t in types):
            attr.tensors.extend(value)
            attr.type = AttributeProto.TENSORS
        elif all(issubclass(t, SparseTensorProto) for t in types):
            attr.sparse_tensors.extend(value)
            attr.type = AttributeProto.SPARSE_TENSORS
        elif all(issubclass(t, GraphProto) for t in types):
            attr.graphs.extend(value)
            attr.type = AttributeProto.GRAPHS
        elif all(issubclass(t, TypeProto) for t in types):
            attr.type_protos.extend(value)
            attr.type = AttributeProto.TYPE_PROTOS
        else:
            raise ValueError(
                "Could not infer the attribute type from the elements of the passed Iterable value."
            )
    else:
        raise TypeError(f"'{value}' is not an accepted attribute value.")
    return attr
def make_attribute_ref(
    name: str, attr_type: AttributeProto.AttributeType, doc_string: Optional[str] = None
) -> AttributeProto:
    """Make an AttributeProto holding a reference to the parent function's attribute of given name and type."""
    attr = AttributeProto()
    if doc_string:
        attr.doc_string = doc_string
    attr.name = name
    attr.type = attr_type
    return attr
def get_attribute_value(attr: AttributeProto) -> Any:
    """Return the Python value stored in *attr* according to its type tag.

    Raises ValueError for reference attributes and unknown type tags.
    """
    if attr.ref_attr_name:
        raise ValueError(f"Cannot get value of reference attribute: {attr}")
    scalar_fields = {
        AttributeProto.FLOAT: "f",
        AttributeProto.INT: "i",
        AttributeProto.STRING: "s",
        AttributeProto.TENSOR: "t",
        AttributeProto.SPARSE_TENSOR: "sparse_tensor",
        AttributeProto.GRAPH: "g",
        AttributeProto.TYPE_PROTO: "tp",
    }
    repeated_fields = {
        AttributeProto.FLOATS: "floats",
        AttributeProto.INTS: "ints",
        AttributeProto.STRINGS: "strings",
        AttributeProto.TENSORS: "tensors",
        AttributeProto.SPARSE_TENSORS: "sparse_tensors",
        AttributeProto.GRAPHS: "graphs",
        AttributeProto.TYPE_PROTOS: "type_protos",
    }
    if attr.type in scalar_fields:
        return getattr(attr, scalar_fields[attr.type])
    if attr.type in repeated_fields:
        # repeated proto fields are materialized as plain Python lists
        return list(getattr(attr, repeated_fields[attr.type]))
    raise ValueError(f"Unsupported ONNX attribute: {attr}")
def get_node_attr_value(node: NodeProto, attr_name: str) -> Any:
    """Return the value of the node attribute named *attr_name*.

    Raises ValueError when the attribute is missing or duplicated.
    """
    found = [attr for attr in node.attribute if attr.name == attr_name]
    if len(found) > 1:
        raise ValueError(f"Node has multiple attributes with name {attr_name}")
    if not found:
        raise ValueError(f"Node has no attribute with name {attr_name}")
    return get_attribute_value(found[0])
def make_empty_tensor_value_info(name: str) -> ValueInfoProto:
    """Create a ValueInfoProto carrying only a name (no type information)."""
    vi = ValueInfoProto()
    vi.name = name
    return vi
def make_tensor_type_proto(
    elem_type: int,
    shape: Optional[Sequence[Union[str, int, None]]],
    shape_denotation: Optional[List[str]] = None,
) -> TypeProto:
    """Makes a Tensor TypeProto based on the data type and shape.

    Each shape entry may be an int (static dim), a str (symbolic dim) or
    None (unknown dim); ``shape=None`` means no shape at all.
    """
    type_proto = TypeProto()
    tensor_type = type_proto.tensor_type
    tensor_type.elem_type = elem_type
    shape_proto = tensor_type.shape
    if shape is None:
        return type_proto
    # Force an explicit (possibly empty) shape entry: protobuf distinguishes
    # a never-set repeated field from one explicitly set to empty, and
    # consumers can observe that difference.
    shape_proto.dim.extend([])
    if shape_denotation and len(shape_denotation) != len(shape):
        raise ValueError(
            "Invalid shape_denotation. Must be of the same length as shape."
        )
    for i, d in enumerate(shape):
        dim = shape_proto.dim.add()
        if isinstance(d, int):
            dim.dim_value = d
        elif isinstance(d, str):
            dim.dim_param = d
        elif d is not None:
            raise ValueError(
                f"Invalid item in shape: {d}. Needs to be of int or str."
            )
        if shape_denotation:
            dim.denotation = shape_denotation[i]
    return type_proto
def make_tensor_value_info(
    name: str,
    elem_type: int,
    shape: Optional[Sequence[Union[str, int, None]]],
    doc_string: str = "",
    shape_denotation: Optional[List[str]] = None,
) -> ValueInfoProto:
    """Makes a ValueInfoProto based on the data type and shape."""
    vi = ValueInfoProto()
    vi.name = name
    if doc_string:
        vi.doc_string = doc_string
    vi.type.CopyFrom(make_tensor_type_proto(elem_type, shape, shape_denotation))
    return vi
def make_sparse_tensor_type_proto(
    elem_type: int,
    shape: Optional[Sequence[Union[str, int, None]]],
    shape_denotation: Optional[List[str]] = None,
) -> TypeProto:
    """Makes a SparseTensor TypeProto based on the data type and shape.

    Each shape entry may be an int (static dim), a str (symbolic dim) or
    None (unknown dim); ``shape=None`` means no shape at all.

    Raises:
        ValueError: if *shape_denotation* length mismatches *shape*, or a
            shape entry is neither int, str nor None.
    """
    type_proto = TypeProto()
    sparse_tensor_type_proto = type_proto.sparse_tensor_type
    sparse_tensor_type_proto.elem_type = elem_type
    sparse_tensor_shape_proto = sparse_tensor_type_proto.shape
    if shape is not None:
        # Explicitly materialize an (empty) shape entry: for protobuf,
        # "field never set" and "field set to empty" are observably different.
        sparse_tensor_shape_proto.dim.extend([])
        if shape_denotation and len(shape_denotation) != len(shape):
            raise ValueError(
                "Invalid shape_denotation. Must be of the same length as shape."
            )
        for i, d in enumerate(shape):
            dim = sparse_tensor_shape_proto.dim.add()
            if d is None:
                pass
            elif isinstance(d, int):
                dim.dim_value = d
            elif isinstance(d, str):
                dim.dim_param = d
            else:
                # message now matches make_tensor_type_proto ("int or str",
                # previously "int or text")
                raise ValueError(
                    f"Invalid item in shape: {d}. Needs to be of int or str."
                )
            if shape_denotation:
                dim.denotation = shape_denotation[i]
    return type_proto
def make_sparse_tensor_value_info(
    name: str,
    elem_type: int,
    shape: Optional[Sequence[Union[str, int, None]]],
    doc_string: str = "",
    shape_denotation: Optional[List[str]] = None,
) -> ValueInfoProto:
    """Makes a SparseTensor ValueInfoProto based on the data type and shape."""
    vi = ValueInfoProto()
    vi.name = name
    if doc_string:
        vi.doc_string = doc_string
    type_proto = make_sparse_tensor_type_proto(elem_type, shape, shape_denotation)
    vi.type.sparse_tensor_type.CopyFrom(type_proto.sparse_tensor_type)
    return vi
def make_sequence_type_proto(
    inner_type_proto: TypeProto,
) -> TypeProto:
    """Makes a sequence TypeProto whose element type is *inner_type_proto*."""
    wrapper = TypeProto()
    wrapper.sequence_type.elem_type.CopyFrom(inner_type_proto)
    return wrapper
def make_optional_type_proto(
    inner_type_proto: TypeProto,
) -> TypeProto:
    """Makes an optional TypeProto whose element type is *inner_type_proto*."""
    wrapper = TypeProto()
    wrapper.optional_type.elem_type.CopyFrom(inner_type_proto)
    return wrapper
def make_value_info(
    name: str,
    type_proto: TypeProto,
    doc_string: str = "",
) -> ValueInfoProto:
    """Makes a ValueInfoProto with the given type_proto."""
    vi = ValueInfoProto()
    vi.name = name
    if doc_string:
        vi.doc_string = doc_string
    vi.type.CopyFrom(type_proto)
    return vi
def _sanitize_str(s: Union[str, bytes]) -> str:
if isinstance(s, str):
sanitized = s
elif isinstance(s, bytes):
sanitized = s.decode("utf-8", errors="ignore")
else:
sanitized = str(s)
if len(sanitized) < 64:
return sanitized
return sanitized[:64] + f"...<+len={(len(sanitized) - 64)}>"
def make_tensor_sequence_value_info(
    name: str,
    elem_type: int,
    shape: Optional[Sequence[Union[str, int, None]]],
    doc_string: str = "",
    elem_shape_denotation: Optional[List[str]] = None,
) -> ValueInfoProto:
    """Makes a Sequence[Tensors] ValueInfoProto based on the data type and shape."""
    vi = ValueInfoProto()
    vi.name = name
    if doc_string:
        vi.doc_string = doc_string
    elem_type_proto = make_tensor_type_proto(elem_type, shape, elem_shape_denotation)
    seq_type_proto = make_sequence_type_proto(elem_type_proto)
    vi.type.sequence_type.CopyFrom(seq_type_proto.sequence_type)
    return vi
def printable_attribute(
    attr: AttributeProto, subgraphs: bool = False
) -> Union[str, Tuple[str, List[GraphProto]]]:
    """Render an AttributeProto as ``name = value`` text.

    With ``subgraphs=True``, also return the GraphProto subgraphs found in the
    attribute so the caller can print them separately; otherwise return only
    the rendered string.
    """
    content = []
    content.append(attr.name)
    content.append("=")
    def str_float(f: float) -> str:
        # NB: Different Python versions print different numbers of trailing
        # decimals, specifying this explicitly keeps it consistent for all
        # versions
        return f"{f:.15g}"
    def str_int(i: int) -> str:
        return str(i)
    _T = TypeVar("_T")
    def str_list(str_elem: Callable[[_T], str], xs: Sequence[_T]) -> str:
        return "[" + ", ".join(map(str_elem, xs)) + "]"
    # for now, this logic should continue to work as long as we are running on a proto3
    # implementation. If/when we switch to proto3, we will need to use attr.type
    # To support printing subgraphs, if we find a graph attribute, print out
    # its name here and pass the graph itself up to the caller for later
    # printing.
    graphs = []
    if attr.HasField("f"):
        content.append(str_float(attr.f))
    elif attr.HasField("i"):
        content.append(str_int(attr.i))
    elif attr.HasField("s"):
        # TODO: Bit nervous about Python 2 / Python 3 determinism implications
        content.append(repr(_sanitize_str(attr.s)))
    elif attr.HasField("t"):
        if len(attr.t.dims) > 0:
            content.append("<Tensor>")
        else:
            # special case to print scalars
            field = tensor_dtype_to_field(attr.t.data_type)
            content.append(f"<Scalar Tensor {str(getattr(attr.t, field))}>")
    elif attr.HasField("g"):
        content.append(f"<graph {attr.g.name}>")
        graphs.append(attr.g)
    elif attr.HasField("tp"):
        content.append(f"<Type Proto {attr.tp}>")
    elif attr.floats:
        content.append(str_list(str_float, attr.floats))
    elif attr.ints:
        content.append(str_list(str_int, attr.ints))
    elif attr.strings:
        # TODO: Bit nervous about Python 2 / Python 3 determinism implications
        content.append(str(list(map(_sanitize_str, attr.strings))))
    elif attr.tensors:
        content.append("[<Tensor>, ...]")
    elif attr.type_protos:
        content.append("[")
        for i, tp in enumerate(attr.type_protos):
            comma = "," if i != len(attr.type_protos) - 1 else ""
            content.append(f"<Type Proto {tp}>{comma}")
        content.append("]")
    elif attr.graphs:
        content.append("[")
        for i, g in enumerate(attr.graphs):
            comma = "," if i != len(attr.graphs) - 1 else ""
            content.append(f"<graph {g.name}>{comma}")
        content.append("]")
        graphs.extend(attr.graphs)
    else:
        content.append("<Unknown>")
    if subgraphs:
        return " ".join(content), graphs
    return " ".join(content)
def printable_dim(dim: TensorShapeProto.Dimension) -> str:
    """Render a dimension as its numeric value or symbolic parameter name."""
    field = dim.WhichOneof("value")
    if field is None:
        raise TypeError(f"which cannot be {None}.")
    return str(getattr(dim, field))
def printable_type(t: TypeProto) -> str:
    """Render a TypeProto as a short human-readable string."""
    kind = t.WhichOneof("value")
    if kind == "tensor_type":
        out = TensorProto.DataType.Name(t.tensor_type.elem_type)
        if t.tensor_type.HasField("shape"):
            if len(t.tensor_type.shape.dim):
                out += str(", " + "x".join(map(printable_dim, t.tensor_type.shape.dim)))
            else:
                out += ", scalar"
        return out
    if kind is None:
        return ""
    return f"Unknown type {kind}"
def printable_value_info(v: ValueInfoProto) -> str:
    """Render a ValueInfoProto as ``%name[type]``."""
    out = f"%{v.name}"
    if v.type:
        out += f"[{printable_type(v.type)}]"
    return out
def printable_tensor_proto(t: TensorProto) -> str:
    """Render a TensorProto header as ``%name[DTYPE, dims]``."""
    parts = [f"%{t.name}[", TensorProto.DataType.Name(t.data_type)]
    if t.dims is not None:
        if len(t.dims):
            parts.append(str(", " + "x".join(map(str, t.dims))))
        else:
            parts.append(", scalar")
    parts.append("]")
    return "".join(parts)
def printable_node(
    node: NodeProto, prefix: str = "", subgraphs: bool = False
) -> Union[str, Tuple[str, List[GraphProto]]]:
    """Render a node as ``%outs = OpType[attrs](%ins)``.

    With ``subgraphs=True``, also return any nested GraphProtos found in the
    node's attributes so the caller can print them afterwards.
    """
    pieces = []
    if len(node.output):
        pieces.append(", ".join(f"%{name}" for name in node.output))
        pieces.append("=")
    # To deal with nested graphs
    nested_graphs: List[GraphProto] = []
    attr_strs = []
    for attr in node.attribute:
        if subgraphs:
            rendered = printable_attribute(attr, subgraphs)
            if not isinstance(rendered[1], list):
                raise TypeError(
                    f"printed_attr_subgraphs[1] must be an instance of {list}."
                )
            nested_graphs.extend(rendered[1])
            attr_strs.append(rendered[0])
        else:
            rendered = printable_attribute(attr)
            if not isinstance(rendered, str):
                raise TypeError(f"printed must be an instance of {str}.")
            attr_strs.append(rendered)
    # attributes are rendered in sorted order for deterministic output
    attrs_text = ", ".join(sorted(attr_strs))
    inputs_text = ", ".join(f"%{name}" for name in node.input)
    if node.attribute:
        pieces.append(f"{node.op_type}[{attrs_text}]({inputs_text})")
    else:
        pieces.append(f"{node.op_type}({inputs_text})")
    line = prefix + " ".join(pieces)
    if subgraphs:
        return line, nested_graphs
    return line
def printable_graph(graph: GraphProto, prefix: str = "") -> str:
    """
    Display a GraphProto as a string.
    Arguments:
        graph (GraphProto): the graph to display
        prefix (string): prefix of every line
    Returns:
        string
    """
    content = []
    indent = prefix + "  "
    # header
    header = ["graph", graph.name]
    initializers = {t.name for t in graph.initializer}
    if len(graph.input):
        header.append("(")
        in_strs = []  # required inputs
        in_with_init_strs = (
            []
        )  # optional inputs with initializer providing default value
        for inp in graph.input:
            if inp.name not in initializers:
                in_strs.append(printable_value_info(inp))
            else:
                in_with_init_strs.append(printable_value_info(inp))
        if in_strs:
            # flush the pending header words, then one indented line per input
            content.append(prefix + " ".join(header))
            header = []
            for line in in_strs:
                content.append(prefix + "  " + line)
        header.append(")")
        if in_with_init_strs:
            header.append("optional inputs with matching initializers (")
            content.append(prefix + " ".join(header))
            header = []
            for line in in_with_init_strs:
                content.append(prefix + "  " + line)
            header.append(")")
        # from IR 4 onwards an initializer is not required to have a matching graph input
        # so output the name, type and shape of those as well
        if len(in_with_init_strs) < len(initializers):
            graph_inputs = {i.name for i in graph.input}
            init_strs = [
                printable_tensor_proto(i)
                for i in graph.initializer
                if i.name not in graph_inputs
            ]
            header.append("initializers (")
            content.append(prefix + " ".join(header))
            header = []
            for line in init_strs:
                content.append(prefix + "  " + line)
            header.append(")")
    header.append("{")
    content.append(prefix + " ".join(header))
    graphs: List[GraphProto] = []
    # body
    for node in graph.node:
        contents_subgraphs = printable_node(node, indent, subgraphs=True)
        if not isinstance(contents_subgraphs[1], list):
            raise TypeError(f"contents_subgraphs[1] must be an instance of {list}.")
        content.append(contents_subgraphs[0])
        graphs.extend(contents_subgraphs[1])
    # tail
    tail = ["return"]
    if len(graph.output):
        tail.append(", ".join([f"%{out.name}" for out in graph.output]))
    content.append(indent + " ".join(tail))
    # closing bracket
    content.append(prefix + "}")
    # nested graphs collected from node attributes are appended afterwards
    for g in graphs:
        content.append("\n" + printable_graph(g))
    return "\n".join(content)
def strip_doc_string(proto: google.protobuf.message.Message) -> None:
    """
    Empties `doc_string` field on any nested protobuf messages
    """
    if not isinstance(proto, google.protobuf.message.Message):
        raise TypeError(
            f"proto must be an instance of {google.protobuf.message.Message}."
        )
    for field in proto.DESCRIPTOR.fields:
        if field.name == "doc_string":
            proto.ClearField(field.name)
            continue
        if field.type != field.TYPE_MESSAGE:
            continue
        # recurse into nested messages, repeated or singular
        if field.label == field.LABEL_REPEATED:
            for child in getattr(proto, field.name):
                strip_doc_string(child)
        elif proto.HasField(field.name):
            strip_doc_string(getattr(proto, field.name))
def make_training_info(
    algorithm: GraphProto,
    algorithm_bindings: AssignmentBindingType,
    initialization: Optional[GraphProto],
    initialization_bindings: Optional[AssignmentBindingType],
) -> TrainingInfoProto:
    """Assemble a TrainingInfoProto from an algorithm graph, an optional
    initialization graph, and their respective key/value bindings."""
    info = TrainingInfoProto()
    info.algorithm.CopyFrom(algorithm)
    for key, value in algorithm_bindings:
        entry = info.update_binding.add()
        entry.key = key
        entry.value = value
    if initialization:
        info.initialization.CopyFrom(initialization)
    if initialization_bindings:
        for key, value in initialization_bindings:
            entry = info.initialization_binding.add()
            entry.key = key
            entry.value = value
    return info
# Following functions are used for mapping
def tensor_dtype_to_np_dtype(tensor_dtype: int) -> np.dtype:
    """
    Convert a TensorProto's data_type to corresponding numpy dtype. It can be used while making tensor.
    :param tensor_dtype: TensorProto's data_type
    :return: numpy's data_type
    """
    entry = mapping.TENSOR_TYPE_MAP[tensor_dtype]
    return entry.np_dtype
def tensor_dtype_to_storage_tensor_dtype(tensor_dtype: int) -> int:
    """
    Convert a TensorProto's data_type to corresponding data_type for storage.
    :param tensor_dtype: TensorProto's data_type
    :return: data_type for storage
    """
    entry = mapping.TENSOR_TYPE_MAP[tensor_dtype]
    return entry.storage_dtype
def tensor_dtype_to_string(tensor_dtype: int) -> str:
    """
    Get the name of given TensorProto's data_type.
    :param tensor_dtype: TensorProto's data_type
    :return: the name of data_type
    """
    entry = mapping.TENSOR_TYPE_MAP[tensor_dtype]
    return entry.name
def tensor_dtype_to_field(tensor_dtype: int) -> str:
    """
    Convert a TensorProto's data_type to corresponding field name for storage. It can be used while making tensors.
    :param tensor_dtype: TensorProto's data_type
    :return: field name
    """
    storage_dtype = mapping.TENSOR_TYPE_MAP[tensor_dtype].storage_dtype
    return mapping._STORAGE_TENSOR_TYPE_TO_FIELD[  # pylint: disable=protected-access
        storage_dtype
    ]
def np_dtype_to_tensor_dtype(np_dtype: np.dtype) -> int:
    """
    Convert a numpy's dtype to corresponding tensor type. It can be used while converting numpy arrays to tensors.
    :param np_dtype: numpy's data_type
    :return: TensorsProto's data_type
    """
    tensor_dtype = mapping._NP_TYPE_TO_TENSOR_TYPE[np_dtype]  # pylint: disable=protected-access
    return cast(int, tensor_dtype)
def get_all_tensor_dtypes() -> KeysView[int]:
    """
    Get all tensor types from TensorProto.
    :return: all tensor types from TensorProto
    """
    return mapping.TENSOR_TYPE_MAP.keys()
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@libs@onnx@onnx@helper.py@.PATH_END.py
|
{
"filename": "model.py",
"repo_name": "ifauh/par-sir",
"repo_path": "par-sir_extracted/par-sir-main/mods/sir3d/synth/model.py",
"type": "Python"
}
|
from collections import OrderedDict
from sir3d import sir_code
from sir3d.configuration import Configuration
import numpy as np
import os
import scipy.stats
import scipy.constants
import logging
import h5py
import scipy.integrate as integ
from scipy import interpolate
# from ipdb import set_trace as stop
__all__ = ['Model']
class Model(object):
def __init__(self, config=None, rank=0):
if (rank != 0):
return
self.logger = logging.getLogger("model")
self.logger.setLevel(logging.DEBUG)
self.logger.handlers = []
self.rank = rank
filename = os.path.join(os.path.dirname(__file__),'data/LINEAS')
ff = open(filename, 'r')
self.LINES = ff.readlines()
ff.close()
self.macroturbulence = 0.0
ch = logging.StreamHandler()
# formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
formatter = logging.Formatter('%(asctime)s - %(message)s')
ch.setFormatter(formatter)
self.logger.addHandler(ch)
if (config is not None):
self.configuration = Configuration(config)
self.use_configuration(self.configuration.config_dict)
if (self.rank == 0):
if (self.eos_type == 'MANCHA'):
self.logger.info('Reading EOS - MANCHA')
filename = os.path.join(os.path.dirname(__file__), 'data/eos_mancha.h5')
f = h5py.File(filename, 'r')
self.T_eos = np.log10(f['T'][:])
self.P_eos = np.log10(f['P'][:])
self.Pe_eos = np.log10(f['Pel'][:])
f.close()
self.logger.info('Reading kappa5000 - MANCHA')
self.T_kappa5 = np.array([3.32, 3.34, 3.36, 3.38, 3.40, 3.42, 3.44, 3.46, 3.48, 3.50,
3.52, 3.54, 3.56, 3.58, 3.60, 3.62, 3.64, 3.66, 3.68, 3.70,
3.73, 3.76, 3.79, 3.82, 3.85, 3.88, 3.91, 3.94, 3.97, 4.00,
4.05, 4.10, 4.15, 4.20, 4.25, 4.30, 4.35, 4.40, 4.45, 4.50,
4.55, 4.60, 4.65, 4.70, 4.75, 4.80, 4.85, 4.90, 4.95, 5.00,
5.05, 5.10, 5.15, 5.20, 5.25, 5.30 ])
self.P_kappa5 = np.array([-2., -1.5, -1., -.5, 0., .5, 1., 1.5, 2., 2.5, 3., 3.5, 4., 4.5, 5., 5.5, 6. ,6.5, 7., 7.5, 8. ])
self.kappa = np.zeros((56,21))
filename = os.path.join(os.path.dirname(__file__), 'data/kappa5000_mancha.dat')
f = open(filename, 'r')
for it in range(56):
for ip in range(21):
self.kappa[it,ip] = float(f.readline().split()[-1])
f.close()
else:
self.logger.info('Reading EOS and kappa5000 - SIR')
filename = os.path.join(os.path.dirname(__file__), 'data/kappa5000_eos_sir.h5')
f = h5py.File(filename, 'r')
self.T_eos = np.log10(f['T'][:])
self.P_eos = np.log10(f['P'][:])
self.Pe_eos = np.log10(f['Pe'][:])
self.T_kappa5 = np.log10(f['T'][:])
self.P_kappa5 = np.log10(f['P'][:])
self.kappa = f['kappa5000'][:]
f.close()
def __getstate__(self):
d = self.__dict__.copy()
if 'logger' in d:
d['logger'] = d['logger'].name
return d
def __setstate__(self, d):
if 'logger' in d:
d['logger'] = logging.getLogger(d['logger'])
self.__dict__.update(d)
    def use_configuration(self, config_dict):
        """
        Use a configuration file
        Parameters
        ----------
        config_dict : dict
            Dictionary containing all the options from the configuration file previously read
        Returns
        -------
        None
        """
        # Deal with the spectral regions
        tmp = config_dict['spectral regions']
        # Output file and atmosphere type
        self.output_file = config_dict['general']['stokes output']
        self.atmosphere_type = config_dict['atmosphere']['type']
        self.eos_type = config_dict['general']['eos']
        self.logger.info('Output Stokes file : {0}'.format(self.output_file))
        # Optional: interpolate the model onto fixed log(tau) surfaces and
        # write it out.  NOTE: the sentinel is the *string* 'None', not None.
        if (config_dict['general']['interpolated model output'] != 'None'):
            self.interpolated_model_filename = config_dict['general']['interpolated model output']
            self.interpolated_tau = np.array([float(i) for i in config_dict['general']['interpolate tau']])
            self.n_tau = len(self.interpolated_tau)
            self.logger.info('Output model file : {0}'.format(self.interpolated_model_filename))
        else:
            self.interpolated_model_filename = None
        # Add spectral regions
        self.init_sir(config_dict['spectral regions'])
        self.spectral_regions_dict = config_dict['spectral regions']
        # Read atmosphere
        if (self.atmosphere_type == 'MURAM'):
            if (self.rank == 0):
                self.logger.info('Using MURAM atmosphere')
            # Axis ordering and cube dimensions of the raw MURAM snapshot files.
            self.model_axes = tuple([int(k) for k in config_dict['atmosphere']['xyz']])
            self.ax, self.ay, self.az = self.model_axes
            self.model_shape = tuple([int(k) for k in config_dict['atmosphere']['dimensions']])
            self.nx, self.ny, self.nz = self.model_shape
            # Uniform geometric height axis (cm presumed -- TODO confirm units).
            self.deltaz = float(config_dict['atmosphere']['deltaz']) * np.arange(self.nz)
            self.T_file = config_dict['atmosphere']['temperature']
            self.logger.info(' - T file : {0}'.format(self.T_file))
            self.P_file = config_dict['atmosphere']['pressure']
            self.logger.info(' - P file : {0}'.format(self.P_file))
            self.rho_file = config_dict['atmosphere']['density']
            self.logger.info(' - rho file : {0}'.format(self.rho_file))
            # Vertical velocity may be given directly (vz) or as momentum
            # density (rho_vz); self.vz_type records which one was provided.
            if ('vz' in config_dict['atmosphere']):
                self.vz_file = config_dict['atmosphere']['vz']
                self.vz_type = 'vz'
                self.logger.info(' - vz file : {0}'.format(self.vz_file))
            elif ('rho_vz' in config_dict['atmosphere']):
                self.vz_file = config_dict['atmosphere']['rho_vz']
                self.vz_type = 'rho_vz'
                self.logger.info(' - rho_vz file : {0}'.format(self.vz_file))
            else:
                raise Exception("You need to provide either vz or rho_vz")
            self.Bx_file = config_dict['atmosphere']['bx']
            self.By_file = config_dict['atmosphere']['by']
            self.Bz_file = config_dict['atmosphere']['bz']
            self.logger.info(' - Bx file : {0}'.format(self.Bx_file))
            self.logger.info(' - By file : {0}'.format(self.By_file))
            self.logger.info(' - Bz file : {0}'.format(self.Bz_file))
            # Optional refinement of the optical-depth axis before synthesis;
            # 0.0 means "use the native sampling".
            if ('tau delta' in config_dict['atmosphere']):
                self.tau_fine = float(config_dict['atmosphere']['tau delta'])
                self.logger.info(' - tau axis will be interpolated to have delta={0}'.format(self.tau_fine))
            else:
                self.tau_fine = 0.0
            # cdodds 2021-06-08
            self.tau_file = config_dict['atmosphere']['tau500']
            self.ne_file = config_dict['atmosphere']['ne']
            # Zero microturbulence placeholder passed per-column to SIR.
            self.zeros = np.zeros(self.ny)
            self.maximum_tau = float(config_dict['atmosphere']['maximum tau'])
            # Unit/sign conversion factors applied to B and vz at synthesis time.
            self.bx_multiplier = 1.0
            self.by_multiplier = 1.0
            self.bz_multiplier = 1.0
            self.vz_multiplier = 1.0
            if ('multipliers' in config_dict['atmosphere']):
                if ('bx' in config_dict['atmosphere']['multipliers']):
                    self.bx_multiplier = float(config_dict['atmosphere']['multipliers']['bx'])
                    self.logger.info('Bx multiplier : {0}'.format(self.bx_multiplier))
                if ('by' in config_dict['atmosphere']['multipliers']):
                    self.by_multiplier = float(config_dict['atmosphere']['multipliers']['by'])
                    self.logger.info('By multiplier : {0}'.format(self.by_multiplier))
                if ('bz' in config_dict['atmosphere']['multipliers']):
                    self.bz_multiplier = float(config_dict['atmosphere']['multipliers']['bz'])
                    self.logger.info('Bz multiplier : {0}'.format(self.bz_multiplier))
                if ('vz' in config_dict['atmosphere']['multipliers']):
                    self.vz_multiplier = float(config_dict['atmosphere']['multipliers']['vz'])
                    self.logger.info('vz multiplier : {0}'.format(self.vz_multiplier))
# def init_sir_external(self, spectral):
# """
# Initialize SIR for this synthesis
# Parameters
# ----------
# None
# Returns
# -------
# None
# """
# filename = os.path.join(os.path.dirname(__file__),'data/LINEAS')
# ff = open(filename, 'r')
# flines = ff.readlines()
# ff.close()
# f = open('malla.grid', 'w')
# f.write("IMPORTANT: a) All items must be separated by commas. \n")
# f.write(" b) The first six characters of the last line \n")
# f.write(" in the header (if any) must contain the symbol --- \n")
# f.write("\n")
# f.write("Line and blends indices : Initial lambda Step Final lambda \n")
# f.write("(in this order) (mA) (mA) (mA) \n")
# f.write("-----------------------------------------------------------------------\n")
# for k, v in spectral.items():
# self.logger.info('Adding spectral regions {0}'.format(v['name']))
# left = float(v['wavelength range'][0])
# right = float(v['wavelength range'][1])
# n_lambda = int(v['n. wavelengths'])
# delta = (right - left) / n_lambda
# for i in range(len(v['spectral lines'])):
# for l in flines:
# tmp = l.split()
# index = int(tmp[0].split('=')[0])
# if (index == int(v['spectral lines'][0])):
# wvl = float(tmp[2])
# lines = ''
# n_lines = len(v['spectral lines'])
# for i in range(n_lines):
# lines += v['spectral lines'][i]
# if (i != n_lines - 1):
# lines += ', '
# f.write("{0} : {1}, {2}, {3}\n".format(lines, 1e3*(left-wvl), 1e3*delta, 1e3*(right-wvl)))
# f.close()
# self.n_lambda_sir = sir_code.init_externalfile(1, filename)
# def init_sir_agents_external(self):
# filename = os.path.join(os.path.dirname(__file__),'data/LINEAS')
# self.n_lambda_sir = sir_code.init_externalfile(1, filename)
    def init_sir(self, spectral):
        """
        Initialize SIR for this synthesis. This version does not make use of any external file, which might be
        not safe when running in MPI mode.
        Parameters
        ----------
        None
        Returns
        -------
        None
        """
        lines = []
        n_lines = 0
        # Atomic number lookup by element symbol (for the LINEAS line list).
        elements = {'H':1,'HE':2,'LI':3,'BE':4,'B':5,'C':6,'N':7,'O':8,'F':9,'NE':10,
                    'NA':11,'MG':12,'AL':13,'SI':14,'P':15,'S':16,'CL':17,'AR':18,'K':19,'CA':20,'SC':21,'TI':22,'V':23,'CR':24,
                    'MN':25,'FE':26,'CO':27,'NI':28,'CU':29,'ZN':30,'GA':31,'GE':32,'AS':33,'SE':34,'BR':35,'KR':36,
                    'RB':37,'SR':38,'Y':39,'ZR':40,'NB':41,'MO':42,'TC':43,'RU':44,'RH':45,'PD':46,'AG':47,'CD':48,'IN':49,
                    'SN':50,'SB':51,'TE':52,'I':53,'XE':54,'CS':55,'BA':56,'LA':57,'CE':58,'PR':59,'ND':60,'PM':61,
                    'SM':62,'EU':63,'GD':64,'TB':65,'DY':66,'HO':67,'ER':68,'TM':69,'YB':70,'LU':71,'HF':72,'TA':73,'W':74,
                    'RE':75,'OS':76,'IR':77,'PT':78,'AU':79,'HG':80,'TL':81,'PB':82,'BI':83,'PO':84,'AT':85,'RN':86,
                    'FR':87,'RA':88,'AC':89,'TH':90,'PA':91,'U':92}
        # Numeric codes for spectroscopic term designations (S, P, D, ...).
        states = {'S': 0, 'P': 1, 'D': 2, 'F': 3, 'G': 4, 'H': 5, 'I': 6, 'K': 7, 'L': 8, 'M': 9, 'N': 10, 'O': 11, 'Q': 12,
                  'p': 13, 'f': 14, 'h': 15, 'k': 16, 'm': 17, 'o': 18, 'r': 19, 't': 20, 'u': 21, 'v': 22, 'w': 23}
        for k, v in spectral.items():
            self.logger.info('Adding spectral regions {0}'.format(v['name']))
            n_lines += 1
            left = float(v['wavelength range'][0])
            right = float(v['wavelength range'][1])
            n_lambda = int(v['n. wavelengths'])
            delta = (right - left) / n_lambda
            # One entry per blended line in this spectral region.
            nblend = len(v['spectral lines'])
            lines = np.zeros(nblend, dtype=np.intc)
            atom = np.zeros(nblend, dtype=np.intc)
            istage = np.zeros(nblend, dtype=np.intc)
            wvl = np.zeros(nblend)
            zeff = np.zeros(nblend)
            energy = np.zeros(nblend)
            loggf = np.zeros(nblend)
            mult1 = np.zeros(nblend, dtype=np.intc)
            mult2 = np.zeros(nblend, dtype=np.intc)
            design1 = np.zeros(nblend, dtype=np.intc)
            design2 = np.zeros(nblend, dtype=np.intc)
            tam1 = np.zeros(nblend)
            tam2 = np.zeros(nblend)
            alfa = np.zeros(nblend)
            sigma = np.zeros(nblend)
            for i in range(nblend):
                lines[i] = v['spectral lines'][i]
                # Linear scan of the in-memory LINEAS list for the matching index.
                for l in self.LINES:
                    tmp = l.split()
                    index = int(tmp[0].split('=')[0])
                    if (index == int(v['spectral lines'][i])):
                        atom[i] = elements[tmp[0].split('=')[1]]
                        istage[i] = tmp[1]
                        wvl[i] = float(tmp[2])
                        zeff[i] = float(tmp[3])
                        energy[i] = float(tmp[4])
                        loggf[i] = float(tmp[5])
                        # Terms like "5D" -> multiplicity 5, designation D.
                        mult1[i] = int(tmp[6][:-1])
                        mult2[i] = int(tmp[8][:-1])
                        design1[i] = states[tmp[6][-1]]
                        design2[i] = states[tmp[8][-1]]
                        tam1[i] = float(tmp[7].split('-')[0])
                        tam2[i] = float(tmp[9].split('-')[0])
                        # Optional collisional broadening (ABO) parameters.
                        if (len(tmp) == 12):
                            alfa[i] = float(tmp[-2])
                            sigma[i] = float(tmp[-1])
                        else:
                            alfa[i] = 0.0
                            sigma[i] = 0.0
            # Wavelength limits in mA relative to the first line's rest wavelength.
            lambda0 = 1e3*(left-wvl[0])
            lambda1 = 1e3*(right-wvl[0])
            sir_code.init(n_lines, nblend, lines, atom, istage, wvl, zeff, energy, loggf,
                mult1, mult2, design1, design2, tam1, tam2, alfa, sigma, lambda0, lambda1, n_lambda)
            self.n_lambda_sir = n_lambda
            self.lambda_zeropoint = 1e3*wvl[0]
def intpltau(self, newtau, oldtau, var):
fX = interpolate.interp1d(oldtau, var, bounds_error=False, fill_value="extrapolate")
return fX(newtau)
    def synth(self, T, P, rho, vz, Bx, By, Bz, tau500, Ne, interpolate_model=False):
        """Synthesize Stokes profiles for one atmospheric column with SIR.

        Arrays are 1D along the vertical axis.  Returns ``stokes`` (and the
        tau-interpolated ``model`` when ``interpolate_model`` is True).
        """
        # Get ltau500 axis
        log_T = np.log10(T)
        log_P = np.log10(P)
        log_tau = np.log10(tau500)
        Pe = Ne * scipy.constants.k * 1e7 * T # convert SI to cgs units - kg to g and m to cm2
        # Placeholder microturbulence (zero everywhere).
        uT = np.zeros((T.shape))
        # TODO add boolean to eliminate the kappa and tau interpolations below
        # Bilinear interpolation of kappa5000 in (log T, log P).
        it0 = np.searchsorted(self.T_kappa5, log_T) - 1
        it1 = it0 + 1
        ip0 = np.searchsorted(self.P_kappa5, log_P) - 1
        ip1 = ip0 + 1
        kappa = self.kappa[it0,ip0] * (self.T_kappa5[it1] - log_T) * (self.P_kappa5[ip1] - log_P) + \
            self.kappa[it1,ip0] * (log_T - self.T_kappa5[it0]) * (self.P_kappa5[ip1] - log_P) + \
            self.kappa[it0,ip1] * (self.T_kappa5[it1] - log_T) * (log_P - self.P_kappa5[ip0]) + \
            self.kappa[it1,ip1] * (log_T - self.T_kappa5[it0]) * (log_P - self.P_kappa5[ip0])
        kappa /= ((self.T_kappa5[it1] - self.T_kappa5[it0]) * (self.P_kappa5[ip1] - self.P_kappa5[ip0]))
        # MANCHA tables give opacity per mass (needs rho); SIR tables are per volume.
        if (self.eos_type == 'MANCHA'):
            chi = (kappa * rho)[::-1]
        else:
            chi = kappa[::-1]
        # Integrate opacity along height to build the log(tau500) scale.
        tau = integ.cumtrapz(chi,x=self.deltaz)
        ltau = np.log10(np.insert(tau, 0, 0.5*tau[0]))[::-1]
        ind = np.where(ltau < 2.0)[0]
        # TODO add the hard coded 2.0 threshold above to the ini file
        # Get electron pressure
        it0 = np.searchsorted(self.T_eos, log_T) - 1
        it1 = it0 + 1
        ip0 = np.searchsorted(self.P_eos, log_P) - 1
        ip1 = ip0 + 1
        # NOTE: the MANCHA EOS table is stored transposed (P, T) relative to SIR's (T, P).
        if (self.eos_type == 'MANCHA'):
            log_Pe = self.Pe_eos[ip0,it0] * (self.T_eos[it1] - log_T) * (self.P_eos[ip1] - log_P) + \
                self.Pe_eos[ip1,it0] * (log_T - self.T_eos[it0]) * (self.P_eos[ip1] - log_P) + \
                self.Pe_eos[ip0,it1] * (self.T_eos[it1] - log_T) * (log_P - self.P_eos[ip0]) + \
                self.Pe_eos[ip1,it1] * (log_T - self.T_eos[it0]) * (log_P - self.P_eos[ip0])
        else:
            log_Pe = self.Pe_eos[it0,ip0] * (self.T_eos[it1] - log_T) * (self.P_eos[ip1] - log_P) + \
                self.Pe_eos[it1,ip0] * (log_T - self.T_eos[it0]) * (self.P_eos[ip1] - log_P) + \
                self.Pe_eos[it0,ip1] * (self.T_eos[it1] - log_T) * (log_P - self.P_eos[ip0]) + \
                self.Pe_eos[it1,ip1] * (log_T - self.T_eos[it0]) * (log_P - self.P_eos[ip0])
        log_Pe /= ((self.T_eos[it1] - self.T_eos[it0]) * (self.P_eos[ip1] - self.P_eos[ip0]))
        # NOTE(review): the `0 and` makes this branch dead code, so the fine
        # tau resampling is disabled here -- confirm whether that is intended.
        if (0 and self.tau_fine != 0.0):
            taufino = np.arange(np.min(log_tau[ind]), np.max(log_tau[ind]), self.tau_fine)[::-1]
            stokes, error = sir_code.synth(1, self.n_lambda_sir, taufino, self.intpltau(taufino, log_tau[ind], T[ind]),
                10**self.intpltau(taufino, log_tau[ind], log_Pe[ind]), self.intpltau(taufino, log_tau[ind], self.zeros[ind]),
                self.intpltau(taufino, log_tau[ind], self.vz_multiplier*vz[ind]), self.intpltau(taufino, log_tau[ind], self.bx_multiplier*Bx[ind]),
                self.intpltau(taufino, log_tau[ind], self.by_multiplier*By[ind]), self.intpltau(taufino, log_tau[ind], self.bz_multiplier*Bz[ind]), self.macroturbulence)
        else:
            stokes, error = sir_code.synth(1, self.n_lambda_sir, log_tau, T, Pe, uT, self.vz_multiplier*vz,
                self.bx_multiplier*Bx, self.by_multiplier*By, self.bz_multiplier*Bz, self.macroturbulence)
        # A non-zero error flags the whole column with the -99 sentinel.
        if (error != 0):
            logging.warning('synth returned error: %d'%(error))
            stokes = -99.0 * np.ones_like(stokes)
        # We want to interpolate the model to certain isotau surfaces
        if (interpolate_model):
            model = np.zeros((7,self.n_tau))
            model[0,:] = self.intpltau(self.interpolated_tau, ltau[::-1], self.deltaz[::-1])
            model[1,:] = self.intpltau(self.interpolated_tau, ltau[::-1], T[::-1])
            # Pressure is interpolated in log space to stay positive.
            model[2,:] = np.exp(self.intpltau(self.interpolated_tau, ltau[::-1], np.log(P[::-1])))
            model[3,:] = self.intpltau(self.interpolated_tau, ltau[::-1], self.vz_multiplier * vz[::-1])
            model[4,:] = self.intpltau(self.interpolated_tau, ltau[::-1], self.bx_multiplier * Bx[::-1])
            model[5,:] = self.intpltau(self.interpolated_tau, ltau[::-1], self.by_multiplier * By[::-1])
            model[6,:] = self.intpltau(self.interpolated_tau, ltau[::-1], self.bz_multiplier * Bz[::-1])
            # TODO add interpolated tau500 and Pe if available
            return stokes, model
        return stokes
    def synth2d(self, T, P, rho, vz, Bx, By, Bz, tau500, Ne, interpolate_model=False):
        """Synthesize Stokes profiles for a slab of columns (second axis).

        Same physics as ``synth`` but loops over ``T.shape[1]`` columns,
        returning ``stokes_out`` of shape (5, n, n_lambda) and optionally
        ``model_out`` of shape (7, n, n_tau).
        """
        n = T.shape[1]
        Pe = Ne * scipy.constants.k * 1e7 * T # convert SI to cgs units - kg to g and m to cm2
        log_T = np.log10(T)
        log_P = np.log10(P)
        log_tau = np.log10(tau500)
        stokes_out = np.zeros((5,n,self.n_lambda_sir))
        if (interpolate_model):
            model_out = np.zeros((7,n,self.n_tau))
        for loop in range(n):
            # NOTE(review): ltau/log_Pe are only computed when
            # interpolate_model is True, yet the tau_fine branch below uses
            # them unconditionally -- tau_fine != 0 with
            # interpolate_model=False would raise NameError. Confirm intent.
            if interpolate_model:
                # Bilinear kappa5000 interpolation in (log T, log P).
                it0 = np.searchsorted(self.T_kappa5, log_T) - 1
                it1 = it0 + 1
                ip0 = np.searchsorted(self.P_kappa5, log_P) - 1
                ip1 = ip0 + 1
                kappa = self.kappa[it0,ip0] * (self.T_kappa5[it1] - log_T) * (self.P_kappa5[ip1] - log_P) + \
                    self.kappa[it1,ip0] * (log_T - self.T_kappa5[it0]) * (self.P_kappa5[ip1] - log_P) + \
                    self.kappa[it0,ip1] * (self.T_kappa5[it1] - log_T) * (log_P - self.P_kappa5[ip0]) + \
                    self.kappa[it1,ip1] * (log_T - self.T_kappa5[it0]) * (log_P - self.P_kappa5[ip0])
                kappa /= ((self.T_kappa5[it1] - self.T_kappa5[it0]) * (self.P_kappa5[ip1] - self.P_kappa5[ip0]))
                if (self.eos_type == 'MANCHA'):
                    chi = (kappa * rho[:,loop])[::-1]
                else:
                    chi = kappa[::-1]
                # Height-integrated opacity -> log(tau500) scale for this column.
                tau = integ.cumtrapz(chi, x=self.deltaz)
                ltau = np.log10(np.insert(tau, 0, 0.5*tau[0]))[::-1]
                ind = np.where(ltau < 2.0)[0]
                # Get electron pressure
                it0 = np.searchsorted(self.T_eos, log_T) - 1
                it1 = it0 + 1
                ip0 = np.searchsorted(self.P_eos, log_P) - 1
                ip1 = ip0 + 1
                # MANCHA EOS table is transposed (P, T) relative to SIR's (T, P).
                if (self.eos_type == 'MANCHA'):
                    log_Pe = self.Pe_eos[ip0,it0] * (self.T_eos[it1] - log_T) * (self.P_eos[ip1] - log_P) + \
                        self.Pe_eos[ip1,it0] * (log_T - self.T_eos[it0]) * (self.P_eos[ip1] - log_P) + \
                        self.Pe_eos[ip0,it1] * (self.T_eos[it1] - log_T) * (log_P - self.P_eos[ip0]) + \
                        self.Pe_eos[ip1,it1] * (log_T - self.T_eos[it0]) * (log_P - self.P_eos[ip0])
                else:
                    log_Pe = self.Pe_eos[it0,ip0] * (self.T_eos[it1] - log_T) * (self.P_eos[ip1] - log_P) + \
                        self.Pe_eos[it1,ip0] * (log_T - self.T_eos[it0]) * (self.P_eos[ip1] - log_P) + \
                        self.Pe_eos[it0,ip1] * (self.T_eos[it1] - log_T) * (log_P - self.P_eos[ip0]) + \
                        self.Pe_eos[it1,ip1] * (log_T - self.T_eos[it0]) * (log_P - self.P_eos[ip0])
                log_Pe /= ((self.T_eos[it1] - self.T_eos[it0]) * (self.P_eos[ip1] - self.P_eos[ip0]))
            if (self.tau_fine != 0.0):
                # Resample the column onto a finer, uniform log(tau) grid.
                taufino = np.arange(np.min(ltau[ind]), np.max(ltau[ind]), self.tau_fine)[::-1]
                stokes_out[:,loop,:], error = sir_code.synth(1, self.n_lambda_sir, taufino, self.intpltau(taufino, ltau[ind], T[ind,loop]),
                    10**self.intpltau(taufino, ltau[ind], log_Pe[ind]), self.intpltau(taufino, ltau[ind], self.zeros[ind]),
                    self.intpltau(taufino, ltau[ind], self.vz_multiplier*vz[ind,loop]), self.intpltau(taufino, ltau[ind], self.bx_multiplier*Bx[ind,loop]),
                    self.intpltau(taufino, ltau[ind], self.by_multiplier*By[ind,loop]), self.intpltau(taufino, ltau[ind], self.bz_multiplier*Bz[ind,loop]), self.macroturbulence)
            else:
                stokes_out[:,loop,:], error = sir_code.synth(1, self.n_lambda_sir, log_tau[:,loop], T[:,loop], Pe[:, loop], self.zeros[0:self.nz],
                    self.vz_multiplier*vz[:,loop], self.bx_multiplier*Bx[:,loop], self.by_multiplier*By[:,loop], self.bz_multiplier*Bz[:,loop], self.macroturbulence)
            # Flag failed columns with the -99 sentinel instead of aborting.
            if (error != 0):
                logging.warning('synth returned error: %d'%(error))
                stokes_out[:,loop,:] = -99.0
            # We want to interpolate the model to certain isotau surfaces
            if (interpolate_model):
                model_out[0,loop,:] = self.intpltau(self.interpolated_tau, ltau[::-1], self.deltaz[::-1])
                model_out[1,loop,:] = self.intpltau(self.interpolated_tau, ltau[::-1], T[::-1,loop])
                # Pressure interpolated in log space to stay positive.
                model_out[2,loop,:] = np.exp(self.intpltau(self.interpolated_tau, ltau[::-1], np.log(P[::-1,loop])))
                model_out[3,loop,:] = self.intpltau(self.interpolated_tau, ltau[::-1], self.vz_multiplier * vz[::-1,loop])
                model_out[4,loop,:] = self.intpltau(self.interpolated_tau, ltau[::-1], self.bx_multiplier * Bx[::-1,loop])
                model_out[5,loop,:] = self.intpltau(self.interpolated_tau, ltau[::-1], self.by_multiplier * By[::-1,loop])
                model_out[6,loop,:] = self.intpltau(self.interpolated_tau, ltau[::-1], self.bz_multiplier * Bz[::-1,loop])
        if (interpolate_model):
            return stokes_out, model_out
        else:
            return stokes_out
|
ifauhREPO_NAMEpar-sirPATH_START.@par-sir_extracted@par-sir-main@mods@sir3d@synth@model.py@.PATH_END.py
|
{
"filename": "plottiling.py",
"repo_name": "wchenastro/Mosaic",
"repo_path": "Mosaic_extracted/Mosaic-master/example/plottiling.py",
"type": "Python"
}
|
#!/usr/bin/env python3
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.patches import Ellipse
from astropy import wcs
from astropy.coordinates import SkyCoord
from astropy import units as u
import sys, argparse
def parse_argument():
for i, arg in enumerate(sys.argv):
if (arg[0] == '-') and arg[1].isdigit(): sys.argv[i] = ' ' + arg
parser = argparse.ArgumentParser()
parser.add_argument('--tiling_plot', nargs=1, metavar="file", help='filename for the tiling plot')
parser.add_argument('--tiling', nargs=1, metavar="file", help='file for the coordinates of the beams')
parser.add_argument('--boresight', nargs=2, metavar=('RA', 'DEC'), help='boresight position in RA and DEC in h:m:s.s d:m:s.s')
parser.add_argument('--beamshape', nargs=3, metavar=('x', 'y', 'deg'), help='semi-axis and orientation angle of the beam')
parser.add_argument('--beam_size_scaling', nargs=1, metavar="scaling", help='scaling factor for the size of the beam')
parser.add_argument("--flip", action="store_true", help='flip the orientation of the beam')
parser.add_argument("--inner", nargs=1, metavar="number", help='highlight the most [number] inner beams')
parser.add_argument("--extra_source", nargs=1, metavar="file", help='extra point sources to plot')
args = parser.parse_args()
return args
# Top-level script: read beam coordinates, project them onto a tangent-plane
# WCS centred on the boresight, and draw one ellipse per beam.
args = parse_argument()
coord_file = args.tiling[0]
# Optional multiplicative scaling of the beam semi-axes (default: 1).
if args.beam_size_scaling is not None:
    scaling = float(args.beam_size_scaling[0])
else:
    scaling = 1.0
# Expected columns: beam name, RA (h:m:s), DEC (d:m:s).
coords = np.genfromtxt(coord_file, dtype=None, encoding='utf-8')
equatorialCoordinates = SkyCoord(coords[:,1].astype(str), coords[:,2].astype(str),
        frame='fk5', unit=(u.hourangle, u.deg))
indice = coords[:,0].astype(str)
equatorialCoordinates = np.array([equatorialCoordinates.ra.astype(float), equatorialCoordinates.dec.astype(float)]).T
# --flip mirrors the beam orientation angle about 180 degrees.
axis1, axis2, angle = (float(args.beamshape[0])*scaling, float(args.beamshape[1])*scaling,
        180-float(args.beamshape[2]) if args.flip else float(args.beamshape[2]))
equatorialBoresight = SkyCoord(args.boresight[0], args.boresight[1],
        frame='fk5', unit=(u.hourangle, u.deg))
boresight = (equatorialBoresight.ra.deg , equatorialBoresight.dec.deg)
fileName = args.tiling_plot[0]
if args.inner is not None:
    inner = int(args.inner[0])
else:
    inner = 0
index = True
# Tiny pixel scale so pixel coordinates are effectively tangent-plane offsets.
step = 1/10000000000.
wcs_properties = wcs.WCS(naxis=2)
wcs_properties.wcs.crpix = [0, 0]
wcs_properties.wcs.cdelt = [-step, step]
wcs_properties.wcs.crval = boresight
wcs_properties.wcs.ctype = ["RA---TAN", "DEC--TAN"]
center = boresight
resolution = step
inner_idx = []
fig = plt.figure(figsize=(1600./96, 1600./96), dpi=96)
axis = fig.add_subplot(111,aspect='equal', projection=wcs_properties)
scaled_pixel_coordinats = wcs_properties.wcs_world2pix(equatorialCoordinates, 0)
beam_coordinate = np.array(scaled_pixel_coordinats)
# When highlighting inner beams, sort by squared distance from the boresight.
if inner > 0:
    index_sort = np.argsort(np.sum(np.square(beam_coordinate), axis=1))
    beam_coordinate = beam_coordinate.take(index_sort, axis=0)
    indice = indice.take(index_sort, axis=0)
for idx in range(len(beam_coordinate)):
    coord = beam_coordinate[idx]
    if index == True:
        # Beam label: numeric suffix after the 'cfbf' prefix in the name.
        num = indice[idx].split('cfbf')[-1]
        axis.text(coord[0], coord[1], int(num), size=6, ha='center', va='center')
    ellipse = Ellipse(xy=coord,
        width=2.*axis1/resolution,height=2.*axis2/resolution, angle=angle)
    ellipse.fill = False
    if inner > 0 and idx < inner:
        ellipse.fill = True
        # NOTE(review): plain attribute assignment of edgecolor/facecolor on
        # a matplotlib Patch likely has no visual effect (set_edgecolor /
        # set_facecolor are the supported setters) -- confirm intent.
        ellipse.edgecolor = 'auto'
        inner_idx.append(int(num))
        if idx == 0:
            ellipse.facecolor = '#4169E1'
        else:
            ellipse.facecolor = '#0F52BA'
    axis.add_artist(ellipse)
# Symmetric axis limits with a 10% margin around the outermost beam.
margin = 1.1 * max(np.sqrt(np.sum(np.square(beam_coordinate), axis=1)))
axis.set_xlim(center[0]-margin, center[0]+margin)
axis.set_ylim(center[1]-margin, center[1]+margin)
if args.extra_source is not None:
    extra_coords = np.genfromtxt(args.extra_source[0], dtype=None)
    # A single source comes back as a 1D record; normalise to 2D.
    if len(extra_coords.shape) == 1:
        extra_coords = extra_coords.reshape(1, -1)
    extra_equatorial_coordinates = SkyCoord(extra_coords[:,1].astype(str),
            extra_coords[:,2].astype(str),
            frame='fk5', unit=(u.hourangle, u.deg))
    extra_equatorial_coordinates = np.array([extra_equatorial_coordinates.ra.astype(float),
            extra_equatorial_coordinates.dec.astype(float)]).T
    scaled_extra_pixel_coordinats = np.array(wcs_properties.wcs_world2pix(
            extra_equatorial_coordinates, 0))
    axis.scatter(scaled_extra_pixel_coordinats[:,0], scaled_extra_pixel_coordinats[:,1],s=40)
ra = axis.coords[0]
dec = axis.coords[1]
ra.set_ticklabel(size=20)
dec.set_ticklabel(size=20, rotation="vertical")
dec.set_ticks_position('l')
ra.set_ticks_position('b')
ra.set_axislabel("RA", size=20)
dec.set_axislabel("DEC", size=20)
plt.savefig(fileName, dpi=96)
if len(inner_idx) != 0:
    print(np.sort(inner_idx))
|
wchenastroREPO_NAMEMosaicPATH_START.@Mosaic_extracted@Mosaic-master@example@plottiling.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "sibirrer/hierArc",
"repo_path": "hierArc_extracted/hierArc-main/hierarc/Sampling/__init__.py",
"type": "Python"
}
|
sibirrerREPO_NAMEhierArcPATH_START.@hierArc_extracted@hierArc-main@hierarc@Sampling@__init__.py@.PATH_END.py
|
|
{
"filename": "noxfile.py",
"repo_name": "dfm/tinygp",
"repo_path": "tinygp_extracted/tinygp-main/noxfile.py",
"type": "Python"
}
|
import platform
from pathlib import Path
import nox
PYTHON_VERSIONS = ["3.9", "3.10", "3.11", "3.12"]
@nox.session(python=PYTHON_VERSIONS)
def test(session: nox.Session) -> None:
    """Run the unit-test suite in an isolated virtualenv."""
    session.install(".[test]")
    session.run("pytest", *session.posargs)
@nox.session(python=PYTHON_VERSIONS)
def comparison(session: nox.Session) -> None:
    """Run the comparison test suite with 64-bit JAX precision enabled."""
    session.install(".[test,comparison]")
    session.run("pytest", *session.posargs, env={"JAX_ENABLE_X64": "1"})
@nox.session(python=PYTHON_VERSIONS)
def doctest(session: nox.Session) -> None:
    """Run doctests against the *installed* tinygp package tree.

    The installed module path differs between Windows ("Lib") and
    POSIX ("lib/pythonX.Y") virtualenv layouts.
    """
    venv_root = Path(session.virtualenv.location)
    if platform.system() == "Windows":
        module = venv_root / "Lib" / "site-packages" / "tinygp"
    else:
        module = (
            venv_root / "lib" / f"python{session.python}" / "site-packages" / "tinygp"
        )
    session.install(".[test]", "numpyro")
    session.run("pytest", "--doctest-modules", "-v", str(module), *session.posargs)
|
dfmREPO_NAMEtinygpPATH_START.@tinygp_extracted@tinygp-main@noxfile.py@.PATH_END.py
|
{
"filename": "classify_eigenvalues.py",
"repo_name": "dh4gan/tache",
"repo_path": "tache_extracted/tache-master/plot/classify_eigenvalues.py",
"type": "Python"
}
|
# Written 9/10/14 by dh4gan
# Some useful functions for classifying eigenvalues and defining structure
def classify_eigenvalue(eigenvalues, threshold):
    '''Classify a point by how many of its first three eigenvalues fall
    below *threshold*.

    iclass = 0 --> clusters  (3 +ve eigenvalues, 0 -ve)
    iclass = 1 --> filaments (2 +ve eigenvalues, 1 -ve)
    iclass = 2 --> sheet     (1 +ve eigenvalues, 2 -ve)
    iclass = 3 --> voids     (0 +ve eigenvalues, 3 -ve)
    '''
    below_threshold = [eigenvalues[i] < threshold for i in range(3)]
    return int(sum(below_threshold))
|
dh4ganREPO_NAMEtachePATH_START.@tache_extracted@tache-master@plot@classify_eigenvalues.py@.PATH_END.py
|
{
"filename": "test_halos.py",
"repo_name": "bccp/nbodykit",
"repo_path": "nbodykit_extracted/nbodykit-master/nbodykit/tutorials/tests/test_halos.py",
"type": "Python"
}
|
from runtests.mpi import MPITest
from nbodykit.tutorials import DemoHaloCatalog
from nbodykit import setup_logging
import pytest
setup_logging()
@MPITest([4])
def test_download(comm):
    """Download the demo halo catalog and round-trip it through halotools."""
    from halotools.sim_manager import UserSuppliedHaloCatalog
    # download and load the cached catalog
    cat = DemoHaloCatalog('bolshoi', 'rockstar', 0.5, comm=comm)
    assert all(col in cat for col in ['Position', 'Velocity'])
    # convert to halotools catalog
    halotools_cat = cat.to_halotools()
    assert isinstance(halotools_cat, UserSuppliedHaloCatalog)
    # bad simulation name
    with pytest.raises(Exception):
        cat = DemoHaloCatalog('BAD', 'rockstar', 0.5)
@MPITest([4])
def test_download_failure(comm):
    """A nonsensical redshift must raise rather than silently download."""
    # initialize with bad redshift
    BAD_REDSHIFT = 100.0
    with pytest.raises(Exception):
        cat = DemoHaloCatalog('bolshoi', 'rockstar', BAD_REDSHIFT, comm=comm)
|
bccpREPO_NAMEnbodykitPATH_START.@nbodykit_extracted@nbodykit-master@nbodykit@tutorials@tests@test_halos.py@.PATH_END.py
|
{
"filename": "test_pixell.py",
"repo_name": "simonsobs/pixell",
"repo_path": "pixell_extracted/pixell-master/tests/test_pixell.py",
"type": "Python"
}
|
"""Tests for `pixell` package."""
import unittest
from pixell import enmap
from pixell import curvedsky
from pixell import lensing
from pixell import interpol
from pixell import array_ops
from pixell import enplot
from pixell import powspec
from pixell import reproject
from pixell import pointsrcs
from pixell import wcsutils
from pixell import utils as u
from pixell import colors
from pixell import fft
from pixell import tilemap
from pixell import utils
import numpy as np
import pickle
import os,sys
import matplotlib
matplotlib.use('Agg')
import numpy as np
import itertools,yaml,pickle,os,sys
import matplotlib.pyplot as plt
TEST_DIR = os.path.dirname(__file__)
DATA_PREFIX = os.path.join(TEST_DIR, 'data/')
def get_reference_pixels(shape):
    """Return (Npix, 2) array of (y, x) reference-pixel indices for a 2D map.

    For each of the last two axes the sampled indices are the two first
    (0, 1), the two last (N-2, N-1) and the centre: both N//2-1 and N//2
    for even N, just N//2 for odd N. The reference pixels are the cartesian
    product of the two index lists, giving a fixed pattern of corner, edge
    and central samples. Requires N >= 5 along each axis.
    """
    Ny, Nx = shape[-2:]
    assert (Ny>=5) and (Nx>=5), "Tests are not implemented for arrays with a dimension<5."

    def _edges_and_middle(n):
        # First two, last two, plus one (odd n) or two (even n) centre indices.
        if n % 2 == 0:
            return [0, 1, n // 2 - 1, n // 2, n - 2, n - 1]
        return [0, 1, n // 2, n - 2, n - 1]

    ys = _edges_and_middle(Ny)
    xs = _edges_and_middle(Nx)
    return np.array(list(itertools.product(ys, xs)))
def mask(arr,pixels,val=0):
    """Set *arr* to *val* at the (Npix, 2) (y, x) coordinates in *pixels*.

    Mutates *arr* in place and returns it.
    """
    ys = pixels[:, 0]
    xs = pixels[:, 1]
    arr[..., ys, xs] = val
    return arr
def get_pixel_values(arr,pixels):
    """Return the values of *arr* at the (Npix, 2) (y, x) coordinates *pixels*."""
    ys = pixels[:, 0]
    xs = pixels[:, 1]
    return arr[..., ys, xs]
def get_meansquare(arr):
    # NOTE(review): despite the name this computes mean(arr * 2), i.e. twice
    # the mean, NOT the mean of squares (arr**2.) -- probable typo. It feeds
    # stored regression checksums, so "fixing" it would invalidate reference
    # results; confirm intent before changing.
    return np.mean(arr*2.)
def save_mask_image(filename,shape):
    """Save a minimal plot of an array masked by the currently implemented reference
    pixel geometry
    e.g.
    > shape = (11,12)
    > save_mask_image("test_mask.png",shape)
    """
    arr = np.zeros(shape)
    pixels = get_reference_pixels(shape)
    # Reference pixels are drawn with value 1 on a zero background.
    masked = mask(arr,pixels,val=1)
    fig = plt.figure()
    im = plt.imshow(masked,cmap='rainbow')
    ax = plt.gca()
    # Offset ticks by half a pixel so grid lines fall on pixel boundaries.
    ax.set_xticks(np.arange(0,shape[1])+0.5);
    ax.set_yticks(np.arange(0,shape[0])+0.5);
    ax.grid(which='major',color='w', linestyle='-', linewidth=5)
    # Fully transparent tick marks and axes spines: only the grid is visible.
    ax.tick_params(axis='x', colors=(0,0,0,0))
    ax.tick_params(axis='y', colors=(0,0,0,0))
    for spine in im.axes.spines.values():
        spine.set_edgecolor((0,0,0,0))
    plt.savefig(filename, bbox_inches='tight')
def get_spectrum(ntype,noise,lmax,lmax_pad):
    """Return a noise power spectrum sampled at ell = 0 .. lmax+lmax_pad-1.

    'white'    : constant C_ell for a noise level *noise* (arcmin units).
    'white_dl' : spectrum flat in D_ell, zero for ell < 2.
    Raises NotImplementedError for any other *ntype*.
    """
    ells = np.arange(0, lmax + lmax_pad)
    white_level = (noise**2.) * ((np.pi/180./60.)**2.)
    if ntype == "white":
        return np.full(ells.shape, white_level)
    if ntype == "white_dl":
        spec = np.zeros(ells.shape)
        # C_ell = D_ell * 2*pi / (ell*(ell+1)); undefined below ell=2.
        spec[2:] = white_level * 2. * np.pi / ells[2:] / (ells + 1.)[2:]
        return spec
    raise NotImplementedError
def get_spectra(yml_section,lmax,lmax_pad):
    """Build a name -> spectrum mapping from the YAML 'spectra' section."""
    return {entry['name']: get_spectrum(entry['type'], entry['noise'], lmax, lmax_pad)
            for entry in yml_section}
def get_geometries(yml_section):
    """Build a name -> geometry mapping from the YAML 'geometries' section.

    Supported entry types:
    'fullsky' : construct a full-sky CC geometry from res_arcmin/proj.
    'pickle'  : load a pre-pickled (shape, wcs) geometry from DATA_PREFIX.

    Raises NotImplementedError for any other type.
    """
    geos = {}
    for g in yml_section:
        if g['type']=='fullsky':
            geos[g['name']] = enmap.fullsky_geometry(res=np.deg2rad(g['res_arcmin']/60.),proj=g['proj'],variant="CC")
        elif g['type']=='pickle':
            # Use a context manager so the geometry file is closed promptly
            # (the original left the handle open).
            with open(DATA_PREFIX+"%s"%g['filename'],'rb') as gfile:
                geos[g['name']] = pickle.load(gfile)
        else:
            raise NotImplementedError
    return geos
def generate_map(shape,wcs,powspec,lmax,seed):
    # Thin wrapper: draw a seeded Gaussian random (T, E, B) realization of
    # *powspec* on the given geometry, so repeated test runs are reproducible.
    return curvedsky.rand_map(shape, wcs, powspec, lmax=lmax, dtype=np.float64, seed=seed, spin=[0,2], method="auto", verbose=False)
def check_equality(imap1,imap2):
    """Assert that two enmaps agree in shape, WCS and pixel values.

    On a value mismatch, best-effort diagnostic plots are written via the
    optional ``orphics`` package before the failure is raised.

    Raises
    ------
    AssertionError
        If the shapes, WCS, or pixel values differ.
    """
    assert np.all(imap1.shape==imap2.shape)
    assert wcsutils.equal(imap1.wcs,imap2.wcs)
    if not np.all(np.isclose(imap1,imap2)):
        # Diagnostics must never mask the real failure: the original used a
        # bare `except:` and an unconditional orphics import, so a missing
        # orphics raised ImportError instead of reporting the mismatch.
        try:
            from orphics import io
            io.plot_img(imap1,"i1.png",lim=[-1.5,2])
            io.plot_img(imap2,"i2.png",lim=[-1.5,2])
            io.plot_img((imap1-imap2)/imap1,"ip.png",lim=[-0.1,0.1])
        except Exception:
            pass
        raise AssertionError("imap1 and imap2 differ beyond isclose tolerance")
def get_extraction_test_results(yaml_file):
    """Generate reference maps per (geometry, spectrum) from a YAML config,
    round-trip them through FITS, and record reference-pixel values and a
    scalar checksum for each configured extract.

    Returns (results, result_name) where results[geometry][spectrum] holds
    'refpixels' and 'meansquare' entries.
    """
    print("Starting tests from ",yaml_file)
    with open(yaml_file) as f:
        config = yaml.safe_load(f)
    geos = get_geometries(config['geometries'])
    lmax = config['lmax'] ; lmax_pad = config['lmax_pad']
    spectra = get_spectra(config['spectra'],lmax,lmax_pad)
    seed = config['seed']
    results = {}
    for g in geos.keys():
        results[g] = {}
        for s in spectra.keys():
            results[g][s] = {}
            # Seeded realization so reruns reproduce identical maps.
            imap = generate_map(geos[g][0][-2:],geos[g][1],spectra[s],lmax,seed)
            # Do write and read test
            filename = "temporary_map.fits" # NOT THREAD SAFE
            enmap.write_map(filename,imap)
            imap_in = enmap.read_map(filename)
            check_equality(imap,imap_in)
            for e in config['extracts']:
                print("Doing test for extract ",e['name']," with geometry ",g," and spectrum ",s,"...")
                if e['type']=='slice':
                    box = np.deg2rad(np.array(e['box_deg']))
                    # A partial read from disk must agree with an in-memory submap.
                    cutout = enmap.read_map(filename,box=box)
                    cutout_internal = imap.submap(box=box)
                    check_equality(cutout,cutout_internal)
                    pixels = get_reference_pixels(cutout.shape)
                    results[g][s]['refpixels'] = get_pixel_values(cutout,pixels)
                    results[g][s]['meansquare'] = get_meansquare(cutout)
            os.remove(filename)
    return results,config['result_name']
# Version tag (DDMMYY) for the cached lensing reference results on disk.
lens_version = '071123'
def get_offset_result(res=1.,dtype=np.float64,seed=1):
    """Return (obs_pos, grad, raw_pos) for the deflection-offset regression test.

    A seeded random gradient field (scaled down by 1e-3) deflects the
    full-sky position map via lensing.offset_by_grad.
    """
    shape,wcs = enmap.fullsky_geometry(res=np.deg2rad(res), variant="CC")
    # Three components (e.g. T, Q, U -- pol enabled when shape[-3] > 1).
    shape = (3,) + shape
    obs_pos = enmap.posmap(shape, wcs)
    np.random.seed(seed)
    grad = enmap.enmap(np.random.random(shape),wcs)*1e-3
    raw_pos = enmap.samewcs(lensing.offset_by_grad(obs_pos, grad, pol=shape[-3]>1, geodesic=True), obs_pos)
    return obs_pos,grad,raw_pos
def get_lens_result(res=1.,lmax=400,dtype=np.float64,seed=1):
    """Return a seeded lensed full-sky realization for the lensing regression test.

    Builds the (phi+CMB) input spectrum block from the bundled CAMB scalar
    Cls, then draws lensed and unlensed maps (output="lu").
    """
    shape,wcs = enmap.fullsky_geometry(res=np.deg2rad(res), variant="CC")
    shape = (3,) + shape
    # ells = np.arange(lmax)
    ps_cmb,ps_lens = powspec.read_camb_scalar(DATA_PREFIX+"test_scalCls.dat")
    # Block layout: [0,0] = lensing potential, [1:,1:] = T/E/B CMB spectra.
    ps_lensinput = np.zeros((4,4,ps_cmb.shape[-1]))
    ps_lensinput[0,0] = ps_lens
    ps_lensinput[1:,1:] = ps_cmb
    lensed = lensing.rand_map(shape, wcs, ps_lensinput, lmax=lmax, maplmax=None, dtype=dtype, seed=seed, phi_seed=None, spin=[0,2], output="lu", geodesic=True, verbose=False, delta_theta=None)
    return lensed
# Helper functions for adjointness tests
def zip_alm(alm, ainfo):
    """Pack a complex alm array into an equivalent real vector."""
    # n = index of the first m=1 coefficient; entries before it are the m=0
    # modes, which are purely real, so only their real part is kept.
    n = ainfo.lm2ind(1,1)
    first = alm[...,:n].real
    # m>0 modes are reinterpreted as interleaved (re, im) real pairs.
    # NOTE(review): the sqrt(2) factor presumably keeps the real-packed inner
    # product consistent with the complex one for the adjointness tests --
    # confirm against unzip_alm, which divides by sqrt(2).
    second = alm[...,n:].view(utils.real_dtype(alm.dtype))*2**0.5
    return np.concatenate([first, second],-1)
def unzip_alm(zalm, ainfo):
    """Inverse of zip_alm: unpack a real vector back into complex alm."""
    n = ainfo.lm2ind(1,1)
    oalm = np.zeros(zalm.shape[:-1] + (ainfo.nelem,), utils.complex_dtype(zalm.dtype))
    # m=0 modes: real values become complex with zero imaginary part.
    oalm[...,:n] = zalm[...,:n]
    # m>0 modes: (re, im) pairs reinterpreted as complex, undoing zip_alm's sqrt(2).
    oalm[...,n:] = zalm[...,n:].view(oalm.dtype)/2**0.5
    return oalm
# Length of the real-packed alm vector: the lm2ind(1,1) m=0 coefficients
# contribute one real each, the remaining complex ones contribute two.
def zalm_len(ainfo): return (2*ainfo.nelem-ainfo.lm2ind(1,1)).astype(int)
def zip_mat(mat):
    """Flatten an operator matrix for comparison.

    mat has shape (ncomp_alm, nzalm, ncomp_map, ny, nx). The map-component
    axis is moved next to the alm-component axis and the three leading axes
    are flattened, giving shape (ncomp_alm*ncomp_map*nzalm, ny, nx).
    """
    mat = np.moveaxis(mat, 2, 1)
    # -1 (not the non-standard -2) is numpy's documented "infer this axis" marker.
    mat = mat.reshape((-1,) + mat.shape[-2:])
    return mat
def map_bash(fun, shape, wcs, ncomp, lmax, dtype=np.float64):
    """Build the dense matrix of a map->alm operator by applying `fun` to every
    unit basis map and packing each resulting alm column with zip_alm.

    `fun` must accept fun(map=..., alm=..., ainfo=...) and write its result
    into the preallocated `alm` argument in place.
    """
    ctype = utils.complex_dtype(dtype)
    ainfo = curvedsky.alm_info(lmax)
    nzalm = zalm_len(ainfo)
    umap = enmap.zeros((ncomp,)+shape, wcs, dtype=dtype)
    oalm = np.zeros((ncomp,ainfo.nelem), dtype=ctype)
    mat = np.zeros((ncomp,nzalm,ncomp)+shape, dtype=dtype)
    for I in utils.nditer((ncomp,)+shape):
        # Set one input pixel to 1, apply the operator, record the column.
        umap[I] = 1
        oalm[:] = 0
        fun(map=umap, alm=oalm, ainfo=ainfo)
        mat[(slice(None),slice(None))+I] = zip_alm(oalm, ainfo)
        # Reset for the next basis vector (umap is reused across iterations).
        umap[I] = 0
    return zip_mat(mat)
def alm_bash(fun, shape, wcs, ncomp, lmax, dtype=np.float64):
    """Build the dense matrix of an alm->map operator by applying `fun` to
    every unit basis vector of the real-packed alm space (see zip_alm).

    `fun` must accept fun(alm=..., map=..., ainfo=...) and write its result
    into the preallocated `map` argument in place.
    """
    # Removed an unused local (`ctype = utils.complex_dtype(dtype)`): its value
    # was never read in this function.
    ainfo = curvedsky.alm_info(lmax)
    nzalm = zalm_len(ainfo)
    zalm = np.zeros((ncomp,nzalm), dtype)
    omap = enmap.zeros((ncomp,)+shape, wcs, dtype)
    mat = np.zeros((ncomp,nzalm,ncomp)+shape, dtype)
    for ci in range(ncomp):
        for i in range(nzalm):
            # Why is this 0.5 needed?
            zalm[ci,i] = 1 #if i < ainfo.lm2ind(1,1) else 0.5
            omap[:] = 0
            fun(alm=unzip_alm(zalm,ainfo), map=omap, ainfo=ainfo)
            mat[ci,i] = omap
            zalm[ci,i] = 0
    return zip_mat(mat)
# End of adjointness helpers
class PixelTests(unittest.TestCase):
    def test_almxfl_identity(self):
        """lmul/almxfl with a unit filter must leave the alms unchanged.

        NOTE(review): this method was previously also named ``test_almxfl``;
        a later method of the same name in this class silently overrode it,
        so it never ran. Renamed so unittest discovers both.
        """
        import healpy as hp
        for lmax in [100,400,500,1000]:
            ainfo = curvedsky.alm_info(lmax)
            alms = hp.synalm(np.ones(lmax+1),lmax = lmax, new=True)
            filtering = np.ones(lmax+1)
            alms0 = ainfo.lmul(alms.copy(),filtering)
            assert np.all(np.isclose(alms0,alms))
        for lmax in [100,400,500,1000]:
            ainfo = curvedsky.alm_info(lmax)
            alms = hp.synalm(np.ones(lmax+1),lmax = lmax, new=True)
            alms0 = curvedsky.almxfl(alms.copy(),lambda x: np.ones(x.shape))
            assert np.all(np.isclose(alms0,alms))
def test_rand_alm(self):
def nalm(lmax):
return (lmax + 1) * (lmax + 2) / 2
lmaxes = [50, 100, 150, 300]
mypower = np.ones(50)
for lmax in lmaxes:
palm = curvedsky.rand_alm(mypower, lmax = lmax)
halm = curvedsky.rand_alm_healpy( mypower, lmax = lmax)
print("nalm(%i) = %i, curvedsky.rand_alm gives %s, curvedsky.rand_alm_healpy gives %s "\
% (lmax, \
nalm(lmax),\
palm.shape, \
halm.shape) )
assert np.all(np.isclose(np.asarray(palm.shape),np.asarray(halm.shape)))
def test_offset(self):
obs_pos,grad,raw_pos = get_offset_result(1.)
obs_pos0 = enmap.read_map(DATA_PREFIX+"MM_offset_obs_pos_%s.fits" % lens_version)
grad0 = enmap.read_map(DATA_PREFIX+"MM_offset_grad_%s.fits" % lens_version)
raw_pos0 = enmap.read_map(DATA_PREFIX+"MM_offset_raw_pos_%s.fits" % lens_version)
assert np.all(np.isclose(obs_pos,obs_pos0))
assert np.all(np.isclose(raw_pos,raw_pos0))
assert np.all(np.isclose(grad,grad0))
assert wcsutils.equal(grad.wcs,grad0.wcs)
assert wcsutils.equal(obs_pos.wcs,obs_pos0.wcs)
assert wcsutils.equal(raw_pos.wcs,raw_pos0.wcs)
def test_lensing(self):
lensed,unlensed = get_lens_result(1.,400,np.float64)
lensed0 = enmap.read_map(DATA_PREFIX+"MM_lensed_%s.fits" % lens_version)
unlensed0 = enmap.read_map(DATA_PREFIX+"MM_unlensed_%s.fits" % lens_version)
y,x = lensed0.posmap()
assert np.all(np.isclose(lensed,lensed0))
assert np.all(np.isclose(unlensed,unlensed0))
assert wcsutils.equal(lensed.wcs,lensed0.wcs)
assert wcsutils.equal(unlensed.wcs,unlensed0.wcs)
assert wcsutils.equal(unlensed.wcs,lensed.wcs)
def test_enplot(self):
print("Testing enplot...")
shape,wcs = enmap.geometry(pos=(0,0),shape=(3,100,100),res=0.01)
a = enmap.ones(shape,wcs)
# basic
p = enplot.plot(a)
# colorbar
p = enplot.plot(a, colorbar=True)
# annotation
p = enplot.plot(a, annotate=DATA_PREFIX+"annot.txt")
def test_fft(self):
# Tests that ifft(ifft(imap))==imap, i.e. default normalizations are consistent
shape,wcs = enmap.geometry(pos=(0,0),shape=(3,100,100),res=0.01)
imap = enmap.enmap(np.random.random(shape),wcs)
assert np.all(np.isclose(imap,enmap.ifft(enmap.fft(imap,normalize='phy'),normalize='phy').real))
assert np.all(np.isclose(imap,enmap.ifft(enmap.fft(imap)).real))
    def test_fft_input_shape(self):
        """fft must honor arbitrary `axes` choices and preallocated output buffers.

        Each case checks the numerical result and the memory layout
        (contiguity flags, and buffer sharing when `ft=` is supplied).
        """
        # Tests fft for various shapes and choices of axes.
        # 1D FFT over last axis for 3d array.
        signal = np.ones((1, 2, 5))
        signal[0,1,:] = 10.
        out_exp = np.zeros((1, 2, 5), dtype=np.complex128)
        out_exp[0,0,0] = 5
        out_exp[0,1,0] = 50
        out = fft.fft(signal)
        np.testing.assert_allclose(out, out_exp, atol=1e-12)
        self.assertTrue(out.flags['C_CONTIGUOUS'])
        # 1D FFT over middle axis for 3d array.
        signal = np.ones((1, 5, 2))
        signal[0,:,1] = 10.
        out_exp = np.zeros((1, 5, 2), dtype=np.complex128)
        out_exp[0,0,0] = 5
        out_exp[0,0,1] = 50
        out = fft.fft(signal, axes=[-2])
        np.testing.assert_allclose(out, out_exp, atol=1e-12)
        self.assertTrue(out.flags['C_CONTIGUOUS'])
        # 2D FFT over last 2 axes of 4d array.
        signal = np.ones((1, 2, 5, 10))
        signal[0,1,:] = 10.
        out_exp = np.zeros((1, 2, 5, 10), dtype=np.complex128)
        out_exp[0,0,0,0] = 50
        out_exp[0,1,0,0] = 500
        out = fft.fft(signal, axes=[-2, -1])
        np.testing.assert_allclose(out, out_exp, atol=1e-12)
        self.assertTrue(out.flags['C_CONTIGUOUS'])
        # 2D FFT over last 2 axes of 4d non-contiguous array.
        signal = np.ones((1, 2, 5, 10), dtype=np.complex128)
        signal[0,1,:] = 10
        # The transposed allocation makes `ft` non-contiguous on purpose.
        ft = np.zeros((5, 10, 1, 2), dtype=np.complex128).transpose(2, 3, 0, 1)
        out_exp = np.zeros_like(ft)
        out_exp[0,0,0,0] = 50
        out_exp[0,1,0,0] = 500
        out = fft.fft(signal, ft=ft, axes=[-2, -1])
        np.testing.assert_allclose(out, out_exp, atol=1e-12)
        # The provided output buffer must be used in place, not copied.
        self.assertTrue(np.shares_memory(ft, out))
        self.assertFalse(out.flags['C_CONTIGUOUS'])
        # 2D FFT over middle 2 axes of 4d array.
        signal = np.ones((1, 5, 10, 2))
        signal[0,:,:,1] = 10.
        out_exp = np.zeros((1, 5, 10, 2), dtype=np.complex128)
        out_exp[0,0,0,0] = 50
        out_exp[0,0,0,1] = 500
        out = fft.fft(signal, axes=[-3, -2])
        np.testing.assert_allclose(out, out_exp, atol=1e-12)
        self.assertTrue(out.flags['C_CONTIGUOUS'])
    def test_ifft_input_shape(self):
        """ifft must honor arbitrary `axes` choices and preallocated output buffers.

        Mirrors test_fft_input_shape for the inverse transform, including the
        contiguity and buffer-sharing checks.
        """
        # Tests ifft for various shapes and choices of axes.
        # 1D IFFT over last axis for 3d array.
        fsignal = np.ones((1, 2, 5), dtype=np.complex128)
        fsignal[0,1,:] = 10.
        out_exp = np.zeros((1, 2, 5))
        out_exp[0,0,0] = 5
        out_exp[0,1,0] = 50
        out = fft.ifft(fsignal)
        np.testing.assert_allclose(out, out_exp, atol=1e-12)
        self.assertTrue(out.flags['C_CONTIGUOUS'])
        # 1D IFFT over middle axis for 3d array.
        fsignal = np.ones((1, 5, 2), dtype=np.complex128)
        fsignal[0,:,1] = 10.
        out_exp = np.zeros((1, 5, 2))
        out_exp[0,0,0] = 5
        out_exp[0,0,1] = 50
        out = fft.ifft(fsignal, axes=[-2])
        np.testing.assert_allclose(out, out_exp, atol=1e-12)
        self.assertTrue(out.flags['C_CONTIGUOUS'])
        # 2D IFFT over last 2 axes of 4d array.
        fsignal = np.ones((1, 2, 5, 10), dtype=np.complex128)
        fsignal[0,1,:] = 10.
        out_exp = np.zeros((1, 2, 5, 10))
        out_exp[0,0,0,0] = 50
        out_exp[0,1,0,0] = 500
        out = fft.ifft(fsignal, axes=[-2, -1])
        np.testing.assert_allclose(out, out_exp, atol=1e-12)
        self.assertTrue(out.flags['C_CONTIGUOUS'])
        # 2D IFFT over last 2 axes of 4d non-contiguous array.
        fsignal = np.ones((1, 2, 5, 10), dtype=np.complex128)
        fsignal[0,1,:] = 10.
        # The transposed allocation makes `tod` non-contiguous on purpose.
        tod = np.zeros((5, 10, 1, 2), dtype=np.complex128).transpose(2, 3, 0, 1)
        out_exp = np.zeros_like(tod)
        out_exp[0,0,0,0] = 50
        out_exp[0,1,0,0] = 500
        out = fft.ifft(fsignal, tod=tod, axes=[-2, -1])
        # The provided output buffer must be used in place, not copied.
        self.assertTrue(np.shares_memory(tod, out))
        np.testing.assert_allclose(out, out_exp, atol=1e-12)
        self.assertFalse(out.flags['C_CONTIGUOUS'])
        # 2D IFFT over middle 2 axes of 4d array.
        fsignal = np.ones((1, 5, 10, 2), dtype=np.complex128)
        fsignal[0,:,:,1] = 10.
        out_exp = np.zeros((1, 5, 10, 2))
        out_exp[0,0,0,0] = 50
        out_exp[0,0,0,1] = 500
        out = fft.ifft(fsignal, axes=[-3, -2])
        np.testing.assert_allclose(out, out_exp, atol=1e-12)
        self.assertTrue(out.flags['C_CONTIGUOUS'])
def test_extract(self):
# Tests that extraction is sensible
shape,wcs = enmap.geometry(pos=(0,0),shape=(500,500),res=0.01)
imap = enmap.enmap(np.random.random(shape),wcs)
smap = imap[200:300,200:300]
sshape,swcs = smap.shape,smap.wcs
smap2 = enmap.extract(imap,sshape,swcs)
pixbox = enmap.pixbox_of(imap.wcs,sshape,swcs)
# Do write and read test
filename = "temporary_extract_map.fits" # NOT THREAD SAFE
enmap.write_map(filename,imap)
smap3 = enmap.read_map(filename,pixbox=pixbox)
os.remove(filename)
assert np.all(np.isclose(smap,smap2))
assert np.all(np.isclose(smap,smap3))
assert wcsutils.equal(smap.wcs,smap2.wcs)
assert wcsutils.equal(smap.wcs,smap3.wcs)
def test_fullsky_geometry(self):
# Tests whether number of pixels and area of a full-sky 0.5 arcminute resolution map are correct
print("Testing full sky geometry...")
test_res_arcmin = 0.5
shape,wcs = enmap.fullsky_geometry(res=np.deg2rad(test_res_arcmin/60.),proj='car')
assert shape[0]==21600 and shape[1]==43200
assert abs(enmap.area(shape,wcs) - 4*np.pi) < 1e-6
def test_pixels(self):
"""Runs reference pixel and mean-square comparisons on extracts from randomly generated
maps"""
print("Testing reference pixels...")
results,rname = get_extraction_test_results(TEST_DIR+"/tests.yml")
cresults = pickle.load(open(DATA_PREFIX+"%s.pkl" % rname,'rb'))
assert sorted(results.keys())==sorted(cresults.keys())
for g in results.keys():
assert sorted(results[g].keys())==sorted(cresults[g].keys())
for s in results[g].keys():
assert sorted(results[g][s].keys())==sorted(cresults[g][s].keys())
for e in results[g][s].keys():
assert np.all(np.isclose(results[g][s][e],cresults[g][s][e]))
def test_sim_slice(self):
ps = powspec.read_spectrum(DATA_PREFIX+"test_scalCls.dat")[:1,:1]
test_res_arcmin = 10.0
lmax = 2000
fact = 2.
shape,wcs = enmap.fullsky_geometry(res=np.deg2rad(test_res_arcmin/60.),proj='car')
omap = curvedsky.rand_map(shape, wcs, ps,lmax=lmax)
ofunc = lambda ishape,iwcs: fact*enmap.extract(omap,ishape,iwcs)
nmap = reproject.populate(shape,wcs,ofunc,maxpixy = 400,maxpixx = 400)
assert np.all(np.isclose(nmap/omap,2.))
    def test_b_sign(self):
        """
        We generate a random IQU map with geometry such that cdelt[0]<0
        We transform this to TEB with map2harm and map2alm followed by
        scalar harm2map and alm2map and use these as reference T,E,B maps.
        We flip the original map along the RA direction.
        We transform this to TEB with map2harm and map2alm followed by
        scalar harm2map and alm2map and use these as comparison T,E,B maps.
        We compare these maps.
        """
        ells,cltt,clee,clbb,clte = np.loadtxt(DATA_PREFIX+"cosmo2017_10K_acc3_lensedCls.dat",unpack=True)
        # Assemble the 3x3 TT/EE/BB/TE spectrum block.
        ps_cmb = np.zeros((3,3,ells.size))
        ps_cmb[0,0] = cltt
        ps_cmb[1,1] = clee
        ps_cmb[2,2] = clbb
        ps_cmb[1,0] = clte
        ps_cmb[0,1] = clte
        # Fixed seed: both transforms below act on the same realization.
        np.random.seed(100)
        # Curved-sky is fine
        lmax = 1000
        alm = curvedsky.rand_alm_healpy(ps_cmb,lmax=lmax)
        shape,iwcs = enmap.fullsky_geometry(res=np.deg2rad(10./60.))
        # Reverse the RA axis to get the geometry described in the docstring.
        wcs = enmap.empty(shape,iwcs)[...,::-1].wcs
        shape = (3,) + shape
        imap = curvedsky.alm2map(alm,enmap.empty(shape,wcs))
        oalm = curvedsky.map2alm(imap.copy(),lmax=lmax)
        rmap = curvedsky.alm2map(oalm,enmap.empty(shape,wcs),spin=0)
        imap2 = imap.copy()[...,::-1]
        oalm = curvedsky.map2alm(imap2.copy(),lmax=lmax)
        rmap2 = curvedsky.alm2map(oalm,enmap.empty(shape,wcs),spin=0)
        assert np.all(np.isclose(rmap[0],rmap2[0]))
        assert np.all(np.isclose(rmap[1],rmap2[1]))
        assert np.all(np.isclose(rmap[2],rmap2[2]))
        # Flat-sky
        px = 2.0
        N = 300
        shape,iwcs = enmap.geometry(pos=(0,0),res=np.deg2rad(px/60.),shape=(300,300))
        shape = (3,) + shape
        a = enmap.zeros(shape,iwcs)
        a = a[...,::-1]
        wcs = a.wcs
        seed = 100
        imap = enmap.rand_map(shape,wcs,ps_cmb,seed=seed)
        kmap = enmap.map2harm(imap.copy())
        rmap = enmap.harm2map(kmap,spin=0) # reference map
        imap = imap[...,::-1]
        kmap = enmap.map2harm(imap.copy())
        rmap2 = enmap.harm2map(kmap,spin=0)[...,::-1] # comparison map
        assert np.all(np.isclose(rmap[0],rmap2[0]))
        # E/B comparisons use a looser absolute tolerance in the flat-sky case.
        assert np.all(np.isclose(rmap[1],rmap2[1],atol=1e0))
        assert np.all(np.isclose(rmap[2],rmap2[2],atol=1e0))
def test_plain_wcs(self):
# Test area and box for a small Cartesian geometry
shape,wcs = enmap.geometry(res=np.deg2rad(1./60.),shape=(600,600),pos=(0,0),proj='plain')
box = np.rad2deg(enmap.box(shape,wcs))
area = np.rad2deg(np.rad2deg(enmap.area(shape,wcs)))
assert np.all(np.isclose(box,np.array([[-5,-5],[5,5]])))
assert np.isclose(area,100.)
# and for an artifical Cartesian geometry with area>4pi
shape,wcs = enmap.geometry(res=np.deg2rad(10),shape=(100,100),pos=(0,0),proj='plain')
box = np.rad2deg(enmap.box(shape,wcs))
area = np.rad2deg(np.rad2deg(enmap.area(shape,wcs)))
assert np.all(np.isclose(box,np.array([[-500,-500],[500,500]])))
assert np.isclose(area,1000000)
    def test_pospix(self):
        """Consistency checks for position/pixel geometry helpers.

        - posmap separable vs non-separable on CAR must agree
        - pixshape/pixsize/pixsizemap on a plain geometry equal the nominal values
        - a fast CAR-specific pixel-area computation must match pixsizemap
        """
        # Posmap separable and non-separable on CAR
        for res in [6,12,24]:
            shape,wcs = enmap.fullsky_geometry(res=np.deg2rad(res/60.),proj='car')
            posmap1 = enmap.posmap(shape,wcs)
            posmap2 = enmap.posmap(shape,wcs,separable=True)
            assert np.all(np.isclose(posmap1,posmap2))
        # Pixmap plain
        pres = 0.5
        shape,wcs = enmap.geometry(pos=(0,0),shape=(30,30),res=pres*u.degree,proj='plain')
        yp,xp = enmap.pixshapemap(shape,wcs)
        assert np.all(np.isclose(yp,pres*u.degree))
        assert np.all(np.isclose(xp,pres*u.degree))
        yp,xp = enmap.pixshape(shape,wcs)
        parea = enmap.pixsize(shape,wcs)
        assert np.isclose(parea,(pres*u.degree)**2)
        assert np.isclose(yp,pres*u.degree)
        assert np.isclose(xp,pres*u.degree)
        pmap = enmap.pixsizemap(shape,wcs)
        assert np.all(np.isclose(pmap,(pres*u.degree)**2))
        # Pixmap CAR
        pres = 0.1
        dec_cut = 89.5 # pixsizemap is not accurate near the poles currently
        shape,wcs = enmap.band_geometry(dec_cut=dec_cut*u.degree,res=pres*u.degree,proj='car')
        # Current slow and general but inaccurate near the poles implementation
        pmap = enmap.pixsizemap(shape,wcs)
        # Fast CAR-specific pixsizemap implementation
        dra, ddec = wcs.wcs.cdelt*u.degree
        dec = enmap.posmap([shape[-2],1],wcs)[0,:,0]
        # Exact spherical-cap area of each constant-dec pixel row, clamped at the poles.
        area = np.abs(dra*(np.sin(np.minimum(np.pi/2.,dec+ddec/2))-np.sin(np.maximum(-np.pi/2.,dec-ddec/2))))
        Nx = shape[-1]
        pmap2 = enmap.ndmap(area[...,None].repeat(Nx,axis=-1),wcs)
        assert np.all(np.isclose(pmap,pmap2))
def test_project_nn(self):
shape,wcs = enmap.fullsky_geometry(res=np.deg2rad(12/60.),proj='car')
shape2,wcs2 = enmap.fullsky_geometry(res=np.deg2rad(6/60.),proj='car')
shape3,wcs3 = enmap.fullsky_geometry(res=np.deg2rad(24/60.),proj='car')
imap = enmap.ones(shape,wcs)
omap2 = enmap.project(imap,shape2,wcs2,order=0,border='wrap')
omap3 = enmap.project(imap,shape3,wcs3,order=0,border='wrap')
assert np.all(np.isclose(omap2,1))
assert np.all(np.isclose(omap3,1))
def test_wcsunequal(self):
shape1,wcs1 = enmap.geometry(pos=(0,0),shape=(100,100),res=1*u.arcmin,proj='car')
shape1,wcs2 = enmap.geometry(pos=(0,0),shape=(100,100),res=1*u.arcmin,proj='cea')
shape1,wcs3 = enmap.geometry(pos=(10,10),shape=(100,100),res=1*u.arcmin,proj='car')
shape1,wcs4 = enmap.geometry(pos=(0,0),shape=(100,100),res=2*u.arcmin,proj='car')
assert not(wcsutils.equal(wcs1,wcs2))
assert not(wcsutils.equal(wcs1,wcs3))
assert not(wcsutils.equal(wcs1,wcs4))
def test_scale(self):
# Test (with a plain geometry) that scale_geometry
# will result in geometries with the same bounding box
# but different area pixel
pres = 0.5
ufact = 2
dfact = 0.5
shape,wcs = enmap.geometry(pos=(0,0),shape=(30,30),res=pres*u.arcmin,proj='plain')
ushape,uwcs = enmap.scale_geometry(shape,wcs,ufact)
dshape,dwcs = enmap.scale_geometry(shape,wcs,dfact)
box = enmap.box(shape,wcs)
ubox = enmap.box(ushape,uwcs)
dbox = enmap.box(dshape,dwcs)
parea = enmap.pixsize(shape,wcs)
uparea = enmap.pixsize(ushape,uwcs)
dparea = enmap.pixsize(dshape,dwcs)
assert np.all(np.isclose(box,ubox))
assert np.all(np.isclose(box,dbox))
assert np.isclose(parea/(ufact**2),uparea)
assert np.isclose(parea/(dfact**2),dparea)
    def test_prepare_alm_mmax(self):
        """prepare_alm must infer or honor lmax/mmax consistently (6 cases below)."""
        # Check if mmax is correctly handled by prepare_alm.
        # Create lmax=mmax=3 alm array and corresponding alm_info.
        lmax = 3
        nalm = 10 # Triangular alm array of lmax=3 has 10 elements.
        alm_in = np.arange(nalm, dtype=np.complex128)
        ainfo_in = curvedsky.alm_info(
            lmax=3, mmax=3, nalm=nalm, stride=1, layout="triangular")
        # Case 1: provide only alm.
        alm_out, ainfo_out = curvedsky.prepare_alm(alm=alm_in, ainfo=None)
        np.testing.assert_array_almost_equal(alm_out, alm_in)
        self.assertEqual(ainfo_out.lmax, ainfo_in.lmax)
        self.assertEqual(ainfo_out.mmax, ainfo_in.mmax)
        self.assertEqual(ainfo_out.nelem, ainfo_in.nelem)
        # Case 2: provide only alm_info.
        alm_out, ainfo_out = curvedsky.prepare_alm(alm=None, ainfo=ainfo_in)
        # Expect zero array.
        np.testing.assert_array_almost_equal(alm_out, alm_in * 0)
        self.assertEqual(ainfo_out.lmax, ainfo_in.lmax)
        self.assertEqual(ainfo_out.mmax, ainfo_in.mmax)
        self.assertEqual(ainfo_out.nelem, ainfo_in.nelem)
        # Case 3: provide alm and alm_info
        alm_out, ainfo_out = curvedsky.prepare_alm(alm=alm_in, ainfo=ainfo_in)
        np.testing.assert_array_almost_equal(alm_out, alm_in)
        self.assertEqual(ainfo_out.lmax, ainfo_in.lmax)
        self.assertEqual(ainfo_out.mmax, ainfo_in.mmax)
        self.assertEqual(ainfo_out.nelem, ainfo_in.nelem)
        # Case 4: provide only alm with lmax=3 and mmax=1.
        # This should currently fail.
        nalm = 7
        alm_in = np.arange(7, dtype=np.complex128)
        self.assertRaises(AssertionError, curvedsky.prepare_alm,
                          **dict(alm=alm_in, ainfo=None, lmax=lmax))
        # Case 5: provide only alm_info with lmax=3 and mmax=1.
        nalm = 7
        ainfo_in = curvedsky.alm_info(
            lmax=3, mmax=1, nalm=nalm, stride=1, layout="triangular")
        alm_exp = np.zeros(7, dtype=np.complex128)
        alm_out, ainfo_out = curvedsky.prepare_alm(alm=None, ainfo=ainfo_in)
        np.testing.assert_array_almost_equal(alm_out, alm_exp)
        self.assertEqual(ainfo_out.lmax, ainfo_in.lmax)
        self.assertEqual(ainfo_out.mmax, ainfo_in.mmax)
        self.assertEqual(ainfo_out.nelem, ainfo_in.nelem)
        # Case 6: provide both alm and alm_info with lmax=3 and mmax=1.
        # This should be allowed.
        nalm = 7
        ainfo_in = curvedsky.alm_info(
            lmax=3, mmax=1, nalm=nalm, stride=1, layout="triangular")
        alm_in = np.arange(7, dtype=np.complex128)
        alm_out, ainfo_out = curvedsky.prepare_alm(alm=alm_in, ainfo=ainfo_in)
        np.testing.assert_array_almost_equal(alm_out, alm_in)
        self.assertEqual(ainfo_out.lmax, ainfo_in.lmax)
        self.assertEqual(ainfo_out.mmax, ainfo_in.mmax)
        self.assertEqual(ainfo_out.nelem, ainfo_in.nelem)
def test_lens_alms(self):
# We generate phi alms and convert them to kappa and back
lmax = 100
ps = np.zeros(lmax+1)
ls = np.arange(lmax+1)
ps[ls>=2] = 1./ls[ls>=2]
phi_alm = curvedsky.rand_alm(ps,lmax=lmax)
kappa_alm = lensing.phi_to_kappa(phi_alm)
phi_alm2 = lensing.kappa_to_phi(kappa_alm)
np.testing.assert_array_almost_equal(phi_alm, phi_alm2)
def test_downgrade(self):
shape,wcs = enmap.geometry(pos=(0,0),shape=(100,100),res=0.01)
imap = enmap.ones(shape,wcs)
for dfact in [None,1]:
omap = enmap.downgrade(imap,dfact)
np.testing.assert_equal(imap,omap)
dfact = 2
omap = enmap.downgrade(imap,dfact,op=np.sum)
np.testing.assert_equal(omap,np.ones(enmap.scale_geometry(shape,wcs,1./dfact)[0])*4)
    def test_almxfl(self):
        """almxfl with a unit (nells,) filter must leave (nalm,) and
        (ncomp,nalm) shaped alms unchanged.

        NOTE(review): this redefines an earlier method of the same name in
        this class, so only this version is discovered and run by unittest.
        """
        # We try to filter alms of shape (nalms,) and (ncomp,nalms) with
        # a filter of shape (nells,)
        lmax = 30
        ells = np.arange(lmax+1)
        nells = ells.size
        for ncomp in range(4):
            if ncomp==0:
                fl = np.ones((nells,))
                ps = np.zeros((nells,))
                ps[ells>1] = 1./ells[ells>1]
            else:
                fl = np.ones((nells))
                ps = np.zeros((ncomp,ncomp,nells))
                for i in range(ncomp):
                    ps[i,i][ells>1] = 1./ells[ells>1]
            ialm = curvedsky.rand_alm(ps,lmax=lmax)
            oalm = curvedsky.almxfl(ialm,fl)
            np.testing.assert_array_almost_equal(ialm, oalm)
    def test_alm2map_2d_roundtrip(self):
        """map2alm(alm2map(alm)) must recover alm for 1d/2d/3d inputs in both
        single and double precision on a full-sky CAR geometry."""
        # Test curvedsky's alm2map/map2alm.
        lmax = 30
        ainfo = curvedsky.alm_info(lmax)
        nrings = lmax + 2
        nphi = 2 * lmax + 1
        shape, wcs = enmap.fullsky_geometry(shape=(nrings,nphi))
        # Test different input shapes and dtypes.
        # Case 1a: 1d double precision.
        spin = 0
        alm = np.zeros((ainfo.nelem), dtype=np.complex128)
        # Excite only the highest (l,m)=(lmax,lmax) coefficient.
        i = ainfo.lm2ind(lmax,lmax)
        alm[i] = 1. + 1.j
        omap = enmap.zeros(shape, wcs, np.float64)
        curvedsky.alm2map(alm, omap, spin=spin)
        alm_out = curvedsky.map2alm(omap, spin=spin, ainfo=ainfo)
        np.testing.assert_array_almost_equal(alm_out, alm)
        # Case 1b: 1d single precision.
        spin = 0
        alm = np.zeros((ainfo.nelem), dtype=np.complex64)
        alm[i] = 1. + 1.j
        omap = enmap.zeros(shape, wcs, np.float32)
        curvedsky.alm2map(alm, omap, spin=spin)
        alm_out = curvedsky.map2alm(omap, spin=spin, ainfo=ainfo)
        np.testing.assert_array_almost_equal(alm_out, alm)
        # Case 2a: 2d double precision.
        spin = 1
        nspin = 2
        alm = np.zeros((nspin, ainfo.nelem), dtype=np.complex128)
        alm[0,i] = 1. + 1.j
        alm[1,i] = 2. - 2.j
        omap = enmap.zeros((nspin,)+shape, wcs, np.float64)
        curvedsky.alm2map(alm, omap, spin=spin)
        alm_out = curvedsky.map2alm(omap, spin=spin, ainfo=ainfo)
        np.testing.assert_array_almost_equal(alm_out, alm)
        # Case 2b: 2d single precision.
        spin = 1
        nspin = 2
        alm = np.zeros((nspin, ainfo.nelem), dtype=np.complex64)
        alm[0,i] = 1. + 1.j
        alm[1,i] = 2. - 2.j
        omap = enmap.zeros((nspin,)+shape, wcs, np.float32)
        curvedsky.alm2map(alm, omap, spin=spin)
        alm_out = curvedsky.map2alm(omap, spin=spin, ainfo=ainfo)
        np.testing.assert_array_almost_equal(alm_out, alm)
        # Case 3a: 3d double precision.
        spin = 1
        nspin = 2
        ntrans = 3
        alm = np.zeros((ntrans, nspin, ainfo.nelem), dtype=np.complex128)
        alm[0,0,i] = 1. + 1.j
        alm[0,1,i] = 2. - 2.j
        alm[1,0,i] = 3. + 3.j
        alm[1,1,i] = 4. - 4.j
        alm[2,0,i] = 5. + 5.j
        alm[2,1,i] = 6. - 6.j
        omap = enmap.zeros((ntrans,nspin)+shape, wcs, np.float64)
        curvedsky.alm2map(alm, omap, spin=spin)
        alm_out = curvedsky.map2alm(omap, spin=spin, ainfo=ainfo)
        np.testing.assert_array_almost_equal(alm_out, alm)
        # Case 3b: 3d single precision.
        spin = 1
        nspin = 2
        ntrans = 3
        alm = np.zeros((ntrans, nspin, ainfo.nelem), dtype=np.complex64)
        alm[0,0,i] = 1. + 1.j
        alm[0,1,i] = 2. - 2.j
        alm[1,0,i] = 3. + 3.j
        alm[1,1,i] = 4. - 4.j
        alm[2,0,i] = 5. + 5.j
        alm[2,1,i] = 6. - 6.j
        omap = enmap.zeros((ntrans,nspin)+shape, wcs, np.float32)
        curvedsky.alm2map(alm, omap, spin=spin)
        alm_out = curvedsky.map2alm(omap, spin=spin, ainfo=ainfo)
        np.testing.assert_array_almost_equal(alm_out, alm)
    def test_alm2map_healpix_roundtrip(self):
        """map2alm_healpix(alm2map_healpix(alm)) must recover alm for
        1d/2d/3d inputs in both precisions on an nside=2 HEALPix grid."""
        # Test curvedsky's alm2map/map2alm.
        nside = 2
        lmax = nside*2
        # NOTE(review): this recomputation is a no-op for nside=2; it only
        # matters if the initial nside above is changed.
        nside = lmax//2
        ainfo = curvedsky.alm_info(lmax)
        npix = 12*nside**2
        # 7 iterations needed to reach 6 digits of
        # precision. This is more than the 3 default
        # in healpy and the 0 default in pixell
        niter = 7
        for dtype in [np.float64, np.float32]:
            ctype = utils.complex_dtype(dtype)
            # Case 1: 1d
            spin = 0
            alm = np.zeros((ainfo.nelem), dtype=ctype)
            # Excite only the highest (l,m)=(lmax,lmax) coefficient.
            i = ainfo.lm2ind(lmax,lmax)
            alm[i] = 1. + 1.j
            omap = np.zeros(npix, dtype)
            curvedsky.alm2map_healpix(alm, omap, spin=spin)
            alm_out = curvedsky.map2alm_healpix(omap, spin=spin, ainfo=ainfo, niter=niter)
            np.testing.assert_array_almost_equal(alm_out, alm)
            # Case 2: 2d
            spin = 1
            nspin = 2
            alm = np.zeros((nspin, ainfo.nelem), dtype=ctype)
            alm[0,i] = 1. + 1.j
            alm[1,i] = 2. - 2.j
            omap = np.zeros((nspin,npix), dtype)
            curvedsky.alm2map_healpix(alm, omap, spin=spin)
            alm_out = curvedsky.map2alm_healpix(omap, spin=spin, ainfo=ainfo, niter=niter)
            np.testing.assert_array_almost_equal(alm_out, alm)
            # Case 3: 3d
            spin = 1
            nspin = 2
            ntrans = 3
            alm = np.zeros((ntrans, nspin, ainfo.nelem), dtype=ctype)
            alm[0,0,i] = 1. + 1.j
            alm[0,1,i] = 2. - 2.j
            alm[1,0,i] = 3. + 3.j
            alm[1,1,i] = 4. - 4.j
            alm[2,0,i] = 5. + 5.j
            alm[2,1,i] = 6. - 6.j
            omap = np.zeros((ntrans,nspin,npix), dtype)
            curvedsky.alm2map_healpix(alm, omap, spin=spin)
            alm_out = curvedsky.map2alm_healpix(omap, spin=spin, ainfo=ainfo, niter=niter)
            np.testing.assert_array_almost_equal(alm_out, alm)
# MM: Re-enabled 09/17/2024
# --Disabled for now because the version of ducc currently on pypi
# has an adjointness bug. It's fixed in the ducc git repo.--
def test_adjointness(self):
# This tests if alm2map_adjoint is the adjoint of alm2map,
# and if map2alm_adjoint is the adjoint of map2alm.
# (This doesn't test if they're correct, just that they're
# consistent with each other). This test is a bit slow, taking
# 5 s or so. It would be much faster if we dropped the ncomp=3 case.
for dtype in [np.float32, np.float64]:
for ncomp in [1,3]:
# Define our geometries
geos = []
res = 30*utils.degree
shape, wcs = enmap.fullsky_geometry(res=res, variant="fejer1")
geos.append(("fullsky_fejer1", shape, wcs))
shape, wcs = enmap.fullsky_geometry(res=res, variant="cc")
geos.append(("fullsky_cc", shape, wcs))
lmax = shape[-2]-2
shape, wcs = enmap.Geometry(shape, wcs)[3:-3,3:-3]
geos.append(("patch_cc", shape, wcs))
wcs = wcs.deepcopy()
wcs.wcs.crpix += 0.123
geos.append(("patch_gen_cyl", shape, wcs))
shape, wcs = enmap.geometry(np.array([[-45,45],[45,-45]])*utils.degree, res=res, proj="tan")
geos.append(("patch_tan", shape, wcs))
for gi, (name, shape, wcs) in enumerate(geos):
mat1 = alm_bash(curvedsky.alm2map, shape, wcs, ncomp, lmax, dtype)
mat2 = map_bash(curvedsky.alm2map_adjoint, shape, wcs, ncomp, lmax, dtype)
np.testing.assert_array_almost_equal(mat1, mat2)
mat1 = map_bash(curvedsky.map2alm, shape, wcs, ncomp, lmax, dtype)
mat2 = alm_bash(curvedsky.map2alm_adjoint, shape, wcs, ncomp, lmax, dtype)
np.testing.assert_array_almost_equal(mat1, mat2)
#def test_sharp_alm2map_der1(self):
#
# # Test the wrapper around libsharps alm2map_der1.
# lmax = 3
# ainfo = sharp.alm_info(lmax)
# nrings = lmax + 1
# nphi = 2 * lmax + 1
# minfo = sharp.map_info_gauss_legendre(nrings, nphi)
# sht = sharp.sht(minfo, ainfo)
# # Test different input shapes and dtypes.
# # Case 1a: 1d double precision.
# alm = np.zeros((ainfo.nelem), dtype=np.complex128)
# alm[4] = 1. + 1.j
# omap = sht.alm2map_der1(alm)
# # Compare to expected value by doing spin 1 transform
# # on sqrt(ell (ell + 1)) alm.
# alm_spin = np.zeros((2, ainfo.nelem), dtype=np.complex128)
# alm_spin[0] = alm * np.sqrt(2)
# omap_exp = sht.alm2map(alm_spin, spin=1)
# np.testing.assert_array_almost_equal(omap, omap_exp)
# # Case 1b: 1d single precision.
# alm = np.zeros((ainfo.nelem), dtype=np.complex64)
# alm[4] = 1. + 1.j
# omap = sht.alm2map_der1(alm)
# # Compare to expected value by doing spin 1 transform
# # on sqrt(ell (ell + 1)) alm.
# alm_spin = np.zeros((2, ainfo.nelem), dtype=np.complex64)
# alm_spin[0] = alm * np.sqrt(2)
# omap_exp = sht.alm2map(alm_spin, spin=1)
# np.testing.assert_array_almost_equal(omap, omap_exp)
# # Case 2a: 2d double precision.
# ntrans = 3
# alm = np.zeros((ntrans, ainfo.nelem), dtype=np.complex128)
# alm[0,4] = 1. + 1.j
# alm[1,4] = 2. + 2.j
# alm[2,4] = 3. + 3.j
# omap = sht.alm2map_der1(alm)
# # Compare to expected value by doing spin 1 transform
# # on sqrt(ell (ell + 1)) alm.
# alm_spin = np.zeros((ntrans, 2, ainfo.nelem), dtype=np.complex128)
# alm_spin[0,0] = alm[0] * np.sqrt(2)
# alm_spin[1,0] = alm[1] * np.sqrt(2)
# alm_spin[2,0] = alm[2] * np.sqrt(2)
# omap_exp = sht.alm2map(alm_spin, spin=1)
# np.testing.assert_array_almost_equal(omap, omap_exp)
# # Case 2b: 2d single precision.
# ntrans = 3
# alm = np.zeros((ntrans, ainfo.nelem), dtype=np.complex64)
# alm[0,4] = 1. + 1.j
# alm[1,4] = 2. + 2.j
# alm[2,4] = 3. + 3.j
# omap = sht.alm2map_der1(alm)
# # Compare to expected value by doing spin 1 transform
# # on sqrt(ell (ell + 1)) alm.
# alm_spin = np.zeros((ntrans, 2, ainfo.nelem), dtype=np.complex64)
# alm_spin[0,0] = alm[0] * np.sqrt(2)
# alm_spin[1,0] = alm[1] * np.sqrt(2)
# alm_spin[2,0] = alm[2] * np.sqrt(2)
# omap_exp = sht.alm2map(alm_spin, spin=1)
# np.testing.assert_array_almost_equal(omap, omap_exp)
    def test_thumbnails(self):
        """Thumbnails of simulated Gaussian sources at high declination must
        match an equatorial reference profile to sub-percent accuracy."""
        print("Testing thumbnails...")
        # Make a geometry far away from the equator
        dec_min = 70 * u.degree
        dec_max = 80 * u.degree
        res = 0.5 * u.arcmin
        shape,wcs = enmap.band_geometry((dec_min,dec_max),res=res)
        # Create a set of point source positions separated by
        # 2 degrees but with 1 column wrapping around the RA
        # direction
        width = 120 * u.arcmin
        Ny = int((dec_max-dec_min)/(width))
        Nx = int((2*np.pi/(width)))
        # Drop the edge rows/columns so all sources are interior (with RA wrap).
        pys = np.linspace(0,shape[0],Ny)[1:-1]
        pxs = np.linspace(0,shape[1],Nx)[:-1]
        Ny = len(pys)
        Nx = len(pxs)
        xx,yy = np.meshgrid(pxs,pys)
        xx = xx.reshape(-1)
        yy = yy.reshape(-1)
        ps = np.vstack((yy,xx))
        decs,ras = enmap.pix2sky(shape,wcs,ps)
        # Simulate these sources with unit peak value and 2.5 arcmin FWHM
        N = ps.shape[1]
        srcs = np.zeros((N,3))
        srcs[:,0] = decs
        srcs[:,1] = ras
        srcs[:,2] = ras*0 + 1
        sigma = 2.5 * u.fwhm * u.arcmin
        omap = pointsrcs.sim_srcs(shape,wcs,srcs,beam=sigma)
        # Reproject thumbnails centered on the sources
        # with gnomonic/tangent projection
        proj = "tan"
        r = 10*u.arcmin
        ret = reproject.thumbnails(omap, srcs[:,:2], r=r, res=res, proj=proj,
                                   apod=2*u.arcmin, order=3, oversample=2,pixwin=False)
        # Create a reference source at the equator to compare this against
        ishape,iwcs = enmap.geometry(shape=ret.shape,res=res,pos=(0,0),proj=proj)
        imodrmap = enmap.modrmap(ishape,iwcs)
        model = np.exp(-imodrmap**2./2./sigma**2.)
        # Make sure all thumbnails agree with the reference at the
        # sub-percent level
        for i in range(ret.shape[0]):
            diff = ret[i] - model
            assert np.all(np.isclose(diff,0,atol=1e-3))
    def test_tilemap(self):
        """Exercise tilemap geometry bookkeeping, active-tile selection,
        tile arithmetic/broadcasting and ufunc propagation."""
        shape, wcs = enmap.fullsky_geometry(30*utils.degree, variant="CC")
        assert shape == (7,12)
        geo = tilemap.geometry((3,)+shape, wcs, tile_shape=(2,2))
        # A fresh geometry starts with no active tiles.
        assert len(geo.active) == 0
        assert np.all(geo.lookup<0)
        assert geo.ntile == 24
        assert geo.nactive == 0
        assert geo.tile_shape == (2,2)
        assert geo.grid_shape == (4,6)
        assert tuple(geo.tile_shapes[ 0]) == (2,2)
        assert tuple(geo.tile_shapes[ 5]) == (2,2)
        # Bottom-row tiles are truncated to height 1 (7 rows / tile height 2).
        assert tuple(geo.tile_shapes[18]) == (1,2)
        assert tuple(geo.tile_shapes[23]) == (1,2)
        assert geo.ind2grid(7) == (1,1)
        assert geo.grid2ind(1,1) == 7
        geo = geo.copy(active=[1])
        assert geo.nactive == 1
        assert np.sum(geo.lookup>=0) == 1
        assert geo.active[0] == 1
        assert geo.lookup[1] == 0
        geo2 = geo.copy(active=[0,1,2])
        assert geo.nactive == 1
        assert geo2.nactive == 3
        # compatible(): 2 = identical active sets, 1 = same tiling, 0 = incompatible.
        assert geo.compatible(geo) == 2
        assert geo.compatible(geo2) == 1
        geo3 = tilemap.geometry((3,)+shape, wcs, tile_shape=(2,3))
        assert geo.compatible(geo3) == 0
        del geo2, geo3
        # Fill each map's active tiles with distinct markers to track mixing.
        m1 = tilemap.zeros(geo.copy(active=[1,2]))
        m2 = tilemap.zeros(geo.copy(active=[2,3,4]))
        m3 = tilemap.zeros(geo.copy(active=[2]))
        for a, i in enumerate(m1.geometry.active): m1.active_tiles[a] = i
        for a, i in enumerate(m2.geometry.active): m2.active_tiles[a] = i*10
        for a, i in enumerate(m3.geometry.active): m3.active_tiles[a] = i*100
        assert m1[0,0] == 1
        assert np.all(m1.tiles[1] == m1.active_tiles[0])
        # Addition takes the union of active tiles, in either operand order.
        m12 = m1+m2
        m21 = m2+m1
        assert(m12.nactive == 4)
        assert(m21.nactive == 4)
        assert(np.all(m12.tiles[1] == 1))
        assert(np.all(m21.tiles[1] == 1))
        assert(np.all(m12.tiles[2] == 22))
        assert(np.all(m21.tiles[2] == 22))
        assert(sorted(m12.geometry.active)==sorted(m21.geometry.active))
        # In-place add requires the right operand's tiles to be a subset.
        m1 += m3
        assert np.all(m1.tiles[2] == 202)
        with self.assertRaises(ValueError): m3 += m1
        m1[:] = 0
        # Ufuncs preserve the tile geometry.
        m1c = np.cos(m1)
        assert m1c.geometry.nactive == 2
        assert np.allclose(m1c, 1)
|
simonsobsREPO_NAMEpixellPATH_START.@pixell_extracted@pixell-master@tests@test_pixell.py@.PATH_END.py
|
{
"filename": "legrand.py",
"repo_name": "jabesq-org/pyatmo",
"repo_path": "pyatmo_extracted/pyatmo-master/src/pyatmo/modules/legrand.py",
"type": "Python"
}
|
"""Module to represent Legrand modules."""
from __future__ import annotations
import logging
from pyatmo.modules.module import (
BatteryMixin,
ContactorMixin,
DimmableMixin,
Dimmer,
EnergyHistoryLegacyMixin,
EnergyHistoryMixin,
Fan,
FirmwareMixin,
Module,
OffloadMixin,
PowerMixin,
RfMixin,
ShutterMixin,
Switch,
SwitchMixin,
WifiMixin,
)
LOG = logging.getLogger(__name__)
# pylint: disable=R0901
# Device-type stubs: each class only composes capability mixins; no extra behavior.
class NLG(FirmwareMixin, OffloadMixin, WifiMixin, Module):
    """Legrand gateway."""


class NLT(DimmableMixin, FirmwareMixin, BatteryMixin, SwitchMixin, Module):
    """Legrand global remote control...but also wireless switch, like NLD."""


class NLP(Switch, OffloadMixin):
    """Legrand plug."""


class NLPM(Switch, OffloadMixin):
    """Legrand mobile plug."""


class NLPO(ContactorMixin, OffloadMixin, Switch):
    """Legrand contactor."""


class NLPT(Switch, OffloadMixin):
    """Legrand latching relay/teleruptor."""


class NLPBS(Switch):
    """Legrand British Standard plug."""


class NLF(Dimmer):
    """Legrand 2-wire light switch."""


class NLFN(Dimmer):
    """Legrand light switch with neutral."""


class NLFE(Dimmer):
    """Legrand On-Off dimmer switch evolution."""


class NLM(Switch):
    """Legrand light micro module."""


class NLIS(Switch):
    """Legrand double switch."""
# Remote, lighting, shutter and metering device stubs.
class NLD(DimmableMixin, FirmwareMixin, BatteryMixin, SwitchMixin, Module):
    """Legrand Double On/Off dimmer remote. Wireless 2 button switch light."""


class NLL(Switch, WifiMixin):
    """Legrand / BTicino italian light switch with neutral."""


class NLV(FirmwareMixin, RfMixin, ShutterMixin, Module):
    """Legrand / BTicino shutters."""


class NLLV(FirmwareMixin, RfMixin, ShutterMixin, Module):
    """Legrand / BTicino shutters."""


class NLLM(FirmwareMixin, RfMixin, ShutterMixin, Module):
    """Legrand / BTicino shutters."""


class NLPC(FirmwareMixin, EnergyHistoryMixin, PowerMixin, Module):
    """Legrand / BTicino connected energy meter."""


class NLE(FirmwareMixin, EnergyHistoryLegacyMixin, Module):
    """Legrand / BTicino connected ecometer.

    No power reading is reported for the NLE in the home status API.
    """


class NLPS(FirmwareMixin, EnergyHistoryMixin, PowerMixin, Module):
    """Legrand / BTicino smart load shedder."""
class NLC(Switch, OffloadMixin):
"""Legrand / BTicino cable outlet."""
class NLDD(FirmwareMixin, Module):
"""Legrand NLDD dimmer remote control."""
class NLUP(Switch):
"""Legrand NLUP Power outlet."""
class NLAO(FirmwareMixin, SwitchMixin, Module):
"""Legrand wireless batteryless light switch."""
class NLUI(FirmwareMixin, SwitchMixin, Module):
"""Legrand NLUI in-wall switch."""
class NLUF(Dimmer):
"""Legrand NLUF device stub."""
class NLUO(Dimmer):
"""Legrand NLUO device stub."""
class NLLF(Fan, PowerMixin, EnergyHistoryMixin):
"""Legrand NLLF fan/ventilation device."""
class NLunknown(Module):
"""NLunknown device stub."""
class NLAS(Module):
"""NLAS wireless batteryless scene switch."""
class Z3L(Dimmer):
"""Zigbee 3 Light."""
class EBU(Module):
"""EBU gas meter."""
class NLTS(Module):
"""NLTS motion sensor."""
class NLPD(Switch, OffloadMixin):
"""NLPD dry contact."""
class NLJ(FirmwareMixin, RfMixin, ShutterMixin, Module):
"""Legrand garage door opener."""
|
jabesq-orgREPO_NAMEpyatmoPATH_START.@pyatmo_extracted@pyatmo-master@src@pyatmo@modules@legrand.py@.PATH_END.py
|
{
"filename": "README.md",
"repo_name": "axgoujon/convex_ridge_regularizers",
"repo_path": "convex_ridge_regularizers_extracted/convex_ridge_regularizers-main/inverse_problems/mri/README.md",
"type": "Markdown"
}
|
# Single- and multi-coil MRI
**Preprocessed data:** download the data [from Zenodo](https://doi.org/10.5281/zenodo.8302121), unzip it, and put it in the data folder under the name `data_sets`.
The data contains validation (aka calibration) and test sets with:
- subsampling cartesian masks,
- sensitivity masks,
- ground truth image,
- measurements,
for the various settings explored: single- and multi-coil MRI, various acceleration rates (2, 4, and 8), synthetic noise and different image type (fat suppression or not).
For completeness, we also provide the code that was used to generate the preprocessed data from the raw data. (N.B.: the BART library is required to regenerate the datasets from scratch; it is not needed if you use the download link above.)
|
axgoujonREPO_NAMEconvex_ridge_regularizersPATH_START.@convex_ridge_regularizers_extracted@convex_ridge_regularizers-main@inverse_problems@mri@README.md@.PATH_END.py
|
{
"filename": "_opacity.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/heatmapgl/_opacity.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class OpacityValidator(_plotly_utils.basevalidators.NumberValidator):
    """Validator for the ``opacity`` property of ``heatmapgl`` traces.

    Accepts a number clamped to the closed interval [0, 1]; edits are
    applied with the "style" edit type unless overridden by the caller.
    """

    def __init__(self, plotly_name="opacity", parent_name="heatmapgl", **kwargs):
        # Install defaults only when the caller did not supply them, then
        # forward everything to the generic number validator.
        kwargs.setdefault("edit_type", "style")
        kwargs.setdefault("max", 1)
        kwargs.setdefault("min", 0)
        super(OpacityValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@heatmapgl@_opacity.py@.PATH_END.py
|
{
"filename": "test_cosmosis_interface.py",
"repo_name": "mraveri/tensiometer",
"repo_path": "tensiometer_extracted/tensiometer-master/tensiometer/tests/test_cosmosis_interface.py",
"type": "Python"
}
|
###############################################################################
# initial imports:
import unittest
import tensiometer.cosmosis_interface as ci
import os
###############################################################################
class test_cosmosis_interface(unittest.TestCase):
    """Smoke test for the CosmoSIS chain import interface."""

    def setUp(self):
        """Locate the test chain directory relative to this file."""
        # get path:
        self.here = os.path.dirname(os.path.abspath(__file__))
        # chain dir:
        self.chain_dir = self.here + '/../../test_chains/'

    def test_MCSamplesFromCosmosis(self):
        """Import a multinest CosmoSIS chain and check something is returned.

        The original test called the importer but asserted nothing, so it
        could only fail by raising; assert the result is not None so a
        silently broken importer is also caught.
        """
        # import the chain:
        chain_name = self.chain_dir + 'DES_multinest_cosmosis'
        chain = ci.MCSamplesFromCosmosis(chain_name)
        self.assertIsNotNone(chain)
###############################################################################
if __name__ == '__main__':
unittest.main(verbosity=2)
|
mraveriREPO_NAMEtensiometerPATH_START.@tensiometer_extracted@tensiometer-master@tensiometer@tests@test_cosmosis_interface.py@.PATH_END.py
|
{
"filename": "make_shear.py",
"repo_name": "AWehrhahn/PyReduce",
"repo_path": "PyReduce_extracted/PyReduce-master/pyreduce/make_shear.py",
"type": "Python"
}
|
# -*- coding: utf-8 -*-
"""
Calculate the tilt based on a reference spectrum with high SNR, e.g. Wavelength calibration image
Authors
-------
Nikolai Piskunov
Ansgar Wehrhahn
Version
--------
0.9 - NP - IDL Version
1.0 - AW - Python Version
License
-------
....
"""
import logging
import matplotlib.pyplot as plt
import numpy as np
from numpy.polynomial.polynomial import polyval2d
from scipy import signal
from scipy.ndimage import gaussian_filter1d, median_filter
from scipy.optimize import least_squares
from tqdm import tqdm
from .extract import fix_parameters
from .util import make_index
from .util import polyfit2d_2 as polyfit2d
logger = logging.getLogger(__name__)
class ProgressPlot: # pragma: no cover
    """Interactive (pyplot ion-mode) progress display used while fitting
    the slit curvature.

    Three panels are shown side by side: the extracted spectrum of the
    current order with the currently processed peak marked (left), the
    image cutout around that peak (middle), and the fitted model with the
    curvature line overplotted (right).
    """

    def __init__(self, ncol, width, title=None):
        # ncol: number of detector columns (x axis of the spectrum panel)
        # width: half-width of the cutout window; stored as the full width
        plt.ion()
        fig, (ax1, ax2, ax3) = plt.subplots(ncols=3)
        plot_title = "Curvature in each order"
        if title is not None:
            plot_title = f"{title}\n{plot_title}"
        fig.suptitle(plot_title)
        # placeholder line data; replaced on every update_plot1 call
        (line1,) = ax1.plot(np.arange(ncol) + 1)
        (line2,) = ax1.plot(0, 0, "d")
        ax1.set_yscale("log")
        self.ncol = ncol
        self.width = width * 2 + 1
        self.fig = fig
        self.ax1 = ax1
        self.ax2 = ax2
        self.ax3 = ax3
        self.line1 = line1
        self.line2 = line2

    def update_plot1(self, vector, peak, offset=0):
        # Show the (clipped to >= 1, for the log scale) spectrum and mark
        # the peak currently being fitted with a diamond.
        data = np.ones(self.ncol)
        data[offset : len(vector) + offset] = np.clip(vector, 1, None)
        self.line1.set_ydata(data)
        self.line2.set_xdata(peak)
        self.line2.set_ydata(data[peak])
        self.ax1.set_ylim((data.min(), data.max()))
        self.fig.canvas.draw()
        self.fig.canvas.flush_events()

    def update_plot2(self, img, model, tilt, shear, peak):
        # Show the image cutout and the fitted model, with the curvature
        # curve x = peak + tilt*y + shear*y**2 overplotted in red on both.
        self.ax2.clear()
        self.ax3.clear()
        self.ax2.imshow(img)
        self.ax3.imshow(model)
        nrows, _ = img.shape
        middle = nrows // 2
        y = np.arange(-middle, -middle + nrows)
        x = peak + (tilt + shear * y) * y
        y += middle
        self.ax2.plot(x, y, "r")
        self.ax3.plot(x, y, "r")
        self.fig.canvas.draw()
        self.fig.canvas.flush_events()

    def close(self):
        # Dispose of the figure and leave interactive mode.
        plt.close()
        plt.ioff()
class Curvature:
    """Determine the slit curvature (tilt and shear) of each order.

    Emission peaks are located in the extracted spectrum of every order.
    For each peak, a peak profile (gaussian or lorentzian) that is shifted
    row by row according to a polynomial in the row offset is fitted to
    the image cutout, yielding the first order ("tilt") and second order
    ("shear") curvature at that column.  The per-line values are then
    smoothed with a polynomial fit, either independently per order
    (mode="1D") or globally in column and order number (mode="2D").
    """

    def __init__(
        self,
        orders,
        extraction_width=0.5,
        column_range=None,
        order_range=None,
        window_width=9,
        peak_threshold=10,
        peak_width=1,
        fit_degree=2,
        sigma_cutoff=3,
        mode="1D",
        plot=False,
        plot_title=None,
        peak_function="gaussian",
        curv_degree=2,
    ):
        self.orders = orders
        self.extraction_width = extraction_width
        self.column_range = column_range
        if order_range is None:
            order_range = (0, self.nord)
        self.order_range = order_range
        self.window_width = window_width
        self.threshold = peak_threshold
        self.peak_width = peak_width
        self.fit_degree = fit_degree
        self.sigma_cutoff = sigma_cutoff
        self.mode = mode
        self.plot = plot
        self.plot_title = plot_title
        self.curv_degree = curv_degree
        self.peak_function = peak_function
        # Normalize fit_degree to the shape expected by the chosen mode.
        if self.mode == "1D":
            # fit degree is an integer
            if not np.isscalar(self.fit_degree):
                self.fit_degree = self.fit_degree[0]
        elif self.mode == "2D":
            # fit degree is a 2 tuple
            if np.isscalar(self.fit_degree):
                self.fit_degree = (self.fit_degree, self.fit_degree)

    @property
    def nord(self):
        # Total number of orders in the trace polynomial array.
        return self.orders.shape[0]

    @property
    def n(self):
        # Number of orders actually processed (within order_range).
        return self.order_range[1] - self.order_range[0]

    @property
    def mode(self):
        return self._mode

    @mode.setter
    def mode(self, value):
        if value not in ["1D", "2D"]:
            raise ValueError(
                f"Value for 'mode' not understood. Expected one of ['1D', '2D'] but got {value}"
            )
        self._mode = value

    def _fix_inputs(self, original):
        """Normalize orders/column_range/extraction_width to per-order arrays
        and restrict them to the selected order range."""
        orders = self.orders
        extraction_width = self.extraction_width
        column_range = self.column_range
        nrow, ncol = original.shape
        nord = len(orders)
        extraction_width, column_range, orders = fix_parameters(
            extraction_width, column_range, orders, nrow, ncol, nord
        )
        self.column_range = column_range[self.order_range[0] : self.order_range[1]]
        self.extraction_width = extraction_width[
            self.order_range[0] : self.order_range[1]
        ]
        self.orders = orders[self.order_range[0] : self.order_range[1]]
        self.order_range = (0, self.n)

    def _find_peaks(self, vec, cr):
        """Find emission peaks in the extracted spectrum of one order.

        Returns the median-subtracted spectrum and the peak columns in
        full-detector coordinates (offset by cr[0]).
        """
        # This should probably be the same as in the wavelength calibration
        vec -= np.ma.median(vec)
        vec = np.ma.filled(vec, 0)
        height = np.percentile(vec, 68) * self.threshold
        peaks, _ = signal.find_peaks(
            vec, prominence=height, width=self.peak_width, distance=self.window_width
        )
        # Remove peaks at the edge
        peaks = peaks[
            (peaks >= self.window_width + 1)
            & (peaks < len(vec) - self.window_width - 1)
        ]
        # Remove the offset, due to vec being a subset of extracted
        peaks += cr[0]
        return vec, peaks

    def _determine_curvature_single_line(self, original, peak, ycen, ycen_int, xwd):
        """
        Fit the curvature of a single peak in the spectrum

        This is achieved by fitting a model, that consists of gaussians
        in spectrum direction, that are shifted by the curvature in each row.

        Parameters
        ----------
        original : array of shape (nrows, ncols)
            whole input image
        peak : int
            column position of the peak
        ycen : array of shape (ncols,)
            row center of the order of the peak
        ycen_int : array of shape (ncols,)
            integer part of ycen (used for index construction)
        xwd : 2 tuple
            extraction width above and below the order center to use

        Returns
        -------
        tilt : float
            first order curvature
        shear : float
            second order curvature
        """
        _, ncol = original.shape
        # look at +- width pixels around the line
        # Extract short horizontal strip for each row in extraction width
        # Then fit a gaussian to each row, to find the center of the line
        x = peak + np.arange(-self.window_width, self.window_width + 1)
        x = x[(x >= 0) & (x < ncol)]
        xmin, xmax = x[0], x[-1] + 1
        # Look above and below the line center
        y = np.arange(-xwd[0], xwd[1] + 1)[:, None] - ycen[xmin:xmax][None, :]
        x = x[None, :]
        idx = make_index(ycen_int - xwd[0], ycen_int + xwd[1], xmin, xmax)
        img = original[idx]
        img_compressed = np.ma.compressed(img)
        # Normalize the cutout to roughly [0, 1] using robust percentiles.
        img -= np.percentile(img_compressed, 1)
        img /= np.percentile(img_compressed, 99)
        img = np.ma.clip(img, 0, 1)
        # Mean slit illumination profile, used to modulate the model.
        sl = np.ma.mean(img, axis=1)
        sl = sl[:, None]
        peak_func = {"gaussian": gaussian, "lorentzian": lorentzian}
        peak_func = peak_func[self.peak_function]

        def model(coef):
            # Peak profile whose center is shifted per row by the curvature.
            A, middle, sig, *curv = coef
            mu = middle + shift(curv)
            mod = peak_func(x, A, mu, sig)
            mod *= sl
            return (mod - img).ravel()

        def model_compressed(coef):
            return np.ma.compressed(model(coef))

        A = np.nanpercentile(img_compressed, 95)
        sig = (xmax - xmin) / 4  # TODO
        if self.curv_degree == 1:
            shift = lambda curv: curv[0] * y
        elif self.curv_degree == 2:
            shift = lambda curv: (curv[0] + curv[1] * y) * y
        else:
            raise ValueError("Only curvature degrees 1 and 2 are supported")
        x0 = [A, peak, sig] + [0] * self.curv_degree
        res = least_squares(
            model_compressed, x0=x0, method="trf", loss="soft_l1", f_scale=0.1
        )
        if self.curv_degree == 1:
            tilt, shear = res.x[3], 0
        elif self.curv_degree == 2:
            tilt, shear = res.x[3], res.x[4]
        else:
            tilt, shear = 0, 0
        if self.plot >= 2:
            model = res.fun.reshape(img.shape) + img
            self.progress.update_plot2(img, model, tilt, shear, res.x[1] - xmin)
        return tilt, shear

    def _fit_curvature_single_order(self, peaks, tilt, shear):
        """Fit smooth 1D polynomials to the per-line tilt/shear of one order,
        after sigma-clipping outliers around the median tilt."""
        try:
            middle = np.median(tilt)
            sigma = np.percentile(tilt, (32, 68))
            sigma = middle - sigma[0], sigma[1] - middle
            mask = (tilt >= middle - 5 * sigma[0]) & (tilt <= middle + 5 * sigma[1])
            peaks, tilt, shear = peaks[mask], tilt[mask], shear[mask]
            coef_tilt = np.zeros(self.fit_degree + 1)
            res = least_squares(
                lambda coef: np.polyval(coef, peaks) - tilt,
                x0=coef_tilt,
                loss="arctan",
            )
            coef_tilt = res.x
            coef_shear = np.zeros(self.fit_degree + 1)
            res = least_squares(
                lambda coef: np.polyval(coef, peaks) - shear,
                x0=coef_shear,
                loss="arctan",
            )
            coef_shear = res.x
        # Fix: a bare "except:" also swallowed SystemExit/KeyboardInterrupt;
        # catch only regular exceptions and fall back to zero curvature.
        except Exception:
            logger.error(
                "Could not fit the curvature of this order. Using no curvature instead"
            )
            coef_tilt = np.zeros(self.fit_degree + 1)
            coef_shear = np.zeros(self.fit_degree + 1)
        return coef_tilt, coef_shear, peaks

    def _determine_curvature_all_lines(self, original, extracted):
        """Measure tilt/shear for every usable line in every order.

        Returns per-order lists of peak columns, tilts, shears, and the
        extracted spectra (for plotting).
        """
        ncol = original.shape[1]
        # Store data from all orders
        all_peaks = []
        all_tilt = []
        all_shear = []
        plot_vec = []
        for j in tqdm(range(self.n), desc="Order"):
            logger.debug("Calculating tilt of order %i out of %i", j + 1, self.n)
            cr = self.column_range[j]
            xwd = self.extraction_width[j]
            ycen = np.polyval(self.orders[j], np.arange(ncol))
            ycen_int = ycen.astype(int)
            ycen -= ycen_int
            # Find peaks
            vec = extracted[j, cr[0] : cr[1]]
            vec, peaks = self._find_peaks(vec, cr)
            npeaks = len(peaks)
            # Determine curvature for each line seperately
            tilt = np.zeros(npeaks)
            shear = np.zeros(npeaks)
            mask = np.full(npeaks, True)
            for ipeak, peak in tqdm(
                enumerate(peaks), total=len(peaks), desc="Peak", leave=False
            ):
                if self.plot >= 2:  # pragma: no cover
                    self.progress.update_plot1(vec, peak, cr[0])
                try:
                    tilt[ipeak], shear[ipeak] = self._determine_curvature_single_line(
                        original, peak, ycen, ycen_int, xwd
                    )
                except RuntimeError:  # pragma: no cover
                    mask[ipeak] = False
            # Store results
            all_peaks += [peaks[mask]]
            all_tilt += [tilt[mask]]
            all_shear += [shear[mask]]
            plot_vec += [vec]
        return all_peaks, all_tilt, all_shear, plot_vec

    def fit(self, peaks, tilt, shear):
        """Fit polynomial coefficients to the measured per-line curvature,
        per order (1D) or jointly over column and order number (2D)."""
        if self.mode == "1D":
            coef_tilt = np.zeros((self.n, self.fit_degree + 1))
            coef_shear = np.zeros((self.n, self.fit_degree + 1))
            for j in range(self.n):
                coef_tilt[j], coef_shear[j], _ = self._fit_curvature_single_order(
                    peaks[j], tilt[j], shear[j]
                )
        elif self.mode == "2D":
            x = np.concatenate(peaks)
            y = [np.full(len(p), i) for i, p in enumerate(peaks)]
            y = np.concatenate(y)
            z = np.concatenate(tilt)
            coef_tilt = polyfit2d(x, y, z, degree=self.fit_degree, loss="arctan")
            z = np.concatenate(shear)
            coef_shear = polyfit2d(x, y, z, degree=self.fit_degree, loss="arctan")
        return coef_tilt, coef_shear

    def eval(self, peaks, order, coef_tilt, coef_shear):
        """Evaluate the fitted tilt/shear polynomials at the given columns
        (peaks) and order indices."""
        if self.mode == "1D":
            tilt = np.zeros(peaks.shape)
            shear = np.zeros(peaks.shape)
            for i in np.unique(order):
                idx = order == i
                tilt[idx] = np.polyval(coef_tilt[i], peaks[idx])
                shear[idx] = np.polyval(coef_shear[i], peaks[idx])
        elif self.mode == "2D":
            tilt = polyval2d(peaks, order, coef_tilt)
            shear = polyval2d(peaks, order, coef_shear)
        return tilt, shear

    def plot_results(
        self, ncol, plot_peaks, plot_vec, plot_tilt, plot_shear, tilt_x, shear_x
    ):  # pragma: no cover
        """Plot the found peaks and the fitted tilt/shear per order."""
        fig, axes = plt.subplots(nrows=self.n // 2 + self.n % 2, ncols=2, squeeze=False)
        title = "Peaks"
        if self.plot_title is not None:
            title = f"{self.plot_title}\n{title}"
        fig.suptitle(title)
        fig1, axes1 = plt.subplots(
            nrows=self.n // 2 + self.n % 2, ncols=2, squeeze=False
        )
        title = "1st Order Curvature"
        if self.plot_title is not None:
            title = f"{self.plot_title}\n{title}"
        fig1.suptitle(title)
        fig2, axes2 = plt.subplots(
            nrows=self.n // 2 + self.n % 2, ncols=2, squeeze=False
        )
        title = "2nd Order Curvature"
        if self.plot_title is not None:
            title = f"{self.plot_title}\n{title}"
        fig2.suptitle(title)
        plt.subplots_adjust(hspace=0)

        def trim_axs(axs, N):
            """little helper to massage the axs list to have correct length..."""
            axs = axs.flat
            for ax in axs[N:]:
                ax.remove()
            return axs[:N]

        t, s = [None for _ in range(self.n)], [None for _ in range(self.n)]
        for j in range(self.n):
            cr = self.column_range[j]
            x = np.arange(cr[0], cr[1])
            order = np.full(len(x), j)
            t[j], s[j] = self.eval(x, order, tilt_x, shear_x)
        t_lower = min(t.min() * (0.5 if t.min() > 0 else 1.5) for t in t)
        t_upper = max(t.max() * (1.5 if t.max() > 0 else 0.5) for t in t)
        s_lower = min(s.min() * (0.5 if s.min() > 0 else 1.5) for s in s)
        s_upper = max(s.max() * (1.5 if s.max() > 0 else 0.5) for s in s)
        for j in range(self.n):
            cr = self.column_range[j]
            peaks = plot_peaks[j]
            vec = np.clip(plot_vec[j], 0, None)
            tilt = plot_tilt[j]
            shear = plot_shear[j]
            x = np.arange(cr[0], cr[1])
            # Figure Peaks found (and used)
            axes[j // 2, j % 2].plot(np.arange(cr[0], cr[1]), vec)
            axes[j // 2, j % 2].plot(peaks, vec[peaks - cr[0]], "X")
            axes[j // 2, j % 2].set_xlim([0, ncol])
            if j not in (self.n - 1, self.n - 2):
                axes[j // 2, j % 2].get_xaxis().set_ticks([])
            # Figure 1st order
            axes1[j // 2, j % 2].plot(peaks, tilt, "rX")
            axes1[j // 2, j % 2].plot(x, t[j])
            axes1[j // 2, j % 2].set_xlim(0, ncol)
            axes1[j // 2, j % 2].set_ylim(t_lower, t_upper)
            if j not in (self.n - 1, self.n - 2):
                axes1[j // 2, j % 2].get_xaxis().set_ticks([])
            else:
                axes1[j // 2, j % 2].set_xlabel("x [pixel]")
            if j == self.n // 2 + 1:
                axes1[j // 2, j % 2].set_ylabel("tilt [pixel/pixel]")
            # Figure 2nd order
            axes2[j // 2, j % 2].plot(peaks, shear, "rX")
            axes2[j // 2, j % 2].plot(x, s[j])
            axes2[j // 2, j % 2].set_xlim(0, ncol)
            axes2[j // 2, j % 2].set_ylim(s_lower, s_upper)
            if j not in (self.n - 1, self.n - 2):
                axes2[j // 2, j % 2].get_xaxis().set_ticks([])
            else:
                axes2[j // 2, j % 2].set_xlabel("x [pixel]")
            if j == self.n // 2 + 1:
                axes2[j // 2, j % 2].set_ylabel("shear [pixel/pixel**2]")
        axes1 = trim_axs(axes1, self.n)
        axes2 = trim_axs(axes2, self.n)
        plt.show()

    def plot_comparison(self, original, tilt, shear, peaks):  # pragma: no cover
        """Overlay the fitted curvature on the rectified image cutouts."""
        _, ncol = original.shape
        output = np.zeros((np.sum(self.extraction_width) + self.nord, ncol))
        pos = [0]
        x = np.arange(ncol)
        for i in range(self.nord):
            ycen = np.polyval(self.orders[i], x)
            yb = ycen - self.extraction_width[i, 0]
            yt = ycen + self.extraction_width[i, 1]
            xl, xr = self.column_range[i]
            index = make_index(yb, yt, xl, xr)
            yl = pos[i]
            yr = pos[i] + index[0].shape[0]
            output[yl:yr, xl:xr] = original[index]
            pos += [yr]
        vmin, vmax = np.percentile(output[output != 0], (5, 95))
        plt.imshow(output, vmin=vmin, vmax=vmax, origin="lower", aspect="auto")
        for i in range(self.nord):
            for p in peaks[i]:
                ew = self.extraction_width[i]
                x = np.zeros(ew[0] + ew[1] + 1)
                y = np.arange(-ew[0], ew[1] + 1)
                for j, yt in enumerate(y):
                    x[j] = p + yt * tilt[i, p] + yt ** 2 * shear[i, p]
                y += pos[i] + ew[0]
                plt.plot(x, y, "r")
        locs = np.sum(self.extraction_width, axis=1) + 1
        locs = np.array([0, *np.cumsum(locs)[:-1]])
        locs[:-1] += (np.diff(locs) * 0.5).astype(int)
        locs[-1] += ((output.shape[0] - locs[-1]) * 0.5).astype(int)
        plt.yticks(locs, range(len(locs)))
        if self.plot_title is not None:
            plt.title(self.plot_title)
        plt.xlabel("x [pixel]")
        plt.ylabel("order")
        plt.show()

    def execute(self, extracted, original):
        """Determine the curvature for the whole frame.

        Parameters
        ----------
        extracted : array of shape (nord, ncol)
            extracted spectrum of each order
        original : array of shape (nrow, ncol)
            original input image

        Returns
        -------
        tilt, shear : arrays of shape (nord, ncol)
            first and second order curvature evaluated at every column
        """
        logger.info("Determining the Slit Curvature")
        _, ncol = original.shape
        self._fix_inputs(original)
        if self.plot >= 2:  # pragma: no cover
            self.progress = ProgressPlot(ncol, self.window_width, title=self.plot_title)
        peaks, tilt, shear, vec = self._determine_curvature_all_lines(
            original, extracted
        )
        coef_tilt, coef_shear = self.fit(peaks, tilt, shear)
        if self.plot >= 2:  # pragma: no cover
            self.progress.close()
        if self.plot:  # pragma: no cover
            self.plot_results(ncol, peaks, vec, tilt, shear, coef_tilt, coef_shear)
        iorder, ipeaks = np.indices(extracted.shape)
        tilt, shear = self.eval(ipeaks, iorder, coef_tilt, coef_shear)
        if self.plot:  # pragma: no cover
            self.plot_comparison(original, tilt, shear, peaks)
        return tilt, shear
# TODO allow other line shapes
def gaussian(x, A, mu, sig):
    """Evaluate a (non-normalized) Gaussian profile.

    Parameters
    ----------
    x : array_like
        positions at which to evaluate the profile
    A : float
        peak height
    mu : float
        offset from central line
    sig : float
        standard deviation
    """
    z = (x - mu) / sig
    return A * np.exp(-0.5 * z * z)
def lorentzian(x, A, x0, mu):
    """Evaluate a (non-normalized) Lorentzian profile.

    Parameters
    ----------
    x : array_like
        positions at which to evaluate the profile
    A : float
        peak height scale
    x0 : float
        offset from central line
    mu : float
        width of the lorentzian
    """
    offset = x - x0
    denominator = offset ** 2 + 0.25 * mu ** 2
    return A * mu / denominator
|
AWehrhahnREPO_NAMEPyReducePATH_START.@PyReduce_extracted@PyReduce-master@pyreduce@make_shear.py@.PATH_END.py
|
{
"filename": "test_primitives.py",
"repo_name": "pyro-ppl/pyro",
"repo_path": "pyro_extracted/pyro-master/tests/test_primitives.py",
"type": "Python"
}
|
# Copyright Contributors to the Pyro project.
# SPDX-License-Identifier: Apache-2.0
from typing import Optional
import pytest
import torch
import pyro
import pyro.distributions as dist
from pyro import poutine
pytestmark = pytest.mark.stage("unit")
def test_sample_ok():
    """pyro.sample with a scalar Normal returns a scalar torch.Tensor."""
    x = pyro.sample("x", dist.Normal(0, 1))
    assert isinstance(x, torch.Tensor)
    assert x.shape == ()
def test_observe_warn():
    """pyro.sample with obs= (here, outside any inference context) emits a RuntimeWarning."""
    with pytest.warns(RuntimeWarning):
        pyro.sample("x", dist.Normal(0, 1), obs=torch.tensor(0.0))
def test_param_ok():
    """pyro.param returns the (scalar) tensor it was initialized with."""
    x = pyro.param("x", torch.tensor(0.0))
    assert isinstance(x, torch.Tensor)
    assert x.shape == ()
def test_deterministic_ok():
    """pyro.deterministic passes through the given scalar tensor."""
    x = pyro.deterministic("x", torch.tensor(0.0))
    assert isinstance(x, torch.Tensor)
    assert x.shape == ()
@pytest.mark.parametrize(
    "mask",
    [
        None,
        torch.tensor(True),
        torch.tensor([True]),
        torch.tensor([True, False, True]),
    ],
)
def test_obs_mask_shape(mask: Optional[torch.Tensor]):
    """The site's batch/event shapes are unchanged by obs_mask of any
    broadcastable shape (or None)."""
    data = torch.randn(3, 2)

    def model():
        with pyro.plate("data", 3):
            pyro.sample(
                "y",
                dist.MultivariateNormal(torch.zeros(2), scale_tril=torch.eye(2)),
                obs=data,
                obs_mask=mask,
            )

    # Record the trace and inspect the distribution at site "y".
    trace = poutine.trace(model).get_trace()
    y_dist = trace.nodes["y"]["fn"]
    assert y_dist.batch_shape == (3,)
    assert y_dist.event_shape == (2,)
|
pyro-pplREPO_NAMEpyroPATH_START.@pyro_extracted@pyro-master@tests@test_primitives.py@.PATH_END.py
|
{
"filename": "_family.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/carpet/baxis/tickfont/_family.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class FamilyValidator(_plotly_utils.basevalidators.StringValidator):
    """Validator for the ``family`` property of ``carpet.baxis.tickfont``.

    A non-blank font-family string, validated strictly; edits trigger a
    recalculation ("calc") unless overridden by the caller.
    """

    def __init__(
        self, plotly_name="family", parent_name="carpet.baxis.tickfont", **kwargs
    ):
        # Install defaults only when the caller did not supply them, then
        # forward everything to the generic string validator.
        kwargs.setdefault("edit_type", "calc")
        kwargs.setdefault("no_blank", True)
        kwargs.setdefault("strict", True)
        super(FamilyValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@carpet@baxis@tickfont@_family.py@.PATH_END.py
|
{
"filename": "module.py",
"repo_name": "google/flax",
"repo_path": "flax_extracted/flax-main/flax/nnx/module.py",
"type": "Python"
}
|
# Copyright 2024 The Flax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import typing as tp
from functools import partial
import jax.tree_util as jtu
from flax.nnx import (
filterlib,
graph,
)
from flax.nnx import variablelib as variableslib
from flax.nnx.graph import GraphDef
from flax.nnx.object import Object, ObjectMeta
from flax.nnx.graph import GraphState, StateLeaf
from flax.nnx.statelib import State
from flax.typing import Key, Path, PathParts
A = tp.TypeVar('A')
B = tp.TypeVar('B')
M = tp.TypeVar('M', bound='Module')
S = tp.TypeVar('S', bound=tp.Union[GraphState, tuple[GraphState, ...]])
V = tp.TypeVar('V', bound=variableslib.Variable[tp.Any])
F = tp.TypeVar('F', bound=tp.Callable[..., tp.Any])
StateMapping = tp.Mapping[Path, tp.Any]
tuple_reduce = lambda xs, x: xs + (x,)
tuple_init = lambda: ()
class ModuleMeta(ObjectMeta):
  """Metaclass of :class:`Module`.

  Currently identical to ``ObjectMeta``; kept as a distinct class so that
  module-specific metaclass behavior can be added later without changing
  the public type of ``Module``.
  """

  # we keep a trivial derived class just in case we need to
  # add more functionality in the future
  pass
class Module(Object, metaclass=ModuleMeta):
"""Base class for all neural network modules.
Layers and models should subclass this class.
``Module``'s can contain submodules, and in this way can be nested in a tree
structure. Submodules can be assigned as regular attributes inside the
``__init__`` method.
You can define arbitrary "forward pass" methods on your ``Module`` subclass.
While no methods are special-cased, ``__call__`` is a popular choice since
you can call the ``Module`` directly::
>>> from flax import nnx
>>> import jax.numpy as jnp
>>> class Model(nnx.Module):
... def __init__(self, rngs):
... self.linear1 = nnx.Linear(2, 3, rngs=rngs)
... self.linear2 = nnx.Linear(3, 4, rngs=rngs)
... def __call__(self, x):
... x = self.linear1(x)
... x = nnx.relu(x)
... x = self.linear2(x)
... return x
>>> x = jnp.ones((1, 2))
>>> model = Model(rngs=nnx.Rngs(0))
>>> y = model(x)
"""
def sow(
self,
variable_type: tp.Type[variableslib.Variable[tp.Any]],
name: str,
value: A,
reduce_fn: tp.Callable[[B, A], B] = tuple_reduce,
init_fn: tp.Callable[[], B] = tuple_init, # type: ignore
) -> None:
"""``sow()`` can be used to collect intermediate values without
the overhead of explicitly passing a container through each Module call.
``sow()`` stores a value in a new ``Module`` attribute, denoted by ``name``.
The value will be wrapped by a :class:`Variable` of type ``variable_type``,
which can be useful to filter for in :func:`split`, :func:`state` and
:func:`pop`.
By default the values are stored in a tuple and each stored value
is appended at the end. This way all intermediates can be tracked when
the same module is called multiple times.
Example usage::
>>> from flax import nnx
>>> import jax.numpy as jnp
>>> class Model(nnx.Module):
... def __init__(self, rngs):
... self.linear1 = nnx.Linear(2, 3, rngs=rngs)
... self.linear2 = nnx.Linear(3, 4, rngs=rngs)
... def __call__(self, x, add=0):
... x = self.linear1(x)
... self.sow(nnx.Intermediate, 'i', x+add)
... x = self.linear2(x)
... return x
>>> x = jnp.ones((1, 2))
>>> model = Model(rngs=nnx.Rngs(0))
>>> assert not hasattr(model, 'i')
>>> y = model(x)
>>> assert hasattr(model, 'i')
>>> assert len(model.i.value) == 1 # tuple of length 1
>>> assert model.i.value[0].shape == (1, 3)
>>> y = model(x, add=1)
>>> assert len(model.i.value) == 2 # tuple of length 2
>>> assert (model.i.value[0] + 1 == model.i.value[1]).all()
Alternatively, a custom init/reduce function can be passed::
>>> class Model(nnx.Module):
... def __init__(self, rngs):
... self.linear1 = nnx.Linear(2, 3, rngs=rngs)
... self.linear2 = nnx.Linear(3, 4, rngs=rngs)
... def __call__(self, x):
... x = self.linear1(x)
... self.sow(nnx.Intermediate, 'sum', x,
... init_fn=lambda: 0,
... reduce_fn=lambda prev, curr: prev+curr)
... self.sow(nnx.Intermediate, 'product', x,
... init_fn=lambda: 1,
... reduce_fn=lambda prev, curr: prev*curr)
... x = self.linear2(x)
... return x
>>> x = jnp.ones((1, 2))
>>> model = Model(rngs=nnx.Rngs(0))
>>> y = model(x)
>>> assert (model.sum.value == model.product.value).all()
>>> intermediate = model.sum.value
>>> y = model(x)
>>> assert (model.sum.value == intermediate*2).all()
>>> assert (model.product.value == intermediate**2).all()
Args:
variable_type: The :class:`Variable` type for the stored value.
Typically :class:`Intermediate` is used to indicate an
intermediate value.
name: A string denoting the ``Module`` attribute name, where
the sowed value is stored.
value: The value to be stored.
reduce_fn: The function used to combine the existing value with the new
value. The default is to append the value to a tuple.
init_fn: For the first value stored, ``reduce_fn`` will be passed the result
of ``init_fn`` together with the value to be stored. The default is an
empty tuple.
"""
if hasattr(self, name):
variable = getattr(self, name)
if not isinstance(variable, variableslib.Variable):
raise ValueError(
f"Expected '{name}' to be a Variable, got {type(variable).__name__}"
)
elif type(variable) != variable_type:
raise ValueError(
f"Expected '{name}' to be of type '{variable_type.__name__}', "
f"got '{type(variable).__name__}'"
)
variable.raw_value = reduce_fn(variable.raw_value, value)
else:
reduced_value = reduce_fn(init_fn(), value)
setattr(self, name, variable_type(reduced_value))
def iter_modules(self) -> tp.Iterator[tuple[PathParts, Module]]:
"""Recursively iterates over all nested :class:`Module`'s of the current Module, including
the current Module.
``iter_modules`` creates a generator that yields the path and the Module instance, where
the path is a tuple of strings or integers representing the path to the Module from the
root Module.
Example::
>>> from flax import nnx
...
>>> class SubModule(nnx.Module):
... def __init__(self, din, dout, rngs):
... self.linear1 = nnx.Linear(din, dout, rngs=rngs)
... self.linear2 = nnx.Linear(din, dout, rngs=rngs)
...
>>> class Block(nnx.Module):
... def __init__(self, din, dout, *, rngs: nnx.Rngs):
... self.linear = nnx.Linear(din, dout, rngs=rngs)
... self.submodule = SubModule(din, dout, rngs=rngs)
... self.dropout = nnx.Dropout(0.5)
... self.batch_norm = nnx.BatchNorm(10, rngs=rngs)
...
>>> model = Block(2, 5, rngs=nnx.Rngs(0))
>>> for path, module in model.iter_modules():
... print(path, type(module).__name__)
...
('batch_norm',) BatchNorm
('dropout',) Dropout
('linear',) Linear
('submodule', 'linear1') Linear
('submodule', 'linear2') Linear
('submodule',) SubModule
() Block
"""
for path, value in graph.iter_graph(self):
if isinstance(value, Module):
yield path, value
def iter_children(self) -> tp.Iterator[tuple[Key, Module]]:
"""Iterates over all children :class:`Module`'s of the current Module. This
method is similar to :func:`iter_modules`, except it only iterates over the
immediate children, and does not recurse further down.
``iter_children`` creates a generator that yields the key and the Module instance,
where the key is a string representing the attribute name of the Module to access
the corresponding child Module.
Example::
>>> from flax import nnx
...
>>> class SubModule(nnx.Module):
... def __init__(self, din, dout, rngs):
... self.linear1 = nnx.Linear(din, dout, rngs=rngs)
... self.linear2 = nnx.Linear(din, dout, rngs=rngs)
...
>>> class Block(nnx.Module):
... def __init__(self, din, dout, *, rngs: nnx.Rngs):
... self.linear = nnx.Linear(din, dout, rngs=rngs)
... self.submodule = SubModule(din, dout, rngs=rngs)
... self.dropout = nnx.Dropout(0.5)
... self.batch_norm = nnx.BatchNorm(10, rngs=rngs)
...
>>> model = Block(2, 5, rngs=nnx.Rngs(0))
>>> for path, module in model.iter_children():
... print(path, type(module).__name__)
...
batch_norm BatchNorm
dropout Dropout
linear Linear
submodule SubModule
"""
node_dict = graph.get_node_impl(self).node_dict(self)
for key, value in node_dict.items():
if isinstance(value, Module):
yield key, value
def set_attributes(
  self,
  *filters: filterlib.Filter,
  raise_if_not_found: bool = True,
  **attributes: tp.Any,
) -> None:
  """Recursively set attributes on this Module and all nested Modules.

  Every nested Module (including ``self``) that matches at least one of the
  given ``filters`` has each attribute in ``attributes`` assigned to it,
  provided the Module already has an attribute of that name; Modules lacking
  the attribute are left untouched. With no filters, all Modules match.

  This is the mechanism behind :meth:`train` / :meth:`eval`, e.g. toggling
  ``deterministic`` on ``Dropout`` and ``use_running_average`` on
  ``BatchNorm``.

  Args:
    *filters: Filters selecting which Modules to modify (all, if empty).
    raise_if_not_found: If True (default), raise a ``ValueError`` when some
      attribute was never found on any selected Module.
    **attributes: The attribute name/value pairs to set.
  """
  missing = set(attributes)
  # An empty filter list means "match everything".
  active_filters = filters if filters else (True,)
  predicates = [filterlib.to_predicate(flt) for flt in active_filters]
  for path, module in self.iter_modules():
    # any() short-circuits on the first matching predicate, mirroring the
    # original loop-and-break; each module is updated at most once.
    if any(predicate(path, module) for predicate in predicates):
      for attr_name, attr_value in attributes.items():
        if hasattr(module, attr_name):
          missing.discard(attr_name)
          setattr(module, attr_name, attr_value)
  if missing and raise_if_not_found:
    raise ValueError(
      f'Could not find at least one instance of the following attributes: {missing}'
    )
def train(self, **attributes):
  """Switch the Module (and all nested Modules) into training mode.

  Shorthand for :meth:`set_attributes` with ``deterministic=False`` and
  ``use_running_average=False``; this toggles layers such as ``Dropout``
  and ``BatchNorm`` into their training-time behavior. Modules that do not
  define these attributes are silently skipped
  (``raise_if_not_found=False``).

  Args:
    **attributes: extra attributes forwarded to ``set_attributes``.
  """
  return self.set_attributes(
    raise_if_not_found=False,
    deterministic=False,
    use_running_average=False,
    **attributes,
  )
def eval(self, **attributes):
  """Switch the Module (and all nested Modules) into evaluation mode.

  Shorthand for :meth:`set_attributes` with ``deterministic=True`` and
  ``use_running_average=True``; this toggles layers such as ``Dropout``
  and ``BatchNorm`` into their inference-time behavior. Modules that do not
  define these attributes are silently skipped
  (``raise_if_not_found=False``).

  Args:
    **attributes: extra attributes forwarded to ``set_attributes``.
  """
  return self.set_attributes(
    raise_if_not_found=False,
    deterministic=True,
    use_running_average=True,
    **attributes,
  )
def __init_subclass__(cls, experimental_pytree: bool = False) -> None:
  """Optionally register Module subclasses as JAX pytrees.

  Declaring ``class Foo(Module, experimental_pytree=True)`` registers the
  subclass with ``jax.tree_util`` so its instances can cross JAX
  transformation boundaries; flatten/unflatten go through
  ``_module_flatten`` / ``_module_unflatten`` below.
  """
  super().__init_subclass__()
  if experimental_pytree:
    jtu.register_pytree_with_keys(
      cls,
      # with_keys=True produces (DictKey, leaf) pairs for keyed traversal.
      partial(_module_flatten, with_keys=True),
      _module_unflatten,  # type: ignore[arg-type]
      flatten_func=partial(_module_flatten, with_keys=False),
    )
def __treescope_repr__(self, path, subtree_renderer):
  """Render this Module for the treescope pretty-printer.

  Shows the Module as a constructor call over its public (non-underscore)
  instance attributes, colored deterministically from the class name.
  """
  import treescope  # type: ignore[import-not-found,import-untyped]
  # Private attributes (leading underscore) are implementation detail and
  # are hidden from the rendering.
  public_attrs = {
    name: value
    for name, value in vars(self).items()
    if not name.startswith('_')
  }
  return treescope.repr_lib.render_object_constructor(
    object_type=type(self),
    attributes=public_attrs,
    path=path,
    subtree_renderer=subtree_renderer,
    color=treescope.formatting_util.color_from_string(
      type(self).__qualname__
    ),
  )
# -------------------------
# Pytree Definition
# -------------------------
def _module_flatten(module: Module, *, with_keys: bool):
  """Flatten ``module`` into pytree children plus static aux data.

  Splits the module into its static graph definition and its state, then
  emits the state leaves (sorted by key for a deterministic order) as the
  pytree children. The aux data is ``(keys, graphdef)`` so unflattening can
  re-associate leaves with their paths.
  """
  graphdef, state = graph.split(module)
  sorted_items = sorted(state.raw_mapping.items())
  keys = tuple(key for key, _ in sorted_items)
  if with_keys:
    # Keyed variant: pair each leaf with a DictKey for jax key-path APIs.
    children = tuple((jtu.DictKey(key), value) for key, value in sorted_items)
  else:
    children = tuple(value for _, value in sorted_items)
  return children, (keys, graphdef)
def _module_unflatten(
  paths_moduledef: tuple[tuple[Path, ...], GraphDef[M]],
  variables: tuple[StateLeaf, ...],
) -> M:
  # Inverse of `_module_flatten`: rebuild the Module by merging the static
  # graphdef with a State reconstructed from zipped (path, leaf) pairs.
  paths, graphdef = paths_moduledef
  return graph.merge(graphdef, State(zip(paths, variables)))
def first_from(*args: tp.Optional[A], error_msg: str) -> A:
  """Pick out and return the leftmost argument that is not ``None``.

  Args:
    *args: candidate values, scanned left to right.
    error_msg: message for the ``ValueError`` raised when every candidate
      is ``None``.

  Returns:
    The first non-None argument.

  Raises:
    ValueError: if all arguments are None.
  """
  non_none = (candidate for candidate in args if candidate is not None)
  try:
    return next(non_none)
  except StopIteration:
    # `from None`: the StopIteration is an implementation detail, not a cause.
    raise ValueError(error_msg) from None
|
googleREPO_NAMEflaxPATH_START.@flax_extracted@flax-main@flax@nnx@module.py@.PATH_END.py
|
{
"filename": "_showticklabels.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/scatterternary/marker/colorbar/_showticklabels.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class ShowticklabelsValidator(_plotly_utils.basevalidators.BooleanValidator):
    """Boolean validator for `scatterternary.marker.colorbar.showticklabels`."""

    def __init__(
        self,
        plotly_name="showticklabels",
        parent_name="scatterternary.marker.colorbar",
        **kwargs,
    ):
        # edit_type defaults to "colorbars" unless the caller overrides it.
        kwargs.setdefault("edit_type", "colorbars")
        super().__init__(plotly_name=plotly_name, parent_name=parent_name, **kwargs)
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@scatterternary@marker@colorbar@_showticklabels.py@.PATH_END.py
|
{
"filename": "_valuessrc.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/treemap/_valuessrc.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class ValuessrcValidator(_plotly_utils.basevalidators.SrcValidator):
    """Source-reference validator for the `treemap.valuessrc` property."""

    def __init__(self, plotly_name="valuessrc", parent_name="treemap", **kwargs):
        # edit_type defaults to "none" unless the caller overrides it.
        kwargs.setdefault("edit_type", "none")
        super().__init__(plotly_name=plotly_name, parent_name=parent_name, **kwargs)
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@treemap@_valuessrc.py@.PATH_END.py
|
{
"filename": "apply_blinding_main_fromfile_fcomp_double_blinding.py",
"repo_name": "desihub/LSS",
"repo_path": "LSS_extracted/LSS-main/scripts/main/apply_blinding_main_fromfile_fcomp_double_blinding.py",
"type": "Python"
}
|
'''
Documentation needs to be updated
EXAMPLE USE
===========
GENERAL NOTES
=============
NOTES FOR TESTING AND VALIDATION
================================
'''
import sys
import os
import logging
import shutil
import unittest
from datetime import datetime
import json
import numpy as np
#from numpy.random import MT19937
#from numpy.random import RandomState, SeedSequence
from numpy.random import random
import fitsio
import glob
import argparse
from astropy.io import fits
from astropy.table import Table,join,unique,vstack
from matplotlib import pyplot as plt
import LSS.main.cattools as ct
from LSS.globals import main
import LSS.blinding_tools as blind
from LSS.tabulated_cosmo import TabulatedDESI
import LSS.recon_tools as rectools
from LSS.cosmodesi_io_tools import catalog_fn
import LSS.common_tools as common
import pyrecon
from pyrecon import MultiGridReconstruction, IterativeFFTReconstruction, IterativeFFTParticleReconstruction, utils, setup_logging
from cosmoprimo.fiducial import DESI
from cosmoprimo.utils import DistanceToRedshift
from cosmoprimo import Cosmology
# Pick the scratch-space environment variable for the NERSC machine we are
# on; this script only runs at NERSC and aborts otherwise.
# NOTE(review): a missing NERSC_HOST env var would raise KeyError here
# rather than hitting the friendly sys.exit below.
if os.environ['NERSC_HOST'] == 'cori':
    scratch = 'CSCRATCH'
elif os.environ['NERSC_HOST'] == 'perlmutter':
    scratch = 'PSCRATCH'
else:
    print('NERSC_HOST is not cori or permutter but is '+os.environ['NERSC_HOST'])
    sys.exit('NERSC_HOST not known (code only works on NERSC), not proceeding')
# to remove jax warning (from cosmoprimo)
logging.getLogger("jax._src.lib.xla_bridge").addFilter(logging.Filter("No GPU/TPU found, falling back to CPU."))
# ---------------------------------------------------------------------------
# Command-line interface: which tracer to blind, where catalogs live, and
# which of the individual blinding/clustering steps ('y'/'n' switches) to run.
# ---------------------------------------------------------------------------
parser = argparse.ArgumentParser()
parser.add_argument("--type", help="tracer type to be selected")
parser.add_argument("--basedir_in", help="base directory for input, default is location for official catalogs", default='/dvs_ro/cfs/cdirs/desi/survey/catalogs/')
parser.add_argument("--basedir_out", help="base directory for output, default is C(P)SCRATCH", default=os.environ[scratch])
parser.add_argument("--version", help="catalog version", default='test')
parser.add_argument("--survey", help="e.g., main (for all), DA02, any future DA", default='Y1')
parser.add_argument("--verspec", help="version for redshifts", default='iron')
parser.add_argument("--notqso", help="if y, do not include any qso targets", default='n')
parser.add_argument("--use_map_veto",help="string to add on the end of full file reflecting if hp maps were used to cut",default='_HPmapcut')
#parser.add_argument("--reg_md", help="whether to run on split N/S or NGC/SGC", default='GC')
#parser.add_argument("--split_GC", help="whether to make the split NGC/SGC", default='y')
parser.add_argument("--get_par_mode", help="how to get the row of the file with w0/wa values", choices=['random', 'from_file','specified'],default='from_file')
parser.add_argument("--baoblind", help="if y, do the bao blinding shift", default='n')
parser.add_argument("--compmd", help="whether the extra completeness gets added to data or random", choices=['dat','ran'],default='ran')
parser.add_argument("--mkclusdat", help="if y, make the clustering data files after the BAO blinding (needed for RSD blinding)", default='n')
parser.add_argument("--wsyscol", help="column name to use for WEIGHT_SYS", default='WEIGHT_SN')
parser.add_argument("--mkclusran", help="if y, make the clustering random files after the BAO blinding (needed for RSD blinding)", default='n')
parser.add_argument("--minr", help="minimum number for random files", default=0, type=int)# use 1 for abacus mocks
parser.add_argument("--maxr", help="maximum for random files, default is 1", default=1, type=int) # use 2 for abacus mocks
parser.add_argument("--dorecon", help="if y, run the recon needed for RSD blinding", default='n')
parser.add_argument("--rsdblind", help="if y, do the RSD blinding shift", default='n')
parser.add_argument("--fnlblind", help="if y, do the fnl blinding", default='n')
parser.add_argument("--resamp", help="resample the randoms to make sure all is consistent with how weights changed", default='n')
parser.add_argument("--getFKP", help="calculate n(z) and FKP weights on final clustering catalogs", default='n')
# NOTE(review): no type= is given, so a value passed on the command line
# arrives as a str while the default is a float — confirm downstream
# arithmetic always receives the float default or casts.
parser.add_argument("--fiducial_f", help="fiducial value for f", default=0.8)
#relevant if args.get_par_mode is specified
parser.add_argument("--specified_w0",
                    help="Specify a blind w0 value",
                    default=None)
parser.add_argument("--specified_wa",
                    help="Specify a blind wa value ",
                    default=None)
parser.add_argument("--specified_fnl",
                    help="Specify a blind fnl value ",
                    default=None)
parser.add_argument("--visnz",help="whether to look at the original, blinded, and weighted n(z)",default='n')
parser.add_argument("--useMPI",help="whether to try to use MPI or not",default='y')
args = parser.parse_args()
# ---------------------------------------------------------------------------
# MPI setup: the fNL blinding step requires the MPI build of pyrecon; without
# it the script still runs the earlier steps and exits before fNL blinding.
# ---------------------------------------------------------------------------
mpicomm = None
if args.useMPI == 'y':
    try:
        mpicomm = pyrecon.mpi.COMM_WORLD # MPI version
    except AttributeError:
        mpicomm = None # non-MPI version
        print('Not in MPI mode. The fNL blinding requires MPI, the script will exit before attempting fNL blinding')
        #sys.exit('The following script need to be run with the MPI version of pyrecon. Please use module swap pyrecon:mpi')
if mpicomm is None:
    print('NOT using MPI. If you specified a number of processes, e.g. "srun ... -n 32", greater than 1, things will not work well')
# `root` is True on rank 0 (or always, when not using MPI); most file I/O
# below is guarded by it so only one process writes.
root = mpicomm is None or mpicomm.rank == 0
if root: print(args)
# NOTE(review): `type` shadows the builtin throughout this script; kept as-is
# here since this is a documentation-only pass.
type = args.type
version = args.version
specrel = args.verspec
notqso = 'notqso' if (args.notqso == 'y') else ''
if root: print('blinding catalogs for tracer type ' + type + notqso)
prog = 'BRIGHT' if (type[:3] == 'BGS' or type == 'bright' or type == 'MWS_ANY') else 'DARK'
progl = prog.lower()
# Per-tracer survey globals (redshift range, TSNR column/cut, dchi2).
mainp = main(args.type,survey='Y1')
zmin = mainp.zmin
zmax = mainp.zmax
tsnrcol = mainp.tsnrcol
#share basedir location '/global/cfs/cdirs/desi/survey/catalogs'
if 'mock' not in args.verspec:
    # Real data: read official LSS catalogs (and their blinded variants).
    maindir = args.basedir_in +'/'+args.survey+'/LSS/'
    ldirspec = maindir+specrel+'/'
    dirin = ldirspec+'LSScats/'+version+'/'
    dirin_blind = ldirspec+'LSScats/'+version+'/blinded/'
    LSSdir = ldirspec+'LSScats/'
    tsnrcut = mainp.tsnrcut
    dchi2 = mainp.dchi2
    randens = 2500.
    nzmd = 'data'
elif 'Y1/mock' in args.verspec: #e.g., use 'mocks/FirstGenMocks/AbacusSummit/Y1/mock1' to get the 1st mock with fiberassign
    dirin = args.basedir_in +'/'+args.survey+'/'+args.verspec+'/LSScats/'+version+'/'
    LSSdir = args.basedir_in +'/'+args.survey+'/'+args.verspec+'/LSScats/'
    dchi2=None
    tsnrcut=0
    randens = 10460.
    nzmd = 'mock'
else:
    sys.exit('verspec '+args.verspec+' not supported')
# All outputs of this (second) blinding pass go under 'doubleblinded/'.
dirout = args.basedir_out + '/LSScats/' + version + '/doubleblinded/'
def mkdir(dirname):
    """Create ``dirname`` (including parents), silently ignoring failures.

    Any :class:`OSError` — most commonly "already exists", but also races
    between MPI ranks creating the same directory — is swallowed.
    """
    try:
        os.makedirs(dirname)  # MPI...
    except OSError:
        pass
mkdir(dirout)
#if root and (not os.path.exists(dirout)):
#    os.makedirs(dirout)
#    print('made ' + dirout)
# Effective redshift and linear bias per tracer, used for the blinding shift.
tp2z = {'LRG': 0.8, 'ELG': 1.1, 'QSO': 1.6,'BGS':0.25}
tp2bias = {'LRG': 2., 'ELG': 1.3, 'QSO': 2.3,'BGS':1.8}
regl = ['_S', '_N']
gcl = ['_SGC', '_NGC']
if root:
    ztp = tp2z[args.type[:3]]
    bias = tp2bias[args.type[:3]]
    # Table of 1000 pre-generated (w0, wa) blinding pairs.
    w0wa = np.loadtxt('/global/cfs/cdirs/desi/survey/catalogs/Y1/LSS/w0wa_initvalues_zeffcombined_1000realisations.txt')
    if args.get_par_mode == 'specified':
        # NOTE(review): the float() casts run before the None check, so
        # omitting --specified_w0/--specified_wa raises TypeError here and the
        # sys.exit message below is unreachable — confirm intended order.
        [w0_blind, wa_blind] = [float(args.specified_w0),float(args.specified_wa)]
        if w0_blind is None or wa_blind is None:
            sys.exit('you must provide arguments for --specified_w0 and --specified_wa in the specified get_par_mode')
    if args.get_par_mode == 'random':
        #if args.type != 'LRG':
        #    sys.exit('Only do LRG in random mode, read from LRG file for other tracers')
        ind = int(random() * 1000)
        [w0_blind, wa_blind] = w0wa[ind]
    if args.get_par_mode == 'from_file':
        # Persist the chosen row index so all tracers share the same blinding.
        fn = LSSdir + 'filerow.txt'
        if not os.path.isfile(fn):
            ind_samp = int(random()*1000)
            fo = open(fn,'w')
            fo.write(str(ind_samp)+'\n')
            fo.close()
        ind = int(np.loadtxt(fn))
        [w0_blind,wa_blind] = w0wa[ind]
    #choose f_shift to compensate shift in monopole amplitude
    cosmo_fid = DESI()
    cosmo_shift = cosmo_fid.clone(w0_fld=w0_blind, wa_fld=wa_blind)
    # Volume factor between blinded and fiducial cosmologies at the tracer's
    # effective redshift (DM^2 * DH ratio).
    DM_fid = cosmo_fid.comoving_angular_distance(ztp)
    DH_fid = 1. / cosmo_fid.hubble_function(ztp)
    DM_shift = cosmo_shift.comoving_angular_distance(ztp)
    DH_shift = 1. / cosmo_shift.hubble_function(ztp)
    vol_fac = (DM_shift**2 * DH_shift) / (DM_fid**2 * DH_fid)
    #a, b, c for quadratic formula
    a = 0.2 / bias**2
    b = 2 / (3 * bias)
    c = 1 - (1 + 0.2 * (args.fiducial_f / bias)**2. + 2/3 * args.fiducial_f / bias) / vol_fac
    f_shift = (-b + np.sqrt(b**2. - 4.*a*c))/(2*a)
    dfper = (f_shift - args.fiducial_f)/args.fiducial_f
    # Cap the fractional shift in f at 10%.
    maxfper = 0.1
    if abs(dfper) > maxfper:
        dfper = maxfper*dfper/abs(dfper)
        f_shift = (1+dfper)*args.fiducial_f
    fgrowth_blind = f_shift
    fb_in = dirin + type + notqso
    fbr_in = fb_in
    if type == 'BGS_BRIGHT-21.5':
        # absolute-magnitude-cut BGS shares the parent BGS_BRIGHT randoms
        fbr_in = dirin +'BGS_BRIGHT'
    #fcr_in = fbr_in +'{}_0_clustering.ran.fits'
    # NOTE(review): fcd_in keeps a literal '{}' placeholder (filled per-region
    # with .format elsewhere); it is not directly a readable path.
    fcd_in = dirin_blind+ type + notqso+ '{}_clustering.dat.fits'
    print('input file is '+fcd_in)
    nzf_in = dirin + type + notqso + '_full_nz.txt'
    wo = 'y'
    if os.path.isfile(nzf_in):
        wo = 'n'
    # Per-tracer n(z) bin width and FKP P0 values.
    if type[:3] == 'QSO':
        dz = 0.02
        #zmin = 0.8
        #zmax = 3.5
        P0 = 6000
    else:
        dz = 0.01
        #zmin = 0.01
        #zmax = 1.6
    if type[:3] == 'LRG':
        P0 = 10000
        #zmin = 0.4
        #zmax = 1.1
    if type[:3] == 'ELG':
        P0 = 4000
        #zmin = 0.6
        #zmax = 1.6
    if type[:3] == 'BGS':
        P0 = 7000
        #zmin = 0.1
        #zmax = 0.5
# ---------------------------------------------------------------------------
# BAO blinding: apply a w0/wa redshift shift on top of the already-blinded
# input catalog, then trim to the tracer's redshift range.
# NOTE(review): in the original layout this section presumably sits inside the
# `if root:` block above — confirm against the repository version.
# ---------------------------------------------------------------------------
if args.baoblind == 'y':
    # Assemble the (first-pass blinded) clustering data catalog; if no
    # combined file exists, concatenate the per-cap (NGC/SGC) files.
    data_clus_fn = dirin_blind + type + notqso + '_clustering.dat.fits'
    if not os.path.isfile(data_clus_fn):
        dl = []
        regl = ['_NGC', '_SGC']
        for reg in regl:
            dl.append(fitsio.read(dirin_blind + type + notqso + reg + '_clustering.dat.fits'))
        data = Table(np.concatenate(dl))
        if 'PHOTSYS' not in list(data.dtype.names):
            # add the N/S photometric-region column if it is missing
            data = common.addNS(data)
    else:
        data = Table(fitsio.read(data_clus_fn))
    #redo 'WEIGHT' column because of completeness factorization (applied back later if FKP option is chosen)
    cols = list(data.dtype.names)
    if 'WEIGHT_SYS' not in cols:
        # BUGFIX: these assignments previously targeted `fd`, which is not
        # defined until after the blinding shift below, so this branch raised
        # NameError; the catalog being prepared here is `data` (read at the
        # WEIGHT recomputation just below).
        if args.wsyscol is not None:
            data['WEIGHT_SYS'] = np.copy(data[args.wsyscol])
        else:
            print('did not find WEIGHT_SYS, putting it in as all 1')
            data['WEIGHT_SYS'] = np.ones(len(data))
    data['WEIGHT'] = data['WEIGHT_COMP']*data['WEIGHT_ZFAIL']*data['WEIGHT_SYS']
    # clip redshifts into the range covered by the blinding shift tabulation
    data['Z'] = np.clip(data['Z'], 0.01, 3.6)
    outf = dirout + type + notqso + '_clustering.dat.fits'
    print('output going to '+outf)
    # apply the w0/wa blinding redshift shift and write the shifted catalog
    blind.apply_zshift_DE(data, outf, w0=w0_blind, wa=wa_blind, zcol='Z')
    # re-read the shifted catalog and trim to the tracer's redshift window
    fd = Table(fitsio.read(outf))
    zl = fd['Z']
    zr = zl > zmin
    zr &= zl < zmax
    fd = fd[zr]
    common.write_LSS(fd, outf)
    if args.visnz == 'y':
        # FIXME(review): `wl` and `gz` are never defined in this script, so
        # this diagnostic branch raises NameError when --visnz y is used;
        # presumably `gz` is a good-redshift mask and `wl` its weights. Also
        # `fcd_in` still contains a literal '{}' placeholder here.
        print('min/max of weights for nz:')
        print(np.min(wl),np.max(wl))
        fdin = fitsio.read(fcd_in)
        a = plt.hist(fdin['Z'][gz],bins=100,range=(zmin,zmax),histtype='step',label='input')
        b = plt.hist(fd['Z'][gz],bins=100,range=(zmin,zmax),histtype='step',label='blinded')
        c = plt.hist(fd['Z'][gz],bins=100,range=(zmin,zmax),histtype='step',weights=fd['WEIGHT_SYS'][gz],label='blinded+reweight')
        plt.legend()
        plt.show()
# Build the clustering data catalog from the blinded full catalog.
if args.mkclusdat == 'y':
    ct.mkclusdat(dirout + type + notqso, tp=type, dchi2=dchi2, tsnrcut=tsnrcut, zmin=zmin, zmax=zmax,compmd=args.compmd)
# Build the clustering random catalogs, matching them to the blinded data.
if args.mkclusran == 'y':
    # columns re-sampled from the data onto the randoms
    rcols = ['Z', 'WEIGHT', 'WEIGHT_SYS', 'WEIGHT_COMP', 'WEIGHT_ZFAIL','WEIGHT_FKP','TARGETID_DATA','WEIGHT_SN']
    tsnrcol = 'TSNR2_ELG'
    if args.type[:3] == 'BGS':
        tsnrcol = 'TSNR2_BGS'
    #for rannum in range(args.minr, args.maxr):
    ranin = dirin + args.type + notqso + '_'
    if args.type == 'BGS_BRIGHT-21.5':
        ranin = dirin + 'BGS_BRIGHT' + notqso + '_'
    # Load (or assemble from per-cap files) the blinded clustering data the
    # randoms will be matched against.
    data_clus_fn = dirout + type + notqso+'_clustering.dat.fits'
    if os.path.isfile(data_clus_fn) == False:
        dl = []
        regl = ['_NGC','_SGC']
        for reg in regl:
            dl.append(fitsio.read(dirout + type + notqso+reg+'_clustering.dat.fits'))
        dtot = np.concatenate(dl)
        if 'PHOTSYS' not in list(dtot.dtype.names):
            dtot = common.addNS(Table(dtot))
        clus_arrays = [dtot]
    else:
        clus_arrays = [fitsio.read(data_clus_fn)]
    #for reg in ['N','S']:
    #    clus_arrays.append(fitsio.read(dirout + type + notqso+'_'+reg+'_clustering.dat.fits'))
    def _parfun(rannum):
        # Process one random-file index end to end.
        ct.mkclusran(ranin, dirout + args.type + notqso + '_', rannum, rcols=rcols, tsnrcut=tsnrcut, tsnrcol=tsnrcol,clus_arrays=clus_arrays,use_map_veto=args.use_map_veto)#, ntilecut=ntile, ccut=ccut)
        #for clustering, make rannum start from 0
        if 'Y1/mock' in args.verspec:
            for reg in regl:
                ranf = dirout + args.type + notqso + reg + '_' + str(rannum) + '_clustering.ran.fits'
                ranfm = dirout + args.type + notqso + reg + '_' + str(rannum - 1) + '_clustering.ran.fits'
                os.system('mv ' + ranf + ' ' + ranfm)
    nran = args.maxr-args.minr
    inds = np.arange(args.minr,args.maxr)
    # NOTE(review): despite the flag name, --useMPI y here selects a local
    # multiprocessing Pool, not MPI — confirm intended.
    if args.useMPI == 'y':
        from multiprocessing import Pool
        nproc = 9
        #nproc = nran*2
        with Pool(processes=nproc) as pool:
            res = pool.map(_parfun, inds)
    else:
        for ii in inds:
            _parfun(ii)
            #ct.mkclusran(ranin, dirout + args.type + notqso + '_', ii, rcols=rcols, tsnrcut=tsnrcut, tsnrcol=tsnrcol,clus_arrays=clus_arrays)
            print(ii,clus_arrays[0].dtype.names)
    #if args.split_GC == 'y':
    # split the combined catalogs into NGC/SGC files
    fb = dirout + args.type + notqso + '_'
    #ct.clusNStoGC(fb, args.maxr - args.minr)
    ct.splitclusGC(fb, args.maxr - args.minr)
# if args.mkclusran == 'y':
# rcols = ['Z', 'WEIGHT', 'WEIGHT_SYS', 'WEIGHT_COMP', 'WEIGHT_ZFAIL','WEIGHT_FKP','TARGETID_DATA','WEIGHT_SN']
# tsnrcol = 'TSNR2_ELG'
# if args.type[:3] == 'BGS':
# tsnrcol = 'TSNR2_BGS'
# #for rannum in range(args.minr, args.maxr):
# ranin = dirin + args.type + notqso + '_'
# if args.type == 'BGS_BRIGHT-21.5':
# ranin = dirin + 'BGS_BRIGHT' + notqso + '_'
# clus_arrays = []
# for reg in ['N','S']:
# clus_arrays.append(fitsio.read(dirout + type + notqso+'_'+reg+'_clustering.dat.fits'))
# def _parfun(rannum):
# ct.mkclusran(ranin, dirout + args.type + notqso + '_', rannum, rcols=rcols, tsnrcut=tsnrcut, tsnrcol=tsnrcol,clus_arrays=clus_arrays,use_map_veto=args.use_map_veto)#, ntilecut=ntile, ccut=ccut)
# #for clustering, make rannum start from 0
# if 'Y1/mock' in args.verspec:
# for reg in regl:
# ranf = dirout + args.type + notqso + reg + '_' + str(rannum) + '_clustering.ran.fits'
# ranfm = dirout + args.type + notqso + reg + '_' + str(rannum - 1) + '_clustering.ran.fits'
# os.system('mv ' + ranf + ' ' + ranfm)
# nran = args.maxr-args.minr
# inds = np.arange(args.minr,args.maxr)
# if args.useMPI == 'y':
# from multiprocessing import Pool
# nproc = 9
# #nproc = nran*2
# with Pool(processes=nproc) as pool:
# res = pool.map(_parfun, inds)
# else:
# for ii in inds:
# _parfun(ii)
# #ct.mkclusran(ranin, dirout + args.type + notqso + '_', ii, rcols=rcols, tsnrcut=tsnrcut, tsnrcol=tsnrcol,clus_arrays=clus_arrays)
# print(ii,clus_arrays[0].dtype.names)
# #if args.split_GC == 'y':
# fb = dirout + args.type + notqso + '_'
# ct.clusNStoGC(fb, args.maxr - args.minr)
sys.stdout.flush()  # ensure everything printed so far reaches the log before the (MPI) recon step
# Density-field reconstruction (runs on all MPI ranks), producing the
# 'IFFTrsd' catalogs consumed by the RSD blinding step below.
if args.dorecon == 'y':
    distance = TabulatedDESI().comoving_radial_distance
    f, bias = rectools.get_f_bias(args.type)
    Reconstruction = IterativeFFTReconstruction#MultiGridReconstruction
    setup_logging()
    #regions = ['N', 'S'] if args.reg_md == 'NS' else ['NGC', 'SGC']
    regions = ['NGC', 'SGC']
    for region in regions:
        catalog_kwargs = dict(tracer=args.type, region=region, ctype='clustering', nrandoms=(int(args.maxr) - int(args.minr)))
        data_fn = catalog_fn(**catalog_kwargs, cat_dir=dirout, name='data')
        randoms_fn = catalog_fn(**catalog_kwargs, cat_dir=dirout, name='randoms')
        #print(randoms_fn)
        data_rec_fn = catalog_fn(**catalog_kwargs, cat_dir=dirout, rec_type='IFFTrsd', name='data')
        randoms_rec_fn = catalog_fn(**catalog_kwargs, cat_dir=dirout, rec_type='IFFTrsd', name='randoms')
        rectools.run_reconstruction(Reconstruction, distance, data_fn, randoms_fn, data_rec_fn, randoms_rec_fn, f=f, bias=bias, convention='rsd', dtype='f8', zlim=(zmin, zmax), mpicomm=mpicomm)
if root:
    #re-sample redshift dependent columns from data
    nran = args.maxr-args.minr
    if args.resamp == 'y':
        regions = ['NGC', 'SGC']
        rcols = ['Z', 'WEIGHT', 'WEIGHT_SYS', 'WEIGHT_COMP', 'WEIGHT_ZFAIL','WEIGHT_FKP','TARGETID_DATA','WEIGHT_SN']
        for reg in regions:
            flin = dirout + args.type + notqso + '_'+reg
            def _parfun(rannum):
                # re-sample one random file against the shifted data
                ct.clusran_resamp(flin,rannum,rcols=rcols,compmd=args.compmd)#, ntilecut=ntile, ccut=ccut)
            inds = np.arange(nran)
            from multiprocessing import Pool
            with Pool(processes=nran*2) as pool:
                res = pool.map(_parfun, inds)
# RSD blinding: shift redshifts using the reconstructed ('IFFTrsd') catalogs
# so the apparent growth rate moves from the fiducial f to fgrowth_blind.
if root and (args.rsdblind == 'y'):
    #if args.reg_md == 'NS':
    #    cl = regl
    #if args.reg_md == 'GC':
    cl = gcl
    for reg in cl:
        fnd = dirout + type + notqso + reg + '_clustering.dat.fits'
        fndr = dirout + type + notqso + reg + '_clustering.IFFTrsd.dat.fits'
        data = Table(fitsio.read(fnd))
        data_real = Table(fitsio.read(fndr))
        # blinded catalog is overwritten in place
        out_file = fnd
        blind.apply_zshift_RSD(data, data_real, out_file,
                               fgrowth_fid=args.fiducial_f,
                               fgrowth_blind=fgrowth_blind)#,
                               #comments=f"f_blind: {fgrowth_blind}, w0_blind: {w0_blind}, wa_blind: {wa_blind}")
# fNL (PNG) blinding: requires MPI; rank 0 draws/reads the blind fnl value,
# broadcasts all blinding parameters, then reweights the data catalogs.
if args.fnlblind == 'y':
    if mpicomm is None:
        sys.exit('fNL blinding requires MPI, exiting')
    from mockfactory.blinding import get_cosmo_blind, CutskyCatalogBlinding
    logger = logging.getLogger('recon')
    if root:
        f_blind = fgrowth_blind
        if args.get_par_mode == 'specified':
            fnl_blind = args.specified_fnl
            if fnl_blind is None:
                sys.exit('you must provide arguments for --specified_fnl in the specified get_par_mode')
            fnl_blind = float(fnl_blind )
            print('fnl value is '+str(fnl_blind))
        else:
            # generate blinding value from the choosen index above
            np.random.seed(ind)
            fnl_blind = np.random.uniform(low=-15, high=15, size=1)[0]
    # Non-root ranks receive the blinding parameters by broadcast.
    if not root:
        w0_blind, wa_blind, f_blind, fnl_blind = None, None, None, None
    w0_blind = mpicomm.bcast(w0_blind, root=0)
    wa_blind = mpicomm.bcast(wa_blind, root=0)
    f_blind = mpicomm.bcast(f_blind, root=0)
    fnl_blind = mpicomm.bcast(fnl_blind, root=0)
    # collect effective redshift and bias for the considered tracer
    zeff = tp2z[args.type[:3]]
    bias = tp2bias[args.type[:3]]
    # build blinding cosmology
    cosmo_blind = DESI()
    cosmo_blind._derived['fnl'] = fnl_blind # only parameter used for PNG blinding
    blinding = CutskyCatalogBlinding(cosmo_fid='DESI', cosmo_blind=cosmo_blind, bias=bias, z=zeff, position_type='rdz', mpicomm=mpicomm, mpiroot=0)
    # loop over the different region of the sky
    #regions = ['N', 'S'] if args.reg_md == 'NS' else ['NGC', 'SGC']
    regions = ['NGC', 'SGC']
    for region in regions:
        # path of data and randoms:
        catalog_kwargs = dict(tracer=args.type, region=region, ctype='clustering', nrandoms=(args.maxr - args.minr))
        data_fn = catalog_fn(**catalog_kwargs, cat_dir=dirout, name='data')
        randoms_fn = catalog_fn(**catalog_kwargs, cat_dir=dirout, name='randoms')
        if np.ndim(randoms_fn) == 0: randoms_fn = [randoms_fn]
        # Only root loads catalogs; blinding.png scatters/gathers internally.
        data_positions, data_weights = None, None
        randoms_positions, randoms_weights = None, None
        if root:
            logger.info('Loading {}.'.format(data_fn))
            data = Table.read(data_fn)
            data_positions, data_weights = [np.array(data['RA'], dtype='float64'), np.array(data['DEC'], dtype='float64'), np.array(data['Z'], dtype='float64')], data['WEIGHT']
            logger.info('Loading {}'.format(randoms_fn))
            randoms = vstack([Table(fitsio.read(fn)) for fn in randoms_fn])
            randoms_positions, randoms_weights = [np.array(randoms['RA'], dtype='float64'), np.array(randoms['DEC'], dtype='float64'), np.array(randoms['Z'], dtype='float64')], randoms['WEIGHT']
        # add fnl blinding weight to the data weight
        new_data_weights = blinding.png(data_positions, data_weights=data_weights,
                                        randoms_positions=randoms_positions, randoms_weights=randoms_weights,
                                        method='data_weights', shotnoise_correction=True)
        # overwrite the data!
        if root:
            # fold the fnl reweighting into WEIGHT and WEIGHT_COMP
            fnl_blind_weights = new_data_weights / data['WEIGHT']
            data['WEIGHT'] = new_data_weights
            data['WEIGHT_COMP'] = data['WEIGHT_COMP'] * fnl_blind_weights
            common.write_LSS(data, data_fn)
if root:
    #re-sample redshift dependent columns from data
    nran = args.maxr-args.minr
    if args.resamp == 'y':
        regions = ['NGC', 'SGC']
        rcols = ['Z', 'WEIGHT', 'WEIGHT_SYS', 'WEIGHT_COMP', 'WEIGHT_ZFAIL','WEIGHT_FKP','TARGETID_DATA','WEIGHT_SN']
        for reg in regions:
            flin = dirout + args.type + notqso + '_'+reg
            def _parfun(rannum):
                # re-sample one random file after the fNL reweighting
                ct.clusran_resamp(flin,rannum,rcols=rcols,compmd=args.compmd)#, ntilecut=ntile, ccut=ccut)
            inds = np.arange(nran)
            from multiprocessing import Pool
            with Pool(processes=nran*2) as pool:
                res = pool.map(_parfun, inds)
    # Final per-tracer n(z) binning / FKP parameters.
    # NOTE(review): these ranges differ from the earlier (commented) ones —
    # e.g. ELG zmin is 0.8 here vs 0.6 earlier, BGS zmax 0.4 vs 0.5 — confirm
    # which are intended for the final n(z)/FKP computation.
    if type[:3] == 'QSO':
        dz = 0.02
        zmin = 0.8
        zmax = 3.5
        P0 = 6000
    if type[:3] == 'LRG':
        P0 = 10000
        zmin = 0.4
        zmax = 1.1
    if type[:3] == 'ELG':
        P0 = 4000
        zmin = 0.8
        zmax = 1.6
    if type[:3] == 'BGS':
        P0 = 7000
        zmin = 0.1
        zmax = 0.4
    # Compute n(z) and attach NZ/FKP columns to the final catalogs.
    if args.getFKP == 'y':
        for reg in gcl:
            fb = dirout+args.type+reg
            fcr = fb+'_0_clustering.ran.fits'
            fcd = fb+'_clustering.dat.fits'
            fout = fb+'_nz.txt'
            common.mknz(fcd,fcr,fout,bs=dz,zmin=zmin,zmax=zmax,randens=randens)
            common.addnbar(fb,bs=dz,zmin=zmin,zmax=zmax,P0=P0,nran=nran)
    # Remove intermediate products (N/S splits, recon catalogs, full files).
    os.system('rm '+dirout+args.type+'*_S_*')
    os.system('rm '+dirout+args.type+'*_N_*')
    os.system('rm '+dirout+args.type+'*IFFT*')
    os.system('rm '+dirout+args.type+'*full*')
|
desihubREPO_NAMELSSPATH_START.@LSS_extracted@LSS-main@scripts@main@apply_blinding_main_fromfile_fcomp_double_blinding.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "AWehrhahn/PyReduce",
"repo_path": "PyReduce_extracted/PyReduce-master/pyreduce/clib/__init__.py",
"type": "Python"
}
|
AWehrhahnREPO_NAMEPyReducePATH_START.@PyReduce_extracted@PyReduce-master@pyreduce@clib@__init__.py@.PATH_END.py
|
|
{
"filename": "devcore.py",
"repo_name": "dstndstn/tractor",
"repo_path": "tractor_extracted/tractor-main/tractor/devcore.py",
"type": "Python"
}
|
from tractor.utils import *
from tractor.galaxy import *
from tractor.pointsource import PointSource
class DevCoreGalaxy(MultiParams, BasicSource):
    '''
    A galaxy with deVauc and central point source components.

    The two components share a position (ie the centers are the same),
    but have different brightnesses.
    '''

    def __init__(self, pos, brightnessDev, shapeDev, brightnessPsf):
        """Create a deV + central-PSF composite at a shared position."""
        MultiParams.__init__(self, pos, brightnessDev, shapeDev, brightnessPsf)
        self.name = self.getName()

    @staticmethod
    def getNamedParams():
        # parameter-slot mapping used by MultiParams
        return dict(pos=0, brightnessDev=1, shapeDev=2, brightnessPsf=3)

    def getName(self):
        return 'DevCoreGalaxy'

    def __str__(self):
        return (self.name + ' at ' + str(self.pos)
                + ' with deV ' + str(self.brightnessDev) + ' '
                + str(self.shapeDev) + ' and PSF ' + str(self.brightnessPsf))

    def __repr__(self):
        # BUGFIX: the closing ')' of the constructor-style repr was missing.
        return (self.name + '(pos=' + repr(self.pos) +
                ', brightnessDev=' + repr(self.brightnessDev) +
                ', shapeDev=' + repr(self.shapeDev) +
                ', brightnessPsf=' + repr(self.brightnessPsf) + ')')

    def getBrightness(self):
        # assume linear
        return self.brightnessDev + self.brightnessPsf

    def getBrightnesses(self):
        return [self.brightnessDev, self.brightnessPsf]

    def _getModelPatches(self, img, minsb=0., modelMask=None):
        """Render the two components separately; returns (deV patch, PSF patch)."""
        d = DevGalaxy(self.pos, self.brightnessDev, self.shapeDev)
        p = PointSource(self.pos, self.brightnessPsf)
        if minsb == 0. or minsb is None:
            kw = {}
        else:
            # split the surface-brightness floor between the two components
            kw = dict(minsb=minsb / 2.)
        if hasattr(self, 'halfsize'):
            d.halfsize = self.halfsize
        pd = d.getModelPatch(img, modelMask=modelMask, **kw)
        pp = p.getModelPatch(img, modelMask=modelMask, **kw)
        return (pd, pp)

    def getModelPatch(self, img, minsb=0., modelMask=None):
        """Return the summed deV + PSF model patch for *img*."""
        pd, pp = self._getModelPatches(img, minsb=minsb, modelMask=modelMask)
        return add_patches(pd, pp)

    def getUnitFluxModelPatches(self, img, minval=0., modelMask=None):
        # Needed for forced photometry
        if minval > 0:
            # allow each component half the error
            minval = minval * 0.5
        d = DevGalaxy(self.pos, self.brightnessDev, self.shapeDev)
        p = PointSource(self.pos, self.brightnessPsf)
        if hasattr(self, 'halfsize'):
            d.halfsize = self.halfsize
        return (d.getUnitFluxModelPatches(img, minval=minval,
                                          modelMask=modelMask) +
                p.getUnitFluxModelPatches(img, minval=minval,
                                          modelMask=modelMask))

    # MAGIC: ORDERING OF EXP AND DEV PARAMETERS
    def getParamDerivatives(self, img, modelMask=None):
        """Parameter derivatives of the composite model.

        Builds throwaway deV and PSF components mirroring this source's
        frozen-parameter state, takes their derivatives, and — since the
        position is shared — sums the positional derivatives of the two
        components.
        """
        d = DevGalaxy(self.pos, self.brightnessDev, self.shapeDev)
        p = PointSource(self.pos, self.brightnessPsf)
        if hasattr(self, 'halfsize'):
            d.halfsize = self.halfsize
        d.dname = 'devcore.dev'
        p.dname = 'devcore.psf'
        # mirror frozen-parameter state onto the throwaway components
        if self.isParamFrozen('pos'):
            d.freezeParam('pos')
            p.freezeParam('pos')
        if self.isParamFrozen('brightnessDev'):
            d.freezeParam('brightness')
        if self.isParamFrozen('shapeDev'):
            d.freezeParam('shape')
        if self.isParamFrozen('brightnessPsf'):
            p.freezeParam('brightness')
        dd = d.getParamDerivatives(img, modelMask=modelMask)
        dp = p.getParamDerivatives(img, modelMask=modelMask)
        if self.isParamFrozen('pos'):
            derivs = dd + dp
        else:
            derivs = []
            # "pos" is shared between the models, so add the derivs.
            npos = len(self.pos.getStepSizes())
            for i in range(npos):
                dpos = add_patches(dd[i], dp[i])
                if dpos is not None:
                    dpos.setName('d(devcore)/d(pos%i)' % i)
                derivs.append(dpos)
            derivs.extend(dd[npos:])
            derivs.extend(dp[npos:])
        return derivs
|
dstndstnREPO_NAMEtractorPATH_START.@tractor_extracted@tractor-main@tractor@devcore.py@.PATH_END.py
|
{
"filename": "_util.py",
"repo_name": "crossbario/crossbar",
"repo_path": "crossbar_extracted/crossbar-master/crossbar/_util.py",
"type": "Python"
}
|
#####################################################################################
#
# Copyright (c) typedef int GmbH
# SPDX-License-Identifier: EUPL-1.2
#
#####################################################################################
import contextlib
import socket
import sys
import json
import os
import re
import inspect
import uuid
import copy
from collections.abc import Mapping
import click
from autobahn.wamp import CallDetails
from crossbar.common.checkconfig import InvalidConfigException
_ENVPAT_STR = r'^\$\{(.+)\}$'
_ENVPAT = re.compile(_ENVPAT_STR)
DEBUG_LIFECYCLE = False
DEBUG_PROGRAMFLOW = False
def set_flags_from_args(_args):
    """Scan raw CLI arguments and latch the module-level debug flags."""
    global DEBUG_LIFECYCLE
    global DEBUG_PROGRAMFLOW
    for arg in _args:
        normalized = arg.strip().lower()
        if normalized == '--debug-lifecycle':
            DEBUG_LIFECYCLE = True
        if normalized == '--debug-programflow':
            DEBUG_PROGRAMFLOW = True
# FS path to controlling terminal
_TERMINAL = None
# Linux, *BSD and MacOSX
if sys.platform.startswith('linux') or 'bsd' in sys.platform or sys.platform.startswith('darwin'):
    _TERMINAL = '/dev/tty' if os.path.exists('/dev/tty') else None
# Windows
elif sys.platform in ['win32']:
    pass
# Other OS
else:
    pass
# still, we might not be able to use TTY, so duck test it:
if _TERMINAL:
    try:
        with open('/dev/tty', 'w') as f:
            f.write('\n')
            f.flush()
    # BUG FIX: was a bare "except:", which also swallowed
    # KeyboardInterrupt/SystemExit; only OS-level failures are expected.
    except OSError:
        # under systemd: OSError: [Errno 6] No such device or address: '/dev/tty'
        _TERMINAL = None
def class_name(obj):
    """
    Return a name like "module.Class" given either an instance or a class.
    """
    cls = obj if inspect.isclass(obj) else obj.__class__
    return '{}.{}'.format(cls.__module__, cls.__name__)
def dump_json(obj, minified=True):
    """
    Serialize *obj* to a JSON Unicode string.

    :param minified: If True (default), emit compact JSON with no spaces;
        otherwise pretty-print with 4-space indentation.
    """
    if not minified:
        return json.dumps(obj, indent=4, separators=(',', ': '), sort_keys=False, ensure_ascii=False)
    return json.dumps(obj, separators=(',', ':'), ensure_ascii=False)
def hl(text, bold=False, color='yellow'):
    """
    Return *text* wrapped in click ANSI styling (highlighted text).
    """
    text = text if isinstance(text, str) else '{}'.format(text)
    return click.style(text, fg=color, bold=bold)
def _qn(obj):
    """Fully-qualified "module.Qualname" for classes/functions/methods.

    Anything else (instances, builtins like ``len``) yields 'unknown'.
    """
    if inspect.isclass(obj) or inspect.isfunction(obj) or inspect.ismethod(obj):
        qn = '{}.{}'.format(obj.__module__, obj.__qualname__)
    else:
        qn = 'unknown'
    return qn
# NOTE: originally intended default was render=DEBUG_PROGRAMFLOW
def hltype(obj, render=True):
    """Return a highlighted "<module.Qualname>" tag for *obj*."""
    if not render:
        return ''
    parts = _qn(obj).split('.')
    head = hl(parts[0], color='yellow', bold=True)
    tail = hl('.' + '.'.join(parts[1:]), color='yellow', bold=False)
    return '<' + head + tail + '>'
def hlflag(flag, true_txt='YES', false_txt='NO', null_txt='UNSET'):
    """Return a colored YES/NO/UNSET marker for an optional boolean."""
    assert flag is None or type(flag) == bool
    if flag is None:
        return hl('{}'.format(null_txt), color='blue', bold=True)
    if flag:
        return hl('{}'.format(true_txt), color='green', bold=True)
    return hl('{}'.format(false_txt), color='red', bold=True)
def hlid(oid):
    """Return *oid* highlighted as an object identifier (bold blue)."""
    return hl('{}'.format(oid), color='blue', bold=True)
def hlval(val, color='white'):
    """Return *val* highlighted as a value (bold, default white)."""
    return hl('{}'.format(val), color=color, bold=True)
def hluserid(oid):
    """
    Return *oid* highlighted as a quoted user identifier.
    """
    oid = oid if isinstance(oid, str) else '{}'.format(oid)
    return hl('"{}"'.format(oid), color='yellow', bold=True)
def hlfixme(msg, obj):
    """Return a highlighted FIXME marker naming the offending object."""
    return hl('FIXME: {} {}'.format(msg, _qn(obj)), color='green', bold=True)
def hlcontract(oid):
    """Return *oid* highlighted as a "<contract>" tag (bold magenta)."""
    oid = oid if isinstance(oid, str) else '{}'.format(oid)
    return hl('<{}>'.format(oid), color='magenta', bold=True)
def term_print(text):
    """
    This directly prints to the process controlling terminal (if there is any).
    It bypasses any output redirections, or closes stdout/stderr pipes.
    This can be used eg for "admin messages", such as "node is shutting down now!"
    This currently only works on Unix like systems (tested only on Linux).
    When it cannot do so, it falls back to plain old print.
    """
    # NOTE(review): the whole body appears guarded by DEBUG_LIFECYCLE, so
    # nothing is printed unless the lifecycle debug flag is set.
    if DEBUG_LIFECYCLE:
        # pad and colorize lifecycle messages so they stand out
        text = '{:<44}'.format(text)
        text = click.style(text, fg='blue', bold=True)
        if _TERMINAL:
            # write straight to the controlling terminal, bypassing stdout
            with open('/dev/tty', 'w') as f:
                f.write(text + '\n')
                f.flush()
        else:
            print(text)
def _add_debug_options(parser):
    """Attach --debug-lifecycle / --debug-programflow flags to *parser*.

    Returns the parser to allow chaining.
    """
    parser.add_argument('--debug-lifecycle',
                        action='store_true',
                        help="This debug flag enables overall program lifecycle messages directly to terminal.")
    parser.add_argument(
        '--debug-programflow',
        action='store_true',
        help="This debug flag enables program flow log messages with fully qualified class/method names.")
    return parser
def _add_cbdir_config(parser):
    """Attach --cbdir / --config node-directory options to *parser*.

    Returns the parser to allow chaining.
    """
    parser.add_argument('--cbdir',
                        type=str,
                        default=None,
                        help="Crossbar.io node directory (overrides ${CROSSBAR_DIR} and the default ./.crossbar)")
    parser.add_argument('--config',
                        type=str,
                        default=None,
                        help="Crossbar.io configuration file (overrides default CBDIR/config.json)")
    return parser
def _add_log_arguments(parser):
    """Attach logging options (--color, --loglevel, --logformat, --logdir,
    --logtofile) to *parser* and return it for chaining.
    """
    color_args = dict({
        "type": str,
        "default": "auto",
        "choices": ["true", "false", "auto"],
        "help": "If logging should be colored."
    })
    parser.add_argument('--color', **color_args)
    log_level_args = dict({
        "type": str,
        "default": 'info',
        "choices": ['none', 'error', 'warn', 'info', 'debug', 'trace'],
        "help": ("How much Crossbar.io should log to the terminal, in order of verbosity.")
    })
    parser.add_argument('--loglevel', **log_level_args)
    parser.add_argument('--logformat',
                        type=str,
                        default='standard',
                        choices=['syslogd', 'standard', 'none'],
                        help=("The format of the logs -- suitable for syslogd, not colored, or colored."))
    parser.add_argument('--logdir',
                        type=str,
                        default=None,
                        help="Crossbar.io log directory (default: <Crossbar Node Directory>/)")
    parser.add_argument('--logtofile', action='store_true', help="Whether or not to log to file")
    return parser
def get_free_tcp_port(host='127.0.0.1'):
    """
    Returns random, free listening port.
    .. note::
        This is _not_ completely race free, as a port returned as free is closed
        before returning, and might then be used by another process before the caller
        of this function can actually bind it. So watch out ..
    :param host: Host (interface) for which to return a free port for.
    :type host: str
    :return: Free TCP listening port.
    :rtype: int
    """
    with contextlib.closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as sock:
        # bind to port 0: the OS picks an ephemeral free port
        sock.bind((host, 0))
        _, port = sock.getsockname()
        return port
def first_free_tcp_port(host='127.0.0.1', portrange=(1024, 65535)):
    """
    Returns the first free listening port within the given range.

    :param host: Host (interface) for which to return a free port for.
    :type host: str
    :param portrange: Pair of start and end port for port range to select free port within.
    :type portrange: tuple
    :return: Free TCP listening port.
    :rtype: int
    :raises IOError: If no port in the range could be bound.
    """
    port, max_port = portrange
    while port <= max_port:
        # BUG FIX: use a fresh socket per attempt and always close it; the
        # original created one socket and leaked it when the whole range
        # was exhausted (IOError raised with the socket still open).
        with contextlib.closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as sock:
            try:
                sock.bind((host, port))
                return port
            except OSError:
                port += 1
    raise IOError('no free ports')
def get_free_tcp_address(host='127.0.0.1'):
    """
    Returns default local listening address with random port.
    Note: this is _not_ completely race free, as a port returned as free
    might be used by another process before the caller can bind it.
    :return: Default/free listening address:port.
    :rtype: str
    """
    # source: https://gist.github.com/gabrielfalcao/20e567e188f588b65ba2
    with contextlib.closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as tcp:
        tcp.bind((host, 0))
        bound_host, bound_port = tcp.getsockname()
    return 'tcp://{}:{}'.format(bound_host, bound_port)
def _deep_merge_map(a, b):
    """Recursively merge mapping *b* into a deep copy of mapping *a*.

    Semantics per key in *b*:
      * value ``None``  -> delete the key from the result (if truthy there)
      * nested mapping  -> recurse via _deep_merge_map
      * list            -> recurse via _deep_merge_list
      * anything else   -> overwrite

    :param a: Base mapping (not mutated).
    :param b: Overlay mapping.
    :return: New merged dict.
    """
    assert isinstance(a, Mapping)
    assert isinstance(b, Mapping)
    new_map = copy.deepcopy(a)
    for k, v in b.items():
        if v is None:
            # None acts as a deletion marker
            if k in new_map and new_map[k]:
                del new_map[k]
        else:
            if k in new_map and isinstance(new_map[k], Mapping):
                assert isinstance(v, Mapping)
                new_map[k] = _deep_merge_map(new_map[k], v)
            # use list, not Sequence, since strings are also Sequences!
            # elif k in new_map and isinstance(new_map[k], Sequence):
            elif k in new_map and isinstance(new_map[k], list):
                # assert isinstance(v, Sequence)
                assert isinstance(v, list)
                new_map[k] = _deep_merge_list(new_map[k], v)
            else:
                new_map[k] = v
    return new_map
def _deep_merge_list(a, b):
    """
    Merges two lists. The list being merged must have length >= the
    list into which is merged.

    Per-item semantics of *b*:
      * ``None``   -> drop the corresponding item of *a*
      * ``'COPY'`` -> keep the corresponding item of *a* unchanged
      * otherwise  -> deep-merge with the corresponding item of *a*
                      (or append as-is beyond a's length)

    :param a: The list into which the other list is merged
    :param b: The list to be merged into the former
    :return: The merged list
    """
    # assert isinstance(a, Sequence)
    # assert isinstance(b, Sequence)
    assert isinstance(a, list)
    assert isinstance(b, list)
    assert len(b) >= len(a)
    if len(b) > len(a):
        # the extension region has no counterpart in *a*, so the
        # sentinels None/'COPY' would be meaningless there
        for i in range(len(a), len(b)):
            assert b[i] is not None
            assert b[i] != 'COPY'
    new_list = []
    i = 0
    # NOTE: i advances for every item of b, including dropped (None) ones,
    # keeping positional correspondence with *a*.
    for item in b:
        if item is None:
            # drop the item from the target list
            pass
        elif item == 'COPY':
            # copy the item to the target list
            new_list.append(a[i])
        else:
            if i < len(a):
                # add merged item
                new_list.append(_deep_merge_object(a[i], item))
            else:
                # add new item
                new_list.append(item)
        i += 1
    return new_list
def _deep_merge_object(a, b):
    """Dispatch deep merge by the type of *a*.

    Lists and mappings recurse into their dedicated mergers; any other
    value is simply replaced by *b*.

    :param a: Base value.
    :param b: Overlay value.
    :return: Merged value.
    """
    # if isinstance(a, Sequence):
    if isinstance(a, list):
        return _deep_merge_list(a, b)
    if isinstance(a, Mapping):
        return _deep_merge_map(a, b)
    return b
def merge_config(base_config, other_config):
    """Merge two node configurations, overlaying *other_config* on
    *base_config*.

    Only the 'controller' (mapping) and 'workers' (list) sections are
    merged; everything else of *base_config* is kept via deep copy.

    :param base_config: Base configuration mapping (not mutated).
    :param other_config: Overlay configuration mapping.
    :return: New merged configuration dict.
    :raises InvalidConfigException: If either argument is not a mapping.
    """
    if not isinstance(base_config, Mapping):
        raise InvalidConfigException('invalid type for configuration item - expected dict, got {}'.format(
            type(base_config).__name__))
    if not isinstance(other_config, Mapping):
        raise InvalidConfigException('invalid type for configuration item - expected dict, got {}'.format(
            type(other_config).__name__))
    merged_config = copy.deepcopy(base_config)
    if 'controller' in other_config:
        merged_config['controller'] = _deep_merge_map(merged_config.get('controller', {}), other_config['controller'])
    if 'workers' in other_config:
        merged_config['workers'] = _deep_merge_list(base_config.get('workers', []), other_config['workers'])
    return merged_config
def extract_member_oid(details: CallDetails) -> uuid.UUID:
    """
    Extract the XBR network member ID from the WAMP session authid (eg ``member_oid==72de3e0c-ca62-452f-8f09-2d3d30d1b511`` from ``authid=="member-72de3e0c-ca62-452f-8f09-2d3d30d1b511"``
    :param details: Call details.
    :return: Extracted XBR network member ID.
    """
    if details and details.caller_authrole == 'member' and details.caller_authid:
        # authid looks like "member-<uuid>": skip the 7-char "member-" prefix
        return uuid.UUID(details.caller_authid[7:])
    raise RuntimeError('no XBR member identification in call details\n{}'.format(details))
def alternative_username(username):
    """Return an alternative for *username* by bumping its numeric suffix.

    The longest all-digit suffix is incremented ("user9" -> "user10");
    if there is no digit suffix, "1" is appended ("user" -> "user1").
    """
    suffix_start = None
    for i in range(len(username) - 1, -1, -1):
        if username[i:].isdigit():
            # keep extending the suffix leftwards while it stays numeric
            suffix_start = i
    if suffix_start is None:
        return '{}{}'.format(username, 1)
    bumped = int(username[suffix_start:]) + 1
    return '{}{}'.format(username[:suffix_start], bumped)
def maybe_from_env(value):
    """Resolve a "${VARNAME}" configuration value from the environment.

    Returns a pair (resolved, value): (True, env value) when the variable
    exists; (False, None) with a warning when it is referenced but unset;
    and (False, original value) for non-string or non-pattern inputs.
    """
    if not isinstance(value, str):
        return False, value
    match = _ENVPAT.match(value)
    if not (match and match.groups()):
        return False, value
    var = match.groups()[0]
    if var in os.environ:
        return True, os.environ[var]
    print(
        'WARNING: environment variable "{}" not set, but needed in XBR backend configuration'.format(var))
    return False, None
|
crossbarioREPO_NAMEcrossbarPATH_START.@crossbar_extracted@crossbar-master@crossbar@_util.py@.PATH_END.py
|
{
"filename": "_stylesrc.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/scattermap/hoverlabel/font/_stylesrc.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class StylesrcValidator(_plotly_utils.basevalidators.SrcValidator):
    """Validator for the ``scattermap.hoverlabel.font.stylesrc`` property."""

    def __init__(
        self, plotly_name="stylesrc", parent_name="scattermap.hoverlabel.font", **kwargs
    ):
        # Default edit type is "none" unless the caller overrides it.
        edit_type = kwargs.pop("edit_type", "none")
        super(StylesrcValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@scattermap@hoverlabel@font@_stylesrc.py@.PATH_END.py
|
{
"filename": "_visible.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/layout/slider/_visible.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class VisibleValidator(_plotly_utils.basevalidators.BooleanValidator):
    """Validator for the ``layout.slider.visible`` boolean property."""

    def __init__(self, plotly_name="visible", parent_name="layout.slider", **kwargs):
        # Defaults per schema: "arraydraw" edit type, "info" role.
        edit_type = kwargs.pop("edit_type", "arraydraw")
        role = kwargs.pop("role", "info")
        super(VisibleValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            role=role,
            **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@layout@slider@_visible.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "sunpy/ndcube",
"repo_path": "ndcube_extracted/ndcube-main/ndcube/visualization/__init__.py",
"type": "Python"
}
|
from .base import BasePlotter
from .descriptor import PlotterDescriptor
__all__ = ['BasePlotter', "PlotterDescriptor"]
|
sunpyREPO_NAMEndcubePATH_START.@ndcube_extracted@ndcube-main@ndcube@visualization@__init__.py@.PATH_END.py
|
{
"filename": "utils.py",
"repo_name": "johnveitch/cpnest",
"repo_path": "cpnest_extracted/cpnest-master/cpnest/utils.py",
"type": "Python"
}
|
import os
import logging
# Default formats and level names
FORMATTER = logging.Formatter(
'%(asctime)s - %(name)-38s: %(message)s',
datefmt='%Y-%m-%d, %H:%M:%S'
)
LEVELS = ['CRITICAL', 'WARNING', 'INFO', 'DEBUG']
LOGGER = logging.getLogger('cpnest.utils')
class _Handler(logging.Handler):
    """Mixin handler mapping an integer verbosity (index into LEVELS)
    to a logging level, with the module's default formatter attached.
    """

    def __init__(self, verbose=0, **kwargs):
        super().__init__(**kwargs)
        self.set_verbosity(verbose)
        self.setFormatter(FORMATTER)

    def get_verbosity(self):
        """Return the current integer verbosity."""
        return self._verbose

    def set_verbosity(self, verbose):
        """Set verbosity (0..3 indexing LEVELS) and update the handler level."""
        LOGGER.warning('Setting verbosity to {}'.format(verbose))
        self._verbose = verbose
        self.setLevel(LEVELS[verbose])
class StreamHandler(_Handler, logging.StreamHandler):
    """Verbosity-aware stream handler (see _Handler)."""

    def __init__(self, verbose=0, **kwargs):
        super().__init__(verbose=verbose, **kwargs)
class FileHandler(_Handler, logging.FileHandler):
    """Verbosity-aware file handler (see _Handler)."""

    def __init__(self, filename, verbose=0, **kwargs):
        super().__init__(filename=filename, verbose=verbose, **kwargs)
class LogFile:
    """
    Context manager for file logging. It logs everything from `logger`
    in some file at a given `filename`.
    Parameters
    ----------
    filename : str
        Filename under which to save the log.
    verbose : int, optional
        Logging level verbosity 0='CRITICAL' 1='WARNING' 2='INFO' 3='DEBUG'.
    loggername : str, optional
        Name of the logger to send to file at `path`. Default is `'cpnest'` so
        all cpnest logs are recorded. E.g. specify `'cpnest.cpnest'` to only
        record logs from the `cpnest.py` module.
    Attributes
    ----------
    handler : logging.FileHandler
        File handler object.
    Examples
    --------
    ```python
    from cpnest.utils import LogFile
    with LogFile('example.log') as flog:
        # Do some stuff here and it will be logged to 'example.log'
        ...
    # Do some stuff here and it won't be logged to 'example.log'
    with flog:
        # Do some stuff here and it will be logged to 'example.log'
        ...
    ```
    """

    def __init__(self, filename, verbose=0, loggername='cpnest'):
        self._filename = filename
        self._verbose = verbose
        self._logger = logging.getLogger(loggername)
        # handler is non-None only while the log file is open
        self.handler = None

    def open(self):
        # attach a fresh FileHandler; re-entering re-opens the same file
        self.handler = FileHandler(self._filename, verbose=self._verbose)
        self._logger.addHandler(self.handler)

    def close(self):
        # detach before closing so no records race into a closed stream
        self._logger.removeHandler(self.handler)
        self.handler.close()
        self.handler = None

    def __enter__(self):
        self.open()
        return self

    def __exit__(self, type, value, traceback):
        self.close()
|
johnveitchREPO_NAMEcpnestPATH_START.@cpnest_extracted@cpnest-master@cpnest@utils.py@.PATH_END.py
|
{
"filename": "lctemplate.py",
"repo_name": "nanograv/PINT",
"repo_path": "PINT_extracted/PINT-master/src/pint/templates/lctemplate.py",
"type": "Python"
}
|
"""Normalized template representing directional data
Implements a mixture model of LCPrimitives to form a normalized template representing directional data.
author: M. Kerr <matthew.kerr@gmail.com>
"""
import contextlib
import logging
from collections import defaultdict
from copy import deepcopy
import numpy as np
from .lcnorm import NormAngles
from .lcenorm import ENormAngles
from .lceprimitives import *
log = logging.getLogger(__name__)
def isvector(x):
    """True if *x* has at least one array dimension (i.e. is not scalar)."""
    return np.asarray(x).ndim > 0
class LCTemplate:
"""Manage a lightcurve template (collection of LCPrimitive objects).
IMPORTANT: a constant background is assumed in the overall model,
so there is no need to furnish this separately.
Parameters
----------
primitives : list of LCPrimitive
norms : NormAngles or tuple of float, optional
If a tuple, they are relative amplitudes for the primitive components.
"""
def __init__(self, primitives, norms=None, cache_kwargs=None):
    """Build a template from LCPrimitive components and normalizations.

    If *norms* is None, equal weights summing to unity are used.
    """
    self.primitives = primitives
    self.shift_mode = np.any([p.shift_mode for p in self.primitives])
    if norms is None:
        # default: equal weights summing to unity
        norms = np.ones(len(primitives)) / len(primitives)
    # duck-type: NormAngles-like objects expose _make_p
    self.norms = norms if hasattr(norms, "_make_p") else NormAngles(norms)
    self._sanity_checks()
    # NB: defaultdict(None) has no default factory -- missing keys raise
    self._cache = defaultdict(None)
    self._cache_dirty = defaultdict(lambda: True)
    if cache_kwargs is None:
        cache_kwargs = {}
    self.set_cache_properties(**cache_kwargs)
def __setstate__(self, state):
    """Restore from pickle, back-filling attributes added in newer
    versions of the class (cache bookkeeping, phase/energy edges).
    """
    # TEMPORARY to handle changed class definition
    self.__dict__.update(state)
    _cache_dirty = defaultdict(lambda: True)
    if not hasattr(self, "_cache_dirty"):
        self._cache = defaultdict(None)
    else:
        # make _cache_dirty a defaultdict from a normal dict
        _cache_dirty.update(self._cache_dirty)
    self._cache_dirty = _cache_dirty
    if not hasattr(self, "ncache"):
        self.ncache = 1000
    if not hasattr(self, "ph_edges"):
        self.ph_edges = np.linspace(0, 1, self.ncache + 1)
    if not hasattr(self, "en_cens"):
        self.en_cens = None
    if not hasattr(self, "en_edges"):
        self.en_edges = None
def __getstate__(self):
    """Return a picklable state dict."""
    # transform _cache_dirty into a normal dict, necessary to pickle it
    # (its default factory is a lambda, which pickle cannot handle)
    state = self.__dict__.copy()
    state["_cache_dirty"] = dict(state["_cache_dirty"])
    return state
def _sanity_checks(self):
    # every primitive needs exactly one normalization entry
    if len(self.primitives) != len(self.norms):
        raise ValueError("Must provide a normalization for each component.")
def is_energy_dependent(self):
    """True if any primitive or the normalization is energy dependent."""
    prims_dep = np.any([p.is_energy_dependent() for p in self.primitives])
    return prims_dep or self.norms.is_energy_dependent()
def has_bridge(self):
    """This template class implements no bridge (pedestal) component."""
    return False
def __getitem__(self, index):
    """Index primitives 0..n-1; index n (or -1) returns the norms object."""
    if index < 0:
        # negative indices wrap over n+1 slots (primitives + norms)
        index += len(self.primitives) + 1
    return self.norms if index == len(self.primitives) else self.primitives[index]
def __setitem__(self, index, value):
    """Assign primitive slots 0..n-1; slot n (or -1) replaces the norms."""
    if index < 0:
        # negative indices wrap over n+1 slots (primitives + norms)
        index += len(self.primitives) + 1
    if index == len(self.primitives):
        self.norms = value
    else:
        self.primitives[index] = value
def __len__(self):
    # sourcery skip: remove-unreachable-code
    # Deliberately disabled: raising here flushes out any remaining
    # callers before the method is removed for good.
    raise DeprecationWarning("I'd like to see if this is used.")
    return len(self.primitives)
def copy(self):
    """Return a deep copy, carrying over cache settings and contents."""
    prims = [deepcopy(x) for x in self.primitives]
    norms = self.norms.copy()
    cache_kwargs = dict(ncache=self.ncache, en_edges=self.en_edges)
    newcopy = self.__class__(prims, norms, cache_kwargs=cache_kwargs)
    # share the already-computed cache tables and their dirty flags
    for key in self._cache.keys():
        newcopy._cache[key] = self._cache[key]
        newcopy._cache_dirty[key] = self._cache_dirty[key]
    return newcopy
def set_cache_properties(self, ncache=1000, en_edges=None):
    """Set the granularity and behavior of the cache.
    In all cases, ncache sets the phase resolution. If it is desired
    to have energy dependence, en_edges must be specified as a set of
    edges in log10 space which fully encompass all possible photon
    energies that wil be used.
    Interpolation is always linear (bilinear in energy if applicable.)
    """
    # no-op if nothing actually changed (avoids invalidating the cache)
    if hasattr(self, "ncache") and (ncache == self.ncache):
        if (en_edges is None) and (self.en_edges is None):
            return
        elif np.all(en_edges == self.en_edges):
            return
    self.ncache = ncache
    # cached values live on ncache+1 phase bin EDGES spanning [0,1]
    self.ph_edges = np.linspace(0, 1, ncache + 1)
    if en_edges is None:
        self.en_edges = None
        self.en_cens = None
    else:
        en_edges = np.asarray(en_edges)
        if len(en_edges) < 2:
            raise ValueError("len(en_edges) must be >=2.")
        self.en_edges = en_edges
        self.en_cens = 0.5 * (en_edges[1:] + en_edges[:-1])
    self.mark_cache_dirty()
def mark_cache_dirty(self):
    """Invalidate every cached lookup table (all derivative orders)."""
    for key in self._cache_dirty:
        self._cache_dirty[key] = True
def get_cache(self, order=0):
    """Return the cached table for derivative *order*, rebuilding it if
    it has been marked dirty.
    """
    if self._cache_dirty[order]:
        self.set_cache(order=order)
    # I don't see how it's possible to have a cache with wrong shape.
    rval = self._cache[order]
    if self.en_edges is not None:
        # energy-dependent cache: leading axis is the energy edges
        assert rval.shape[0] == len(self.en_edges)
    # last axis is always the ncache+1 phase edges
    assert rval.shape[-1] == (self.ncache + 1)
    return rval
def set_cache(self, order=0):
    """Populate the cache with values along the bin edges."""
    ncache = self.ncache
    if self.en_edges is None:
        # energy-independent: a single 1-D table over phase edges
        if order == 0:
            new_cache = self(self.ph_edges)
        else:
            new_cache = self.derivative(self.ph_edges, order=order)
    else:
        # energy-dependent: one row of phase values per energy edge
        new_cache = np.empty((len(self.en_edges), len(self.ph_edges)))
        if order == 0:
            for ibin, en in enumerate(self.en_edges):
                new_cache[ibin] = self(self.ph_edges, log10_ens=en)
        else:
            for ibin, en in enumerate(self.en_edges):
                new_cache[ibin] = self.derivative(
                    self.ph_edges, log10_ens=en, order=order
                )
    self._cache[order] = new_cache
    self._cache_dirty[order] = False
def eval_cache(self, phases, log10_ens=3, order=0):
    """
    Cached values are stored on edges in both phase and, if applicable,
    energy, so lookup is straightforward.

    Performs linear interpolation in phase and, when energy edges are
    configured, bilinear interpolation in (energy, phase).
    """
    cache = self.get_cache(order=order)
    # fractional index into the phase-edge grid
    dphi = np.atleast_1d(phases) * self.ncache
    phind_lo = dphi.astype(int)
    phind_hi = phind_lo + (phind_lo < self.ncache)  # allows ph==1
    dphi_hi = dphi - phind_lo
    dphi_lo = 1.0 - dphi_hi
    assert np.all(dphi_hi >= 0)
    assert np.all(dphi_hi <= 1)
    assert np.all(dphi_lo >= 0)
    assert np.all(dphi_lo <= 1)
    edges = self.en_edges
    if edges is None:
        # 1-D case: linear interpolation in phase only
        return cache[phind_lo] * dphi_lo + cache[phind_hi] * dphi_hi
    # fractional index into the energy-edge grid
    # NOTE(review): assumes uniformly spaced en_edges and log10_ens as an
    # array within [edges[0], edges[-1]] -- confirm against callers.
    de = (log10_ens - edges[0]) / (edges[1] - edges[0])
    eind_lo = de.astype(int)
    eind_hi = eind_lo + 1
    de_hi = de - eind_lo
    de_lo = 1.0 - de_hi
    assert np.all(de_hi >= 0)
    assert np.all(de_hi <= 1)
    assert np.all(de_lo >= 0)
    assert np.all(de_lo <= 1)
    # bilinear combination of the four surrounding grid points
    v00 = cache[eind_lo, phind_lo] * (de_lo * dphi_lo)
    v01 = cache[eind_lo, phind_hi] * (de_lo * dphi_hi)
    v10 = cache[eind_hi, phind_lo] * (de_hi * dphi_lo)
    v11 = cache[eind_hi, phind_hi] * (de_hi * dphi_hi)
    return v00 + v01 + v10 + v11
def set_parameters(self, p, free=True):
    """Set (free) parameters from the flat array *p*: primitive
    parameters first, in order, then the normalization parameters.

    Invalidates the cache and returns True if the new parameters lie
    within their bounds (see check_bounds).
    """
    # (removed unused local "params_ok" from the original)
    start = 0
    for prim in self.primitives:
        n = len(prim.get_parameters(free=free))
        prim.set_parameters(p[start : start + n], free=free)
        start += n
    self.norms.set_parameters(p[start:], free)
    # any cached lookup tables are now stale
    self.mark_cache_dirty()
    return self.check_bounds(free=free)
def set_errors(self, errs):
    """Distribute error estimates: primitives consume a prefix of *errs*
    (each returns how many it used), the remainder goes to the norms."""
    offset = 0
    for prim in self.primitives:
        offset += prim.set_errors(errs[offset:])
    self.norms.set_errors(errs[offset:])
def get_parameters(self, free=True):
    """Return a flat array: primitive parameters, then norm parameters."""
    prim_params = np.concatenate(
        [prim.get_parameters(free) for prim in self.primitives]
    )
    return np.append(prim_params, self.norms.get_parameters(free))
def num_parameters(self, free=True):
    """Return the total number of free parameters."""
    total = self.norms.num_parameters(free)
    for prim in self.primitives:
        total += prim.num_parameters(free)
    return total
def _set_all_free_or_fixed(self, freeze=True):
    # Shared implementation behind free_parameters/freeze_parameters:
    # set every free-mask entry (in place) on primitives and norms.
    state = not freeze
    for prim in self.primitives:
        prim.free[:] = state
    self.norms.free[:] = state
def free_parameters(self):
    """Free all parameters. Convenience function."""
    self._set_all_free_or_fixed(freeze=False)
def freeze_parameters(self):
    """Freeze all parameters. Convenience function."""
    self._set_all_free_or_fixed(freeze=True)
def get_errors(self, free=True):
    """Return a flat array of errors: primitives first, then norms."""
    prim_errs = np.concatenate([prim.get_errors(free) for prim in self.primitives])
    return np.append(prim_errs, self.norms.get_errors(free))
def get_free_mask(self):
    """Return a mask with True if parameters are free, else False."""
    prim_mask = np.concatenate([p.get_free_mask() for p in self.primitives])
    return np.append(prim_mask, self.norms.get_free_mask())
def get_parameter_names(self, free=True):
    """Return short display names for all (free) parameters.

    Primitive names look like "P<i>_<Prm>_<par>" where <Prm>/<par> are
    3-letter abbreviations (plus a trailing digit when the name ends in
    one, to disambiguate e.g. Gaussian2); norm names are "Norm_<par>".
    """
    # I will no doubt hate myself in future for below comprehension
    # (or rather lack thereof); this comment will not assuage my rage
    prim_names = [
        "P%d_%s_%s"
        % (
            iprim + 1,
            prim.name[:3] + (prim.name[-1] if prim.name[-1].isdigit() else ""),
            pname[:3] + (pname[-1] if pname[-1].isdigit() else ""),
        )
        for iprim, prim in enumerate(self.primitives)
        for pname in prim.get_parameter_names(free=free)
    ]
    norm_names = [
        "Norm_%s" % pname for pname in self.norms.get_parameter_names(free=free)
    ]
    return prim_names + norm_names
    # return np.append(np.concatenate( [prim.pnames(free) for prim in self.primitives]) , self.norms.get_parameters(free))
def get_gaussian_prior(self):
    """Assemble a GaussianPrior over all parameters.

    Primitive priors are concatenated; normalization parameters get
    zero-filled (disabled) entries. GaussianPrior comes from the
    wildcard import of lceprimitives.
    """
    locs, widths, mods, enables = [], [], [], []
    for prim in self.primitives:
        l, w, m, e = prim.get_gauss_prior_parameters()
        locs.append(l)
        widths.append(w)
        mods.append(m)
        enables.append(e)
    # zero/False placeholders for the norm parameters (no prior applied)
    t = np.zeros_like(self.norms.get_parameters())
    locs = np.append(np.concatenate(locs), t)
    widths = np.append(np.concatenate(widths), t)
    mods = np.append(np.concatenate(mods), t.astype(bool))
    enables = np.append(np.concatenate(enables), t.astype(bool))
    return GaussianPrior(locs, widths, mods, mask=enables)
def get_bounds(self, free=True):
    """Return [lo, hi] bounds for all (free) parameters as a nested list,
    primitives first, then norms."""
    prim_bounds = np.concatenate([prim.get_bounds(free) for prim in self.primitives])
    norm_bounds = self.norms.get_bounds(free)
    return np.concatenate((prim_bounds, norm_bounds)).tolist()
def check_bounds(self, free=True):
    """True if every (free) parameter lies within its [lo, hi] bounds."""
    bounds = np.asarray(self.get_bounds(free=free))
    params = self.get_parameters(free=free)
    within = (params >= bounds[:, 0]) & (params <= bounds[:, 1])
    return np.all(within)
def set_overall_phase(self, ph):
    """Put the peak of the first component at phase ph."""
    self.mark_cache_dirty()
    if self.shift_mode:
        # in shift mode the first primitive's first parameter IS the
        # overall phase, so set it directly
        self.primitives[0].p[0] = ph
        return
    shift = ph - self.primitives[0].get_location()
    for prim in self.primitives:
        # apply the same shift to every component, wrapping into [0,1)
        new_location = (prim.get_location() + shift) % 1
        prim.set_location(new_location)
def get_location(self):
    """Return the location (phase) of the first, fiducial primitive."""
    first = self.primitives[0]
    return first.get_location()
def get_amplitudes(self, log10_ens=3):
    """Return maximum amplitude of a component."""
    # each primitive peaks at its own location; scale by the norms
    ampls = [p(p.get_location(), log10_ens) for p in self.primitives]
    return self.norms(log10_ens) * np.asarray(ampls)
def get_code(self):
    """Return a short string encoding the components in the template."""
    shortnames = [p.shortname for p in self.primitives]
    return "/".join(shortnames)
def norm(self):
    """Return the total normalization (pulsed fraction) of the template."""
    return self.norms.get_total()
def norm_ok(self):
    """True if the summed component normalization does not exceed unity."""
    return self.norm() <= 1
def integrate(self, phi1, phi2, log10_ens=3, suppress_bg=False):
    """Integrate profile from phi1 to phi2.
    NB that because of the phase modulo ambiguity, it is not uniquely
    definite what the phi2 < phi1 case means:
    integral(0.8,0.2) == -integral(0.2,0.8)
    integral(0.8,1.2) == 1-integral(0.2,0.8)
    To break the ambiguity, we support non-modulo phase here, so you
    can just write integral(0.8,1.2) if that's what you mean.
    """
    phi1 = np.asarray(phi1)
    phi2 = np.asarray(phi2)
    if isvector(log10_ens):
        # vectorized energies must match the phase vector length
        assert len(log10_ens) == len(phi1)
    # TypeError is suppressed so scalar phases pass the length check
    with contextlib.suppress(TypeError):
        assert len(phi1) == len(phi2)
    norms = self.norms(log10_ens=log10_ens)
    t = norms.sum(axis=0)
    dphi = phi2 - phi1
    rvals = np.zeros(phi1.shape, dtype=float)
    for n, prim in zip(norms, self.primitives):
        rvals += n * prim.integrate(phi1, phi2, log10_ens=log10_ens)
    # suppress_bg: renormalize to the pulsed component only; otherwise
    # include the constant background (1-t) over the phase interval
    return rvals * (1.0 / t) if suppress_bg else (1 - t) * dphi + rvals
def cdf(self, x, log10_ens=3):
    """Cumulative distribution: template integral from phase 0 up to x."""
    lower = np.zeros_like(x)
    return self.integrate(lower, x, log10_ens, suppress_bg=False)
def max(self, resolution=0.01):
    """Return the maximum template value on a phase grid of the given
    resolution (grid search, not an analytic maximum)."""
    grid = np.arange(0, 1, resolution)
    return self(grid).max()
def _get_scales(self, phases, log10_ens=3):
    """Method to allow abstraction for setting amplitudes for each
    peak. Trivial in typical cases, but important for linked
    components, e.g. the bridge pedestal.

    Returns (zeroed accumulator matching phases, per-component norms,
    total norm).
    """
    shape = np.asarray(phases).shape
    accumulator = np.zeros(shape, dtype=float)
    norms = self.norms(log10_ens)
    total = norms.sum(axis=0)
    return accumulator, norms, total
def __call__(self, phases, log10_ens=3, suppress_bg=False, use_cache=False):
    """Evaluate template at the provided phases and (if provided)
    energies. If "suppress_bg" is set, ignore the DC component."""
    # TMP -- check phase range. Add this as a formal check?
    phases = np.asarray(phases)
    log10_ens = np.asarray(log10_ens)
    assert np.all(phases >= 0)
    assert np.all(phases <= 1)
    # end TM
    if use_cache:
        # interpolated lookup instead of direct evaluation
        return self.eval_cache(phases, log10_ens=log10_ens, order=0)
    rvals, norms, norm = self._get_scales(phases, log10_ens)
    for n, prim in zip(norms, self.primitives):
        rvals += n * prim(phases, log10_ens=log10_ens)
    # suppress_bg renormalizes to the pulsed part; otherwise add the
    # constant background level (1 - total norm)
    return rvals / norm if suppress_bg else (1.0 - norm) + rvals
def derivative(self, phases, log10_ens=3, order=1, use_cache=False):
    """Return the derivative of the template with respect to pulse
    phase (as opposed to the gradient of the template with respect to
    some of the template parameters)."""
    if use_cache:
        # BUG FIX: log10_ens was previously not forwarded, so energy-
        # dependent templates evaluated the cache at the default energy
        # (inconsistent with __call__, which does forward it).
        return self.eval_cache(phases, log10_ens=log10_ens, order=order)
    rvals = np.zeros_like(phases)
    norms = self.norms(log10_ens=log10_ens)
    for n, prim in zip(norms, self.primitives):
        rvals += n * prim.derivative(phases, log10_ens=log10_ens, order=order)
    return rvals
def single_component(self, index, phases, log10_ens=3, add_bg=False):
    """Evaluate a single component of template."""
    n = self.norms(log10_ens=log10_ens)
    component = self.primitives[index](phases, log10_ens=log10_ens)
    rvals = component * n[index]
    if add_bg:
        return rvals + n.sum(axis=0)
    return rvals
def gradient(self, phases, log10_ens=3, free=True, template_too=False):
    """Return d(template)/d(parameter) evaluated at *phases*.

    Rows index the (free) parameters (primitive parameters first, then
    normalization angles), columns the phases. If template_too, also
    return the template values themselves as a second array.
    """
    r = np.empty((self.num_parameters(free), len(phases)))
    c = 0
    rvals, norms, norm = self._get_scales(phases, log10_ens=log10_ens)
    prim_terms = np.empty((len(self.primitives), len(phases)))
    for i, (nm, prim) in enumerate(zip(norms, self.primitives)):
        n = len(prim.get_parameters(free=free))
        r[c : c + n, :] = nm * prim.gradient(phases, log10_ens=log10_ens, free=free)
        c += n
        # df/dn_i for this component (the "-1" accounts for the DC term)
        prim_terms[i] = prim(phases, log10_ens=log10_ens) - 1
    # handle case where no norm parameters are free
    if c != r.shape[0]:
        # "prim_terms" are df/dn_i and have shape nnorm x nphase
        # the output of gradient is the matrix M_ij or M_ijk, depending
        # on whether or not there is energy dependence, which is
        # dnorm_i/dnorm_angle_j (at energy k). The sum is over the
        # "internal parameter" norm_j, while the phase axis and
        # norm_angle axis are retained.
        m = self.norms.gradient(log10_ens=log10_ens, free=free)
        if len(m.shape) == 2:
            m = m[..., None]
        np.einsum("ij,ikj->kj", prim_terms, m, out=r[c:])
    if template_too:
        # reconstruct the template values from the already-computed terms
        rvals[:] = 1 - norm
        for i in range(len(prim_terms)):
            rvals += (prim_terms[i] + 1) * norms[i]
        return r, rvals
    return r
def gradient_derivative(self, phases, log10_ens=3, free=False):
    """Return d/dphi(gradient). This is the derivative with respect
    to pulse phase of the gradient with respect to the parameters.
    """
    # sourcery skip: remove-unreachable-code
    # Deliberately disabled; the raise flushes out any remaining callers.
    raise NotImplementedError()  # is this used anymore?
    free_mask = self.get_free_mask()
    nparam = len(free_mask)
    nnorm_param = len(self.norms.p)
    nprim_param = nparam - nnorm_param
    rvals = np.empty([nparam, len(phases)])
    prim_terms = np.empty([len(self.primitives), len(phases)])
    norms = self.norms()
    c = 0
    for iprim, prim in enumerate(self.primitives):
        n = len(prim.p)
        rvals[c : c + n] = norms[iprim] * prim.gradient_derivative(phases)
        prim_terms[iprim] = prim.derivative(phases)
        c += n
    norm_grads = self.norms.gradient(phases, free=False)
    for j in range(nnorm_param):
        rvals[nprim_param + j] = 0
        for i in range(nnorm_param):
            rvals[nprim_param + j] += norm_grads[i, j] * prim_terms[i]
    return rvals
def approx_gradient(self, phases, log10_ens=None, eps=1e-5):
return approx_gradient(self, phases, log10_ens=log10_ens, eps=eps)
def approx_hessian(self, phases, log10_ens=None, eps=1e-5):
return approx_hessian(self, phases, log10_ens=log10_ens, eps=eps)
def approx_derivative(self, phases, log10_ens=None, order=1, eps=1e-7):
return approx_derivative(
self, phases, log10_ens=log10_ens, order=order, eps=eps
)
def check_gradient(
self, atol=1e-7, rtol=1e-5, quiet=False, seed=None, en=None, ph=None
):
if seed is not None:
# essentially set a known good state of the RNG to avoid any
# numerical issues interfering with the test
np.random.seed(seed)
return check_gradient(self, atol=atol, rtol=rtol, quiet=quiet, en=en, ph=ph)
def check_derivative(self, atol=1e-7, rtol=1e-5, order=1, eps=1e-7, quiet=False):
return check_derivative(
self, atol=atol, rtol=rtol, quiet=quiet, eps=1e-7, order=order
)
    def hessian(self, phases, log10_ens=3, free=True):
        """Return the hessian of the primitive and normalization angles.

        The primitives components are not coupled due to the additive form
        of the template.  However, because each normalization depends on
        multiple hyper angles, there is in general coupling between the
        normalization components and the primitive components.  The
        general form of the terms is
        (1) block diagonal hessian terms from primitive
        (2) for the unmixed derivative of the norms, the sum of the
            hessian of the hyper angles over the primitive terms
        (3) for mixed derivatives, the product gradient of the norm

        In general, this is pretty complicated if some parameters are free
        and some are not, and (currently) this method isn't used in
        fitting, so for ease of implementation, simply evaluate the whole
        hessian, then return only the relevant parts for the free
        parameters.

        Returns an (nfree x nfree x nphase) array if free, else the full
        (nparam x nparam x nphase) array.
        """
        free_mask = self.get_free_mask()
        nparam = len(free_mask)
        nnorm_param = self.norms.num_parameters()
        # primitive parameters come first; norm angles occupy the tail
        nprim_param = nparam - nnorm_param
        r = np.zeros([nparam, nparam, len(phases)])
        norms = self.norms(log10_ens=log10_ens)
        norm_grads = self.norms.gradient(log10_ens=log10_ens, free=False)
        prim_terms = np.empty([len(self.primitives), len(phases)])
        c = 0
        for i, prim in enumerate(self.primitives):
            h = prim.hessian(phases, log10_ens=log10_ens, free=False)
            pg = prim.gradient(phases, log10_ens=log10_ens, free=False)
            n = len(prim.p)
            # put hessian in diagonal elements
            r[c : c + n, c : c + n, :] = norms[i] * h
            # put cross-terms with normalization; although only one primitive
            # survives in the second derivative, all of the normalization angles
            # feature
            for j in range(n):
                for k in range(nnorm_param):
                    r[nprim_param + k, c + j, :] = pg[j] * norm_grads[i, k]
                    # hessian is symmetric; mirror the cross-term
                    r[c + j, nprim_param + k, :] = r[nprim_param + k, c + j, :]
            prim_terms[i, :] = prim(phases, log10_ens=log10_ens) - 1
            c += n
        # now put in normalization hessian
        hnorm = self.norms.hessian(
            log10_ens=log10_ens
        )  # nnorm_param x nnorm_param x nnorm_param
        # after the loop, c == nprim_param, so c+j indexes the norm block
        for j in range(nnorm_param):
            for k in range(j, nnorm_param):
                for i in range(nnorm_param):
                    r[c + j, c + k, :] += hnorm[i, j, k] * prim_terms[i]
                r[c + k, c + j, :] = r[c + j, c + k, :]
        return r[free_mask][:, free_mask] if free else r
def delta(self, index=None):
"""Return radio lag -- reckoned by default as the position of the first peak following phase 0."""
if (index is not None) and (index <= (len(self.primitives))):
return self[index].get_location(error=True)
return self.Delta(delta=True)
def Delta(self, delta=False):
"""Report peak separation -- reckoned by default as the distance
between the first and final component locations.
delta [False] -- if True, return the first peak position"""
if len(self.primitives) == 1:
return -1, 0
prim0, prim1 = self.primitives[0], self.primitives[-1]
for p in self.primitives:
if p.get_location() < prim0.get_location():
prim0 = p
if p.get_location() > prim1.get_location():
prim1 = p
p1, e1 = prim0.get_location(error=True)
p2, e2 = prim1.get_location(error=True)
return (p1, e1) if delta else (p2 - p1, (e1**2 + e2**2) ** 0.5)
def _sorted_prims(self):
def cmp(p1, p2):
if p1.p[-1] < p2.p[-1]:
return -1
elif p1.p[-1] == p2.p[-1]:
return 0
else:
return 1
return sorted(self.primitives, cmp=cmp)
def __str__(self):
prims = self.primitives
s0 = str(self.norms)
s1 = (
"\n\n"
+ "\n\n".join(
["P%d -- " % (i + 1) + str(prim) for i, prim in enumerate(prims)]
)
+ "\n"
)
if len(prims) > 1:
s1 += "\ndelta : %.4f +\\- %.4f" % self.delta()
s1 += "\nDelta : %.4f +\\- %.4f" % self.Delta()
return s0 + s1
def prof_string(self, outputfile=None):
"""Return a string compatible with the format used by pygaussfit.
Assume all primitives are gaussians."""
rstrings = []
dashes = "-" * 25
norm, errnorm = 0, 0
for nprim, prim in enumerate(self.primitives):
phas = prim.get_location(error=True)
fwhm = 2 * prim.get_width(error=True, hwhm=True)
ampl = [self.norms()[nprim], 0]
norm += ampl[0]
errnorm += ampl[1] ** 2
for st, va in zip(["phas", "fwhm", "ampl"], [phas, fwhm, ampl]):
rstrings += ["%s%d = %.5f +/- %.5f" % (st, nprim + 1, va[0], va[1])]
const = "const = %.5f +/- %.5f" % (1 - norm, errnorm**0.5)
rstring = [dashes] + [const] + rstrings + [dashes]
if outputfile is not None:
f = open(outputfile, "w")
f.write("# gauss\n")
for s in rstring:
f.write(s + "\n")
return "\n".join(rstring)
    def random(self, n, weights=None, log10_ens=3, return_partition=False):
        """Return n pseudo-random variables drawn from the distribution
        given by this light curve template.

        For simplicity, if weights are not provided, unit weights are
        assumed.  If energies are not provided, a vector of 1 GeV (3)
        is assumed.

        Next, optionally using the weights and the energy vectors, the
        probability for each realization to arise from the primitives or
        the background is determined.  Those probabilities are used in a
        multinomial to determine which component will generate each photon,
        and finally using that partition the correct number of phases are
        simulated from each component.

        Weights ("w") are interpreted as the probability to originate from
        the source, which includes the DC component, so the total prob. to
        be DC is (1-w) (background) + w*sum_prims (unpulsed).

        Returns the phases, and additionally the per-photon component
        indices if return_partition is set.
        """
        n = int(round(n))
        if len(self.primitives) == 0:
            # no pulsed components: everything is uniform on [0,1)
            return (np.random.rand(n), [n]) if return_partition else np.random.rand(n)
        # check weights
        if weights is None:
            weights = np.ones(n)
        elif len(weights) != n:
            raise ValueError("Provided weight vector does not match requested n.")
        # check energies
        if isvector(log10_ens):
            if len(log10_ens) != n:
                raise ValueError(
                    "Provided log10_ens vector does not match requested n."
                )
        else:
            # broadcast scalar energy to all photons
            log10_ens = np.full(n, log10_ens)
        # first, calculate the energy dependent norm of each vector
        norms = self.norms(log10_ens=log10_ens)  # nnorm x nen array
        N = norms.sum(axis=0)
        nDC = weights * N
        pDC = 1 - nDC
        # per-photon component probabilities: primitives first, DC last
        partition_probs = np.append(norms / N * nDC, pDC[None, :], axis=0)
        # now, draw a component for each bit of the partition
        cpp = np.cumsum(partition_probs, axis=0)
        assert np.allclose(cpp[-1], 1)
        comps = np.full(n, len(self.primitives))
        Q = np.random.rand(n)
        # walk the cumulative probabilities top-down so each photon ends up
        # assigned to the lowest-index component whose cdf exceeds Q
        for i in np.arange(len(self.primitives))[::-1]:
            mask = Q < cpp[i]
            comps[mask] = i
        total = 0
        rvals = np.empty(n)
        rvals[:] = np.nan  # TMP -- sentinel checked by the assert below
        total = 0
        for iprim, prim in enumerate(self.primitives):
            mask = comps == iprim
            total += mask.sum()
            rvals[mask] = prim.random(mask.sum(), log10_ens=log10_ens[mask])
        # DC component
        mask = comps == len(self.primitives)
        total += mask.sum()
        rvals[mask] = np.random.rand(mask.sum())
        assert not np.any(np.isnan(rvals))  # TMP
        return (rvals, comps) if return_partition else rvals
def swap_primitive(self, index, ptype=LCLorentzian):
"""Swap the specified primitive for a new one with the parameters
that match the old one as closely as possible."""
self.primitives[index] = convert_primitive(self.primitives[index], ptype)
def delete_primitive(self, index, inplace=False):
"""Return a new LCTemplate with the primitive at index removed.
The flux is renormalized to preserve the same pulsed ratio (in the
case of an energy-dependent template, at the pivot energy).
"""
norms, prims = self.norms, self.primitives
if len(prims) == 1:
raise ValueError("Template only has a single primitive.")
if index < 0:
index += len(prims)
newprims = [deepcopy(p) for ip, p in enumerate(prims) if index != ip]
newnorms = self.norms.delete_component(index)
if not inplace:
return LCTemplate(newprims, newnorms)
self.primitives = newprims
self.norms = newnorms
def add_primitive(self, prim, norm=0.1, inplace=False):
"""[Convenience] -- return a new LCTemplate with the specified
LCPrimitive added and renormalized."""
norms, prims = self.norms, self.primitives
if len(prims) == 0:
# special case of an empty profile
return LCTemplate([prim], [1])
nprims = [deepcopy(prims[i]) for i in range(len(prims))] + [prim]
nnorms = self.norms.add_component(norm)
if not inplace:
return LCTemplate(nprims, nnorms)
self.norms = nnorms
self.primitives = nprims
def order_primitives(self, order=0):
"""Re-order components in place.
order == 0: order by ascending position
order == 1: order by descending maximum amplitude
order == 2: order by descending normalization
"""
if order == 0:
indices = np.argsort([p.get_location() for p in self.primitives])
elif order == 1:
indices = np.argsort(self.get_amplitudes())[::-1]
elif order == 2:
indices = np.argsort(self.norms())[::-1]
else:
raise NotImplementedError("Specified order not supported.")
self.primitives = [self.primitives[i] for i in indices]
self.norms.reorder_components(indices)
def get_fixed_energy_version(self, log10_en=3):
return self
    def add_energy_dependence(self, index, slope_free=True):
        """Replace the component at *index* with its energy-dependent
        counterpart, in place.

        Parameters
        ----------
        index :
            component index as accepted by self[...]; the component may be
            the NormAngles object or a primitive.
        slope_free : bool
            whether the new energy-slope parameters start out free.

        Raises NotImplementedError for primitives without an
        energy-dependent counterpart (only Gaussian and VonMises are
        supported).
        """
        comp = self[index]
        if comp.is_energy_dependent():
            # already energy dependent; nothing to do
            return
        if comp.name == "NormAngles":
            # normalization
            newcomp = ENormAngles(self.norms())
        else:
            # primitive
            if comp.name == "Gaussian":
                constructor = LCEGaussian
            elif comp.name == "VonMises":
                constructor = LCEVonMises
            else:
                raise NotImplementedError(f"{comp.name} not supported.")
            newcomp = constructor(p=comp.p)
        # carry over the free/fixed state of the original parameters
        newcomp.free[:] = comp.free
        newcomp.slope_free[:] = slope_free
        self[index] = newcomp
def get_eval_string(self):
"""Return a string that can be "eval"ed to make a cloned set of
primitives and template."""
ps = "\n".join(
("p%d = %s" % (i, p.eval_string()) for i, p in enumerate(self.primitives))
)
prims = f'[{",".join("p%d" % i for i in range(len(self.primitives)))}]'
ns = f"norms = {self.norms.eval_string()}"
return f"{self.__class__.__name__}({prims},norms)"
def closest_to_peak(self, phases):
return min((p.closest_to_peak(phases) for p in self.primitives))
def mean_value(self, phases, log10_ens=None, weights=None, bins=20):
"""Compute the mean value of the profile over the codomain of
phases. Mean is taken over energy and is unweighted unless
a set of weights are provided."""
if (log10_ens is None) or (not self.is_energy_dependent()):
return self(phases)
if weights is None:
weights = np.ones_like(log10_ens)
edges = np.linspace(log10_ens.min(), log10_ens.max(), bins + 1)
w = np.histogram(log10_ens, weights=weights, bins=edges)
rvals = np.zeros_like(phases)
for weight, en in zip(w[0], (edges[:-1] + edges[1:]) / 2):
rvals += weight * self(phases, en)
rvals /= w[0].sum()
return rvals
def mean_single_component(
self, index, phases, log10_ens=None, weights=None, bins=20, add_pedestal=True
):
prim = self.primitives[index]
if (log10_ens is None) or (not self.is_energy_dependent()):
n = self.norms()
return prim(phases) * n[index] + add_pedestal * (1 - n.sum())
if weights is None:
weights = np.ones_like(log10_ens)
edges = np.linspace(log10_ens.min(), log10_ens.max(), bins + 1)
w = np.histogram(log10_ens, weights=weights, bins=edges)
rvals = np.zeros_like(phases)
for weight, en in zip(w[0], (edges[:-1] + edges[1:]) / 2):
rvals += weight * prim(phases, en) * self.norms(en)[index]
rvals /= w[0].sum()
return rvals
def rotate(self, dphi):
"""Adjust the template by dphi."""
self.mark_cache_dirty()
log.info(f"Shifting template by {dphi}.")
for prim in self.primitives:
new_location = (prim.get_location() + dphi) % 1
prim.set_location(new_location)
def get_display_point(self, do_rotate=False):
# TODO -- need to fix this to scan all the way around, either
# from -0.5 to 0 or from 0.5 to 1.0, whichever -- see J0102
"""Return phase shift which optimizes the display of the profile.
This is determined by finding the 60% window which contains the
most flux and returning the left edge. Rotating the profile such
that this edge is at phi=0.20 would then center this interval, so
the resulting phase shift would do that.
"""
N = 50
dom = np.linspace(0, 1, 2 * N + 1)[:-1]
cod = self.integrate(dom, dom + 0.6)
dphi = 0.20 - dom[np.argmax(cod)]
if do_rotate:
self.rotate(dphi)
return dphi
def write_profile(self, fname, nbin, integral=False, suppress_bg=False):
"""Write out a two-column tabular profile to file fname.
The first column indicates the left edge of the phase bin, while
the right column indicates the profile value.
Parameters
----------
integral : bool
if True, integrate the profile over the bins. Otherwise, differential
value at indicated bin phase.
suppress_bg : bool
if True, do not include the unpulsed (DC) component
"""
if not integral:
bin_phases = np.linspace(0, 1, nbin + 1)[:-1]
bin_values = self(bin_phases, suppress_bg=suppress_bg)
bin_values *= 1.0 / bin_values.mean()
else:
phases = np.linspace(0, 1, 2 * nbin + 1)
values = self(phases, suppress_bg=suppress_bg)
hi = values[2::2]
lo = values[:-1:2]
mid = values[1::2]
bin_phases = phases[:-1:2]
bin_values = 1.0 / (6 * nbin) * (hi + 4 * mid + lo)
bin_values *= 1.0 / bin_values.mean()
open(fname, "w").write(
"".join(("%.6f %.6f\n" % (x, y) for x, y in zip(bin_phases, bin_values)))
)
def get_gauss2(
    pulse_frac=1,
    x1=0.1,
    x2=0.55,
    ratio=1.5,
    width1=0.01,
    width2=0.02,
    lorentzian=False,
    bridge_frac=0,
    skew=False,
):
    """Return a two-gaussian template. Convenience function.

    Parameters
    ----------
    pulse_frac : total pulsed fraction shared by the two peaks
    x1, x2 : peak positions in phase
    ratio : amplitude ratio of peak 1 to peak 2
    width1, width2 : peak widths
    lorentzian : use Lorentzian primitives instead of Gaussians
    bridge_frac : fraction of the pulsed flux placed in a fixed-width
        bridge component centered between the peaks
    skew : doubles as a flag and a fractional asymmetry; if truthy,
        two-sided primitives are used with the off-side width scaled
        by (1 + skew)
    """
    n1, n2 = np.asarray([ratio, 1.0]) * (1 - bridge_frac) * (pulse_frac / (1.0 + ratio))
    if skew:
        prim = LCLorentzian2 if lorentzian else LCGaussian2
        p1, p2 = [width1, width1 * (1 + skew), x1], [width2 * (1 + skew), width2, x2]
    else:
        if lorentzian:
            prim = LCLorentzian
            # widths scaled by 2*pi here -- presumably a cycles-to-radians
            # conversion for the Lorentzian parameterization; TODO confirm
            width1 *= 2 * np.pi
            width2 *= 2 * np.pi
        else:
            prim = LCGaussian
        p1, p2 = [width1, x1], [width2, x2]
    if bridge_frac > 0:
        nb = bridge_frac * pulse_frac
        b = LCGaussian(p=[0.1, (x2 + x1) / 2])
        return LCTemplate([prim(p=p1), b, prim(p=p2)], [n1, nb, n2])
    return LCTemplate([prim(p=p1), prim(p=p2)], [n1, n2])
def get_gauss1(pulse_frac=1, x1=0.5, width1=0.01):
    """Return a one-gaussian template.  Convenience function."""
    peak = LCGaussian(p=[width1, x1])
    return LCTemplate([peak], [pulse_frac])
def get_2pb(pulse_frac=0.9, lorentzian=False):
    """Convenience function to get a 2 Lorentzian + Gaussian bridge template."""
    peak_type = LCLorentzian if lorentzian else LCGaussian
    leading = peak_type(p=[0.03, 0.1])
    bridge = LCGaussian(p=[0.15, 0.3])
    trailing = peak_type(p=[0.03, 0.55])
    # split the pulsed fraction 30/40/30 between the components
    weights = [0.3 * pulse_frac, 0.4 * pulse_frac, 0.3 * pulse_frac]
    return LCTemplate(primitives=[leading, bridge, trailing], norms=weights)
def make_twoside_gaussian(one_side_gaussian):
    """Make a two-sided gaussian with the same initial shape as the
    input one-sided gaussian."""
    twoside = LCGaussian2()
    # duplicate the single width into both sides; copy the position over
    twoside.p[0] = twoside.p[1] = one_side_gaussian.p[0]
    twoside.p[-1] = one_side_gaussian.p[-1]
    return twoside
def adaptive_samples(func, npt, log10_ens=3, nres=200):
    """func should have a .cdf method.
    NB -- log10_ens needs to be a scalar!

    Return a set of npt points on [0,1] which are approximately
    distributed uniformly in F(phi) and thus more densely sample the
    peaks.  First, the cdf is evaluated on nres points.  Then npt
    estimates of the inverse cdf are obtained by linear interpolation.
    (Cleanup: removed the unused locals "m" and "X1".)
    """
    assert np.isscalar(log10_ens)
    x = np.linspace(0, 1, nres)
    F = func.cdf(x, log10_ens=log10_ens)
    Y = np.linspace(0, 1, npt)
    # bracketing indices of each target quantile in the sampled cdf
    idx = np.searchsorted(F, Y[1:-1])
    assert idx.min() > 0
    F1 = F[idx]
    F0 = F[idx - 1]
    X0 = x[idx - 1]
    # linear interpolation within each bracket; the grid is uniform, so
    # the sample spacing is simply (x[1] - x[0])
    Y[1:-1] = X0 + (Y[1:-1] - F0) / (F1 - F0) * (x[1] - x[0])
    return Y
class GaussianPrior:
    """Gaussian prior applied to a masked subset of parameters.

    Parameters
    ----------
    locations : array-like
        central values of the priors
    widths : array-like
        gaussian sigmas
    mod : array-like of bool
        entries flagged True are wrapped onto [0,1) (both the center
        and the evaluated parameter)
    mask : array-like of bool, optional
        if given, apply the prior only to the masked parameters
    """

    def __init__(self, locations, widths, mod, mask=None):
        self.x0 = np.where(mod, np.mod(locations, 1), locations)
        # pre-scale sigma by sqrt(2) so __call__ gives (x-x0)^2/(2 sigma^2)
        self.s0 = np.asarray(widths) * 2**0.5
        self.mod = np.asarray(mod)
        if mask is None:
            mask = [True] * len(locations)
        self.mask = np.asarray(mask)
        self.x0 = self.x0[self.mask]
        self.s0 = self.s0[self.mask]
        self.mod = self.mod[self.mask]

    def __len__(self):
        """Return number of parameters with a prior."""
        return self.mask.sum()

    def __call__(self, parameters):
        """Return the quadratic (chi^2-like) penalty for *parameters*."""
        if not np.any(self.mask):
            return 0
        vals = parameters[self.mask]
        vals = np.where(self.mod, np.mod(vals, 1), vals)
        return np.sum(((vals - self.x0) / self.s0) ** 2)

    def gradient(self, parameters):
        """Return d(penalty)/d(parameters); zero for unmasked entries."""
        if not np.any(self.mask):
            return np.zeros_like(parameters)
        vals = parameters[self.mask]
        vals = np.where(self.mod, np.mod(vals, 1), vals)
        grad = np.zeros(len(self.mask))
        grad[self.mask] = 2 * (vals - self.x0) / self.s0**2
        return grad
def prim_io(template, bound_eps=1e-5):
    """Read files and build LCPrimitives.

    Parameters
    ----------
    template : str
        a filename, or (if no such file exists) the template contents
        passed directly as a newline-delimited string
    bound_eps : float
        tolerance within which slightly out-of-bounds parameters are
        snapped onto the boundary instead of raising an error

    Returns
    -------
    (primitives, norms) -- norms is None for kernel/fourier templates.
    """

    def read_gaussian(toks):
        # parse pygaussfit-style "phas/fwhm/ampl" token triples
        primitives = []
        norms = []
        for i, tok in enumerate(toks):
            if tok[0].startswith("phas"):
                g = LCGaussian()
                g.p[-1] = float(tok[2])
                g.errors[-1] = float(tok[4])
                primitives += [g]
            elif tok[0].startswith("fwhm"):
                g = primitives[-1]
                # kluge: convert FWHM to sigma (factor 2*sqrt(2*ln2))
                g.p[0] = float(tok[2]) / 2.3548200450309493
                g.errors[0] = float(tok[4]) / 2.3548200450309493
            elif tok[0].startswith("ampl"):
                norms.append(float(tok[2]))
        # check that bounds are OK; snap tiny excursions to the boundary
        for iprim, prim in enumerate(primitives):
            if prim.check_bounds():
                continue
            for ip, p in enumerate(prim.p):
                lo, hi = prim.bounds[ip]
                if (p < lo) and (abs(p - lo) < bound_eps):
                    prim.p[ip] = lo
                if (p > hi) and (abs(p - hi) < bound_eps):
                    prim.p[ip] = hi
            if not prim.check_bounds():
                raise ValueError("Unrecoverable bounds errors on input.")
        # check norms; renormalize if the sum barely exceeds unity
        norms = np.asarray(norms)
        n = norms.sum()
        # BUGFIX: previously referenced the undefined name "bounds_eps"
        if (n > 1) and (abs(n - 1) < bound_eps):
            norms *= 1.0 / n
        return primitives, list(norms)

    lines = None
    try:
        # use a context manager so the handle is always closed
        with open(template, "r") as f:
            lines = f.readlines()
    except FileNotFoundError:
        # fall back to treating the argument as the template contents
        lines = template.split("\n")
    if lines is None:
        raise ValueError("Could not load lines from template.")
    toks = [line.strip().split() for line in lines if len(line.strip()) > 0]
    label, toks = toks[0], toks[1:]
    if "gauss" in label:
        return read_gaussian(toks)
    elif "kernel" in label:
        return [LCKernelDensity(input_file=toks)], None
    elif "fourier" in label:
        return [LCEmpiricalFourier(input_file=toks)], None
    raise ValueError("Template format not recognized!")
def check_gradient_derivative(templ):
    """Compare a template's analytic gradient_derivative against a
    finite-difference estimate of d(gradient)/d(phi).

    Prints the maximum absolute discrepancy for each parameter row and
    returns (pcs, gd, ngd): the midpoint phase grid, the analytic values,
    and the numerical estimates.
    """
    dom = np.linspace(0, 1, 10001)
    pcs = 0.5 * (dom[:-1] + dom[1:])
    ngd = templ.gradient(dom)
    # forward differences of the gradient, evaluated at bin midpoints
    ngd = (ngd[:, 1:] - ngd[:, :-1]) / (dom[1] - dom[0])
    # BUGFIX: gradient_derivative is a bound method; the template was
    # previously passed again as the first argument, shifting "phases"
    gd = templ.gradient_derivative(pcs)
    for i in range(gd.shape[0]):
        print(np.max(np.abs(gd[i] - ngd[i])))
    return pcs, gd, ngd
def isvector(x):
    """Return True if x is array-like (i.e. has at least one dimension)."""
    return np.ndim(x) > 0
|
nanogravREPO_NAMEPINTPATH_START.@PINT_extracted@PINT-master@src@pint@templates@lctemplate.py@.PATH_END.py
|
{
"filename": "open_problems.md",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/open_problems/open_problems.md",
"type": "Markdown"
}
|
1. `treat_object_as_categorical`
Currently you have to pass `cat_features` to CatBoost* init function or to fit function.
Many people ask for automatic detection of categorical features.
This flag would solve the problem.
It is suggested to add the flag to Pool init function, CatBoost* init functions and to fit function the same way `cat_features` parameter is added.
Tests for all these cases must be provided.
2. `allow_float_categories`
Categorical features are treated in the following way. We first convert them to strings, then calculate hash from the string, then use the hash value in the algorithm.
For this reason it is only allowed to use data types that can be converted to string in a unique way. Otherwise if you are training from python and applying from C++, you might get different results because of different string representation.
But if you are only working from python, then it can be safe to use float numbers if user has explicitly confirmed that this is what the user wants.
This flag should also be used in Pool and CatBoost* init functions and in fit function.
3. `allow_nan_categories`
This problem is very similar to #2, but now nan categories are allowed.
It is suggested that in this case nan value is always converted to "None" string before calculating hashes.
4. Skip invalid parameter configurations in `grid_search` and `randomized_search` methods.
The Python code that runs the parameter search should check whether each configuration is valid. If a configuration is not valid, it should be skipped and a warning message should be printed.
In the case of `randomized_search`, where `n_iter` is the number of checked configurations, invalid configurations should not be counted as checked ones.
5. Add `model.class_count_` property to CatBoostClassifier class.
It should return `len(model.class_names_)`
6. Add `feature_names_`, `cat_feature_names_`, `num_feature_names_`, `cat_feature_indices_` properties to CatBoost* classes.
7. Implement a new ranking metric ERR (Expected Reciprocal Rank) and its documentation
8. Add CatBoostClassifier `predict_log_proba` method
9. Better parameter checks:
if `leaf_estimation_iterations`:5 with RMSE, there should be warning and 1 iteration
10. tutorial on poisson regression using monotonic1 dataset.
Jupyter notebook should give text explanation of what is the task, examples when it might appear and how it is solved.
11. In python cv request `loss_function` to be set
Currently if no `loss_function` is passed, then RMSE is used by default.
This might be misleading in the following case.
A user creates CatBoostClassifier and calls `get_params()` on an untrained model. The resulting parameters don't contain the `loss_function` parameter, because the default `loss_function` for CatBoostClassifier depends on the number of classes in the training dataset. If there are 2 classes, it is Logloss; if there are more than 2 classes, it is MultiClass.
These parameters are passed to cv function. And it trains an RMSE model, because it is the default loss.
This is not the expected behavior, so it is better to check that the loss is present among the parameters passed to the cv method.
12. Implement CatBoostRanker class
Currently we only have CatBoostRegressor and CatBoostClassifier.
It would be nice to implement a class for ranking also.
The default loss function in this case will be YetiRank.
13. Implement a ColumnDescription class in Python that can be used instead of cd file https://catboost.ai/docs/concepts/input-data_column-descfile.html
when creating Pool from file.
The class should have init function, methods load and save, and Pool init method should be able to use object of this class instead of cd file during initialization.
14. Add `eval_metrics` method to R library. Currently it's only supported in Python package.
15. Add `baseline` parameter to `eval_metrics` function in Python.
Currently this function assumes that initial value for every sample is 0.
This might not be the case if we are training from some baseline.
16. Automatic `class_weights`/`scale_pos_weight` based on training dataset class appearance frequency.
Interface: `class_weights`='Auto'
17. Add CatBoost to https://github.com/apple/turicreate
18. Implement Tweedie Regression
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@open_problems@open_problems.md@.PATH_END.py
|
{
"filename": "_xanchor.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/layout/coloraxis/colorbar/_xanchor.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class XanchorValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    """Enumerated validator for ``layout.coloraxis.colorbar.xanchor``."""

    def __init__(
        self, plotly_name="xanchor", parent_name="layout.coloraxis.colorbar", **kwargs
    ):
        # pull defaults out of kwargs so explicit overrides still win
        edit_type = kwargs.pop("edit_type", "colorbars")
        values = kwargs.pop("values", ["left", "center", "right"])
        super(XanchorValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            values=values,
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@layout@coloraxis@colorbar@_xanchor.py@.PATH_END.py
|
{
"filename": "calculation.py",
"repo_name": "waynebhayes/SpArcFiRe",
"repo_path": "SpArcFiRe_extracted/SpArcFiRe-master/scripts/SpArcFiRe-pyvenv/lib/python2.7/site-packages/astropy/coordinates/calculation.py",
"type": "Python"
}
|
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import (absolute_import, division, print_function,
unicode_literals)
# Standard library
from datetime import datetime
from xml.dom.minidom import parse
import re
import textwrap
# Third-party
from .. import time as atime
from ..utils.console import color_print, _color_text
from ..extern.six.moves.urllib.request import urlopen
from . import get_sun
__all__ = []
class HumanError(ValueError):
    """Raised when the human's birthday falls outside the zodiac."""
class CelestialError(ValueError):
    """Raised when the horoscope feed cannot be retrieved or parsed."""
def get_sign(dt):
    """Return the traditional zodiac sign for the date *dt*.

    Each month contains exactly one sign boundary: days on or before the
    boundary day belong to the earlier sign, later days to the next one.
    """
    # month -> (last day of the earlier sign, earlier sign, later sign)
    boundaries = {
        1: (19, "capricorn", "aquarius"),
        2: (17, "aquarius", "pisces"),
        3: (19, "pisces", "aries"),
        4: (19, "aries", "taurus"),
        5: (20, "taurus", "gemini"),
        6: (20, "gemini", "cancer"),
        7: (22, "cancer", "leo"),
        8: (22, "leo", "virgo"),
        9: (22, "virgo", "libra"),
        10: (22, "libra", "scorpio"),
        11: (21, "scorpio", "sagittarius"),
        12: (21, "sagittarius", "capricorn"),
    }
    cutoff, early_sign, late_sign = boundaries[int(dt.month)]
    return early_sign if int(dt.day) <= cutoff else late_sign
# astrological sign names, in the calendar order produced by get_sign()
_VALID_SIGNS = ["capricorn", "aquarius", "pisces", "aries", "taurus", "gemini",
                "cancer", "leo", "virgo", "libra", "scorpio", "sagittarius"]

# Some of the constellation names map to different astrological "sign names".
# Astrologers really needs to talk to the IAU...
_CONST_TO_SIGNS = {'capricornus': 'capricorn', 'scorpius': 'scorpio'}
def horoscope(birthday, corrected=True):
    """
    Enter your birthday as an `astropy.time.Time` object and
    receive a mystical horoscope about things to come.

    Parameters
    ----------
    birthday : `astropy.time.Time`
        Your birthday as a `datetime.datetime` or `astropy.time.Time` object.
    corrected : bool
        Whether to account for the precession of the Earth instead of using the
        ancient Greek dates for the signs.  After all, you do want your *real*
        horoscope, not a cheap inaccurate approximation, right?

    Returns
    -------
    Infinite wisdom, condensed into astrologically precise prose.

    Notes
    -----
    This function fetches the horoscope over the network from
    findyourfate.com and prints it; it has no return value.
    It was implemented on April 1. Take note of that date.
    """
    # regexes for words to highlight in the output, mapped to colors
    special_words = {
        '([sS]tar[s^ ]*)': 'yellow',
        '([yY]ou[^ ]*)': 'magenta',
        '([pP]lay[^ ]*)': 'blue',
        '([hH]eart)': 'red',
        '([fF]ate)': 'lightgreen',
    }

    birthday = atime.Time(birthday)
    today = datetime.now()

    if corrected:
        # use the constellation the sun actually occupied on the birthday
        zodiac_sign = get_sun(birthday).get_constellation().lower()
        zodiac_sign = _CONST_TO_SIGNS.get(zodiac_sign, zodiac_sign)
        if zodiac_sign not in _VALID_SIGNS:
            raise HumanError('On your birthday the sun was in {}, which is not '
                             'a sign of the zodiac. You must not exist. Or '
                             'maybe you can settle for '
                             'corrected=False.'.format(zodiac_sign.title()))
    else:
        # classical (uncorrected) sign from calendar dates
        zodiac_sign = get_sign(birthday.to_datetime())
    url = "http://www.findyourfate.com/rss/dailyhoroscope-feed.php?sign={sign}&id=45"

    f = urlopen(url.format(sign=zodiac_sign.capitalize()))
    try:  # urlopen in py2 cannot be used as a context manager
        doc = parse(f)
        item = doc.getElementsByTagName('item')[0]
        desc = item.getElementsByTagName('description')[0].childNodes[0].nodeValue
    except Exception:
        raise CelestialError("Invalid response from celestial gods (failed to load horoscope).")
    finally:
        f.close()

    print("*"*79)
    color_print("Horoscope for {} on {}:".format(zodiac_sign.capitalize(), today.strftime("%Y-%m-%d")),
                'green')
    print("*"*79)
    for block in textwrap.wrap(desc, 79):
        split_block = block.split()
        for i, word in enumerate(split_block):
            for re_word in special_words.keys():
                match = re.search(re_word, word)
                if match is None:
                    continue
                # colorize only the matched fragment of the word
                split_block[i] = _color_text(match.groups()[0], special_words[re_word])
        print(" ".join(split_block))
def inject_horoscope():
    """Attach :func:`horoscope` to the top-level ``astropy`` namespace
    under the name ``_yourfuture``."""
    import astropy
    astropy._yourfuture = horoscope

inject_horoscope()
|
waynebhayesREPO_NAMESpArcFiRePATH_START.@SpArcFiRe_extracted@SpArcFiRe-master@scripts@SpArcFiRe-pyvenv@lib@python2.7@site-packages@astropy@coordinates@calculation.py@.PATH_END.py
|
{
"filename": "database.py",
"repo_name": "aburgasser/splat",
"repo_path": "splat_extracted/splat-main/splat/database.py",
"type": "Python"
}
|
# -*- coding: utf-8 -*-
from __future__ import print_function, division
"""
.. note::
These are the database functions for SPLAT
"""
# imports: internal
import base64
import copy
import csv
import glob
import os
import re
import requests
from shutil import copyfile
import time
# imports: external
import astropy
import numpy
import pandas
from astropy.io import ascii, fits # for reading in spreadsheet
from astropy.table import Column, Table, join, vstack # for reading in table files
from astropy.time import Time # for reading in table files
from astropy.coordinates import Angle,SkyCoord,Galactic,BarycentricTrueEcliptic,match_coordinates_sky,search_around_sky
from astropy import units as u # standard units
from astroquery.simbad import Simbad
from astroquery.vizier import Vizier
from astroquery.nist import Nist
from astroquery.xmatch import XMatch
#from astroquery.gaia import Gaia
# splat requirements
import splat
import splat.plot as splot
from splat.initialize import *
from splat.utilities import *
from splat.empirical import estimateDistance, typeToColor
#from splat import DB_SOURCES, DB_SPECTRA
#import splat as spl
# Python 2->3 fix for input
try: input=raw_input
except NameError: pass

# set network query timeouts in seconds (XMatch gets a longer, 3-minute window)
Simbad.TIMEOUT = 60
Vizier.TIMEOUT = 60
Nist.TIMEOUT = 60
XMatch.TIMEOUT = 180
##########################################################
########### DATABASE QUERY, ACCESS & TOOLS ###########
##########################################################
def prepDB(db_init,raCol='RA',decCol='DEC',desigCol='DESIGNATION',shortnameCol='SHORTNAME',coordinateCol='COORDINATES',force=False,verbose=False):
    '''
    Purpose
    -------
    Populates RA, DEC, DESIGNATION, COORDINATE and SHORTNAME columns if not present
    Requires RA and DEC, or DESIGNATION to be present

    Notes
    -----
    Operates on (and returns) a deep copy; the input database is not
    modified.  If ``force`` is True, all derived columns are regenerated
    from the designations even when already present.
    NOTE(review): the ``verbose`` parameter is currently unused.
    '''
    # work on a copy so the caller's dataframe is untouched
    db = copy.deepcopy(db_init)
    if raCol not in list(db.columns) or decCol not in list(db.columns):
        if desigCol not in list(db.columns):
            raise ValueError('Database must have columns {} and {}, or {}'.format(raCol,decCol,desigCol))
        else:
            # derive coordinates from designations, then RA/DEC in degrees
            db[coordinateCol] = [splat.designationToCoordinate(d) for d in db[desigCol]]
            if raCol not in list(db.columns) or decCol not in list(db.columns):
                db[raCol] = [c.ra.degree for c in db[coordinateCol]]
                db[decCol] = [c.dec.degree for c in db[coordinateCol]]
    if desigCol not in list(db.columns):
        # build designations from the RA/DEC columns
        db[desigCol] = [splat.coordinateToDesignation([db[raCol].iloc[i],db[decCol].iloc[i]]) for i in range(len(db))]
    if shortnameCol not in list(db.columns):
        db[shortnameCol] = [splat.designationToShortName(d) for d in db[desigCol]]
    # force COORDINATES, RA, DEC if desired
    if force == True:
        db[coordinateCol] = [splat.designationToCoordinate(d) for d in db[desigCol]]
        db[raCol] = [c.ra.degree for c in db[coordinateCol]]
        db[decCol] = [c.dec.degree for c in db[coordinateCol]]
        db[shortnameCol] = [splat.designationToShortName(d) for d in db[desigCol]]
    # remove extra space in designation string
    db[desigCol] = [x.strip() for x in db[desigCol]]
    return db
def longCoordList(dp,imax=10000):
    '''
    Purpose
    -------
    Creates a SkyCoord array from a (possibly very large) table of positions,
    assembling it in chunks of at most `imax` rows to avoid the performance
    problems of building SkyCoord objects from very long arrays in one call.

    Parameters
    ----------
    dp : table with 'RA' and 'DEC' columns in degrees (pandas-style .iloc indexing)
    imax : int, maximum chunk size (default = 10000)

    Returns
    -------
    astropy SkyCoord array with one entry per row of dp, in the original row
    order (None if dp is empty)

    Notes
    -----
    Bug fix: the original trailing-remainder logic referenced the loop index
    outside the loop, raising NameError whenever len(dp) <= imax, and appended
    an empty chunk when len(dp) was an exact multiple of imax. Chunking is now
    handled uniformly for all lengths.
    '''
    c = None
    for istart in range(0, len(dp), imax):
        sub = dp.iloc[istart:istart+imax]
        c0 = SkyCoord(ra=sub['RA']*u.degree, dec=sub['DEC']*u.degree)
        # insert previously accumulated coordinates ahead of this chunk so row order is preserved
        c = c0 if c is None else c0.insert(0, c)
    return c
def catXMatch(dp1,dp2,sep=5*u.arcsec,imax = 10000,merge=False):
    '''
    Purpose
    -------
    Cross-matches two catalogs and returns, for each catalog, only the rows
    whose nearest neighbor in the other catalog lies within the specified
    separation. A 'separation' column (arcsec) is added to each returned table.
    (The `merge` keyword is currently unused.)
    '''
    maxsep = sep.to(u.arcsec)

    def _coords(tbl):
        # build the SkyCoord list, chunking for very large tables
        if len(tbl) > imax:
            return longCoordList(tbl)
        return SkyCoord(ra=tbl['RA']*u.degree, dec=tbl['DEC']*u.degree)

    def _within(tbl, cref, cother):
        # keep rows whose nearest counterpart in the other catalog is close enough
        idx, sep2d, sep3d = match_coordinates_sky(cref, cother)
        tbl['separation'] = [s.to(u.arcsec).value for s in sep2d]
        kept = tbl[tbl['separation'] < maxsep.value]
        kept.reset_index(inplace=True, drop=True)
        return kept

    dp1p = prepDB(dp1)
    dp2p = prepDB(dp2)
    c1 = _coords(dp1p)
    c2 = _coords(dp2p)
    return _within(dp1p, c1, c2), _within(dp2p, c2, c1)
def sourceXMatch(c,dp2,sep=5*u.arcsec,imax = 10000):
    '''
    Purpose
    -------
    Matches a single coordinate against a catalog and returns the catalog rows
    lying within the specified separation of that coordinate, with a
    'separation' column (arcsec) added.
    '''
    maxsep = sep.to(u.arcsec)
    # accept anything splat.properCoordinates can parse (string, list, etc.)
    if not isinstance(c, SkyCoord):
        c = splat.properCoordinates(c)
    cat = prepDB(dp2)
    # build the catalog SkyCoord list, chunking for very large tables
    if len(cat) > imax:
        ccat = longCoordList(cat)
    else:
        ccat = SkyCoord(ra=cat['RA']*u.degree, dec=cat['DEC']*u.degree)
    cat['separation'] = c.separation(ccat).arcsec
    matched = cat[cat['separation'] < maxsep.value]
    matched.reset_index(inplace=True, drop=True)
    return matched
def fetchDatabase(*args, **kwargs):
    '''
    Purpose
    -------
    Get the SpeX Database from either online repository or local drive

    Optional Inputs:
        :param filename/file: name of the database file (default = 'db_spexprism.txt';
            may also be given as first positional argument)
        :param folder: folder containing the file (default = SPLAT_PATH+DB_FOLDER)
        :param local/online: force reading from local disk or from the SPLAT website
        :param delimiter/format/fmt: file format specification ('csv' or 'tab';
            inferred from the filename extension if not given)

    Output:
        astropy Table containing the database contents

    Raises NameError if the file cannot be located or read.
    '''
    filename = 'db_spexprism.txt'   # temporary original database file for backwards compatability
    if len(args) > 0:
        filename = args[0]
    kwargs['filename'] = kwargs.get('filename',filename)
    kwargs['filename'] = kwargs.get('file',kwargs['filename'])
    kwargs['folder'] = kwargs.get('folder',SPLAT_PATH+DB_FOLDER)
    url = kwargs.get('url',SPLAT_URL)+kwargs['folder']
    local = kwargs.get('local',True)
    online = kwargs.get('online',not local and checkOnline())
    local = not online
    kwargs['local'] = local
    kwargs['online'] = online
    kwargs['model'] = True

    # determine format of file from delimiter/format keywords or filename extension
    delimiter = kwargs.get('delimiter','')
    fmt = kwargs.get('format','')
    fmt = kwargs.get('fmt',fmt)
    if delimiter == ',' or delimiter == 'comma' or delimiter == 'csv' or kwargs.get('comma',False) == True or ('.csv' in kwargs['filename']):
        delimiter = ','
        fmt = 'csv'
    if delimiter == '\t' or delimiter == 'tab' or kwargs.get('tab',False) == True or ('.txt' in kwargs['filename']):
        delimiter = '\t'
        fmt = 'tab'
    if fmt == '':
        raise NameError('\nCould not determine the file format of '+kwargs['filename']+'; please specify using format or delimiter keywords\n\n')

    # check that folder/set is present either locally or online
    # if not present locally but present online, switch to this mode
    # if not present at either raise error
    folder = checkLocal(kwargs['folder'])
    if folder=='':
        folder = checkOnlineFile(kwargs['folder'])
        if folder=='':
            raise NameError('\nCould not find '+kwargs['folder']+' locally or on SPLAT website\n\n')
        else:
            kwargs['folder'] = folder
            kwargs['local'] = False
            kwargs['online'] = True
    else:
        kwargs['folder'] = folder

    # locally: look for the file directly, then inside the folder
    if kwargs['local']:
        infile = checkLocal(kwargs['filename'])
        if infile=='':
            infile = checkLocal(kwargs['folder']+'/'+kwargs['filename'])
        if infile=='':
            raise NameError('\nCould not find '+kwargs['filename']+' locally\n\n')
        else:
            try:
                data = ascii.read(os.path.normpath(infile), delimiter=delimiter,fill_values='-99.',format=fmt)
            except:
                raise NameError('\nCould not load {}: this may be a decoding error\n'.format(infile))

    # online: download to a temporary file, read it, then clean up
    if kwargs['online']:
        infile = checkOnlineFile(kwargs['filename'])
        if infile=='':
            infile = checkOnlineFile(kwargs['folder']+'/'+kwargs['filename'])
        if infile=='':
            raise NameError('\nCould not find '+kwargs['filename']+' on the SPLAT website\n\n')
        try:
            open(os.path.basename(TMPFILENAME), 'wb').write(requests.get(url+infile).content)
            # bug fix: this previously referenced an undefined name `tmp`,
            # which made every online read fall through to the except clause
            kwargs['filename'] = os.path.basename(TMPFILENAME)
            data = ascii.read(os.path.basename(TMPFILENAME), delimiter=delimiter,fill_values='-99.',format=fmt)
            os.remove(os.path.basename(TMPFILENAME))
        except:
            raise NameError('\nHaving a problem reading in '+kwargs['filename']+' on the SPLAT website\n\n')

    return data
#####################################################
########### ADDING NEW SPECTRA TO SPLAT ##########
#####################################################
def addUserSpectra(folder='./',instrument='SPEX-PRISM',mode='update',repeat='retain',radius_repeat=10.*u.arcsec,input_file='input.txt',search_str='*.fits',sources_data_file=DB_SOURCES_FILE,spectra_data_file=DB_SPECTRA_FILE,verbose=True,*args):
    '''
    :Purpose:
        Adds in local spectral data to the underlying SPLAT library
        This program is currently UNDER DEVELOPMENT

    :Optional Inputs:
        :param folder: folder containing the data files (default = './'; may also be first positional argument)
        :param instrument: default instrument for the data (default = 'SPEX-PRISM'; may also be second positional argument)
        :param mode: one of 'new', 'append', 'refresh' or 'update' (default = 'update')
        :param repeat: handling of repeat observations: 'replace', 'assert', 'retain' or 'keep' (default = 'retain')
        :param input_file: file listing the data files and metadata (default = 'input.txt')
        :param search_str: glob search string used to find data files (default = '*.fits')
        :param sources_data_file: output file for source metadata (default = DB_SOURCES_FILE)
        :param spectra_data_file: output file for spectra metadata (default = DB_SPECTRA_FILE)
        :param verbose: provide verbose feedback (default = True)
    '''
    # program constants
    optional_spectra_columns = ['PUBLISHED','DATA_BIBCODE','PROGRAM_PI','OBSERVATION_DATE','OBSERVATION_MJD','OBSERVATION_TIME','OBSERVER','AIRMASS']
    optional_sources_columns = ['NAME','DESIGNATION','RA','DEC','COORDINATES','DISCOVERY_REF','SPT','SPT_REF','SPT_OPT','SPT_OPT_REF','SPT_NIR','SPT_NIR_REF','SPT_LIT','SPT_LIT_REF','LUMINOSITY_CLASS','METALLICITY_CLASS','GRAVITY_CLASS_OPTICAL','GRAVITY_CLASS_OPTICAL_REF','GRAVITY_CLASS_NIR','GRAVITY_CLASS_NIR_REF','CLUSTER','CLUSTER_REF','BINARY','BINARY_TYPE','BINARY_REF','SBINARY','SBINARY_REF','COMPANION_NAME','COMPANION_REF']
    # alternate FITS header keywords that may hold each metadata column
    header_spectra_columns = {
        'OBSERVATION_DATE': ['OBS_DATE','OBS-DATE','UT-DATE'],
        'OBSERVATION_TIME': ['OBS_TIME','OBS-TIME','UT-TIME'],
        'OBSERVER': [],
        'AIRMASS': ['Z'],
        'SLIT': ['APERTURE'],
        'DISPERSER': ['GRATING','GRISM','DISPERSE'],
    }
    header_sources_columns = {
        'NAME': ['OBJECT','SOURCE','TARGET'],
        'RA': ['RA-D','RADEG'],
        'DEC': ['DEC-D','DECDEG'],
    }
    dataset_number_factor = 1e6   # offset separating key ranges of different datasets
    now = time.localtime()
    nowstr = str(now.tm_year)+str(now.tm_mon)+str(now.tm_mday)

    if len(args) > 0:
        folder = args[0]
    if len(args) > 1:
        instrument = args[1]

    ##### STOPPED HERE #####
    # check if this has already been read in
    # if folder in DATA_FOLDERS:
    #     n =

    # check instrument
    inst = splat.checkInstrument(instrument)
    if inst != False: instrument = inst

    # check mode and repeat keywords, reverting to defaults if unrecognized
    mode_labels = ['new','append','refresh','update']
    if mode.lower() not in mode_labels:
        if verbose==True: print('\nDo not recognize mode = {}; should be one of {}; reverting to update'.format(mode,mode_labels))
        mode = 'update'
    repeat_labels = ['replace','assert','retain','keep']
    if repeat.lower() not in repeat_labels:
        if verbose==True: print('\nDo not recognize repeat = {}; should be one of {}; reverting to retain'.format(repeat,repeat_labels))
        repeat = 'retain'

    # check the folder is correctly specified
    if not os.path.exists(folder):
        # bug fix: the folder name was never substituted into this message
        print('\nCould not find folder {} in local directory structure; skipping'.format(folder))
        return

    # check if spectra data file is present; if not, you'll need to generate a new one
    if spectra_data_file not in os.listdir(folder):
        if verbose == True: print('\nCannot find spectral data file {}; generating a new one from input files'.format(spectra_data_file))
        mode = 'new'

    # STAGE 1: SET UP A NEW FOLDER OF DATA
    if mode.lower() == 'new':

        # check if input file is in place; if not, make one from the data files found
        if input_file not in os.listdir(folder):
            files = glob.glob(folder+'/'+search_str)
            files = [os.path.basename(f) for f in files]
            for f in [input_file,sources_data_file,spectra_data_file]:
                if f in files: files.remove(f)

            # turn into preliminary input.txt file
            input_db = pandas.DataFrame()
            input_db['DATA_FILE'] = files
            input_db['INSTRUMENT'] = [instrument]*len(files)
            if '.txt' in input_file: input_db.to_csv(folder+'/'+input_file,sep='\t',index=False)
            elif '.csv' in input_file: input_db.to_csv(folder+'/'+input_file,sep=',',index=False)
            elif '.xls' in input_file: input_db.to_excel(folder+'/'+input_file,index=False)
            else: raise ValueError('\nDo not recognize file format for {}'.format(input_file))

        # prompt to continue?

        # read in input file and start building spectral database
        if '.txt' in input_file: input_db = pandas.read_csv(folder+'/'+input_file,delimiter='\t')
        elif '.csv' in input_file: input_db = pandas.read_csv(folder+'/'+input_file,delimiter=',')
        elif '.xls' in input_file: input_db = pandas.read_excel(folder+'/'+input_file)
        else: raise ValueError('\nDo not recognize file format for input file {}'.format(input_file))

        # capitalize all columns
        for c in list(input_db.columns):
            if c.upper() not in list(input_db.columns):
                input_db[c.upper()] = input_db[c]
                del input_db[c]

        # adjust instrument column name and normalize instrument values
        syn = ['INST','INSTR']
        if 'INSTRUMENT' not in list(input_db.columns):
            for s in syn:
                if s in list(input_db.columns):
                    input_db['INSTRUMENT'] = input_db[s]
                    del input_db[s]
        if 'INSTRUMENT' not in list(input_db.columns):
            input_db['INSTRUMENT'] = [instrument]*len(input_db)
        for i,inst in enumerate(input_db['INSTRUMENT']):
            inst = splat.checkInstrument(inst)
            # bug fix: was chained indexing (df['col'].iloc[i] = ...), which
            # pandas may apply to a temporary copy and silently discard
            if inst != False: input_db.loc[input_db.index[i],'INSTRUMENT'] = inst

        # adjust filename column name
        syn = ['FILE','FILENAME','FILE_NAME']
        if 'DATA_FILE' not in list(input_db.columns):
            for s in syn:
                if s in list(input_db.columns):
                    input_db['DATA_FILE'] = input_db[s]
                    del input_db[s]

        # establish source and spectra data frames
        sources_db = pandas.DataFrame()
        spectra_db = pandas.DataFrame()

        # prep unique keys, offset by the number of datasets already known
        n = len(DATA_FOLDERS)
        keys = numpy.arange(len(input_db))+n*dataset_number_factor+1
        sources_db['SOURCE_KEY'] = [int(k) for k in keys]
        spectra_db['DATA_KEY'] = sources_db['SOURCE_KEY']
        spectra_db['SOURCE_KEY'] = sources_db['SOURCE_KEY']

        # required spectral information
        spectra_db['DATA_FILE'] = input_db['DATA_FILE']
        spectra_db['INSTRUMENT'] = input_db['INSTRUMENT']
        spectra_db['DATA_ENTRY'] = [nowstr]*len(input_db)

        # add in optional columns from input; anything unrecognized goes to spectra
        for c in optional_spectra_columns:
            if c in list(input_db.columns): spectra_db[c] = input_db[c]
        for c in optional_sources_columns:
            if c in list(input_db.columns): sources_db[c] = input_db[c]
        for c in list(input_db.columns):
            if c not in optional_spectra_columns and c not in optional_sources_columns and c not in list(spectra_db.columns): spectra_db[c] = input_db[c]

        # write out the source and spectra data files
        if '.txt' in sources_data_file: sources_db.to_csv(folder+'/'+sources_data_file,sep='\t',index=False)
        elif '.csv' in sources_data_file: sources_db.to_csv(folder+'/'+sources_data_file,sep=',',index=False)
        elif '.xls' in sources_data_file: sources_db.to_excel(folder+'/'+sources_data_file,index=False)
        else: raise ValueError('\nDo not recognize file format for {}'.format(sources_data_file))
        if '.txt' in spectra_data_file: spectra_db.to_csv(folder+'/'+spectra_data_file,sep='\t',index=False)
        elif '.csv' in spectra_data_file: spectra_db.to_csv(folder+'/'+spectra_data_file,sep=',',index=False)
        elif '.xls' in spectra_data_file: spectra_db.to_excel(folder+'/'+spectra_data_file,index=False)
        else: raise ValueError('\nDo not recognize file format for {}'.format(spectra_data_file))

    # STAGE 2: APPEND TO AN EXISTING FOLDER OF DATA (not yet implemented)
    if mode.lower() == 'new' or mode.lower() == 'append':
        pass

    return
#####################################################
########### ACCESSING ONLINE CATALOGS ###########
#####################################################
def getVizierName(catalog,output=False):
    '''
    Wrapper function for Vizier.find_catalogs to help search for Vizier catalog names

    Prints each matching catalog ID and its description; if output==True, also
    returns them as a dict of {catalog_id: description}, otherwise returns None.

    Bug fix: the results dict previously reused the name `output`, shadowing the
    keyword flag, so `output==True` compared a dict to True and the function
    could never return the results.
    '''
    catalog_list = Vizier.find_catalogs(catalog)
    results = {}
    for k,v in catalog_list.items():
        print('{}: {}'.format(k,v.description))
        results[k] = v.description
    if output==True: return results
    else: return
def getVizierCatalog(catalog,catnum=0,verbose=True,limit=-1,return_pandas=True):
    '''
    Wrapper function for Vizier.get_catalogs which returns a single whole catalog

    Parameters:
        :param catalog: Vizier catalog identifier string
        :param catnum: index of the sub-catalog to return if several match (default = 0)
        :param verbose: report which catalog is being returned (default = True)
        :param limit: maximum number of rows; negative means no limit (default = -1)
        :param return_pandas: return a pandas DataFrame instead of an astropy Table (default = True)

    Raises ValueError if no catalog matches the identifier.
    '''
    if limit<0: Vizier.ROW_LIMIT = -1
    else: Vizier.ROW_LIMIT = int(limit)
    catalogs = Vizier.get_catalogs(catalog)
    Vizier.ROW_LIMIT = 50   # restore the astroquery default row limit
    if len(catalogs)<1:
        raise ValueError('Catalog {} is not in Vizier; use getVizierName() to check for catalog ID'.format(catalog))
    # bug fix: clamp to the last valid index (len-1); clamping to len(catalogs)
    # allowed an out-of-range index and an IndexError below
    catnum = int(numpy.min([catnum,len(catalogs)-1]))
    if verbose==True:
        print('{} catalog(s) identified, returning the {}th one: {}'.format(len(catalogs),catnum,list(catalogs.keys())[catnum]))
    if return_pandas==True:
        return catalogs[catnum].to_pandas()
    else:
        return catalogs[catnum]
def queryVizier(coordinate,**kwargs):
    '''
    Thin alias for `splat.database.getPhotometry()`_; all arguments and keywords
    are passed through unchanged.

    .. _`splat.database.getPhotometry()` : api.html#splat.database.getPhotometry
    '''
    return getPhotometry(coordinate,**kwargs)
# NOTE: THIS IS NOT PROPERLY PASSING ON THE KEYWORDS
def getPhotometry(coordinate,return_pandas=True,catalog='2MASS',radius=30.*u.arcsec,sort='sep',limit=-1,info=False,nearest=False,verbose=False,**kwargs):
    '''
    Purpose
        Downloads photometry for a single source coordinate using astroquery.
        If you are getting data on multiple sources, it is preferable to use `splat.database.queryXMatch()`_

    .. _`splat.database.queryXMatch()` : api.html#splat.database.queryXMatch

    Required Inputs:
        :param: coordinate: Either an astropy SkyCoord or a variable that can be converted into a SkyCoord using `properCoordinates()`_

    .. _`properCoordinates()` : api.html#properCoordinates

    Optional Inputs:
        :param radius: Search radius, nominally in arcseconds although this can be changed by passing an astropy.unit quantity (default = 30 arcseconds)
        :param catalog: Catalog to query, which can be set to the Vizier catalog identifier code or to one of the following preset catalogs:

            * '2MASS' (or set ``2MASS``=True): the 2MASS All-Sky Catalog of Point Sources (`Cutri et al. 2003 <http://adsabs.harvard.edu/abs/2003yCat.2246....0C>`_), Vizier id II/246
            * 'SDSS' (or set ``SDSS``=True): the The SDSS Photometric Catalog, Release 9 (`Adelman-McCarthy et al. 2012 <http://adsabs.harvard.edu/abs/2012ApJS..203...21A>`_), Vizier id V/139
            * 'WISE' (or set ``WISE``=True): the WISE All-Sky Data Release (`Cutri et al. 2012 <http://adsabs.harvard.edu/abs/2012yCat.2311....0C>`_), Vizier id II/311
            * 'ALLWISE' (or set ``ALLWISE``=True): the AllWISE Data Release (`Cutri et al. 2014 <http://adsabs.harvard.edu/abs/2014yCat.2328....0C>`_), Vizier id II/328
            * 'VISTA' (or set ``VISTA``=True): the VIKING catalogue data release 1 (`Edge et al. 2013 <http://adsabs.harvard.edu/abs/2013Msngr.154...32E>`_), Vizier id II/329
            * 'CFHTLAS' (or set ``CFHTLAS``=True): the CFHTLS Survey (T0007 release) by (`Hudelot et al. 2012 <http://adsabs.harvard.edu/abs/2012yCat.2317....0H>`_), Vizier id II/317
            * 'DENIS' (or set ``DENIS``=True): the DENIS DR3 (DENIS Consortium 2005), Vizier id B/denis/denis
            * 'UKIDSS' (or set ``UKIDSS``=True): the UKIDSS-DR8 LAS, GCS and DXS Surveys (`Lawrence et al. 2012 <http://adsabs.harvard.edu/abs/2007MNRAS.379.1599L>`_), Vizier id II/314
            * 'LEHPM' (or set ``LEHPM``=True): the Liverpool-Edinburgh High Proper Motion Catalogue (`Pokorny et al. 2004 <http://adsabs.harvard.edu/abs/2004A&A...421..763P>`_), Vizier id J/A+A/421/763
            * 'SIPS' (or set ``SIPS``=True): the Southern Infrared Proper Motion Survey (`Deacon et al 2005 <http://adsabs.harvard.edu/abs/2005A&A...435..363D>`_), Vizier id J/A+A/435/363
            * 'UCAC4' (or set ``UCAC4``=True): the UCAC4 Catalogue (`Zacharias et al. 2012 <http://adsabs.harvard.edu/abs/2012yCat.1322....0Z>`_), Vizier id I/322A
            * 'USNOB' (or set ``USNO``=True): the USNO-B1.0 Catalog (`Monet et al. 2003 <http://adsabs.harvard.edu/abs/2003AJ....125..984M>`_), Vizier id I/284
            * 'LSPM' (or set ``LSPM``=True): the LSPM-North Catalog (`Lepine et al. 2005 <http://adsabs.harvard.edu/abs/2005AJ....129.1483L>`_), Vizier id I/298
            * 'GAIA-DR1': the GAIA DR1 Catalog (`Gaia Collaboration et al. 2016 <http://adsabs.harvard.edu/abs/2016yCat.1337....0G>`_), Vizier id I/337
            * 'GAIA' or 'GAIA-DR2' (or set ``GAIA``=True): the GAIA DR2 Catalog (REF TBD), Vizier id I/345/gaia2

        :param: sort: String specifying the parameter to sort the returned SIMBAD table by; by default this is the offset from the input coordinate (default = 'sep')
        :param: nearest: Set to True to return on the single nearest source to coordinate (default = False)
        :param: return_pandas: Return a pandas table as opposed to an astropy Table (default = True)
        :param: verbose: Give feedback (default = False)

    Output:
        An astropy or pandas Table that contains data from the Vizier query, or a blank Table if no sources are found

    Example:

    >>> import splat
    >>> import splat.database as spdb
    >>> from astropy import units as u
    >>> c = splat.properCoordinates('J053625-064302')
    >>> v = spdb.querySimbad(c,catalog='SDSS',radius=15.*u.arcsec)
    >>> print(v)
      _r    _RAJ2000   _DEJ2000  mode q_mode  cl ... r_E_ g_J_ r_F_ i_N_  sep
     arcs     deg        deg                     ... mag  mag  mag  mag   arcs
    ------ ---------- ---------- ---- ------ --- ... ---- ---- ---- ---- ------
     7.860  84.105967  -6.715966    1      3 ... --   --   --   --  7.860
    14.088  84.108113  -6.717206    1      6 ... --   --   --   -- 14.088
    14.283  84.102528  -6.720843    1   +  6 ... --   --   --   -- 14.283
    16.784  84.099524  -6.717878    1      3 ... --   --   --   -- 16.784
    22.309  84.097988  -6.718049    1   +  6 ... --   --   --   -- 22.309
    23.843  84.100079  -6.711999    1   +  6 ... --   --   --   -- 23.843
    27.022  84.107504  -6.723965    1   +  3 ... --   --   --   -- 27.022
    '''
    # check if online
    if not checkOnline():
        print('\nYou are currently not online; cannot do a Vizier query')
        return Table()

    # mapping of preset catalog names (and their aliases) to Vizier identifiers
    VIZIER_REF = {
        'SDSS': {'altname': [], 'catalog': u'V/147/sdss12'},
        '2MASS': {'altname': [], 'catalog': u'II/246/out'},
        'USNO': {'altname': ['USNOB','USNO-B','USNOB1.0','USNO-B1.0'], 'catalog': u'I/284/out'},
        'LSPM': {'altname': ['LSPM-N','LSPM-NORTH'], 'catalog': u'I/298/lspm_n'},
        'WISE': {'altname': [], 'catalog': u'II/311/wise'},
        'ALLWISE': {'altname': [], 'catalog': u'II/328/allwise'},
        'CATWISE': {'altname': [], 'catalog': u'II/365/catwise'},
        'UKIDSS': {'altname': [], 'catalog': u'II/314'},
        'CFHT': {'altname': ['CFHTLAS'], 'catalog': u'II/317/sample'},
        'UCAC': {'altname': [], 'catalog': u'I/322A/out'},
        'VISTA': {'altname': [], 'catalog': u'II/329/urat1'},
        'GAIA-DR1': {'altname': ['GAIA1','GAIADR1'], 'catalog': u'II/337/gaia'},
        'GAIA-DR2': {'altname': ['GAIA2','GAIADR2'], 'catalog': u'I/345/gaia2'},
        'GAIA-EDR3': {'altname': ['GAIA','GAIA3','GAIADR3'], 'catalog': u'I/350/gaiaedr3'},
        'PANSTARRS': {'altname': ['PAN-STARRS','PS1'], 'catalog': u'II/349/ps1'},
        'DENIS': {'altname': [], 'catalog': u'B/denis'},
        'LEHPM': {'altname': [], 'catalog': u'J/A+A/421/763'},
        'LEPINE': {'altname': ['LEPINE-MDWARFS'], 'catalog': u'J/AJ/142/138/Mdwarfs'},
        'SIPS': {'altname': [], 'catalog': u'J/A+A/435/363'},
        'MOVERS': {'altname': [], 'catalog': u'J/AJ/151/41'},
        'LATEMOVERS': {'altname': ['LATE-MOVERS'], 'catalog': u'J/AJ/153/92'},
        'GLIESE': {'altname': ['GJ'], 'catalog': u'J/PASP/122/885/table1'},
        'DESHPANDE2013': {'altname': ['DESHPANDE-2013','APOGEE_UCD'], 'catalog': u'J/AJ/146/156/table1'},
        'DITTMAN2014': {'altname': ['DITTMAN-2014','DITTMAN-PARALLAX','DIT16'], 'catalog': u'J/ApJ/784/156/table2'},
        'NEWTON2016': {'altname': ['NEWTON-2016','NEW16'], 'catalog': u'J/ApJ/821/93/table1'},
        'KIRKPATRICK2016': {'altname': ['KIRKPATRICK-2016','ALLWISE-MOTION','KIR16'], 'catalog': u'J/ApJS/224/36/motionobj'},
    }

    # give a summary of the built-in catalogs
    if info==True:
        print('Currently available input catalogs:')
        for k in list(VIZIER_REF.keys()):
            line = '\t{}: '.format(k)
            if len(VIZIER_REF[k]['altname'])>0:
                line=line+'(or'
                for a in VIZIER_REF[k]['altname']: line=line+' {}'.format(a)
                line=line+') '
            print(line+'Vizier reference: {}'.format(str(VIZIER_REF[k]['catalog'])))
            catsp = str(VIZIER_REF[k]['catalog']).split('/')
            ctref = catsp[0]
            for ct in catsp[1:-1]: ctref=ctref+'/'+ct
            print('\tURL = https://cdsarc.unistra.fr/viz-bin/cat/{}\n'.format(ctref))
        return

    # is catalog one of the pre-defined ones (also settable via e.g. 2MASS=True)?
    # (this loop previously appeared twice verbatim; the duplicate was removed)
    for c in list(VIZIER_REF.keys()):
        if kwargs.get(c,False): catalog = c
    cat = checkDict(catalog,VIZIER_REF)
    if cat == False: cat = catalog
    else: cat = VIZIER_REF[cat]['catalog']

    # parameters
    if not isUnit(radius): radius = radius*u.arcsec

    # convert coordinate if necessary
    if not isinstance(coordinate,SkyCoord):
        try:
            c = properCoordinates(coordinate)
        except:
            print('\n{} is not a proper coordinate'.format(coordinate))
            return numpy.nan
    else:
        c = copy.deepcopy(coordinate)

    # search Vizier, requesting all columns plus the angular offset column _r
    v = Vizier(columns=["**", "+_r"], catalog=cat)
    if limit<0: v.ROW_LIMIT = -1
    else: v.ROW_LIMIT = int(limit)
    t_vizier = v.query_region(c,radius=radius)
    tv = Table()
    if len(t_vizier) > 0:
        for k in list(t_vizier.keys()):
            if cat in k: tv = t_vizier[k]
    else:
        tv = Table()

    if len(tv)==0:
        if return_pandas==True: return pandas.DataFrame()
        else: return tv

    # sorting (offset column _r is mirrored into 'sep' for the default sort)
    tv['sep'] = tv['_r']
    if len(tv) > 1:
        # bug fix: this previously read `kwargs.get('sort','sep')`, but `sort`
        # is a named parameter and never appears in kwargs, so a user-supplied
        # sort key was silently ignored; use the parameter directly
        if sort in list(tv.keys()):
            tv.sort(sort)
        else:
            if verbose:
                print('\nCannot find sorting keyword {}; try using {}\n'.format(sort,list(tv.keys())))

    # return only the nearest source if requested
    if nearest == True:
        while len(tv) > 1:
            tv.remove_row(1)

    # reformat to convert binary ascii data to text
    for s in list(tv.keys()):
        if isinstance(tv[s][0],bytes) == True or isinstance(tv[s][0],numpy.bytes_) == True:
            tmp = [x.decode() for x in tv[s]]
            tv.remove_column(s)
            tv[s] = tmp

    # convert to pandas if desired, decoding any remaining byte columns
    if return_pandas==True:
        tv = tv.to_pandas()
        fix = list(tv.dtypes[tv.dtypes=='object'].keys())
        if len(fix) > 0:
            for f in fix:
                tv[f] = tv[f].str.decode('utf8')

    return tv
def querySimbad(variable,radius=30.*u.arcsec,sort='sep',reject_type='',nearest=False,iscoordinate=False,isname=False,clean=False,return_pandas=True,verbose=False,**kwargs):
    '''
    Purpose
        Queries SIMBAD using astroquery for a single source
        If you are getting data on multiple sources, it is preferable to use `splat.database.queryXMatch()`_

    Required Inputs:
        :param: variable: Either an astropy SkyCoord object containing position of a source, a variable that can be converted into a SkyCoord using `spl.properCoordinates()`_, or a string name for a source.

    Optional Inputs:
        :param: radius: Search radius, nominally in arcseconds although can be set by assigning and astropy.unit value (default = 30 arcseconds)
        :param: sort: String specifying the parameter to sort the returned SIMBAD table by; by default this is the offset from the input coordinate (default = 'sep')
        :param: reject_type: Set to string or list of strings to filter out object types not desired. Useful for crowded fields (default = None)
        :param: nearest: Set to True to return on the single nearest source to coordinate (default = False)
        :param: iscoordinate: Specifies that input is a coordinate of some kind (default = False)
        :param: isname: Specifies that input is a name of some kind (default = False)
        :param: clean: Set to True to clean the SIMBAD output and reassign to a predefined set of parameters (default = True)
        :param: return_pandas: Return a pandas table as opposed to an astropy Table (default = True)
        :param: verbose: Give lots of feedback (default = False)

    Output:
        An astropy or pandas Table that contains data from the SIMBAD search, or a blank Table if no sources found

    Example:

    >>> import splat
    >>> import splat.database as spdb
    >>> from astropy import units as u
    >>> c = splat.properCoordinates('J053625-064302')
    >>> q = spdb.querySimbad(c,radius=15.*u.arcsec,reject_type='**')
    >>> print(q)
              NAME          OBJECT_TYPE     OFFSET    ... K_2MASS K_2MASS_E
    ----------------------- ----------- ------------- ... ------- ---------
               BD-06  1253B        Star  4.8443894429 ...
                [SST2010] 3        Star 5.74624887682 ...   18.36       0.1
                BD-06  1253         Ae* 7.74205447776 ...   5.947     0.024
               BD-06  1253A          **  7.75783861347 ...
     2MASS J05362590-0643020     brownD* 13.4818185612 ...  12.772     0.026
     2MASS J05362577-0642541        Star  13.983717577 ...

    .. _`splat.database.queryXMatch()` : api.html#splat.database.queryXMatch
    .. _`spl.properCoordinates()` : api.html#spl.properCoordinates
    '''
    # check that online
    if not checkOnline():
        print('\nYou are currently not online; cannot do a SIMBAD query')
        return Table()

    # parameters: radius defaults to arcseconds if passed unitless
    if not isUnit(radius): radius=radius*u.arcsec

    # check if this is a coordinate query
    if isinstance(variable,SkyCoord):
        c = copy.deepcopy(variable)
        iscoordinate = True
    elif not isname:
        try:
            c = properCoordinates(variable)
            iscoordinate = True
        # this is probably a name
        # NOTE(review): if properCoordinates raises here, isname is set but `c`
        # is never assigned, so the name query below would fail — confirm callers
        # pass isname=True for name searches
        except:
            isname = True
    else:
        if isinstance(variable,bytes):
            c = variable.decode()
        else:
            c = str(variable)

    # prep Simbad search: request object type, astrometry and photometry fields
    sb = Simbad()
    votfields = ['otype','parallax','sptype','propermotions','rot','rvz_radvel','rvz_error',\
    'rvz_bibcode','fluxdata(B)','fluxdata(V)','fluxdata(R)','fluxdata(I)','fluxdata(g)','fluxdata(r)',\
    'fluxdata(i)','fluxdata(z)','fluxdata(J)','fluxdata(H)','fluxdata(K)']
    for v in votfields:
        sb.add_votable_fields(v)

    # search SIMBAD by coordinate
    if iscoordinate:
        t_sim = sb.query_region(c,radius=radius)
        if not isinstance(t_sim,Table):
            if verbose:
                print('\nNo sources found; returning empty Table\n')
            return Table()

        # if more than one source, sort the results by separation
        sep = [c.separation(SkyCoord(str(t_sim['RA'][lp]),str(t_sim['DEC'][lp]),unit=(u.hourangle,u.degree))).arcsecond for lp in numpy.arange(len(t_sim))]
        t_sim['sep'] = sep

    # search SIMBAD by name
    elif isname:
        t_sim = sb.query_object(c)
        if not isinstance(t_sim,Table):
            if verbose:
                print('\nNo sources found; returning empty Table\n')
            return Table()
        t_sim['sep'] = numpy.zeros(len(t_sim['RA']))

    else:
        raise ValueError('problem!')

    # sort results by separation by default
    if sort in list(t_sim.keys()):
        t_sim.sort(sort)
    else:
        if verbose:
            print('\nCannot sort by {}; try keywords {}\n'.format(sort,list(t_sim.keys())))

    # reject object types not wanted (comma-separated substrings matched against OTYPE)
    if reject_type != '':
        rej = reject_type.split(',')
        for r in rej:
            w = numpy.array([str(r) not in str(o) for o in t_sim['OTYPE']])
            if len(w) > 0:
                t_sim = t_sim[w]

    # trim to single source if nearest flag is set (rows are separation-sorted above)
    if iscoordinate and nearest==True:
        while len(t_sim)>1:
            t_sim.remove_row(1)

    # clean up the columns: rename/reformat into SPLAT's standard source schema
    if clean == True and len(t_sim) > 0:
        t_src = Table()

        # reformat to convert binary ascii data to text
        for s in list(t_sim.keys()):
            if isinstance(t_sim[s][0],bytes) == True or isinstance(t_sim[s][0],numpy.bytes_) == True:
                tmp = [x.decode() for x in t_sim[s]]
                t_sim.remove_column(s)
                t_sim[s] = tmp

#        if not isinstance(t_sim['MAIN_ID'][0],str):
        t_src['NAME'] = [x.replace('  ',' ') for x in t_sim['MAIN_ID']]
#        else:
#            t_src['NAME'] = t_sim['MAIN_ID']
#        if not isinstance(t_sim['OTYPE'][0],str):
        t_src['OBJECT_TYPE'] = [x.replace('  ',' ') for x in t_sim['OTYPE']]
#        else:
#            t_src['OBJECT_TYPE'] = t_sim['OTYPE']
        t_src['OFFSET'] = t_sim['sep']
#        if not isinstance(t_sim['SP_TYPE'][0],str):
        t_src['LIT_SPT'] = [x.replace(' ','') for x in t_sim['SP_TYPE']]
#        else:
#            t_src['LIT_SPT'] = t_sim['SP_TYPE']
#        if not isinstance(t_sim['SP_BIBCODE'][0],str):
        t_src['LIT_SPT_REF'] = [x.replace(' ','') for x in t_sim['SP_BIBCODE']]
#        else:
#            t_src['LIT_SPT_REF'] = t_sim['SP_BIBCODE']
        # designation is rebuilt from SIMBAD's sexagesimal RA/DEC strings
        t_src['DESIGNATION'] = ['J{}{}'.format(t_sim['RA'][i],t_sim['DEC'][i]).replace(' ','').replace('.','') for i in range(len(t_sim))]
        t_src['RA'] = numpy.zeros(len(t_sim))
        t_src['DEC'] = numpy.zeros(len(t_sim))
        for i in range(len(t_sim)):
            c2 = properCoordinates(t_src['DESIGNATION'][i])
            t_src['RA'][i] = c2.ra.value
            t_src['DEC'][i] = c2.dec.value
        # masked values ('--') become empty strings in the numeric columns below
        t_src['PARALLAX'] = [str(p).replace('--','') for p in t_sim['PLX_VALUE']]
        t_src['PARALLAX_E'] = [str(p).replace('--','') for p in t_sim['PLX_ERROR']]
#        if not isinstance(t_sim['PLX_BIBCODE'][0],str):
        t_src['PARALLEX_REF'] = [x.replace(' ','') for x in t_sim['PLX_BIBCODE']]
#        else:
#            t_src['PARALLEX_REF'] = t_sim['PLX_BIBCODE']
        t_src['MU_RA'] = [str(p).replace('--','') for p in t_sim['PMRA']]
        t_src['MU_DEC'] = [str(p).replace('--','') for p in t_sim['PMDEC']]
        # total proper motion is the quadrature sum of the components (0 if missing)
        t_src['MU'] = numpy.zeros(len(t_sim))
        for i in range(len(t_sim)):
            if t_src['MU_RA'][i] != '':
                t_src['MU'][i] = (float(t_src['MU_RA'][i])**2+float(t_src['MU_DEC'][i])**2)**0.5
        t_src['MU_E'] = [str(p).replace('--','') for p in t_sim['PM_ERR_MAJA']]
#        if not isinstance(t_sim['PM_BIBCODE'][0],str):
        t_src['MU_REF'] = [x.replace(' ','') for x in t_sim['PM_BIBCODE']]
#        else:
#            t_src['MU_REF'] = t_sim['PM_BIBCODE']
        t_src['RV'] = [str(p).replace('--','') for p in t_sim['RVZ_RADVEL']]
        t_src['RV_E'] = [str(p).replace('--','') for p in t_sim['RVZ_ERROR']]
#        if not isinstance(t_sim['RVZ_BIBCODE'][0],str):
        t_src['RV_REF'] = [x.replace(' ','') for x in t_sim['RVZ_BIBCODE']]
#        else:
#            t_src['RV_REF'] = t_sim['RVZ_BIBCODE']
        t_src['VSINI'] = [str(p).replace('--','') for p in t_sim['ROT_Vsini']]
        t_src['VSINI_E'] = [str(p).replace('--','') for p in t_sim['ROT_err']]
#        if not isinstance(t_sim['ROT_bibcode'][0],str):
        t_src['VSINI_REF'] = [x.replace(' ','') for x in t_sim['ROT_bibcode']]
#        else:
#            t_src['VSINI_REF'] = t_sim['ROT_bibcode']
        t_src['J_2MASS'] = [str(p).replace('--','') for p in t_sim['FLUX_J']]
        t_src['J_2MASS_E'] = [str(p).replace('--','') for p in t_sim['FLUX_ERROR_J']]
        t_src['H_2MASS'] = [str(p).replace('--','') for p in t_sim['FLUX_H']]
        t_src['H_2MASS_E'] = [str(p).replace('--','') for p in t_sim['FLUX_ERROR_H']]
        t_src['K_2MASS'] = [str(p).replace('--','') for p in t_sim['FLUX_K']]
        t_src['K_2MASS_E'] = [str(p).replace('--','') for p in t_sim['FLUX_ERROR_K']]
    else:
        t_src = t_sim.copy()

    # convert to pandas if desired
    if return_pandas==True:
        t_src = t_src.to_pandas()
#        fix = list(t_src.dtypes[t_src.dtypes=='object'].keys())
#        if len(fix) > 0:
#            for f in fix:
#                t_src[f] = t_src[f].str.decode('utf8')

    return t_src
def _querySimbad2(t_src,designation='DESIGNATION',**kwargs):
    '''
    Purpose
        Internal function that queries Simbad and populates data for a source table.
        The input table ``t_src`` is updated in place; the function returns nothing
        (except an early return of the table when offline).
    :Note:
        **this program is in beta testing; bugs/errors are likely**
    :Required parameters:
        :param t_src: an astropy Table object, requires the presence of a designation column
    :Optional parameters:
        :param designation = 'DESIGNATION': name of the column containing source designations
        :param simbad_radius = 30 arcseconds: circular radius to search for sources (note: must be an angular quantity)
        :param verbose = True: print each match as it is found
    '''
    # search parameters
    simbad_radius = kwargs.get('simbad_radius',30.*u.arcsec)
    verbose = kwargs.get('verbose',True)
    # input checks
    if designation not in t_src.keys():
        raise NameError('\nDesignation column {} is required for input table to querySimbad\n'.format(designation))
    if 'SIMBAD_SEP' not in t_src.keys():
        t_src['SIMBAD_SEP'] = Column(numpy.zeros(len(t_src)),dtype='float')
    # must be online to query Simbad
    if not checkOnline():
        print('\nYou are currently not online so cannot query Simbad\n')
        return t_src
    # if necessary, populate columns that are expected for source database
    for c in list(splat.DB_SOURCES.keys()):
        if c not in t_src.keys():
            t_src[c] = Column([' '*50 for des in t_src['DESIGNATION']],dtype='str')
    # prep Simbad search with the votable fields needed to fill the source table
    sb = Simbad()
    votfields = ['otype','parallax','sptype','propermotions','rot','rvz_radvel','rvz_error',\
    'rvz_bibcode','fluxdata(B)','fluxdata(V)','fluxdata(R)','fluxdata(I)','fluxdata(g)','fluxdata(r)',\
    'fluxdata(i)','fluxdata(z)','fluxdata(J)','fluxdata(H)','fluxdata(K)']
    for v in votfields:
        sb.add_votable_fields(v)
    # search source by source
    for i,des in enumerate(t_src['DESIGNATION']):
        print(i,des)
        c = designationToCoordinate(des)
        try:
            t_sim = sb.query_region(c,radius=simbad_radius)
        except Exception:
            t_sim = None
        # skip sources with no Simbad match
        if not isinstance(t_sim,Table):
            continue
        if verbose:
            print('\nSource {} Designation = {} {} match(es)'.format(i+1,des,len(t_sim)))
            print(t_sim)
        # sort matches by separation from the input coordinate and keep only the closest
        sep = [c.separation(SkyCoord(str(t_sim['RA'][lp]),str(t_sim['DEC'][lp]),unit=(u.hourangle,u.degree))).arcsecond for lp in numpy.arange(len(t_sim))]
        t_sim['sep'] = sep
        t_sim.sort('sep')
        while len(t_sim) > 1:
            t_sim.remove_row(1)
        # fill in identification and spectral type information
        t_src['SIMBAD_NAME'][i] = t_sim['MAIN_ID'][0]
        t_src['NAME'][i] = t_src['SIMBAD_NAME'][i]
        t_src['SIMBAD_OTYPE'][i] = t_sim['OTYPE'][0]
        if not isinstance(t_sim['SP_TYPE'][0],str):
            t_sim['SP_TYPE'][0] = t_sim['SP_TYPE'][0].decode()
        # BUG FIX: str.replace returns a new string; the cleaned result was previously discarded
        spt = t_sim['SP_TYPE'][0].replace(' ','').replace('--','')
        t_src['SIMBAD_SPT'][i] = spt
        t_src['SIMBAD_SPT_REF'][i] = t_sim['SP_BIBCODE'][0]
        t_src['SIMBAD_SEP'][i] = t_sim['sep'][0]
        if spt != '':
            t_src['LIT_TYPE'][i] = t_src['SIMBAD_SPT'][i]
            t_src['LIT_TYPE_REF'][i] = t_src['SIMBAD_SPT_REF'][i]
        # update designation and coordinates from the Simbad position
        t_src['DESIGNATION'][i] = 'J{}{}'.format(t_sim['RA'][0],t_sim['DEC'][0]).replace(' ','').replace('.','')
        coord = properCoordinates(t_src['DESIGNATION'][i])
        t_src['RA'][i] = coord.ra.value
        t_src['DEC'][i] = coord.dec.value
        # classify object type from the spectral type string
        t_src['OBJECT_TYPE'][i] = 'VLM'
        if 'I' in t_sim['SP_TYPE'][0] and 'V' not in t_sim['SP_TYPE'][0]:
            t_src['LUMINOSITY_CLASS'][i] = 'I{}'.format(t_sim['SP_TYPE'][0].split('I',1)[1])
            t_src['OBJECT_TYPE'][i] = 'GIANT'
        if 'VI' in t_sim['SP_TYPE'][0] or 'sd' in t_sim['SP_TYPE'][0]:
            t_src['METALLICITY_CLASS'][i] = '{}sd'.format(t_sim['SP_TYPE'][0].split('sd',1)[0])
        # astrometric quantities; '--' marks masked values in Simbad output
        t_src['PARALLAX'][i] = str(t_sim['PLX_VALUE'][0]).replace('--','')
        t_src['PARALLAX_E'][i] = str(t_sim['PLX_ERROR'][0]).replace('--','')
        if isinstance(t_sim['PLX_BIBCODE'][0],str):
            t_src['PARALLEX_REF'][i] = str(t_sim['PLX_BIBCODE'][0]).replace('--','')
        else:
            t_src['PARALLEX_REF'][i] = t_sim['PLX_BIBCODE'][0].decode()
        t_src['MU_RA'][i] = str(t_sim['PMRA'][0]).replace('--','')
        t_src['MU_DEC'][i] = str(t_sim['PMDEC'][0]).replace('--','')
        # total proper motion; missing components (empty strings) default to 0
        # BUG FIX: previous '{}0'.format() padding corrupted integer-like values (e.g. '5' -> 50.)
        try:
            t_src['MU'][i] = (float(t_src['MU_RA'][i])**2+float(t_src['MU_DEC'][i])**2)**0.5
        except ValueError:
            t_src['MU'][i] = 0.0
        t_src['MU_E'][i] = str(t_sim['PM_ERR_MAJA'][0]).replace('--','')
        t_src['MU_REF'][i] = t_sim['PM_BIBCODE'][0]
        # radial velocity and rotation
        t_src['RV'][i] = str(t_sim['RVZ_RADVEL'][0]).replace('--','')
        t_src['RV_E'][i] = str(t_sim['RVZ_ERROR'][0]).replace('--','')
        t_src['RV_REF'][i] = t_sim['RVZ_BIBCODE'][0]
        t_src['VSINI'][i] = str(t_sim['ROT_Vsini'][0]).replace('--','')
        t_src['VSINI_E'][i] = str(t_sim['ROT_err'][0]).replace('--','')
        t_src['VSINI_REF'][i] = t_sim['ROT_bibcode'][0]
        # 2MASS photometry; values may come back as str (clean '--') or as floats (copy directly)
        if isinstance(t_sim['FLUX_J'][0],str):
            t_src['J_2MASS'][i] = t_sim['FLUX_J'][0].replace('--','')
        else:
            t_src['J_2MASS'][i] = t_sim['FLUX_J'][0]
        if isinstance(t_sim['FLUX_ERROR_J'][0],str):
            t_src['J_2MASS_E'][i] = t_sim['FLUX_ERROR_J'][0].replace('--','')
        else:
            t_src['J_2MASS_E'][i] = t_sim['FLUX_ERROR_J'][0]
        if isinstance(t_sim['FLUX_H'][0],str):
            t_src['H_2MASS'][i] = t_sim['FLUX_H'][0].replace('--','')
        else:
            t_src['H_2MASS'][i] = t_sim['FLUX_H'][0]
        if isinstance(t_sim['FLUX_ERROR_H'][0],str):
            t_src['H_2MASS_E'][i] = t_sim['FLUX_ERROR_H'][0].replace('--','')
        else:
            t_src['H_2MASS_E'][i] = t_sim['FLUX_ERROR_H'][0]
        if isinstance(t_sim['FLUX_K'][0],str):
            t_src['KS_2MASS'][i] = t_sim['FLUX_K'][0].replace('--','')
        else:
            t_src['KS_2MASS'][i] = t_sim['FLUX_K'][0]
        if isinstance(t_sim['FLUX_ERROR_K'][0],str):
            t_src['KS_2MASS_E'][i] = t_sim['FLUX_ERROR_K'][0].replace('--','')
        else:
            t_src['KS_2MASS_E'][i] = t_sim['FLUX_ERROR_K'][0]
    return
# query the NIST database
def queryNist(element,wave_range,clean=['Observed'],noclean=False,verbose=True,wavelength_type='vacuum'):
    '''
    Query the NIST atomic line database for a given element/ion over a wavelength range.

    :param element: species name such as 'K I' or 'Fe II'; a bare element name
        defaults to the neutral species (e.g. 'K' -> 'K I')
    :param wave_range: 2-element wavelength range; unitless values are assumed to be in microns
    :param clean: list of column names that must be unmasked for a line to be retained
        (default = ['Observed']; the list is only read, never modified)
    :param noclean: set to True to skip the masking-based cleanup (default = False)
    :param verbose: print feedback if no lines are found (default = True)
    :param wavelength_type: 'vacuum' (default) or 'air' wavelengths
    :return: astropy Table of matching lines (possibly empty)
    :raises ValueError: if element is not a string of the form 'K I'
    '''
    # validate the species string; split() (rather than split(' ')) tolerates repeated whitespace
    if not isinstance(element,str):
        raise ValueError('\nElement input must be a string like "K I", not {}'.format(element))
    tokens = element.strip().split()
    if len(tokens) == 1:
        tokens.append('I')  # assume neutral species if no ionization state given
    if len(tokens) != 2:
        raise ValueError('\nElement input must be a string like "K I", not {}'.format(element))
    element = ' '.join(tokens)
    # assume microns if no units provided on the wavelength range
    if not isUnit(wave_range[0]): wave_range = [w*u.micron for w in wave_range]
    t = Nist.query(wave_range[0],wave_range[1],linename=element,energy_level_unit='eV',wavelength_type=wavelength_type)
    # drop lines with masked values in the requested columns
    if noclean == False:
        for m in clean:
            t = t[~t[m].mask]
    if len(t) == 0 and verbose == True: print('\nNo lines found; check element, wavelength range, or set noclean=True')
    return(t)
def queryXMatch(db,radius=30.*u.arcsec,catalog='2MASS',file='',desigCol='DESIGNATION',raCol='RA',decCol='DEC',verbose=False,clean=True,drop_repeats=True,use_select_columns=False,select_columns=[],prefix=None,info=False,debug=False,*args):
    '''
    Purpose
        Queries databases in the CDS XMatch service, including SIMBAD, and merges the
        results into the input database. This is the preferred manner for extracting
        data for large numbers of sources.
    Required Inputs:
        :param db: a pandas DataFrame containing a designation column (`desigCol`) and/or
            RA (`raCol`) and DEC (`decCol`) columns; missing columns are generated with prepDB()
    Optional Inputs:
        :param radius: search radius, assumed to be in arcseconds if unitless (default = 30 arcsec)
        :param catalog: database to query; either one of the presets defined in XMATCH_CATALOGS
            below (e.g. 'SIMBAD', '2MASS', 'SDSS', 'ALLWISE', 'DENIS', 'GAIA-DR2', 'GAIA-EDR3',
            'PANSTARRS', 'UKIDSS') or any catalog listed in
            astroquery.xmatch.XMatch.get_available_tables() (default = '2MASS');
            may also be given as the first positional argument after db
        :param file: write the merged output to a csv/txt or xls/xlsx file (default = '' = not saved)
        :param desigCol: column in db with source designations (default = 'DESIGNATION')
        :param raCol: column in db with source RAs in degrees (default = 'RA')
        :param decCol: column in db with source DECs in degrees (default = 'DEC')
        :param drop_repeats: keep only the first (closest) match per source (default = True)
        :param use_select_columns: restrict output to the preset column list of a built-in
            catalog (default = False)
        :param select_columns: explicit list of catalog columns to retain (default = [] = all)
        :param prefix: string prepended to catalog column names; defaults to the catalog name
        :param info: print a summary of the built-in catalogs and return (default = False)
        :param verbose, debug: feedback flags (default = False)
    Output:
        A pandas DataFrame with the catalog columns merged in (left join on `desigCol`)
    '''
    # pre-defined catalogs
    XMATCH_CATALOGS = {
        'SIMBAD': {'altname': [],'vref': u'simbad', 'select_columns': ['main_id','ra','dec','main_type','sp_type','plx','pmra','pmdec','radvel','B', 'V', 'R', 'J', 'H', 'K', 'u', 'g', 'r', 'i', 'z']},\
        '2MASS': {'altname': [],'vref': u'vizier:II/246/out', 'select_columns': ['2MASS','RAJ2000','DEJ2000','Jmag','e_Jmag','Hmag','e_Hmag','Kmag','e_Kmag','MeasureJD']},\
        'DENIS': {'altname': [],'vref': u'vizier:B/denis/denis', 'select_columns': ['DENIS','RAJ2000','DEJ2000','Imag','e_Imag','Jmag','e_Jmag','Kmag','e_Kmag','Obs.JD']},\
        'SDSS': {'altname': ['SDSS16'],'vref': u'vizier:V/154/sdss16', 'select_columns': ['SDSS16','RAdeg','DEdeg','umag','e_umag','gmag','e_gmag','rmag','e_rmag','imag','e_imag','zmag','e_zmag','pmRA','e_pmRA','pmDE','e_pmDE','ObsDate','objID','SpObjID','spInst','spType','spCl','subCl','MJD']},\
        'SDSS12': {'altname': ['SDSS12'],'vref': u'vizier:V/147/sdss12', 'select_columns': ['SDSS12','RAdeg','DEdeg','umag','e_umag','gmag','e_gmag','rmag','e_rmag','imag','e_imag','zmag','e_zmag','pmRA','e_pmRA','pmDE','e_pmDE','ObsDate','objID','SpObjID','spType','spCl']},\
        'SDSS9': {'altname': [],'vref': u'vizier:V/139/sdss9', 'select_columns': ['SDSS9','RAdeg','DEdeg','umag','e_umag','gmag','e_gmag','rmag','e_rmag','imag','e_imag','zmag','e_zmag','pmRA','e_pmRA','pmDE','e_pmDE','ObsDate','objID','SpObjID','spType','spCl']},\
#        'CATWISE': {'altname': ['CAT'],'vref': u'vizier:II/365/catwise', 'select_columns': ['objID','RA_ICRS','DE_ICRS','Name','MJD','pmRA','e_pmRA','pmDE','e_pmDE','W1mproPM','e_W1mproPM','W2mproPM','e_W2mproPM']},\
        'ALLWISE': {'altname': [],'vref': u'vizier:II/328/allwise', 'select_columns': ['AllWISE','RAJ2000','DEJ2000','W1mag','e_W1mag','W2mag','e_W2mag','W3mag','e_W3mag','W4mag','e_W4mag','pmRA','e_pmRA','pmDE','e_pmDE','ID']},\
        'GAIA-DR1': {'altname': ['GAIADR1','GAIA1'],'vref': u'vizier:I/337/gaia', 'select_columns': ['source_id','ra','dec','ref_epoch','phot_g_mean_mag','phot_g_mean_flux','phot_g_mean_flux_error','parallax','parallax_error','pmra','pmra_error','pmdec','pmdec_error']},\
        'GAIA-DR2': {'altname': ['GAIADR2','GAIA2'],'vref': u'vizier:I/345/gaia2', 'select_columns': ['source_id','ra','dec','phot_g_mean_mag','phot_g_mean_flux','phot_g_mean_flux_error','parallax','parallax_error','pmra','pmra_error','pmdec','pmdec_error']},\
        'GAIA-EDR3': {'altname': ['GAIA-DR3','GAIAEDR3','GAIA3','GAIA'],'vref': u'vizier:I/350/gaiaedr3', 'select_columns': ['source_id','ra','dec','phot_g_mean_mag','phot_g_mean_flux','phot_g_mean_flux_error','parallax','parallax_error','pmra','pmra_error','pmdec','pmdec_error']},\
        'PANSTARRS': {'altname': ['PAN-STARRS','PS1'], 'vref': u'vizier:II/349/ps1', 'select_columns': ['objID','RAJ2000','DEJ2000','Epoch','gmag','e_gmag','rmag','e_rmag','imag','e_imag','zmag','e_zmag','ymag','e_ymag']},
        'UKIDSS': {'altname': ['UKIDSS-LAS','UKIDSS-LAS9','UKIDSS-DR9','UKIDSS-LAS-DR9'], 'vref': u'vizier:II/319/las9', 'select_columns': ['JName','RAJ2000','DEJ2000','Epoch','yAperMag3','yAperMag3Err','j_1AperMag3','j_1AperMag3Err','hAperMag3','hAperMag3Err','kAperMag3','kAperMag3Err','mergedClass']},
# not yet integrated
#        'WISE': {'altname': ['WISE'],'vref': u'vizier:II/311/wise', 'select_columns': ['AllWISE','RAJ2000','DEJ2000','W1mag','e_W1mag','W2mag','e_W2mag','W3mag','e_W3mag','W4mag','e_W4mag','pmRA','e_pmRA','pmDE','e_pmDE','ID']},\
#        'UCAC': {'altname': ['UCAC'],'vref': u'vizier:II/322A/las9', 'select_columns': ['AllWISE','RAJ2000','DEJ2000','W1mag','e_W1mag','W2mag','e_W2mag','W3mag','e_W3mag','W4mag','e_W4mag','pmRA','e_pmRA','pmDE','e_pmDE','ID']},\
#        'MOVERS': {'altname': ['MOVERS'],'vref': u'vizier:J/AJ/151/41/movers', 'select_columns': ['AllWISE','RAJ2000','DEJ2000','W1mag','e_W1mag','W2mag','e_W2mag','W3mag','e_W3mag','W4mag','e_W4mag','pmRA','e_pmRA','pmDE','e_pmDE','ID']},\
#        'LATEMOVERS': {'altname': ['LATEMOVERS','LATE-MOVERS'],'vref': u'vizier:J/AJ/153/92/lmovers', 'select_columns': ['AllWISE','RAJ2000','DEJ2000','W1mag','e_W1mag','W2mag','e_W2mag','W3mag','e_W3mag','W4mag','e_W4mag','pmRA','e_pmRA','pmDE','e_pmDE','ID']},\
    }
    # give a summary of the built-in catalogs and return
    if info==True:
        print('Currently available input catalogs:')
        for k in list(XMATCH_CATALOGS.keys()):
            line = '\t{}: '.format(k)
            if len(XMATCH_CATALOGS[k]['altname'])>0:
                line=line+'(or'
                for a in XMATCH_CATALOGS[k]['altname']: line=line+' {}'.format(a)
                line=line+') '
            print(line+'Vizier reference: {}'.format(str(XMATCH_CATALOGS[k]['vref'])))
            if 'vizier:' in str(XMATCH_CATALOGS[k]['vref']):
                catsp = str(XMATCH_CATALOGS[k]['vref']).split('/')
                ctref = catsp[0].replace('vizier:','')
                for ct in catsp[1:-1]: ctref=ctref+'/'+ct
                print('\tVizier URL = https://cdsarc.unistra.fr/viz-bin/cat/{}\n'.format(ctref))
            else: print()
        return
    # check db has the designation/RA/DEC columns, generating them if needed
    if desigCol not in list(db.columns) or raCol not in list(db.columns) or decCol not in list(db.columns):
        db = prepDB(db,raCol=raCol,decCol=decCol,desigCol=desigCol)
    if desigCol not in list(db.columns):
        raise ValueError('\nInput database must have at least the designation column {}; this one has {}'.format(desigCol,db.columns))
    basecols = [desigCol,raCol,decCol]
    if not isUnit(radius): radius = radius*u.arcsec
    # identify the catalog; a positional argument overrides the keyword
    if len(args) > 0: catalog = args[0]
    cat = checkDict(catalog,XMATCH_CATALOGS)
    if cat == False:
        # not a built-in catalog: pass the name straight through to vizier
        cat = catalog.upper()
        vref = 'vizier:'+catalog
    else:
        vref = XMATCH_CATALOGS[cat]['vref']
        # only built-in catalogs carry a predefined column selection
        # (previously this lookup was attempted for unknown catalogs too, raising KeyError)
        if use_select_columns == True and len(XMATCH_CATALOGS[cat]['select_columns']) > 0:
            select_columns = XMATCH_CATALOGS[cat]['select_columns']
    # check that catalog is available in the XMatch service
    if XMatch.is_table_available(vref) == False:
        print('\n{} is not one of the catalogs in astroquery.xmatch; try using queryVizer()'.format(catalog))
        return db
    if prefix is None: prefix = cat
    # run the crossmatch on the designation/RA/DEC columns only
    t = Table.from_pandas(db[basecols])
    t_match = XMatch.query(t,vref,radius,colRA1=raCol,colDec1=decCol,columns=["**", "+_r"])
    db_match = t_match.to_pandas()
    if debug==True:
        print('Found {} matches'.format(len(db_match)))
        print(db_match.iloc[0])
    # reject repeats if desired, keeping the first (closest) match per source
    if drop_repeats == True:
        db_match.drop_duplicates(subset=desigCol,keep='first',inplace=True)
        db_match.reset_index(drop=True,inplace=True)
    # constrain columns if a selection was given or preset
    if len(select_columns) > 0:
        newcols = copy.deepcopy(basecols)
        newcols.append('angDist')
        newcols.extend(select_columns)
        # drop requested columns the match did not return, warning for each
        for s in copy.deepcopy(newcols):
            if s not in list(db_match.columns):
                print('Warning: could not find column named {}'.format(s))
                newcols.remove(s)
        if len(newcols) > 0: db_match = db_match[newcols]
    # rename catalog columns with the prefix; base columns keep their names
    rename = {}
    for c in list(db_match.columns): rename[c] = prefix+'_'+c
    for c in list(basecols): rename[c] = c
    db_match = db_match.rename(index=str,columns=rename)
    # merge into the input database and drop redundant columns
    db_merge = pandas.merge(db,db_match,how='left',on=desigCol,suffixes=('','_DROP'))
    for c in list(db_merge.columns):
        if '_DROP' in c: del db_merge[c]
    if debug==True:
        print(db_merge.iloc[0])
    # save out if a filename was given
    if file != '':
        ext = file.split('.')[-1]
        if ext in ('csv','txt'):
            db_merge.to_csv(file,index=False)
        elif ext in ('xls','xlsx'):
            db_merge.to_excel(file,index=False)
        else:
            print('\nWarning: did not know how to save to {}; not saving'.format(file))
    return db_merge
#####################################################
########### ADDING SPECTRA TO LIBRARY ###########
#####################################################
def importSpectra(*args,**kwargs):
'''
Purpose
imports a set of spectra into the SPLAT library; requires manager access.
:Note:
**this program is in beta testing; bugs/errors are likely**
:Optional parameters:
:param data_folder = "./": Full path to folder containing data; by default this is the current directory
:param review_folder = "./review/": Full path to folder in which review materials will be kept; by default a new folder ``review`` will be created inside the data_folder
:param spreadsheet = "": Filename for a spreadsheet (ascii, tab- or comma-delimited) listing the input spectra, one per row. At least one column must be named ``filename`` or ``file`` that contains the name of the data file; the following columns are also recommended:
* ``designation``: source desigation; e.g., ``J15420830-2621138`` (strongly recommended)
* ``ra`` and ``dec``: Right Ascension and declination in decimal format (only needed if no designation column provided)
* ``name``: source name, designation will be used if not provided
* ``type``, ``opt_type``, ``nir_type``: spectral type of source (string); ``type`` will default to ``lit_type``
* ``date`` or ``observation_date``: date of observation in format YYYYMMDD
* ``slit``: slit width used (for computing resolution)
* ``airmass``: airmass of observation
* ``observer``: last name of primary observer
* ``data_reference``: bibcode of data reference
:Output:
- Source DB update file: spreadsheet containing update to source_data.txt, saved in review folder as source_data.txt
- Spectral DB update file: spreadsheet containing update to spectral_data.txt, saved locally as UPDATE_spectral_data.txt
- Photometry DB update file: spreadsheet containing update to photometry_data.txt, saved locally as UPDATE_photometry_data.txt
'''
# check user access
if checkAccess() == False:
print('\nSpectra may only be imported into library by designated manager or while online; please email {}'.format(splat.SPLAT_EMAIL))
return
# check online
# if spl.checkOnline() == False:
# print('\nWarning! You are not currently online so you will not be able to retrieve SIMBAD and Vizier data\n')
# set up optional inputs
simbad_radius = kwargs.get('simbad_radius',60.*u.arcsec)
if not isUnit(simbad_radius): simbad_radius=simbad_radius*u.arcsec
vizier_radius = kwargs.get('vizier_radius',30.*u.arcsec)
if not isUnit(vizier_radius): vizier_radius=vizier_radius*u.arcsec
data_folder = kwargs.get('data_folder','./')
data_folder = kwargs.get('dfolder',data_folder)
data_folder = kwargs.get('folder',data_folder)
if data_folder[-1] != '/':
data_folder+='/'
review_folder = kwargs.get('review_folder','{}/review/'.format(data_folder))
review_folder = kwargs.get('rfolder',review_folder)
if review_folder[-1] != '/':
review_folder+='/'
spreadsheet = kwargs.get('spreadsheet','')
spreadsheet = kwargs.get('sheet',spreadsheet)
spreadsheet = kwargs.get('entry',spreadsheet)
instrument = kwargs.get('instrument','UNKNOWN')
verbose = kwargs.get('verbose',True)
# make sure relevant files and folders are in place
if not os.path.exists(review_folder):
try:
os.makedirs(review_folder)
except OSError as exception:
if exception.errno != errno.EEXIST:
raise
# raise NameError('\nCannot find review folder {}'.format(review_folder))
if not os.path.exists(data_folder):
raise NameError('\nCannot find data folder {}'.format(data_folder))
if not os.path.exists('{}/published'.format(review_folder)):
try:
os.makedirs('{}/published'.format(review_folder))
except OSError as exception:
if exception.errno != errno.EEXIST:
raise
if not os.path.exists('{}/unpublished'.format(review_folder)):
try:
os.makedirs('{}/unpublished'.format(review_folder))
except OSError as exception:
if exception.errno != errno.EEXIST:
raise
# if spreadsheet is given, use this to generate list of files
if spreadsheet != '':
try:
t_input = fetchDatabase(spreadsheet)
except:
try:
t_input = fetchDatabase(data_folder+spreadsheet)
except:
raise NameError('\nCould not find spreadsheet {} in local or data directories\n'.format(spreadsheet))
tkeys = list(t_input.keys())
if 'FILENAME' in tkeys:
files = t_input['FILENAME']
elif 'FILE' in tkeys:
files = t_input['FILE']
elif 'FILES' in tkeys:
files = t_input['FILES']
else:
raise NameError('\nSpreadsheet {} does not have a column named filename; aborting\n'.format(spreadsheet))
if data_folder not in files[0]:
files = [data_folder+f for f in files]
# otherwise search for *.fits and *.txt files in data folder
else:
files = glob.glob(os.path.normpath(data_folder+'*.fits'))+glob.glob(os.path.normpath(data_folder+'*.txt'))
if len(files) == 0:
raise NameError('\nNo spectral files in {}\n'.format(data_folder))
# what instrument is this?
s = splat.Spectrum(filename=files[0])
if 'INSTRUME' in list(s.header.keys()):
instrument = s.header['INSTRUME'].replace(' ','').upper()
if 'INSTR' in list(s.header.keys()):
instrument = s.header['INSTR'].replace(' ','').upper()
if 'MODENAME' in list(s.header.keys()):
instrument+=' {}'.format(s.header['MODENAME'].replace(' ','').upper())
if instrument.upper().replace(' ','_') in list(splat.INSTRUMENTS.keys()):
instrument_info = splat.INSTRUMENTS[instrument.upper().replace(' ','_')]
else:
instrument_info = {'instrument_name': instrument, 'resolution': 0.*u.arcsec, 'slitwidth': 0.}
# prep tables containing information
t_spec = Table()
for c in list(splat.DB_SPECTRA.keys()):
t_spec[c] = Column([' '*200 for f in files],dtype='str')
t_src = Table()
for c in list(splat.DB_SOURCES.keys()):
t_src[c] = Column([' '*200 for f in files],dtype='str')
source_id0 = numpy.max(splat.DB_SOURCES['SOURCE_KEY'])
spectrum_id0 = numpy.max(splat.DB_SPECTRA['DATA_KEY'])
# read in files into Spectrum objects
if verbose: print('\nReading in {} files from {}'.format(len(files),data_folder))
# splist = []
t_spec['DATA_FILE'] = Column(files,dtype='str')
t_spec['SPECTRUM'] = [splat.Spectrum(filename=f) for f in files]
t_spec['INSTRUMENT'] = [instrument_info['instrument_name'] for f in files]
# for f in files:
# splist.append()
# populate spec array
if verbose: print('\nGenerating initial input tables')
t_spec['SOURCE_KEY'] = Column(numpy.arange(len(files))+source_id0+1,dtype='int')
t_spec['DATA_KEY'] = Column(numpy.arange(len(files))+spectrum_id0+1,dtype='int')
# t_spec['SPECTRUM'] = [sp for sp in splist]
t_spec['QUALITY_FLAG'] = Column(['OK' for f in t_spec['DATA_FILE']],dtype='str')
t_spec['PUBLISHED'] = Column(['N' for f in t_spec['DATA_FILE']],dtype='str')
# measurements
t_spec['MEDIAN_SNR'] = Column([sp.computeSN() for sp in t_spec['SPECTRUM']],dtype='float')
t_spec['SPEX_TYPE'] = Column([splat.classifyByStandard(sp,string=True,method=kwargs.get('method','kirkpatrick'),mask_telluric=True)[0] for sp in t_spec['SPECTRUM']],dtype='str')
t_spec['SPEX_GRAVITY_CLASSIFICATION'] = Column([splat.classifyGravity(sp,string=True) for sp in t_spec['SPECTRUM']],dtype='str')
# populate spectral data table from fits file header
for i,sp in enumerate(t_spec['SPECTRUM']):
if 'DATE_OBS' in list(sp.header.keys()):
t_spec['OBSERVATION_DATE'][i] = sp.header['DATE_OBS'].replace('-','')
t_spec['JULIAN_DATE'][i] = Time(sp.header['DATE_OBS']).mjd
if 'DATE' in list(sp.header.keys()):
t_spec['OBSERVATION_DATE'][i] = sp.header['DATE'].replace('-','')
if verbose: print(i,t_spec['OBSERVATION_DATE'][i],properDate(t_spec['OBSERVATION_DATE'][i],output='YYYYMMDD'))
t_spec['JULIAN_DATE'][i] = Time(sp.header['DATE']).mjd
if 'TIME_OBS' in list(sp.header.keys()):
t_spec['OBSERVATION_TIME'][i] = sp.header['TIME_OBS'].replace(':',' ')
if 'MJD_OBS' in list(sp.header.keys()):
t_spec['JULIAN_DATE'][i] = sp.header['MJD_OBS']
if 'OBSERVER' in list(sp.header.keys()):
t_spec['OBSERVER'][i] = sp.header['OBSERVER']
if 'RESOLUTION' in list(sp.header.keys()):
t_spec['RESOLUTION'][i] = sp.header['RESOLUTION']
elif 'RES' in list(sp.header.keys()):
t_spec['RESOLUTION'][i] = sp.header['RES']
elif 'SLITW' in list(sp.header.keys()):
t_spec['RESOLUTION'][i] = instrument_info['resolution']*(instrument_info['slitwidth'].value)/sp.header['SLITW']
elif 'SLTW_ARC' in list(sp.header.keys()):
t_spec['RESOLUTION'][i] = instrument_info['resolution']*(instrument_info['slitwidth'].value)/sp.header['SLTW_ARC']
if 'AIRMASS' in list(sp.header.keys()):
t_spec['AIRMASS'][i] = sp.header['AIRMASS']
if 'VERSION' in list(sp.header.keys()):
v = sp.header['VERSION']
t_spec['REDUCTION_SPEXTOOL_VERSION'][i] = 'v{}'.format(v.split('v')[-1])
# populate spectral data table from spreadsheet
if spreadsheet != '':
# if 'FILENAME' in tkeys:
# t_spec['DATA_FILE'] = t_input['FILENAME']
if 'DATE' in tkeys:
t_spec['OBSERVATION_DATE'] = [properDate(str(a),output='YYYYMMDD') for a in t_input['DATE']]
# for a in t_input['DATE']:
# print(a,spl.properDate(str(a)),Time(spl.properDate(str(a),output='YYYY-MM-DD')),Time(spl.properDate(str(a),output='YYYY-MM-DD')).mjd)
t_spec['JULIAN_DATE'] = [Time(properDate(str(a),output='YYYY-MM-DD')).mjd for a in t_input['DATE']]
if 'RESOLUTION' in tkeys:
t_spec['RESOLUTION'] = [r for r in t_input['RESOLUTION']]
# CHANGE THIS TO BE INSTRUMENT SPECIFIC
if 'SLIT' in tkeys:
t_spec['RESOLUTION'] = [t_spec['RESOLUTION']*(instrument_info['slitwidth'].value)/float(s) for s in t_input['SLIT']]
if 'AIRMASS' in tkeys:
t_spec['AIRMASS'] = t_input['AIRMASS']
if 'OBSERVER' in tkeys:
t_spec['OBSERVER'] = t_input['OBSERVER']
if 'DATA_REFERENCE' in tkeys:
t_spec['DATA_REFERENCE'] = t_input['DATA_REFERENCE']
for i,ref in enumerate(t_spec['DATA_REFERENCE']):
if ref != '':
t_spec['PUBLISHED'][i] = 'Y'
# for c in splist[0].header.keys():
# if c != 'HISTORY':
# print('{} {}'.format(c,splist[0].header[c]))
t_src['SOURCE_KEY'] = t_spec['SOURCE_KEY']
t_src['GRAVITY_CLASS_NIR'] = t_spec['SPEX_GRAVITY_CLASSIFICATION']
t_src['GRAVITY_CLASS_NIR_REF'] = Column(['SPL' for sp in t_spec['SPECTRUM']],dtype='str')
t_spec['COMPARISON_SPECTRUM'] = [splat.STDS_DWARF_SPEX[spt] for spt in t_spec['SPEX_TYPE']]
t_spec['COMPARISON_TEXT'] = [' '*200 for spt in t_spec['SPEX_TYPE']]
for i,spt in enumerate(t_spec['SPEX_TYPE']):
t_spec['COMPARISON_TEXT'][i] = '{} standard'.format(spt)
# determine coordinates as best as possible
for i,sp in enumerate(t_spec['SPECTRUM']):
# if i == 0:
# for k in list(sp.header.keys()):
# print(k,sp.header[k])
if 'TCS_RA' in list(sp.header.keys()) and 'TCS_DEC' in list(sp.header.keys()):
sp.header['RA'] = sp.header['TCS_RA']
sp.header['DEC'] = sp.header['TCS_DEC']
sp.header['RA'] = sp.header['RA'].replace('+','')
if t_src['DESIGNATION'][i].strip() == '' and 'RA' in list(sp.header.keys()) and 'DEC' in list(sp.header.keys()):
if sp.header['RA'] != '' and sp.header['DEC'] != '':
t_src['DESIGNATION'][i] = 'J{}+{}'.format(sp.header['RA'].replace('+',''),sp.header['DEC']).replace(':','').replace('.','').replace('+-','-').replace('++','+').replace('J+','J').replace(' ','')
# print('DETERMINED DESIGNATION {} FROM RA/DEC'.format(t_src['DESIGNATION'][i]))
if t_src['RA'][i].strip() == '' and t_src['DESIGNATION'][i].strip() != '':
coord = properCoordinates(t_src['DESIGNATION'][i])
t_src['RA'][i] = coord.ra.value
t_src['DEC'][i] = coord.dec.value
# print('DETERMINED RA/DEC FROM DESIGNATION {}'.format(t_src['DESIGNATION'][i]))
# print(t_src['DESIGNATION'],t_src['RA'],t_src['DEC'])
# populate source data table from spreadsheet
if spreadsheet != '':
if 'DESIGNATION' in tkeys:
t_src['DESIGNATION'] = t_input['DESIGNATION']
t_src['NAME'] = t_src['DESIGNATION']
# may want to check how we overrule fits file headers
coord = [properCoordinates(s) for s in t_src['DESIGNATION']]
t_src['RA'] = [c.ra.value for c in coord]
t_src['DEC'] = [c.dec.value for c in coord]
if 'NAME' in tkeys:
t_src['NAME'] = t_input['NAME']
if 'RA' in tkeys and 'DEC' in tkeys:
if isNumber(t_input['RA'][0]):
t_src['RA'] = t_input['RA']
t_src['DEC'] = t_input['DEC']
if 'TYPE' in tkeys:
t_src['LIT_TYPE'] = t_input['TYPE']
if 'OPT_TYPE' in tkeys:
t_src['OPT_TYPE'] = t_input['OPT_TYPE']
if 'NIR_TYPE' in tkeys:
t_src['NIR_TYPE'] = t_input['NIR_TYPE']
if 'J' in tkeys:
t_src['J_2MASS'] = t_input['J']
if 'J_E' in tkeys:
t_src['J_2MASS_E'] = t_input['J_E']
if 'H' in tkeys:
t_src['H_2MASS'] = t_input['H']
if 'H_E' in tkeys:
t_src['H_2MASS_E'] = t_input['H_E']
if 'K' in tkeys:
t_src['KS_2MASS'] = t_input['K']
if 'KS' in tkeys:
t_src['KS_2MASS'] = t_input['KS']
if 'K_E' in tkeys:
t_src['KS_2MASS_E'] = t_input['K_E']
if 'KS_E' in tkeys:
t_src['KS_2MASS_E'] = t_input['KS_E']
# for c in DB_SOURCES.keys():
# if c not in t_src.keys():
# t_src[c] = Column([' '*50 for sp in splist],dtype='str') # force string
# transfer spectral types
for i,t in enumerate(t_src['NIR_TYPE']):
if t.replace(' ','') == '':
t_src['NIR_TYPE'][i] = t_spec['SPEX_TYPE'][i]
t_src['NIR_TYPE_REF'][i] = 'SPL'
if t_src['LIT_TYPE'][i].replace(' ','') == '':
t_src['LIT_TYPE'][i] = t_spec['SPEX_TYPE'][i]
t_src['LIT_TYPE_REF'][i] = 'SPL'
# now do a SIMBAD search for sources based on coordinates
if kwargs.get('nosimbad',False) == False:
if verbose:
print('\nSIMBAD search')
_querySimbad2(t_src,simbad_radius=simbad_radius)
# fill in missing 2MASS photometry with Vizier query
if kwargs.get('novizier',False) == False:
if verbose:
print('\n2MASS photometry from Vizier')
if not checkOnline():
if verbose:
print('\nCould not perform Vizier search, you are not online')
else:
for i,jmag in enumerate(t_src['J_2MASS']):
if float('{}0'.format(jmag.replace('--',''))) == 0.0:
t_vizier = getPhotometry(properCoordinates(t_src['DESIGNATION'][i]),radius=vizier_radius,catalog='2MASS')
# multiple sources; choose the closest
if len(t_vizier) > 0:
t_vizier.sort_values('_r')
# print(len(t_vizier),t_vizier.keys())
# while len(t_vizier)>1:
# t_vizier.remove_row(1)
if verbose:
print('\n{}'.format(t_src['DESIGNATION'][i]))
print(t_vizier)
t_src['DESIGNATION'][i] = 'J{}'.format(t_vizier['_2MASS'][0])
t_src['J_2MASS'][i] = str(t_vizier['Jmag'][0]).replace('--','')
t_src['J_2MASS_E'][i] = str(t_vizier['e_Jmag'][0]).replace('--','')
t_src['H_2MASS'][i] = str(t_vizier['Hmag'][0]).replace('--','')
t_src['H_2MASS_E'][i] = str(t_vizier['e_Hmag'][0]).replace('--','')
t_src['KS_2MASS'][i] = str(t_vizier['Kmag'][0]).replace('--','')
t_src['KS_2MASS_E'][i] = str(t_vizier['e_Kmag'][0]).replace('--','')
# add in distance if spectral type and magnitude are known
for i,spt in enumerate(t_src['LIT_TYPE']):
if spt.replace(' ','') != '' and float('{}0'.format(str(t_src['J_2MASS'][i]).replace('--',''))) != 0.0:
# print(spt,t_src['J_2MASS'][i],t_src['J_2MASS_E'][i])
dist = estimateDistance(spt=spt,filter='2MASS J',mag=float(t_src['J_2MASS'][i]))
if not numpy.isnan(dist[0]):
t_src['DISTANCE_PHOT'][i] = dist[0]
t_src['DISTANCE_PHOT_E'][i] = dist[1]
t_src['DISTANCE'][i] = dist[0]
t_src['DISTANCE_E'][i] = dist[1]
if float('{}0'.format(str(t_src['PARALLAX'][i]).replace('--',''))) != 0.0 and float('{}0'.format(str(t_src['PARALLAX_E'][i]).replace('--',''))) != 0.0 :
t_src['DISTANCE'][i] = 1000./float(t_src['PARALLAX'][i])
t_src['DISTANCE_E'][i] = float(t_src['DISTANCE'][i])*float(t_src['PARALLAX_E'][i])/float(t_src['PARALLAX'][i])
# compute vtan
if float('{}0'.format(str(t_src['MU'][i]).replace('--',''))) != 0.0 and float('{}0'.format(str(t_src['DISTANCE'][i]).replace('--',''))) != 0.0:
t_src['VTAN'][i] = 4.74*float(t_src['DISTANCE'][i])*float(t_src['MU'][i])/1000.
# clear up zeros
if float('{}0'.format(str(t_src['J_2MASS'][i]).replace('--',''))) == 0.0:
t_src['J_2MASS'][i] = ''
t_src['J_2MASS_E'][i] = ''
if float('{}0'.format(str(t_src['H_2MASS'][i]).replace('--',''))) == 0.0:
t_src['H_2MASS'][i] = ''
t_src['H_2MASS_E'][i] = ''
if float('{}0'.format(str(t_src['KS_2MASS'][i]).replace('--',''))) == 0.0:
t_src['KS_2MASS'][i] = ''
t_src['KS_2MASS_E'][i] = ''
if float('{}0'.format(str(t_src['PARALLAX'][i]).replace('--',''))) == 0.0:
t_src['PARALLAX'][i] = ''
t_src['PARALLAX_E'][i] = ''
if float('{}0'.format(str(t_src['MU'][i]).replace('--',''))) == 0.0:
t_src['MU'][i] = ''
t_src['MU_E'][i] = ''
t_src['MU_RA'][i] = ''
t_src['MU_DEC'][i] = ''
if float('{}0'.format(str(t_src['RV'][i]).replace('--',''))) == 0.0:
t_src['RV'][i] = ''
t_src['RV_E'][i] = ''
if float('{}0'.format(str(t_src['VSINI'][i]).replace('--',''))) == 0.0:
t_src['VSINI'][i] = ''
t_src['VSINI_E'][i] = ''
if float('{}0'.format(str(t_src['SIMBAD_SEP'][i]).replace('--',''))) == 0.0:
t_src['SIMBAD_SEP'][i] = ''
if t_src['GRAVITY_CLASS_NIR'][i] == '':
t_src['GRAVITY_CLASS_NIR_REF'][i] = ''
# compute J-K excess and color extremity
if spt.replace(' ','') != '' and float('{}0'.format(str(t_src['J_2MASS'][i]).replace('--',''))) != 0.0 and float('{}0'.format(str(t_src['KS_2MASS'][i]).replace('--',''))) != 0.0:
t_src['JK_EXCESS'][i] = float(t_src['J_2MASS'][i])-float(t_src['KS_2MASS'][i])-typeToColor(spt,'J-K')[0]
if t_src['JK_EXCESS'][i] == numpy.nan or t_src['JK_EXCESS'][i] == '' or t_src['JK_EXCESS'][i] == 'nan':
t_src['JK_EXCESS'][i] = ''
elif float(t_src['JK_EXCESS'][i]) > 0.3:
t_src['COLOR_EXTREMITY'][i] == 'RED'
elif float(t_src['JK_EXCESS'][i]) < -0.3:
t_src['COLOR_EXTREMITY'][i] == 'BLUE'
else:
pass
# check for previous entries
t_src['SHORTNAME'] = [designationToShortName(d) for d in t_src['DESIGNATION']]
if 'SHORTNAME' not in list(splat.DB_SOURCES.keys()):
splat.DB_SOURCES['SHORTNAME'] = [designationToShortName(d) for d in splat.DB_SOURCES['DESIGNATION']]
for i,des in enumerate(t_src['DESIGNATION']):
# check if shortnames line up
if t_src['SHORTNAME'][i] in splat.DB_SOURCES['SHORTNAME']:
for c in list(t_src.keys()):
t_src[c][i] = splat.DB_SOURCES[c][numpy.where(splat.DB_SOURCES['SHORTNAME'] == t_src['SHORTNAME'][i])][0]
t_spec['SOURCE_KEY'][i] = t_src['SOURCE_KEY'][i]
# check if SIMBAD names line up
elif t_src['SIMBAD_NAME'][i] != '' and t_src['SIMBAD_NAME'][i] in splat.DB_SOURCES['SIMBAD_NAME']:
for c in t_src.keys():
if t_src[c][i] == '':
t_src[c][i] = splat.DB_SOURCES[c][numpy.where(splat.DB_SOURCES['SIMBAD_NAME'] == t_src['SIMBAD_NAME'][i])][0]
t_spec['SOURCE_KEY'][i] = t_src['SOURCE_KEY'][i]
else:
pass
# check to see if prior spectrum was taken on the same date (possible redundancy)
matchlib = splat.searchLibrary(idkey=t_src['SOURCE_KEY'][i],date=t_spec['OBSERVATION_DATE'][i])
# previous observation on this date found - retain in case this is a better spectrum
if len(matchlib) > 0.:
mkey = matchlib['DATA_KEY'][0]
if verbose:
print('Previous spectrum found in library for data key {}'.format(mkey))
t_spec['COMPARISON_SPECTRUM'][i] = splat.Spectrum(int(mkey))
t_spec['COMPARISON_TEXT'][i] = 'repeat spectrum: {}'.format(mkey)
# no previous observation on this date - retain the spectrum with the highest S/N
else:
matchlib = splat.searchLibrary(idkey=t_src['SOURCE_KEY'][i])
if len(matchlib) > 0:
matchlib.sort('MEDIAN_SNR')
matchlib.reverse()
t_spec['COMPARISON_SPECTRUM'][i] = splat.Spectrum(int(matchlib['DATA_KEY'][0]))
t_spec['COMPARISON_TEXT'][i] = 'alternate spectrum: {} taken on {}'.format(matchlib['DATA_KEY'][0],matchlib['OBSERVATION_DATE'][0])
# print(matchlib['DATA_KEY'][0])
# print(t_spec['COMPARISON_TEXT'][i])
# generate check plots
legend = []
for i,sp in enumerate(t_spec['SPECTRUM']):
legend.extend(['Data Key: {} Source Key: {}\n{}'.format(t_spec['DATA_KEY'][i],t_spec['SOURCE_KEY'][i],t_spec['SPECTRUM'][i].name),'{} {}'.format(t_spec['COMPARISON_SPECTRUM'][i].name,t_spec['COMPARISON_TEXT'][i])])
for s in t_spec['COMPARISON_SPECTRUM']: print(s)
splot.plotBatch([s for s in t_spec['SPECTRUM']],comparisons=[s for s in t_spec['COMPARISON_SPECTRUM']],normalize=True,output=review_folder+'/review_plots.pdf',legend=legend,noise=True,telluric=True)
# output database updates
if 'SHORTNAME' in t_src.keys():
t_src.remove_column('SHORTNAME')
if 'SELECT' in t_src.keys():
t_src.remove_column('SELECT')
if 'SELECT' in t_spec.keys():
t_spec.remove_column('SELECT')
if 'SOURCE_SELECT' in t_spec.keys():
t_spec.remove_column('SOURCE_SELECT')
if 'SPECTRUM' in t_spec.keys():
t_spec.remove_column('SPECTRUM')
if 'COMPARISON_SPECTRUM' in t_spec.keys():
t_spec.remove_column('COMPARISON_SPECTRUM')
if 'COMPARISON_TEXT' in t_spec.keys():
t_spec.remove_column('COMPARISON_TEXT')
# for i in numpy.arange(len(t_spec['NOTE'])):
# t_spec['NOTE'][i] = compdict[str(t_spec['DATA_KEY'][i])]['comparison_type']
t_src.write(review_folder+'/source_update.csv',format='ascii.csv')
t_spec.write(review_folder+'/spectrum_update.csv',format='ascii.csv')
# open up windows to review spreadsheets
# NOTE: WOULD LIKE TO MAKE THIS AUTOMATICALLY OPEN FILE
# app = QtGui.QApplication(sys.argv)
# window = Window(10, 5)
# window.resize(640, 480)
# window.show()
# app.exec_()
print('\nSpectral plots and update speadsheets now available in {}'.format(review_folder))
response = input('Please review and edit, and press any key when you are finished...\n')
# NEXT STEP - MOVE FILES TO APPROPRIATE PLACES, UPDATE MAIN DATABASES
# source db
t_src = fetchDatabase(review_folder+'/source_update.csv',csv=True)
# if 'SIMBAD_SEP' in t_src.keys():
# t_src.remove_column('SIMBAD_SEP')
# for col in t_src.colnames:
# tmp = t_src[col].astype(splat.DB_SOURCES[col].dtype)
# t_src.replace_column(col,tmp)
# t_merge = vstack([splat.DB_SOURCES,t_src])
# t_merge.sort('SOURCE_KEY')
# if 'SHORTNAME' in t_merge.keys():
# t_merge.remove_column('SHORTNAME')
# if 'SELECT' in t_merge.keys():
# t_merge.remove_column('SELECT')
# t_merge.write(review_folder+DB_SOURCES_FILE,format='ascii.tab')
# spectrum db
t_spec = fetchDatabase(review_folder+'/spectrum_update.csv',csv=True)
# move files
for i,file in enumerate(t_spec['DATA_FILE']):
t_spec['DATA_FILE'][i] = '{}_{}.fits'.format(t_spec['DATA_KEY'][i],t_spec['SOURCE_KEY'][i])
# print(file[-4:],t_spec['DATA_FILE'][i])
if file[-4:] == 'fits':
if t_spec['PUBLISHED'][i] == 'Y':
copyfile(file,'{}/published/{}'.format(review_folder,t_spec['DATA_FILE'][i]))
# if verbose:
# print('Moved {} to {}/published/'.format(t_spec['DATA_FILE'][i],review_folder))
else:
copyfile(file,'{}/unpublished/{}'.format(review_folder,t_spec['DATA_FILE'][i]))
# if verbose:
# print('Moved {} to {}/unpublished/'.format(t_spec['DATA_FILE'][i],review_folder))
else:
# print(data_folder+file)
sp = splat.Spectrum(file=file)
if t_spec['PUBLISHED'][i] == 'Y':
sp.export('{}/published/{}'.format(review_folder,t_spec['DATA_FILE'][i]))
# if verbose:
# print('Moved {} to {}/published/'.format(t_spec['DATA_FILE'][i],review_folder))
else:
sp.export('{}/unpublished/{}'.format(review_folder,t_spec['DATA_FILE'][i]))
# if verbose:
# print('Moved {} to {}/unpublished/'.format(t_spec['DATA_FILE'][i],review_folder))
# save off updated spectrum update file
t_spec.write(review_folder+'/spectrum_update.csv',format='ascii.csv')
# merge and export - THIS WASN'T WORKING
# for col in t_spec.colnames:
# print(col,DB_SPECTRA[col].dtype)
# tmp = t_spec[col].astype(splat.DB_SPECTRA[col].dtype)
# t_spec.replace_column(col,tmp)
# t_merge = vstack([splat.DB_SPECTRA,t_spec])
# t_merge.sort('DATA_KEY')
# if 'SHORTNAME' in t_merge.keys():
# t_merge.remove_column('SHORTNAME')
# if 'SELECT' in t_merge.keys():
# t_merge.remove_column('SELECT')
# if 'SOURCE_SELECT' in t_merge.keys():
# t_merge.remove_column('SOURCE_SELECT')
# if 'DATEN' in t_merge.keys():
# t_merge.remove_column('DATEN')
# t_merge.write(review_folder+splat.DB_SPECTRA_FILE,format='ascii.tab')
if verbose:
print('\nDatabases updated; be sure to add these to primary databases in {}'.format(SPLAT_PATH+DB_FOLDER))
print('and to move spectral files from {}/published and {}/unpublished/ to {}\n'.format(review_folder,review_folder,SPLAT_PATH+DATA_FOLDER))
return
|
aburgasserREPO_NAMEsplatPATH_START.@splat_extracted@splat-main@splat@database.py@.PATH_END.py
|
{
"filename": "backend_svg.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/matplotlib/py2/matplotlib/backends/backend_svg.py",
"type": "Python"
}
|
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from collections import OrderedDict
import six
from six import unichr
from six.moves import xrange
import base64
import codecs
import gzip
import hashlib
import io
import logging
import re
import uuid
import numpy as np
from matplotlib import cbook, __version__, rcParams
from matplotlib.backend_bases import (
_Backend, FigureCanvasBase, FigureManagerBase, RendererBase)
from matplotlib.backends.backend_mixed import MixedModeRenderer
from matplotlib.cbook import is_writable_file_like, maxdict
from matplotlib.colors import rgb2hex
from matplotlib.font_manager import findfont, get_font
from matplotlib.ft2font import LOAD_NO_HINTING
from matplotlib.mathtext import MathTextParser
from matplotlib.path import Path
from matplotlib import _path
from matplotlib.transforms import Affine2D, Affine2DBase
from matplotlib import _png
_log = logging.getLogger(__name__)
backend_version = __version__
# ----------------------------------------------------------------------
# SimpleXMLWriter class
#
# Based on an original by Fredrik Lundh, but modified here to:
# 1. Support modern Python idioms
# 2. Remove encoding support (it's handled by the file writer instead)
# 3. Support proper indentation
# 4. Minify things a little bit
# --------------------------------------------------------------------
# The SimpleXMLWriter module is
#
# Copyright (c) 2001-2004 by Fredrik Lundh
#
# By obtaining, using, and/or copying this software and/or its
# associated documentation, you agree that you have read, understood,
# and will comply with the following terms and conditions:
#
# Permission to use, copy, modify, and distribute this software and
# its associated documentation for any purpose and without fee is
# hereby granted, provided that the above copyright notice appears in
# all copies, and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of
# Secret Labs AB or the author not be used in advertising or publicity
# pertaining to distribution of the software without specific, written
# prior permission.
#
# SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD
# TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT-
# ABILITY AND FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR
# BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY
# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
# OF THIS SOFTWARE.
# --------------------------------------------------------------------
def escape_cdata(s):
    """Escape *s* for use as XML character data.

    The ampersand must be replaced first, otherwise the '&' introduced by
    the later replacements would be escaped a second time.
    """
    # The entity references below were mangled into their decoded forms
    # in the broken source (e.g. replace("&", "&") — a no-op); restore
    # the XML predefined entities.
    s = s.replace("&", "&amp;")
    s = s.replace("<", "&lt;")
    s = s.replace(">", "&gt;")
    return s
# "--" is illegal inside an XML comment; match any '-' followed by another
# '-' so a space can be inserted between them.
_escape_xml_comment = re.compile(r'-(?=-)')

def escape_comment(s):
    """Escape *s* for inclusion in an XML comment (entities and dash runs)."""
    return _escape_xml_comment.sub('- ', escape_cdata(s))
def escape_attrib(s):
    """Escape *s* for use inside a double-quoted XML attribute value.

    '&' is replaced first so later substitutions are not double-escaped.
    """
    # The entity references were mangled into their decoded forms in the
    # broken source (the '"' replacement even opened a triple-quoted
    # string); restore the XML predefined entities.
    s = s.replace("&", "&amp;")
    s = s.replace("'", "&apos;")
    s = s.replace("\"", "&quot;")
    s = s.replace("<", "&lt;")
    s = s.replace(">", "&gt;")
    return s
def short_float_fmt(x):
    """
    Create a short string representation of a float: fixed-point
    ('%f') formatting with trailing zeros, and then a trailing decimal
    point, stripped off.
    """
    text = '{0:f}'.format(x)
    # Strip trailing zeros first, then a now-dangling decimal point;
    # the '.' stops rstrip from eating significant zeros (e.g. '100').
    return text.rstrip('0').rstrip('.')
##
# XML writer class.
#
# @param file A file or file-like object.  This object must implement
# a <b>write</b> method that takes an 8-bit string.
class XMLWriter(object):
    """Minimal streaming XML writer (derived from Fredrik Lundh's
    SimpleXMLWriter).

    State is kept in name-mangled attributes: ``__open`` records whether
    the most recent start tag still needs its closing '>', ``__tags`` is
    the stack of open element names, and ``__data`` buffers character
    data until it can be flushed as escaped CDATA.
    """
    def __init__(self, file):
        # Bind the stream's write method directly; every output path
        # goes through self.__write.
        self.__write = file.write
        if hasattr(file, "flush"):
            # Expose the underlying stream's flush (replaces the no-op
            # `flush` method defined at the bottom of the class).
            self.flush = file.flush
        self.__open = 0 # true if start tag is open
        self.__tags = []
        self.__data = []
        # Pre-built indentation pool; sliced per nesting depth (so
        # nesting deeper than 32 levels stops indenting further).
        self.__indentation = " " * 64
    def __flush(self, indent=True):
        # flush internal buffers: finish a pending start tag and emit
        # any buffered character data (escaped).
        if self.__open:
            if indent:
                self.__write(">\n")
            else:
                self.__write(">")
            self.__open = 0
        if self.__data:
            data = ''.join(self.__data)
            self.__write(escape_cdata(data))
            self.__data = []
    ## Opens a new element.  Attributes can be given as keyword
    # arguments, or as a string/string dictionary. The method returns
    # an opaque identifier that can be passed to the <b>close</b>
    # method, to close all open elements up to and including this one.
    #
    # @param tag Element tag.
    # @param attrib Attribute dictionary.  Alternatively, attributes
    #    can be given as keyword arguments.
    # @return An element identifier.
    def start(self, tag, attrib={}, **extra):
        self.__flush()
        tag = escape_cdata(tag)
        self.__data = []
        self.__tags.append(tag)
        self.__write(self.__indentation[:len(self.__tags) - 1])
        self.__write("<%s" % tag)
        if attrib or extra:
            # `attrib` is copied before update, so the caller's dict and
            # the shared default are never mutated.
            attrib = attrib.copy()
            attrib.update(extra)
            attrib = sorted(six.iteritems(attrib))
            for k, v in attrib:
                # Attributes with empty values are dropped entirely.
                if not v == '':
                    k = escape_cdata(k)
                    v = escape_attrib(v)
                    self.__write(" %s=\"%s\"" % (k, v))
        self.__open = 1
        return len(self.__tags)-1
    ##
    # Adds a comment to the output stream.
    #
    # @param comment Comment text, as a Unicode string.
    def comment(self, comment):
        self.__flush()
        self.__write(self.__indentation[:len(self.__tags)])
        self.__write("<!-- %s -->\n" % escape_comment(comment))
    ##
    # Adds character data to the output stream.
    #
    # @param text Character data, as a Unicode string.
    def data(self, text):
        # Buffered until the next flush so consecutive data() calls are
        # written (and escaped) as one run.
        self.__data.append(text)
    ##
    # Closes the current element (opened by the most recent call to
    # <b>start</b>).
    #
    # @param tag Element tag.  If given, the tag must match the start
    #    tag.  If omitted, the current element is closed.
    def end(self, tag=None, indent=True):
        if tag:
            assert self.__tags, "unbalanced end(%s)" % tag
            assert escape_cdata(tag) == self.__tags[-1],\
                   "expected end(%s), got %s" % (self.__tags[-1], tag)
        else:
            assert self.__tags, "unbalanced end()"
            tag = self.__tags.pop()
        if self.__data:
            self.__flush(indent)
        elif self.__open:
            # Start tag still open and no data: emit a self-closing tag.
            self.__open = 0
            self.__write("/>\n")
            return
        if indent:
            self.__write(self.__indentation[:len(self.__tags)])
        self.__write("</%s>\n" % tag)
    ##
    # Closes open elements, up to (and including) the element identified
    # by the given identifier.
    #
    # @param id Element identifier, as returned by the <b>start</b> method.
    def close(self, id):
        while len(self.__tags) > id:
            self.end()
    ##
    # Adds an entire element.  This is the same as calling <b>start</b>,
    # <b>data</b>, and <b>end</b> in sequence. The <b>text</b> argument
    # can be omitted.
    def element(self, tag, text=None, attrib={}, **extra):
        self.start(*(tag, attrib), **extra)
        if text:
            self.data(text)
        self.end(indent=False)
    ##
    # Flushes the output stream.
    def flush(self):
        pass # replaced by the constructor
# ----------------------------------------------------------------------
def generate_transform(transform_list=()):
    """Build the value of an SVG ``transform`` attribute.

    Parameters
    ----------
    transform_list : sequence of (name, value) pairs
        ``name`` is one of 'scale', 'translate', 'rotate' or 'matrix';
        ``value`` is the tuple of numeric arguments (for 'matrix' it may
        also be an `Affine2DBase`, which is converted via ``to_values``).

    Returns
    -------
    str
        The concatenated transform functions, or '' when the list is
        empty or only contains identity transforms.
    """
    # Default changed from a mutable list ([]) to an immutable tuple to
    # avoid the shared-mutable-default pitfall; behavior is unchanged.
    if len(transform_list):
        output = io.StringIO()
        for kind, value in transform_list:
            # Drop identity transforms to keep the output short.
            if kind == 'scale' and (value == (1.0,) or value == (1.0, 1.0)):
                continue
            if kind == 'translate' and value == (0.0, 0.0):
                continue
            if kind == 'rotate' and value == (0.0,):
                continue
            if kind == 'matrix' and isinstance(value, Affine2DBase):
                value = value.to_values()
            output.write('%s(%s)' % (
                kind, ' '.join(short_float_fmt(x) for x in value)))
        return output.getvalue()
    return ''
def generate_css(attrib={}):
    """Render *attrib* as a CSS declaration string ('key:value;...').

    Keys are emitted in sorted order so output is deterministic; both
    keys and values are attribute-escaped.  Returns '' for an empty
    mapping.
    """
    if not attrib:
        return ''
    declarations = []
    for key, value in sorted(six.iteritems(attrib)):
        declarations.append(
            "%s:%s;" % (escape_attrib(key), escape_attrib(value)))
    return ''.join(declarations)
# Map matplotlib cap-style names to their SVG stroke-linecap equivalents
# ('projecting' is called 'square' in SVG; the others match).
_capstyle_d = {'projecting' : 'square', 'butt' : 'butt', 'round': 'round',}
class RendererSVG(RendererBase):
FONT_SCALE = 100.0
fontd = maxdict(50)
def __init__(self, width, height, svgwriter, basename=None, image_dpi=72):
self.width = width
self.height = height
self.writer = XMLWriter(svgwriter)
self.image_dpi = image_dpi # the actual dpi we want to rasterize stuff with
self._groupd = {}
if not rcParams['svg.image_inline']:
assert basename is not None
self.basename = basename
self._imaged = {}
self._clipd = OrderedDict()
self._char_defs = {}
self._markers = {}
self._path_collection_id = 0
self._imaged = {}
self._hatchd = OrderedDict()
self._has_gouraud = False
self._n_gradients = 0
self._fonts = OrderedDict()
self.mathtext_parser = MathTextParser('SVG')
RendererBase.__init__(self)
self._glyph_map = dict()
str_height = short_float_fmt(height)
str_width = short_float_fmt(width)
svgwriter.write(svgProlog)
self._start_id = self.writer.start(
'svg',
width='%spt' % str_width,
height='%spt' % str_height,
viewBox='0 0 %s %s' % (str_width, str_height),
xmlns="http://www.w3.org/2000/svg",
version="1.1",
attrib={'xmlns:xlink': "http://www.w3.org/1999/xlink"})
self._write_default_style()
    def finalize(self):
        """Write deferred <defs> sections, close the root <svg> element
        and flush the underlying stream.

        Must be called exactly once, after all drawing is done: clips,
        hatches and SVG fonts are accumulated during drawing and only
        emitted here.
        """
        self._write_clips()
        self._write_hatches()
        self._write_svgfonts()
        self.writer.close(self._start_id)
        self.writer.flush()
def _write_default_style(self):
writer = self.writer
default_style = generate_css({
'stroke-linejoin': 'round',
'stroke-linecap': 'butt'})
writer.start('defs')
writer.start('style', type='text/css')
writer.data('*{%s}\n' % default_style)
writer.end('style')
writer.end('defs')
def _make_id(self, type, content):
content = str(content)
if rcParams['svg.hashsalt'] is None:
salt = str(uuid.uuid4())
else:
salt = rcParams['svg.hashsalt']
if six.PY3:
content = content.encode('utf8')
salt = salt.encode('utf8')
m = hashlib.md5()
m.update(salt)
m.update(content)
return '%s%s' % (type, m.hexdigest()[:10])
def _make_flip_transform(self, transform):
return (transform +
Affine2D()
.scale(1.0, -1.0)
.translate(0.0, self.height))
def _get_font(self, prop):
fname = findfont(prop)
font = get_font(fname)
font.clear()
size = prop.get_size_in_points()
font.set_size(size, 72.0)
return font
def _get_hatch(self, gc, rgbFace):
"""
Create a new hatch pattern
"""
if rgbFace is not None:
rgbFace = tuple(rgbFace)
edge = gc.get_hatch_color()
if edge is not None:
edge = tuple(edge)
dictkey = (gc.get_hatch(), rgbFace, edge)
oid = self._hatchd.get(dictkey)
if oid is None:
oid = self._make_id('h', dictkey)
self._hatchd[dictkey] = ((gc.get_hatch_path(), rgbFace, edge), oid)
else:
_, oid = oid
return oid
    def _write_hatches(self):
        """Emit one <pattern> per hatch accumulated in self._hatchd,
        inside a single <defs> block.  No-op when no hatches were used."""
        if not len(self._hatchd):
            return
        # Pattern tile size in user units.
        HATCH_SIZE = 72
        writer = self.writer
        writer.start('defs')
        for ((path, face, stroke), oid) in six.itervalues(self._hatchd):
            writer.start(
                'pattern',
                id=oid,
                patternUnits="userSpaceOnUse",
                x="0", y="0", width=six.text_type(HATCH_SIZE),
                height=six.text_type(HATCH_SIZE))
            # Scale the unit hatch path up to the tile and flip y to SVG
            # orientation.
            path_data = self._convert_path(
                path,
                Affine2D().scale(HATCH_SIZE).scale(1.0, -1.0).translate(0, HATCH_SIZE),
                simplify=False)
            if face is None:
                fill = 'none'
            else:
                fill = rgb2hex(face)
            # Background rect is one unit larger than the tile,
            # presumably to avoid seams between tiles — TODO confirm.
            writer.element(
                'rect',
                x="0", y="0", width=six.text_type(HATCH_SIZE+1),
                height=six.text_type(HATCH_SIZE+1),
                fill=fill)
            writer.element(
                'path',
                d=path_data,
                style=generate_css({
                    'fill': rgb2hex(stroke),
                    'stroke': rgb2hex(stroke),
                    'stroke-width': six.text_type(rcParams['hatch.linewidth']),
                    'stroke-linecap': 'butt',
                    'stroke-linejoin': 'miter'
                    })
                )
            writer.end('pattern')
        writer.end('defs')
    def _get_style_dict(self, gc, rgbFace):
        """
        Return the style attributes as a dict, generated from the
        GraphicsContext *gc* and the fill color *rgbFace* (RGB(A) tuple
        or None for no fill).
        """
        attrib = {}
        # When alpha is "forced", a single global 'opacity' is used
        # instead of per-channel fill/stroke opacities.
        forced_alpha = gc.get_forced_alpha()
        if gc.get_hatch() is not None:
            # Fill with the hatch <pattern> instead of a flat color.
            attrib['fill'] = "url(#%s)" % self._get_hatch(gc, rgbFace)
            if rgbFace is not None and len(rgbFace) == 4 and rgbFace[3] != 1.0 and not forced_alpha:
                attrib['fill-opacity'] = short_float_fmt(rgbFace[3])
        else:
            if rgbFace is None:
                attrib['fill'] = 'none'
            else:
                # Black is the SVG default fill, so it is omitted.
                if tuple(rgbFace[:3]) != (0, 0, 0):
                    attrib['fill'] = rgb2hex(rgbFace)
                if len(rgbFace) == 4 and rgbFace[3] != 1.0 and not forced_alpha:
                    attrib['fill-opacity'] = short_float_fmt(rgbFace[3])
        if forced_alpha and gc.get_alpha() != 1.0:
            attrib['opacity'] = short_float_fmt(gc.get_alpha())
        offset, seq = gc.get_dashes()
        if seq is not None:
            attrib['stroke-dasharray'] = ','.join([short_float_fmt(val) for val in seq])
            attrib['stroke-dashoffset'] = short_float_fmt(float(offset))
        linewidth = gc.get_linewidth()
        if linewidth:
            rgb = gc.get_rgb()
            attrib['stroke'] = rgb2hex(rgb)
            if not forced_alpha and rgb[3] != 1.0:
                attrib['stroke-opacity'] = short_float_fmt(rgb[3])
            # 1.0 is the SVG default stroke-width; 'round'/'butt' are the
            # file-wide defaults written by _write_default_style.
            if linewidth != 1.0:
                attrib['stroke-width'] = short_float_fmt(linewidth)
            if gc.get_joinstyle() != 'round':
                attrib['stroke-linejoin'] = gc.get_joinstyle()
            if gc.get_capstyle() != 'butt':
                attrib['stroke-linecap'] = _capstyle_d[gc.get_capstyle()]
        return attrib
def _get_style(self, gc, rgbFace):
return generate_css(self._get_style_dict(gc, rgbFace))
    def _get_clip(self, gc):
        """Return the id of the <clipPath> for *gc*'s clip, registering it
        in self._clipd on first use, or None when *gc* has no clip.

        A clip path takes precedence over a clip rectangle.  The actual
        <clipPath> elements are written later by _write_clips().
        """
        cliprect = gc.get_clip_rectangle()
        clippath, clippath_trans = gc.get_clip_path()
        if clippath is not None:
            clippath_trans = self._make_flip_transform(clippath_trans)
            # Key on object identity plus the transform's repr, so the
            # same path under different transforms gets distinct entries.
            dictkey = (id(clippath), str(clippath_trans))
        elif cliprect is not None:
            x, y, w, h = cliprect.bounds
            # Convert bottom-left origin to SVG's top-left origin.
            y = self.height-(y+h)
            dictkey = (x, y, w, h)
        else:
            return None
        clip = self._clipd.get(dictkey)
        if clip is None:
            oid = self._make_id('p', dictkey)
            # Cache either (path, transform) or the rect bounds;
            # _write_clips distinguishes the two by tuple length.
            if clippath is not None:
                self._clipd[dictkey] = ((clippath, clippath_trans), oid)
            else:
                self._clipd[dictkey] = (dictkey, oid)
        else:
            clip, oid = clip
        return oid
    def _write_clips(self):
        """Emit one <clipPath> per entry accumulated in self._clipd,
        inside a single <defs> block.  No-op when nothing was clipped."""
        if not len(self._clipd):
            return
        writer = self.writer
        writer.start('defs')
        for clip, oid in six.itervalues(self._clipd):
            writer.start('clipPath', id=oid)
            # A 2-tuple is (path, transform); a 4-tuple is rect bounds
            # (see _get_clip).
            if len(clip) == 2:
                clippath, clippath_trans = clip
                path_data = self._convert_path(clippath, clippath_trans, simplify=False)
                writer.element('path', d=path_data)
            else:
                x, y, w, h = clip
                writer.element(
                    'rect',
                    x=short_float_fmt(x),
                    y=short_float_fmt(y),
                    width=short_float_fmt(w),
                    height=short_float_fmt(h))
            writer.end('clipPath')
        writer.end('defs')
    def _write_svgfonts(self):
        """Emit SVG <font> definitions for every font/char accumulated in
        self._fonts.  Only active when rcParams['svg.fonttype'] is
        'svgfont'."""
        if not rcParams['svg.fonttype'] == 'svgfont':
            return
        writer = self.writer
        writer.start('defs')
        for font_fname, chars in six.iteritems(self._fonts):
            font = get_font(font_fname)
            font.set_size(72, 72)
            sfnt = font.get_sfnt()
            # sfnt key (1, 0, 0, 4) is the Macintosh full font name;
            # presumably used as a stable font id — TODO confirm.
            writer.start('font', id=sfnt[1, 0, 0, 4].decode("mac_roman"))
            writer.element(
                'font-face',
                attrib={
                    'font-family': font.family_name,
                    'font-style': font.style_name.lower(),
                    'units-per-em': '72',
                    # Font metrics are in 26.6 fixed point (64ths).
                    'bbox': ' '.join(
                        short_float_fmt(x / 64.0) for x in font.bbox)})
            for char in chars:
                glyph = font.load_char(char, flags=LOAD_NO_HINTING)
                # get_path() returns the outline of the last loaded char.
                verts, codes = font.get_path()
                path = Path(verts, codes)
                path_data = self._convert_path(path)
                # name = font.get_glyph_name(char)
                writer.element(
                    'glyph',
                    d=path_data,
                    attrib={
                        # 'glyph-name': name,
                        'unicode': unichr(char),
                        # linearHoriAdvance is in 16.16 fixed point.
                        'horiz-adv-x':
                        short_float_fmt(glyph.linearHoriAdvance / 65536.0)})
            writer.end('font')
        writer.end('defs')
def open_group(self, s, gid=None):
"""
Open a grouping element with label *s*. If *gid* is given, use
*gid* as the id of the group.
"""
if gid:
self.writer.start('g', id=gid)
else:
self._groupd[s] = self._groupd.get(s, 0) + 1
self.writer.start('g', id="%s_%d" % (s, self._groupd[s]))
    def close_group(self, s):
        """Close the most recently opened group; *s* is accepted for API
        symmetry with open_group but not used."""
        self.writer.end('g')
    def option_image_nocomposite(self):
        """
        return whether to generate a composite image from multiple images on
        a set of axes
        """
        # Compositing is skipped exactly when the user enabled
        # 'image.composite_image' in rcParams (note the inversion).
        return not rcParams['image.composite_image']
def _convert_path(self, path, transform=None, clip=None, simplify=None,
sketch=None):
if clip:
clip = (0.0, 0.0, self.width, self.height)
else:
clip = None
return _path.convert_to_string(
path, transform, clip, simplify, sketch, 6,
[b'M', b'L', b'Q', b'C', b'z'], False).decode('ascii')
    def draw_path(self, gc, path, transform, rgbFace=None):
        """Draw *path* as a single <path> element, optionally wrapped in
        an <a> link when the graphics context carries a URL."""
        trans_and_flip = self._make_flip_transform(transform)
        # Only clip/simplify unfilled, unhatched paths: clipping a filled
        # path at conversion time could change its fill region.
        clip = (rgbFace is None and gc.get_hatch_path() is None)
        simplify = path.should_simplify and clip
        path_data = self._convert_path(
            path, trans_and_flip, clip=clip, simplify=simplify,
            sketch=gc.get_sketch_params())
        attrib = {}
        attrib['style'] = self._get_style(gc, rgbFace)
        clipid = self._get_clip(gc)
        if clipid is not None:
            attrib['clip-path'] = 'url(#%s)' % clipid
        if gc.get_url() is not None:
            self.writer.start('a', {'xlink:href': gc.get_url()})
        self.writer.element('path', d=path_data, attrib=attrib)
        if gc.get_url() is not None:
            self.writer.end('a')
    def draw_markers(self, gc, marker_path, marker_trans, path, trans, rgbFace=None):
        """Draw *marker_path* at each vertex of *path*: the marker shape
        is defined once in <defs> and then referenced per vertex with
        <use> elements."""
        if not len(path.vertices):
            return
        writer = self.writer
        path_data = self._convert_path(
            marker_path,
            marker_trans + Affine2D().scale(1.0, -1.0),
            simplify=False)
        style = self._get_style_dict(gc, rgbFace)
        # Key on both the geometry and the full style so distinct styles
        # get distinct defs.
        dictkey = (path_data, generate_css(style))
        oid = self._markers.get(dictkey)
        # Only the stroke-* properties go on the shared def; fill-related
        # style is applied per <use> below.
        style = generate_css({k: v for k, v in six.iteritems(style)
                              if k.startswith('stroke')})
        if oid is None:
            oid = self._make_id('m', dictkey)
            writer.start('defs')
            writer.element('path', id=oid, d=path_data, style=style)
            writer.end('defs')
            self._markers[dictkey] = oid
        attrib = {}
        clipid = self._get_clip(gc)
        if clipid is not None:
            attrib['clip-path'] = 'url(#%s)' % clipid
        writer.start('g', attrib=attrib)
        trans_and_flip = self._make_flip_transform(trans)
        attrib = {'xlink:href': '#%s' % oid}
        # Generous clip window (canvas scaled by 72), presumably so
        # markers just off-canvas are still emitted — TODO confirm.
        clip = (0, 0, self.width*72, self.height*72)
        for vertices, code in path.iter_segments(
                trans_and_flip, clip=clip, simplify=False):
            if len(vertices):
                # The last two values of a segment are its end point.
                x, y = vertices[-2:]
                attrib['x'] = short_float_fmt(x)
                attrib['y'] = short_float_fmt(y)
                attrib['style'] = self._get_style(gc, rgbFace)
                writer.element('use', attrib=attrib)
        writer.end('g')
    def draw_path_collection(self, gc, master_transform, paths, all_transforms,
                             offsets, offsetTrans, facecolors, edgecolors,
                             linewidths, linestyles, antialiaseds, urls,
                             offset_position):
        """Draw a collection of paths, using <defs>+<use> sharing when the
        size estimate says it pays off, else falling back to the generic
        per-path implementation in RendererBase."""
        # Is the optimization worth it? Rough calculation:
        # cost of emitting a path in-line is
        #    (len_path + 5) * uses_per_path
        # cost of definition+use is
        #    (len_path + 3) + 9 * uses_per_path
        len_path = len(paths[0].vertices) if len(paths) > 0 else 0
        uses_per_path = self._iter_collection_uses_per_path(
            paths, all_transforms, offsets, facecolors, edgecolors)
        should_do_optimization = \
            len_path + 9 * uses_per_path + 3 < (len_path + 5) * uses_per_path
        if not should_do_optimization:
            return RendererBase.draw_path_collection(
                self, gc, master_transform, paths, all_transforms,
                offsets, offsetTrans, facecolors, edgecolors,
                linewidths, linestyles, antialiaseds, urls,
                offset_position)
        writer = self.writer
        path_codes = []
        # First pass: define each distinct (path, transform) once.
        writer.start('defs')
        for i, (path, transform) in enumerate(self._iter_collection_raw_paths(
                master_transform, paths, all_transforms)):
            transform = Affine2D(transform.get_matrix()).scale(1.0, -1.0)
            d = self._convert_path(path, transform, simplify=False)
            oid = 'C%x_%x_%s' % (self._path_collection_id, i,
                                 self._make_id('', d))
            writer.element('path', id=oid, d=d)
            path_codes.append(oid)
        writer.end('defs')
        # Second pass: one <use> per instance, with per-instance style,
        # clip and optional hyperlink.
        for xo, yo, path_id, gc0, rgbFace in self._iter_collection(
                gc, master_transform, all_transforms, path_codes, offsets,
                offsetTrans, facecolors, edgecolors, linewidths, linestyles,
                antialiaseds, urls, offset_position):
            clipid = self._get_clip(gc0)
            url = gc0.get_url()
            if url is not None:
                writer.start('a', attrib={'xlink:href': url})
            if clipid is not None:
                writer.start('g', attrib={'clip-path': 'url(#%s)' % clipid})
            attrib = {
                'xlink:href': '#%s' % path_id,
                'x': short_float_fmt(xo),
                # Flip y from bottom-left to top-left origin.
                'y': short_float_fmt(self.height - yo),
                'style': self._get_style(gc0, rgbFace)
            }
            writer.element('use', attrib=attrib)
            if clipid is not None:
                writer.end('g')
            if url is not None:
                writer.end('a')
        # Namespace the ids of the next collection's defs.
        self._path_collection_id += 1
    def draw_gouraud_triangle(self, gc, points, colors, trans):
        """Approximate one Gouraud-shaded triangle with three stacked
        linear gradients over a flat averaged-color base triangle."""
        # This uses a method described here:
        #
        #   http://www.svgopen.org/2005/papers/Converting3DFaceToSVG/index.html
        #
        # that uses three overlapping linear gradients to simulate a
        # Gouraud triangle.  Each gradient goes from fully opaque in
        # one corner to fully transparent along the opposite edge.
        # The line between the stop points is perpendicular to the
        # opposite edge.  Underlying these three gradients is a solid
        # triangle whose color is the average of all three points.
        writer = self.writer
        if not self._has_gouraud:
            # Emit the additive-composite filter once per document.
            self._has_gouraud = True
            writer.start(
                'filter',
                id='colorAdd')
            writer.element(
                'feComposite',
                attrib={'in': 'SourceGraphic'},
                in2='BackgroundImage',
                operator='arithmetic',
                k2="1", k3="1")
            writer.end('filter')
        avg_color = np.sum(colors[:, :], axis=0) / 3.0
        # Just skip fully-transparent triangles
        if avg_color[-1] == 0.0:
            return
        trans_and_flip = self._make_flip_transform(trans)
        tpoints = trans_and_flip.transform(points)
        writer.start('defs')
        for i in range(3):
            # Corner i fades toward the opposite edge (j=i+1, k=i+2).
            x1, y1 = tpoints[i]
            x2, y2 = tpoints[(i + 1) % 3]
            x3, y3 = tpoints[(i + 2) % 3]
            c = colors[i][:]
            if x2 == x3:
                # Opposite edge is vertical: gradient runs horizontally.
                xb = x2
                yb = y1
            elif y2 == y3:
                # Opposite edge is horizontal: gradient runs vertically.
                xb = x1
                yb = y2
            else:
                # Foot of the perpendicular from corner i to the line
                # through the opposite edge.
                m1 = (y2 - y3) / (x2 - x3)
                b1 = y2 - (m1 * x2)
                m2 = -(1.0 / m1)
                b2 = y1 - (m2 * x1)
                xb = (-b1 + b2) / (m1 - m2)
                yb = m2 * xb + b2
            writer.start(
                'linearGradient',
                id="GR%x_%d" % (self._n_gradients, i),
                x1=short_float_fmt(x1), y1=short_float_fmt(y1),
                x2=short_float_fmt(xb), y2=short_float_fmt(yb))
            writer.element(
                'stop',
                offset='0',
                style=generate_css({'stop-color': rgb2hex(c),
                                    'stop-opacity': short_float_fmt(c[-1])}))
            writer.element(
                'stop',
                offset='1',
                style=generate_css({'stop-color': rgb2hex(c),
                                    'stop-opacity': "0"}))
            writer.end('linearGradient')
        # Triangle outline reuses the last iteration's x1..y3, which are
        # the same three (rotated) vertices.
        writer.element(
            'polygon',
            id='GT%x' % self._n_gradients,
            points=" ".join([short_float_fmt(x)
                             for x in (x1, y1, x2, y2, x3, y3)]))
        writer.end('defs')
        # NOTE(review): avg_color is recomputed here although colors has
        # not changed since the computation above.
        avg_color = np.sum(colors[:, :], axis=0) / 3.0
        href = '#GT%x' % self._n_gradients
        writer.element(
            'use',
            attrib={'xlink:href': href,
                    'fill': rgb2hex(avg_color),
                    'fill-opacity': short_float_fmt(avg_color[-1])})
        for i in range(3):
            writer.element(
                'use',
                attrib={'xlink:href': href,
                        'fill': 'url(#GR%x_%d)' % (self._n_gradients, i),
                        'fill-opacity': '1',
                        'filter': 'url(#colorAdd)'})
        self._n_gradients += 1
def draw_gouraud_triangles(self, gc, triangles_array, colors_array,
transform):
attrib = {}
clipid = self._get_clip(gc)
if clipid is not None:
attrib['clip-path'] = 'url(#%s)' % clipid
self.writer.start('g', attrib=attrib)
transform = transform.frozen()
for tri, col in zip(triangles_array, colors_array):
self.draw_gouraud_triangle(gc, tri, col, transform)
self.writer.end('g')
def option_scale_image(self):
return True
def get_image_magnification(self):
return self.image_dpi / 72.0
    def draw_image(self, gc, x, y, im, transform=None):
        """Emit an SVG ``<image>`` element for the RGBA array *im*.

        Depending on the ``svg.image_inline`` rc setting, the pixel data is
        either inlined as a base64 ``data:`` URI or written to a numbered
        side-car PNG file referenced by name.

        Parameters
        ----------
        gc : GraphicsContextBase
            Supplies clip path, gid, url and alpha.
        x, y : float
            Anchor position of the image in display coordinates.
        im : ndarray
            Image data; the first two axes are (height, width).
        transform : optional
            If given, the image keeps its native pixel size and this
            affine (composed with a y-flip) is written on the element;
            otherwise the image is scaled into 72-dpi user space.
        """
        h, w = im.shape[:2]
        # Degenerate (zero-size) images produce no output at all.
        if w == 0 or h == 0:
            return
        attrib = {}
        clipid = self._get_clip(gc)
        if clipid is not None:
            # Can't apply clip-path directly to the image because the
            # image has a transformation, which would also be applied
            # to the clip-path
            self.writer.start('g', attrib={'clip-path': 'url(#%s)' % clipid})
        oid = gc.get_gid()
        url = gc.get_url()
        if url is not None:
            self.writer.start('a', attrib={'xlink:href': url})
        if rcParams['svg.image_inline']:
            # Inline the PNG bytes as a base64 data URI.
            bytesio = io.BytesIO()
            _png.write_png(im, bytesio)
            oid = oid or self._make_id('image', bytesio.getvalue())
            attrib['xlink:href'] = (
                "data:image/png;base64,\n" +
                base64.b64encode(bytesio.getvalue()).decode('ascii'))
        else:
            # Write a side-car PNG, numbered per output basename.
            self._imaged[self.basename] = self._imaged.get(self.basename, 0) + 1
            filename = '%s.image%d.png'%(self.basename, self._imaged[self.basename])
            _log.info('Writing image file for inclusion: %s', filename)
            _png.write_png(im, filename)
            oid = oid or 'Im_' + self._make_id('image', filename)
            attrib['xlink:href'] = filename
        attrib['id'] = oid
        if transform is None:
            # No caller transform: scale pixels into 72-dpi user space and
            # flip the y axis (SVG y grows downwards).
            w = 72.0 * w / self.image_dpi
            h = 72.0 * h / self.image_dpi
            self.writer.element(
                'image',
                transform=generate_transform([
                    ('scale', (1, -1)), ('translate', (0, -h))]),
                x=short_float_fmt(x),
                y=short_float_fmt(-(self.height - y - h)),
                width=short_float_fmt(w), height=short_float_fmt(h),
                attrib=attrib)
        else:
            alpha = gc.get_alpha()
            if alpha != 1.0:
                attrib['opacity'] = short_float_fmt(alpha)
            # Compose: unit square -> caller transform -> place at (x, y)
            # with the y axis flipped into SVG coordinates.
            flipped = (
                Affine2D().scale(1.0 / w, 1.0 / h) +
                transform +
                Affine2D()
                .translate(x, y)
                .scale(1.0, -1.0)
                .translate(0.0, self.height))
            attrib['transform'] = generate_transform(
                [('matrix', flipped.frozen())])
            self.writer.element(
                'image',
                width=short_float_fmt(w), height=short_float_fmt(h),
                attrib=attrib)
        if url is not None:
            self.writer.end('a')
        if clipid is not None:
            self.writer.end('g')
def _adjust_char_id(self, char_id):
return char_id.replace("%20", "_")
    def _draw_text_as_path(self, gc, x, y, s, prop, angle, ismath, mtext=None):
        """
        Draw the text *s* by converting its glyphs to SVG paths.

        Each distinct glyph is emitted once into a ``<defs>`` section and
        then referenced via ``<use>`` elements, so repeated characters cost
        only a reference. Glyph paths are cached across calls in
        ``self._glyph_map``.

        Parameters
        ----------
        gc : GraphicsContextBase
            Supplies fill color and alpha.
        x, y : float
            Text position in display coordinates.
        s : str
            text to be converted
        prop : `matplotlib.font_manager.FontProperties`
            font property
        angle : float
            Rotation angle in degrees, counter-clockwise.
        ismath : bool
            If True, use mathtext parser. If "TeX", use *usetex* mode.
        mtext : `matplotlib.text.Text`, optional
            Unused here; accepted for signature compatibility.
        """
        writer = self.writer
        writer.comment(s)
        glyph_map=self._glyph_map
        text2path = self._text2path
        color = rgb2hex(gc.get_rgb())
        fontsize = prop.get_size_in_points()
        style = {}
        if color != '#000000':
            style['fill'] = color
        if gc.get_alpha() != 1.0:
            style['opacity'] = short_float_fmt(gc.get_alpha())
        if not ismath:
            # Plain text: one cached path per glyph, referenced with <use>.
            font = text2path._get_font(prop)
            _glyphs = text2path.get_glyphs_with_font(
                font, s, glyph_map=glyph_map, return_new_glyphs_only=True)
            glyph_info, glyph_map_new, rects = _glyphs
            if glyph_map_new:
                writer.start('defs')
                for char_id, glyph_path in six.iteritems(glyph_map_new):
                    path = Path(*glyph_path)
                    path_data = self._convert_path(path, simplify=False)
                    writer.element('path', id=char_id, d=path_data)
                writer.end('defs')
                glyph_map.update(glyph_map_new)
            attrib = {}
            attrib['style'] = generate_css(style)
            # Scale from the textpath FONT_SCALE units to the requested
            # font size; the negative y scale flips into SVG coordinates.
            font_scale = fontsize / text2path.FONT_SCALE
            attrib['transform'] = generate_transform([
                ('translate', (x, y)),
                ('rotate', (-angle,)),
                ('scale', (font_scale, -font_scale))])
            writer.start('g', attrib=attrib)
            for glyph_id, xposition, yposition, scale in glyph_info:
                attrib={'xlink:href': '#%s' % glyph_id}
                if xposition != 0.0:
                    attrib['x'] = short_float_fmt(xposition)
                if yposition != 0.0:
                    attrib['y'] = short_float_fmt(yposition)
                writer.element(
                    'use',
                    attrib=attrib)
            writer.end('g')
        else:
            # Math text (mathtext or usetex): glyphs come with individual
            # positions and scales, plus rectangles for rules/fraction bars.
            if ismath == "TeX":
                _glyphs = text2path.get_glyphs_tex(prop, s, glyph_map=glyph_map,
                                                   return_new_glyphs_only=True)
            else:
                _glyphs = text2path.get_glyphs_mathtext(prop, s, glyph_map=glyph_map,
                                                        return_new_glyphs_only=True)
            glyph_info, glyph_map_new, rects = _glyphs
            # we store the character glyphs w/o flipping. Instead, the
            # coordinate will be flipped when this characters are
            # used.
            if glyph_map_new:
                writer.start('defs')
                for char_id, glyph_path in six.iteritems(glyph_map_new):
                    char_id = self._adjust_char_id(char_id)
                    # Some characters are blank
                    if not len(glyph_path[0]):
                        path_data = ""
                    else:
                        path = Path(*glyph_path)
                        path_data = self._convert_path(path, simplify=False)
                    writer.element('path', id=char_id, d=path_data)
                writer.end('defs')
                glyph_map.update(glyph_map_new)
            attrib = {}
            font_scale = fontsize / text2path.FONT_SCALE
            attrib['style'] = generate_css(style)
            attrib['transform'] = generate_transform([
                ('translate', (x, y)),
                ('rotate', (-angle,)),
                ('scale', (font_scale, -font_scale))])
            writer.start('g', attrib=attrib)
            for char_id, xposition, yposition, scale in glyph_info:
                char_id = self._adjust_char_id(char_id)
                writer.element(
                    'use',
                    transform=generate_transform([
                        ('translate', (xposition, yposition)),
                        ('scale', (scale,)),
                        ]),
                    attrib={'xlink:href': '#%s' % char_id})
            for verts, codes in rects:
                path = Path(verts, codes)
                path_data = self._convert_path(path, simplify=False)
                writer.element('path', d=path_data)
            writer.end('g')
    def _draw_text_as_text(self, gc, x, y, s, prop, angle, ismath, mtext=None):
        """
        Draw the text *s* using native SVG ``<text>``/``<tspan>`` elements.

        Plain text becomes a single ``<text>`` element (with optional
        anchor-based alignment when *mtext* allows it); math text is
        decomposed into per-font-style ``<tspan>`` runs plus ``<rect>``
        elements for rules and fraction bars. When the ``svg.fonttype``
        rc setting is ``'svgfont'``, used characters are also recorded in
        ``self._fonts`` for later font embedding.
        """
        writer = self.writer
        color = rgb2hex(gc.get_rgb())
        style = {}
        if color != '#000000':
            style['fill'] = color
        if gc.get_alpha() != 1.0:
            style['opacity'] = short_float_fmt(gc.get_alpha())
        if not ismath:
            font = self._get_font(prop)
            font.set_text(s, 0.0, flags=LOAD_NO_HINTING)
            fontsize = prop.get_size_in_points()
            fontfamily = font.family_name
            fontstyle = prop.get_style()
            attrib = {}
            # Must add "px" to workaround a Firefox bug
            style['font-size'] = short_float_fmt(fontsize) + 'px'
            style['font-family'] = six.text_type(fontfamily)
            style['font-style'] = prop.get_style().lower()
            style['font-weight'] = six.text_type(prop.get_weight()).lower()
            attrib['style'] = generate_css(style)
            if mtext and (angle == 0 or mtext.get_rotation_mode() == "anchor"):
                # If text anchoring can be supported, get the original
                # coordinates and add alignment information.
                # Get anchor coordinates.
                transform = mtext.get_transform()
                ax, ay = transform.transform_point(mtext.get_position())
                ay = self.height - ay
                # Don't do vertical anchor alignment. Most applications do not
                # support 'alignment-baseline' yet. Apply the vertical layout
                # to the anchor point manually for now.
                angle_rad = np.deg2rad(angle)
                dir_vert = np.array([np.sin(angle_rad), np.cos(angle_rad)])
                v_offset = np.dot(dir_vert, [(x - ax), (y - ay)])
                ax = ax + v_offset * dir_vert[0]
                ay = ay + v_offset * dir_vert[1]
                ha_mpl_to_svg = {'left': 'start', 'right': 'end',
                                 'center': 'middle'}
                style['text-anchor'] = ha_mpl_to_svg[mtext.get_ha()]
                attrib['x'] = short_float_fmt(ax)
                attrib['y'] = short_float_fmt(ay)
                attrib['style'] = generate_css(style)
                attrib['transform'] = "rotate(%s, %s, %s)" % (
                    short_float_fmt(-angle),
                    short_float_fmt(ax),
                    short_float_fmt(ay))
                writer.element('text', s, attrib=attrib)
            else:
                attrib['transform'] = generate_transform([
                    ('translate', (x, y)),
                    ('rotate', (-angle,))])
                writer.element('text', s, attrib=attrib)
            if rcParams['svg.fonttype'] == 'svgfont':
                # Record which characters were used for font embedding.
                fontset = self._fonts.setdefault(font.fname, set())
                for c in s:
                    fontset.add(ord(c))
        else:
            writer.comment(s)
            width, height, descent, svg_elements, used_characters = \
                self.mathtext_parser.parse(s, 72, prop)
            svg_glyphs = svg_elements.svg_glyphs
            svg_rects = svg_elements.svg_rects
            attrib = {}
            attrib['style'] = generate_css(style)
            attrib['transform'] = generate_transform([
                ('translate', (x, y)),
                ('rotate', (-angle,))])
            # Apply attributes to 'g', not 'text', because we likely
            # have some rectangles as well with the same style and
            # transformation
            writer.start('g', attrib=attrib)
            writer.start('text')
            # Sort the characters by font, and output one tspan for
            # each
            spans = OrderedDict()
            for font, fontsize, thetext, new_x, new_y, metrics in svg_glyphs:
                style = generate_css({
                    'font-size': short_float_fmt(fontsize) + 'px',
                    'font-family': font.family_name,
                    'font-style': font.style_name.lower(),
                    'font-weight': font.style_name.lower()})
                if thetext == 32:
                    thetext = 0xa0 # non-breaking space
                spans.setdefault(style, []).append((new_x, -new_y, thetext))
            if rcParams['svg.fonttype'] == 'svgfont':
                for font, fontsize, thetext, new_x, new_y, metrics in svg_glyphs:
                    fontset = self._fonts.setdefault(font.fname, set())
                    fontset.add(thetext)
            for style, chars in six.iteritems(spans):
                chars.sort()
                # If all characters in a span share a y coordinate, emit a
                # single y value instead of one per character.
                same_y = True
                if len(chars) > 1:
                    last_y = chars[0][1]
                    for i in xrange(1, len(chars)):
                        if chars[i][1] != last_y:
                            same_y = False
                            break
                if same_y:
                    ys = six.text_type(chars[0][1])
                else:
                    ys = ' '.join(six.text_type(c[1]) for c in chars)
                attrib = {
                    'style': style,
                    'x': ' '.join(short_float_fmt(c[0]) for c in chars),
                    'y': ys
                    }
                writer.element(
                    'tspan',
                    ''.join(unichr(c[2]) for c in chars),
                    attrib=attrib)
            writer.end('text')
            if len(svg_rects):
                for x, y, width, height in svg_rects:
                    writer.element(
                        'rect',
                        x=short_float_fmt(x),
                        y=short_float_fmt(-y + height),
                        width=short_float_fmt(width),
                        height=short_float_fmt(height)
                        )
            writer.end('g')
def draw_tex(self, gc, x, y, s, prop, angle, ismath='TeX!', mtext=None):
self._draw_text_as_path(gc, x, y, s, prop, angle, ismath="TeX")
def draw_text(self, gc, x, y, s, prop, angle, ismath=False, mtext=None):
clipid = self._get_clip(gc)
if clipid is not None:
# Cannot apply clip-path directly to the text, because
# is has a transformation
self.writer.start(
'g', attrib={'clip-path': 'url(#%s)' % clipid})
if gc.get_url() is not None:
self.writer.start('a', {'xlink:href': gc.get_url()})
if rcParams['svg.fonttype'] == 'path':
self._draw_text_as_path(gc, x, y, s, prop, angle, ismath, mtext)
else:
self._draw_text_as_text(gc, x, y, s, prop, angle, ismath, mtext)
if gc.get_url() is not None:
self.writer.end('a')
if clipid is not None:
self.writer.end('g')
def flipy(self):
return True
def get_canvas_width_height(self):
return self.width, self.height
def get_text_width_height_descent(self, s, prop, ismath):
return self._text2path.get_text_width_height_descent(s, prop, ismath)
class FigureCanvasSVG(FigureCanvasBase):
    """Canvas that renders a figure to SVG (optionally gzip-compressed)."""
    filetypes = {'svg': 'Scalable Vector Graphics',
                 'svgz': 'Scalable Vector Graphics'}
    # SVG user space is defined in points, so rendering is fixed at 72 dpi.
    fixed_dpi = 72
    def print_svg(self, filename, *args, **kwargs):
        """Write the figure as SVG to *filename* (path or writable file)."""
        with cbook.open_file_cm(filename, "w", encoding="utf-8") as fh:
            filename = getattr(fh, 'name', '')
            if not isinstance(filename, six.string_types):
                filename = ''
            if cbook.file_requires_unicode(fh):
                detach = False
            else:
                # Byte stream: wrap it so unicode can be written, and
                # detach afterwards so the caller's stream stays usable.
                if six.PY3:
                    fh = io.TextIOWrapper(fh, 'utf-8')
                else:
                    fh = codecs.getwriter('utf-8')(fh)
                detach = True
            result = self._print_svg(filename, fh, **kwargs)
            # Detach underlying stream from wrapper so that it remains open in
            # the caller.
            if detach:
                if six.PY3:
                    fh.detach()
                else:
                    fh.reset()
                    fh.stream = io.BytesIO()
            return result
    def print_svgz(self, filename, *args, **kwargs):
        """Write the figure as gzip-compressed SVG (.svgz)."""
        with cbook.open_file_cm(filename, "wb") as fh, \
                gzip.GzipFile(mode='w', fileobj=fh) as gzipwriter:
            return self.print_svg(gzipwriter)
    def _print_svg(self, filename, fh, **kwargs):
        """Render the figure into the open text stream *fh*.

        *dpi* in **kwargs only affects rasterized images embedded in the
        SVG; the figure layout itself is always done at 72 dpi.
        """
        image_dpi = kwargs.pop("dpi", 72)
        self.figure.set_dpi(72.0)
        width, height = self.figure.get_size_inches()
        w, h = width * 72, height * 72
        _bbox_inches_restore = kwargs.pop("bbox_inches_restore", None)
        renderer = MixedModeRenderer(
            self.figure, width, height, image_dpi,
            RendererSVG(w, h, fh, filename, image_dpi),
            bbox_inches_restore=_bbox_inches_restore)
        self.figure.draw(renderer)
        renderer.finalize()
    def get_default_filetype(self):
        """Return the default file extension for this canvas."""
        return 'svg'
class FigureManagerSVG(FigureManagerBase):
    # Non-interactive backend: the base-class manager behaviour suffices.
    pass
svgProlog = """\
<?xml version="1.0" encoding="utf-8" standalone="no"?>
<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN"
"http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">
<!-- Created with matplotlib (http://matplotlib.org/) -->
"""
@_Backend.export
class _BackendSVG(_Backend):
    # Entry point registering the SVG canvas/manager pair as a backend.
    FigureCanvas = FigureCanvasSVG
    FigureManager = FigureManagerSVG
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@matplotlib@py2@matplotlib@backends@backend_svg.py@.PATH_END.py
|
{
"filename": "repr_.py",
"repo_name": "LSSTDESC/CCL",
"repo_path": "CCL_extracted/CCL-master/pyccl/_core/repr_.py",
"type": "Python"
}
|
import numpy as np
from ..pyutils import _get_spline1d_arrays, _get_spline2d_arrays
from .caching import _to_hashable, hash_
class Table:
    """Build nice tables.

    Renders a 2-D data array as an ASCII table with a legend, showing the
    first/last rows and columns and eliding the middle with ``missing``
    markers. Comments describing the capabilities of each method are
    included below.
    """

    def __init__(self, *, n_y=6, n_x=6, decimals=2,
                 legend="", newline="\n\t", missing="...",
                 data_x=None, data_y=None, data_z=None, meta=None):
        self.data_x = data_x
        self.data_y = data_y
        self.data_z = data_z
        # BUG FIX: `meta` used to default to a shared mutable list
        # (`meta=[]`), so all Table instances created without `meta`
        # silently shared (and accumulated into) the same list. Use a
        # None sentinel and create a fresh list per instance instead.
        self.meta = [] if meta is None else meta
        self.n_y = n_y
        self.n_x = n_x
        self.newline = newline
        self.missing = missing
        self.legend = legend
        # Width of one formatted entry: sign/digit/point/exponent + decimals.
        self.entryl = 7 + decimals
        self.div = max(self.entryl, len(self.legend))
        self.form = f"{self.entryl}.{decimals}e"
        # Row indices to display: the first and last n_y//2 rows.
        self.idx = np.arange(self.n_y//2).tolist() + \
            np.arange(-self.n_y//2, 0).tolist()

    def divider(self, new_line: bool):
        # Horizontal line in Table.
        # +=============+=======================+
        divider = f"+{'=' * (self.div+2)}+"
        divider += f"{'='*(1+(self.n_x*(self.entryl+1))+len(self.missing)+1)}+"
        divider += new_line * f"{self.newline}"
        return divider

    def wrap(self, expr, a, b=None):
        # Wrap the expression with `a` at the beginning and `b` at the end.
        return f"{a}{expr}{b if b is not None else a}"

    def print_left(self, expr):
        # Print the left part of a row of the Table (see `fullrow`).
        return self.wrap(self.wrap(f"{expr:{self.div}}", " "), "|")

    def print_elements(self, arr):
        # Print the elements of `arr`.
        return " ".join([f"{i:{self.form}}" for i in arr])

    def print_right(self, arr, num):
        # Print the right part of a row of the Table (see `fullrow`).
        s = self.wrap(self.print_elements(arr[:num//2]), " ")
        s += f"{self.missing}"
        s += self.wrap(self.wrap(
            self.print_elements(arr[-num//2:]),
            " "), "", f"|{self.newline}")
        return s

    def fullrow(self, s1, s2, num):
        # Print a full row of the Table.
        # |  1.00e-02   |  1.71e-01 ... 3.31e-05  |
        s = self.print_left(f"{s1:{'' if isinstance(s1, str) else self.form}}")
        s += self.print_right(s2, num)
        return s

    def missing_row(self):
        # Print row with skipped values.
        # |     ...     |           ...           |
        s = self.wrap(self.wrap(f"{self.missing:^{self.div}}", " "), "|")
        length = len(self.divider(new_line=False)) - len(s) - 3
        s += self.wrap(self.wrap(
            f"{self.missing:^{length}}",
            " "), "", f"|{self.newline}")
        return s

    def metadata(self):
        # If an object carries metadata that need to be included in `__repr__`
        # pass them here in a list. Each element will start a new row.
        s = ""
        for m in self.meta:
            s += self.wrap(self.wrap(
                f"{m:<{len(self.divider(new_line=False)) - 4}}",
                " "), "|") + self.newline
        return s

    def build(self):
        # Build the table: header, leading rows, elision row, trailing
        # rows, and (optionally) metadata rows.
        s = self.divider(new_line=True)
        s += self.fullrow(f"{self.legend}", self.data_x, self.n_x)
        s += self.divider(new_line=True)
        s += "".join([self.fullrow(self.data_y[i], self.data_z[i], self.n_x)
                      for i in self.idx[:self.n_y//2]])
        s += self.missing_row()
        s += "".join([self.fullrow(self.data_y[i], self.data_z[i], self.n_x)
                      for i in self.idx[-self.n_y//2:]])
        s += self.divider(new_line=bool(self.meta))
        s += self.metadata()
        s += self.divider(new_line=False) if self.meta else ""
        return s
def build_string_Cosmology(self):
    """Build the ``Cosmology`` representation.

    Cosmology equivalence is tested via its representation. Therefore,
    there is limiting behavior where ``'=='`` will return ``False``
    even though the compared cosmologies return the same theoretical
    predictions. This happens whenever:

    * Exactly one Cosmology is an instance of ``CosmologyCalculator``.
    * Cosmologies defined with different parameter sets, where one can
      be computed from the other (e.g. ``sigma8`` and ``A_s``).
    * Instances of ``CosmologyCalculator`` which do not contain exactly
      the same linear & non-linear power spectrum entries.

    Example output::

        <pyccl.cosmology.Cosmology>
            Omega_b = 0.05
            Omega_c = 0.25
            h = 0.67
            n_s = 0.96
            sigma8 = 0.81
            extra_parameters =
                test = {'param': 18.4}
            HASH_ACCURACY_PARAMS = 0x1959cbc9
            HASH_PK = 0xbca03ab0
    """
    newline = "\n\t"
    cls = self.__class__
    def test_eq(key, val, default):
        # Neutrino masses can be a list, so use `np.all` for comparison.
        # `np.all` is expensive, so only use that with `m_nu`.
        if key not in ["m_nu", "z_mg", "df_mg"]:
            return val == default
        return np.all(val == default)
    def printdict(dic):
        # Print the non-default parameters listed in a parameter dictionary.
        base = cls.__base__ if cls.__qualname__ != "Cosmology" else cls
        params = base.__signature__.parameters
        defaults = {param: value.default for param, value in params.items()}
        # Keep only parameters whose value differs from the signature
        # default; extras are handled separately by `printextras`.
        dic = {key: val for key, val in dic.items()
               if not test_eq(key, val, defaults.get(key))}
        dic.pop("extra_parameters", None)
        if not dic:
            return ""
        length = max(len(key) for key, val in dic.items())
        tup = _to_hashable(dic)
        s = ""
        for param, value in tup:
            s += f"{newline}{param:{length}} = {value}"
        return s
    def printextras(dic):
        # Print any extra parameters.
        if dic["extra_parameters"] is None:
            return ""
        tup = _to_hashable(dic["extra_parameters"])
        s = f"{newline}extra_parameters ="
        for key, value in tup:
            s += f"{newline}\t{key} = {dict(value)}"
        return s
    def metadata():
        # Print hashes for the accuracy parameters and the stored Pk2D's.
        H = hex(hash_(self._accuracy_params))
        s = f"{newline}HASH_ACCURACY_PARAMS = {H}"
        if self.__class__.__qualname__ == "CosmologyCalculator":
            # only need the pk's if we compare CosmologyCalculator objects
            H = 0
            if self.has_linear_power:
                H += sum([hash_(pk) for pk in self._pk_lin.values()])
            if self.has_nonlin_power:
                H += sum([hash_(pk) for pk in self._pk_nl.values()])
            H = hex(H)
            s += f"{newline}HASH_PK = {H}"
        return s
    # Assemble: header, non-default params & config, extras, hashes.
    s = "<pyccl.cosmology.Cosmology>"
    s += printdict(self._params_init_kwargs)
    s += printdict(self._config_init_kwargs)
    s += printextras(self._params_init_kwargs)
    s += metadata()
    return s
def build_string_Pk2D(self, na=6, nk=6, decimals=2):
    """Build the ``Pk2D`` representation.

    Shows at most *na* scale-factor rows and *nk* wavenumber columns of
    the spline data, plus the log/extrapolation settings and a hash of
    the underlying arrays.

    Example output ::

        <pyccl.Pk2D>
        +===============+=============================================+
        | a \\ log10(k) | -4.30e+00 -4.16e+00 ... 9.29e-01 1.02e+00   |
        +===============+=============================================+
        | 1.00e-02      | 1.71e-01 2.36e-01 ... 5.82e-05 3.31e-05    |
        | ...           | ...                                        |
        | 1.00e+00      | 1.17e+03 1.60e+03 ... 3.39e-01 1.93e-01    |
        +===============+=============================================+
        | is_log = True , extrap_orders = (1, 2)                     |
        | HASH_ARRS = 0x1d3524ad                                     |
        +===============+=============================================+
    """
    if not self.has_psp:
        return "pyccl.Pk2D(empty)"
    # get what's needed from the Pk2D object
    a, lk, pk = self.get_spline_arrays()
    lk /= np.log(10) # easier to read in log10
    islog = str(bool(self.psp.is_log))
    extrap = (self.psp.extrap_order_lok, self.psp.extrap_order_hik)
    # Hash of the spline arrays; makes reprs comparable across objects.
    H = hex(sum([hash_(obj) for obj in [a, lk, pk]]))
    newline = "\n\t" # what to do when starting a new line
    legend = "a \\ log10(k)" # table legend
    meta = [f"is_log = {islog:5.5s}, extrap_orders = {extrap}"]
    meta += [f"HASH_ARRS = {H:34}"]
    T = Table(n_y=na, n_x=nk, decimals=decimals, legend=legend,
              newline=newline, data_x=lk, data_y=a, data_z=pk, meta=meta)
    s = build_string_simple(self) + f"{newline}"
    s += T.build()
    return s
def build_string_simple(self):
    """Simple representation: the object's module and qualified class name.

    Example output ::

        <pyccl.emulator.Emulator>
    """
    module = self.__module__
    qualname = self.__class__.__qualname__
    return f"<{module}.{qualname}>"
def build_string_from_attrs(self):
    """Build a representation for an object from a list of attribute names
    given in the hook ``__repr_attrs__``.

    Attributes equal to their signature default are omitted; attributes
    whose own repr spans multiple lines are abbreviated to their class
    name plus a hash.

    Example output::

        <pyccl.halos.halo_model.HMCalculator>
            mass_function = MassFuncTinker08, HASH = 0xd3b29dd3
            halo_bias = HaloBiasTinker10, HASH = 0x9da644b5
            mass_def = pyccl.halos.MassDef(Delta=500, rho_type=critical)
    """
    # Current values of the attributes named in `__repr_attrs__`.
    params = {param: getattr(self, param) for param in self.__repr_attrs__}
    defaults = {param: value.default
                for param, value in self.__signature__.parameters.items()
                if param != "self"}
    s = build_string_simple(self)
    newline = "\n\t"
    for param, value in params.items():
        if param in defaults and value == defaults[param]:
            # skip printing when value is the default
            continue
        s += f"{newline}{param} = "
        if "\n" in repr(value):
            # if too long, print the type and its hash
            name = value.__class__.__qualname__
            H = hex(hash_(value))
            s += f"{name}, HASH = {H}"
        else:
            s += f"{value}"
    return s
def build_string_Tracer(self):
    """Build a representation for a Tracer.

    .. note:: Tracer insertion order is important.

    Example output ::

        <pyccl.tracers.Tracer>
            num     kernel            transfer       prefac  bessel
            0   0x82ad882c232406bb 0xa0657c0f1c98fd77    0      2
            1   0x7ab385bb323530da        None           0      0
    """
    def get_tracer_info(tr):
        # Return a string with info for the C-level tracer.
        # Kernel: hash of the radial kernel spline arrays (or 'None').
        kernel = []
        if tr.kernel is not None:
            kernel.append(_get_spline1d_arrays(tr.kernel.spline))
        kernel = hex(hash_(kernel)) if kernel else 'None'
        # Transfer: hash of whichever transfer splines exist, plus the
        # log flag and extrapolation orders (or 'None').
        transfer = []
        if tr.transfer is not None:
            attrs = ["fa", "fk"]
            for attr in attrs:
                spline = getattr(tr.transfer, attr, None)
                if spline is not None:
                    transfer.append(_get_spline1d_arrays(spline))
            spline = getattr(tr.transfer, "fka", None)
            if spline is not None:
                transfer.append(_get_spline2d_arrays(spline))
            transfer.append(tr.transfer.is_log)
            transfer.append((tr.transfer.extrap_order_lok,
                             tr.transfer.extrap_order_hik))
        transfer = hex(hash_(transfer)) if transfer else 'None'
        prefac = tr.der_angles
        bessel = tr.der_bessel
        return kernel, transfer, prefac, bessel
    def print_row(newline, num, kernel, transfer, prefac, bessel):
        # One fixed-width table row.
        s = f"{num:^3}{kernel:^20}{transfer:^20}{prefac:^8}{bessel:^8}"
        return f"{newline}{s}"
    tracers = self._trc
    if not tracers:
        return "pyccl.Tracer(empty=True)"
    newline = "\n\t"
    # One row per C-level tracer, in insertion order.
    s = build_string_simple(self)
    s += print_row(newline, "num", "kernel", "transfer", "prefac", "bessel")
    for num, tracer in enumerate(tracers):
        s += print_row(newline, num, *get_tracer_info(tracer))
    return s
def build_string_Tk3D(self, na=2, nk=4, decimals=2):
    """Build a representation for a Tk3D object.

    Two tables are printed: one per wavenumber axis (k1 and k2). For a
    non-factorizable trispectrum, diagonal slices of the single 3-D
    array are shown instead.

    Example output ::

        <pyccl.Tk3D>
        +================+=============================================+
        | a \\ log10(k1) | -4.00e+00 -3.33e+00 ... 1.33e+00 2.00e+00   |
        +================+=============================================+
        | 5.00e-02       | 4.46e+07 9.62e+06 ... 2.07e+02 4.46e+01    |
        | ...            | ...                                        |
        | 1.00e+00       | 2.00e+09 4.30e+08 ... 9.26e+03 2.00e+03    |
        +================+=============================================+
        +================+=============================================+
        | a \\ log10(k2) | -4.00e+00 -3.33e+00 ... 1.33e+00 2.00e+00   |
        +================+=============================================+
        | 5.00e-02       | 4.46e+01 1.78e+00 ... 2.82e-10 1.12e-11    |
        | ...            | ...                                        |
        | 1.00e+00       | 2.00e+03 7.94e+01 ... 1.26e-08 5.01e-10    |
        +================+=============================================+
        | is_log = True , extrap_orders = (1, 1)                     |
        | HASH_ARRS = 0x780972f4                                     |
        +================+=============================================+
    """
    if not self.has_tsp:
        return "pyccl.Tk3D(empty)"
    # get what's needed from the Tk3D object
    a, lk1, lk2, tks = self.get_spline_arrays()
    lk1 /= np.log(10) # easier to read in log10
    lk2 /= np.log(10) # easier to read in log10
    islog = str(bool(self.tsp.is_log))
    extrap = (self.tsp.extrap_order_lok, self.tsp.extrap_order_hik)
    # Hash of all spline arrays; makes reprs comparable across objects.
    H = hex(sum([hash_(obj) for obj in [a, lk1, lk2, *tks]]))
    newline = "\n\t"
    meta = [f"is_log = {islog:5.5s}, extrap_orders = {extrap}"]
    meta += [f"HASH_ARRS = {H:34}"]
    # we will print 2 tables
    if not self.tsp.is_product:
        # get the start and the end of the trispectrum, diagonally in `k`
        tks = [tks[0][:, 0, :], tks[0][:, :, -1]]
    T = Table(n_y=na, n_x=nk, decimals=decimals, newline=newline,
              data_y=a, legend="a \\ log10(k1)", meta=[])
    s = build_string_simple(self) + f"{newline}"
    T.data_x, T.data_z = lk1, tks[0]
    s += T.build() + f"{newline}"
    # Reuse the same Table for the second axis; metadata goes here only.
    T.legend = "a \\ log10(k2)"
    T.data_x, T.data_z = lk2, tks[1]
    T.meta = meta
    s += T.build()
    return s
|
LSSTDESCREPO_NAMECCLPATH_START.@CCL_extracted@CCL-master@pyccl@_core@repr_.py@.PATH_END.py
|
{
"filename": "_colorscale.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/treemap/marker/_colorscale.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class ColorscaleValidator(_plotly_utils.basevalidators.ColorscaleValidator):
    """Validator for the ``treemap.marker.colorscale`` plotly attribute."""
    def __init__(
        self, plotly_name="colorscale", parent_name="treemap.marker", **kwargs
    ):
        # Delegate to the shared base validator; `kwargs.pop` lets callers
        # override the edit_type / implied_edits / role defaults.
        super(ColorscaleValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=kwargs.pop("edit_type", "calc"),
            implied_edits=kwargs.pop("implied_edits", {"autocolorscale": False}),
            role=kwargs.pop("role", "style"),
            **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@treemap@marker@_colorscale.py@.PATH_END.py
|
{
"filename": "projections.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/scipy/py2/scipy/optimize/_trustregion_constr/projections.py",
"type": "Python"
}
|
"""Basic linear factorizations needed by the solver."""
from __future__ import division, print_function, absolute_import
from scipy.sparse import (bmat, csc_matrix, eye, issparse)
from scipy.sparse.linalg import LinearOperator
import scipy.linalg
import scipy.sparse.linalg
try:
from sksparse.cholmod import cholesky_AAt
sksparse_available = True
except ImportError:
import warnings
sksparse_available = False
import numpy as np
from warnings import warn
__all__ = [
'orthogonality',
'projections',
]
def orthogonality(A, g):
    """Measure orthogonality between a vector and the null space of a matrix.

    Compute a measure of orthogonality between the null space
    of the (possibly sparse) matrix ``A`` and a given vector ``g``.

    The formula is a simplified (and cheaper) version of formula (3.13)
    from [1]_:
    ``orth = norm(A g, ord=2)/(norm(A, ord='fro')*norm(g, ord=2))``.

    References
    ----------
    .. [1] Gould, Nicholas IM, Mary E. Hribar, and Jorge Nocedal.
           "On the solution of equality constrained quadratic
           programming problems arising in optimization."
           SIAM Journal on Scientific Computing 23.4 (2001): 1376-1395.
    """
    gnorm = np.linalg.norm(g)
    # Sparse matrices need the scipy.sparse implementation of the
    # Frobenius norm; dense arrays go through numpy.
    fro = (scipy.sparse.linalg.norm(A, ord='fro') if issparse(A)
           else np.linalg.norm(A, ord='fro'))
    # A zero vector or zero matrix is trivially "orthogonal".
    if gnorm == 0 or fro == 0:
        return 0
    return np.linalg.norm(A.dot(g)) / (fro * gnorm)
def normal_equation_projections(A, m, n, orth_tol, max_refin, tol):
    """Return linear operators for matrix A using ``NormalEquation`` approach.

    Requires scikit-sparse (``cholesky_AAt``): the Cholesky factor of
    ``A A.T`` is computed once and shared by the three returned closures.
    """
    # Cholesky factorization
    factor = cholesky_AAt(A)
    # z = x - A.T inv(A A.T) A x
    def null_space(x):
        v = factor(A.dot(x))
        z = x - A.T.dot(v)
        # Iterative refinement to improve roundoff
        # errors described in [2]_, algorithm 5.1.
        k = 0
        while orthogonality(A, z) > orth_tol:
            # Stop refining once the iteration budget is exhausted.
            if k >= max_refin:
                break
            # z_next = z - A.T inv(A A.T) A z
            v = factor(A.dot(z))
            z = z - A.T.dot(v)
            k += 1
        return z
    # z = inv(A A.T) A x
    def least_squares(x):
        return factor(A.dot(x))
    # z = A.T inv(A A.T) x
    def row_space(x):
        return A.T.dot(factor(x))
    return null_space, least_squares, row_space
def augmented_system_projections(A, m, n, orth_tol, max_refin, tol):
    """Return linear operators for matrix A - ``AugmentedSystem``.

    Factorizes the symmetric augmented system ``[[I, A.T], [A, 0]]`` once
    with a sparse LU and reuses the factorization in all three returned
    operators; falls back to dense SVD projections when the system is
    singular.
    """
    # Form augmented system
    K = csc_matrix(bmat([[eye(n), A.T], [A, None]]))
    # LU factorization
    # TODO: Use a symmetric indefinite factorization
    # to solve the system twice as fast (because
    # of the symmetry).
    try:
        solve = scipy.sparse.linalg.factorized(K)
    except RuntimeError:
        warn("Singular Jacobian matrix. Using dense SVD decomposition to "
             "perform the factorizations.")
        return svd_factorization_projections(A.toarray(),
                                             m, n, orth_tol,
                                             max_refin, tol)
    # z = x - A.T inv(A A.T) A x
    # is computed solving the extended system:
    # [I A.T] * [ z ] = [x]
    # [A O ]   [aux]   [0]
    def null_space(x):
        # v = [x]
        #     [0]
        v = np.hstack([x, np.zeros(m)])
        # lu_sol = [ z ]
        #          [aux]
        lu_sol = solve(v)
        z = lu_sol[:n]
        # Iterative refinement to improve roundoff
        # errors described in [2]_, algorithm 5.2.
        k = 0
        while orthogonality(A, z) > orth_tol:
            # Stop refining once the iteration budget is exhausted.
            if k >= max_refin:
                break
            # new_v = [x] - [I A.T] * [ z ]
            #         [0]   [A O ]   [aux]
            new_v = v - K.dot(lu_sol)
            # [I A.T] * [delta  z ] = new_v
            # [A O ]   [delta aux]
            lu_update = solve(new_v)
            # [ z ] += [delta  z ]
            # [aux]    [delta aux]
            lu_sol += lu_update
            z = lu_sol[:n]
            k += 1
        # return z = x - A.T inv(A A.T) A x
        return z
    # z = inv(A A.T) A x
    # is computed solving the extended system:
    # [I A.T] * [aux] = [x]
    # [A O ]   [ z ]   [0]
    def least_squares(x):
        # v = [x]
        #     [0]
        v = np.hstack([x, np.zeros(m)])
        # lu_sol = [aux]
        #          [ z ]
        lu_sol = solve(v)
        # return z = inv(A A.T) A x
        return lu_sol[n:m+n]
    # z = A.T inv(A A.T) x
    # is computed solving the extended system:
    # [I A.T] * [ z ] = [0]
    # [A O ]   [aux]   [x]
    def row_space(x):
        # v = [0]
        #     [x]
        v = np.hstack([np.zeros(n), x])
        # lu_sol = [ z ]
        #          [aux]
        lu_sol = solve(v)
        # return z = A.T inv(A A.T) x
        return lu_sol[:n]
    return null_space, least_squares, row_space
def qr_factorization_projections(A, m, n, orth_tol, max_refin, tol):
    """Return linear operators for matrix A using ``QRFactorization`` approach.

    Uses a pivoted, economic QR factorization of ``A.T``; falls back to
    the SVD-based projections when ``R`` is numerically singular.
    """
    # QRFactorization
    Q, R, P = scipy.linalg.qr(A.T, pivoting=True, mode='economic')
    # With column pivoting the diagonal of R is non-increasing, so the
    # last row reveals (near-)rank deficiency.
    if np.linalg.norm(R[-1, :], np.inf) < tol:
        warn('Singular Jacobian matrix. Using SVD decomposition to ' +
             'perform the factorizations.')
        return svd_factorization_projections(A, m, n,
                                             orth_tol,
                                             max_refin,
                                             tol)
    # z = x - A.T inv(A A.T) A x
    def null_space(x):
        # v = P inv(R) Q.T x
        aux1 = Q.T.dot(x)
        aux2 = scipy.linalg.solve_triangular(R, aux1, lower=False)
        v = np.zeros(m)
        v[P] = aux2
        z = x - A.T.dot(v)
        # Iterative refinement to improve roundoff
        # errors described in [2]_, algorithm 5.1.
        k = 0
        while orthogonality(A, z) > orth_tol:
            # Stop refining once the iteration budget is exhausted.
            if k >= max_refin:
                break
            # v = P inv(R) Q.T x
            aux1 = Q.T.dot(z)
            aux2 = scipy.linalg.solve_triangular(R, aux1, lower=False)
            v[P] = aux2
            # z_next = z - A.T v
            z = z - A.T.dot(v)
            k += 1
        return z
    # z = inv(A A.T) A x
    def least_squares(x):
        # z = P inv(R) Q.T x
        aux1 = Q.T.dot(x)
        aux2 = scipy.linalg.solve_triangular(R, aux1, lower=False)
        z = np.zeros(m)
        z[P] = aux2
        return z
    # z = A.T inv(A A.T) x
    def row_space(x):
        # z = Q inv(R.T) P.T x
        aux1 = x[P]
        aux2 = scipy.linalg.solve_triangular(R, aux1,
                                             lower=False,
                                             trans='T')
        z = Q.dot(aux2)
        return z
    return null_space, least_squares, row_space
def svd_factorization_projections(A, m, n, orth_tol, max_refin, tol):
    """Return linear operators for matrix A using ``SVDFactorization`` approach.

    The thin SVD ``A = U diag(s) Vt`` is computed once; directions whose
    singular value is below *tol* are dropped, and the retained factors
    are shared by all three returned operators.
    """
    # SVD Factorization (thin/economy form).
    U, s, Vt = scipy.linalg.svd(A, full_matrices=False)
    # Remove dimensions related with very small singular values.
    keep = s > tol
    U = U[:, keep]
    Vt = Vt[keep, :]
    s = s[keep]

    def pinv_AT(x):
        # inv(A A.T) A x  ==  U diag(1/s) Vt x  (thin SVD identity).
        return U.dot(1/s*Vt.dot(x))

    def null_space(x):
        # z = x - A.T inv(A A.T) A x : projection onto the null space of A.
        z = x - A.T.dot(pinv_AT(x))
        # Iterative refinement to improve roundoff
        # errors described in [2]_, algorithm 5.1.
        k = 0
        while orthogonality(A, z) > orth_tol:
            if k >= max_refin:
                break
            z = z - A.T.dot(pinv_AT(z))
            k += 1
        return z

    def least_squares(x):
        # z = inv(A A.T) A x
        return pinv_AT(x)

    def row_space(x):
        # z = A.T inv(A A.T) x  ==  V diag(1/s) U.T x
        return Vt.T.dot(1/s*U.T.dot(x))

    return null_space, least_squares, row_space
def projections(A, method=None, orth_tol=1e-12, max_refin=3, tol=1e-15):
    """Return three linear operators related with a given matrix A.

    Parameters
    ----------
    A : sparse matrix (or ndarray), shape (m, n)
        Matrix ``A`` used in the projection.
    method : string, optional
        Method used to compute the given linear
        operators. Should be one of:

            - 'NormalEquation': The operators
              will be computed using the
              so-called normal equation approach
              explained in [1]_. In order to do
              so the Cholesky factorization of
              ``(A A.T)`` is computed. Exclusive
              for sparse matrices.
            - 'AugmentedSystem': The operators
              will be computed using the
              so-called augmented system approach
              explained in [1]_. Exclusive
              for sparse matrices.
            - 'QRFactorization': Compute projections
              using QR factorization. Exclusive for
              dense matrices.
            - 'SVDFactorization': Compute projections
              using SVD factorization. Exclusive for
              dense matrices.

    orth_tol : float, optional
        Tolerance for iterative refinements.
    max_refin : int, optional
        Maximum number of iterative refinements.
    tol : float, optional
        Tolerance for singular values.

    Returns
    -------
    Z : LinearOperator, shape (n, n)
        Null-space operator. For a given vector ``x``,
        the null space operator is equivalent to apply
        a projection matrix ``P = I - A.T inv(A A.T) A``
        to the vector. It can be shown that this is
        equivalent to project ``x`` into the null space
        of A.
    LS : LinearOperator, shape (m, n)
        Least-squares operator. For a given vector ``x``,
        the least-squares operator is equivalent to apply a
        pseudoinverse matrix ``pinv(A.T) = inv(A A.T) A``
        to the vector. It can be shown that this vector
        ``pinv(A.T) x`` is the least_square solution to
        ``A.T y = x``.
    Y : LinearOperator, shape (n, m)
        Row-space operator. For a given vector ``x``,
        the row-space operator is equivalent to apply a
        projection matrix ``Q = A.T inv(A A.T)``
        to the vector. It can be shown that this
        vector ``y = Q x`` the minimum norm solution
        of ``A y = x``.

    Notes
    -----
    Uses iterative refinements described in [1]
    during the computation of ``Z`` in order to
    cope with the possibility of large roundoff errors.

    References
    ----------
    .. [1] Gould, Nicholas IM, Mary E. Hribar, and Jorge Nocedal.
        "On the solution of equality constrained quadratic
        programming problems arising in optimization."
        SIAM Journal on Scientific Computing 23.4 (2001): 1376-1395.
    """
    m, n = np.shape(A)

    # The factorization of an empty matrix
    # only works for the sparse representation.
    if m*n == 0:
        A = csc_matrix(A)

    # Check Argument
    if issparse(A):
        if method is None:
            method = "AugmentedSystem"
        if method not in ("NormalEquation", "AugmentedSystem"):
            raise ValueError("Method not allowed for sparse matrix.")
        if method == "NormalEquation" and not sksparse_available:
            # scikit-sparse supplies the sparse Cholesky factorization the
            # normal-equation path relies on; fall back when it is missing.
            warnings.warn(("Only accepts 'NormalEquation' option when"
                           " scikit-sparse is available. Using "
                           "'AugmentedSystem' option instead."),
                          ImportWarning)
            method = 'AugmentedSystem'
    else:
        if method is None:
            method = "QRFactorization"
        if method not in ("QRFactorization", "SVDFactorization"):
            raise ValueError("Method not allowed for dense array.")

    # Build the three projection closures with the selected factorization.
    if method == 'NormalEquation':
        null_space, least_squares, row_space \
            = normal_equation_projections(A, m, n, orth_tol, max_refin, tol)
    elif method == 'AugmentedSystem':
        null_space, least_squares, row_space \
            = augmented_system_projections(A, m, n, orth_tol, max_refin, tol)
    elif method == "QRFactorization":
        null_space, least_squares, row_space \
            = qr_factorization_projections(A, m, n, orth_tol, max_refin, tol)
    elif method == "SVDFactorization":
        null_space, least_squares, row_space \
            = svd_factorization_projections(A, m, n, orth_tol, max_refin, tol)

    # Wrap the closures as LinearOperators with explicit shapes.
    Z = LinearOperator((n, n), null_space)
    LS = LinearOperator((m, n), least_squares)
    Y = LinearOperator((n, m), row_space)

    return Z, LS, Y
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@scipy@py2@scipy@optimize@_trustregion_constr@projections.py@.PATH_END.py
|
{
"filename": "analyse_cv.ipynb",
"repo_name": "SpandanCh/Barnard5_filaments",
"repo_path": "Barnard5_filaments_extracted/Barnard5_filaments-main/analyse_cv.ipynb",
"type": "Jupyter Notebook"
}
|
# Analyse different properties along the spines of the filaments and orthogonal cuts
```python
import astropy.io.fits as fits
import astropy.units as u
import numpy as np
import matplotlib.pyplot as plt
```
### Produce the orthogonal cuts to the spines
#### following the procedure detailed in Schmiedeke et al., 2021
```python
from radfil import radfil_class as rfc
```
```python
def callRadfil(hdu_all, filspine, filmask, distance, cut_separation=None, print_scale_and_sampling=False,
               shift_to_param_peak=False):
    """
    provides parameters of the orthogonal cuts
    inputs :
        hdu_all : HDU containing the integrated intensity, required
        filspine : boolean mask of the filaments spine, required
        filmask : boolean mask of the filament, required
        distance : distance to the region with units, required
        cut_separation : desired separation of the cuts, in pixels. Required
        print_scale_and_sampling : if set 'True', will print out the beam size, pixel size and spacing between the cuts
        shift_to_param_peak : if set 'True', the centres of the cuts would be shifted to the parameter peak, instead.
    """
    image = hdu_all[0].data
    header = hdu_all[0].header
    beam_major = header['bmaj'] * u.degree          # beam major axis
    # build the radfil object for this filament
    radfil_obj = rfc.radfil(image, mask=filmask, filspine=filspine, header=header,
                            distance=(distance.to(u.pc)).value)
    #
    # physical beam and pixel scales, used to pick the default cut spacing
    beam_au = (distance * beam_major.to(u.radian).value).to(u.au)
    pix_au = radfil_obj.imgscale.to(u.au)
    spacing = int(np.round((beam_au / pix_au), 0))
    if cut_separation is not None:
        spacing = cut_separation
    if print_scale_and_sampling:
        print(' beamscale = {}'.format(beam_au))
        print(' imagescale = {}'.format(pix_au))
        print(' sampling = {}'.format(spacing))
    # extract the spine profiles
    radfil_obj.build_profile(samp_int=spacing, shift=shift_to_param_peak)
    return radfil_obj
```
```python
```
#### set universal parameters data files
```python
distance = 302 * u.pc # distance to B5
centerCoords = ['03:47:38.992 32:52:50.00', '03:47:40.4 32:51:33.6'] # centres of the two filaments
data_dir = 'data_files_prev_works/filaments_anika/' # parent directory for the data files
dataFile = data_dir + 'nh3_11_whole_mom0_8as_3px.fits' # integrated intensity file
hdu_all = fits.open(dataFile) # HDU with integrated intensity
```
##### for filament 1
```python
fil1File = data_dir + 'B5_nh3_fil1.fits' # filament 1
fil1MaskFile = data_dir + 'B5_mask_fil1.fits' # file for filament 1 mask
fil1SpineFile = data_dir + 'B5_spine_fil1.fits' # file for spine of filament 1
fil1mask = fits.getdata(fil1MaskFile).astype(bool) # boolean mask of filament 1
fil1spine = fits.getdata(fil1SpineFile).astype(bool) # boolean mask of spine of fila-1
cuts_fil1 = callRadfil(hdu_all, fil1spine, fil1mask, distance, cut_separation=4) # store the parameters of the cuts
dict_fil1 = cuts_fil1.dictionary_cuts
```
No binning is applied.

##### for filament 2
```python
fil2File = data_dir + 'B5_nh3_fil2.fits' # filament 2
fil2MaskFile = data_dir + 'B5_mask_fil2.fits' # file for filament 2 mask
fil2SpineFile = data_dir + 'B5_spine_fil2.fits' # file for spine of filament 2
fil2mask = fits.getdata(fil2MaskFile).astype(bool) # boolean mask of filament 2
fil2spine = fits.getdata(fil2SpineFile).astype(bool) # boolean mask of spine of fila-2
cuts_fil2 = callRadfil(hdu_all, fil2spine, fil2mask, distance, cut_separation=4) # store the parameters of the cuts
dict_fil2 = cuts_fil2.dictionary_cuts
```
No binning is applied.

```python
```
##### centre of the cuts, position of highest mom0 value along the cut
```python
cent_spn_fil1 = dict_fil1['plot_peaks']
cent_spn_fil2 = dict_fil2['plot_peaks']
cent_spn_both = [cent_spn_fil1, cent_spn_fil2]
```
##### co-ordinates of the end-points of the cuts
```python
coords_endpt_cuts_fil1 = np.asarray(dict_fil1['plot_cuts'])
coords_endpt_cuts_fil2 = np.asarray(dict_fil2['plot_cuts'])
# x1,y1 and x2,y2 are the co-ordinates of the end points of the respective cuts
# for filament 1
x1_spn_fil1 = coords_endpt_cuts_fil1[:,0,0]
y1_spn_fil1 = coords_endpt_cuts_fil1[:,0,1]
x2_spn_fil1 = coords_endpt_cuts_fil1[:,1,0]
y2_spn_fil1 = coords_endpt_cuts_fil1[:,1,1]
# for filament 2
x1_spn_fil2 = coords_endpt_cuts_fil2[:,0,0]
y1_spn_fil2 = coords_endpt_cuts_fil2[:,0,1]
x2_spn_fil2 = coords_endpt_cuts_fil2[:,1,0]
y2_spn_fil2 = coords_endpt_cuts_fil2[:,1,1]
# store for both filaments together
x1_spn_both = [x1_spn_fil1, x1_spn_fil2]
x2_spn_both = [x2_spn_fil1, x2_spn_fil2]
y1_spn_both = [y1_spn_fil1, y1_spn_fil2]
y2_spn_both = [y2_spn_fil1, y2_spn_fil2]
```
```python
```
### Show spines and orthogonal cuts
##### region masks for the two filaments
```python
from os import listdir
fls = listdir('data_files_prev_works/masks_anika')
fls = [i for i in fls if 'rgrd.fits' in i]
msk_regs = {ms[:-10] : fits.getdata('data_files_prev_works/masks_anika/'+ms) for ms in fls}
```
```python
from aplpy import FITSFigure
from astropy.io.fits import PrimaryHDU
import astropy.units as u
import matplotlib as mpl
mpl.rcParams['xtick.direction'] = 'in'
mpl.rcParams['ytick.direction'] = 'in'
```
```python
from astropy.wcs import WCS
hd2d = fits.getheader(dataFile)
wcs = WCS(hd2d).celestial
```
WARNING: FITSFixedWarning: 'obsfix' made the change 'Set OBSGEO-L to -107.618000 from OBSGEO-[XYZ].
Set OBSGEO-B to 34.078827 from OBSGEO-[XYZ].
Set OBSGEO-H to 2115.607 from OBSGEO-[XYZ]'. [astropy.wcs.wcs]
```python
# generate a FITSFigure for the integrated intensity
fig = FITSFigure(dataFile)
# show the colour map
fig.show_colorscale( cmap='Greys', vmin=10, vmax=300, stretch='linear')
# add beam
fig.add_beam(color='k')
# set ticks
fig.ticks.set_color('black')
# write name of the region : B5
fig.add_label(0.07, 0.07, 'B5', relative=True, color= 'k', size=15)
# show contours of filament 1 and filament 2
fig.show_contour(msk_regs['fil1'], colors=['k'], levels=[0])
fig.show_contour(msk_regs['fil2'], colors=['k'], levels=[0])
# zoom around the two filaments
fig.recenter(56.918, 32.87, width=0.03, height=0.07)
# show the orthogonal cuts to filament 1
for i in range(1,15):
ra1, dec1 = wcs.wcs_pix2world(x1_spn_fil1[i], y1_spn_fil1[i], 0) # end points of the cuts in RA-Dec
ra2, dec2 = wcs.wcs_pix2world(x2_spn_fil1[i], y2_spn_fil1[i], 0)
iline = np.array([[ra1, ra2],[dec1,dec2]])
if i in [6,9,14] :
fig.show_lines([iline], color = 'r', linewidth=5) # highlight the cuts corresponding to
# the C_V examples in main text
else:
fig.show_lines([iline], color = 'r') # other cuts
# show the orthogonal cuts to filament 2
for i in range(1,21):
ra1, dec1 = wcs.wcs_pix2world(x1_spn_fil2[i], y1_spn_fil2[i], 0) # end points of the cuts in RA-Dec
ra2, dec2 = wcs.wcs_pix2world(x2_spn_fil2[i], y2_spn_fil2[i], 0)
iline = np.array([[ra1, ra2],[dec1,dec2]])
if i in [5,10,14] :
fig.show_lines([iline], color = 'r', linewidth=5) # highlight the cuts corresponding to
# the C_V examples in main text
else:
fig.show_lines([iline], color = 'r') # other cuts
fig.savefig('plots/spines-cuts.pdf', dpi=200)
```
WARNING: FITSFixedWarning: 'obsfix' made the change 'Set OBSGEO-L to -107.618000 from OBSGEO-[XYZ].
Set OBSGEO-B to 34.078827 from OBSGEO-[XYZ].
Set OBSGEO-H to 2115.607 from OBSGEO-[XYZ]'. [astropy.wcs.wcs]
WARNING: No WCS information found in header - using pixel coordinates [aplpy.header]
/Users/spandan/opt/anaconda3/envs/analysis/lib/python3.10/site-packages/astropy/visualization/wcsaxes/core.py:235: UserWarning: No contour levels were found within the data range.
cset = super().contour(*args, **kwargs)
WARNING: No WCS information found in header - using pixel coordinates [aplpy.header]
/Users/spandan/opt/anaconda3/envs/analysis/lib/python3.10/site-packages/astropy/visualization/wcsaxes/core.py:235: UserWarning: No contour levels were found within the data range.
cset = super().contour(*args, **kwargs)

```python
```
#### x- and y-coordinates of the entire cut from the end points
```python
def xy_cuts(x1=None, y1=None, x2=None, y2=None, extra_pix=3):
    """
    returns arrays of x- and y-coordinates of all the points in a cut, from the end points
    inputs:
        x1, y1 : co-ordinates of the start point of the cut, required
        x2, y2 : co-ordinates of the end point of the cut, required
        extra_pix : how many pixels should the cut be extended by. Default is 3
                    (used to see the change in the parameter across the boundary of the filament)
    raises ValueError when the two end points coincide (no cut direction).
    """
    dx = x2 - x1
    dy = y2 - y1
    if dx == 0 and dy == 0:
        # previously this divided 0/0; make the degenerate case explicit
        raise ValueError('the two end points of the cut coincide')
    if abs(dx) >= abs(dy) and dx != 0:
        # shallow cut (|slope| <= 1): sample one position per x pixel
        # step follows the direction of the cut, so descending end points
        # (x1 > x2) no longer produce an empty np.arange
        step = 1 if dx > 0 else -1
        x_arr = np.arange(x1 - step * extra_pix, x2 + step * extra_pix, step)
        y_arr = y1 + (dy / dx) * (x_arr - x1)   # y - y1 = m (x - x1)
    else:
        # steep or vertical cut (|slope| > 1): sample one position per y pixel
        step = 1 if dy > 0 else -1
        y_arr = np.arange(y1 - step * extra_pix, y2 + step * extra_pix, step)
        x_arr = x1 + (dx / dy) * (y_arr - y1)
    # round the co-ordinates to the nearest pixel and cast to integer indices
    x_int = np.round(x_arr).astype(int)
    y_int = np.round(y_arr).astype(int)
    return x_int, y_int
```
#### store the x- and y-coordinates of cuts
##### filament 1
```python
dict_coords_cut_spn_fil1 = {}
for i in range(len(x1_spn_fil1)):
x_ls, y_ls = xy_cuts(x1 = x1_spn_fil1[i], y1 = y1_spn_fil1[i], x2 = x2_spn_fil1[i], y2 = y2_spn_fil1[i],
extra_pix=3)
dict_coords_cut_spn_fil1['cut_'+str(i+1)] = [x_ls, y_ls]
```
##### filament 2
```python
dict_coords_cut_spn_fil2 = {}
for i in range(len(x1_spn_fil2)):
xarr, yarr = xy_cuts(x1 = x1_spn_fil2[i], y1 = y1_spn_fil2[i], x2 = x2_spn_fil2[i], y2 = y2_spn_fil2[i],
extra_pix=1)
dict_coords_cut_spn_fil2['cut_'+str(i+1)] = [xarr, yarr]
```
```python
```
###### wrapper to store the cut co-ordinates
```python
def dict_coords_spine(fila=None, extra_pix=None):
    """
    wrapper returning a dictionary with the pixel co-ordinates of every
    orthogonal cut of the requested filament (fila = 1 or 2), each cut
    extended by extra_pix pixels
    """
    idx = fila - 1
    coords = {}
    endpoints = zip(x1_spn_both[idx], y1_spn_both[idx],
                    x2_spn_both[idx], y2_spn_both[idx])
    for num, (xa, ya, xb, yb) in enumerate(endpoints, start=1):
        xs, ys = xy_cuts(x1=xa, y1=ya, x2=xb, y2=yb, extra_pix=extra_pix)
        coords['cut_' + str(num)] = [xs, ys]
    return coords
```
```python
```
###### calculate distance from centre
```python
def calc_dist(x_cent=None, y_cent=None, x_pos=None, y_pos=None, with_sign=False):
    """
    returns distance, absolute or with sign, of a list of pixels from a given central pixel
    inputs :
        x_cent, y_cent : co-ordinates of the central pixel, required
        x_pos, y_pos : arrays containing the co-ordinates of the list of pixels, required
        with_sign : whether to include the sign (vector distance). Default is 'False'
    """
    del_x = x_cent - x_pos                    # difference in x from the central pixel
    del_y = y_cent - y_pos                    # difference in y from the central pixel
    dist = np.sqrt(del_x**2 + del_y**2)       # absolute distance
    sign = 1
    if with_sign:
        # Order the distances as positive or negative, with the central pixel as
        # reference (0). To decide the sign, first check whether the cut is more
        # inclined towards the x- or the y-axis: along the other axis several
        # pixels can share the same co-ordinate (zero difference). The sign of
        # the offset along the dominant axis is then applied to the distance.
        # np.sign returns 0 for the central pixel itself, so its distance stays
        # 0 - the previous del/|del| form produced NaN there (and the
        # "invalid value encountered in divide" RuntimeWarnings).
        xdiff = np.max(x_pos) - np.min(x_pos)
        ydiff = np.max(y_pos) - np.min(y_pos)
        if xdiff > ydiff:
            sign = np.sign(del_x)
        elif ydiff > xdiff:
            sign = np.sign(del_y)
    return dist * sign
```
```python
```
```python
def dict_dist_frm_cent(fila=None, dict_coords_cut=None, centre_spine=None):
    """
    wrapper to store distance of each pixel in a cut from the spine centre
    """
    dist_per_cut = {}
    for idx in range(len(dict_coords_cut)):
        key = 'cut_' + str(idx + 1)
        x_arr, y_arr = dict_coords_cut[key]
        dist_per_cut[key] = calc_dist(x_cent=centre_spine[idx][0],
                                      y_cent=centre_spine[idx][1],
                                      x_pos=x_arr, y_pos=y_arr,
                                      with_sign=True)
    return dist_per_cut
```
```python
```
### extended velocity
```python
vel_ext = fits.getdata('sorted_comps/vel_extended.fits')
```
```python
```
### Calculate column density from intensity of $\rm NH_3$ (1,1)
##### read in integrated intensity
```python
mom0_11 = fits.getdata(data_dir + 'nh3_11_whole_8as_3px_mom0_w-model.fits')
```
### convert moment 0 to mass in the filaments (as Schmiedeke et al., 2021, page 6)
##### moment 0 in the filaments
```python
mom0_11_fil1 = mom0_11 * fil1mask
mom0_11_fil2 = mom0_11 * fil2mask
mom0_11_fil1[mom0_11_fil1 == 0] = np.nan
mom0_11_fil2[mom0_11_fil2 == 0] = np.nan
```
##### subtract background
```python
mom0_bgsub_fil1 = mom0_11_fil1 - np.nanmin(mom0_11_fil1)
mom0_bgsub_fil2 = mom0_11_fil2 - np.nanmin(mom0_11_fil2)
```
##### Convert K km/s to Jy/beam km/s
```python
from radio_beam import Beam
```
```python
my_beam = Beam.from_fits_header(hd2d) # get beam from header
# restfrq = hd_tmb['restfrq'] * u.Hz
restfrq = 23694495500 * u.Hz # rest-frequency for (1,1)
jybm2K = (1*u.Jy).to(u.K, u.brightness_temperature(restfrq, my_beam)).value # Jy/beam to K
K2jybm = 1./jybm2K # K to Jy/beam
```
##### convert beam to pixel
```python
# beam area
bmaj = hd2d['bmaj']*u.deg # major axis of beam from header
bmin = hd2d['bmin']*u.deg # minor axis of beam from header
beamArea = np.pi / (4.0 * np.log(2)) * bmaj.to(u.arcsec) * bmin.to(u.arcsec) # area
# pixel area
xPixSize = abs(hd2d['cdelt1']) * u.degree
xPixSize = xPixSize.to(u.arcsec)
pixelArea = (xPixSize ** 2)
# pixel to beam conversion
pixel2beam = pixelArea.value / beamArea.value
beam2pixel = 1 / pixel2beam
# Kelvin conversion factors
equiv = u.brightness_temperature(restfrq)
jybeam2kelvin = ((1* u.Jy/beamArea).to(u.K, equivalencies=equiv)).value
kelvin2jybeam = 1 / jybeam2kelvin
```
##### moment 0 in Jy/beam per pixel
```python
mom0_11_jy = mom0_11 * K2jybm * pixel2beam
```
##### Intensity to mass, using conversion factor from Schmiedeke et al., 2021
```python
mass_fil1 = mom0_bgsub_fil1 * K2jybm * pixel2beam * 1.5
mass_fil2 = mom0_bgsub_fil2 * K2jybm * pixel2beam * 1.5
```
##### Pixel length
```python
pix_len = (xPixSize.to(u.radian) * 302).value
```
#### M/L in each cut
```python
MpL_fil1 = []
for i in range(len(x1_spn_fil1)):
xarr, yarr = xy_cuts(x1 = x1_spn_fil1[i], y1 = y1_spn_fil1[i], x2 = x2_spn_fil1[i], y2 = y2_spn_fil1[i],
extra_pix=1)
mom0_cut = mom0_11_jy[yarr, xarr]
mom0_cut_bgsub = mom0_cut - 0.0028
ms = np.nansum(mom0_cut_bgsub * 1.5)
MpL_fil1.append((ms/pix_len))
```
```python
MpL_fil2 = []
for i in range(len(x1_spn_fil2)):
xarr, yarr = xy_cuts(x1 = x1_spn_fil2[i], y1 = y1_spn_fil2[i], x2 = x2_spn_fil2[i], y2 = y2_spn_fil2[i],
extra_pix=0)
mom0_cut = mom0_11_jy[yarr, xarr]
mom0_cut_bgsub = mom0_cut - 0.00266
ms = np.nansum(mom0_cut_bgsub * 1.5)
MpL_fil2.append((ms/pix_len))
```
```python
ML_both = [MpL_fil1, MpL_fil2]
```
```python
```
### Calculate $\rm{C_v}$
```python
import scipy.constants as c

def calc_cv_chen(del_v=None, ML=None, ret_err=False, edel_v=None):
    """
    calculates C_V parameter (Chen et al., 2020)
    inputs :
        del_v : difference in velocity at the two boundary points of the cut, required
        ML : mass per unit length (in solar mass per pc) of the cut, required
        ret_err : if set 'True', returns the estimated error in C_V instead, Default is 'False'
        edel_v : error in del_v, required if ret_err is set
    """
    half_dv = del_v * 1000 / 2.0                        # (del_v)/2 in m/s
    grav_term = (c.G) * ML * (2e30) * (c.parsec)**-1    # G(M/L) in SI units
    cv = half_dv**2 / grav_term                         # C_V = (del_vh)^2 / G(M/L)
    if not ret_err:
        return cv
    # error propagation: velocity term plus the adopted 0.7/1.5 M/L uncertainty
    return (np.sqrt(4 * (edel_v / del_v)**2 + (0.7 / 1.5)**2)) * cv
```
#### calculate $\rm{\Delta v_h}$ required to have $\rm{C_v} = 1$
```python
def del_v_for_cv_1(ML=None):
    """
    returns the value for del_vh, for which C_V = 1
    input :
        ML : mass per unit length of the cut, in solar mass per pc. Required
    """
    grav_term = (c.G) * ML * (2e30) * (c.parsec)**-1   # G(M/L) in SI units
    # C_V = 1 means (del_vh)^2 equals G(M/L); convert the result from m/s to km/s
    return np.sqrt(grav_term) / 1000
```
```python
```
### wrapper to plot parameter across filament cuts
```python
def plot_ortho_cut(param=None, extra_pix=None, fila=1, new_ax=True, c='b', ax=None, ret_cuts=False,
                   print_cut_num=False, marker_size=30,
                   plot_cut_bound=False, colour_cut_bound='k', plot_cut_cent=False, colour_cut_cent='r',
                   print_cv=False, x_axis_phys=False, plot_sel_cuts=False, sel_cuts=None, print_delv=False):
    """
    plot a parameter across the cuts to a filament
    inputs :
        param : parameter to plot, required
        extra_pix : number of pixels by which the cut should be extended outside the filament boundary, required
        fila : which filament to plot, 1 or 2. Default is 1
        ax : matplotlib axes on which to plot the gradients
        new_ax : if set 'True', a new set of axes is generated on which to plot the gradients
        c : colour of the plotted points, default is 'b'
        marker_size : size of the points to be plotted, default is 30
        ret_cuts : if set 'True', returns the dictionary with the cut coordinates, default is 'False'
        print_cut_num : if set 'True', prints the number of the cut along with the plot, default is 'False'
        plot_cut_bound : if set 'True', marks the filament boundary as a vertical dotted line. Default is 'False'
        colour_cut_bound : colour of the filament boundary to be plotted, default is black
        plot_cut_cent : if set 'True', marks the spine centre as a vertical dotted line. Default is 'False'
        colour_cut_cent : colour of the line showing the spine centre
        x_axis_phys : if set 'True', distances are marked in pc, instead of pixel distances. Default is 'False'
        plot_sel_cuts : if set 'True', only plots some selected cuts. Default is 'False'
        sel_cuts : list of cuts to plot
        print_cv : if set 'True', prints the respective C_V value for that cut. Default is 'False'
        print_delv : if set 'True', prints the del v required to make C_V=1. Default is 'False'
    """
    # centres of the spine in each cut of the given filament
    centre_spine = cent_spn_both[fila-1]
    # co-ordinates of the cuts
    dict_coords = dict_coords_spine(fila=fila, extra_pix=extra_pix)
    # distances of each pixel in the cuts from the spine centres
    dict_dist = dict_dist_frm_cent(fila=fila, dict_coords_cut=dict_coords, centre_spine=centre_spine)
    if ret_cuts:
        return dict_coords
    if new_ax:
        # NOTE(review): 'fig' created here is never returned, so figures made
        # through this branch cannot be saved by the caller - confirm intended.
        if fila == 1:
            fig, ax = plt.subplots(nrows=3, ncols=4, figsize=(16,9))
        elif fila == 2:
            fig, ax = plt.subplots(nrows=4, ncols=4, figsize=(16,12))
    ax_flat = ax.flat
    if plot_sel_cuts:
        plot_range = sel_cuts
    else :
        plot_range = range(len(centre_spine))
    # k indexes the flattened axes; i indexes the cuts actually drawn
    k = 0
    for i in plot_range:
        axs = ax_flat[k]
        xi, yi = dict_coords['cut_'+str(i+1)]
        dist_i = dict_dist['cut_'+str(i+1)]
        if x_axis_phys :
            dist_i = dist_i * pix_len       # pixel offsets -> pc
        # sample the parameter map at every pixel of the cut (row = y, col = x)
        param_list = [param[yi[j], xi[j]] for j in range(len(xi))]
        #axs.plot(dist_i, param_list, c+'.')
        axs.scatter(dist_i, param_list, color=c, s=marker_size)#+'.')
        if print_cut_num:
            axs.text(0.1,0.9, 'cut = ' + str(i+1), horizontalalignment='left', transform=axs.transAxes)
        # print(plot_cut_bound)
        if plot_cut_bound :
            # signed distances of the two end points of the cut from the spine centre
            end_x1, end_y1 = x1_spn_both[fila-1][i], y1_spn_both[fila-1][i]
            end_x2, end_y2 = x2_spn_both[fila-1][i], y2_spn_both[fila-1][i]
            dist1 = calc_dist(x_cent=centre_spine[i][0], y_cent=centre_spine[i][1], x_pos=end_x1, y_pos=end_y1,
                              with_sign=True)
            dist2 = calc_dist(x_cent=centre_spine[i][0], y_cent=centre_spine[i][1], x_pos=end_x2, y_pos=end_y2,
                              with_sign=True)
            if x_axis_phys:
                dist1 = dist1 * pix_len
                dist2 = dist2 * pix_len
            # the first boundary is mirrored to the negative side of the centre
            axs.axvline(x=dist1*-1, ls=':', c=colour_cut_bound)
            axs.axvline(x=dist2, ls=':', c=colour_cut_bound)
        if plot_cut_cent :
            axs.axvline(x=0, ls=':', c=colour_cut_cent)
        # NOTE(review): print_cv / print_delv read end_x1 ... end_y2, which are
        # only assigned inside the plot_cut_bound branch above; calling with
        # print_cv or print_delv but plot_cut_bound=False raises NameError.
        if print_cv :
            vel1 = param[int(np.round(end_y1)), int(np.round(end_x1))]
            vel2 = param[int(np.round(end_y2)), int(np.round(end_x2))]
            del_v = np.abs(vel1 - vel2)
            cv = calc_cv_chen(del_v=del_v, ML=ML_both[fila-1][i])
            axs.text(0.05, 0.9, r'$C_v = $' + str(np.round(cv, 4)),
                     horizontalalignment='left', transform=axs.transAxes)
        elif print_delv :
            vel1 = param[int(np.round(end_y1)), int(np.round(end_x1))]
            vel2 = param[int(np.round(end_y2)), int(np.round(end_x2))]
            del_v = np.abs(vel1 - vel2)/2
            delv_cv_1 = del_v_for_cv_1(ML_both[fila-1][i])
            axs.text(0.45, 0.9, r'$\Delta v = $' + str(np.round(del_v, 2)) + r'$km\,s^{-1}$',
                     horizontalalignment='right', transform=axs.transAxes)
            axs.text(0.45, 0.85, r'$\Delta v_{C_v = 1} = $' + str(np.round(delv_cv_1, 2)) + r'$km\,s^{-1}$',
                     horizontalalignment='right', transform=axs.transAxes)
        k += 1
```
```python
```
### Plot velocity gradient in orthogonal cuts
```python
fig, ax = plt.subplots(nrows=4, ncols=4, figsize=(16,12))
plot_ortho_cut(param=vel_ext, extra_pix=20, fila=1, ax=ax, new_ax=False, marker_size=10, plot_cut_bound=True,
plot_cut_cent=True, print_delv=True, x_axis_phys=True, plot_sel_cuts=True, sel_cuts=range(1,15))
for i in range (4):
ax[i,0].set_ylabel(r'$v_{los}\ (km\,s^{-1})$', labelpad=10)
fig.supxlabel('distance from spine (pc)', size='xx-large')
ax[3,2].set_visible(False)
ax[3,3].set_visible(False)
plt.tight_layout()
fig.savefig('plots/vel_grad_w_cv_fila_1_all.pdf')
```
/var/folders/sk/jfjx4bmj537bjltqh3y2ldph0000gn/T/ipykernel_91483/3435525933.py:20: RuntimeWarning: invalid value encountered in divide
sign = del_x/abs(del_x)

```python
fig, ax = plt.subplots(nrows=4, ncols=5, figsize=(20,13))
plot_ortho_cut(param=vel_ext, extra_pix=16, fila=2, ax=ax, new_ax=False, marker_size=10, plot_cut_bound=True,
plot_cut_cent=True, print_delv=True, x_axis_phys=True, plot_sel_cuts=True, sel_cuts=range(1,21))
for i in range (4):
ax[i,0].set_ylabel(r'$v_{los}\ (km\,s^{-1})$', labelpad=10)
fig.supxlabel('distance from spine (pc)', size='xx-large')
plt.tight_layout()
fig.savefig('plots/vel_grad_w_cv_fila_2_all.pdf')
```
/var/folders/sk/jfjx4bmj537bjltqh3y2ldph0000gn/T/ipykernel_91483/3435525933.py:22: RuntimeWarning: invalid value encountered in divide
sign = del_y/abs(del_y)
/var/folders/sk/jfjx4bmj537bjltqh3y2ldph0000gn/T/ipykernel_91483/3435525933.py:20: RuntimeWarning: invalid value encountered in divide
sign = del_x/abs(del_x)

#### Sample cuts for the main text
```python
fig, ax = plt.subplots(nrows=1, ncols=3, figsize=(12,3))
plot_ortho_cut(param=vel_ext, extra_pix=20, fila=1, ax=ax, new_ax=False, marker_size=10, plot_cut_bound=True,
plot_cut_cent=True, print_delv=True, x_axis_phys=True, plot_sel_cuts=True, sel_cuts=[6, 9, 14])
ax[0].set_ylabel(r'$v_{los}\ (km\,s^{-1})$', labelpad=10)
fig.supxlabel('distance from spine (pc)', size='xx-large')
plt.tight_layout()
fig.savefig('plots/vel_grad_w_cv_fila_1_sample.pdf')
```
/var/folders/sk/jfjx4bmj537bjltqh3y2ldph0000gn/T/ipykernel_91483/3435525933.py:20: RuntimeWarning: invalid value encountered in divide
sign = del_x/abs(del_x)

```python
fig, ax = plt.subplots(nrows=1, ncols=3, figsize=(12,3))
plot_ortho_cut(param=vel_ext, extra_pix=16, fila=2, ax=ax, new_ax=False, marker_size=10, plot_cut_bound=True,
plot_cut_cent=True, print_delv=True, x_axis_phys=True, plot_sel_cuts=True, sel_cuts=[5,10,14])
ax[0].set_ylabel(r'$v_{los}\ (km\,s^{-1})$', labelpad=10)
fig.supxlabel('distance from spine (pc)', size='xx-large')
plt.tight_layout()
fig.savefig('plots/vel_grad_w_cv_fila_2_sample.pdf')
```
/var/folders/sk/jfjx4bmj537bjltqh3y2ldph0000gn/T/ipykernel_91483/3435525933.py:22: RuntimeWarning: invalid value encountered in divide
sign = del_y/abs(del_y)
/var/folders/sk/jfjx4bmj537bjltqh3y2ldph0000gn/T/ipykernel_91483/3435525933.py:20: RuntimeWarning: invalid value encountered in divide
sign = del_x/abs(del_x)

```python
```
### Plot distribution of $\rm{C_v}$
```python
evel_ext = fits.getdata('sorted_comps/evel_extended.fits')
```
##### Make list of the $\rm{C_v}$ values
```python
fila=1
cv_list_fil1 = []
ecv_list_fil1 = []
for i in range(len(cent_spn_both[fila-1])):
end_x1, end_y1 = x1_spn_both[fila-1][i], y1_spn_both[fila-1][i] # coords of start pixel
end_x2, end_y2 = x2_spn_both[fila-1][i], y2_spn_both[fila-1][i] # coords of end pixel
vel1 = vel_ext[int(np.round(end_y1)), int(np.round(end_x1))] # vel of start pixel
vel2 = vel_ext[int(np.round(end_y2)), int(np.round(end_x2))] # vel of end pixel
del_v = np.abs(vel1 - vel2) # diff in velocity
evel1 = evel_ext[int(np.round(end_y1)), int(np.round(end_x1))] # error in velocities
evel2 = evel_ext[int(np.round(end_y2)), int(np.round(end_x2))]
edel_v = evel1 + evel2 # error in vel diff
cv = calc_cv_chen(del_v=del_v, ML=ML_both[fila-1][i]) # C_V for the cut
ecv = calc_cv_chen(del_v=del_v, ML=ML_both[fila-1][i], ret_err=True, edel_v=edel_v) # error in C_V
#
# print(edel_v)
cv_list_fil1.append(cv)
ecv_list_fil1.append(ecv)
# print(end_x1)
```
```python
fila=2
cv_list_fil2 = []
ecv_list_fil2 = []
for i in range(len(cent_spn_both[fila-1])):
end_x1, end_y1 = x1_spn_both[fila-1][i], y1_spn_both[fila-1][i] # coords of start pixel
end_x2, end_y2 = x2_spn_both[fila-1][i], y2_spn_both[fila-1][i] # coords of end pixel
vel1 = vel_ext[int(np.round(end_y1)), int(np.round(end_x1))] # vel of start pixel
vel2 = vel_ext[int(np.round(end_y2)), int(np.round(end_x2))] # vel of end pixel
del_v = np.abs(vel1 - vel2) # diff in velocity
evel1 = evel_ext[int(np.round(end_y1)), int(np.round(end_x1))] # error in velocities
evel2 = evel_ext[int(np.round(end_y2)), int(np.round(end_x2))]
edel_v = evel1 + evel2 # error in vel diff
cv = calc_cv_chen(del_v=del_v, ML=ML_both[fila-1][i]) # C_V for the cut
ecv = calc_cv_chen(del_v=del_v, ML=ML_both[fila-1][i], ret_err=True, edel_v=edel_v) # error in C_V
# print(edel_v)
cv_list_fil2.append(cv)
ecv_list_fil2.append(ecv)
# print(end_x1)
```
```python
```
#### KDE
```python
from scipy.stats import gaussian_kde
```
##### combine lists of $\rm C_V$ and their errors for cuts across both filaments
```python
cv_list_combi = cv_list_fil1 + cv_list_fil2
ecv_list_combi = ecv_list_fil1 + ecv_list_fil2
```
###### generate x-grid based on the spread of $\rm C_V$ values
```python
cv_min = -0.001
cv_max = 0.055
x_grid = np.linspace(cv_min,cv_max, 100)
```
###### calculate KDE with and without the errors as weights
```python
kde_w_weight = gaussian_kde(cv_list_combi, weights=np.asarray(ecv_list_combi))
kde_wo_weight = gaussian_kde(cv_list_combi)
pdf_w_weight = kde_w_weight.evaluate(x_grid)
pdf_wo_weight = kde_wo_weight.evaluate(x_grid)
eh = kde_w_weight(x_grid)
```
```python
plt.plot(x_grid, pdf_wo_weight, color='blue', alpha=0.5, lw=3)
plt.xlabel(r'$C_v$')
plt.ylabel('density', labelpad=5)
plt.xticks(np.arange(0, cv_max, 0.01))
plt.tight_layout()
# plt.savefig('plots/kde_cv_both_fila.pdf')
```
```python
```
### incorporate errors in $\rm C_v$
```python
```
|
SpandanChREPO_NAMEBarnard5_filamentsPATH_START.@Barnard5_filaments_extracted@Barnard5_filaments-main@analyse_cv.ipynb@.PATH_END.py
|
{
"filename": "remote_memory_test.py",
"repo_name": "tensorflow/tensorflow",
"repo_path": "tensorflow_extracted/tensorflow-master/tensorflow/python/eager/memory_tests/remote_memory_test.py",
"type": "Python"
}
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for memory leaks in remote eager execution."""
from tensorflow.python.eager import def_function
from tensorflow.python.eager import remote
from tensorflow.python.eager import test
from tensorflow.python.eager.memory_tests import memory_test_util
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.training import server_lib
class RemoteWorkerMemoryTest(test.TestCase):
  """Checks that eager ops touching remote tensors do not leak memory."""

  def __init__(self, method):
    super(RemoteWorkerMemoryTest, self).__init__(method)
    # used for remote worker tests
    self._cached_server = server_lib.Server.create_local_server()
    # Strip the "grpc://" scheme; connect_to_remote_host expects host:port.
    self._cached_server_target = self._cached_server.target[len("grpc://"):]

  def testMemoryLeakInLocalCopy(self):
    """Runs a local tf.function on a remote tensor and asserts no leak."""
    if not memory_test_util.memory_profiler_is_available():
      self.skipTest("memory_profiler required to run this test")
    remote.connect_to_remote_host(self._cached_server_target)
    # Run a function locally with the input on a remote worker and ensure we
    # do not leak a reference to the remote tensor.
    @def_function.function
    def local_func(i):
      return i
    def func():
      # Allocate the input on the remote worker's CPU device.
      with ops.device("job:worker/replica:0/task:0/device:CPU:0"):
        x = array_ops.zeros([1000, 1000], dtypes.int32)
      local_func(x)
    memory_test_util.assert_no_leak(
        func, num_iters=100, increase_threshold_absolute_mb=50)
# Standard TensorFlow test entry point.
if __name__ == "__main__":
  test.main()
|
tensorflowREPO_NAMEtensorflowPATH_START.@tensorflow_extracted@tensorflow-master@tensorflow@python@eager@memory_tests@remote_memory_test.py@.PATH_END.py
|
{
"filename": "zeus_sampler.py",
"repo_name": "Samreay/Barry",
"repo_path": "Barry_extracted/Barry-master/barry/samplers/zeus_sampler.py",
"type": "Python"
}
|
import logging
import os
import numpy as np
from barry.samplers.sampler import Sampler
class ZeusSampler(Sampler):
    """Barry sampler backend wrapping the ``zeus`` ensemble slice sampler."""

    def __init__(self, num_walkers=None, temp_dir=None, num_steps=20000, autoconverge=True, print_progress=False):
        """
        Parameters
        ----------
        num_walkers : int, optional
            Number of walkers. Defaults to ``4 * num_dim`` at fit time.
        temp_dir : str, optional
            Directory used to serialise chains (created if missing).
        num_steps : int, optional
            Maximum number of MCMC steps.
        autoconverge : bool, optional
            If True, attach zeus' autocorrelation-based convergence callbacks.
        print_progress : bool, optional
            If True, show zeus' progress bar while sampling.
        """
        self.logger = logging.getLogger("barry")
        self.num_steps = num_steps
        self.num_walkers = num_walkers
        self.temp_dir = temp_dir
        if temp_dir is not None and not os.path.exists(temp_dir):
            os.makedirs(temp_dir, exist_ok=True)
        self.autoconverge = autoconverge
        self.print_progress = print_progress

    def get_file_suffix(self):
        # Suffix used by the base class when constructing the serialised chain filename.
        return "zeus_chain.npy"

    def fit(self, model, save_dims=None, uid=None):
        """
        Fit the model

        Parameters
        ----------
        model : class <Model>
            An instance of one of barry's model classes
        save_dims : int, optional
            Only return values for the first ``save_dims`` parameters.
            Useful to remove numerous marginalisation parameters if running
            low on memory or hard drive space.
        uid : str, optional
            A unique identifier used to differentiate different fits
            if two fits both serialise their chains and use the
            same temporary directory

        Returns
        -------
        dict
            A dictionary containing the chain and the weights
        """
        import zeus

        log_posterior = model.get_posterior
        start = model.get_start
        num_dim = model.get_num_dim()
        assert log_posterior is not None
        assert start is not None
        filename = self.get_filename(uid)
        if os.path.exists(filename):
            # Fix: this used to be an f-string with no placeholder reading "(unknown)";
            # report the actual file we are loading from.
            self.logger.info(f"Not sampling, returning result from Zeus file {filename}.")
            return self.load_file(filename)
        if self.num_walkers is None:
            self.num_walkers = num_dim * 4
        if save_dims is None:
            save_dims = num_dim
        # Fix: this debug message was previously emitted twice in a row.
        self.logger.debug("Fitting framework with %d dimensions" % num_dim)
        self.logger.info("Using Zeus Sampler")
        callbacks = []
        if self.autoconverge:
            # Default convergence criteria from Zeus docos. Seem reasonable.
            cb0 = zeus.callbacks.AutocorrelationCallback(ncheck=100, dact=0.01, nact=50)
            cb1 = zeus.callbacks.MinIterCallback(nmin=500)
            callbacks = [cb0, cb1]
        pos = start(num_walkers=self.num_walkers)
        self.logger.info("Sampling posterior now")
        sampler = zeus.EnsembleSampler(self.num_walkers, num_dim, log_posterior, verbose=False)
        sampler.run_mcmc(pos, self.num_steps, callbacks=callbacks, progress=self.print_progress)
        self.logger.debug("Fit finished")
        # Discard twice the maximum integrated autocorrelation time as burn-in.
        tau = zeus.AutoCorrTime(sampler.get_chain())
        burnin = int(2 * np.max(tau))
        samples = sampler.get_chain(discard=burnin, flat=True)
        likelihood = sampler.get_log_prob(discard=burnin, flat=True)
        self._save(samples, likelihood, filename, save_dims)
        return {"chain": samples, "weights": np.ones(len(likelihood)), "posterior": likelihood}

    def _save(self, chain, likelihood, filename, save_dims):
        # Column 0 holds the log-posterior; remaining columns are the kept parameters.
        res = np.vstack((likelihood, chain[:, :save_dims].T)).T
        np.save(filename, res.astype(np.float32))

    def load_file(self, filename):
        """Load a chain previously serialised by :meth:`_save`."""
        results = np.load(filename)
        likelihood = results[:, 0]
        flat_chain = results[:, 1:]
        return {"chain": flat_chain, "weights": np.ones(len(likelihood)), "posterior": likelihood}
|
SamreayREPO_NAMEBarryPATH_START.@Barry_extracted@Barry-master@barry@samplers@zeus_sampler.py@.PATH_END.py
|
{
"filename": "_tickwidth.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/isosurface/colorbar/_tickwidth.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class TickwidthValidator(_plotly_utils.basevalidators.NumberValidator):
    """Number validator for the `isosurface.colorbar.tickwidth` property."""

    def __init__(self, plotly_name="tickwidth", parent_name="isosurface.colorbar", **kwargs):
        # Extract overridable defaults before forwarding the remaining kwargs.
        edit_type = kwargs.pop("edit_type", "calc")
        minimum = kwargs.pop("min", 0)
        super().__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            min=minimum,
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@isosurface@colorbar@_tickwidth.py@.PATH_END.py
|
{
"filename": "setup.py",
"repo_name": "giuspugl/MCMole3D",
"repo_path": "MCMole3D_extracted/MCMole3D-master/setup.py",
"type": "Python"
}
|
import setuptools
# Use the repository README as the long description shown on PyPI.
with open("README.md", "r") as fh:
    long_description = fh.read()
# Minimal setuptools packaging metadata; packages are auto-discovered.
setuptools.setup(
    name="mcmole3d-giuspugl",
    version="0.0.1",
    author="Giuseppe Puglisi",
    author_email="giuse.puglisi@gmail.com",
    description="Monte-Carlo realization of Galactic CO emission",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/giuspugl/MCMole3D",
    packages=setuptools.find_packages(),
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: GNU Lesser General Public License v3 (LGPLv3)",
        "Operating System :: OS Independent",
    ],
)
|
giuspuglREPO_NAMEMCMole3DPATH_START.@MCMole3D_extracted@MCMole3D-master@setup.py@.PATH_END.py
|
{
"filename": "_title.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/graph_objs/barpolar/marker/colorbar/_title.py",
"type": "Python"
}
|
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Title(_BaseTraceHierarchyType):
    """Accessor for the `barpolar.marker.colorbar.title` compound property.

    NOTE: this class follows the plotly graph-objects codegen layout;
    only documentation is edited here.
    """

    # class properties
    # --------------------
    _parent_path_str = "barpolar.marker.colorbar"
    _path_str = "barpolar.marker.colorbar.title"
    _valid_props = {"font", "side", "text"}

    # font
    # ----
    @property
    def font(self):
        """
        Sets this color bar's title font. Note that the title's font
        used to be set by the now deprecated `titlefont` attribute.

        The 'font' property may be specified as an instance of
        :class:`plotly.graph_objs.barpolar.marker.colorbar.title.Font`
        or as a dict of Font constructor properties (color, family,
        lineposition, shadow, size, style, textcase, variant, weight).

        Returns
        -------
        plotly.graph_objs.barpolar.marker.colorbar.title.Font
        """
        return self["font"]

    @font.setter
    def font(self, val):
        self["font"] = val

    # side
    # ----
    @property
    def side(self):
        """
        Determines the location of the color bar's title with respect
        to the color bar. Defaults to "top" when `orientation` is "v"
        and to "right" when `orientation` is "h". Note that the title's
        location used to be set by the now deprecated `titleside`
        attribute.

        The 'side' property is an enumeration that may be specified as
        one of ['right', 'top', 'bottom'].

        Returns
        -------
        Any
        """
        return self["side"]

    @side.setter
    def side(self, val):
        self["side"] = val

    # text
    # ----
    @property
    def text(self):
        """
        Sets the title of the color bar. Note that before the existence
        of `title.text`, the title's contents used to be defined as the
        `title` attribute itself. This behavior has been deprecated.

        The 'text' property is a string and may be specified as a
        string or a number that will be converted to a string.

        Returns
        -------
        str
        """
        return self["text"]

    @text.setter
    def text(self, val):
        self["text"] = val

    # Self properties description
    # ---------------------------
    @property
    def _prop_descriptions(self):
        return """\
        font
            Sets this color bar's title font. Note that the title's
            font used to be set by the now deprecated `titlefont`
            attribute.
        side
            Determines the location of color bar's title with
            respect to the color bar. Defaults to "top" when
            `orientation` if "v" and defaults to "right" when
            `orientation` if "h". Note that the title's location
            used to be set by the now deprecated `titleside`
            attribute.
        text
            Sets the title of the color bar. Note that before the
            existence of `title.text`, the title's contents used to
            be defined as the `title` attribute itself. This
            behavior has been deprecated.
        """

    def __init__(self, arg=None, font=None, side=None, text=None, **kwargs):
        """
        Construct a new Title object.

        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or an
            instance of :class:`plotly.graph_objs.barpolar.marke
            r.colorbar.Title`
        font
            Title font (see the `font` property).
        side
            Title location relative to the color bar: 'right', 'top'
            or 'bottom'.
        text
            Title text of the color bar.

        Returns
        -------
        Title
        """
        super(Title, self).__init__("title")
        # Fast path: adopt an existing parent without re-validating.
        if "_parent" in kwargs:
            self._parent = kwargs["_parent"]
            return

        # Validate arg
        # ------------
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            arg = _copy.copy(arg)
        else:
            raise ValueError(
                """\
The first argument to the plotly.graph_objs.barpolar.marker.colorbar.Title
constructor must be a dict or
an instance of :class:`plotly.graph_objs.barpolar.marker.colorbar.Title`"""
            )

        # Handle skip_invalid
        # -------------------
        self._skip_invalid = kwargs.pop("skip_invalid", False)
        self._validate = kwargs.pop("_validate", True)

        # Populate data dict with properties
        # ----------------------------------
        # Explicit keyword arguments take precedence over entries in `arg`.
        _v = arg.pop("font", None)
        _v = font if font is not None else _v
        if _v is not None:
            self["font"] = _v
        _v = arg.pop("side", None)
        _v = side if side is not None else _v
        if _v is not None:
            self["side"] = _v
        _v = arg.pop("text", None)
        _v = text if text is not None else _v
        if _v is not None:
            self["text"] = _v

        # Process unknown kwargs
        # ----------------------
        self._process_kwargs(**dict(arg, **kwargs))

        # Reset skip_invalid
        # ------------------
        self._skip_invalid = False
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@graph_objs@barpolar@marker@colorbar@_title.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "astropy/astroquery",
"repo_path": "astroquery_extracted/astroquery-main/astroquery/solarsystem/__init__.py",
"type": "Python"
}
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
astroquery.solarsystem
----------------------
a collection of Solar-System related data services
"""
from .jpl import SBDB, SBDBClass, Horizons, HorizonsClass
from .imcce import Miriade, MiriadeClass, Skybot, SkybotClass
from .mpc import MPC, MPCClass
# Explicit public API of astroquery.solarsystem (re-exported service classes/instances).
__all__ = ["SBDB", "SBDBClass", "Horizons", "HorizonsClass",
           "Miriade", "MiriadeClass", "Skybot", "SkybotClass",
           "MPC", "MPCClass"]
|
astropyREPO_NAMEastroqueryPATH_START.@astroquery_extracted@astroquery-main@astroquery@solarsystem@__init__.py@.PATH_END.py
|
{
"filename": "_familysrc.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/pie/outsidetextfont/_familysrc.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class FamilysrcValidator(_plotly_utils.basevalidators.SrcValidator):
    """Source-reference validator for `pie.outsidetextfont.familysrc`."""

    def __init__(self, plotly_name="familysrc", parent_name="pie.outsidetextfont", **kwargs):
        # Supply the default edit type only when the caller has not overridden it.
        kwargs.setdefault("edit_type", "none")
        super().__init__(plotly_name=plotly_name, parent_name=parent_name, **kwargs)
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@pie@outsidetextfont@_familysrc.py@.PATH_END.py
|
{
"filename": "test_em.py",
"repo_name": "dstndstn/tractor",
"repo_path": "tractor_extracted/tractor-main/tractor/test_em.py",
"type": "Python"
}
|
from __future__ import print_function
import matplotlib
matplotlib.use('Agg')
import pylab as plt
from tractor.emfit import em_fit_1d_samples
import numpy as np
from tractor.fitpsf import em_init_params
# Ground-truth parameters of a two-component 1-D Gaussian mixture.
mus = np.array([1.77, 3.7])
stds = np.array([3.9, 1.])
amps = np.array([0.8, 0.2])
N = 1000
#X = np.array(N)
# Draw per-sample component assignments z from the amplitude weights.
a = np.random.uniform(size=N)
assert(len(amps) == 2)
z = (a > amps[0]) * 1
print('z', np.unique(z))
print('n = 0:', np.sum(z == 0))
print('n = 1:', np.sum(z == 1))
# Sample the mixture: each point is drawn from its assigned component.
X = np.random.normal(size=N) * stds[z] + mus[z]
K = 2
# Initial guesses for the K weights, means and variances; the EM routine
# appears to update these arrays in place across iterations.
ww = np.ones(K)
mm = np.zeros(K)
vv = np.array([1., 2.])
for i in range(3):
    r = em_fit_1d_samples(X, ww, mm, vv)
    print('result:', r)
print('fit / true:')
print('A', ww, amps)
print('mu', mm, mus)
print('std', np.sqrt(vv), stds)
# Overlay the sample histogram with true (blue) and fitted (red) densities.
plt.clf()
n, b, p = plt.hist(X, 50, histtype='step', color='b')
B = (b[1] - b[0])
lo, hi = plt.xlim()
xx = np.linspace(lo, hi, 500)
# Scale each Gaussian by N*B so it matches the histogram's count normalisation.
gtrue = [a * N * B / (np.sqrt(2. * np.pi) * s) *
         np.exp(-0.5 * (xx - m)**2 / s**2)
         for (a, m, s) in zip(amps, mus, stds)]
plt.plot(xx, gtrue[0] + gtrue[1], 'b-', lw=2, alpha=0.5)
plt.plot(xx, gtrue[0], 'b-', lw=2, alpha=0.5)
plt.plot(xx, gtrue[1], 'b-', lw=2, alpha=0.5)
gfit = [a * N * B / (np.sqrt(2. * np.pi) * s) *
        np.exp(-0.5 * (xx - m)**2 / s**2)
        for (a, m, s) in zip(ww, mm, np.sqrt(vv))]
plt.plot(xx, gfit[0] + gfit[1], 'r-', lw=2, alpha=0.5)
plt.plot(xx, gfit[0], 'r-', lw=2, alpha=0.5)
plt.plot(xx, gfit[1], 'r-', lw=2, alpha=0.5)
plt.savefig('testem.png')
|
dstndstnREPO_NAMEtractorPATH_START.@tractor_extracted@tractor-main@tractor@test_em.py@.PATH_END.py
|
{
"filename": "_line.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/graph_objs/treemap/marker/_line.py",
"type": "Python"
}
|
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Line(_BaseTraceHierarchyType):
    """Accessor for the `treemap.marker.line` compound property.

    Controls the line drawn around each treemap sector. NOTE: this
    class follows the plotly graph-objects codegen layout; only
    documentation is edited here.
    """

    # class properties
    # --------------------
    _parent_path_str = "treemap.marker"
    _path_str = "treemap.marker.line"
    _valid_props = {"color", "colorsrc", "width", "widthsrc"}

    # color
    # -----
    @property
    def color(self):
        """
        Sets the color of the line enclosing each sector. Defaults to
        the `paper_bgcolor` value.

        The 'color' property is a color and may be specified as a hex
        string (e.g. '#ff0000'), an rgb/rgba, hsl/hsla or hsv/hsva
        string, a named CSS color, or a list/array of any of the above.

        Returns
        -------
        str|numpy.ndarray
        """
        return self["color"]

    @color.setter
    def color(self, val):
        self["color"] = val

    # colorsrc
    # --------
    @property
    def colorsrc(self):
        """
        Sets the source reference on Chart Studio Cloud for `color`.

        The 'colorsrc' property must be specified as a string or as a
        plotly.grid_objs.Column object.

        Returns
        -------
        str
        """
        return self["colorsrc"]

    @colorsrc.setter
    def colorsrc(self, val):
        self["colorsrc"] = val

    # width
    # -----
    @property
    def width(self):
        """
        Sets the width (in px) of the line enclosing each sector.

        The 'width' property is a number and may be specified as an
        int or float in the interval [0, inf], or a tuple, list, or
        one-dimensional numpy array of the above.

        Returns
        -------
        int|float|numpy.ndarray
        """
        return self["width"]

    @width.setter
    def width(self, val):
        self["width"] = val

    # widthsrc
    # --------
    @property
    def widthsrc(self):
        """
        Sets the source reference on Chart Studio Cloud for `width`.

        The 'widthsrc' property must be specified as a string or as a
        plotly.grid_objs.Column object.

        Returns
        -------
        str
        """
        return self["widthsrc"]

    @widthsrc.setter
    def widthsrc(self, val):
        self["widthsrc"] = val

    # Self properties description
    # ---------------------------
    @property
    def _prop_descriptions(self):
        return """\
        color
            Sets the color of the line enclosing each sector.
            Defaults to the `paper_bgcolor` value.
        colorsrc
            Sets the source reference on Chart Studio Cloud for
            `color`.
        width
            Sets the width (in px) of the line enclosing each
            sector.
        widthsrc
            Sets the source reference on Chart Studio Cloud for
            `width`.
        """

    def __init__(
        self, arg=None, color=None, colorsrc=None, width=None, widthsrc=None, **kwargs
    ):
        """
        Construct a new Line object.

        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of
            :class:`plotly.graph_objs.treemap.marker.Line`
        color
            Line color (defaults to the `paper_bgcolor` value).
        colorsrc
            Chart Studio Cloud source reference for `color`.
        width
            Line width in px.
        widthsrc
            Chart Studio Cloud source reference for `width`.

        Returns
        -------
        Line
        """
        super(Line, self).__init__("line")
        # Fast path: adopt an existing parent without re-validating.
        if "_parent" in kwargs:
            self._parent = kwargs["_parent"]
            return

        # Validate arg
        # ------------
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            arg = _copy.copy(arg)
        else:
            raise ValueError(
                """\
The first argument to the plotly.graph_objs.treemap.marker.Line
constructor must be a dict or
an instance of :class:`plotly.graph_objs.treemap.marker.Line`"""
            )

        # Handle skip_invalid
        # -------------------
        self._skip_invalid = kwargs.pop("skip_invalid", False)
        self._validate = kwargs.pop("_validate", True)

        # Populate data dict with properties
        # ----------------------------------
        # Explicit keyword arguments take precedence over entries in `arg`.
        _v = arg.pop("color", None)
        _v = color if color is not None else _v
        if _v is not None:
            self["color"] = _v
        _v = arg.pop("colorsrc", None)
        _v = colorsrc if colorsrc is not None else _v
        if _v is not None:
            self["colorsrc"] = _v
        _v = arg.pop("width", None)
        _v = width if width is not None else _v
        if _v is not None:
            self["width"] = _v
        _v = arg.pop("widthsrc", None)
        _v = widthsrc if widthsrc is not None else _v
        if _v is not None:
            self["widthsrc"] = _v

        # Process unknown kwargs
        # ----------------------
        self._process_kwargs(**dict(arg, **kwargs))

        # Reset skip_invalid
        # ------------------
        self._skip_invalid = False
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@graph_objs@treemap@marker@_line.py@.PATH_END.py
|
{
"filename": "tracer_likelihood.py",
"repo_name": "sibirrer/lenstronomy",
"repo_path": "lenstronomy_extracted/lenstronomy-main/lenstronomy/Sampling/Likelihoods/tracer_likelihood.py",
"type": "Python"
}
|
import numpy as np
from lenstronomy.Util import class_creator
class TracerLikelihood(object):
    """Class to evaluate the likelihood of a tracer map given a lens/light model."""

    def __init__(
        self,
        tracer_data,
        kwargs_model,
        kwargs_imaging,
        tracer_likelihood_mask=None,
        linear_solver=True,
    ):
        """
        :param tracer_data: tracer data used to build the tracer model
        :param kwargs_model: model keyword arguments; may contain "tracer_source_band"
            selecting which imaging band sources the tracer light model (default 0)
        :param kwargs_imaging: keyword arguments forwarded to the imaging model creator
        :param tracer_likelihood_mask: boolean 2d arrays of size of images marking the
            pixels to be evaluated in the likelihood
        :param linear_solver: boolean, if True, linear solver for surface brightness
            amplitude is activated
        """
        # Band whose (linear) light model parameters feed the tracer model.
        self._tracer_light_model_band = kwargs_model.get("tracer_source_band", 0)
        self.image_model = class_creator.create_im_sim(
            kwargs_model=kwargs_model, linear_solver=linear_solver, **kwargs_imaging
        )
        self.tracerModel = class_creator.create_tracer_model(
            tracer_data, kwargs_model, tracer_likelihood_mask=tracer_likelihood_mask
        )

    def logL(
        self,
        kwargs_tracer_source,
        kwargs_lens=None,
        kwargs_source=None,
        kwargs_lens_light=None,
        kwargs_ps=None,
        kwargs_special=None,
        kwargs_extinction=None,
        param=None,
    ):
        """
        :param kwargs_tracer_source: tracer source keyword argument list
        :param kwargs_lens: lens model keyword argument list according to LensModel module
        :param kwargs_source: source light keyword argument list according to LightModel module
        :param kwargs_lens_light: deflector light (not lensed) keyword argument list according to LightModel module
        :param kwargs_ps: point source keyword argument list according to PointSource module
        :param kwargs_special: special keyword argument list as part of the Param module
        :param kwargs_extinction: extinction parameter keyword argument list according to LightModel module
        :param param: linear parameter vector used to update the light model amplitudes
        :return: log likelihood of the data given the model
        """
        # Substitute the linear (solved-for) amplitudes into the model kwargs
        # before evaluating the tracer likelihood.
        (
            kwargs_lens,
            kwargs_source,
            kwargs_lens_light,
            kwargs_ps,
        ) = self.image_model.update_linear_kwargs(
            param,
            model_band=self._tracer_light_model_band,
            kwargs_lens=kwargs_lens,
            kwargs_source=kwargs_source,
            kwargs_lens_light=kwargs_lens_light,
            kwargs_ps=kwargs_ps,
        )
        logL = self.tracerModel.likelihood_data_given_model(
            kwargs_tracer_source=kwargs_tracer_source,
            kwargs_lens=kwargs_lens,
            kwargs_source=kwargs_source,
            kwargs_extinction=kwargs_extinction,
            kwargs_special=kwargs_special,
        )
        # Fix: the original tested `np.isnan(logL) is True`, which compares a
        # numpy.bool_ against the Python True singleton by identity and is
        # therefore always False — NaN log-likelihoods leaked through instead
        # of being replaced by the large penalty below.
        if np.isnan(logL):
            return -(10**15)
        return logL

    @property
    def num_data(self):
        """
        :return: number of image data points
        """
        return self.tracerModel.num_data_evaluate

    def reset_point_source_cache(self, cache=True):
        """
        :param cache: boolean
        :return: None
        """
        self.tracerModel.reset_point_source_cache(cache=cache)
|
sibirrerREPO_NAMElenstronomyPATH_START.@lenstronomy_extracted@lenstronomy-main@lenstronomy@Sampling@Likelihoods@tracer_likelihood.py@.PATH_END.py
|
{
"filename": "constants.py",
"repo_name": "ska-sa/hibayes",
"repo_path": "hibayes_extracted/hibayes-master/hibayes/constants.py",
"type": "Python"
}
|
"""
# constants.py
Constants used for calculations within ledabayes package
"""
from numpy import pi, sqrt, log
# Constants
muJy2Jy = 1.0e-6  # micro-Jansky -> Jansky
sqDeg2sr = 4.0 * pi * pi / 129600.0  # square degrees -> steradians (equals (pi/180)**2)
sqrtTwo = sqrt(2.0)  # precomputed sqrt(2)
Jy2muJy = 1.0e6  # Jansky -> micro-Jansky
beamFac = pi / (4.0 * log(2.0))  # Gaussian beam factor pi/(4 ln 2)
|
ska-saREPO_NAMEhibayesPATH_START.@hibayes_extracted@hibayes-master@hibayes@constants.py@.PATH_END.py
|
{
"filename": "abscal_inspect_2458085.ipynb",
"repo_name": "HERA-Team/H1C_IDR3_Notebooks",
"repo_path": "H1C_IDR3_Notebooks-main/abscal_inspect/abscal_inspect_2458085.ipynb",
"type": "Jupyter Notebook"
}
|
# Stage 2 Absolute Calibration Nightly Notebook
**Josh Dillon**, Last Revised 9/23/20
```python
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
from hera_cal import io, redcal, apply_cal, abscal, utils
from hera_cal.smooth_cal import build_time_blacklist
from hera_qm.metrics_io import load_metric_file
import pyuvdata
import glob
import os
from copy import deepcopy
import inspect
import h5py
import matplotlib.cm as cm
from IPython.display import display, HTML
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
display(HTML("<style>.container { width:100% !important; }</style>"))
```
<style>.container { width:100% !important; }</style>
```python
# If you want to run this notebook locally, copy the output of the next cell into the first few lines of this cell.
# JD = '2459122'
# data_path = '/lustre/aoc/projects/hera/H4C/2459122'
# lst_blacklist_string = '0-1.3 2.5-4.3 5.0-5.7 6.5-9.1 10.6-11.5 11.9-14.3 16.3-1.3'
# abscal_model_glob = '/lustre/aoc/projects/hera/zmartino/hera_calib_model/H3C/abscal_files_unique_baselines/zen.2458894.?????.uvh5'
# os.environ["JULIANDATE"] = JD
# os.environ["DATA_PATH"] = data_path
# os.environ["LST_BLACKLIST_STRING"] = lst_blacklist_string
# os.environ["ABSCAL_MODEL_GLOB"] = abscal_model_glob
```
```python
# Use environment variables to figure out path to data
JD = os.environ['JULIANDATE']
data_path = os.environ['DATA_PATH']
lst_blacklist_string = os.environ['LST_BLACKLIST_STRING']
abscal_model_glob = os.environ['ABSCAL_MODEL_GLOB']
print(f'JD = "{JD}"')
print(f'data_path = "{data_path}"')
print(f'lst_blacklist_string = "{lst_blacklist_string}"')
print(f'abscal_model_glob = "{abscal_model_glob}"')
```
JD = "2458085"
data_path = "/lustre/aoc/projects/hera/H1C_IDR3/IDR3_2/2458085"
lst_blacklist_string = ""
abscal_model_glob = "/lustre/aoc/projects/hera/H1C_IDR3/abscal_model/zen.245804*.HH.uvRXLS.uvh5"
```python
print('Looking for data in', data_path, 'on JD', JD)
data_list = sorted(glob.glob(os.path.join(data_path, f'zen.{JD}.?????.sum.uvh5')))
if len(data_list) == 0:
data_list = sorted(glob.glob(os.path.join(data_path, f'zen.{JD}.?????.uvh5')))
print('...found {} data files.'.format(len(data_list)))
abscal_list = sorted(glob.glob(os.path.join(data_path, f'zen.{JD}.*.abs.calfits')))
print('...found {} abscal files.'.format(len(abscal_list)))
omnical_list = sorted(glob.glob(os.path.join(data_path, f'zen.{JD}.*.sum.omni.calfits')))
print('...found {} omnical files.'.format(len(omnical_list)))
```
Looking for data in /lustre/aoc/projects/hera/H1C_IDR3/IDR3_2/2458085 on JD 2458085
...found 73 data files.
...found 73 abscal files.
...found 73 omnical files.
# Load And Inspect a Single File
```python
# get all JDs and LSTs
_, _, file_lst_arrays, file_time_arrays = io.get_file_times(data_list)
# parse lst_blacklist_string
lst_blacklists = []
if len(lst_blacklist_string) > 0:
lst_blacklists = [tuple([float(arg) for arg in arg_pair.split('-', maxsplit=1)])
for arg_pair in lst_blacklist_string.split(' ')]
# get times that are blacklisted and reshape them like file_time_arrays
time_blacklisted_flat = build_time_blacklist(np.hstack(file_time_arrays), lst_blacklists=lst_blacklists)
time_blacklisted = [fta.astype(bool) for fta in file_time_arrays]
n = 0
for i in range(len(file_time_arrays)):
time_blacklisted[i] = np.zeros_like(time_blacklisted[i], dtype=bool)
for j in range(len(file_time_arrays[i])):
time_blacklisted[i][j] = time_blacklisted_flat[n]
n += 1
# pick the central time from among the not-LST blacklisted files, if possible
good_indices = [i for i, tb in enumerate(time_blacklisted) if not np.any(tb)]
if len(good_indices) > 0:
file_index = good_indices[len(good_indices)//2]
else:
file_index = len(data_list)//2
file_JD = '.'.join([s for s in data_list[file_index].split('.') if s.isdigit()])
```
```python
# Load abscal gains and determine ex_ants
hc = io.HERACal(abscal_list[file_index])
gains, gain_flags, _, _ = hc.read()
ex_ants = [ant for ant in gain_flags if np.all(gain_flags[ant])]
# Get min_bl_cut, we only want to compare baselines actually used in absolute calibration
try:
min_bl_cut = float(hc.history.replace('\n','').split('--min_bl_cut')[-1].split('--')[0].strip())
except:
print('Could not find min_bl_cut, setting to 1 m.')
min_bl_cut = 1.0
# Load the most common redundant baseline longer than min_bl_cut
hd = io.HERAData(data_list[file_index])
bls_to_plot = []
for pol in ['ee', 'nn']:
reds = redcal.get_reds({ant: hd.antpos[ant] for ant in hd.data_ants}, pols=[pol])
# reds = redcal.filter_reds(reds, ex_ants=ex_ants)
reds = sorted(reds, key=len, reverse=True)
bl_lens = np.array([np.linalg.norm(hd.antpos[red[0][1]] - hd.antpos[red[0][0]]) for red in reds])
try:
bl_group_to_plot = (np.array(reds)[bl_lens >= min_bl_cut])[0]
except:
bl_group_to_plot = reds[0]
bls_to_plot.extend(bl_group_to_plot)
# reds = sorted(reds, key=len, reverse=True)
data, flags, nsamples = hd.read(bls=bls_to_plot)
apply_cal.calibrate_in_place(data, gains, data_flags=flags, cal_flags=gain_flags)
```
Creating an ndarray from ragged nested sequences (which is a list-or-tuple of lists-or-tuples-or ndarrays with different lengths or shapes) is deprecated. If you meant to do this, you must specify 'dtype=object' when creating the ndarray
```python
plt.figure(figsize=(8,8))
plt.scatter(np.array([hd.antpos[ant][0] for ant in hd.data_ants]),
np.array([hd.antpos[ant][1] for ant in hd.data_ants]), c='w', s=0)
for ant in hd.data_ants:
pos = hd.antpos[ant]
bad = ant in [ant[0] for ant in ex_ants]
plt.gca().add_artist(plt.Circle(tuple(pos[0:2]), radius=7,
fill=(~bad), color=['grey','r'][bad]))
plt.text(pos[0],pos[1],str(ant), va='center', ha='center', color='w')
plt.xlabel("Antenna East-West Position (meters)")
plt.ylabel("Antenna North-South Position (meters)")
plt.title('Antenna Positions on {} (Red = Flagged)'.format(file_JD));
plt.axis('equal')
plt.tight_layout()
plt.show()
```

### Figure 1: Array and Flagged Antennas
#### OBSERVER CHECKLIST:
* Check that the array configuration looks reasonable.
* Check that all flags expected to be flagged are actually flagged but also that not everything is getting flagged.
```python
# Check whether the model is redundant by looking at the history
model_is_redundant = ('--model_is_redundant' in "".join(hc.history.split()))
# Find files that overlap with this file
abscal_matched_files = list(abscal.match_times(data_list[file_index],
sorted(glob.glob(abscal_model_glob)),
filetype='uvh5', atol=1e-5))
hdm = io.HERAData(abscal_matched_files)
# Get model baselines to load
model_bls = hdm.bls
model_antpos = hdm.antpos
if isinstance(model_bls, dict):
model_bls = list(model_bls.values())[0]
model_antpos = {ant: pos for antpos in hdm.antpos.values() for ant, pos in antpos.items()}
_, model_bl_to_load, data_to_model_bl_map = abscal.match_baselines(bls_to_plot, model_bls,
hd.antpos, model_antpos=model_antpos,
model_is_redundant=model_is_redundant)
model, model_flags, _ = hdm.read(bls=model_bl_to_load)
# Rephase model at index of best match to mean LST in the data
model_index = np.argmin(np.abs(model.lsts - np.mean(data.lsts)))
model_blvecs = {bl: model.antpos[bl[0]] - model.antpos[bl[1]] for bl in model.keys()}
utils.lst_rephase(model, model_blvecs, model.freqs, np.mean(data.lsts) - model.lsts[model_index],
lat=hdm.telescope_location_lat_lon_alt_degrees[0], inplace=True)
if not model_is_redundant:
model, _, _ = utils.red_average(model, flags=model_flags)
```
```python
import warnings
with warnings.catch_warnings():
warnings.filterwarnings('ignore', r'All-NaN (slice|axis) encountered')
for pol in ['ee', 'nn']:
for func, plot, ylabel in zip([np.abs, np.angle], [plt.semilogy, plt.plot], ['Amplitude (Jy)', 'Phase (Radians)']):
plt.figure(figsize=(16,4))
for bl in [k for k in bls_to_plot if k[2] == pol]:
ant0, ant1 = utils.split_bl(bl)
blvec = hd.antpos[ant0[0]] - hd.antpos[ant1[0]]
if (ant0 not in ex_ants) and (ant1 not in ex_ants):
to_plot = deepcopy(data[bl])
to_plot[flags[bl]] = np.nan + 1.0j * np.nan
to_plot = np.nanmedian(np.real(to_plot), axis=0) + 1.0j * np.nanmedian(np.imag(to_plot), axis=0)
plot(hd.freqs/1e6, func(to_plot))
for bl in [k for k in model if k[2] == pol]:
plot(hd.freqs/1e6, func(model[bl][model_index]), 'k-', label='Abscal Model')
plt.xlabel('Frequency (MHz)')
plt.ylabel(ylabel)
plt.legend(loc='lower right')
plt.title('{}-Polarized, {:f} m East, {:f} m North Visibility on {}'.format(pol, blvec[0], blvec[1], file_JD))
```




### Figure 2: Example redundant baseline group, absolute calibrated, compared to the Abscal Model
#### OBSERVER CHECKLIST:
* Check that the data all look pretty redundant.
* Check that the model isn't wildly out of line with the data.
# Load a whole day
```python
# Load chisq and flagging info from abscal gains
ant_flags_dict = {}
chisq_ee_dict = {}
chisq_nn_dict = {}
cspa_med_dict = {}
ants = set([])
for cal in abscal_list:
hc = io.HERACal(cal)
_, flags, cspa, chisq = hc.read()
ants |= set(flags.keys())
ant_flags_dict[cal] = {ant: np.all(flags[ant]) for ant in flags}
chisq_ee_dict[cal] = chisq['Jee']
chisq_nn_dict[cal] = chisq['Jnn']
cspa_med_dict[cal] = {ant: np.nanmedian(cspa[ant], axis=1) for ant in cspa}
all_flagged_dict = {ant: np.all([af[ant] for af in ant_flags_dict.values()]) for ant in ants}
cspa = {ant: np.hstack([np.squeeze(cspa_med_dict[cal][ant]) / \
~ant_flags_dict[cal][ant] for cal in abscal_list]) for ant in ants}
ee_chisq = np.vstack(np.array(list(chisq_ee_dict.values())))
nn_chisq = np.vstack(np.array(list(chisq_nn_dict.values())))
```
invalid value encountered in true_divide
Creating an ndarray from ragged nested sequences (which is a list-or-tuple of lists-or-tuples-or ndarrays with different lengths or shapes) is deprecated. If you meant to do this, you must specify 'dtype=object' when creating the ndarray
Creating an ndarray from ragged nested sequences (which is a list-or-tuple of lists-or-tuples-or ndarrays with different lengths or shapes) is deprecated. If you meant to do this, you must specify 'dtype=object' when creating the ndarray
```python
# save middle-numbered ants with a minimal number of flags
ants_to_save = {}
for pol in ['Jee', 'Jnn']:
min_flags = np.min([np.sum(~np.isfinite(cspa[ant]))
for ant in cspa if ant[1] == pol])
ant_candidates = sorted([ant for ant in cspa if ant[1] == pol and
np.sum(~np.isfinite(cspa[ant])) == min_flags])
Nac = len(ant_candidates)
ants_to_save[pol] = ant_candidates[(Nac // 2 - 1):(Nac // 2 + 1)]
# Reload abscal gains
times_dict = {}
gain_dict = {}
flag_dict = {}
for cal in abscal_list:
hc = io.HERACal(cal)
gains, flags, _, _ = hc.read()
times_dict[cal] = hc.times
gain_dict[cal] = {ant: gains[ant] for pol in ants_to_save for ant in ants_to_save[pol]}
flag_dict[cal] = {ant: flags[ant] for pol in ants_to_save for ant in ants_to_save[pol]}
times = np.hstack(list(times_dict.values()))
lsts = 12 / np.pi * pyuvdata.utils.get_lst_for_time(times, *hd.telescope_location_lat_lon_alt_degrees)
gains = {ant: np.vstack([gain_dict[cal][ant] for cal in gain_dict])
for pol in ants_to_save for ant in ants_to_save[pol]}
flags = {ant: np.vstack([flag_dict[cal][ant] for cal in flag_dict])
for pol in ants_to_save for ant in ants_to_save[pol]}
flag_mask = np.all([f for f in flags.values()], axis=0)
```
# Inspect a whole day
```python
# for overplotting blacklisted LSTs
my_cmap = cm.binary
my_cmap.set_under('k', alpha=0)
blacklist = np.ones_like(ee_chisq) * np.hstack(time_blacklisted)[:, np.newaxis]
```
You are modifying the state of a globally registered colormap. In future versions, you will not be able to modify a registered colormap in-place. To remove this warning, you can make a copy of the colormap first. cmap = copy.copy(mpl.cm.get_cmap("binary"))
```python
# Grid and plot overall chi^2 for each polarization
ee_chisq = np.vstack(np.array(list(chisq_ee_dict.values())))
nn_chisq = np.vstack(np.array(list(chisq_nn_dict.values())))
fig, axes = plt.subplots(1, 2, figsize=(20,12))
for ax, cs, t in zip(axes, [ee_chisq, nn_chisq], ['ee-polarized', 'nn-polarized']):
extent=[hd.freqs[0]/1e6, hd.freqs[-1]/1e6, times[-1], times[0]]
im = ax.imshow(cs / ~flag_mask, aspect='auto', vmin=0, cmap='inferno', vmax=10, interpolation='nearest', extent=extent)
ax.imshow(blacklist, aspect='auto', cmap=my_cmap, interpolation=None, clim=[0.9, 1], alpha=.25, extent=extent)
ax.set_title('Overall Abscal $\chi^2$ / $N_{bls}$: ' + t)
ax.set_xlabel('Frequency (MHz)')
ax.set_ylabel('LST (Hours)')
ax.set_yticklabels(np.around(lsts[[min(max(np.searchsorted(times, t), 0), len(times) - 1) for t in ax.get_yticks()]], 2))
plt.colorbar(im, ax=ax, label='$\chi^2$ / $N_{bls}$ (unitless)')
```
Creating an ndarray from ragged nested sequences (which is a list-or-tuple of lists-or-tuples-or ndarrays with different lengths or shapes) is deprecated. If you meant to do this, you must specify 'dtype=object' when creating the ndarray
Creating an ndarray from ragged nested sequences (which is a list-or-tuple of lists-or-tuples-or ndarrays with different lengths or shapes) is deprecated. If you meant to do this, you must specify 'dtype=object' when creating the ndarray
invalid value encountered in true_divide
FixedFormatter should only be used together with FixedLocator

### Figure 3 Overall Abscal $\chi^2 / N_{bls}$
This computes the difference between the calibrated data and the abscal model, normalized by the thermal noise. Grayed out regions are "blacklisted," meaning they are not flagged but they are given zero weight when performing calibration smoothing.
#### OBSERVER CHECKLIST:
* Look for regions of high $\chi^2$ that are not blacklisted.
```python
# Pick vmax to not saturate 90% of the data
vmax = np.max([np.percentile(np.abs(gains[ants_to_save[pol][1]][~flag_mask]), 90) for pol in ['Jee', 'Jnn']])
# Plot abscal gain amplitude waterfalls for a single antenna
fig, axes = plt.subplots(3, 2, figsize=(16,16), gridspec_kw={'height_ratios': [1, .25, .25]})
for ax, pol in zip(axes[0], ['Jee', 'Jnn']):
ant = ants_to_save[pol][1]
gains_here = deepcopy(gains[ant])
gains_here[flags[ant]] = np.nan
extent=[hd.freqs[0]/1e6, hd.freqs[-1]/1e6, times[-1], times[0]]
im = ax.imshow(np.abs(gains_here), aspect='auto', cmap='inferno',
interpolation='nearest', vmin=0, vmax=vmax, extent=extent)
ax.imshow(blacklist, aspect='auto', cmap=my_cmap, interpolation=None, clim=[0.9, 1], alpha=.25, extent=extent)
ax.set_title(f'Abscal Gain Amplitude of Antenna {ant[0]}: {pol[1:]}-polarized' )
ax.set_xlabel('Frequency (MHz)')
ax.set_ylabel('LST (Hours)')
ax.set_yticklabels(np.around(lsts[[min(max(np.searchsorted(times, t), 0), len(times) - 1) for t in ax.get_yticks()]], 2))
plt.colorbar(im, ax=ax, orientation='horizontal', pad=.07)
# Now plot median gain spectra and time series
for ax, pol in zip(axes[1], ['Jee', 'Jnn']):
ant = ants_to_save[pol][1]
gains_here = deepcopy(gains[ant])
gains_here[flags[ant]] = np.nan
if not np.all(np.hstack(time_blacklisted)):
ax.plot(hd.freqs / 1e6, np.nanmedian(np.abs(gains_here[~np.hstack(time_blacklisted), :]), axis=0))
ax.set_ylim([0, vmax])
ax.set_xlabel('Frequency (MHz)')
ax.set_ylabel('|g| (unitless)')
ax.set_title(f'Median Non-Blacklisted Abscal Gain Amplitude Spectrum of Antenna {ant[0]}: {pol[1:]}-polarized')
# Now plot median gain time series
for ax, pol in zip(axes[2], ['Jee', 'Jnn']):
ant = ants_to_save[pol][1]
gains_here = deepcopy(gains[ant])
gains_here[flags[ant]] = np.nan
if not np.all(np.hstack(time_blacklisted)):
ax.plot(lsts[~np.hstack(time_blacklisted)],
np.nanmedian(np.abs(gains_here[~np.hstack(time_blacklisted), :]), axis=1),
'b.', label='Not Blacklisted LSTs')
if np.any(np.hstack(time_blacklisted)):
ax.plot(lsts[np.hstack(time_blacklisted)],
np.nanmedian(np.abs(gains_here[np.hstack(time_blacklisted), :]), axis=1),
'r.', label='Blacklisted LSTs')
ax.set_ylim([0, vmax])
ax.set_xlabel('LST (hours)')
ax.set_ylabel('|g| (unitless)')
ax.set_title(f'Median Abscal Gain Amplitude Time-Series of Antenna {ant[0]}: {pol[1:]}-polarized')
ax.legend()
plt.tight_layout()
```
FixedFormatter should only be used together with FixedLocator
All-NaN slice encountered
All-NaN slice encountered
All-NaN slice encountered
All-NaN slice encountered

### Figure 4: Example Abscal Gain Amplitudes
Abscal gain amplitudes for an example antenna. In the waterfall, grayed out regions are "blacklisted," meaning they are not flagged but they are given zero weight when performing calibration smoothing. We also plot median non-blacklisted amplitude as a function of frequency (middle row) and the median amplitude as a function of time (bottom row)
#### OBSERVER CHECKLIST:
* Look to see that non-blacklisted times are relatively stable in amplitude
* Check to see if the bandpass looks reasonable
```python
# Plot abscal gain phase waterfalls for a single antenna/refant
fig, axes = plt.subplots(3, 2, figsize=(16,16), gridspec_kw={'height_ratios': [1, .25, .25]})
for ax, pol in zip(axes[0], ['Jee', 'Jnn']):
ant0, ant1 = ants_to_save[pol]
gains_ratio_here = gains[ant0] / gains[ant1]
gains_ratio_here[flags[ant0] | flags[ant1]] = np.nan
extent=[hd.freqs[0]/1e6, hd.freqs[-1]/1e6, times[-1], times[0]]
im = ax.imshow(np.angle(gains_ratio_here), aspect='auto', cmap='inferno',
interpolation='nearest', vmin=-np.pi, vmax=np.pi, extent=extent)
ax.imshow(blacklist, aspect='auto', cmap=my_cmap, interpolation=None, clim=[0.9, 1], alpha=.25, extent=extent)
ax.set_title(f'Abscal Gain Phase of Ant {ant0[0]} / Ant {ant1[0]}: {pol[1:]}-polarized' )
ax.set_xlabel('Frequency (MHz)')
ax.set_ylabel('LST (Hours)')
ax.set_yticklabels(np.around(lsts[[min(max(np.searchsorted(times, t), 0), len(times) - 1) for t in ax.get_yticks()]], 2))
plt.colorbar(im, ax=ax, orientation='horizontal', pad=.07)
# Now plot median gain spectra and time series
for ax, pol in zip(axes[1], ['Jee', 'Jnn']):
ant0, ant1 = ants_to_save[pol]
gains_ratio_here = gains[ant0] / gains[ant1]
gains_ratio_here[flags[ant0] | flags[ant1]] = np.nan
if not np.all(np.hstack(time_blacklisted)):
re_med = np.nanmedian(gains_ratio_here[~np.hstack(time_blacklisted), :].real, axis=0)
im_med = np.nanmedian(gains_ratio_here[~np.hstack(time_blacklisted), :].imag, axis=0)
ax.plot(hd.freqs / 1e6, np.angle(re_med + 1.0j * im_med))
ax.set_ylim([-np.pi, np.pi])
ax.set_xlabel('Frequency (MHz)')
ax.set_ylabel(f'Phase of g$_{{ant0[0]}}$ / g$_{{ant1[0]}}$')
ax.set_title(f'Median Non-Blacklisted Abscal Gain Phase Spectrum of Ant {ant0[0]} / Ant {ant1[0]}: {pol[1:]}-polarized')
# Now plot a single gain angle time series
for ax, pol in zip(axes[2], ['Jee', 'Jnn']):
ant0, ant1 = ants_to_save[pol]
gains_ratio_here = gains[ant0] / gains[ant1]
gains_ratio_here[flags[ant0] | flags[ant1]] = np.nan
# pick channel with minimum phase variance in the middle 100 channels
possible_chans = np.arange(len(hd.freqs))[len(hd.freqs)//2 - 50:len(hd.freqs)//2 + 50]
best_chan = np.argmin(np.var(np.angle(gains_ratio_here), axis=0)[len(hd.freqs)//2 - 50:len(hd.freqs)//2 + 50])
chan = possible_chans[best_chan]
if not np.all(np.hstack(time_blacklisted)):
ax.plot(lsts[~np.hstack(time_blacklisted)],
np.angle(gains_ratio_here[~np.hstack(time_blacklisted), chan]),
'b.', label='Not Blacklisted LSTs')
if np.any(np.hstack(time_blacklisted)):
ax.plot(lsts[np.hstack(time_blacklisted)],
np.angle(gains_ratio_here[np.hstack(time_blacklisted), chan]),
'r.', label='Blacklisted LSTs')
ax.set_ylim([-np.pi, np.pi])
ax.set_xlabel('LST (hours)')
ax.set_ylabel(f'Phase of g$_{ant0[0]}$ / g$_{ant1[0]}$')
ax.set_title(f'Abscal Gain Phase of Ant {ant0[0]} / Ant {ant1[0]} at Channel {chan}: {pol[1:]}-polarized')
ax.legend()
plt.tight_layout()
```
FixedFormatter should only be used together with FixedLocator
All-NaN slice encountered
All-NaN slice encountered

### Figure 5: Example Abscal Gain Phases
Relative gain phases of two example antennas. In the waterfall, grayed out regions are "blacklisted," meaning they are not flagged but they are given zero weight when performing calibration smoothing. We also plot median non-blacklisted phases as a function of frequency (middle row) and the phase of the specific channel within 50 channels of the middle with minimal phase variance (bottom row).
#### OBSERVER CHECKLIST:
* Look for regions of "hashy" phase structure that are not blacklisted or attributable to RFI.
# Metadata
```python
print(redcal.version.history_string())
```
------------
This file was produced by the function <module>() in <ipython-input-1-c6de44361328> using:
git_branch: HEAD
git_description: v3.0-801-ga11cb1a2
git_hash: a11cb1a24a2630acc7d98e3c7b45b3ad4b26bc4b
git_origin: git@github.com:HERA-Team/hera_cal.git
version: 3.0
------------
|
HERA-TeamREPO_NAMEH1C_IDR3_NotebooksPATH_START.@H1C_IDR3_Notebooks-main@abscal_inspect@abscal_inspect_2458085.ipynb@.PATH_END.py
|
{
"filename": "_plot_bgcolor.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/layout/_plot_bgcolor.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class Plot_BgcolorValidator(_plotly_utils.basevalidators.ColorValidator):
    """Color validator for the ``layout.plot_bgcolor`` property."""

    def __init__(self, plotly_name="plot_bgcolor", parent_name="layout", **kwargs):
        # Callers may override edit_type; otherwise default to "layoutstyle".
        edit_type = kwargs.pop("edit_type", "layoutstyle")
        super().__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@layout@_plot_bgcolor.py@.PATH_END.py
|
{
"filename": "test_ls372_agent_integration.py",
"repo_name": "simonsobs/socs",
"repo_path": "socs_extracted/socs-main/tests/integration/test_ls372_agent_integration.py",
"type": "Python"
}
|
import os
import ocs
import pytest
from integration.util import docker_compose_file # noqa: F401
from integration.util import create_crossbar_fixture
from ocs.base import OpCode
from ocs.testing import create_agent_runner_fixture, create_client_fixture
from socs.testing.device_emulator import create_device_emulator
# Set the OCS_CONFIG_DIR so we read the local default.yaml file always
os.environ['OCS_CONFIG_DIR'] = os.getcwd()
# Fixture that launches the Lakeshore 372 agent under test, logging to ./logs/.
run_agent = create_agent_runner_fixture(
    '../socs/agents/lakeshore372/agent.py',
    'ls372',
    args=["--log-dir", "./logs/"])
# Fixture providing an OCS client bound to the agent instance-id 'LSASIM'.
client = create_client_fixture('LSASIM')
# Fixture that ensures the crossbar router is up before tests run.
wait_for_crossbar = create_crossbar_fixture()
def build_init_responses():
    """Build the command -> response map used to emulate a Lakeshore 372.

    Covers the queries issued during agent initialization: identification,
    scanner state, configuration of control input 'A' and measurement
    channels 1-16, heater setup, and a few sensor readings.

    Returns
    -------
    dict
        Mapping of device query string to canned response string.
    """
    # Fixed responses for identification, the control input ('A'),
    # and the two heater outputs (0 = sample, 2 = still).
    responses = {
        '*IDN?': 'LSCI,MODEL372,LSASIM,1.3',
        'SCAN?': '01,1',
        'INSET? A': '0,010,003,00,1',
        'INNAME? A': 'Input A',
        'INTYPE? A': '1,04,0,15,0,2',
        'TLIMIT? A': '+0000',
        'OUTMODE? 0': '2,6,1,0,0,001',
        'HTRSET? 0': '+120.000,8,+0000.00,1',
        'OUTMODE? 2': '4,16,1,0,0,001',
        'HTRSET? 2': '+120.000,8,+0000.00,1',
    }
    # Identical default configuration for each of the 16 measurement channels.
    for chan in range(1, 17):
        responses.update({
            f'INSET? {chan}': '1,007,003,21,1',
            f'INNAME? {chan}': f'Channel {chan:02}',
            f'INTYPE? {chan}': '0,07,1,10,0,1',
            f'TLIMIT? {chan}': '+0000',
        })
    # Heater state queries.
    responses['RANGE? 0'] = '0'
    responses['RANGE? 2'] = '1'
    responses['STILL?'] = '+10.60'
    responses['HTR?'] = '+00.0005E+00'
    # Sensor readings (kelvin and resistance) for channel 1 and input A.
    responses['KRDG? 1'] = '+293.873E+00'
    responses['SRDG? 1'] = '+108.278E+00'
    responses['KRDG? A'] = '+00.0000E-03'
    responses['SRDG? A'] = '+000.000E+09'
    return responses
# Device emulator standing in for a real Lakeshore 372 over TCP on port 7777,
# preloaded with the responses needed for agent initialization.
emulator = create_device_emulator(build_init_responses(), relay_type='tcp', port=7777)
@pytest.mark.integtest
def test_testing(wait_for_crossbar):
    """Just a quick test to make sure we can bring up crossbar."""
    assert True
@pytest.mark.integtest
def test_ls372_init_lakeshore(wait_for_crossbar, emulator, run_agent, client):
    """init_lakeshore task should succeed against the emulator."""
    resp = client.init_lakeshore()
    # print(resp)
    assert resp.status == ocs.OK
    # print(resp.session)
    assert resp.session['op_code'] == OpCode.SUCCEEDED.value
@pytest.mark.integtest
def test_ls372_enable_control_chan(wait_for_crossbar, emulator, run_agent, client):
    """enable_control_chan task should succeed after initialization."""
    client.init_lakeshore()
    resp = client.enable_control_chan()
    assert resp.status == ocs.OK
    assert resp.session['op_code'] == OpCode.SUCCEEDED.value
@pytest.mark.integtest
def test_ls372_disable_control_chan(wait_for_crossbar, emulator, run_agent, client):
    """disable_control_chan task should succeed after initialization."""
    client.init_lakeshore()
    resp = client.disable_control_chan()
    assert resp.status == ocs.OK
    assert resp.session['op_code'] == OpCode.SUCCEEDED.value
@pytest.mark.integtest
def test_ls372_start_acq(wait_for_crossbar, emulator, run_agent, client):
    """acq process started with run_once=True should run one pass and succeed."""
    client.init_lakeshore()
    resp = client.acq.start(sample_heater=False, run_once=True)
    assert resp.status == ocs.OK
    assert resp.session['op_code'] == OpCode.STARTING.value
    client.acq.wait()
    resp = client.acq.status()
    assert resp.status == ocs.OK
    assert resp.session['op_code'] == OpCode.SUCCEEDED.value
@pytest.mark.integtest
def test_ls372_set_heater_range(wait_for_crossbar, emulator, run_agent, client):
    """set_heater_range on the sample heater (no settle wait) should succeed."""
    client.init_lakeshore()
    resp = client.set_heater_range(range=1e-3, heater='sample', wait=0)
    assert resp.status == ocs.OK
    assert resp.session['op_code'] == OpCode.SUCCEEDED.value
@pytest.mark.integtest
def test_ls372_set_excitation_mode(wait_for_crossbar, emulator, run_agent, client):
    """set_excitation_mode to current-mode on channel 1 should succeed."""
    client.init_lakeshore()
    resp = client.set_excitation_mode(channel=1, mode='current')
    assert resp.status == ocs.OK
    assert resp.session['op_code'] == OpCode.SUCCEEDED.value
@pytest.mark.integtest
def test_ls372_set_excitation(wait_for_crossbar, emulator, run_agent, client):
    """set_excitation on channel 1 should succeed."""
    client.init_lakeshore()
    resp = client.set_excitation(channel=1, value=1e-9)
    assert resp.status == ocs.OK
    assert resp.session['op_code'] == OpCode.SUCCEEDED.value
@pytest.mark.integtest
def test_ls372_get_excitation(wait_for_crossbar, emulator, run_agent, client):
    """get_excitation should report 2e-3 — presumably decoded from the
    emulator's default INTYPE response; confirm against the driver."""
    client.init_lakeshore()
    resp = client.get_excitation(channel=1)
    assert resp.status == ocs.OK
    assert resp.session['op_code'] == OpCode.SUCCEEDED.value
    assert resp.session['data']['excitation'] == 2e-3
@pytest.mark.integtest
def test_ls372_set_resistance_range(wait_for_crossbar, emulator, run_agent, client):
    """set_resistance_range on channel 1 should succeed."""
    client.init_lakeshore()
    resp = client.set_resistance_range(channel=1, resistance_range=2)
    assert resp.status == ocs.OK
    assert resp.session['op_code'] == OpCode.SUCCEEDED.value
@pytest.mark.integtest
def test_ls372_get_resistance_range(wait_for_crossbar, emulator, run_agent, client):
    """get_resistance_range should report 63.2 — presumably decoded from the
    emulator's default INTYPE range field; confirm against the driver."""
    client.init_lakeshore()
    client.set_resistance_range(channel=1, resistance_range=2)
    resp = client.get_resistance_range(channel=1)
    assert resp.status == ocs.OK
    assert resp.session['op_code'] == OpCode.SUCCEEDED.value
    assert resp.session['data']['resistance_range'] == 63.2
@pytest.mark.integtest
def test_ls372_set_dwell(wait_for_crossbar, emulator, run_agent, client):
    """set_dwell on channel 1 should succeed."""
    client.init_lakeshore()
    resp = client.set_dwell(channel=1, dwell=3)
    assert resp.status == ocs.OK
    assert resp.session['op_code'] == OpCode.SUCCEEDED.value
@pytest.mark.integtest
def test_ls372_get_dwell(wait_for_crossbar, emulator, run_agent, client):
    """get_dwell should report 7 — presumably the '007' dwell field of the
    emulator's default INSET response; confirm against the driver."""
    client.init_lakeshore()
    resp = client.get_dwell(channel=1)
    assert resp.status == ocs.OK
    assert resp.session['op_code'] == OpCode.SUCCEEDED.value
    assert resp.session['data']['dwell_time'] == 7
@pytest.mark.integtest
def test_ls372_set_pid(wait_for_crossbar, emulator, run_agent, client):
    """set_pid should succeed."""
    client.init_lakeshore()
    resp = client.set_pid(P=40, I=2, D=0)
    assert resp.status == ocs.OK
    assert resp.session['op_code'] == OpCode.SUCCEEDED.value
@pytest.mark.integtest
def test_ls372_set_active_channel(wait_for_crossbar, emulator, run_agent, client):
    """set_active_channel to channel 1 should succeed."""
    client.init_lakeshore()
    resp = client.set_active_channel(channel=1)
    assert resp.status == ocs.OK
    assert resp.session['op_code'] == OpCode.SUCCEEDED.value
@pytest.mark.integtest
def test_ls372_set_autoscan(wait_for_crossbar, emulator, run_agent, client):
    """set_autoscan(True) should succeed."""
    client.init_lakeshore()
    resp = client.set_autoscan(autoscan=True)
    assert resp.status == ocs.OK
    assert resp.session['op_code'] == OpCode.SUCCEEDED.value
@pytest.mark.integtest
def test_ls372_set_output_mode(wait_for_crossbar, emulator, run_agent, client):
    """set_output_mode to 'Off' on the still heater should succeed."""
    client.init_lakeshore()
    resp = client.set_output_mode(heater='still', mode='Off')
    assert resp.status == ocs.OK
    assert resp.session['op_code'] == OpCode.SUCCEEDED.value
@pytest.mark.integtest
def test_ls372_set_heater_output(wait_for_crossbar, emulator, run_agent, client):
    """set_heater_output on the still heater should succeed."""
    client.init_lakeshore()
    resp = client.set_heater_output(heater='still', output=50)
    assert resp.status == ocs.OK
    assert resp.session['op_code'] == OpCode.SUCCEEDED.value
@pytest.mark.integtest
def test_ls372_set_still_output(wait_for_crossbar, emulator, run_agent, client):
    """set_still_output should succeed."""
    client.init_lakeshore()
    resp = client.set_still_output(output=50)
    assert resp.status == ocs.OK
    assert resp.session['op_code'] == OpCode.SUCCEEDED.value
@pytest.mark.integtest
def test_ls372_get_still_output(wait_for_crossbar, emulator, run_agent, client):
    """get_still_output should succeed."""
    client.init_lakeshore()
    resp = client.get_still_output()
    assert resp.status == ocs.OK
    assert resp.session['op_code'] == OpCode.SUCCEEDED.value
@pytest.mark.integtest
def test_ls372_engage_channel(wait_for_crossbar, emulator, run_agent, client):
    """engage_channel should succeed turning channel 2 on."""
    client.init_lakeshore()
    resp = client.engage_channel(channel=2, state='on')
    assert resp.status == ocs.OK
    assert resp.session['op_code'] == OpCode.SUCCEEDED.value
@pytest.mark.integtest
def test_ls372_engage_autorange(wait_for_crossbar, emulator, run_agent, client):
    """engage_autorange should succeed turning autorange on for channel 3."""
    client.init_lakeshore()
    resp = client.engage_autorange(channel=3, state='on')
    assert resp.status == ocs.OK
    assert resp.session['op_code'] == OpCode.SUCCEEDED.value
@pytest.mark.integtest
def test_ls372_set_calibration_curve(wait_for_crossbar, emulator, run_agent, client):
    """set_calibration_curve should succeed assigning curve 28 to channel 4."""
    client.init_lakeshore()
    resp = client.set_calibration_curve(channel=4, curve_number=28)
    assert resp.status == ocs.OK
    assert resp.session['op_code'] == OpCode.SUCCEEDED.value
@pytest.mark.integtest
def test_ls372_get_input_setup(wait_for_crossbar, emulator, run_agent, client):
    """get_input_setup for channel 4 should succeed."""
    client.init_lakeshore()
    resp = client.get_input_setup(channel=4)
    assert resp.status == ocs.OK
    assert resp.session['op_code'] == OpCode.SUCCEEDED.value
@pytest.mark.integtest
def test_ls372_sample_custom_pid(wait_for_crossbar, emulator, run_agent, client):
    """custom_pid on the sample heater (test_mode) should start then succeed.

    The emulator responses are overridden so the PID loop sees channel 2
    readings near the 0.102 K setpoint.
    """
    client.init_lakeshore()
    # Override emulator responses for the PID loop's queries on channel 2.
    response = {'SCAN?': '02, 0',
                'KRDG? 2': '102E-3',
                'RANGE? 0': '5',
                'SRDG? 2': '15.00E+03',
                'HTRSET? 0': '50,8,+0003.00,1'}
    emulator.define_responses(response)
    resp = client.custom_pid.start(setpoint=0.102, heater='sample', channel=2,
                                   P=2500, I=1 / 20, update_time=0, sample_heater_range=3.16e-3,
                                   test_mode=True)
    print('resp:', resp)
    print('resp.status', resp.status)
    assert resp.status == ocs.OK
    assert resp.session['op_code'] == OpCode.STARTING.value
    client.custom_pid.wait()
    resp = client.custom_pid.status()
    assert resp.status == ocs.OK
    assert resp.session['op_code'] == OpCode.SUCCEEDED.value
@pytest.mark.integtest
def test_ls372_still_custom_pid(wait_for_crossbar, emulator, run_agent, client):
    """custom_pid on the still heater (test_mode) should start then succeed.

    The emulator responses are overridden so the PID loop sees channel 5
    readings near the 0.95 K setpoint.
    """
    client.init_lakeshore()
    # Override emulator responses for the PID loop's queries on channel 5.
    response = {'SCAN?': '05, 0',
                'KRDG? 5': '95E-3',
                'SRDG? 5': '15.00E+03',
                'RANGE? 2': '1',
                'OUTMODE? 2': '4,5,1,0,0,001',
                'HTRSET? 2': '+1020.000,8,+0000.00,1'}
    emulator.define_responses(response)
    resp = client.custom_pid.start(setpoint=0.95, heater='still', channel=5,
                                   P=0, I=1. / 7, update_time=0, test_mode=True)
    print('resp:', resp)
    print('resp.status', resp.status)
    assert resp.status == ocs.OK
    assert resp.session['op_code'] == OpCode.STARTING.value
    client.custom_pid.wait()
    resp = client.custom_pid.status()
    assert resp.status == ocs.OK
    assert resp.session['op_code'] == OpCode.SUCCEEDED.value
|
simonsobsREPO_NAMEsocsPATH_START.@socs_extracted@socs-main@tests@integration@test_ls372_agent_integration.py@.PATH_END.py
|
{
"filename": "callbacks_constructor.ipynb",
"repo_name": "langchain-ai/langchain",
"repo_path": "langchain_extracted/langchain-master/docs/docs/how_to/callbacks_constructor.ipynb",
"type": "Jupyter Notebook"
}
|
# How to propagate callbacks constructor
:::info Prerequisites
This guide assumes familiarity with the following concepts:
- [Callbacks](/docs/concepts/callbacks)
- [Custom callback handlers](/docs/how_to/custom_callbacks)
:::
Most LangChain modules allow you to pass `callbacks` directly into the constructor (i.e., initializer). In this case, the callbacks will only be called for that instance (and any nested runs).
:::warning
Constructor callbacks are scoped only to the object they are defined on. They are **not** inherited by children of the object. This can lead to confusing behavior,
and it's generally better to pass callbacks as a run time argument.
:::
Here's an example:
```python
# | output: false
# | echo: false
%pip install -qU langchain langchain_anthropic
import getpass
import os
os.environ["ANTHROPIC_API_KEY"] = getpass.getpass()
```
```python
from typing import Any, Dict, List
from langchain_anthropic import ChatAnthropic
from langchain_core.callbacks import BaseCallbackHandler
from langchain_core.messages import BaseMessage
from langchain_core.outputs import LLMResult
from langchain_core.prompts import ChatPromptTemplate
class LoggingHandler(BaseCallbackHandler):
def on_chat_model_start(
self, serialized: Dict[str, Any], messages: List[List[BaseMessage]], **kwargs
) -> None:
print("Chat model started")
def on_llm_end(self, response: LLMResult, **kwargs) -> None:
print(f"Chat model ended, response: {response}")
def on_chain_start(
self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs
) -> None:
print(f"Chain {serialized.get('name')} started")
def on_chain_end(self, outputs: Dict[str, Any], **kwargs) -> None:
print(f"Chain ended, outputs: {outputs}")
callbacks = [LoggingHandler()]
llm = ChatAnthropic(model="claude-3-sonnet-20240229", callbacks=callbacks)
prompt = ChatPromptTemplate.from_template("What is 1 + {number}?")
chain = prompt | llm
chain.invoke({"number": "2"})
```
Chat model started
Chat model ended, response: generations=[[ChatGeneration(text='1 + 2 = 3', message=AIMessage(content='1 + 2 = 3', response_metadata={'id': 'msg_01CdKsRmeS9WRb8BWnHDEHm7', 'model': 'claude-3-sonnet-20240229', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 16, 'output_tokens': 13}}, id='run-2d7fdf2a-7405-4e17-97c0-67e6b2a65305-0'))]] llm_output={'id': 'msg_01CdKsRmeS9WRb8BWnHDEHm7', 'model': 'claude-3-sonnet-20240229', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 16, 'output_tokens': 13}} run=None
AIMessage(content='1 + 2 = 3', response_metadata={'id': 'msg_01CdKsRmeS9WRb8BWnHDEHm7', 'model': 'claude-3-sonnet-20240229', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 16, 'output_tokens': 13}}, id='run-2d7fdf2a-7405-4e17-97c0-67e6b2a65305-0')
You can see that we only see events from the chat model run - no chain events from the prompt or broader chain.
## Next steps
You've now learned how to pass callbacks into a constructor.
Next, check out the other how-to guides in this section, such as how to [pass callbacks at runtime](/docs/how_to/callbacks_runtime).
|
langchain-aiREPO_NAMElangchainPATH_START.@langchain_extracted@langchain-master@docs@docs@how_to@callbacks_constructor.ipynb@.PATH_END.py
|
{
"filename": "GRGeometryTests.ipynb",
"repo_name": "GRTLCollaboration/engrenage",
"repo_path": "engrenage_extracted/engrenage-main/tests/GRGeometryTests.ipynb",
"type": "Jupyter Notebook"
}
|
```python
# Some tests that calculate known geometric quantities
# restart the kernel to clear past work
from IPython.core.display import HTML
HTML("<script>Jupyter.notebook.kernel.restart()</script>")
```
<script>Jupyter.notebook.kernel.restart()</script>
```python
# load the required python modules
import numpy as np
from scipy.interpolate import interp1d
from scipy.integrate import odeint
import time
import sys
import random
import matplotlib.pyplot as plt
%matplotlib inline
# homemade code
sys.path.append('../')
from source.mytests import * # go here to change the test conditions
from source.Grid import *
# Input parameters for grid and evolution here
max_r = 9.0
num_points = 106
log_factor = 1.0
my_grid = Grid(max_r, num_points, log_factor)
N = my_grid.num_points_r
```
```python
# This test sets up values such that R is known but phi = 0
initial_state = get_test_state_1(my_grid)
#unpackage the vector for readability
# NOTE: np.array_split assumes the state vector packs NUM_VARS equal-length fields in this order
u, v , phi, hrr, htt, hpp, K, arr, att, app, lambdar, shiftr, br, lapse = np.array_split(initial_state, NUM_VARS)
#plot initial conditions
plt.xlabel('r')
r = my_grid.r_vector
plt.plot(r, phi, label='phi')
plt.plot(r, hrr, label='hrr')
plt.plot(r, htt, label='htt')
plt.plot(r, hpp, '--', label='hpp')
plt.plot(r, lambdar, label='lambdar')
plt.legend(loc='best')
plt.grid()
```

```python
print(r,lambdar)
```
[-0.2195122 -0.13170732 -0.04390244 0.04390244 0.13170732 0.2195122
0.30731707 0.39512195 0.48292683 0.57073171 0.65853659 0.74634146
0.83414634 0.92195122 1.0097561 1.09756098 1.18536585 1.27317073
1.36097561 1.44878049 1.53658537 1.62439024 1.71219512 1.8
1.88780488 1.97560976 2.06341463 2.15121951 2.23902439 2.32682927
2.41463415 2.50243902 2.5902439 2.67804878 2.76585366 2.85365854
2.94146341 3.02926829 3.11707317 3.20487805 3.29268293 3.3804878
3.46829268 3.55609756 3.64390244 3.73170732 3.8195122 3.90731707
3.99512195 4.08292683 4.17073171 4.25853659 4.34634146 4.43414634
4.52195122 4.6097561 4.69756098 4.78536585 4.87317073 4.96097561
5.04878049 5.13658537 5.22439024 5.31219512 5.4 5.48780488
5.57560976 5.66341463 5.75121951 5.83902439 5.92682927 6.01463415
6.10243902 6.1902439 6.27804878 6.36585366 6.45365854 6.54146341
6.62926829 6.71707317 6.80487805 6.89268293 6.9804878 7.06829268
7.15609756 7.24390244 7.33170732 7.4195122 7.50731707 7.59512195
7.68292683 7.77073171 7.85853659 7.94634146 8.03414634 8.12195122
8.2097561 8.29756098 8.38536585 8.47317073 8.56097561 8.64878049
8.73658537 8.82439024 8.91219512 9. ] [-8.04805643e-01 -5.51549514e-01 -2.04650618e-01 2.04650618e-01
5.51549514e-01 8.04805643e-01 9.79371200e-01 1.09090740e+00
1.15437440e+00 1.18250380e+00 1.18536936e+00 1.17059059e+00
1.14376850e+00 1.10894607e+00 1.06900789e+00 1.02599639e+00
9.81349815e-01 9.36076116e-01 8.90877926e-01 8.46241511e-01
8.02499800e-01 7.59876987e-01 7.18520108e-01 6.78521436e-01
6.39934414e-01 6.02785005e-01 5.67079811e-01 5.32811877e-01
4.99964857e-01 4.68515989e-01 4.38438221e-01 4.09701722e-01
3.82274943e-01 3.56125347e-01 3.31219908e-01 3.07525439e-01
2.85008787e-01 2.63636948e-01 2.43377114e-01 2.24196682e-01
2.06063230e-01 1.88944487e-01 1.72808288e-01 1.57622529e-01
1.43355136e-01 1.29974030e-01 1.17447114e-01 1.05742261e-01
9.48273244e-02 8.46701578e-02 7.52386452e-02 6.65007453e-02
5.84245459e-02 5.09783255e-02 4.41306238e-02 3.78503159e-02
3.21066906e-02 2.68695302e-02 2.21091899e-02 1.77966750e-02
1.39037151e-02 1.04028334e-02 7.26740965e-03 4.47173708e-03
1.99107123e-03 -1.98328976e-04 -2.11916841e-03 -3.79309128e-03
-5.24066492e-03 -6.48137151e-03 -7.53360750e-03 -8.41469015e-03
-9.14087062e-03 -9.72735307e-03 -1.01883190e-02 -1.05369563e-02
-1.07854922e-02 -1.09452290e-02 -1.10265837e-02 -1.10391278e-02
-1.09916300e-02 -1.08920989e-02 -1.07478267e-02 -1.05654321e-02
-1.03509036e-02 -1.01096412e-02 -9.84649788e-03 -9.56581928e-03
-9.27148200e-03 -8.96693047e-03 -8.65521198e-03 -8.33901002e-03
-8.02067571e-03 -7.70225743e-03 -7.38552859e-03 -7.07201359e-03
-6.76301202e-03 -6.45962104e-03 -6.16275620e-03 -5.87317053e-03
-5.59147223e-03 -6.06191202e-03 -5.39746508e-04 -5.29058723e-04
-5.18685274e-04 -5.08613955e-04]
```python
# calculate the diagnostics, the Ham constraint should just have
# the term in bar_R so this tests the calculation of that quantity
# get the various derivs that we need to evolve things in vector form
# (derivatives are applied as matrix multiplications over the whole grid)
# second derivatives
d2phidx2 = np.dot(my_grid.derivatives.d2_matrix, phi )
d2hrrdx2 = np.dot(my_grid.derivatives.d2_matrix, hrr )
d2httdx2 = np.dot(my_grid.derivatives.d2_matrix, htt )
d2hppdx2 = np.dot(my_grid.derivatives.d2_matrix, hpp )
d2lapsedx2 = np.dot(my_grid.derivatives.d2_matrix, lapse )
d2shiftrdx2 = np.dot(my_grid.derivatives.d2_matrix, shiftr)
# first derivatives
dphidx = np.dot(my_grid.derivatives.d1_matrix, phi )
dhrrdx = np.dot(my_grid.derivatives.d1_matrix, hrr )
dhttdx = np.dot(my_grid.derivatives.d1_matrix, htt )
dhppdx = np.dot(my_grid.derivatives.d1_matrix, hpp )
darrdx = np.dot(my_grid.derivatives.d1_matrix, arr )
dattdx = np.dot(my_grid.derivatives.d1_matrix, att )
dappdx = np.dot(my_grid.derivatives.d1_matrix, app )
dKdx = np.dot(my_grid.derivatives.d1_matrix, K )
dlambdardx = np.dot(my_grid.derivatives.d1_matrix, lambdar)
dshiftrdx = np.dot(my_grid.derivatives.d1_matrix, shiftr )
dbrdx = np.dot(my_grid.derivatives.d1_matrix, br )
dlapsedx = np.dot(my_grid.derivatives.d1_matrix, lapse )
# preallocate diagnostic arrays (all overwritten below)
bar_R_test = np.zeros_like(phi)
chris_rrr = np.zeros_like(phi)
chris_ttr = np.zeros_like(phi)
chris_ppr = np.zeros_like(phi)
Ricci_rr = np.zeros_like(phi)
Ricci_tt = np.zeros_like(phi)
Ricci_pp = np.zeros_like(phi)
# pack the diagonal metric/curvature components into (3, N) arrays
h_tensor = np.array([hrr, htt, hpp])
a_tensor = np.array([arr, att, app])
em4phi = np.exp(-4.0*phi)  # conformal factor e^{-4 phi}
dhdr = np.array([dhrrdx, dhttdx, dhppdx])
d2hdr2 = np.array([d2hrrdx2, d2httdx2, d2hppdx2])
# Calculate some useful quantities
########################################################
# \hat \Gamma^i_jk
flat_chris = get_flat_spherical_chris(r)
# (unscaled) \bar\gamma_ij and \bar\gamma^ij
bar_gamma_LL = get_metric(r, h_tensor)
bar_gamma_UU = get_inverse_metric(r, h_tensor)
# The connections Delta^i, Delta^i_jk and Delta_ijk
Delta_U, Delta_ULL, Delta_LLL = get_connection(r, bar_gamma_UU, bar_gamma_LL, h_tensor, dhdr)
conformal_chris = get_conformal_chris(Delta_ULL, r)
# pull out the three Christoffel components we plot below
chris_rrr = conformal_chris[i_r][i_r][i_r][:]
chris_ttr = conformal_chris[i_t][i_t][i_r][:]
chris_ppr = conformal_chris[i_p][i_p][i_r][:]
bar_Rij = get_ricci_tensor(r, h_tensor, dhdr, d2hdr2, lambdar, dlambdardx,
                           Delta_U, Delta_ULL, Delta_LLL, bar_gamma_UU, bar_gamma_LL)
Ricci_rr = bar_Rij[i_r][i_r]
Ricci_tt = bar_Rij[i_t][i_t]
Ricci_pp = bar_Rij[i_p][i_p]
bar_Ricci_tensor = np.array([Ricci_rr,Ricci_tt,Ricci_pp])
bar_R_test = get_trace(bar_Ricci_tensor, bar_gamma_UU)
# trim the boundary points at both ends for plotting/diagnostics
r_diagnostics = r[num_ghosts:(N-num_ghosts)]
```
```python
# Calculate the rescaled quantities for comparison
# NOTE: all helpers here come from source.mytests / source.Grid (star-imported above)
########################################################
# rescaled \bar\gamma_ij and \bar\gamma^ij
r_gamma_LL = get_rescaled_metric(h_tensor)
r_gamma_UU = get_rescaled_inverse_metric(h_tensor)
# \bar A_ij, \bar A^ij and the trace A_i^i, then Asquared = \bar A_ij \bar A^ij
a_UU = get_a_UU(a_tensor, r_gamma_UU)
traceA = get_trace_A(a_tensor, r_gamma_UU)
Asquared = get_Asquared(a_tensor, r_gamma_UU)
# The rescaled connections Delta^i, Delta^i_jk and Delta_ijk
rDelta_U, rDelta_ULL, rDelta_LLL = get_rescaled_connection(r, r_gamma_UU,
                                                           r_gamma_LL, h_tensor, dhdr)
# rescaled \bar \Gamma^i_jk
r_conformal_chris = get_rescaled_conformal_chris(rDelta_ULL, r)
r_chris_rrr = r_conformal_chris[i_r][i_r][i_r][:]
r_chris_ttr = r_conformal_chris[i_t][i_t][i_r][:]
r_chris_ppr = r_conformal_chris[i_p][i_p][i_r][:]
# rescaled Ricci tensor
rbar_Rij = get_rescaled_ricci_tensor(r, h_tensor, dhdr, d2hdr2, lambdar, dlambdardx,
                                     rDelta_U, rDelta_ULL, rDelta_LLL,
                                     r_gamma_UU, r_gamma_LL)
```
```python
# Plot the Ricci scalar
bar_R_plot = bar_R_test[num_ghosts:(N-num_ghosts)]
plt.plot(r_diagnostics, bar_R_plot)
# compare to analytic result for R for test 1
epowr = np.exp(r_diagnostics)
x = r_diagnostics
tmp = (1.0 + x*x/epowr)**(0.5)
R_analytic = (1.0 / 8.0 / (x*x) * (epowr + x*x)**(-3.0) *
(16.0 * (x**6.0) * tmp
- epowr * (x**4.0) * (-20.0 + 12.0*x + 7.0*x*x - 48.0*tmp)
+ 16.0 * epowr**3.0 * (-1.0 + tmp)
+ 8.0 * epowr**2.0 * x*x * (8.0 - 9.0*x + x*x + 6 * tmp)))
plt.plot(x, R_analytic, '--')
plt.xlabel('r')
plt.ylabel('R value')
plt.xlim(0,3)
#plt.ylim(0,1)
plt.grid()
```

```python
# Plot christoffels
chris_rrr_d = chris_rrr[num_ghosts:(N-num_ghosts)]
plt.plot(r_diagnostics, chris_rrr_d)
plt.plot(r,r_chris_rrr)
# compare to analytic result for R for test 1
epowr = np.exp(r_diagnostics)
x = r_diagnostics
chris_rrr_analytic = -(0.5 * x * (x-2.0) / (epowr + x*x))
plt.plot(x, chris_rrr_analytic, '--')
plt.xlabel('r')
plt.ylabel('chris value')
plt.xlim(0,3)
#plt.ylim(0,1)
plt.grid()
```

```python
# Plot christoffels
chris_ttr_d = chris_ttr[num_ghosts:(N-num_ghosts)]
plt.plot(r_diagnostics, chris_ttr_d)
plt.plot(r,r_chris_ttr)
# compare to analytic result for R for test 1
epowr = np.exp(r_diagnostics)
x = r_diagnostics
chris_ttr_analytic = (( 4.0 * epowr + 2.0*x + x**3.0 ) / (4.0 * x * epowr + 4.0 * x**3.0))
plt.plot(x, chris_ttr_analytic, '--')
#plt.plot(x, 1.0/x, '--')
plt.xlabel('r')
plt.ylabel('chris value')
#plt.xlim(0,3)
#plt.ylim(-1,1)
plt.grid()
```

```python
# Plot christoffels
chris_ppr_d = chris_ppr[num_ghosts:(N-num_ghosts)]
plt.plot(r_diagnostics, chris_ppr_d)
plt.plot(r,r_chris_ppr)
# compare to analytic result for R for test 1
epowr = np.exp(r_diagnostics)
x = r_diagnostics
chris_ppr_analytic = chris_ttr_analytic
plt.plot(x, chris_ppr_analytic, '--')
plt.xlabel('r')
plt.ylabel('chris value')
#plt.xlim(0,3)
#plt.ylim(-1,1)
plt.grid()
```

```python
# Plot comps of Ricci - R_rr
Ricci_ij_d = Ricci_rr[num_ghosts:(N-num_ghosts)]
plt.plot(r_diagnostics, Ricci_ij_d)
plt.plot(r, rbar_Rij[i_r][i_r])
# compare to analytic result for R for test 1
epowr = np.exp(r_diagnostics)
x = r_diagnostics
Ricci_analytic = ( (1.0/8.0) * (epowr + x*x)**-2.0
* ( x*x * (12 - 4.0*x - 3.0*x*x)
+ 4.0 * epowr * (10.0 - 8.0*x + x*x)))
plt.plot(x, Ricci_analytic, '--')
plt.xlabel('r')
plt.ylabel('Rij value')
plt.xlim(0,7)
#plt.ylim(0,1)
plt.grid()
```

```python
# Plot comps of Ricci - R_tt
Ricci_ij_d = Ricci_tt[num_ghosts:(N-num_ghosts)]
plt.plot(r_diagnostics, Ricci_ij_d)
plt.plot(r_diagnostics, r_diagnostics * r_diagnostics * rbar_Rij[i_t][i_t][num_ghosts:(N-num_ghosts)])
# compare to analytic result for R for test 1
epowr = np.exp(r_diagnostics)
x = r_diagnostics
tmp = (1.0 + x*x/epowr)**(0.5)
Ricci_analytic_tt = ( 0.25 * epowr**2.0 * tmp * ((epowr + x*x)**(-4.0))
* ( -4.0 * epowr**2.0
+ epowr * x**2.0 * (6.0 -10.0*x +x*x)
- x**4.0 * (-2.0 + 2.0*x + x*x))) + 1.0
plt.plot(x, Ricci_analytic_tt, '--')
plt.xlabel('r')
plt.ylabel('Rij value')
plt.xlim(0,5)
plt.ylim(-1,1)
plt.grid()
```

```python
# Plot comps of Ricci - R_pp
Ricci_ij_d = Ricci_pp[num_ghosts:(N-num_ghosts)]
plt.plot(r_diagnostics, Ricci_ij_d)
plt.plot(r_diagnostics, r_diagnostics * r_diagnostics * rbar_Rij[i_p][i_p][num_ghosts:(N-num_ghosts)])
# compare to analytic result for R for test 1
epowr = np.exp(r_diagnostics)
x = r_diagnostics
Ricci_analytic_pp = Ricci_analytic_tt
plt.plot(x, Ricci_analytic_pp, '--')
plt.xlabel('r')
plt.ylabel('Rij value')
plt.xlim(0,5)
plt.ylim(-1,1)
plt.grid()
```

```python
# This test sets up values such that R is known but bar_R = 0
initial_state = get_test_state_2(my_grid)
#unpackage the vector for readability
u, v , phi, hrr, htt, hpp, K, arr, att, app, lambdar, shiftr, br, lapse = np.array_split(initial_state, NUM_VARS)
#plot initial conditions
plt.xlabel('r')
plt.plot(r, phi, label='phi')
plt.plot(r, hrr, label='hrr')
plt.plot(r, htt, label='htt')
plt.plot(r, lambdar, label='lambdar')
plt.legend(loc='best')
plt.grid()
```

```python
# get the various derivs that we need to evolve things in vector form
# (same derivative boilerplate as the test-1 cell above)
# second derivatives
d2phidx2 = np.dot(my_grid.derivatives.d2_matrix, phi )
d2hrrdx2 = np.dot(my_grid.derivatives.d2_matrix, hrr )
d2httdx2 = np.dot(my_grid.derivatives.d2_matrix, htt )
d2hppdx2 = np.dot(my_grid.derivatives.d2_matrix, hpp )
d2lapsedx2 = np.dot(my_grid.derivatives.d2_matrix, lapse )
d2shiftrdx2 = np.dot(my_grid.derivatives.d2_matrix, shiftr)
# first derivatives
dphidx = np.dot(my_grid.derivatives.d1_matrix, phi )
dhrrdx = np.dot(my_grid.derivatives.d1_matrix, hrr )
dhttdx = np.dot(my_grid.derivatives.d1_matrix, htt )
dhppdx = np.dot(my_grid.derivatives.d1_matrix, hpp )
darrdx = np.dot(my_grid.derivatives.d1_matrix, arr )
dattdx = np.dot(my_grid.derivatives.d1_matrix, att )
dappdx = np.dot(my_grid.derivatives.d1_matrix, app )
dKdx = np.dot(my_grid.derivatives.d1_matrix, K )
dlambdardx = np.dot(my_grid.derivatives.d1_matrix, lambdar)
dshiftrdx = np.dot(my_grid.derivatives.d1_matrix, shiftr )
dbrdx = np.dot(my_grid.derivatives.d1_matrix, br )
dlapsedx = np.dot(my_grid.derivatives.d1_matrix, lapse )
# preallocate diagnostic arrays (all overwritten below)
bar_R_test = np.zeros_like(phi)
chris_rrr = np.zeros_like(phi)
chris_ttr = np.zeros_like(phi)
chris_ppr = np.zeros_like(phi)
Ricci_rr = np.zeros_like(phi)
Ricci_tt = np.zeros_like(phi)
Ricci_pp = np.zeros_like(phi)
# pack the diagonal metric/curvature components into (3, N) arrays
h_tensor = np.array([hrr, htt, hpp])
a_tensor = np.array([arr, att, app])
em4phi = np.exp(-4.0*phi)  # conformal factor e^{-4 phi}
dhdr = np.array([dhrrdx, dhttdx, dhppdx])
d2hdr2 = np.array([d2hrrdx2, d2httdx2, d2hppdx2])
# Calculate some useful quantities
########################################################
# \hat \Gamma^i_jk
flat_chris = get_flat_spherical_chris(r)
# (unscaled) \bar\gamma_ij and \bar\gamma^ij
bar_gamma_LL = get_metric(r, h_tensor)
bar_gamma_UU = get_inverse_metric(r, h_tensor)
# The connections Delta^i, Delta^i_jk and Delta_ijk
Delta_U, Delta_ULL, Delta_LLL = get_connection(r, bar_gamma_UU, bar_gamma_LL, h_tensor, dhdr)
conformal_chris = get_conformal_chris(Delta_ULL, r)
chris_rrr = conformal_chris[i_r][i_r][i_r][:]
chris_ttr = conformal_chris[i_t][i_t][i_r][:]
chris_ppr = conformal_chris[i_p][i_p][i_r][:]
bar_Rij = get_ricci_tensor(r, h_tensor, dhdr, d2hdr2, lambdar, dlambdardx,
                           Delta_U, Delta_ULL, Delta_LLL, bar_gamma_UU, bar_gamma_LL)
Ricci_rr = bar_Rij[i_r][i_r]
Ricci_tt = bar_Rij[i_t][i_t]
Ricci_pp = bar_Rij[i_p][i_p]
bar_Ricci_tensor = np.array([Ricci_rr,Ricci_tt,Ricci_pp])
bar_R_test = get_trace(bar_Ricci_tensor, bar_gamma_UU)
r_diagnostics = r[num_ghosts:(N-num_ghosts)]
# Get the Ham constraint eqn (13) of Baumgarte https://arxiv.org/abs/1211.6632
# NOTE(review): the bar_gamma_UU[i_t] / bar_gamma_UU[i_p] factors below take only ONE
# index, unlike bar_gamma_UU[i_r][i_r] above — possibly missing the second index
# ([i_t][i_t], [i_p][i_p]). Verify the intended array shapes before trusting this result.
Ham_test = em4phi * ( bar_R_test #should be zero
                     - 8.0 * bar_gamma_UU[i_r][i_r] * (dphidx * dphidx + d2phidx2)
                     # These terms come from \bar\Gamma^r d_r \phi from the \bar D^2 \phi term
                     + 8.0 * bar_gamma_UU[i_t] * dphidx * flat_chris[i_r][i_t][i_t]
                     + 8.0 * bar_gamma_UU[i_p] * dphidx * flat_chris[i_r][i_p][i_p]
                     + 8.0 * Delta_U[i_r] * dphidx) #should be zero
```
```python
# Plot the result
Ham_plot = Ham_test[num_ghosts:(N-num_ghosts)]
plt.plot(r_diagnostics, Ham_plot)
#analytic result for test 2
epowr = np.exp(r_diagnostics)
x = r_diagnostics
R_analytic = - (0.5 * epowr * (epowr + x*x)**-3.0
* ( x*x * (12.0 - 12.0*x + x*x)
+ 4.0 * epowr * (6.0 - 6.0* x + x*x)))
plt.plot(x, R_analytic, '--')
plt.xlabel('r')
plt.ylabel('Ham value')
plt.xlim(0,3)
#plt.ylim(-12,1)
plt.grid()
```

```python
```
```python
```
|
GRTLCollaborationREPO_NAMEengrenagePATH_START.@engrenage_extracted@engrenage-main@tests@GRGeometryTests.ipynb@.PATH_END.py
|
{
"filename": "test_regression.py",
"repo_name": "rhayes777/PyAutoFit",
"repo_path": "PyAutoFit_extracted/PyAutoFit-main/test_autofit/mapper/prior/test_regression.py",
"type": "Python"
}
|
import pytest
import autofit as af
@pytest.fixture(name="prior")
def make_prior():
    """Gaussian prior fixture with distinct mean, sigma and limits."""
    gaussian = af.GaussianPrior(mean=1, sigma=2, lower_limit=3, upper_limit=4)
    return gaussian
@pytest.fixture(name="message")
def make_message(prior):
    """The message object wrapped by the ``prior`` fixture."""
    msg = prior.message
    return msg
def test_copy_limits(message):
    """Copying a message must preserve both of its limits."""
    duplicate = message.copy()
    for attr in ("lower_limit", "upper_limit"):
        assert getattr(duplicate, attr) == getattr(message, attr)
def test_multiply_limits(message):
    """Multiplying messages (message*message, scalar*message) keeps limits."""
    for product in (message * message, 1 * message):
        assert product.lower_limit == message.lower_limit
        assert product.upper_limit == message.upper_limit
@pytest.fixture(name="uniform_prior")
def make_uniform_prior():
    """Uniform prior on [10, 20], used to replace the Gaussian prior."""
    return af.UniformPrior(lower_limit=10, upper_limit=20)
def test_sum_from_arguments(prior, uniform_prior):
    """prior + prior, with prior replaced by the uniform prior, has median 15 + 15."""
    total = prior + prior
    replaced = total.gaussian_prior_model_for_arguments({prior: uniform_prior})
    assert replaced.instance_from_prior_medians() == 30
def test_negative_from_arguments(prior, uniform_prior):
    """-prior, with prior replaced by the uniform prior, has median -15."""
    flipped = -prior
    replaced = flipped.gaussian_prior_model_for_arguments({prior: uniform_prior})
    assert replaced.instance_from_prior_medians() == -15
def test_sum_with_float(prior, uniform_prior):
    """prior + 15, with prior replaced by the uniform prior, has median 15 + 15."""
    shifted = prior + 15
    replaced = shifted.gaussian_prior_model_for_arguments({prior: uniform_prior})
    assert replaced.instance_from_prior_medians() == 30
|
rhayes777REPO_NAMEPyAutoFitPATH_START.@PyAutoFit_extracted@PyAutoFit-main@test_autofit@mapper@prior@test_regression.py@.PATH_END.py
|
{
"filename": "model_fitting.py",
"repo_name": "spacetelescope/jdaviz",
"repo_path": "jdaviz_extracted/jdaviz-main/jdaviz/configs/default/plugins/model_fitting/model_fitting.py",
"type": "Python"
}
|
import re
import numpy as np
from copy import deepcopy
import astropy.units as u
from specutils import Spectrum1D
from specutils.utils import QuantityModel
from traitlets import Bool, List, Unicode, observe
from jdaviz.configs.default.plugins.model_fitting.fitting_backend import fit_model_to_spectrum
from jdaviz.configs.default.plugins.model_fitting.initializers import (MODELS,
initialize,
get_model_parameters)
from jdaviz.core.events import SnackbarMessage, GlobalDisplayUnitChanged
from jdaviz.core.registries import tray_registry
from jdaviz.core.template_mixin import (PluginTemplateMixin,
SelectPluginComponent,
SpectralSubsetSelectMixin,
DatasetSelectMixin,
DatasetSpectralSubsetValidMixin,
NonFiniteUncertaintyMismatchMixin,
AutoTextField,
AddResultsMixin,
TableMixin,
with_spinner)
from jdaviz.core.custom_traitlets import IntHandleEmpty
from jdaviz.core.user_api import PluginUserApi
from jdaviz.core.unit_conversion_utils import (all_flux_unit_conversion_equivs,
flux_conversion_general)
__all__ = ['ModelFitting']
class _EmptyParam:
    """Minimal stand-in for a model parameter: a value plus an optional unit."""

    def __init__(self, value, unit=None):
        self.value = value
        self.unit = unit
        # expose the value as an astropy Quantity for downstream unit handling;
        # a missing unit is treated as dimensionless
        effective_unit = u.dimensionless_unscaled if unit is None else unit
        self.quantity = u.Quantity(value, effective_unit)
@tray_registry('g-model-fitting', label="Model Fitting", viewer_requirements='spectrum')
class ModelFitting(PluginTemplateMixin, DatasetSelectMixin,
SpectralSubsetSelectMixin, DatasetSpectralSubsetValidMixin,
NonFiniteUncertaintyMismatchMixin,
AddResultsMixin, TableMixin):
"""
See the :ref:`Model Fitting Plugin Documentation <specviz-model-fitting>` for more details.
Only the following attributes and methods are available through the
:ref:`public plugin API <plugin-apis>`:
* :meth:`~jdaviz.core.template_mixin.PluginTemplateMixin.show`
* :meth:`~jdaviz.core.template_mixin.PluginTemplateMixin.open_in_tray`
* :meth:`~jdaviz.core.template_mixin.PluginTemplateMixin.close_in_tray`
* ``cube_fit``
Only exposed for Cubeviz. Whether to fit the model to the cube instead of to the
collapsed spectrum.
* ``dataset`` (:class:`~jdaviz.core.template_mixin.DatasetSelect`):
Dataset to fit the model.
* ``spectral_subset`` (:class:`~jdaviz.core.template_mixin.SubsetSelect`)
* ``model_component`` (:class:`~jdaviz.core.template_mixin.SelectPluginComponent`)
* ``poly_order``
* ``model_component_label`` (:class:`~jdaviz.core.template_mixin.AutoTextField`)
* :meth:`create_model_component`
* :meth:`remove_model_component`
* :meth:`model_components`
* :meth:`valid_model_components`
* :meth:`get_model_component`
* :meth:`set_model_component`
* :meth:`reestimate_model_parameters`
* ``equation`` (:class:`~jdaviz.core.template_mixin.AutoTextField`)
* :meth:`equation_components`
* ``add_results`` (:class:`~jdaviz.core.template_mixin.AddResults`)
* ``residuals_calculate`` (bool)
Whether to calculate and expose the residuals (model minus data).
* ``residuals`` (:class:`~jdaviz.core.template_mixin.AutoTextField`)
Label of the residuals to apply when calling :meth:`calculate_fit` if ``residuals_calculate``
is ``True``.
* :meth:`calculate_fit`
"""
dialog = Bool(False).tag(sync=True)
template_file = __file__, "model_fitting.vue"
form_valid_model_component = Bool(False).tag(sync=True)
# model components:
model_comp_items = List().tag(sync=True)
model_comp_selected = Unicode().tag(sync=True)
poly_order = IntHandleEmpty(0).tag(sync=True)
comp_label = Unicode().tag(sync=True)
comp_label_default = Unicode().tag(sync=True)
comp_label_auto = Bool(True).tag(sync=True)
comp_label_invalid_msg = Unicode().tag(sync=True)
model_equation = Unicode().tag(sync=True)
model_equation_default = Unicode().tag(sync=True)
model_equation_auto = Bool(True).tag(sync=True)
model_equation_invalid_msg = Unicode().tag(sync=True)
eq_error = Bool(False).tag(sync=True)
component_models = List([]).tag(sync=True)
display_order = Bool(False).tag(sync=True)
cube_fit = Bool(False).tag(sync=True)
# residuals (non-cube fit only)
residuals_calculate = Bool(False).tag(sync=True)
residuals_label = Unicode().tag(sync=True)
residuals_label_default = Unicode().tag(sync=True)
residuals_label_auto = Bool(True).tag(sync=True)
residuals_label_invalid_msg = Unicode('').tag(sync=True)
    def __init__(self, *args, **kwargs):
        """Set up plugin state, UI components, table columns and unit subscription."""
        super().__init__(*args, **kwargs)

        self._units = {}                 # maps 'x'/'y' to unit strings used for parameters
        self._fitted_model = None        # last fitted model (set by calculate_fit)
        self._fitted_spectrum = None
        self.component_models = []
        self._initialized_models = {}    # maps component id -> initialized astropy model
        self._display_order = False
        # description displayed under plugin title in tray
        self._plugin_description = 'Fit an analytic model to data or a subset of data.'

        # create the label first so that when model_component defaults to the first selection,
        # the label automatically defaults as well
        self.model_component_label = AutoTextField(self, 'comp_label', 'comp_label_default',
                                                   'comp_label_auto', 'comp_label_invalid_msg')

        self.model_component = SelectPluginComponent(self,
                                                     items='model_comp_items',
                                                     selected='model_comp_selected',
                                                     manual_options=list(MODELS.keys()))

        if self.config == 'cubeviz':
            # use mean whenever extracting the 1D spectrum of a cube to initialize model params
            self.dataset._spectral_extraction_function = 'Mean'
            # by default, require entries to be in spectrum-viewer (not other cubeviz images, etc)
            # in cubeviz, the cube_fit toggle will then replace this filter to filter for cubes
            self.dataset.add_filter('layer_in_spectrum_viewer')

        self.equation = AutoTextField(self, 'model_equation', 'model_equation_default',
                                      'model_equation_auto', 'model_equation_invalid_msg')

        self.residuals = AutoTextField(self, 'residuals_label', 'residuals_label_default',
                                       'residuals_label_auto', 'residuals_label_invalid_msg')

        headers = ['model', 'data_label', 'spectral_subset', 'equation']
        if self.config == 'cubeviz':
            headers += ['cube_fit']
        self.table.headers_avail = headers
        self.table.headers_visible = headers
        # when model parameters are added as columns, only show the value/unit columns by default
        # (other columns can be shown in the dropdown by the user)
        self.table._new_col_visible = lambda colname: colname.split(':')[-1] not in ('fixed', 'uncert', 'std')  # noqa

        # set the filter on the viewer options
        self._update_viewer_filters()

        # keep parameter units in sync with app-wide display-unit changes
        self.hub.subscribe(self, GlobalDisplayUnitChanged,
                           handler=self._on_global_display_unit_changed)
@property
def _default_spectrum_viewer_reference_name(self):
return getattr(
self.app._jdaviz_helper, '_default_spectrum_viewer_reference_name', 'spectrum-viewer'
)
@property
def _default_flux_viewer_reference_name(self):
return getattr(
self.app._jdaviz_helper, '_default_flux_viewer_reference_name', 'flux-viewer'
)
@property
def user_api(self):
expose = ['dataset']
if self.config == "cubeviz":
expose += ['cube_fit']
expose += ['spectral_subset', 'model_component', 'poly_order', 'model_component_label',
'model_components', 'valid_model_components',
'create_model_component', 'remove_model_component',
'get_model_component', 'set_model_component', 'reestimate_model_parameters',
'equation', 'equation_components',
'add_results', 'residuals_calculate', 'residuals']
expose += ['calculate_fit', 'clear_table', 'export_table']
return PluginUserApi(self, expose=expose)
def _param_units(self, param, model_type=None):
"""Helper function to handle units that depend on x and y"""
y_params = ["amplitude", "amplitude_L", "intercept", "scale"]
if param == "slope":
return str(u.Unit(self._units["y"]) / u.Unit(self._units["x"]))
elif model_type == 'Polynomial1D':
# param names are all named cN, where N is the order
order = int(float(param[1:]))
return str(u.Unit(self._units["y"]) / u.Unit(self._units["x"])**order)
elif param == "temperature":
return str(u.K)
elif param == "scale" and model_type == "BlackBody":
return str("")
return self._units["y"] if param in y_params else self._units["x"]
    def _update_parameters_from_fit(self):
        """Insert the results of the model fit into the component_models"""
        for m in self.component_models:
            submodel_index = None
            name = m["id"]
            if hasattr(self._fitted_model, "submodel_names"):
                # compound model: locate this component among the submodels
                for i in range(len(self._fitted_model.submodel_names)):
                    if name == self._fitted_model.submodel_names[i]:
                        m_fit = self._fitted_model[name]
                        submodel_index = i
                if submodel_index is None:
                    continue
            elif self._fitted_model.name == name:
                # single (non-compound) model matching this component
                m_fit = self._fitted_model
            else:
                # then the component was not in the fitted model
                continue
            temp_params = []
            for i in range(0, len(m_fit.parameters)):
                # find the stored parameter dict matching this fitted parameter name
                temp_param = [x for x in m["parameters"] if x["name"] ==
                              m_fit.param_names[i]]
                temp_param[0]["value"] = m_fit.parameters[i]
                # The submodels don't have uncertainties attached, only the compound model
                if self._fitted_model.stds is not None:
                    std_name = temp_param[0]["name"]
                    if submodel_index is not None:
                        # compound-model std names carry a _<submodel index> suffix
                        std_name = f"{std_name}_{submodel_index}"
                    if std_name in self._fitted_model.stds.param_names:
                        temp_param[0]["std"] = self._fitted_model.stds[std_name]
                temp_params += temp_param
            m["parameters"] = temp_params
        # push the updated parameter values to the front-end
        self.send_state('component_models')
    def _update_parameters_from_QM(self):
        """
        Parse out result parameters from a QuantityModel, which isn't
        subscriptable with model name
        """
        if hasattr(self._fitted_model, "submodel_names"):
            submodel_names = self._fitted_model.submodel_names
            submodels = True
        else:
            submodel_names = [self._fitted_model.name]
            submodels = False
        fit_params = self._fitted_model.parameters
        param_names = self._fitted_model.param_names
        for i in range(len(submodel_names)):
            name = submodel_names[i]
            m = [x for x in self.component_models if x["id"] == name][0]
            temp_params = []
            if submodels:
                # parameters belonging to submodel i end with the suffix "_<i>"
                idxs = [j for j in range(len(param_names)) if
                        int(param_names[j][-1]) == i]
            else:
                idxs = [j for j in range(len(param_names))]
            # This is complicated by needing to handle parameter names that
            # have underscores in them, since QuantityModel adds an underscore
            # and integer to indicate to which model a parameter belongs
            for idx in idxs:
                if submodels:
                    # strip the trailing "_<submodel index>" to recover the base name
                    temp_param = [x for x in m["parameters"] if x["name"] ==
                                  "_".join(param_names[idx].split("_")[0:-1])]
                else:
                    temp_param = [x for x in m["parameters"] if x["name"] ==
                                  param_names[idx]]
                temp_param[0]["value"] = fit_params[idx]
                temp_params += temp_param
            m["parameters"] = temp_params
        # push the updated parameter values to the front-end
        self.send_state('component_models')
def _update_initialized_parameters(self):
# If the user changes a parameter value, we need to change it in the
# initialized model
for m in self.component_models:
name = m["id"]
for param in m["parameters"]:
quant_param = u.Quantity(param["value"], param["unit"])
setattr(self._initialized_models[name], param["name"],
quant_param)
def _warn_if_no_equation(self):
if self.model_equation == "" or self.model_equation is None:
example = "+".join([m["id"] for m in self.component_models])
snackbar_message = SnackbarMessage(
f"Error: a model equation must be defined, e.g. {example}",
color='error',
sender=self)
self.hub.broadcast(snackbar_message)
return True
else:
return False
def _update_viewer_filters(self, event={}):
if event.get('new', self.cube_fit):
# only want image viewers in the options
self.add_results.viewer.filters = ['is_image_viewer']
else:
# only want spectral viewers in the options
self.add_results.viewer.filters = ['is_spectrum_viewer']
    @observe('cube_fit')
    def _cube_fit_changed(self, event={}):
        # Toggle between fitting the collapsed spectrum and the full cube:
        # update viewer filters, the y-unit, and the dataset filters accordingly.
        self._update_viewer_filters(event=event)

        sb_unit = self.app._get_display_unit('sb')
        spectral_y_unit = self.app._get_display_unit('spectral_y')

        if event.get('new'):
            # cube fit: y values are in surface-brightness units
            self._units['y'] = sb_unit
            self.dataset.add_filter('is_flux_cube')
            self.dataset.remove_filter('layer_in_spectrum_viewer')
        else:
            self._units['y'] = spectral_y_unit
            self.dataset.add_filter('layer_in_spectrum_viewer')
            self.dataset.remove_filter('is_flux_cube')
        self.dataset._clear_cache()

        if sb_unit != spectral_y_unit:
            # We make the user hit the reestimate button themselves
            for model_index, comp_model in enumerate(self.component_models):
                self.component_models[model_index]["compat_display_units"] = False
            self.send_state('component_models')
@observe("dataset_selected")
def _dataset_selected_changed(self, event=None):
"""
Callback method for when the user has selected data from the drop down
in the front-end. It is here that we actually parse and create a new
data object from the selected data. From this data object, unit
information is scraped, and the selected spectrum is stored for later
use in fitting.
Parameters
----------
event : str
IPyWidget callback event object. In this case, represents the data
label of the data collection object selected by the user.
"""
if not hasattr(self, 'dataset') or self.app._jdaviz_helper is None or self.dataset_selected == '': # noqa
# during initial init, this can trigger before the component is initialized
return
selected_spec = self.dataset.selected_obj
if selected_spec is None:
return
# Replace NaNs from collapsed Spectrum1D in Cubeviz
# (won't affect calculations because these locations are masked)
selected_spec.flux[np.isnan(selected_spec.flux)] = 0.0
def _default_comp_label(self, model, poly_order=None):
abbrevs = {'BlackBody': 'BB', 'PowerLaw': 'PL', 'Lorentz1D': 'Lo'}
abbrev = abbrevs.get(model, model[0].upper())
if model == "Polynomial1D":
abbrev += f'{poly_order}'
# append a number suffix to avoid any duplicates
ind = 1
while abbrev in [cm['id'] for cm in self.component_models]:
abbrev = f'{abbrev.split("_")[0]}_{ind}'
ind += 1
return abbrev
@observe('model_comp_selected', 'poly_order')
def _update_comp_label_default(self, event={}):
self.display_order = self.model_comp_selected == "Polynomial1D"
self.comp_label_default = self._default_comp_label(self.model_comp_selected,
self.poly_order)
@observe('comp_label')
def _comp_label_changed(self, event={}):
if not len(self.comp_label.strip()):
# strip will raise the same error for a label of all spaces
self.comp_label_invalid_msg = 'label must be provided'
return
if self.comp_label in [cm['id'] for cm in self.component_models]:
self.comp_label_invalid_msg = 'label already in use'
return
self.comp_label_invalid_msg = ''
def _update_model_equation_default(self):
self.model_equation_default = '+'.join(cm['id'] for cm in self.component_models)
    def _reinitialize_with_fixed(self):
        """
        Reinitialize all component models with current values and the
        specified parameters fixed (can't easily update fixed dictionary in
        an existing model)

        Returns
        -------
        list
            Freshly-constructed models, one per entry in ``component_models``.
        """
        temp_models = []
        for m in self.component_models:
            fixed = {}

            # Set the initial values as quantities to make sure model units
            # are set correctly.
            initial_values = {p["name"]: u.Quantity(p["value"], p["unit"]) for p in m["parameters"]}

            # collect the per-parameter fixed flags from the stored dicts
            for p in m["parameters"]:
                fixed[p["name"]] = p["fixed"]

            # Have to initialize with fixed dictionary
            temp_model = MODELS[m["model_type"]](name=m["id"], fixed=fixed,
                                                 **initial_values, **m.get("model_kwargs", {}))
            temp_models.append(temp_model)

        return temp_models
    def create_model_component(self, model_component=None, model_component_label=None,
                               poly_order=None):
        """
        Add a new model component to the list of available model components

        Parameters
        ----------
        model_component : str
            Type of model component to add.  If not provided, will default according to
            ``model_component``.
        model_component_label : str
            Name of the model component to add.  If not provided, will default according to
            ``model_component_label`` (if ``model_component_label.auto`` is True and
            ``model_component`` is passed as an argument, then the default label will be recomputed
            rather than applying the current value).
        poly_order : int
            Order of the polynomial if ``model_component`` is (or defaults to) "Polynomial1D".
            Will raise an error if provided and ``model_component`` is not "Polynomial1D".
            If not provided, will default according to ``poly_order``.

        Raises
        ------
        ValueError
            If ``poly_order`` is passed for a non-polynomial component, or if the
            label is invalid or already in use.
        """
        model_comp = model_component if model_component is not None else self.model_comp_selected

        if model_comp != "Polynomial1D" and poly_order is not None:
            raise ValueError("poly_order should only be passed if model_component is Polynomial1D")  # noqa
        poly_order = poly_order if poly_order is not None else self.poly_order

        # if model_component was passed and different than the one set in the traitlet, AND
        # model_component_label is not passed, AND the auto is enabled on the label, then
        # recompute a temporary default model label rather than use the value set in the traitlet
        if model_comp != self.model_comp_selected and model_component_label is None and self.model_component_label.auto:  # noqa
            comp_label = self._default_comp_label(model_comp, poly_order)
        else:
            comp_label = model_component_label if model_component_label is not None else self.comp_label  # noqa

        # validate provided label (only allow "word characters").  These should already be
        # stripped by JS in the UI element, but we'll confirm here (especially if this is ever
        # extended to have better API-support)
        if re.search(r'\W+', comp_label) or not len(comp_label):
            raise ValueError(f"invalid model component label '{comp_label}'")

        if comp_label in [cm['id'] for cm in self.component_models]:
            raise ValueError(f"model component label '{comp_label}' already in use")

        new_model = self._initialize_model_component(model_comp, comp_label, poly_order=poly_order)
        # reassign (rather than mutate) so the traitlet change is broadcast to the UI
        self.component_models = self.component_models + [new_model]
        # update the default label (likely adding the suffix)
        self._update_comp_label_default()
        self._update_model_equation_default()
def _initialize_model_component(self, model_comp, comp_label, poly_order=None):
    """
    Build the internal dict representation of a new model component and an
    initialized astropy model instance with parameter guesses estimated from
    the currently-selected (masked) data.

    Parameters
    ----------
    model_comp : str
        Key into ``MODELS`` naming the astropy model class (e.g. "Gaussian1D").
    comp_label : str
        Unique label for this component; used as the dict "id" and model name.
    poly_order : int, optional
        Polynomial degree; only meaningful when ``model_comp`` is "Polynomial1D".

    Returns
    -------
    dict
        Serializable component description (id, model_type, parameters,
        model_kwargs, initialized display units, compat flag).
    """
    new_model = {"id": comp_label, "model_type": model_comp,
                 "parameters": [], "model_kwargs": {}}
    model_cls = MODELS[model_comp]
    # Need to set the units the first time we initialize a model component, after this
    # we listen for display unit changes
    if self._units.get('x', '') == '':
        self._units['x'] = self.app._get_display_unit('spectral')
    if self._units.get('y', '') == '':
        if self.cube_fit:
            # cube fits operate in surface-brightness units
            self._units['y'] = self.app._get_display_unit('sb')
        else:
            self._units['y'] = self.app._get_display_unit('spectral_y')
    if model_comp == "Polynomial1D":
        # self.poly_order is the value in the widget for creating
        # the new model component. We need to store that with the
        # model itself as the value could change for another component.
        new_model["model_kwargs"] = {"degree": poly_order}
    elif model_comp == "BlackBody":
        # scale must be non-negative for a physical blackbody
        new_model["model_kwargs"] = {"output_units": self._units["y"],
                                     "bounds": {"scale": (0.0, None)}}
    initial_values = {}
    for param_name in get_model_parameters(model_cls, new_model["model_kwargs"]):
        # access the default value from the model class itself
        default_param = getattr(model_cls, param_name, _EmptyParam(0))
        default_units = self._param_units(param_name,
                                          model_type=new_model["model_type"])
        if default_param.unit is None:
            # then the model parameter accepts unitless, but we want
            # to pass with appropriate default units
            initial_val = u.Quantity(default_param.value, default_units)
        else:
            # then the model parameter has default units. We want to pass
            # with jdaviz default units (based on x/y units) but need to
            # convert the default parameter unit to these units
            if default_param.unit != default_units:
                # NOTE(review): assumes the first data_collection entry carries
                # the relevant PIXAR_SR metadata -- TODO confirm for multi-data apps
                pixar_sr = self.app.data_collection[0].meta.get('PIXAR_SR', 1)
                viewer = self.app.get_viewer("spectrum-viewer")
                cube_wave = viewer.slice_value * u.Unit(self.app._get_display_unit('spectral'))
                equivs = all_flux_unit_conversion_equivs(pixar_sr, cube_wave)
                initial_val = flux_conversion_general([default_param.value],
                                                      default_param.unit,
                                                      default_units, equivs)
            else:
                initial_val = default_param
        initial_values[param_name] = initial_val
    if self.cube_fit:
        # We need to input the whole cube when initializing the model so the units are correct.
        if self.dataset_selected in self.app.data_collection.labels:
            data = self.app.data_collection[self.dataset_selected].get_object(statistic=None)
        else:  # User selected some subset from spectrum viewer, just use original cube
            data = self.app.data_collection[0].get_object(statistic=None)
        masked_spectrum = self._apply_subset_masks(data, self.spectral_subset)
    else:
        masked_spectrum = self._apply_subset_masks(self.dataset.selected_spectrum,
                                                   self.spectral_subset)
    mask = masked_spectrum.mask
    if mask is not None:
        if mask.ndim == 3:
            # collapse the spatial axes: a spectral slice is masked only
            # when it is masked in every spaxel
            spectral_mask = mask.all(axis=(0, 1))
        else:
            spectral_mask = mask
        init_x = masked_spectrum.spectral_axis[~spectral_mask]
        orig_flux_shape = masked_spectrum.flux.shape
        init_y = masked_spectrum.flux[~mask]
        if mask.ndim == 3:
            # fancy-indexing flattened the cube; restore (ny, nx, n_spectral)
            init_y = init_y.reshape(orig_flux_shape[0],
                                    orig_flux_shape[1],
                                    len(init_x))
    else:
        init_x = masked_spectrum.spectral_axis
        init_y = masked_spectrum.flux
    if init_y.unit != self._units['y']:
        # equivs for spectral density and flux<>sb
        pixar_sr = masked_spectrum.meta.get('_pixel_scale_factor', 1.0)
        equivs = all_flux_unit_conversion_equivs(pixar_sr, init_x)
        init_y = flux_conversion_general([init_y.value],
                                         init_y.unit,
                                         self._units['y'],
                                         equivs)
    # let specutils' initializer refine the default parameter guesses from data
    initialized_model = initialize(
        MODELS[model_comp](name=comp_label,
                           **initial_values,
                           **new_model.get("model_kwargs", {})),
        init_x, init_y)
    # need to loop over parameters again as the initializer may have overridden
    # the original default value. However, if we toggled cube_fit, we may need to override
    for param_name in get_model_parameters(model_cls, new_model["model_kwargs"]):
        param_quant = getattr(initialized_model, param_name)
        new_model["parameters"].append({"name": param_name,
                                        "value": param_quant.value,
                                        "unit": str(param_quant.unit),
                                        "fixed": False})
    self._initialized_models[comp_label] = initialized_model
    new_model["Initialized"] = True
    new_model["initialized_display_units"] = self._units.copy()
    new_model["compat_display_units"] = True  # always compatible at time of creation
    return new_model
def _check_model_component_compat(self, axes=None, display_units=None):
    """
    Flag each model component as (in)compatible with the current display units.

    Parameters
    ----------
    axes : list of str, optional
        Which axes to check ('x' and/or 'y').  Defaults to both.
    display_units : list of `~astropy.units.Unit`, optional
        Units to compare against, parallel to ``axes``.  Defaults to the
        plugin's currently-tracked unit for each axis.
    """
    # avoid the mutable-default-argument pitfall: use None as the sentinel
    if axes is None:
        axes = ['x', 'y']
    if display_units is None:
        display_units = [u.Unit(self._units[ax]) for ax in axes]
    disp_physical_types = [unit.physical_type for unit in display_units]
    for model_index, comp_model in enumerate(self.component_models):
        compat = True
        for ax, ax_physical_type in zip(axes, disp_physical_types):
            comp_unit = u.Unit(comp_model["initialized_display_units"][ax])
            # component is compatible only if every checked axis matches
            compat = comp_unit.physical_type == ax_physical_type
            if not compat:
                break
        self.component_models[model_index]["compat_display_units"] = compat
    # length hasn't changed, so we need to force the traitlet to update
    self.send_state("component_models")
    self._check_model_equation_invalid()
def _on_global_display_unit_changed(self, msg):
    """
    React to an app-wide display-unit change message by updating the
    plugin's tracked units and re-checking component compatibility.
    """
    # map the message's axis name onto the plugin's internal axis key
    axis_lookup = {'spectral_y': 'y', 'spectral': 'x'}
    axis = axis_lookup.get(msg.axis)
    if axis is None:
        # unrelated axis; nothing for this plugin to do
        return
    if axis == 'y' and self.cube_fit:
        # The units have to be in surface brightness for a cube fit.
        uc = self.app._jdaviz_helper.plugins['Unit Conversion']
        sb_unit = uc._obj.sb_unit_selected
        if msg.unit != sb_unit:
            self._units[axis] = sb_unit
            self._check_model_component_compat([axis], [u.Unit(sb_unit)])
            return
    # update internal tracking of current units
    self._units[axis] = str(msg.unit)
    self._check_model_component_compat([axis], [msg.unit])
def remove_model_component(self, model_component_label):
    """
    Remove an existing model component.

    Parameters
    ----------
    model_component_label : str
        The label given to the existing model component

    Raises
    ------
    ValueError
        If no component with that label exists.
    """
    existing_labels = [cm["id"] for cm in self.component_models]
    if model_component_label not in existing_labels:
        raise ValueError(f"model component with label '{model_component_label}' does not exist")
    # drop the component from both the traitlet list and the model cache
    self.component_models = [cm for cm in self.component_models
                             if cm["id"] != model_component_label]
    del self._initialized_models[model_component_label]
    self._update_comp_label_default()
    self._update_model_equation_default()
def get_model_component(self, model_component_label, parameter=None):
    """
    Get a (read-only) dictionary representation of an existing model component.

    Parameters
    ----------
    model_component_label : str
        The label given to the existing model component
    parameter : str
        Optional. The name of a valid parameter in the model component, in which case only
        the information on that parameter is returned.
    """
    matches = [cm for cm in self.component_models
               if cm["id"] == model_component_label]
    if not matches:
        raise ValueError(f"'{model_component_label}' is not a label of an existing model component")  # noqa
    model_component = matches[0]
    # build a per-parameter summary; 'std' is only present after a fit
    param_info = {}
    for p in model_component['parameters']:
        param_info[p['name']] = {'value': p['value'],
                                 'unit': p['unit'],
                                 'std': p.get('std', np.nan),
                                 'fixed': p['fixed']}
    comp = {"model_type": model_component['model_type'],
            "parameters": param_info}
    if parameter is not None:
        return comp['parameters'].get(parameter)
    return comp
def set_model_component(self, model_component_label, parameter, value=None, fixed=None):
    """
    Set the value or fixed attribute of a parameter in an existing model component.

    Parameters
    ----------
    model_component_label : str
        The label given to the existing model component
    parameter : str
        The name of a valid parameter in the model component.
    value : float
        Optional. The new initial value of the parameter. If not provided or None, will
        remain unchanged.
    fixed : bool
        Optional. The new state of the fixed attribute of the parameter. If not provided
        or None, will remain unchanged.

    Returns
    -------
    updated dictionary of the parameter representation
    """
    comps = self.component_models
    comp_matches = [c for c in comps if c["id"] == model_component_label]
    if not comp_matches:
        raise ValueError(f"'{model_component_label}' is not a label of an existing model component")  # noqa
    param_matches = [p for p in comp_matches[0]['parameters'] if p['name'] == parameter]
    if not param_matches:
        raise ValueError(f"'{parameter}' is not the name of a parameter in the '{model_component_label}' model component")  # noqa
    param_entry = param_matches[0]
    if value is not None:
        if not isinstance(value, (int, float)):
            raise TypeError("value must be a float")
        param_entry['value'] = value
    if fixed is not None:
        if not isinstance(fixed, bool):
            raise TypeError("fixed must be a boolean")
        param_entry['fixed'] = fixed
    # clear and re-assign so the traitlet registers the in-place change
    self.component_models = []
    self.component_models = comps
    return param_entry
def vue_reestimate_model_parameters(self, model_component_label=None, **kwargs):
    """Vue callback: delegate to :meth:`reestimate_model_parameters`."""
    self.reestimate_model_parameters(model_component_label=model_component_label)
def reestimate_model_parameters(self, model_component_label=None):
    """
    Re-estimate all free parameters in a given model component given the currently selected
    data and subset selections.

    Parameters
    ----------
    model_component_label : str or None.
        The label given to the existing model component. If None, will iterate over all model
        components.

    Returns
    -------
    dict or list of dict
        The re-estimated component representation(s), as returned by
        :meth:`get_model_component`.

    Raises
    ------
    ValueError
        If ``model_component_label`` does not match an existing component.
    """
    if model_component_label is None:
        # recurse once per component and collect each result
        return [self.reestimate_model_parameters(model_comp["id"])
                for model_comp in self.component_models]
    try:
        model_index, model_comp = [(i, x) for i, x in enumerate(self.component_models)
                                   if x["id"] == model_component_label][0]
    except IndexError:
        raise ValueError(f"'{model_component_label}' is not a label of an existing model component")  # noqa
    # store user-fixed parameters so we can revert after re-initializing
    fixed_params = {p['name']: p for p in model_comp['parameters'] if p['fixed']}
    new_model = self._initialize_model_component(model_comp['model_type'],
                                                 model_component_label,
                                                 poly_order=model_comp['model_kwargs'].get('degree', None))  # noqa
    # revert fixed parameters to user-value
    new_model['parameters'] = [fixed_params.get(p['name'], p) for p in new_model['parameters']]
    self.component_models[model_index] = new_model
    # length hasn't changed, so we need to force the traitlet to update
    self.send_state("component_models")
    # model units may have changed, need to re-check their compatibility with display units
    self._check_model_component_compat()
    # return user-friendly info on revised model
    return self.get_model_component(model_component_label)
@property
def model_components(self):
    """Labels ("id") of all existing model components, in creation order."""
    return [cm["id"] for cm in self.component_models]
@property
def valid_model_components(self):
    """Labels of components whose units are compatible with the display units."""
    return [cm["id"] for cm in self.component_models
            if cm["compat_display_units"]]
@property
def equation_components(self):
    """Component labels referenced by the current equation string."""
    # strip whitespace, then split on any arithmetic operator
    compact = self.equation.value.replace(' ', '')
    return re.split(r'[+*/-]', compact)
def vue_add_model(self, event):
    """Vue callback: create a new model component from the current plugin settings."""
    self.create_model_component()
def vue_remove_model(self, event):
    """Vue callback: remove a component (``event`` carries the component label from the UI)."""
    self.remove_model_component(event)
@observe('model_equation')
def _check_model_equation_invalid(self, event=None):
    """Validate ``model_equation`` and update ``model_equation_invalid_msg``
    (empty string means the equation is valid)."""
    # Length is a dummy check to test the infrastructure
    if not self.model_equation:
        self.model_equation_invalid_msg = 'model equation is required.'
        return
    eq_comps = self.equation_components
    if '' in eq_comps:
        # a dangling operator (e.g. 'C+') produces an empty component on split
        self.model_equation_invalid_msg = 'incomplete equation.'
        return
    unknown = [comp for comp in eq_comps if comp not in self.model_components]
    if unknown:
        suffix = ("is not an existing model component."
                  if len(unknown) == 1
                  else "are not existing model components.")
        self.model_equation_invalid_msg = f'{", ".join(unknown)} {suffix}'
        return
    disabled = [comp for comp in eq_comps if comp not in self.valid_model_components]
    if disabled:
        if len(disabled) == 1:
            suffix = ("is currently disabled because it has"
                      " incompatible units with the current display units."
                      " Remove the component from the equation,"
                      " re-estimate its free parameters to use the new units"
                      " or revert the display units.")
        else:
            suffix = ("are currently disabled because they have"
                      " incompatible units with the current display units."
                      " Remove the components from the equation,"
                      " re-estimate their free parameters to use the new units"
                      " or revert the display units.")
        self.model_equation_invalid_msg = f'{", ".join(disabled)} {suffix}'
        return
    self.model_equation_invalid_msg = ''
@observe("dataset_selected", "dataset_items", "cube_fit")
def _set_default_results_label(self, event=None):
    """
    Recompute the default label for fit results.

    Includes the dataset name only when it is ambiguous (multiple datasets
    loaded, or running in mosviz).  ``event`` is the traitlets change dict
    and is unused; the default was ``{}``, a mutable default argument,
    replaced here by ``None``.
    """
    label_comps = []
    if hasattr(self, 'dataset') and (len(self.dataset.labels) > 1 or self.app.config == 'mosviz'):  # noqa
        label_comps += [self.dataset_selected]
    label_comps += ["model"]
    self.results_label_default = " ".join(label_comps)
@observe("results_label")
def _set_residuals_label_default(self, event=None):
    """Keep the default residuals label in sync with ``results_label``.
    ``event`` is unused; its default was a mutable ``{}``, replaced by ``None``."""
    self.residuals_label_default = self.results_label + " residuals"
@with_spinner()
def calculate_fit(self, add_data=True):
    """
    Calculate the fit.

    Parameters
    ----------
    add_data : bool
        Whether to add the resulting spectrum/cube to the app as a data entry according to
        ``add_results``.

    Returns
    -------
    fitted model
    fitted spectrum/cube
    residuals (if ``residuals_calculate`` is set to ``True``)

    Raises
    ------
    ValueError
        If the spectral subset lies outside the data range, or the model
        equation is invalid.
    """
    # refuse to fit against a spectral subset that does not overlap the data
    if not self.spectral_subset_valid:
        valid, spec_range, subset_range = self._check_dataset_spectral_subset_valid(return_ranges=True)  # noqa
        raise ValueError(f"spectral subset '{self.spectral_subset.selected}' {subset_range} is outside data range of '{self.dataset.selected}' {spec_range}")  # noqa
    if len(self.model_equation_invalid_msg):
        raise ValueError(f"model equation is invalid: {self.model_equation_invalid_msg}")
    # dispatch to the cube or 1D-spectrum fitting path
    if self.cube_fit:
        ret = self._fit_model_to_cube(add_data=add_data)
    else:
        ret = self._fit_model_to_spectrum(add_data=add_data)
    if ret is None:  # pragma: no cover
        # something went wrong in the fitting call and (hopefully) already raised a warning,
        # but we don't have anything to add to the table
        return ret
    if self.cube_fit:
        # cube fits are currently unsupported in tables
        return ret
    # record one row per fit in the plugin results table: metadata plus one
    # column group (value/unit/fixed/std) per fitted parameter
    row = {'model': self.results_label if add_data else '',
           'data_label': self.dataset_selected,
           'spectral_subset': self.spectral_subset_selected,
           'equation': self.equation.value}
    equation_components = self.equation_components
    for comp_ind, comp in enumerate(equation_components):
        for param_name, param_dict in self.get_model_component(comp).get('parameters', {}).items():  # noqa
            colprefix = f"{comp}:{param_name}_{comp_ind}"
            row[colprefix] = param_dict.get('value')
            row[f"{colprefix}:unit"] = param_dict.get('unit')
            row[f"{colprefix}:fixed"] = param_dict.get('fixed')
            row[f"{colprefix}:std"] = param_dict.get('std')
    self.table.add_item(row)
    return ret
def vue_apply(self, event):
    """Vue callback: run the fit with the current settings."""
    self.calculate_fit()
def _fit_model_to_spectrum(self, add_data):
    """
    Run fitting on the initialized models, fixing any parameters marked
    as such by the user, then update the displayed parameters with fit
    values.

    Parameters
    ----------
    add_data : bool
        Whether to store the fitted spectrum (and optional residuals) as
        app data entries.

    Returns
    -------
    tuple or None
        ``(fitted_model, fitted_spectrum)``, plus the residual spectrum when
        ``residuals_calculate`` is enabled; None if fitting failed.
    """
    if self._warn_if_no_equation():
        return
    models_to_fit = self._reinitialize_with_fixed()
    # mask out everything outside the selected spectral subset before fitting
    masked_spectrum = self._apply_subset_masks(self.dataset.selected_spectrum,
                                               self.spectral_subset)
    try:
        fitted_model, fitted_spectrum = fit_model_to_spectrum(
            masked_spectrum,
            models_to_fit,
            self.model_equation,
            run_fitter=True,
            window=None
        )
    except AttributeError:
        # NOTE(review): AttributeError is treated as a symptom of a malformed
        # equation inside the fitter -- confirm this is the only trigger
        msg = SnackbarMessage("Unable to fit: model equation may be invalid",
                              color="error", sender=self)
        self.hub.broadcast(msg)
        return
    selected_spec = self.dataset.selected_obj
    # propagate the pixel scale factor so downstream unit conversion still works
    if '_pixel_scale_factor' in selected_spec.meta:
        fitted_spectrum.meta['_pixel_scale_factor'] = selected_spec.meta['_pixel_scale_factor']
    self._fitted_model = fitted_model
    self._fitted_spectrum = fitted_spectrum
    if add_data:
        self.app.fitted_models[self.results_label] = fitted_model
        self.add_results.add_results_from_plugin(fitted_spectrum)
        if self.residuals_calculate:
            # NOTE: this will NOT load into the viewer since we have already called
            # add_results_from_plugin above.
            self.add_results.add_results_from_plugin(masked_spectrum-fitted_spectrum,
                                                     label=self.residuals.value,
                                                     replace=False)
        self._set_default_results_label()
    # Update component model parameters with fitted values
    if isinstance(self._fitted_model, QuantityModel):
        self._update_parameters_from_QM()
    else:
        self._update_parameters_from_fit()
    # Also update the _initialized_models so we can use these values
    # as the starting point for cube fitting
    self._update_initialized_parameters()
    if self.residuals_calculate:
        return fitted_model, fitted_spectrum, masked_spectrum-fitted_spectrum
    return fitted_model, fitted_spectrum
def _fit_model_to_cube(self, add_data):
    """
    Fit the current model equation to every spaxel of the selected cube.

    Parameters
    ----------
    add_data : bool
        Whether to store the per-spaxel fitted models and the output cube
        as app data entries.

    Returns
    -------
    tuple or None
        ``(fitted_model, output_cube)``; None if no equation is set or the
        selected data is not cube-like.
    """
    if self._warn_if_no_equation():
        return
    if self.dataset_selected in self.app.data_collection.labels:
        data = self.app.data_collection[self.dataset_selected]
    else:  # User selected some subset from spectrum viewer, just use original cube
        data = self.app.data_collection[0]
    # First, ensure that the selected data is cube-like. It is possible
    # that the user has selected a pre-existing 1d data object.
    if data.ndim != 3:
        snackbar_message = SnackbarMessage(
            f"Selected data {self.dataset_selected} is not cube-like",
            color='error',
            sender=self)
        self.hub.broadcast(snackbar_message)
        return
    # Get the primary data component
    if "_orig_spec" in data.meta:
        spec = data.meta["_orig_spec"]
    else:
        spec = data.get_object(cls=Spectrum1D, statistic=None)
    # cube fits must happen in the app's surface-brightness units
    sb_unit = self.app._get_display_unit('sb')
    if spec.flux.unit != sb_unit:
        spec = spec.with_flux_unit(sb_unit)
    snackbar_message = SnackbarMessage(
        "Fitting model to cube...",
        loading=True, sender=self)
    self.hub.broadcast(snackbar_message)
    # Retrieve copy of the models with proper "fixed" dictionaries
    models_to_fit = self._reinitialize_with_fixed()
    # Apply masks from selected spectral subset
    spec = self._apply_subset_masks(spec, self.spectral_subset)
    # Also mask out NaNs for fitting. Simply adding filter_non_finite to the cube fit
    # didn't work out of the box, so doing this for now.
    if spec.mask is None:
        spec.mask = np.isnan(spec.flux)
    else:
        spec.mask = spec.mask | np.isnan(spec.flux)
    try:
        fitted_model, fitted_spectrum = fit_model_to_spectrum(
            spec,
            models_to_fit,
            self.model_equation,
            run_fitter=True,
            window=None
        )
    except ValueError:
        # surface the failure to the user, then let the error propagate
        snackbar_message = SnackbarMessage(
            "Cube fitting failed",
            color='error', loading=False, sender=self)
        self.hub.broadcast(snackbar_message)
        raise
    # Save fitted 3D model in a way that the cubeviz
    # helper can access it.
    if add_data:
        for m in fitted_model:
            temp_label = "{} ({}, {})".format(self.results_label, m["x"], m["y"])
            self.app.fitted_models[temp_label] = m["model"]
    output_cube = Spectrum1D(flux=fitted_spectrum.flux, wcs=fitted_spectrum.wcs)
    selected_spec = self.dataset.selected_obj
    # propagate pixel scale factor for later unit conversion
    if '_pixel_scale_factor' in selected_spec.meta:
        output_cube.meta['_pixel_scale_factor'] = selected_spec.meta['_pixel_scale_factor']
    # Create new data entry for glue
    if add_data:
        self.add_results.add_results_from_plugin(output_cube)
        self._set_default_results_label()
    snackbar_message = SnackbarMessage(
        "Finished cube fitting",
        color='success', loading=False, sender=self)
    self.hub.broadcast(snackbar_message)
    return fitted_model, output_cube
def _apply_subset_masks(self, spectrum, subset_component):
    """
    Return a copy of ``spectrum`` whose mask excludes everything outside the
    selected spectral subset; the input is returned unchanged when no subset
    is selected.
    """
    # only look for a mask if there is a selected subset:
    if subset_component.selected == subset_component.default_text:
        return spectrum
    masked = deepcopy(spectrum)
    sel_mask = subset_component.selected_subset_mask
    existing = masked.mask
    if existing is None:
        flux_ndim = masked.flux.ndim
        if sel_mask.ndim < flux_ndim:
            # stretch a lower-dimensional mask across the full flux shape
            sel_mask = np.broadcast_to(sel_mask, masked.flux.shape)
        elif sel_mask.ndim == flux_ndim and sel_mask.shape != masked.flux.shape:
            # same rank but transposed layout: swap the leading axes
            # so the mask lines up for specutils
            sel_mask = np.swapaxes(sel_mask, 1, 0)
        masked.mask = sel_mask
        return masked
    # there is already a mask: OR in the subset mask, first collapsing a 3D
    # subset mask onto the spectral axis when the existing mask is 1D
    # (i.e. the spectrum was collapsed 3D->1D); a spectral slice is masked
    # only when it is masked in every spaxel
    if sel_mask.ndim == 3 and existing.ndim == 1:
        sel_mask = np.all(sel_mask, axis=(0, 1))
    masked.mask = existing | sel_mask
    return masked
|
spacetelescopeREPO_NAMEjdavizPATH_START.@jdaviz_extracted@jdaviz-main@jdaviz@configs@default@plugins@model_fitting@model_fitting.py@.PATH_END.py
|
{
"filename": "test_ensemble.py",
"repo_name": "langchain-ai/langchain",
"repo_path": "langchain_extracted/langchain-master/libs/langchain/tests/unit_tests/retrievers/test_ensemble.py",
"type": "Python"
}
|
from typing import List, Optional
from langchain_core.callbacks.manager import CallbackManagerForRetrieverRun
from langchain_core.documents import Document
from langchain_core.retrievers import BaseRetriever
from langchain.retrievers.ensemble import EnsembleRetriever
class MockRetriever(BaseRetriever):
    """Test stub retriever that ignores the query and returns a fixed document list."""

    # canned documents returned for every query
    docs: List[Document]

    def _get_relevant_documents(
        self,
        query: str,
        *,
        run_manager: Optional[CallbackManagerForRetrieverRun] = None,
    ) -> List[Document]:
        """Return the documents"""
        return self.docs
def test_invoke() -> None:
    """Exercise EnsembleRetriever ranking: content-merged, unmerged, and id-keyed docs."""
    base_docs = [
        Document(page_content="a", metadata={"id": 1}),
        Document(page_content="b", metadata={"id": 2}),
        Document(page_content="c", metadata={"id": 3}),
    ]

    # Scenario 1: the second retriever duplicates page_content "b".
    # The document with page_content "b" in documents2
    # will be merged with the document with page_content "b"
    # in documents1, so the length of ranked_documents should be 3.
    # Additionally, the document with page_content "b" will be ranked 1st.
    ensemble = EnsembleRetriever(
        retrievers=[MockRetriever(docs=base_docs),
                    MockRetriever(docs=[Document(page_content="b")])],
        weights=[0.5, 0.5],
        id_key=None,
    )
    ranked = ensemble.invoke("_")
    assert len(ranked) == 3
    assert ranked[0].page_content == "b"

    # Scenario 2: no overlap between the two retrievers.
    # The document with page_content "d" in documents2 will not be merged
    # with any document in documents1, so the length of ranked_documents
    # should be 4. The document with page_content "a" and the document
    # with page_content "d" will have the same score, but the document
    # with page_content "a" will be ranked 1st because retriever1 has a smaller index.
    base_docs = [
        Document(page_content="a", metadata={"id": 1}),
        Document(page_content="b", metadata={"id": 2}),
        Document(page_content="c", metadata={"id": 3}),
    ]
    ensemble = EnsembleRetriever(
        retrievers=[MockRetriever(docs=base_docs),
                    MockRetriever(docs=[Document(page_content="d")])],
        weights=[0.5, 0.5],
        id_key=None,
    )
    ranked = ensemble.invoke("_")
    assert len(ranked) == 4
    assert ranked[0].page_content == "a"

    # Scenario 3: merging keyed on metadata id rather than page_content.
    # Since id_key is specified, the document with id 2 will be merged.
    # Therefore, the length of ranked_documents should be 3.
    # Additionally, the document with page_content "b" will be ranked 1st.
    base_docs = [
        Document(page_content="a", metadata={"id": 1}),
        Document(page_content="b", metadata={"id": 2}),
        Document(page_content="c", metadata={"id": 3}),
    ]
    ensemble = EnsembleRetriever(
        retrievers=[MockRetriever(docs=base_docs),
                    MockRetriever(docs=[Document(page_content="d", metadata={"id": 2})])],
        weights=[0.5, 0.5],
        id_key="id",
    )
    ranked = ensemble.invoke("_")
    assert len(ranked) == 3
    assert ranked[0].page_content == "b"
|
langchain-aiREPO_NAMElangchainPATH_START.@langchain_extracted@langchain-master@libs@langchain@tests@unit_tests@retrievers@test_ensemble.py@.PATH_END.py
|
{
"filename": "2D_gaussian.ipynb",
"repo_name": "bfarr/kombine",
"repo_path": "kombine_extracted/kombine-master/examples/2D_gaussian.ipynb",
"type": "Jupyter Notebook"
}
|
```python
import numpy as np
from scipy.stats import multivariate_normal as mvn
import kombine
```
Import some cool visualization stuff.
```python
from matplotlib import pyplot as plt
import corner
import prism
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
prism.inline_ipynb()
```
# 2-D Gaussian Target Distribution
```python
ndim = 2
```
Construct a pickleable, callable object to hold the target distribution.
```python
class Target(object):
    """Pickleable, callable zero-mean multivariate-normal log-density.

    Calling an instance (or its ``logpdf`` method) evaluates the
    log-probability of ``x`` under N(0, cov).
    """

    def __init__(self, cov):
        # Store the covariance and infer the dimensionality from its shape.
        self.cov = cov
        self.ndim = cov.shape[0]

    def logpdf(self, x):
        """Return the log-pdf of ``x`` under N(0, self.cov)."""
        # The mean is zero by construction; the covariance is fixed at
        # construction time.
        zero_mean = np.zeros(self.ndim)
        return mvn.logpdf(x, mean=zero_mean, cov=self.cov)

    # Calling the instance is an alias for logpdf, which keeps the object
    # usable directly as a log-probability function by the sampler.
    def __call__(self, x):
        return self.logpdf(x)
```
Generate a random covariance matrix and construct the target.
```python
# Build a random symmetric positive-definite covariance: A*A.T is the
# elementwise product (symmetric by construction), and adding ndim*I makes
# the matrix diagonally dominant, hence positive definite.
# NOTE(review): A*A.T is elementwise, not the matrix product A @ A.T --
# presumably intentional for this toy example, but confirm.
A = np.random.rand(ndim, ndim)
cov = A*A.T + ndim*np.eye(ndim);
lnpdf = Target(cov)
```
Create a uniformly distributed ensemble and burn it in.
```python
# Ensemble size and sampler construction; lnpdf is the callable log-density.
nwalkers = 500
sampler = kombine.Sampler(nwalkers, ndim, lnpdf)
# Initialize walkers uniformly in [-10, 10)^ndim and run burn-in.
# NOTE(review): burnin presumably returns final positions, log-posterior
# values, and proposal densities -- confirm against the kombine docs.
p0 = np.random.uniform(-10, 10, size=(nwalkers, ndim))
p, post, q = sampler.burnin(p0)
```
Inspect how burn-in transformed the ensemble.
```python
# NOTE(review): prism.corner presumably renders an (animated) corner plot
# of the full chain so the burn-in behavior can be inspected -- confirm.
prism.corner(sampler.chain)
```
<video controls>
<source src="data:video/x-m4v;base64,AAAAIGZ0eXBpc29tAAACAGlzb21pc28yYXZjMW1wNDEAAAAIZnJlZQABuGFtZGF0AAACrwYF//+r3EXpvebZSLeWLNgg2SPu73gyNjQgLSBjb3JlIDE1MiByMjg1NCBlOWE1OTAzIC0gSC4yNjQvTVBFRy00IEFWQyBjb2RlYyAtIENvcHlsZWZ0IDIwMDMtMjAxNyAtIGh0dHA6Ly93d3cudmlkZW9sYW4ub3JnL3gyNjQuaHRtbCAtIG9wdGlvbnM6IGNhYmFjPTEgcmVmPTMgZGVibG9jaz0xOjA6MCBhbmFseXNlPTB4MzoweDExMyBtZT1oZXggc3VibWU9NyBwc3k9MSBwc3lfcmQ9MS4wMDowLjAwIG1peGVkX3JlZj0xIG1lX3JhbmdlPTE2IGNocm9tYV9tZT0xIHRyZWxsaXM9MSA4eDhkY3Q9MSBjcW09MCBkZWFkem9uZT0yMSwxMSBmYXN0X3Bza2lwPTEgY2hyb21hX3FwX29mZnNldD0tMiB0aHJlYWRzPTEyIGxvb2thaGVhZF90aHJlYWRzPTIgc2xpY2VkX3RocmVhZHM9MCBucj0wIGRlY2ltYXRlPTEgaW50ZXJsYWNlZD0wIGJsdXJheV9jb21wYXQ9MCBjb25zdHJhaW5lZF9pbnRyYT0wIGJmcmFtZXM9MyBiX3B5cmFtaWQ9MiBiX2FkYXB0PTEgYl9iaWFzPTAgZGlyZWN0PTEgd2VpZ2h0Yj0xIG9wZW5fZ29wPTAgd2VpZ2h0cD0yIGtleWludD0yNTAga2V5aW50X21pbj0yNSBzY2VuZWN1dD00MCBpbnRyYV9yZWZyZXNoPTAgcmNfbG9va2FoZWFkPTQwIHJjPWNyZiBtYnRyZWU9MSBjcmY9MjMuMCBxY29tcD0wLjYwIHFwbWluPTAgcXBtYXg9NjkgcXBzdGVwPTQgaXBfcmF0aW89MS40MCBhcT0xOjEuMDAAgAAAE2BliIQAN//+9vD+BTY7mNCXEc3onTMfvxW4ujQ3vc4AAAMABmRj5QDPgsyfqAgSELTRaC/CRKcUVi05/Cv4k97L2KzzmytRr2iujexnx9/eDrhVaP/OxrAL0H/NIV2gADCOAEX3XB7OTMiTXfsH23ctOXKzsXQNwZ7z6c3wWV0Ac7zH51OunKjogCGeqXfBqJmCFPDwCXk8aFrvWQhHu5Cn8/dDRrqRRL60CcIEsoaLcObjOS6DdoOaX8c7h3dTPU3lXI2L4PjcAvRkA1bYmozumHjAtRC/AdjRAvHdLKPb0fjdoDG8AaxmEMXYIP/8LvA+Korsq6wqkabF3yZi5KQlNv8o0qIYDYJu5sJoTQ/D+R0CzAZY7mi0pOufPmRP3nPlCXoYLMPXeJTZl8hvNcseR+nNA9Ih0emImMamya/GsnEZ80nk7FrshGhe8H3x5ny/PJOnHKi70YEweSeICMJqQ1AATMAUdyn07zO5ViSkPR6gp72sgm0vVxNIVJBggzGEvSrQv+a/y2FP8OFo9Mtpi3+8xtuxWNamArtPh+1sR5vbGa+1luET8mkXvj/lrx6vDygASHovevo7xZwFDdC3gaD4hJx+dhCX4ZDREzs/WttRXJTPVOJY1lyVTy+O7d9RG1eim6vKB/+FqOam1A7pv7/94vh8vVtmnJ4QEJk5ecuLsE7cTwMNYXTZYABYrIXF+/QsmAAAhjMo7ivEVSiNS1IelLDmW7p78/mxfsbRupTa/l/bFXA46lDLnkYRuGNAVGzMUK42GyE2okmAYvRprqUiSgX+J3TdsPbAjJiZXC0wfmLD9XuesLNKxINLh3iiJUiMppK7rn6pZ9gGmUxk8npOlWRqUasdgL0XN5uCnFsmLnQwch5Y5I/f5ggFwx79arocbXGBsTrHRx4QABLnCg3n+DFG71KMTy3HqDRxDwgytn7uqDoGE7G3XZdfpwbFmp4pST2cmAu
AW6fijsPjWXw4gA1nIRxPsiHdQLAhn5UxbuiI1+W/+SFyMheefn9FB6NuwUqkwQTDd3z7H5Bmwe6tnt/6nVnJjzhyK9V7a6n1g6DsBzZCxTQXqd/++OP8C6ug5H/QvOJVXsSpGlZXYMKIM+MXM+IBkPCxhGR44ucuLa8ZLXnRgn6UeWa5ozvwipjEjsN++sgIuFSPfAbf80F83z8cgjCevCRk7nTv5dfj7BtnfbAHcW1a7cwujOzwdDvaVzUpLU4iE93MHyMeD9Yt/LeKptkJWTLLdlDE2KBj02E74Yv1ZJ5ycxasygpJxxj7YaFV2jdolEHYaW8FSe769BCSmNclb+cLWCNNMa7IqAmyplEtqVOifxwgIqSzogaG40wS410uZLDqecAxgAHd2/aHB9AVftUVXokAAbYbKiO7xZ9a7Sb2ZBUf7SySSI1jY4GTN5FxAsAda+9fu/kse6nqEZzqmOVCKEov69XGdL5rbdMZY5zfFTzOkyNeyvPnuIZOuk8vaT3WXmUiDGldP/NO6Ko3jR9W3f0UzU1YUSOKrvLMJqV1IO+Bo4JHQ1kRrS67d5M4/0ni4eBRGw6D0j3JdjyaeWK7odR98eP4eJ8zEZVN+AtSMs1nVV+YpwIV80e5UK/Jk7p+i47sXFqXpgnhUHxLn3/F/TJAjTqH01YBcPHghj9OU3FMo1YqZH4/AYdXtBfJET3bTQaMpPQ+NN7zKnMR8b+xZH/Tqdui9mXyrWbq9J20NHi4o1zGwbD4c+1ooaVG7ZZVyF1XmCmQYRldh8WCrJQZpJXf/D1AgJa8781FV08kYjBCtBqntiWhsfz98BS36kw9OwrNKFKbCssp63ha7tFqCJhnBejInhsy9//A2X7RCNoy33W2GOJegO5xUvTqi8Nn7udBgjzpqgNEH3WwNeoOEJ9mTi3SobfkMS7MQ/JtwzOwQlRkXfr2KV3Sgk97WAA4QBleNSzllePWikeCCCLKiC+HObkHz416K8+I4IfD8Mxcm14MczOEOG8rh8lkGLQefW9r6WGuKxsRezj8mjfqxUNRDUAcJdMR927reLMrWSGc0wJuvA9XtVcPACooY06iAr3PXN5/mCBiMIZAjlcIQGwfGSoCyMdXCYpTB9BLJG0Tz2nIb4piQTViC1jBqm7X/qR/sJX7AOOW9jJf5TA8w4kvlbFPXpGak9ELGKTZCqTuJX8yjt/zcGvLxH2KF/d+VoVLPMtLq+AdCSgfcvRJHVP6VPDYhvuuuFWwLP6hyKJVnsgAAApmasdQ8YphsR6d9lcFMYQO3IeSlKZvfINbQretUxhRJ+A3t1UK7KkjHh5zAw3q8aVTKkTUB8ebp3svM4w+E9jKwtGoabZAAEZJBXVoBZq+PVZxXCpB6xcDSBsEn/2OSLn7HRFSraq65R5MiBgrho4Slhw2v0Yl4WLo/g1hgitHRbpWLXQvfOx4NsY/elwORoZMB9wnHSKCbFjPRugAARfULS55k3ZdnNID1tegb8kFxzaoJMoVEiptSfRTqpdSrlZfd/bhamaqw28uxFWBF+ho6VQ4uDIeBMTHY6HRs8IFhiiOaJu3b/7lGZpbecvdtfoeKLuE1MVXCfEwyuh0Z2lFLGrtuZNkI52FLzq44etKHAiRd4qE7w6SoFNDHaKCA6CiGB80nu8xAbbaK2PP/d6z5yw4kd8oy9NlIQ6FMPMYuVzYAncl3IwZrhzfeKDn1+deqVhmmLPYxsxLSSeIrG7tx0m4g263g2gZbpmB7VZE34D58hpAZuTh+dIrP12Er4qAZemKJaWFGqGvJb6lYki3Qpf/mh/VVEbk4LReFaX1aTrVLZk5WbVL4fi+pVTWqsxzSYkfsMKgYR3uhnU5B9n0oelP9aAGpCvCZF8EJLCz8mbDVFf9F88eqkBoUD3dhRXXauAQyUGEv+AS3wLj+JPTy8Fp0UoAiCK67Nulm0SLYgMmLYlqrjs94S+TyRqiKgTpM0MrmZTD5+mz5Sb3pPtqZhv
yoxcWX05FkTfH7jk755AfBF4XskEthZbWPNuyiOze6dqYh+D0s39l//6vB/o4Vg2QYR6OIUWC7DMa17sM97TwRkCBi1bTPrx3+yjwlP3eMxASzKg3KFmGdWzdvYMgxzELNWfrl59dPNflrMi2hj1J4ICBt1ymWC8nYMUv38ZxXozUGhEYgoqVk2Vz4Km58y/BRtUrAiOKoD6yYxTXmoAOqR2q1a7GnJvwv9AIframsXQ7YQ46D6ZKpltXUQs51R6b09RYO1KGwP1VnNEuEB7co6VH9sXvK98nxypD8VuNEckZO6HyC0BxC+ggT1ISd/V9dEZDweDI3aDUYoCDCuEWpolNerEQ2WkPLCjowOvYdJKm0ApqVPjBPx6HpufUSGLHItyj5XMK478LDVd2r9/+yLtE8YczbKMKjrhm0dAmzhpOvvO5Qf+OKa3m/qTsX/pZaz2rpI+plVNkpbxwqVdnlydGmZllC8725tKcrph21aucvJ7/lObQUOSzG4Pvq2Y96zfynpex+pdbRnI2Ss1Bd8igbzZkGB0U0scdrwzBhijD0+CxQo6EG6+KHpliGvMdaRRV28/SJyN5ssvhuPF6qM1t4tdYJehT3dqxWmwDy3fRrspdBxnR16YqjeIiRSVFZ/Qd7WePhKzf5z/WdkK7kRrGVe0Rf2PlWSmzdauGuq83131QqPzEqPmbblr6ApfIB/OfsevDu5uepIzG/xesepdkGQA1XgmspElv3iUYcfPqVu+MPoW1Jj7aAWK70jvczGn8ggHs9vmMz/uct9bjJ9Wf5RJdFqRxPfBQvQsRR0ghz1lDFRn7IuHMWRJ7JqfiOipE/xwLNAG7Abr7hydaggVrOg81Mcq6Xe3Fj6eclH8DXxT4BAUbONaKz+u8e/DgxGnewUrGIA8JPoMjjdt7/THtw+H7x1X0y6q4UL+PYKsDHKuEigdH/FPvZcy+mu/OeQmFgkZHVbGWlTWwvSaD4gdmqR07Sup7VVi92uII/dbtPvRhZX8PFKNBFW1vIOgnJzRQQSEnHzK6zKkqKQiGhrBVWejfRh4JPeLZcLtnGSWDGwHB6GBo/QeTUUOHq0OosoncQfWNJAWwQeVgdXlP2OgUCvRYGjqUVHcjL+3umdJE+TvDlQ2tL0nEepGczi7xkb1rSuljNqsuUO2BDbI+1c0hmlsiYNZm1kSMvDgWzTTYCB/iDL8RN5Xgg85Hq8ULv1Ht4yBXirN4ZYNQ+qM58nuAhwvrVzXN8ilQPQTM0LAGIIBs3Uq5ydSqG7oiVtEXAWisvqwdFnrYqUg/qIMhS54IAOM0TCI1IdpuLn+3MIGgTjT/R9VYxqRYUdrS0gvVOfXL4BZoKbXDm6xuNnmwP/ngXBtwkREHfRev8mXZNTpv++qqHtXnNDRRH0T+qb434f8FHSjyhGO/pnyWF5Y2eIOTA3intf7NfdnwbakZFfAa6hHVbXnbgxCr8RSFnpCNwn+PruJgJnwJYMged8TeLBF8Hpu6JoJO1lv0V2mWDc8XDDzJ85m1pNn77GsJHnvxkyQRZzErJ4gIMkVovfKs3DT+77f+3QvCUG/nndA+XzqFVX4caxuRWa5GbBS29KJ+CHKNAsNC5d0Z+vGLCg44Rd8D3fjs/eKdJgXNkuwxBuroupHN68BU2P2wcwZzmDWq5bhHMFx9NvTOVXJOfV1O3QZDEbhVmlDPpcHlDl+gahkZbcUU8xOiYUa2QhasZRVj6tuVHC8ynd8jLilVB2B5pTfj9i+LSPIE4KUEupUVHQ66jQMEjcAS5EDnDsxkvbpWOh3WvugSrFY18fD/6P8vESaaOzbkWAP85mPFC9VQ9+fytzWZaECXn4EIWHjN6osoU+xRkkZLBIb3N8qe2kogRMxjwdVeoP/NanHNxfw/AA3aUNjrNeJfMw5kRPTx/mon+P9rFxZ4clPv3UsMMGaWY67wb2hs6hEqjt6eCAhwiM7Pnxk1jYAu8gMnygRunKxsznPrHGG5DXLXSVX
jJpU6gNhLD8QDpTRdB2Wfzd2SfIjGa5YobGHVy8YHkdY0DfHJhc/fc56F8buLWoF8JWvl9OX2XYqyi0Y4Bg5tLFrA0q0Wn8+ORM60/dPZwUs+2J0CC/QZwh0/mdMg+B4XrPN7XLaKSTMmMHdfil4IVMoQw95M+I1mfcBX04UBOR1MEDQthM1IJPX/7xmONwB+CrSOx0gZrGKUN+eV/HctnnGfVUV/W0K1m6DRKXrACPlMYBZHEaZKt71zUl+YUolrARaQ2tyWCTpxcrLojA45jdN4pON2j5/FUMdOlI72zQnM/7CyPvzl//FTlk+is3sLL3NnKTG51mELfmQYYz2Q3oNEbDChQd7coZmB8OoXS6hoBjK9gdBPF7qpE7VQ5gXWyo+PY/xSXlP3XYZS/o2hIP+R5rfNZxbXa/jOuEkyIQLw2o4oaucdV83PvfbCb0wuTEr6rSlNn7BNP9zt8VGrxH4hNJ9mTWpVIdFj58/voGnmGJOfqs3j/2v/9zwKvoyDuaeq8sW91RTMx732grGGTe+fPHScj/On17rldDX0EgB3RTdFCGjj8E1FGU02d6a8FmSpzj+P5i8GpszjMU5W+RdI5izBzeqtV5CUn1HGKeM/V8COKQwH+RODjGL/0vTMKhL2BdgNYd1NVjv1Vp+ihdvf4+ARAZ82I6hEhsyc3LleaZzb3Ptv/CgSFtQifmOv8wHjDFaTJOBTsFhfWQohcEF2Gdxiiq5WxnInhPPKL3/H7eJhU39GAcTYo5JJQeERdhXU0gFipHpKTLDOQeq59YuQu8FfG2AJrTnyipf86LYciDOd9JcbX1X6iOHUTQrW+2zVUTRnbIb4sWsqDIP+IKJaf4z0EUc18TpTM42SXl+1lCWvsfnaFvHZdJ3vKG1UyL5+gfVHfwA5rkIcZdlbUGtQFqh8tWYIrMwzm6akvGzcqwAc1UDKpiZyHhWD1EYUPNp2lr4Y0iSyhRFiuJatE7Ft3HZf2trnzP+E8lQj5jRbWit0ylX6vSonzyR9tJgcMk+muK6TC54C5QBJ+u3mWf6nF/luXZhKuEFJzPqRrUi2nlyKWVh8DtcHhpqiumNo03Pjj6kGCrguttDSa1W3QGH6niMPw0M7RDTqQDuPmL8J9kId9hUjw2VmWqciZxlJUABykTZwao4K3DXFuowG9Qixg1Di4im4Jj+PElTH/PWf3rEuFSdDHIffsobqF/4DLw9fqozT1I8rldeHlRrvx7ADkfc9fqxm5s08q+avfbCXx8BIvd+hpWonxtSM8gR4Yho7gNggA4MHH/CRifLdBhiLsZO+4CK2cXNtsPVwxnFxzQ+HD70DKRwYaSYm4xZwv6FzkjRY4HJYwfujxDYEsVATciGWVfPSCDMVKvtKyDTnf6W7Ruo2mcJTb+7pgPoYah39Svj/3cHV626+aVHCNls++rcNlidNStfVQC3iNk2HVjB4DYHLvD5bZ9ayx4w2jLKmxmSGA/Jy7u4npAD4L7wzQQvdSLCyjOzP6WEgF5eoLYycyhfXYW0n90lhp/wMF/kzrdQInuu9c8wxU01nGBz+JoRbCqdlKpQXaWZdf3hGnjy5ufp506zuLHdn/JLGjiN9XSF66UE1DqxUj943IZ6SoC0zIq729QP0E6whAAAKI0GaJGxDf/6nhAD3OgfQCyxlvVzNnFn1y+GRCO4Kf5v3vIdtHhwh4HRl6hgubMmmx4gA4BAOCEvlU7MsmGnT99hmzBIBQiAhktecqBZsB47XyWvx58ZTCNMYOS9ErzYoseA82d9deu2qu8mMtnpcZtjbppZLp0YIW8Ou+1kwOS87UHvz3ffSkI8Ws2/EMLaYq/YtO7wPq8WK82J3RPc45XHmdjP2DjZWLxJ5u0ei1yp5a1f4iXjF+iwmojjSvJATUWwOx4psBEuabrOkBCr9/GCDXRNk91ZEu+BMRNa/iTY5kA11c4kFlsE3R4DotTK8rUIbPJyDVt45ojFRi5TeqZ1NG9aRO48G27l
eJ0eg9V2okXEKBCaIZJdkbSNQmyvv8HDTpgcKPJBjDQhQI6wkba3OLo6hMy+2c51edXlOYV8uH3JMXc79E3WGpPynRxT4L+Z30QZgmztgnb2n3ntY656IWIs3mGVSCLkHbxcfI37w+5+u8MXRxNZyTZpaTRnDd86q+Cfk5Kf+cObI46yXsLAzj9KATZ+89NdBOw20sfa/z3DdPgByyD3VOVLCZago+nRsRRYY1LKkz/786qfgwhlukVPS7Y4M6UT3V1XoibXWcgNOD3mIA7sBdpxHdbwLehFlaDInQhx0COfkt/QUG9F4oyzKtM0q2T3XoCV3saOZwjp7+thNLRSHruTEwOvOZg1ZiRMY4WSLatKcL0wHv3ayXyYqWBvq/KkjhBNPf0ZX5zRGTCvWZiOTy6KOFtaGe+TsqCZBu+sD5qL6hoBGxEGRYyK2DxQpXtI9Jje8IU2bdJVKM2g1WswrDxocYpzZjkwO3LtQHPSJu+A+L21caz7ag6teKm9yHwyVq6Qq9bLDsrbY4IM3I3O3vAm/i0s4SB4D8T+Oj+eDS7F8PUckoukyYLQCq7IpvtHsgjdOP9fHwvVjUVEqpGH9NDK7Pgvkf/BEUJ6OuMOJto0yUk5bLDNzQtyTJRcnHW4GWYqbQBaNkApkcSZDmGzhdMvCnaQamnOc45IpicYxmRc/BRjw/pWQn9+F8LlVA4dfYdTKrSzUSjQWjfqqSYs9BD/Tm+x36eLM9AkZG/44V6uSLbgplawr+df8neH/90SDCX8prHicECzJXfZyCKKYUpec4cTMT7vwdyiIg1ym4RjPDjLBd/5+WRTrSIZEg4Bx+F5jmdzoFaxKO6ghrJUaWPMyEguvmEnNfQ74eeqByNa7iisPBLDRQMFCJaeeZJNdUuqY9RSo3JdeFNKPgn9w7f4VzxDNZCEQ8RW3hEW8iCxp8EJWy5hpTM9WrKdmqSR9m1js0rdz+aLgIxAJ6ZzYTH7Atshbkg5UnEx2nq/A6aWuhrgEHWdX0H7fF6m5hCrme0vcbswQVgbIdrj6r5fH67g8vziUsjIHMQTIUL+YM5nFhLQQPuadgObepyxNy4rbamvAiPLBUzHRSr2fHygak/wHsNUF6gDC4ZiaW7T8RS5fUblUlH5yNiSbTbgeSSS8TyTPYTUbna46pv6J6isuRN3NsIlrKMypyAAXayr/v3NLjSRRCvVFgbDEVPKTPvPxtb+AM7X4lNcmfB6qTOPRoOA3rF40O+npacR+JJZjHiQKwadaziAYlOlHJuVE7h2eIwra830CWff2oGyRBE2vT2NKUkHAR71oPmHrhjw8fQLFGmzmTwVH6vUIf0ro0mUF3b0zKv3Pm6eGRU6SEoq2g5z2tay9VAMLahrX5Xmpedp7z9t4F1iNWP9hrnhVR8ck4/iV0QjlkgrIeS/M2Fuy0eRvR0ukwCmc1LfvyX1w8K+uAkjD6k4yGYvd5CP4J51npBiwBimlVHxeqL9ms0QX5UO0HVkTNZMQhsi9ASOnYDzwb5d4x0LyROSNlJtDIeQdgqTeTO4EZAAoKHU2BwvrkdTdeZ7U2UB4Y4FgXVvwkB1ml+8X1zlOb64oAH10lrjea/u7QpNzjke6FrC//vqoe6qBMFp2KDooXXYyc7SmJYDLhbr2IuHUqrSx7T1KR5zPNF8gn4Kz7HvIFtLbJzckbMHLq5omSRas2BmITLZevBjnaCN7O4GlYuaeloCmdvUf7G5gwKYqoOh7EK+jkmGcgQ64cV/meps2ebLimGhXiE3z1eEqSpqOFvetE8C4H5mkaySlBwvDipqH4CJlFsqlDfiwxMjV/GRyHu2ql+o9E9w0ZIGL4NJ5VN64k5JLTQgWaQhrW+0Apw2SyOVL+0+g7oy3nAdmIbWFyp7nHlV8/RRhACPn8ytdiC2bJBV/6HXSwDrP968lD3BtvUPRirOeHjwiELeLzjsLZOa0/zwn8RhGrycxTrAI8HHKlhhMCx82sDG26lBUPtehe3O
SvvUZKz77cRsSqC0s3WndT8mavC2ubvRgnGdoxpoRQ8O5RrnbMDYzArONdAt8PcMpGFH/zGKmCB4iInx9LJ+grH1WsGnsbc+Kpfl0NvqIbUGfeXWjplaySJ59Q70kpPvWG5xTKYK29XbmetJUh6B0WKer44p+VlBa7F8XBuKMVlORhqd8u+musDcjHcQutXU+a472FsXB9tJONvAhueYn/G6yq/N3PCReDFEqpNwpwlq31D4wPBWO4ypYCQb8qsJqAbVNDmH780ImL43nBwjqh6VK8mujXfjqMtFDU9f3F3Neyhj7vYZKoYKQQ7G4O77SklTZw9SsLJBeguFEMYRcNxPsjvD+f+TX0/xph38uq88TShbjWCVHmX3R68vniq1NKUiDLCl2u8Vm/V4yvF6jPs/sFbowOSGSTu2AE/4X/L0YZ5hez8GnuTJJQVjXZut6/seWfWHB5pitG4rpeG4UMMcs4NdefKYf6rMV57mUgaFodvvBRgmrxDv2aCse32SpItuKpIHipJxBEPy7k8tfra6wyYUJDtU0zgE0aAjP/Hr8DwIBEXQxsbPrilShYPaaWDC6BOnC9ob/T0GMuXTRonS3IuO1Xvu6nKkXJBQowvucj6fE9dXo8KSb/kpeB2bliXO9IkP56mYL2zjRTCf5usqW0OFTRPGpjOiYiB0eyVEsyPaEndvtkC9N+3/FAZtp54Dy6tDXxoyftjqAeMkcaRmUvvxE7jgqu9TWNf3E3CAgb3MQvQh4mKTHQC6GIbulWXD/5/1XmEg8lCkLFHtIIR+AQQuamoMePDbvh4gKjvvXWhCxgz6vLtRladUkcaje3+//ccCGmr/C8YMyNHeQ9TkFKH/Kia2p4fmiAcNM/WYg0L++i4tdXFZKrZvf76BvnHPQqUVVsnqWBlzh6Y9LGo9t60WmG2jTbpklDQl5mREaLy0LfcvshCN2XmByHC55gHGbOo7AQyNktX1mQ15RNbHqjF/9rmW3jSp/qKVnVCiaxhBazBsurBzma85oDaI23TiWkF0jFqWDLhi6C3b+6vgdsz3C9PZoXKaxHD6oPUaIrAdfJxHrvR6y16b4him4mioDAU3+y9Ho5er+kAAAB99BnkJ4hX8AOKCVZVhUVMAIyO5K0lV/NxqMmpmoHSa5L0jEC/XnN6cAwB17Hcmm+f403RT38pjDFrQM8pw9gzuoa8FyI1m4vS9HLQsdatTSs05Cmyrbw9DxI9WaYWreqhz6UUcW+D0+6jBe8FFsu6Y5GlpLFBTstdqol5lTRjmv9S0/h4F3TOj4R2qpzGOrUMMBF2w8mArHjesGYRA3bkUpvih1vdLmxR5jYI1E2X4WqyeRSdsW9+JhechpOugBPfsvD8Xc9LYZFWSoxu1/DwLoR3gNjf4FCAb3Ip5t4kl9DnrFgx6hvNKJTH+cmSdIZvHzjmILjLiS1R16eDXz3p93T22xzW0r/9GuAeqbv30y+Y0JRw8iMjHtg+Ix2547cn3f/GBXoM8QoghdzJ2KS2ORK30Nm8BQz6mVXohoTlaIO8uLd+7V/VJtAOZxQ6nZytt0WvrEqHW2UYiFrfC/VL1drEOXiDGT0gqyflhkq2hMxH7xZ+7qqGmXcS8AC2RXRcwXQF9ijMSR2ej5HkP/rO02chm9OZgWhjRhoGX3bU3nUk/YN7yfFnC10OBNeXhg90Wq6Dw3/2KmI6xuBhqPs3WCXhr+UKXzwTgEpjHjXFy6FqWirxweL7tDvLz5cZ/WxDtnULSzy6nl0yEdqEX+TCABhkLQVh/eL08dixTw27QDaxqhJUIKL7IkMZg3hUWAUJkzp1KMm9NcDPGs75FzW3/RLTLDMQRQV7V6nItnG5d6CwzyYHjMAC3LUUIuzQzw0mrhA8fRuHt1WTrlS9/3Rh5//jl0n2ylaVOnfIwfh0OQ19wA8LgvVU28S8Jsoh8KKkUJM+XF7Hi3liSCS19hsYUF/WzuranCykO0GpQyeEgmBiMWvPdjINiXDzM
MDe/hz9HYmF3EtimKoRwbf/S4uoIEWNb3vQK0Fku96c1b8YGDaHUIHwwV/pPI/wuJnaBkfvFxew3CDV1cR567kQ9/R+ZOAu/achPecbbyeKZ3T/i+xUvP+bKPuZUsWpMK4X/DlGCljLejRUjDDMGOi1y1H/VYMgGDpjqxvHKbm1fwEoUEPZOWaJ9rtYs5r8bW5TlxKXOvHVddjCOT1uxED6voD9ZDTpNemiQY43K59BgkDisG7ljwYUXc1oZAXNSXttzgEZwLFVFKLR4F/5x61C7uV9gLM+kgsdE9N/vTIC1HZwidVEP9quhFyE4mXW71loz6nzbOZjaADd6oJ7pnOCRBvhbdg+9TcbyroRNDIXK6Uk0h+7SR+iNwL9wAE613GT5ih4SOJOxX/4tu81QuJZ3BLuwC2dEISZpiPST33mWtMZgGjW4p+aUO4tkqE0bKrYCitAk5DymzcZ7iETwWF2q3tzYu1k6Jljd1XSZhzUE5iiGwXJ/VBdZV2Dqj5jzMpAYu/8bC3sPKwZF8gvhwRpUYITfyO3qf3nSJfzZbGWEyfxQcmyjjdlG4hcC3GRr3wrSXYaAwTpG56JmEqEg8Z9x/VCswWYHYqIjD4H8s2O+AlDJDlEgwuqcqlGnXxgg6WSnN3WfPj5eSsUtiOHABIkQJWGjx4IQvxXlTQJps9a8avY2ALqdeO9FWIGohN6OPYlpMr4hGVp0bssHOq3rnNlSTnCvUmUX08ivUboiXgqraqbqJHeMWl1PP3EqdVD+OinKF+R9QCY8f1mlLlNBIt3HgrQmbPUyyx/oBDQH2mwOGmeGDUQdV2y8YzUwM4WBSkvvQvJQqqrdxK6Puxk+V4x1/XDRy9dIYNFzKngpgStrMarB9GYN7NHJhIm44nc6AEy3L5kJijqopminfLGc7ky39xkjuC9+p4E4o4MFQLzrvWJHH5QbrsiCRZ5PMPq8ZQZVkCNFKQujFEVyx4AlZRTG7jD/99emfNgIJW+WrebkghE9P/CScScq5A7r+pQB2ayxgEbT8NWH6NhWugVYVtLnm4/fTJ+vFN3Riqrg3sUv5h0qefQOGf3/ZPX+5v/4OLjSEJvzDJxQya2OpEVP1f6G1U5JO3T2O0oCMwHomUQmvtR1wrmKnkOHgAguvtIINVbTaVkA6r3QMz9zNUur/RWonOSGJLer6yVtAz4NaqcwdH3Kkk5DYHhOlazwaBTzBoo+0A7O3G89PTPRFmJHUXMMAFrCuN2ANQsZuhR09XCj1lH3Wdyr3Vicg5hWQpvrmYzwucXvc/zElc6MDus6duAI17osnTM2vYGA2rTv5/iDKGTTxBZW1YG2CKeulTGisqm5RpZpvYVdpwUDgyLWSJDGL4HaemzYHhpZiiqt7l3fgEoQv2AOze+AHl+DXuGuafAQyMBWX4zuWSkI+VQeKQygiPXuTg+ew8QywNhyY6+D5RaiXXCQMIOZ/asoGceCn+uAE7SGHymFo3M413T1J1R4iBRCz4WVpTXF6tRu3QWmwsHUoFjpAnHR+rIlA3WOs045SHWkVhcdhw7XntG9BvKtSlJmY9hOPb3eF4O5YYUIdxpMgg23ovS/qn8dg68MJjeMMiLS2kgtm+/qs3rtQgk/1zIj+8pYzM9vKWkcnOtVnUxTRafwEehvlME7GFxmjujPjiX3hXmRjAkPu3tIkTck8SpwFwFgEJUPQJtNjSb7/3aCmWyT1DuNu2zOp3sWcTet8Q1Ci+RmOkbvqi9KyyvRawIOm34W4iGvsoGfSOSEwCfbVseFbgEgjn0klJbZw42EYsQAABrIBnmF0Qn8AIr6iLr7WrrTBbGQAJxzv1gWahQRABcKIrR6YKroR6J6IcQZ1EV9YQJ9i8bdCbKTBE7q0cdSy7fBhot9b5HfFF3YHACvju6vQdT9eRW2IqT8DWMrAJqbi2GKIjTUaCRg2zjqr88tlqwi5UBl1cOMi9TlKyUjp2Uq0kzsO9y8tPfrCea/SSlOsuac6CjN
DmJBoU3uXUkv37z1oXObPV5TLs20CU/hPlHTOwEvVPfDZUF/E02byP4m7kA8vKOmjrkZ2sOcGcXB1y0yyo1xo3wmGfBpgAuMFnW4twfKGjffBpCF842KJIeNxZ+N2LtC8qv/WxHtd4Apvu+y+WNMEY6uNnAP1zmssTuyF/5pjSike/6BJWPjUdQxL5XcW9jK1YDvwnBh9BNrDwUhY21XQE42I2UVaUp6cXGwyk08McBNvhBeHl5COigljXlg85aef1OYnHF39APkNsXscyuLXCSCQX3scq2AYR+y8cKLP1OqUu9DwfyHKlBEeuE535LKRfCrtK9NaJg0RNh0K5/0Blo1gghL+ECBSiUQlwyWeHdzVpHF5gqGJWt/MqsTGbPnR6rAeeEVWFHXc8ks8X48u/2oKDW4x8XZCIlcoFErDW6yj1YVxTmeRDsUSQgMQ2U4FMpKIVdp+jbNRuNWSwIeAuKbG/NlsbQnpqcNogiFE1JY5gvHfdqKb7maxVS+CHOrn/2GoV9dzIvVWrzwvN+q6IkJnFaaBYH0PSVsqINU2ShzKhz3YB6nTzyAwpi636yqxfc+xK7sxzNtCDqDlK9PyKW1EGB3ozs+ImTq+u6aBpZzhDh4O7EUXMFlAm1zrJibU7PFw1EMWc6nQdfXgN54XvoU0DCqHj3P0wV8Js80ih9M8yfclKEPn9+c0S6en0XDWTR+FE19nNBGLzDXZtFQ+XjhV/efnfc2JVaiIEHeVf9oO75X684KU+Orsy7OEry17TKz+4YmXbdKH0lLrvENgL2uwIwe4Q51Ku/zUFW8aV0hgqU0dmoMgr63ZxD9RED28Ju+H9y6T82k2Sly+tbafSka6Rh7s6VFd9IUeEEi9NKQ3i/rjUiypjw0ZUHL94pyIq6/P46KBrdC2NrYFYe1Bq6KedBq/qVawp4mP4KcyvYVXorrGaJNUi0m1CIVDwpV68nS/UUuL3jb4ef9QZDLanZdFV8MDhZVlQN2Zta4L9F8Pyc9wmWFNtW+c2TsTxx+/JoR24ivVv/Ma1DN2ctQ0QS5N7De2ZcR0HH/UWLIknZr9PMNgEEphJhUzOSyIG22Qi6T6ES4y56kYnyOVMXurRxt7s0U/4sqCz6xyLMgvylb4ZFX0OzdOkekijXtJ8034Ebdv4DmDWCTzHzlivQndNyCL2SclCAZffi9R8zjIYBOziZ0wh490gizgGI+bQJcdpwFeL0Xszm7ie+28JfN9oA3I7plgezIwEiKLHqzw2gUfB4CFKoy7N/rahDtUHazKCyFbnq7rjrXxZH43n7ET8ux6TE30BYQpAYRxtaIvd2bWzaJ5nDA5y2WlA/MiG0B3dpPhSf4/fg/uZqFn4SZeXdOtz4kt6aYbuhYU1Sc8ZtL5c17pGXd13irtdJBpOFeQ6sXNFPNpRCKX2kAcrxcyjwPj/uk6h2ARCwEImkKFx6dk7jh/18megABGPsw4CodIYr7VUNRd/zNuRAIH4X6jV+MYCpF1pl6z161m5G1BcFYifwMiq5yvrP+Kk4D/ydoMvnWINgcqbufgUsrZ3Iar5662bTpHVmzKQwKZ8GAFC8o5NHvWAWOeEBSPx8cjr3/eb93pyX3AL5MzmNDOSs/yY1JTa5eyvW2UGnE8+m6vJHM5NwUpmX5+Nz+FrPapwXfDlzndypR5k2p6Ppj7RuDGWYsjc281dxVV8xRU2soddffDoxSb+qei/+EyegD+ohQyt7I2gukHe9MxEgXqwDli0wS23QWJuj6jywZGf4SVpIGeJF0m/erfc/Q8ot/I8k6TiaFyfbqJXlLgi2m2vzPX1JcLB89M/JqzsNkrY05li8rDFvRAb9cXaRL//NPEvzU7V2S3QFqNYNg6CBP9aSfLZQ51/+s4p2acnhVmLIOnejGL40twq2jJz6clnd92cdHUdwXpaMliAiSlt3Xr52dvQjhUyPUEHSJLUhBNOUnxvrtW2f1i+bTokVgoloverWeDhpErfYdsjqSzKOS
PTm5PJTVTzK0cA1WT2PpDC4oALZNnBMwiV/RoOHmPIykLi5mIeeyvLxNGN6Df9zr59XAEMJmBXhWtnv+2nzhvVrKAAAAE5QGeY2pCfwAiu2wXrACFPFdDcx/QaiOtkMxtOL/G4ZlGWfd5t1DfW69g3/5+WIu0+1wuFSxX+FBtea8apFwsbLXxSB3XpEqqmUd0hSdGGJ4yb2Fo+bdu62gEPv/+X4vuHgaFWj15TAawoutFnTDUClpsMYR5+1YWLnTROpFuZw0hs/HpGLmf0RDvMABDF9s2sAHjvNXTD0yWkBzVFeso1FOfacr8zOrptq6llEYYoAK9JmioO4U+yOgyELop1U7hzUB3RApU2Eo3km7NdEINFF8+b1tB8rLq9+7m5+p25mgxlLa39o6LEIx4YVONtR8NhgWRG1xmEvylH/q4QSxO/H+rih884Vat4BBoyR7KPnvCx9uz2JwMwCeEbelQsU6U2PmiiZnOzoLYXLESdKWCVvcwTAo7De2YEuY2CCMRFlpDm01VZlPdggw5yYYb/sTEPQCqtGNx6jBufoPx7BfbB5Drl/3KWRXZccCpCweaJFUjrhIR5obnR+oLoWevzYWL48mtZL/1YtRWck42WJCk38DN+X0T6rO4kFFKqmDdSJMCjab4lpnGyvd12tuGvWXrJ7XKG+kRnH45akeoht8pccXy9GnqFMzgG/H2JC8Bajg9t5v10yr41jwUNppoqI9oNdf632xWDov2G9PROHO2dW97fbyYeaQYywSyRoaHjlWqihcsCqKsnh8hnKaFgsoj8xWaoCCEzy6ab1XULUjDDZLzal/o334vnxbsJ7zvnJMZ+6hM4izCyUGgG8e4JtGz9Wi0cwHWzEsFQ0JAAmQS8jeX7l4fIX43eLlveit/SPsQ9WfMwfrvuYEufld/Yvtjo585iWjvp3XRVIBm2nxjXVKOaREKJ/NAPbmWl5RsekKD9lN2kL+cRjWJJT/Oi7DYl7+aopIK21xH02m27Im/ZhKPml+rsKEhjN8/6Ru2PB+SyoFNIm/sGumDU6A9sUK4WkKm/mc8XsUwwgdsTOrl06HpDpe+49O9qIcL2dRkoY6zOUJJrIghFVhNt88G1Zbg3B6ItuS1N9QbQIsxbJOe8wiLvRmGTtIkGLKCUyYy9a+H4mVArPhOfs+uL8tAUd67iOFqWqYuNfWj9mjsbfm6XyWBD0KQO9LSnQZgExc6pnYRAXh3beyZFKTs+uxyJMkG8S0bAKAo5hrRUGozS8Q2Wt7ayKa8VnOamKQhizX6jGSUjyAxZgwyj1jtRGo8PzBu50B59Pu1XEkvSycKDKkqJi54mhogbNzObW3UZqTZ2kRCPzGssxTYzqphhQgZyGGepfjc6SFQwvkl2ha+pUFdzNshEcHDtC2LdOAFDkTS01CQbFWpkVHbULijAspyM8ftrYlQoNZsxn1lviwk344XV+BMGGm4lRH8LSgxraaJSgZoXKuGjIRO66oCMHO6Mz/9eY+TNInaYyOF4ArEplbP3rYfJmKBkg9k0jpV2zy3RXRWZZl2/4BIuudVcdB93dTDYRjseYpKRBsyWcg4SFmTfhWJ6dMbtg9yP43k/oNG+7yvriSokDtZ5MltJRMY70kjt7lP299PIbQ/3H5kSBIIzAu58b6AtKUToYtTNRexj5qwb1U4iGtXQ32k8Se2nLbrEFx1NOIsVlH9KdgpNQUmSIxLPOIcuTooZDBBZCqK5I6GkhrHxvCCtxUf/iXw5bKRr6c2WnKnAAAHW0GaaEmoQWiZTAhv//6nhAA+HBrNRJAB+YF8BRWp4/80Ka8nrvvbIJdiw07PzP8mdsVMZ25KIRzfqgecJ2hyf5AkcM5SUQoC6ROqxZWOG9cQp4g2oZJwbikxdkGIW4gPwXmdiQykZQn6Uiruea2sRg6O6qcPTowK+h0uR2X6pTVND45jo1BRMAEHby99697cN9CREwIMWOzln2rHKlTXvid29C0ICbjKIlM0mzK
ENoXNoPJskotQv4c+SmxlhyOiEpttpmj2iDakL/eiRdcgkVSH8waAbvOB43NIqO/vp7WqzMSZMqWdG1FuxmWevBRPJZewjhGlssSy/eL+E3KQ2dXB/qqsD+NjW/MLxLCHM8Ob6fzO2poBcL3roKRMYt3bJ53voV4tCwgaFJAs4tf9RNwKmY08Jw16pdwQA86Je1tPZ0rTN46A2L8NMYXLpQvA5fQcvdByHfjxp++xPDIZl9JlLjNa+F7eyuJpnVj4hzXoh7puIw9uTg3G70baVRJEnwB0u+7PQ/6omSTtniyaSueE092ZLB3HYYvSHGHRr4AtkRWYK7ewUNbhyikglUGSH1XzbuYWILlKlum9C1tt+GN2ZAYZz4EvU/+/sHvtGFeV/LyNL0wMHZawl9nH3FcmBrKv3lrZR8FXnbxjIAvTa2Fd7CtPw1rzwQbUHxOfnUvtw+q1Jcuk59M3zSox3mI388ABzhL4i9ZdMJSKDH1hRMfg9EuYuSR697JnSMAABAR6HqIGGdCdmEbV54eULXJA+l+UwqRc1+U2S6mASTHEsy48zmvOnNwGrU+zNvCdnD5RSp2PiRoHDtS5cMjY2dLs1T4yUF33PsEnwDwyDcft3XguqLMRglIkbdDdu2LyMfhIQdQBZHgMy3oUuxrhPiz19ihgorKMTYPyV4kZ8iA1/4/4UcgLVNGPorFaiJPLbR7xOAuWD6ZIvm5RkTbO3B9ulOwFpUl6BeagbUHDaGRe+z+7jVu0HgLmzyuUELfHU38ugSTfc2YoZw1EE6bfT5ry+GKVRNIQ6Pzl5DmjREL+Qerhniyq9gDFaF6g1gX6x0bdfx+gmoKD3JzUd0gDP/gWj15gIt/Lppxhq8PV8lkj+sOdgHKwrav//EtnI/hcoJD950psGMLUU8aT/lmOpXEFDB/lL5hS9G8DG8XASpwfRjY1Gjo8ZO5V+xMiG5vo3NUY2FI+2zCmimmZhUvdZTfG59Uw4US8JO329FquwiHT7cR6ymvcQiaVzVMT5+95L3OGWtiHkc1hLufrOliEkecrPd0s+WYZ6+g9+qTvhA793duL7kFdEIvXZ/I1rvloXsPX9yjDJBB4EwBjCiahCqHUd0drfIgbOmxwUKKjY748OIhUbUZI8v/Tw9TcrsFUoF6HpfNeSSidQvEzE3c2ynvOgeVylCYz2pJDQ9wVX3O0ARztmxA2XIOODT8JhJ+BPkkrJBhI5WM6E6agt70X2r2TkiskTBlmm4FXNw2ygMsff8cl6stTeHEDaN+ILmpYCQwS103eHSVDJ+i+evwTswc/3s+pCF1eZeW+HWcshtU6FFob6J9yuAf2glFpBKuKoasoM8YfsaELiZWDt6lrXs779U4/nf+zWtfsgQGGsfjXEUHXVM5O0VV2Pk7kUrE2uhYvstHKQKjpHOUTa1uvfzS3+MAcAn0i1WEHbE7MvbmIrF61KeSfW9CEi2WagkIFRI9FVQJ9m2WTG6IM7Gn4ecU0iOVIdoBS9YSnY3NZ3NJf/fAX/wLgq+7U11TFChYu3vRPXaKqclJ1XSBgoZnkNu6KFi7+v98/nsmqxDRyFHcdR9APRc2a70QbcFLeCVdB59qldoqKMJ3VwUwgskHOgFh70u4nbbjcomkNqCEjh/0Gxe3gJ+oUl/6i4dlcOv5AVIYjIzi9K4yX/TaChS2se5/0t4uzts08pkp0JIl0CHO5SiKG5rYyJTkHxuATRfDR9eEzwpnSov0pL4O5iTWW+H5ryx4pI9AIQ6rjv/UAgn8OMgJmDHSlVyuaGTO3YgKR6Y2jhX/Vfi81hJE8wbj3dmo+F00+SFigfbpt0I/5cgqMzSkfNhwgZoen8ZQd/bMsSf8OnR2ek5/dUF0/nMXjiaRUXujClGqBiuTtSwa0t4/84Q47IOARrSjZqA+8IuCwOpF/j72/xrKtN8Cc+jTHXcUXE8jLs60NaWHhhjUjBWC0R18O23w5I6OIV9op9GmeSRRiY8g9XuixocJ
VjkDcd2W1mr7fzs29DyLbJ44BbMNvF+ukbFiiwrnhNeJOH9VHqik6bH4wxILeRORZmvlQV2idZyEApCco6YUHj8UvLoqUttWm4CSGetu9jt99VNd9WGngj5rLHC9r6E1nRBUo0yuYw4wenX9VMuu6S9Titp8+2KKVczyciApeoHuiAU3o8FjjyxZCAUwStMn5Fg3TRZ2gtUojqgslXEFhj0vSXMnlAunSORJEkGEMWXpAhZxCzqEKdeJhEW+0AGgPntpoY2PZl2FZI7Nv9WTdbtEjAAAFYEGehkURLCv/ADOQxiIjH4JOtSAtzFQAB0PrB9EVLImJrxIDPOKKhEinRloqDSBDaDQUsqXIZ2eyVz9DR2rqN/CiFc4NHG0Pt2TEKHOB1V6ZRhF+FrLvWtZaX2NtfFT5eL4DbI6goBUKCRJHhbCk0wgFOFYE3lajEluVYrQaXL7VgRx5gT6Wsk4b6NHw6b2Jr3LrhMy5z6ydr8mf7zNWMPJUB9N/XzvI6xsM5aN9P1B9sFilhsOwi6AvM3s7p3PDs4VhCXMhxmiFMz+A6DhC4ON5RG47S6DrXhRZlBP586wgHX1eqPQdmtZ5T2xJK/cVNlAT6EQCFV8IfkNx3jhFCM0znbNicsf52EgNLfDvE6UBiToQuwkxnwtU93pIqWt2ccnOxYZUjEZGk9FOBkAAd7e6WNtV52l19jlu9A5/Awwr8NcM+dIQmvRqgAIV3GJancrY/XOJ/m4TDZWW54/PhiARg2a+26GV5wP53qi7X+8zG4wgqtc6RBeFDvIJKzmla40I5Kp4PTQBhv6F2i/VcmIxwApGJmbEEfp3zEkwp3arE+YeodV6xYBSL06wTX8/lr6hIp1ABuGA8rX/oPyNhgOslOx/VuV7yvenXtuOZGnon631i7RRhnu+vAd1IEMlYsoLWJPg4IxteT/4cS7e2pJKgU3MzVgmkmAtpLNzNLKV7TrRsAQGGBa9cVcRQjwHBwpWNvr017VDASoJjREQavmwMF5eMRb99b8/tM33hOlPfVVVOIkQbLg3wbf5cZR1NrzG2AVV+54XYfjQbdmH73lUHthSMqVFDtr0/P+sysKzRI3P+CHtqyKBziYFO1nJ6ZTWJiQXE9nD0oxBtsOfa9XZmgseOfrTWsl0aJsBTsP77+0QaGjxx8AYUtqSPuqBkmWyZJg7bvGxEt6XHPENFTVB0cv4oqYEmm50Et2yEIgKe88cvb9bmPapjqE56J+aW8Gd9PQgGAby71P/6twjQMONct7F8NOiV4HogI7RyetODzvwkuvGrZwIaUF3etRRFwWgHTckclr4BRGsOJTeUL3BpSUdfA9GZdF9Ga+i3wnL4rz57xFt7NBcpmcAhDCo3jKyhza99VkMVrwBHDo3tcFtUJP1UG00TOB1PIZfyvkuSBT5KLuB0jwycZ5I+Cgq4YZY3SU+K2L4fG0kv4NBHWFyD3dGrOF2vZZF2vDCrPR6o5VSY49C7L0PwE0zSaenibcUvsuTgu9eeDTqKl5rBvFZweph1LlOXza5L5G1hfwUbuURGuRqySF3Gz6j7ttdndfAT+XlrSY62jbB2QAHqewD4Ks83/OqCQesx7GTGHxAF0h4uLs+iPzRuhEcJzy+BZF++2yyd3u6G5r1lhGcA0H+pN8hltjFDDgAPaedHTEsmg1e8Z74W7SBugEU0xEg870MihpFMqReCeky//MEkOBxflUq+mx4v2tYWJkEORZs3VlzOJDuLxG1kmJ19i9bCGSbzVrwc2pBADgBcelQJaYAAv1rQAyanpmVv6Cd341D8c1dD6T2mH4HeInNuGe9FITDjIsn2uRRTS6c4asGAC/5MuGuOt7oMn5oF3OOwITJtG2sZ6dlmta8yit5ps4LMWmOGw/aQ2bmdsCGMh8u5zfyeABDi2azgWaRrQe/4Y2zZXGVBAao5F6qy7b+erxG+4ih5DplumAGccT5pEzAXJjFTC/dxR9YIIR70bIp9/W8BbyhaRw
mDd/14Co3rrQ3LU6XNjaXf1a/JfFIp0DLJv3gZjsbIGUBjCa8Ew9coPplaBOYDlOpVHufN4UILK6tl1hej+OLhMKfSzdr0nlIXmvL+u3b3d3u2J3YNx7vYxgRAAADowGepXRCfwBBWk8v5Zfg2vEUYNkwFAAhsih8NE5Qd3TMdXdeb/2R3hmFvCvubG2FlbF7M1kqv/vJDA6LYQXhX78l/TLNpx6+Ukl+1wlm/qAnQAa+8525Nizyh5zkYtC0Az2CF5KFg1z7BTfcquyfGqZDd+XCCsdcmC5NnH0t/fmV26ZLlGnNpD88JIE89tE9q1n0j2ril9tf/Q2g63MlRmjB8V4MToR0XmeVhI90xHPBTcfMlK6hiAdFAY6wPiipFa6a92l0nhBvoQr3TOY6gOvSacEpuTixWxHJ/g72jXj/PmIdQ9y6GAoAKKinUpjpnceO6egZR/IRZ/ynUjqweHQ0J0PtQjvNuQ9yUTQesb8j/Zetwbqqd717oh/f9LNgiRbC4ND9Xniq9PJ+OzGvUt2oQ2ZfTOgGMwHzytbSSATrX9zbKYK/PFZNCTrGMf77F34sRJDcHsSWNJ830yZkPXrbiU974eVxk2RJI5BR6z76PFqnhMFe5RLtOkAUkzlyBYwdlsrq5XOfBxEoG0wp7B/xD1pwLeYCFqg5/OIPCLuJ9pX+WFiJ4tJrlmar+SkbSndymO8aETJ/8qSIZfQj7THaJu6IQJQZbfMBXrUFSaCeKwrgZ5FwXa/mjR5OR5F5zNMZGpx0BYCFNbr8rDN9b0Y1BpYWWJGEMdaOz/IfRYi9J0szMcaxQBNn0hHcB6gQ/I/7xGmveWuQ5a0fQyV11toSo6m+9DzJOosWtCjE9V/FNtIM+uxodTUlInH/sJnU97lPJR3pOUcpbPQXSAP7auz6dR8X4EJCDBv3VvZAAvEOET2ybRQCyAUIwj2x+kMx7ZZivlGSvfJInW6jNjrT0BhIAKkpLDwFhfsRXd9Nbc7lSgzqoInIya0+OsoUimeutHUv46B0AJfKeqNED1wEryDSP6A0f7Udlw8LOBW6/ZEOZF22bGWbViDaFMvwRqDPvIoEGxT7ZFihb+c3jodbDIeS+KEIe6jwRnkyl3IdQ1R2g++Na7ZB1GIXahqEHYzfA3V3cmQ/rFXyov2Qb9IHYWXZ+vK105esW3UEyUqQVjg31RUfFdmN/Gd2xEI/HNCnUbREbM7q2EUZdlLt9IKIDJOTQH1qAgaDsQFfnGXXg4AhLzvuFV/XUs/jZ3TEN+eW4BY9SBB9xsY41/a718DbQFhKhcpRC63RpaYNsut8VK7gBcQ3kzVAsYacExBTp7bfIXGZAoNgN9DNHsOaxmJmzpMHBoUAAAN7AZ6nakJ/AEF21qCoHhePKO6Dy2tUiST1g9RBW6ooAA4caqzam8wvbcs+c6WsuSdDzijDi4rVVr9W65O6G0SmOHv8I1T2b0yhPqOgEXfNvcyvTt8hGEEFp2Zwx9qZosuDOn1ZWwC7joH4gbQy6BwmtItQ6NDNN2PdjhG79C1m9f0LYzTQiVG9Cllhh94V293PdblVF1O0At8FevGrJdrWEH3DnU/kzV149/dC0dWSn3gEWX590YnizrSEOELWqsbakM5U8hQQXYef5REhNTqFoMkawTN57RdoZzf3SrtoQRt1/0QfuOHudFKWsRvTDvGm8aGnFyu25B6fPItpURUuPjkwZVBUf2U23lqBs+fOBKt/jZBid+4pgXcJHLhTqX/fvIUQkFQbFvHS7YBfG4euDkSImqKhsY5rK3fNBUzEOUVuFEpdRr41p5GL2231mPrHAFLEtEqTAzPx0o81jPDuqYvqSJuOe7WYGgB1zVel6LWHHu6Q1zvK7TrGOy3H75aFD6O5Ym2Z68Nq6HQiGYRyaQaEY8kq1dWzJ82Ihn+rT5SKzwacTR8Vu9ZMMTfckkVAoYKtqjjvaiT23atnynlxADCgBEMR5rYeocSEttMQP+0obIU1hpB
A5vYbEh5AJJwjaStIbic24wHzFKTVjz56E5wqjwukZkbwjd3RPraKUIEEG4qoRSucTcu6NGBxbkIVdeYmwHXzSOoiRz9btoucTr+W169rPfQ0Vq1+Q3BgRmYAuMctCORfBM+QR0AYS6vghmg4aGgTqL0ivF40SYqG/uuQqdqMykvItam/z+b9QUQ+MjUavDD8rp3bSvFnPedSMZEQi/8OzERy81l8vHTn55qQWFaLX0FVbSyajbWkZvokE2Gb6XYzoQ8+SNK7ZMFzJWwhKdbtro8xIiEZSLvZK+WEoodKGFQlFVcMLKGCHtQv5tUWgeNz5tEy9zei4wqhSIo0HgAi17f2NKC28xZBcO+E6bVkbR8eCp2DC5M/bxEqzsJIva1KTRSTKj/jiw64Wl2EJCWB28a2QzHnGbl9ScIWbyWJzDsY0UuW4xjGk+f4MC7dKtuKsO+8BLMSixBDG6TDRhSev5ks6202JFTFPAhDq3OpWE3+bsomY2oxOQXSB5z9NIl+MNaJjsnooYODoV2S7CLAFnGjlbEHtBriS8PGhOXDkolYpWnAAAAFxEGarEmoQWyZTAhv//6nhAB0de/gS27iADIKM5Xv5t2U3WjNgh//DvXYMAgGebiskgbxy5/AbT8MUF+Qn/E8KwGvr+t09z1EMi75EC0tcqTk7df60OzAkqtk4y5BTBXHHxunXskj0u8sIbe69W8FkKtqVjIWDPBqWk+LpcOpMPNDbtmZqCXp30aBR4eqtCVPdiJSlA4pcSAOdK/AYWlG92utxPD1kb3UK8e9W5tjn5xx2n3hzHVgkCgaHPRDi7MRl2gEP7ystpm94ZA3pAFxPQZRHeTShZn33gPV1S9NNpFYmlwMyVQs5YzPUnDgiis+MjIfR5qBf4hyyJZoWeycM4QlPySyuFue9C/EtEeN2kD4UBRUhUU0NFcxwWQUv+dFPnNVoU2RagcrPKF1tgdSiTHjbQaUB1JPS+Vih+Ve6zbJYBXzhu8sCp0mrEAZl2HMc87HJiGfThgl/61RJqY7qbRjwUh35TA76JBCZTP8bhE26YkSBB2ZVacxKPNBsoi/4WYIF5D6reErgqgw8U7UrCiuoBP8L8ANl0yK4awMi0JbOq9ffCEmEusjZ/0Mk5Q5kkgQ/meVfmQ4oF/hRz3YFB2jH/tgHD9ofGYPUl8Usod8MxTlXBzGMpEhRtG9en0SrgdTc+rQH18Q0ymklpy9nK4UqWiSs/e8HNSZpknVx4kiZEtJKTxUxFlE19FInbUtttABvUgq0UpPOSgHNZ1maoWHNc5LdofbL8+lhek1UyXGcwgVIZ37G4BiTbXwTR6JXqHPb8wmQTcurjPG6WowroL4sxvHsyZSOdhfroViit0VTyVHR5+G7eyH55XowcB1V9Xb56L5NRssWKDMNnw+4dSoVnM8HQ69YHlqmgeHBbURMZLYjxcj17MqB0DCCqpokpbfRz+SboYWLRKhIolBymnnmyTbUXLDRtFgTnudkrF3WvliHeDBy0G1zMYksjw9ORThMyVKERYfQEFt/vFpe5nztWFNX/WjhYcaoiZvc2Er9GzU+Fy7ZSgEGFEStNmpiPWFI5BSkjV+yzWH4BflYHwv/nMBnhSmdYccrpXZjCir492CaRsBrKPB4KMjBAVTKZXIWQDevL5vfnNz+Xf54T5aMTNUwmTjAfDSxkDDCAtyAfyzaPQWmOuTvcpWAtAZp8sQKKMYDn5y8uIYI1+TvdrVZPVXHwcbNudin1vSotf6NxuhMNPmVlZRbMabXCRq0payff6pTOWi9WXDXC77Z88ejxWMZT6T0zbD2JvWtDaKKb5wIwvrJwnqL8N7mBHrd4Ox6EQXxbZ8vqDIJmzYZDrAPwEttB666kQMesFEpsrAYdyuxuUikznK2H6BesGUALa7DvCqb65N4piPAizBMJIQ3IUsLR/j7KMdgZYYn38A1QjTAQJS5abBf9sYQ/6Dk6c04Di5t8SU1ZVlV1cbtq/7ahf
z1cAd3Ke+IB6O3jzTzaF7ux1Zx+N9NvIVZkTUVBbm84LsEMpjeKaekJUvHxIEDMppZGpENdR0TCTKbxcXwsd89VetXq+otcqdw5Jo6IF/SO0Rh0LVrrwN6Eh8vKF+801A7iDP0+khqkBbmb9yeCaQ/In+gTpLJ6imjKHn+eFUDQjJMJo7XGs/hT6o5yaHUO+9r87TpcRdYdx7YgTN/OgkpA7TNrhx754nFzqQb0dSzzqD5jUjbwk8Plo1vNDQ++WUIFYcfmo03cyT07d+bM33tP+Ox4prriRZmlru9QiYiLBQzEHymalCYKojNKrZDPg/J5xzzEjjyKdLZfcTeXgBPpI57zFfwoyfoE7EkiFR9fu6Ij3y0mtN61ox2+asgKyw4frk3Ko+Vt5QbdLLGgASZ87W7Km9hYn4ExW/LhscVVK28jN4O1TNysUXTR7QBZeN2BQWdlcvyz/fZCEIF9+YYy2fqsVSZe1hCFHaCIL8mU/BJUQFh9tA8mSJP3+wAhE8GKNrVTFUE/sM5L4z7o4CygAABENBnspFFSwr/wBfrDUUd3u5sCLZ1FCcpg62ABUndSS7m1W3RisUJcsjxzAAuSrCbf95q695VjKIk05vefwg2PKntqH/6nkqVWyS74H0ijlZkbV6dbXPGgK9BmWKUMXjgZApB+SEiooe3iviLlpEDjA2zAZ70HueefX0Bt24bWFmof38NelND1FYasIiXzss/TllPDzK/kTsdRmkwTlF65Sf8BtzhmSm2Me7N1BHQkAAOoxi8JX6Z80QLsA0edhpGH8ayfabFD+JPHblfhjZT5/RZrlPSMiWxslbDSoL6+8P3z52Bs9i0NnKQVyw68oZAAlcvg6ep8PO1WiUHr/xP1CjUrFkwOvBV7fP2j3YDtt8oedA95zOAcqV1KEQBRmYvOsG/iTZ/MJZ4z+N3si3dK7IESb1CfPQ/T8vP0RHWon8sKJUFii1MIYJB2ZhrpxNCj1nHSnsW8x1BE8c3TmrCGkQMrtjSQ/oBmwY2j6OJ08GYKFe6qSdfZjyuCoGJffsYC2QdXgEkRQQdbUGwARRMhLckwpi/PExM0eG+/ft9P/Ym0Slbgag4LzYV/qMVoOHf1+w56tEXGYOKL5vlsQ2WA/wQbO8tl2JB5GQhBQ89DCTgRgvGxNUVySlyOcjEJQJ7lCwV2901I/EZi3Oj1dpex6BTSO5EmysyO8v1QJ6Itd9WHkNlLhDgZSBHVIGv9m6hqnPA2eE0YYiirnKPdOAGHAj/j/8g8vV+8/U+1+/tu45QQJP+cUaxvDaOrs1TQ4DkVuuk6J7VYW3cXvreA3cds379ZQPEkFP4vrQ7j1WLAQ+18fkUxQinpSMHkPhXqN6sdbcnaIksgV6QWy/QGAY8S6b6dV82/e0slb/CWXYLFzC36J9m3htiM04dJrjPDE9yiLF3w3BcFz8fdkt5/jUKYNpGpoopJ33/xWqXY2yYtFgMqNBLhbKR1WVwiDwCttjdfCboAx0hKcm/J4PsgiXrhUejdslGmODLgPEepRCxGV4vIWZkcNDQchelwe5drcC1+75Jy+o3aKyotvXpvnJinAR9f8wnA/EkBQygmLCmeDtbBvZ2xer9kNTMcxGT7NLp/T1alv/UFX7vhfoqHlNoKNU/zgdDhTQmXZjVy7JY1JOTPQbMlkkLP0WmNUOMvAE0IE0xgl8JB5/HYyGODroitngFa4CuMhI2S6ZFIqoPEdRkpzpR6Q0ITwBFobZ/54JGQ/g0OuJUHdOL5+IQFagSuK6Y9AsR+1JOrkke138sK1rYJW7o4QdtVDYn6j5hyWBulxVUxrewT79mZ3Ec3TQUBSql6sRZIyzTqgE0v/Dy139IqQsrgjLu+phK7DZdoI0PA4ugVUWX3b6hh+Zr04jZ+vVe75YC4PTeRwSeeoI3yQWBXJ3DBzQ1RLfOSidJAz8ZLnsdcB4nPZOOM3t5ckRTJx9YK5zCjFz4xhvhI2HkyF+zTckYQAAA0Y
Bnul0Qn8AQWvIBBpqU+ICWiwAC9cqq0c4PdUrtn9OEYyvi3Sa0kuq/Uhkh47cG5Y+B3UtY+yawEjH4KJdWfrfM4+16GeIUK/+lBLLAmk/s6deLzhtnqxsPKAbpd3JBXgoAO6kNvpBh8/lr7THciqyfWhbUGKkOKRNDGeR6iTtypohlCTJ6vqRixbR/ZH2VG5SMWUBbwIz9c2/UYasa3TPqn2IbXb+jDFJCb9oRPXEfMVHbeJDo2tmO+yUp2wQ6e7sk/5nRyw5iiE7WN3DbPQ3W9brswMzEE13EUMLaVmHg4mURs8feLrCME0HOpERVErHgin9gx6hde2XRigg5+u2RtooBu1J1QGNHA7CQ/PUDluLwuMml+ETAR1BQA/dik7QZFd35DdJgPKBTFhTlPe6a00SepRDaK4cmSXtwyHH3wCVdjsAe5sTz1zUAjFkYTPkl1CAyy9jeT67h3ty6CbyrZqSeMuzcbutQBE6khvb8RD7fjrKau021n36iOu/pfPUm0kYRahfEYOHE9vlRoSbWp0ePI0Mws3263qGo+p+p7dyrxczvhMVCQnuca0tt9kK22r2ZLioPUTtnKAl/kKAmr7JNsjkMLkY+MZkT0n6GFWmxJEyj4Qci4Ckn0oGqFIW5zRQ5uCKWT11HpXE0S8gr6W9VtjX+8HpM9mLmwwlDxXWGIAPjyscgAXs/kU5VnC2c/ItMs1wmieTM7/zMtx8ws3PKDVDUL3p5/tobImsTogAAAi0F/iKj6XTaN1jR/LnhgsJqvbB+6QLM4pvYnw0dYXqBZi5qr8ytsy5yufXq22hpjXyAoFDbCZL76JYaYJdYyOmtKV33Q7Yv9zmba0yrfIWbdXP7zXNJJ0EX7SDBJLE5BMWNCLbRmcm2i1/03YKo+BhvwU8XkZ/HWETOyAvPSNkbD1WpIzw8TG3tejLau281GRWxthZsGXevhyUqCD9UWhGt8EY9hkEirldubhDw+lOa76fX9hhV7K/8mwGq3NkyEFN/ctSh8qMQj6y2tSk8EByqoREcL0P70RfG3owcjSOiFXDXX4O7gFZk163f6z3V1pWqlvgi12oTMUAJglN7DMOBqYl5aezEB4IC4yYaMgOmV6AAAACrwGe62pCfwB8UQLG+iS4ALoFvS4Oc1CAvAoKeL6x4ctDHFrhuNjpGCyeMIPRkosLvIKVCNXviNoiJWH9N6HdhiPJ9iwtHpz/wlHp/n0eBzNHJM18uEuO4y17XBjiXJ6VCY0lFjX3Y0HnowWb4G4cdRy2FnJ0OYQzQg61RYIfcAsxzk54gW3ca/iPGlkZvCyFBtDGsqfok7hLZEo2DXMIURHxz+zd4Oee1pMbi3W1EHI0C23xNcS6s5jQugROwIR/BWzDz4/+gCBwa4N/3zCsPPiF8nZVQkExx6gBmQf0IdrEjhjtkvpczNIxqGMIoVrkNFLpzvLOaAjap94rqAWpC7vrjBYylJ3O3lI8IDX/SYSvHvjzQNqG5DqLPsdNzRM/b7RG+4AgTv3wEsad+XfvFgFyhtt15OEkhCMqETDXHfuCUSXSYJJ0Ur4AUuNvHUIOYtvYpsylkS60XcHSqTbGNJgsBiQQS0ERgoCo9QA/jU7k8Yih2w/VaIFxgk6yjEpy4parwDLVkAo/rSDi+Uz9Sb6zLN76c9pWs3EeA3+0QE7i2N885PN5Q3oD49euTJ0akAxSgZ95pK50I3pTw0fOev+/TFwJdvXx2deEJv0z9avneJUcW1JkuT+dtnZzp59OUNCZAL1ypblT0GcwEIHONGqqpIPfp7eEAMR6aTBXKSuBT2PlRIWXSY3Pf1NBVTaP2PWAqBHjyrlKvozPgLIYuoqinHw7yu/kV4BUyXW0IyQU29Fh/H6CDO+oyUkaePOVRv452+oMALw954jB+T2WhBUIcaYhQ1DmiNTgkPBwarrVFH5XAMGtimJdwva1CLoq8aZxGR3EAueiwKOeTiyN776TYkmdidFd5/v+EEskvfb40f4
utxP6xt5Q1KacHwVz9GDR3khzxqxTlNCKAKnwcAAABSVBmvBJqEFsmUwIb//+p4QB269/Alt3EAIGqzX/srferxFLKiicaG5pC/FBOno2TiOJSqkhZi7pcqr43jyR92CKHAwzY30U8vheA5vWi+3e6xFabxxCckbkGfRrUC4+wriVrsU1DYRJwXJu2VqD0Yg4jcxiuhOpNyc/5xdMaIHv87lVMcbGhhVlinn/H9FynZ7BfZ6+2WvmL4tT88iY4motkqYQeUpMMGUqihzcvGL02sGkBSB8N5uzo027z9fIxz7vIiwQnPi1+vMlW2vj2opzIx9JywMTuaNqzwi6eEpjx85FL4f4r/XimoJpdslDJFxTIHoJw4+IIxxsy5paDxbkeM6qGHBt4lWg2PUqRIMaV7AH+fJNv7gf8GQKeiysgViPGjhdqp4Jx//XoYPklXwm+DUeCbYwVxrBQIzQW1NUw1lugR2B6Xt7WdbA0wV9CrUIbRXpgKCPW23m5KfbqMsYDLPc/Pk1ENiZDQLw2rLii23ddAhQzIhMAmSJMNz7A+3+EkYMiietcC+LXrb8wz81E5dqoP2cb9vR0uaFMi1wQ1VSMBOob9HRQG2KBywQrBXQyhgGiR1GPiDzh0JA3BfKYJ9A0ly0P2/7PEM36w8SyJVwAByDyPQTjOlvIbastvrHPwS5wVw01kMmZ2fXoAcITUmMDJecZbFvcF4AOBdSte/PAZedshl0YBciJpTCebgBKF+3DIGQpnNePBPEQWjtVcHV8o3JMPNs541u6pzPKybb6bX3V/mH+T2moqhv4TAwSIlXMPvFcb7GmWDGQtHRv/3uuYjsQkiCmweC8w+wEjElVISyTj09ewSmzLpY+yjXFctLopYogJ2ST3KBE9602tPO5A9tZ6dlNkgU6/IFzODDfCWplPbtHx93k3VkbwuWMedqv1EU0kWBOmGW0poNxlgsbXQasFq04UNpv/P8khwhfMRoswT7xBSeMHIVL5eKgXizriPSs5sl9x8y6Y60XvLvINq3JpEiBYLdktJ0XIX+Nhil0hSfzXrYkvhxOCu10fTp756LANjjbl1zdzhqM4536uB8YBITU9ynhEha+VPm+7pZMXE5Z6qADFdqEXXk/TonELdcGpgSs8Om3BdUPERhxantHkxzedyeBi6LF0wDW/DkMWgZ0TyhPsO5nWy41qk3/JlR/YxKidtD8hALgqmPPfOh1AbzeSPCUz5u6tTt9sBqldfc+1SZYciPXK6nFD+hTi3LxROpPMMPoR5PtlAISBkyTb7e6oB9SzL+loZadfgFGqJYwSDo22n0+4YvqR/9c1ILmNwZWcEJhQyLTwsL2JMYvnmC4UicxjRYogs/bu57pWRRoz/EglMobBR9y8Q6kU2azco0kbvHHNzk0ijvMKK7uXdXMcd5euOTkkBJgPHBIbywR1msgVE9IN/J8CfC01Qmzz1GjB971AA3W9dihWZDn26puUqLf05uUx0BIzFYnX+XZgogItjZWsltd9kGgB9QpVSiBaqnL34qhmkAKyt6oA0EiUX/fvrFj/YqR8Px3oFWNz/QmrQlxV+Euj7pDM7qPd4yAbWMyhxe6pw5q5dF8qyVJ2Oxdgz2MWsU7dzYarD7Hbcc+EGToTZ5NtTjKtycg66qsBt+QEIB7PkA65XL/g/n6ZI2yAbydc3z1GknfHvmprT2e26zwUPfFcj0dMRbVnaSERc2LPQ7vK9noC7d6eNSmgMYmMf87oH+bfx9TUw8OsCFcHGzuzWioiFFTxKRaxokIDJ55uUxJ5R4jYEAAATuQZ8ORRUsK/8BWrARmva/A2EhABB9LAPBaOmLqEi7iPa3pc1g4ykeUks0giFhCHrsQCXGFTT0Vt04U3/oS+cdgXvNxuOI5CWZGtuv7j34dp8hOVQj/bm2eesRLwB2rmYgJuAklw/Kqhd28z3742BQ4L12kWy+OmmQEi8DHdWMTzmmbvRqUvUVjU7w6Qdq0kVlQ1u
ntcPzgUK+nnilkmn+Tj+A9awikSCDDOhCuRdnqFnKwU9BCZ4ysop5YLUu2BKEk7DIApVU3pW9B2XlLGlyidcAMuTN5O2CTkzInEX1zwKO/C3sXJw+JTRBVK9c71mEpGJnJNF2GjTV/jeFq6+6AdFBbNk/lIRJ4VLKZ1NpR/iDYsJm0hH7c7wc/b8ZWWzv2pZFVtiYbrsKWELu+3Zx9vwR0LB3YvpY8c/RYLTZAqMMoSnXVANCEPD1tBEiCvrENOaqpuwCfCLwdnKRh4FYwDBn6eMkBAj80pia1oQ6REr1Gsum1YyLYmtLVwiSdr5Ks70Uk8xHACjMeKINCeCHZuFRkcNZwBMnqcbwKzosrdQpfqABjXE10f++BAJo2aA1utRd+DMlG7PHQijJ0+wq7pWA0kgJZz1PAT5Gh8Rr8vKtDn23i86c7kIbj4NSZPiuOgBOSpokRFha03lDWSFYdzuAtnUBZiB1U/wrYmUOV1wsKMQtXPCjtBSgjCBz/GZdZcBUtfjsYt7eqH2T7WUYBM/iU8tXMZzNW0BZHpMp+MIfb1OEyO4V5b2ehYsm+YTktPfWsypz9MG26D50RoUtn4cmG/EA0P+WBBWn0NjnWrtRAqKGCDjlRjedUbNOXAQzyIdsvlaAublZ9mbY+1Uck/qP4ALIbwtiO9f5XRsN/C1FpVdBjXWAPN2oRjSeRupEuEvv/eOJ6uf82UaCrAmAZXPuHFfdMt85O0EE9w2mYaMByn+dic8zu6f2aGeZADcNcg/0EONjHRRDY7fJ14iKUcoo5Jg8m/W1zqI4MaPbsMfJy0QS8t7YDNVloNd9l4s8JXapvH/9Pp1Mi8RLVkHuxMTGrpObwMAD+WU7tSQFEZrwGDxrwgzu6bzAYB36EMH94ydMZD4PFw5QD1XDrgcUHNZdx4NTdmNXji9QP1jckS44lJ7hdwWQ9J25WMPr1t2vhxBqvyWQpQdBUWpCJ1u/VSznnD0GewBgdpQvWzhSzGKLVE+nsr/CkszauBe4qaBOKUbcwQIl7vsg4mePf1g1etBke0U+K19eas+HQTMtLxmfbuuba7O2N95uSyugbY3kF5lc5byJKhGs7X2sRHERH3seg1J+1ns8iVuUlDfewEoZsv8Wx6dbHHwltMcRHkhSP6y9NmV7kTwzr6QzC+BR3CYuyTvMzyUuz03kT0UZhSyAv0F0oPLgRJ/cVVR9J0ZQfJ4DV9E5ZBAqKOu5dFI3NGhf7X/ZsrWB5dGnJ/oUMqAe+YhbGmaiCaRIZmVb9loGaQeFZRPRQH+Ng1U0fGNkizehuOC2xL/0cpIAybAtATGaYWHuBMh818tjwghaQwIDIVIkCfvhhKrGJxgo/emQ+EYug6dN2wNP/q71bhKp5o1IqPjq5n+UHANNLu4a1Nde9gtYOpmqCY29/z94B8Y3lzieFUR24y0saadfsffyx9CWvMMNJonYRxJaz7Tp+PgTHz78y4hSW+dkWf2U3RkAAATHAZ8tdEJ/AO3L8LjBhEnGcbqjGSG3Qt44AQfm/QzFsiL+EE3zZ/H/eXAfgoskaGJdvPYDzNF6b0rd8KScBgBSbSwDnja4q/M2TGW5x0RQ6fF58B+4K/OH3SDjDdUmj2X9QlT6FLIlgGWio7mAR8rOiGyxH7bqCd/BQye4uP5zQtjoGcmG8KuIK51ef6yDEvqsKxES/dY3kFklLA0PwgLrS7g72BcosnN3v/6lR9wfQxPxkZkNQKfylUtSjodAF0cxxm3Nc8TO0RRDam3XayCrIW0WCc4YjWIjqg0hrdDvJqXCYp8K/Kzcy5EGBX/OOonjVPNkfFsm7oAbHgkwMfmW7aoRLjoHPggs02NV8jsN9BVZ+HbjOzKiuvEN0LKg7VVVOzLG91FXA2QlOh3IdYNCUy/mH2PwsoammIqIirFfCN8H6LzywsX08+U24EXVtjpCEj0Kh621YiGEf8VQk8P7cSX2Q5sIOaGjh8skuzOl732dN8CbXmXFJVleInk
5urvJXUX9ELHHMX7f74YgXbQMNUMIf6aUp69JNQXcZ29aiL+31p5lYnYVtfpdmPEaXrxz/OxKoqtRANtzekI2UEMpe8iNr13fKfnasdDqH5JNvDZidkDu8hqun27XqdWgzSm9eUE2648Jlki7FH5lem6yfBj6VxjL6CjLjdyJ2EL54MGdpqfxDdT/a5W4bZSgu8UJYlzuINiln3gMcgbbw3CKmTGxK9TGrgEKjRlQyKdEP7Teg5vfI9FTBbFXRrqRBzxENIDrNBhWcFLKmh9a3+aaLHnPW26HOzB7m6Nvxrv+xH8nrxxplHvsSIgqsoN7LeWwdfrgMEHHqVbg3H//MgtitPQkXFDjLueln8iYTaJDO7Gl9ApHxbMhPXi3zw5ILvCC8Ft8wWwYaEfzEMgJneUQb1cmFEct24dJ6RxUC6mcGiH7qAaThpXqTEwRFcsix+pDmFKIjwE+9JPBk6kZtNXCnb48FbcURXZrjuhNTZLw93LWzEBQnynnJc+l5wS/7sS40zsp9lUU1uWJroZ7ALpV3Qfgk5cf3CZYezgIFKL+GqwcTzSxRFrz5/Y+hchUerPgREtNfOwo5uRffQTbupgIizvgh4fYupcycg1e7kPxOuGdWgFCobD4o8e4AcMWdK9YusdiZkqqvFziB9ZcVosDGBOg0vQhvKBjBDIDus7zDCKpJOtfS0PXMVj9TlQRwhXRsmCtm2PXbazc+d1w/UqxmVhR1R8nIi5zVA8M9XuIyESDmeB4vMsrT8LEd4ZBEBulU7T/VYboc+8M/U/5TaLqOR2CbIKCWp7L8gkD9mGrmedyNEBc2Qj/j2++htIpF2992ASGSPsQLow3uBMKkyagpUznpL29sHG1yfuxYg86hVODZg7VjIV5IngMDKEjF3ZdyjwY88mQP58kKDJmNKQ2eoOkEw9UvXvUir8D7t8q1Wt1Xyam781eJZrGhCCJhRzKcV8k5L87Rqbh9Q8s3RqYEIkHk3ikT4pi25OrQf6OQ+mnahd3HoCV3qeAnffeFzcoRP2Qwg9hVEVIr5fnBSjtI8EnzdXmIow5AtkO7pePIUE7iE2tJN1nwt2+ElUu3vjvT8OuJjqxCOEBJf+fVX+b0bIy1Kco/8EAAAROAZ8vakJ/Abh47+ETb3+Eq8IAIRJAkepfrAMWMshehUHyiHD51bg3xg19GpAjOU62VtQt2Pk2CL4fwP/xgSpsuyJiGRGKB1IrY33UTX8ipP1RGgXF6tB3QHUxVZUrkR5n1tL4BTZoGYpZ+bf7vd0yc5bYurgB3LrmX/dlpnA5/wmliwydiGTcwDgyXmwf+Rjfpdg5bZ5UEm2GngXMJIxDCf6BuzwMbH5eZ4LYs8eFwMhQMlsJtOXrzJ7BfOSMI5TiVlIZ7fYDII+jBOqhthtLVUQFinsY8uF/sZ6XriMme+Z9KsGuW8dJ43W9zIAvn02XZLltZFMcSJBPH8gazq/f+4x0f48XOyvcB+HBRyBGhI4ETSwaMIElYvIXHRyYVxC2twThpxE0JGfT6YVO9HdCEHESyBlI+aj4CPokevwHF9abPK99h+c4MK/yYjAx0YIervCpEgGxL/no0S7hHPHAb4jm08CdhzgrlgZi4nrHLGlS5RdnvteP39TZ/ai2E/kj96z8XJbn9vbwMBgR0QKtQnjP5GgkH+hvFDpUHj6d1bfYcOTEPzRirR5TSwKf+Cmk9mLvzHW97uxk0d759DkvI732JhtOo4NHIG++U3n0uGYLmse4IR0Xs2pDBrqaGa3AEJs/e2/VO9WbeuH+P+1MyTm1kqIEved2aAOcCl8x+9hER9hICVPJ1leC4eFOD5/crtW0h12a8mPRksVjKqVKCE5aQAfUTdu0TkBHBBe1IP9VIrg/orcl34vSC3vx6U1QX7b2oPYzw4kzblfG8IaFsvqMNcQV4wpTj11vTuPoa/0+iR1AkY/YwnDteczmTt2wgPTNQ4OYd85EGs0Bq+fCgqjmekAnjEbteO2axXg
fk014uQdaoYvRVm9ing6idm754HPfZCFNnlPY/gQTL0dqxTEmrk4W0y1NkVrblg6QY1bXOOltEykcMEtH0GWOfrX3/XS+A6SOrs7hz0BuLLPI5/9j2Olif8eoA0UkAxDpQ9k9DAhkkwcbj5KxT5CCIcUWqKPInjnu2TyCvHrCPgOFJ8NQFW4g7Oe7S+ctVxN3kOtRVyQ6f3xPwPIM8lQpA17zuCBIcUi+8IhG0RaX27qIgWuBcKfIv6Drx6S0ydVqa5ywb08rxqp+GjlfIe9uasnoVvUrBxoQxpELFAz9V1VyCXMKXgVgOa5gCiowlV+0fDw3NqbTIsiOMmGxD9oPvGfx7+t5RBUWEkrwQG179IC5ReoQ9w0r9rPmUuDNjq0ohaW8efCJG+HE4X6xAHMkvVMxcGSO76Rxgmk0w5Rxq0j5Q73wobNzvqYgBw97N63LSxS0JeEuoHlYwy4mxOxaP8piaBv84bXF4h9AYlAvj2/B3wTULGzLZg9aC85+vzErjKycIOIk08Hz97GC/UB4vmEMj+euL5x1u2yBqQOJCveds9PBiRUrPaJZjdnLfB4Z4s1/oKRXzvFBly+4dz7zwgAABPpBmzRJqEFsmUwIb//+p4QSKJQAKHWCwsjULahee1g/BVsplx8zlXXZP8tO6OjLxZQ5i3PrYPlS1k0K82LOvgfKcWI9hwYMpzaEfQAHPy6a7+rxcKThH2JQBBWxn1tNP303hGFxC8JbCv9/HSTd3WqT6jukbP8WInl+RK1G1IHaQAF7Uv/4rh5NluG3CITj6m1UMV4zltaxTlOEIvvHFNKC86dwxHh4n+cseBzhoMta+oib7hAX1PzkZmubirtqFEPUfyTTarRZbrGUt0Z+EwMLH5700oWBJCMUiQFqyX3K1bCJI8egoMlvWW0GppIAPJZTACVomTW3BVi1yJQ8OIXTfFg1jzzegMBemgczPMufFNmKnfuxiLZLn210Fddkl/SaGdUepjiUhLt0kWAVz3bVHwUQH5/Roz1QU12JCa4Gue7cfuRiqJ7A+7RDWwNeJkEpPeJfT+w9yxgli4NJDasHiMpb9JmCyDW+2Ht2wliqvpUd/hRPUUgNI3h0qb2TZfksc1lKO+zUyNLLPQaoxKDIq5kwIy2UWCl/DH9mtXnW8HZ9Gl+zrrNg/lUxtEMlOPn90esbk6oD0yxt2xnr6OTlcBfH1w3yeDdTl/9uPxBXXcWKPcZiD+6gvWPbCCF8EgJUoK3Go8c1mU6Ms7RyEG2FeEW0OrQvYE8J2d8sRVTIIMzGlw0E14uz3U+uxKRHMNxx27DEXN4dRS1qvaqVqUPmd/U86UTsGGt1U6qtYWJUJeueTevV+tIDer8jpicdCJ6oiLdoKDQ6YZbVpzLG2mCXlmT1+b28IeNThAOqowNX6hhZC+C0FBhZXalPsGUIKD9IGDDxx6MF8lwGjoo5nMaAPU25g93CL9/jk6R5xzpEr4wfiyBOs+zaf9CaN3jOymipA3+ctfF6b6hbXX4hsDiz+HSbKZGU8ExaDM7NUQlG0Ry3Gn3I9h8prnHsvyJi3tVQxIfP1gwyQLY3oRlHZT207dS85NjiVX+s0YdTBGdaasY093K/FRwq/wmbmB0amiSacluMp6gTREcZfGCei2yHtuEBAUDArDKX8Nzngoz0gEeB2iTmvGfdg57CkzlPuLc5XSSYl3zh1XFF6PQypzGMiUaI1/4VNHtoFGaby1OYaubvWBhEUApJrACNx+fNrLmEhDSf0tbtlAbnMtIcXfZbMuA8ADQVMGB4LHQnULr9CyiwhSptq27AWd4p41ZVutiDzXMeZicGzfg6VGyWN3GYtnLvmdrqLoP+hPpQw1Cxh0nT+56O9hhEJn8e25hfRXunjfWw+emaYCIRO4nageLdiZbZM/i8cDOguVp2xzj6SrscFRuqHkkJ2dAdtUl1cNGoA8KVC5G8862I9zgKJhhMa+o1wnSVl7Xj7FzieE6fmz+rhhUK+Zx
sGt2U9tppyvis8IJOiWf2Qf7S8AbeCrxn0UvncATSNAYoljosAtTambciT0jPguSjcmW30o9G4gNfKUv0zwb5Dlbp4v2kn5VQ0NNFqvI9rMfJPCIdXe17q0UwE7iOB1kjw8Eo8HssIZ6QqLDB/l6l1Jsvc2tebOUdiVnxFxm+zGTqK9eQfLEvfd8J6IoY5TZ99rBhjewpgzKHirr2KorzjAgCy4MhbJzgPq4y4D1uG17BtQB04/kR/Hk0F+RdzwUvdbnKSpt1Vy6aY11Mb8RXOOfnkupPaWSh/h8G+S3gCzD1gAAABJZBn1JFFSwr/wFZYRhTwADVbQ6jGqAQ82/8gWatT5DT2jpwDVndDaAZ6LOCwIOgqcYPLpLkN1X9CbkZy0j00stQYW9iorQkE9j/GoIQHUpKy2Q9ubzvy+VjD0KNEPqVjcBWWFLifhvElBwcrHS+HFuijocno/ECofL8M9BOVVNBWg2XiESCjiv6baEFyF6Cy3ZjzD/4MeEiynLJgJViiLSOjmsqnUY0Y6hmIKTBTY4PgU8teXx6F2ZKpdDRBF8xlTxO6DiSyn50fK9Kij426QMhdpuEGkmq7ddd3BYtDhpdpDB784i14hhuugJCgXuxVMVUQwhVhd5ceNEQIWtzV5udqR6d5TPzei9I8pOklvFBygdt030a4UXYeoD91EH2duMo/1FAJgtiUtWPFQAkrYPYCjSZvryd2Kj7zYbft0ks5zaR9+EM8uI2MdhPOyFpULhU9hlhvvo65am2oQOfuCOqJlOUt8wdF1K0h0xtLalM0BNfN0qUs6pIZWOzvr2WrnhNh84MuiXKwjNBbI8hvxld0cqxMI0lt2N21vr+YLFWGfOgSE28858mUJgRqSmWva911viQbVHpkluVCqMHzCmTJ5+2fgbGgU0taloQxGROpLHZ/pJopRfO2XEURFgY7xu3n8sGj5yTkar+DSpDDC2SPfkyNBlXFE9KWp6smBgC+jR+FUXubHVdZVitQYseJI2S3py9e2SJMDnCkPnPo/17/qPSkL7OUAcWF/URmU90HtlC60QOKxGTW+xgTISK7GEFV+z/z7KV4nhsfilAWGfxQrczQMCLI1s6PMPkGKGJjwLyUbVmSRHZT+fxOVuIAvvDJQqQPS8g5SHG3mzAkGnE4k6bSZqcgZO5MjY545gTeDFd6ebIfvRfgzRro6IEdPcb7G+XWw+I2sHrXv4vO0n98hePXb3t8Z2mLhgJwhBI9mx5fxwg/58EdwNw5CR/i46eTU4SLCx2aDs51k8w920BN0T20h50cqdDnw7QZvqnGMBn/rbldds1iM/bCghNomeQwAmtgiovrhSY26GDAGR2GY1s1Q4X/OXV0StbtIwE7SUdI1RmCioOFaZpAEINs3pxJpYLY/VzVvlNmMCXX5wCQVX9RBhBc+0eYg0JXZvzZLtnd+t3N3Z5LE29xmX3lqXozJXBtbHYG2T50PwWZW4O/lLx31PZP5nCpwwVkbBwASUcaWA+mGGwXn+5K00mz95MnDGWhRA1ZePkmN9U3eEVjeVgvokK7tSiYC4jcNuN5qY2e8dSp47jjPyxhZNCXgVbTGZSIlsD7blWehzSCz69QPlwMOwD59OXXqccQXDBfxDEUYpZnwNsVV2uqogqW5uubgFiGRsf1qsiZO3HmtvvLzT6PpgDf8bbNWbSeeOg7Nc2cBFIQ15rtnOLl7DWIT+ToGk8qvijpU9sU9S41CRqYXsi64IB4Zb5vX+dkdRxz0o/5O0ehjyl6ARWFdtB3w6gYDUu219IL7l2sNf9Kfzz6Snbqby6AY8SR6kWz9uWNLnIA++Y+xZuQ8hByd2k/Uucto+DdTmIDYGoVdx2+KYw6j3vIucrAAAEMAGfcXRCfwHCxnHXUprAANi+PERT6YZ/A8VFayG///4QvvIoG1nU4C05xqCkk51ICzuCPDPhibZIwUJVpnRj8ElU1wWZrTxj1n20Qbd5lZ7DGzH1pJaGNVbf8+J
0KUaMnFfFKI6zCMhc0Gi2WuB4HRV5nwhwv47lDbQOPh3eZbCMsab73rboZGUuRue+VmsFne1zy7TttvXpnD/h0s+KifXrtYY4ghIBuF0KJ4WIr4Pg/BVfEL4ye/OidNkcllCtwYvHhsD7AqLuYsMAdnpGb6jto7X8//EtKVSi82lSilYZvboHEJxQAfqZXUGxhoYswU7N5HoAj+n0wh0MKxYXeKZrUh3cjDPbyyhK+6QU6xNND2gNmjKzsiaZr1Xt3IVXRPtr9L2fMrFDuPLpfIUWXaeGro01qHgeJ29nrMsWkxCQoZSKmCuUn/jOiN1fyxSJm27kP8s8FrxffpUOfs985XfT9eeHhM147iCo72MDPmDAJgPk8grNXCC8s1Z47QuPjM3cOpuLfXiiumGDzydyscUgPqQI+il7jvB9pV7ilIQcsi7C2UchmO13SKgRK6w/vPo6mAr8SjB+ZVtksrVSyo9tJhkMj/6gT7MbM+/dbcZCO4iV1nA4//EOtS3cKphzTe3ge+OMdYONXpE+ZTCJemk2UFuJ0sVNX0liu65AqQpex3hR2XTkp8TTqXbag9wbwMQO90La+yB+MI7Yk+nDJwz/j4iiRng+0AGwgjJdfjnGneHgv9fa8263B1qkmgAVaf8GsZiKAZlpFjZecL0gBvXhcnVQHAb2wnOlwV6lMOD2YiMVvP+KmmBsOSxgH83dAlHTqRjzAsUirEyiO4yJ2Xwp+t9xpK87sWFJ7xpvM8CmhbZl4eoz+SHLoRGO9sbx3HPLRQeQD4VCSBvuVSXnkFc0F2A7uxRO7mxIsk+LBiwtkXOMp9QMdeVVWZMj9miNCVljr5v9C99BMTZYlcdBaweu9o6r50Jcq1QyGHYFoQVdaKLYEaFD40s8qgnebzw3M1h3LtubIIGX8sHg/AgxZcPX8jIng7QKO1Q/ofGbuCY07qkvdZQHZA+AOW//6++zKAo38M6p8t3xXX0QMozI3xvELfx6He4J3Av8ZqHLuU4QKFFkIzx4mxiZPYHWJC5QGWfsAEUv6DOqrg9wUMD3VKOhu5ZUDjlDf77Dpd+rH4HLXcAh+8k96+zrzJvnIxorakIStzqcYzpwJplvOuRUxBOdSo+T0YtfJJgnJKOQZPLTJ1clj+w0Vb3BAbDiXGOVfGb+12Ov7Kj1jV2xMFEFd3HyAS9VeQ9pkAfaQ/jjTV6S35YB0p5XsVI+CZragxNR1bnjgd+LP6rKgg/EgQsbzNRTbGjLWdqY+JoWMRJY6s4VnnvCW0RX6gHJZfY6vQpBnhGxJezBdym6nX41wmXzv8AAAAP9AZ9zakJ/A6/p7gBNMY3fXJ+ScItDZHi1equKwkVy6TXJIy3FW8boRvFtBTp63ADoO8Tqtci0GARkyCrH3aCbMXfrIxdq5vl1TDq88Jv4jNdcr/uU553Z+8WsWhellH8MWieazGf/30jASpBoLwgZcN8ixjOX4HPvCf0eZMHoyaM6fgtNfPjix2scH1BVR+M/zFOZfFn+n0TpRrCQoGG+SFu29Z7p0BUeAkzicL4EqpeYIx25vxFRhEK1hv9QtEAXR3GlHTV+kbzYQnMi1Q1hdFzfNQNP5U/Nlok9iOabZGLNw2/4Z19hEtKKEgWk+GA9jQ+zYahX8EpXpbqqx9dKrE+mCqCAB393AFY1vMfkH1AW/7yCbgAsdeZeMwhTSoL/lq8HjVr1riyUxJSQMaE4fmB3T9OeiEuuYrWCUEwTbFy+ZpAsl1ytOt+ECv1sG16fwRwLfTZ9OzxOkWqp5ZVuj0jK45W2TJPXBvULbEjzr1TmXeG4DiG2BPXUaQdI1ALg2Pvb4KraqBtiK1Zw6cJi4E+X5pud/sgFtRCDFcIe7xd90hTnLlKxz1kgShs5N34gaGzifTL7sqLb/MKZ2R9ERydoWhP2hYgvxSY8MJM+2OaSyYyzP+ExSmNuFeeM7RlNrAKMj+s50CSELbJO0Xdnygir/vV5bLm3MGYCAknaxJvpdUI
V9FMm2sJ1SuCJKSE40DN5BTWUQoYdk8zjDIWtQCUqryN756ue+qT0eeKQw/yt9O/tmB/tyvrzNWgmO7oy/cCgGWK3TPhrtA6xyhER//s1BDfulIEBWKm9w3A3d88x+7EbOIPi1okiOoJLRyXbFC7HfnIAtu8JOpwBmXbZJINypHDJ7qzuG7d0RSrGnZALdWwqHv+LIlvfp9NGaBSUpZyyeJ6LwFY/pWoJzMsTDxWC70t6CQA+9Gsc2I09kQ4Rd4RDQWgo1MNZoLqFgFlTeDoX0Ju/ZxKv2HzsVP/xUCVptDpuockwby6eT4SRUjdzANic7z0N2B/S1Zqu0Gv1EzwM9SAZHpAzEVvou7nCD0BCk5gQlm2ycgEbvIUadesIN0N8vymgDa272zYBF9OUt34i/FhebzQ/Thp/ZzFUiUmvx9BTCyeP5y4eiBMfXGql87+p3E4UNbK/OKa+bhviG+L1+xjUUgukCfyuB6aG31mzmU7ZZ7LBbDMELhLJIGtJB+IWHFvsUn/sk0CR2iwvHLTrpkPhjbPlC1aLoWkJMigPh9sAKbJRd/soozOc81HTGCF8OvVZip31WAWa3y1dU4Yy2j28CAy6cX/gZwHkCWbpWptShKMk20b9lNo4w6/Po++Drv8VqrX1bCqHrna2svu2eSjcJ0ylCgL0lAAABNdBm3hJqEFsmUwIb//+p4QB7g22ivjIAInXJd0tDVRdTGlhUVscCyjITnnzNI7DH/ozay5/AfSbCyrUOyg/y9W+AjohW5pvJ0Ca3JBxg1Ar7mBfPKXVi39s2xwyEh18XpUzDvAvaslVr2nTyiqYmTgca4qvpWkRzVJiLdw0qzgkZWNaluiTVTTf6hc/KwUMM2HJ8Ai6di+YFpAh8o9HHJQNMTAfwR4+b/E4GNzlX7aXPOvklHO8tPpdRLW1oBlAE1VmeK+eMxPKyaOOtgs2SN4mo6yD7o/fEPLZhbtdn0mWif8lQLvZe++RBByNlkUAC1W5xSNrknzcgw08uhw6YbSLJhGvwcHF5MSSh+Ld8Rvt/uRXgb6B8tZsvVNQ07FtyoY6S1IBo6U2CXRE4j1nkdC2tPsaYxrRHoJYyX/bhkkSrzmE0P0m6KPCk6mkwZO03EX5mAbreFVRHEZUolqglXtcCo5KJNUf6jQJqxRAaryltOlAwLlC+Qc3+h8I7i8OF8LdxDLQnbctfBuzKWD/vazv7Kj1+yYapmBIf6VFbjlBJXfuG+zwvCa+mY3ZDDgso+LVd8SBe2gqIl+m5p0/Rd4n7w84i2yUJ52a5mdgMwGYnq3fOFf74zhySpX0Gh7+5N4RHeEVQnXPVhuND/zjRKdkhXbkC2Q+OHDzXiXsQolGlQLsjC1GBYc8YEacrv3velK2g7cHx0Akj23pPhbPrV1SRrsfqqSAF2vYtPhHIOLqQnlxd/TsrgDg0pU20VW4I+qSIJ01QJMNBlGkhWNxDUNl79CUsbzgqQm5A5SAGn1KJB2mt6svoj7ov6QCFH6UCFoCrOHwwBS13TqccurCOmc98a8Na/5O/ZftFtVoDBAnW+U8JA/8NcBsK0JfK3USfAHm9IkLBr9aWFPGkyMfInqpK9b2tnGeutop2SDmlVYqyHeyZiDWVz8Pz7LGKNar5BJfPulJh51HfB0apWIwL/TzeaRa+8npVo9H19RPGg3iBmfIc6XpjLj90K22usGkq3vb2RoQBvQNBhEDAEgfJpui71rLtu1RS//AZWQKZdt4OSf1MWAEV5Q0FESGXueSxlgKfsU28KiMJmXDMbjA36fmxOpnuhkqq8H0tg0SAUBpzKQoP19HFNWgESD5BDjZ3VuQKjqoOnTpQtnKj2gvIyGB9yUwg7+HF2YHUySqsICIApZHNXnt6lvJ6cIyITUjJhsY3xZM3eCKJwhP5ftyDPte9K4ptYSP0iyOrUJUP3l+E2U0kyn18eB/Eivss2eqWCSsaBH34LAH5sQkmlzgQ/0lxzlLM5WvUdGfJ8nsvE1
j9CfG15MOZeTUD4LHMGTshf50ukvnUcxDkbKR50T5GduNWDWNlNViOsMrLK/aR7gG97lYpmFsgxkRUdmILKWFMcNOVx+oLvrwyQZ1XMMvj8wzYKG55D8QhX5cKydRsuRfqJ6RBx42di3sfKx5rFUUUUkYunrH1kjZ79kGcH3pyPzFEouX9i8xn6ocyLbkoevJLocqLQB+1vtsBqvxGokdGuRGJWxSckdonJMujKQ6P4xjRKvqafMVBXk9O5nokFJv0KBEDR9f35imlXKuP487yBaocQAdb0bCVQkIEnmeld5bcbrXXcTZPIBoqlOYXNdMMNZ13ZqV+TEAAARfQZ+WRRUsK/8DGQLABdRpUO0CDzvzM76v5vw7MHCKhzlvlYndrWVtxo5BHhNCcqf2tOpxRjs1GIuvIz9hCyct8iHpdNsjFFM6qdMheSvSwNxVjaxHmZytkaI2WcHVyCIgLXxZCpN4dOxL/3UXjtcKV5T11MhZ/mRLULkCPSt+1sSlksID42yOcdldf4RSeJWVlyP/B6WkBfvLe0k3G5k0zl8Rwbh6TY6Hwy1p7BEiVfvFaffp8nMfMLL89kytuyjBylYfKSvmVs8S5ynTL5JICHVBVFHmk8ADuzm+PBg2Y3oHTNCL1oVEbjgswY6bks0gmd15MWWpAD5IT71ipvzDMgaBzm8asdgcWT1gyYaNkgbzHL3OfTa1g2z2nEAZhhAyz7ICMkyOFjqGEopMEnnXQRTi2BcvFq2DPsWjotRSoeBYfAJ5k8vVqKynTnOg9XO7iQh2DMbhHP/Qm9TPRpdav8aWryy0MkA/lWwWW2xdeU6zRkO53W8f1qaqj5B1QcmvD928oAOnHPw+6hR6D9huQ/M2mZxdiEQbP83nLL64bHhB1LWfdNlB0ByxZwO0iLigKc3rKp24dig5WCEocORfcWRc4x2b7fx6HAreoogCECnSlU/PfwPxDxHcDd4OAKjZhewnhDYDKpzoS2e7REY2pSfqt8qsnwMGTWkTYBHNKue7FOJ7UOWpGqb206yseRHzcm1nqk2eyFYXxECrO7AhR1rhHPGVLNoii1soWZwWq1NBMouqMl5vVRXHYmp7NaWJ/YUpJXKP5Z4DzA1cLEMjGvlXDnxwXaPgDjNhLOWYiVhVk+CIl51QS+PSqQjCqSQ1a33LMa1ztY3d4M5rUUxuriboOc0hRkS9SKc2hFz4mxsHsLvMP3TtHHs3tO3uYAAJRMEBqI/yM0OCCpUFc+0dMn61a2lPiO6oYsLtHlA0fbfcH1ob+JM9qX//PaM0KuqTtVwbf5DJ7CUGDpLn6sjG0MqhAc6wi9vmoNlXCQ7x5sYVeULUbypXpzKqCCwqeFQqpw1xbHKcyTBXQ06QKyDKmSR2D3SB5CHlgyqmsZdAn1dKw0uwyhkl6GnjK/i/JanKYZ8HtAlajhutxf0sdMZ5OCNDed62KBFPoY0tLRabKuvPABz7JU1Lwys6kObfZqhtfIvY2sMbpbOpGSywN3UL1Uc8tNTNchoWwUQrzBKEaFbAM6I8NFBZU2CrlhpFfMvj1sS0+UxVcPLMDbpWav3CUA1SKvRUs3V0x7+o0wdWQt0wAfhuGaQKAvReV5RZPp0fFt+I/Bz1Ku60ecQi98gNCdIxWv/wK6wVRuLIeOSqmroEh4j/57/92slBO8L3AyuHuXICfh2dMrUywceICu3PkctaB9DHZ0oPNDjJpxzJm/dxgzukMmh0f2DnRmJEO5BPpKopE80aiSjclYpjJqnQO2kkNfkR/qEpQqiZUQhsPcG1EFLDcOth953vzpbZ7zUnPepgSrBK6DixnqjRcyq2AAAEFwGftXRCfwPdGwv9Nw75aACIOz7TNoCQz9kWpKiZsrdxnZ7xmarw3Xo0yvkNw709Bmv72nni48NUY9vvweVq874EP5+QpGgTdzawbP5U/G4LV1LBc0rNc6N7lylrdA/Fluq8whRhVYW5jOJ4CqO0pQMf9BrYcbFSoyCNAFs
FhBR/0w8UeaCk8P++MEAg+M8VQHPkG/0wbMXEsxB1+eY1ChzWFjBxzoel5ud8IQpOzz9+vadoZXIO3OP4T4E57MSgZkVIBEIIAmYDNTATU1ME7fZqfy8YDHYqKi5u8CEf1XjVIA957GqJ/yTXTTK9Uz5inNeRfjhxpdrZa/nuzp1hAqkITVjEArhtzd2LN3w/4heDeF71pbcB/7BsxK1HGbX/l4QRClJYGKGudM3xkC0nur/jfUSkNzC5kUTvMrAkCahPcxmF5mMCkFU7OIkLWKcROK9x8pYIEbIbTqUnPsr01OpCJiPuZ1kH/9wFEEyJ0Cc0L1A42ADSe0qiqmk8JuEKJzmpED/K34JsOWUv+eCyee2WxIUKPubL3eCIW0K0PJLZIdVXqlwdOh1M0P5pSnaHJNV2wfQTAG2BV7AIAa1BiM6MlGkKwpiC5SPyeRudsM6NfanK/p70FPgsTgcDufD0DFKiIRWcJEnzfhuPe/aDJwF+FhhnLyPfHBdV8ntJBzRAHnXcjvWL3v3voMCuABbI8CsRyBHWciVdQlQroq6H6ZMuA+EpC6EabUtAU4PyExcIyoD6KFt8z0B6wz1LY43qiwisY/8WNVygiFqS9COGhBh01AUhS/O2Fedjo8zo9zdfvvIVclEAM1QqFXOvJipIb5W5eDMnh2pnsMc6miTwDgdbCnokJz8O+GLtiNy5Xc6/d+4CVust97uPeb8VtGd1LpREtJsmYaxcG73Opi0vAU0ej2LpLFDbj8sD8/wn3J1pV9qY+Bl6Ua2m1I+9pTJHnPYT1K/XgARjbQMAgfntJdFtbCBcO8x5Tdow4GK3Wa/jG3wk3Zsmrwkv6h3ELA/2JrDxWx2Rqj3lROq1XrIz+Dz9TYKNMjrKD/hCxHAiVwhteCt3yHncTGD8W2+meicn2xN6udPl1J59937CcynrOXIJMXoZuXfOuuW0zRn1ATKxVlN+3glFYmrIxzFy53+9T2nJphNEdyWU3WMJjW8E9RQ7AF+lmk6230K/G3km2JPozbKMTAFro4q/kl5tjL5lpv1IqlrN6VF21KF++f24z9qvERbTXUAvfz0tTBG2Wb3x3SczIrv5N0Qw/GaE6IS94BClsbsNAL5ZI1MSJFzs9WVZwDKoTJQ1cIYwlH7JaGwMw8gHuTaGno0IX3sKiwvCkYjX2vsuTRAtx7w2jHF0kbWM1pFVFUGw2koYZPiAdW8/bkB8RZIeEQAABFIBn7dqQn8D3JvIAH88t+spveDKlfRHoVl+XnzrrKnmz9qJNkhYUJttE3epDjX5ZX3oyvAxqnCSMw6BBYhtt8LNZKqiZ81AeHh/L3BEzDdYM6cTj88f4BsRUGjh5dOcfko1fwfwOSF2X01wUs/RhykwD4s3X6JBrloniTrMBV/V1xsY0oHDzAT0HN/1vftK/+HZYYSbjyCkJdrwQhtQ7FFQriflCKIUe7XKmufH+rzduM3IaQi8/T/WnqtJ5+JlqPqyvikYMjSC+0WvYWN1mo8iUekXTwVWwLQgsEzKxNNia3R9QhZmTuQLGvp0xmCjzPR5zL1qcGMhQtplwlPwq++XTNGzQvtVTQoEfhK38mQDdqp+lfExvhue5aRFlOfR9eL7yJrylykItQbBX5156PELA6Di1/Wgk0g/ri1M+YZKSxjVwi+AW0ZcvtvW9onXnHp8Bi0tetDOczaDQVVCCH3AIIJ6ICzR7I/hgT0zB5pnsTurr8UCf7sgltXr3nncsofZurmwAyaK1gkfwxmzWGMhsumkwMq5Grito8jVUN1tZARKzMlzSSkXkheqqKiWyuidWYrwfIuVWGutLLV9Yv53WRPN+kOV18j8Q7FvSO+mNqGV26vkdXDIRtEfjWNbgS67I3MWmZIRK+vVnVZQDqpfl1eYlGcXM3OntvMl3Ys/TtZqYc1ehKUesg04Ctw+6VRZVe/HgM3g4+gxMRxlixHwuHi6KUosrdXFgtJUSjW8iQc02yR63AoyDnORXo4
nUKKstVMqcg7HHJO6HFJIpTR69EzO/nWleqvOfz64Q3MzzRuui98V5SbhWZ0tzqdorI63ayisHXg6mRwGxCVtlPrmIVVCJsr88fCRrreGtJH9hLSvT60cEPxpCiqFEAwkM249vcqQfVUhr/YfNAV/RzZwxHLuCObzNGwn29CTf3Y4Qrwcu3f7+4YG9ugmthquEGmOTrPmrQnp6jFyIm0RHrkq/9PuwENwRz2G/JXFToWFPHLoO8Y0naGt8qfdSkpmryt8MuTuqaHQYKnmcdfUmugoxS7vaLPQyTfLhndHtFfkdXiI42KdGf/XzL2p0PDMD3txke58Xuk7GKQtBTVzTVmn36MuOFrizs4OUyePqYYK3obEx7Fqn87QJY06f8FWpO7UYEW5RqBKu4ebO9oZ72ULNImLVPL8/ICl1RIHHO6lsK8XGBwKmU2TPJVXOtEx1pyFebktIdspNwcBia6mXolDbh8XDY3sC6D7IxTym2VcaNZMs5h5fu8dRmzcOJCq+FJ75wP0Zd33J1RoL0IraxQmbiL213RcJgKqBc8xqnih9kmgWCyUH59wBU2+StS1sx+fHjykazH4vsNcr4E4VaepSGveGmmUWYXgFAOeNkuzQE6TPChSrwQUkRCwC0L8T1D06eD7wpCG+PJtweikuImk6HKzReKQDOs+rar5iSSsFO9dERvKNI+gqYSJpWuMKUxY/wAABIJBm7xJqEFsmUwIb//+p4QB8elFV2jDaZoz4hNEgAsjtLgd3+2plyTANP9grk5FYRTfITi/3IBXN2XQMKA9Kl1JvKQOcVIWJywgWXw9Btndgam2ewMEH8uqfFmJuc7ViFceNPoutHT+oFNR/7hF//delt5T1DoiGNz0A1k/JEZSSlDFiTEOFYXOAKuKKGRwfPgbGeANdGttNzNpu6l/KpZrwig5V9f8DbXAVoCptB9GpDHMgji08j3P1pni7Kkk2ZS5+4ZEUNRMSwN46twUSYav7XJUvwQ+pHxhfTIACqiQ6YfZa9p4V1yD4bRGaRT9T4pusSoYHYYDrNzagwPVQfDCc8HD/wWlneke+K5lHntEYg9R9rJVAFpdIOGznkygyS+VO3/tVZlRIt1WywsaPX/IbQudzQfA61NQeT16REds9zpiRMTkEmsW75PcLn6kZY6u1TiZ13FkqF80jevqx+8ff4pKSYpHuTV+Z4Ie/DDZWQB4kA9PS3l/vtu6Qcn7qdy+YEycyYazwZSeayEo9plJVmgxgZ7wig5mOmb6V1OzV8uRyXHTdvPdqIKaxTLokB1T/atxG60p/e5OijgyBpoVIh0Cnbep6AkW9LmAeKNALSQwWsc/DaLSeHUBplrrvZ8RGk2iXXRLTFx/xrbI20eQ5IQdf+FY0MB1CSliCiRmt+33/RRRWeF6DSs4Y9QCPQ9oybHjTqBfl1ovPCrRTw4Vk/0FN17Tz6ovqu0FES+wrINyOxuIc4JrTbp3sJO8xWtG9zkc10msxaOPAzyGse+sRfv2d5M5W+eBoN1vAxVwbGMiLzG/GgZFFmzZ1rfBmt8Upy7EGqZk8np6BKdU8Uu+Xb+zRhnT2nYDaSFKbS4nN+2LzGjLOyoASh3Dxahz+2oBakibewbJaEDzJk19BNLSOY/3LeHEvN3Ve6b7IDZM7g8Wft4Ew/M3qFLjbDYWBTvQloAIvlv4IlSPwfw3zV2N5J80HMYfUhtJ9ubI7UbWwBMPeKXXlOS8+SYypkz/d5TBF7uDxX8TuNs3XyMvb9zrS3rY6XtoVw7g+r4a/PS05W5BBUbpgTxDXkG9PiE991mN2jXUu/Dmm96RfTtitv3Pjb6UUlRxzE7NaVsw2Q1BA7Ss3Yzdrpfe7j32ikybnjrf5xXDLOIKzPiuBUUC78T346El3EDIXkpRQeJ4zOYnpc9dlOOZAeLgo76ORLf90mgWu43Vvli6n1nJslDeLdMWROTAOM6pOTiN/GMr9/z3iDCZxQBKaKPzuqile3fUIKf1x1y
UX8gbmAM+SGre822UaYT8Fpt3s3+x4VAywgNuJQQj2JLabIJEamhVMryMIBUUosB2Fo2cSJC+AGDT8a/x+Y8pQ2gnzUjUj33vR72d8B4lbRP49yLig7RgNpNf0upQiwhAWl2T6T6sbYtgpDYwSF5bLRW/Jy8rUAcQFLPNWmI/xxRjNBdwXYFaEpvmuLpztYf99grI+pfQ2JyrFQqJ6oXiGYcsRx2RYdT11DJ/0mfuN7ArDH2pX6qEfTbvbV3Y2AAABJJBn9pFFSwr/wFf83wNfUIAQfSwDwWjpi6hIfxzaEKZnzTh5cRINtIis0jNbMNV5f/ANmT8gj7oEqxyOL0F68g+Nj+5f2pf7Zq1AZYlAN2xekkymm6jejVd9AnPl77uBWJPc0s1chwCV3GyXk5A02VRwLGb5El22RIi268Aqov9espxDKvI0OG9nLu7RH7DX+OtnwDtwlsvRaNw+GWSQJKA2VRFATZeZ3ZOtDBM5P5iFSA96dlp2wVwwhJRdCZ03fqGkmkhYJKUbKMR0PgVLKzqacjMNRielrR/wAQoYj3lNQFYJWtrCfAtqrjdAf15wumYs1VYfDaW5rXw0QCbrjy1ooGj14lHaq351fpihtwgBz/8ZDhQ2UvpJFtW7u0M/+S8+xo2e+z3qa9p9OfOId6mtVahvW1GLvYt8qnA0WfwFtUQ5Ag290dvhJDv54moECQehFsjnbLskltpH8KDA5fCJJnQngWO/eBYCcP5XbLZGcQ8qIw0+hueTBt9an1PpY2/Jqft1zyTKH9XaoiMJ6aPpW9a36ACEh1RK8aKo8Y8x71c+M96a5mtksLQ1wUGJ6PDAj7N3jVOSKcwV5IonI2Gs/nhvCZoNKeyT0fo8Kp/JA2BKa76RlLh71ofwDs1AnP11krHTTTtipqntomzrhQXdpww9CCzfXVVdBH78xkbG6UaoLQlRGpSXLOo/ik4jlAf69W4LfqBYEzZyi647Wj/A/Ao/F47u82FqHH0f1cmgSQXDlOT4lYh/ZFgJdxiBSLwa/vyi/ziWWwOIvTZLakt0gzV0WHRpHwhebpj7klzGfUw46/57LuMfD7rJeZmR8pmyJa3gwB5kNQkP70hQjclRrzjSX9fGjeJvL7F7BGN2f+qoc+LTQ0Rt6O5zkzgFDlWMnpLlpMfLQ7Eu6dJcLZ4OlUrh5FiSX7FtrYHKB6BW0bnMHs6fuujBh8LQX4N1qJHoB3d0BCIi9iFVUThbH/wu2ZsTwuVq11XGSYz7lGu+ev4Vr46BCoXB/eCkAxPkkUiYhdEmo3Hc5SlaM8F2T+X59AiMep0Xt1ZGqFcAs28UnExx2hLyi9gouqWyzcwR+xPuBlmb7AcDxcfS4jtV2U2WXgcXRnNeWa5LN9Bx9sGGWauiIyeQYDYE30chZniEvt1LErCKoMSVfDWbbc8kF1YPIpFH9CNEJWig4u4KD3qnrhoeOD5JUzKT9pe1nqp4fft9UCM7/AOPc5sC7X7G95b4P6q0v0LpcOKwB1bk8t0UCjti6ZGcT9L03RK8V/3q6xLSKNIR8rHiTHO72kyAZyL4LVxuoM8egSOb3ZaRdGDJdVQGV+D/vZeJAfhbxof3/gHYzUgYak1ESlbzqdxMuZsMYb8+8jkZJR/xIMu7/vgXgdROlybVu5h0k/+RNk6dgCx2xiP/qYBn5sLV20qEpWJQC79oFaQGHKPpsWI6T91UxDtzQd4Sxv9WHZi0QKiePH94JV/ZCQU+W6prM44q/xYBtyqDWi5EqxUyrXv6bW33JiVrZTL3qPkRlLr5zefF+v/Hmy4x1Oo985CwPGyKw5JaoMAAAQ0AZ/5dEJ/AcNBR8AN12fXxn9h0QV5ITcPmblKWX8JUUfsl7a7KvGMtu/Q+fbLEUC6O7f6GU2e8/kGJbHsT9QOqBRGZzl2dfEegOT2kXTxXhYfB81tdi0ODUvKvy7vHwiDw8LGoNDIpJ6A7ohuy9Yi+zGkeZz97SjL0Ki0yh/THnkYcW4
cCBkgHDnWlSZ9qD09AGVSJqe6Tf/2rtvRArdPe9t4tt+LbPAFKOiCujKqbyNZEOo4fwp84rC9ueptNyx07nzk91d4/gem1hbSYy4VkZlk4AnujT0LbV+quVtuwCS9fPBJq8Zk11pSc78NUH0HEEuFnUg8AKY0elRzs58EZ4APKp+UGZjte6znlRuBI52BbKLhXBVac93qm/WhTXTmnEoherj25yILbT2bf9STYIvaaVtzZBIZ1+Mk5to5cQSKPm/hDX8smm+QRbRk38TeQQjxgouyzNdCDGlnG9ZgJVdDbX3eR/u4lDMbCOskJNOGtVV53VST1t/sllcOade4/SYlEuxLvr0y6X1Xvm+k0eLCDlCPAYXmefLKrb2N31DLGhx6j2UPU3V7VCWhtow0vhK6dvFSr2UqzcxaiGiIvARefFgJLHWj57i01rUW4rUnOblKmG0xBxYP/huiPLRR0IWM9M+DkFO+DiUlqB8yDoZhFbYtcRdpGVVRsMeYtHReV1L71kRNWeg0SqQVIBsa2x7MG6zDgLVnpimmUMKyV8yv18goZf15kMUx7QBBnLDd6KK6p+WBM10liEbhE7z+M6IUEi12XrysDlOaz1sh9hq0TifvqQr0qG9wbcPZPCc/sqxOyKlbWhyGwrWH0/TX4wM80/n9hXd0EfQHpCACp+k3g8+haCmJ/xm0CztViPQMwdRBhtfBoUo0rGms/8+PcqrEnLk3NpXXj7Bj/24PM4cHfPMJmSiJ0r7HQhNAWpLm77C2+tHnvZrnaC7w7oXVuggj2B4FWlawS20Co4EzRX7Safn3xkK1Z17PhlnC3TfZhPDEx3MHYFUYvoKqgS2Duqs1/bRO+01n51C9UCA7J+K2aBbOxpVQsKzLCq2qchNRIfWUV44wN+kF7D3xp4LLmJlpA9dXtGSSd9oGzb+6UQnEN2LjyQJHaJbETxa8TkNq/pkVrn+5f45FynIFIYg+r81OuNS2MQX0w8ilAOsVO4A2bhKJdD8L6YwxpRU49fES76VxTiiVQTNmiU4qJilME6BuhJ5DpkTqGmGAOlPnhcgv8LxuqMyFfq0f1g8cpIEyJ2ccX3sDwAtm07ndLIDL4eC/mfgvbi7l26CUYbuPk8pC1P/8m2Ie9wX7MqQRcURQVNKf5WHSWnz8v7tBrZHt3Axx2TI/HWtA4EqU0Q26ah5N37U5LI3rS6RQ+d0MjjqgdFuSyWsh6Tq+pXeQHqtUqufkZ2VufGiLMP9zH1QlOoObkyUAAARHAZ/7akJ/Ab3Qs4oS8pzYEqXcuACdRXAnZRDd391dTgzFExa9KV2SL3U5VNqWXvr75UplVnSJOvT4z40QEFoi+Lu9NJ7OnbKkUf3ZsxjjLK3FmhnSAvKFEiXmsGs0PbCnyh77pvSj4NEcLrizxQiyIhsm4OEWpF8Xw+1a60Pwio1W1hi51D50TBvTTx3GOYvqQMW/igLXv79kDVi0BSpHdzOTHMAUg8fSQXGTPl0HhMimxVr8G7xt2KYef9dduDjrvdzX7dEQIn0Xd5wVKbAEsnpCEgytDStLNMkM7lbsv0ao2FIh5JSGOrTyFJeF9AryftWan+aNDOCgfIj79yyPMqvaC3PJs9QKiHXzq1hup3k2JU9UINI2TphKgYe7fz3kzF9R2CbuKuHnXYYGLsYobQagyTbmlzTs70WwFaQlVTt5uJta4YSdiHTuwQJ6gFEZYr0oXY6FURgT5jtzATy6AtaIgDtPAWKzi8kGr6mlrJVk6Nfqdcp+BIL394jm4sCIdCl46056ZadtZ9jyvPFfSm5nB4DTiE7rKq6tKqg13z0lfrcFrM9WUZXh7uFuHr671pTTsx9CffaDFNtpJgKBe2mfR4No4SRGBxi3hDwxVq4hTfdzXyAu7Sx3C+4GbUNSftGP+5ngGjf5/QhsSvQlY18GBEWG4puBYmEfNUyvaq2u8w4dlUuTNuGJy7J7wg3o8PfmW+lZepUCyXDieU8qKj3Fl1GbMgk
NtKArMaq8kDrEMuHtR/ksepGHh2gpMOHTVPgHeeFjUbQyD96OM+AApKtAYp21XeAOtpMH/fJhYxlzYzW//9/NFsBywFFP8KETR0ILoiw2hBMvAV+sCQfFdv6M/7bMyQcKt7gIgTbBDI0PXBrv+yb75qUCz9BCxyY57z/GwQnFXUfJirI9jE9WkNxvMRp2HRemWVUDGIicUQ55+Uuhu1nyLmQ/+VRg496m0+VgfybS1a0mODrfqV4D7OKQYCmPg/hU7zg+jbfzyWy45xTNK+uMIMBCZt6mAnDZBvr/MA14FrMoUp+aBJvA84b/ZOHMJaXWR5IDge9sVbj5gVGkTKzE1gZNW1rmt1SarIorK5XVNqGtoCbIhTq6wS5UseWyFN87/M61cCVYScIwJojByvhAFy7CRK+exfF3L0zlIAEcrykQpz6xIC7bfhj91KMocLZILmMhK7GqaDK1X5tUKczkIM1fWtoCa/12FlzOOQ0ceQktgMG50D4lKgwNuk9CSKCPfN3H+hDzDXMhg9TEoDI0OJwbqj0UCgUnlaueDtFfZxprJEhnxyvw8xWf59FpW4BVrF7ix/lMerEJgCEbdS0hsvm252BvuLdWrKytdf0i/AVm9Ik/86fUK/rNTkA227ShuDhUs3eds2dY34CflybPFmSaC0DnECx5c4UOtN2d1Vns8l/Zw6gjnrUZ+Lu+o5T+ZH3/QvExqq4X6V3wMcfBAAAEe0Gb4EmoQWyZTAhv//6nhAHuEQS15Gix4etVgAqcc8p5r1vFcowLsMfFlv+n0dndd/K63koHX9xWH71xYlut9u+VXkw2wRu6cHP66+dH78BPVppTVnwwL2rnH1P7dB87mrwO4tQm9djELPovk+BfrTCqiKpj4nk6sPSbzshW8ybzbtXLk4jrGerDVdXu4XizlAvAJObRZ9NkwmnmxMEUOfpbld8UNVhS9AA1iveiO+CBu0q2fWAfLZjV+A3eyncJ8w7gz7J25/oUSo/rMh2uBLbIU1xHdC3p6AoPKwEGSFCIp8j4cuTdhRbTnLpXZrbv8GD31WaCH8mQLs+OeX8wNNcPFZdiqIzEGW7TTGyc3PKsicujNMEQL4aBQhlTPQ6igG8kq+goJTDfsMdkCtyqNntYARY0XbW8vNATD5rGjpkKkJhTXIWKZ8cNOAy4Xg5x5Byyn24ua+QesDizbgoiXTz/7mNZd1ThaDpUKwpM8xOBbq6oKc5FOLGocYZlm2PEFXgbM+//E0RNRUUqqG4tbTMld8HsRr/ltJC1dxJATQQ9OMm0EJhHbaTKYQatJQsxqC/Xhj1MJ/kJFcJipVHSsftoKKKqcaLuRzlohuxDdfoeQdr7EGqMPeDZWxYlFgj/EvGC+Yljk8peAliPpDuuKlqhgSD4gXHuYd63hLQB4HfFiYhAid/p+ATU/2tLHp8u8KVEqYg3GSuRy2dFoK4RdZDcUdHxXUcGjjYM8SjgcNUQBvmVLrsJzLgMRb+tHRNnb+A0LxRVdkf6BLkHmsZnV3Ch53Ib9JiHndc5b/ypKM057WrUYYRV9OUeqmuO0XVqregHEFz+kCO+vwni1oYNOIVA7LlkRY3m4TX8QSMLoQ38HvO4tPs+2Vfy8et36DS0ssJ8ktie/JxwU8TANz0bPCnkxSPWJxB7GlH019rR7UMY9MnU+EazHb7D/vdluMcdwu+GYiJeRjbgs703z5/mRVb4Fu44QId7HaWVQXZtKFH3IE//2am/7K7YmV2/8D/AXaIh6dN4fVDVxRFh1l3EIG4ojehn32ytSq+5ExKKeeNEMn3vzCHIlutiKRNcwvRAGIrelBTPMNZUFQjVvZMHjG0CJ6KXItGE+zgkS+G2t4xuMO44LtBxeRECqFJs8m4BpHSMoIHvCdljzx8vJzNFM5KpiHs5BM/qEE+GqAO6UkfpQQ14FPSM7HRi223mGzJKarzEU+At+EMCh7K0IqhNeqyksgK6Tw3hQJcyBoO9Fq4Ej0za3DC+sb9
0PNRAalXpAjBDpMMDheDCRdn/2JG51/eErfmqKdPgD33woGXNS6+RnjX3vUb39lJP3JWxOsg2YPJqSJ7pjuh08G4QGx2Y4tqtQInEHC5MaBTYSoAZZg5MvgjmgokbXO2Nvy+89lwBxM/BoRLUhwWPNICZen2cERRepsqroG4yd0CZSJKTqX96aDNnhowxvGRQAXvr6wK+8zXoDcY/aPBq/+zzytbQKVZeYXQqzlTPYAUBeCPi2zYRhC8+4Z7w4G2saBkAAATeQZ4eRRUsK/8C8jMAAXQWKq8DC0x/voIqxWqtyopo0WOE+Yj/BW/RYb1il79YMDRGwJQbNdpvAynIZlSZxSKwCe6ZwpbTSIpMTn66dImx3JrKa4zB/ov/WBAoXnuZna9YWQQTyDBF1vP7DITzGVlPoYmlPiLSFAfOQKzIcsG5CJGO3SLwuxgDZmXES8GE+gCVTxWSpIYqQV2OhzIAqDmCDQTMNvy37+Fgq9y1ld/r38ia9BiF8b042zIM6aGrjmJfMBIctPGvgF9n42qXQNvzi/ktfBDOFLOJp3dK1MiIv59PhLPRJhcCV5AulX/QfS/EvIc2U3u4x+PV7dtqoUiT1ATMKDHask5IvG1Bq7VgrIbDNublOehBnosHny/IQSro8ZcB90SYOHKZOqLCJGKJkbiHRrG1jFLj0L4Ii4kNlfivSdG+UUnRHcOrEWgylAG7+bE1AiS921j15x8yj0Ya9kM8CG3oiDH8N5JGy90TymkzKbwbrf7wM+nNbVxauN576O5iP9ZKWzse/8Ag/JTJgq86ML8xPwjXkwF1B/uciwcGa/aUTNMdSF3Sib3xF8q//6caU7xWmfFw5W5A/cq24Hyg9wudk5yXaw9K8dbPg01KRqEg1fiHhfVj6pogMoFXHcmM+IQSdaWa6abWB9i9MS6pZXGihD4Jjrz8HMBAyD1foOuB1ssa/3UX1ORXi6IKWNUHIfIZgisogawigVNuy2iD5gBGsm/XtvCG1sp9CuSP5l4X92PMbXbe7vOmPNuvm+M1r7jFiM1fIlT5kn46dVxR18gC1rsgeOdujGjm5WXE8xH5PHS6MtKray6MGCvy+HWQkf8KqvLoM8hxn0kOfTDZ9HdrBlXZ/1QCUW+u29g1+DJLuwgd/3HXACPz0qrJfg/m0ulXO7u9CfYcQvRThKPNgMBb3vgNktkD6+SMw7EviDIYjTHUvG6YLLaIynBHmoLkLLFyhI+6M8T+1SGVBpsiYrJbOPoCKtWtW8PXHuCQcUt//8yb5YxiL7iU0sDWXsul0G9flqZo1iRDgDaBks+rYI0OFRbRWjMB3rXRtNAp/MASpEtSUm49k9Gib/J3GkCTYENxv2xPmzFWHC5QYqbs6FVlsa3+YfAenHxf17DYt2Rjn5j7//wJOjgstPv5LGF1dIA6mf3/UScihs8/MJypfs2uFNXCmovS7LhAGViaY/5jEuE1tSFkegRXEM41u+Lf/qV+J4YULtv8wuQy7v9sArsLQ+NQ/44YPcz3Kxv2VWUtvTxbSD1Ycnacjsap99SidfjEHnCDdRT7s2O7UOpVKojOhy3kBlTLe/yYImIzd7rVxO95H5LHri0hV3pOuNC8iQb+u3mM8D/oTuUBbltahkc0uI9pnyATL09K9QUFYwVoCh0cgH4ezCz/9Yy1w56UelFpcjNu24rJO/fZldocjyK+oX2cbQ/0DM58D4ePklewBGu54VyODkpaw91Yc4kPIUtnMRAesJBg1fW5fdzJ9MspJ4KIgrxdXSCTjj9iE4Iotp7gVPUwfpSqAPyOBxG+PmHJv7s89/fCj/AATO7G5vb2nyt0IA3292RxjJsoU6j8y5NiNuuOMK4vFbdrpzIm031BXZctchQratQDSK3DoiCEHXKVH9DwV2XVCAD/xzd6TQ9pKE35a/ni2gAABIoBnj10Qn8DrfCs53ovVAAtmU9ExVtwmWTMGylabIB0HPOCa0qQY4+NEi6b3rEGYaEpu7D
xE+ecedvprQV19rVzSe28n1Yg6vBBEctF3WwLdZ97wzqN2iUfSpufGCAHmQzfXTkJU9npSGVe76NcU+jxkl+S/mCmsEVQLQOn0+jLqI5jpdp4BCdapfTSfRe2HkiMk7jSlqN7PSep9MdAI598cGvQRPVPRcaKcCV26MaL4vlOu2Boow+xeAXnzxGsDHGEGOsdRMJM7rLahoi4L5RVxG6NwyBme6p7T0rSmW5sEp4gTk5XLNGVEOHKa6q45WdocQp1HdJtF3ZEfO64wAmYN0zXbKMTb67LMwO7GpTn+9qN+C1a7afT+vDKo8Eefdz4YUIJGrpBEgefS6h1v9aKT9USdZ/NJUgbvqw8L9fIhLHZ+4HHUpQjHKm5ZvpQzcu2lG511j+le7Sc4oTkMIwSAzkqnFMDctV5fb7zXkz75G1H5o4INlszoqDtEUxlHaGLiFCjRhtFFtIheayNvyjkzx3fEdULuSreoH5OIYe9FBukrbZ6oUFp7SeVk/hZTpzro7Gb1bWNWp631GRWKTv3Zz9qVz6XD5g2DaxEPZQ2HvQ8e/UWdGimOMmPNL512xgxGXgCbTJTEs7XavAKTWi+mnQtHNWXw9HM4xmX2OaiI+oS06upizMALpg5LvTdiranHteWVjG3iHw86D9GpdnNlOGryk/lx9iLF3KStRe0FkQy9jnPB/G4JnXE3J7j+WUGa4ONjpcJxqlXPBr9eTigMzbg+QHGoPdv5o384lx4jx2eJJ9CzwgqU/x12m74HU9pclLMBlNG2ZJdtW+hjRxONs1eEnetZXSq2yx9HpmPcfbl+r6GHDpJDrLbJuw2Xlfo/CyJImAFJ3HZQRvAlHYjHc40vwofyB462/BI4yUNfVTcpcel1duALJj2h4BbGafOmoS7m0rX41O4aIS88hEu0l2WdmULjvFHPHz0bC+vsgJXVtNkVvzciI+RsNMgq/WTkZDZozOD5OVefI/FWLJ0+BwCuPLKP/CUghFTYj5angUopkigL0hzzQxECnQyZd/3X9bDQ9jyZvlptp3EOBX/5Nnr54RFQo+aonKt4ZnjdB0Jkle2r9xnxvagFkXVu2n8qzYdIMuQBuLIKvBpsMUKylyqGMb/zOlsSIIFWi/3JZNblv2xnUB2AsOj18q//8ZYHUdxFtSTEiMP9L+f5isnZwLOy+Hf3Z84qPmHDZ//pYcpoEr+JI0FNbmT/k/4ZysCpCmdzUZTheVnUVP1yUKrxZj0QHfUlpPTFA1MkWt/1ClaVYs5Yj1DJkiUbVyWHeLsA4DTxGXrU2/W8QiI7U1/GBsg+/a9BY2vN9msg8KoKBi4CqNIf8MWSYMSsX7WFO33EiG+gGBiviB53g9FLdiXnDqYV6CTC1N0EbheEY4ReoxrMcVbSDsdSmp9Ke+wiZ6EqkdGYCMf6S/5iFVaIar9vRSuIieS221gR8vtmgZaErDvhby5ChZniYajzuFb9xyz64KTTW2NScvAAAAEHwGeP2pCfwOxonoRV+9rFMgJAANiwgR5elVF5iFeS1zuHX/CVbHzwsjJy0BOGPiRb7aGVvke5mDyxFA8ZrabucecZm8l1pFfNpEZeaXZPSifcXICLlk+vqvJxV823zlwQzemhDiIIjMM37dDTv5/pKqE3HCPpava9S5SB8pQ2K6TB3zlwTecKJjDuh/I4UOts6py9gBp4f0ENHiDLbUU0HkpbsAW/e5kXqLVRbsKThaitMoYr05jR+WSIdA615ClpDko3/oEejPo8KKbmbSsj/D35VLy8vBc5xN5BslLFdXChZm8Op+3sXRwLzqKp3cp0rCUngH5IjPaNxDoPvqtHtxOBhW67np7ic9lnm8yKuAsx1LbJ+nyUl/H9HaGPs5GcRe6SGNhPyfGMvx0GsZkqRBPMi+w0BR0L8IPR2fN4/55o0MAiLgZfQzGVl1205/Whtd5ffCApemoxT1iUF0Ns9XbhkqLhxXyn4e7zAwoPCt5H3u69NG2In/6rnpA1mJOA2T
gAG7j+pdkZ1HCXAjiYa3+kR+FXl5EsnfS4nABWn8ZFvbXZiXU66LbyCJtM5RpY0lZdxTMxOnYBRe3lSRkcsIkxaRf7uOT7G8uvgICkAijM13ViRbS6lvdilwHi/v61S+feSeN1gbA3enCclygOHoU3Rw2UWif2QpO/+mK8ozyKj/p0o3T1clJny06ZkO5CD1mNgSQeACVYUfgMSUA6nQBzGvS3voESNxLldCT1p0Gm1hEn1cjWImQ6bFuXBtK4RBQI63kIhxOaQcTp6WQQHW1SzFWcxXLTA+z/t7/NnbbH3HJNQsDtFCJO8xgSuZ7RhW/tpCiNQyyN2YtJxF7Vxr9J5FG9/gjet/0DsZ9oFr/Odgx2oL+Wq6oU00XMKD3/xAM3X+tg3+A20Fs8C6AXJIo0jjX2hXQzm9wcVBj3SH1pScwfby9uWzdCUx/L7KgzbdD9vlx+wtdsRzxBkj20Qb2cQ7Ih/Rgrcd0PUFQTlvoS/6Rnc7xYI08O2zBFx7d1zxRfpGCX2UESP8RNLkLs2rf+7pcRT1oVvDj8tTfVkKXeoWA50CnYJjlEtZdgOYAoSBHSkWqRgQ2QwUJVbmWWhA4xKgN11mj+BCU9ug9NJfO6zLXTGSX14lJhLM/IYv+Pk9bUUcP6setIkcjw4uA6jnBRKK1BLGWcJ0CfqLzUZZ6UEPkgbMU0BJ6P2VaYjfSE68mCqLxv66bfAHUGcPTbJ6ALe8tp/5WzMS0zyWSkdiD8cUzAkg1NY6B8WsRaFOoz8aOGx/yP1qp3bbRHg7Puc5tB+jgJH6vIf6K2zKBV9eK0ZfDbpWs2BnSWc3vrkWpj70pg2dqSnXxTTO/oO/YQar2vupbrImE1PqrEoR8v5WBqz2JDu2LHggOBbBDLPRxAAAEv0GaJEmoQWyZTAhv//6nhAHx6UZFP54S4Jr6bGoAIb9Wq9FS8pUs4RVQ6L/3sHPD4dFXPN2lS8cbqjKsh7gmPfj2BRc6K0kRutFUaA6ih4PTDkzxvk2VYxraX3b34E/gwo9f687TBO3Fk6LeHTuWwzbBKs2YXbLjjbK/9RxkqtrVaZXzkKIQyPoHBLrGpYWqVA0fk76nj4DSy1urX2AyTcs5+//5ektPWuR4WsaOjo4Kv+JbjW9vhPZk0d1doL176UuewKEXeEFRhRnraZiMXLc/3erRHUcfWKuOGKTueQ3g3eZNL4nGElG0++eJ7y0oa47cOapBo52deQ5m7SHu+N5IonGZoGHNW9I1qKdbemLOAAkUTEuVrppka+TMix+df5fsMhP1hg3UTKYQZwpJCbnF7XA40Y050dbjVNGAXfKIIAPafK0fkYUv8o5VXQbvyQYU7CDnZxqIFsjAHxVAmwcPvEljztKDXjjIyfHkNZ6A7WaBu3Ymtx5ZfKgguOvNmTQTRiRPa//fuD+HrKzoKFCEOzasQk0Pgu6jftcysfdPPNk2vs/hB4xy3SwbvhzMoV8wYIABy62vJF8n9mUQVsVFJQTeqqk88vo1ObImF6HgpTo+VRfqqKHIkiomfr+rsr67rsF3AHJo68nJc4TZT+MVqfR4GVScn5JQhvlEiXdemw8BgIMTZTfDtAFSFyB3NdaU144SzScds8NhQqxtyChOmmt/yljN7lhdTh0gCFCVqzAF/Qf/G0Yv+Co4jQLoHn+nJQh6XYAIJNYof+AzaunIzh4Nw54Zom4rx8VbsXG8vX+FnKY/APSD4ioGSugL71z8O74y6MAFpfM/CDyrjTF9Mw6/0BqKOCofLa+lnS+Jok7SdadL+3dahkd1kLemCQY2xI02sOF3zb+15Xbpl2HLyHemKxTfq47Mln3rPrvNWi+LFphHHfFp/MUMJnlNu9Zwcxlc8efv3WAUC42AjOEVvkRXPJlCMB0s2bN08UXWTU+neKrE19IYfADAvLRAVvvNL/EvoJUtFkBTCPYlXVqGqF8LtEPIsDQng+MTo0TQ6y67bDD5ULGyI7RNRcQFm30DbN9Nw5eQFmZ
coboWxkZhtRR9G6UvsCDlrfNFGzLRsit66r8PRtUTpx3+FA/hRqJtdGSfmZ0rs6eZcyPm3UEayG18FeyslsLz5bIMsXWylZIjLtWqpVAworYpTshmm3a8uqTcfl7XHBkAmKIWyeYf9uNY9rqhy9Io9plRrn+LQJ4PkJGyGjR5aHBjt7/17wovWNu7hDyh8+UnSUU8hFWlcT+GLqrlGVazWZKTAcaWXbz+BHLY43LJNAfRZ56/K0Ie5jVEqKTS0WvItR5Naq/xuI4hbhyUDJd7sY4LWsvPG/rV8xokWkyYVBN2uH+52+aDoCr5TPTMOGgiWycXOWDS9JvE38iuwqMa4CpnWs5gu66iS6uYu4yv7Bv+v1gmKDShqCp2H2fQpcIaynUSM8RFdfzjMASlM29TtC8n2Ry4CqMaoN2gsiAXwUkKGbnR64H6c5xVWu9iZ3So4ye8nYpENsigbYCHBCOwo1tPov7eg/0dYVxTN8Kqw87wlrfyuV5Ls/PXQfy1jBO9zS5xcAAABG9BnkJFFSwr/wFiYSUfYAJxrUOe1/xcICdcIU2KaCC07lNfTKpnARfSJVnw/DTIp8/xWy3H0j/C3ScakfAtGA+01nbx4uGYH7OSKaZM0LHw+y2S+/36dj7MHG50y5OH8YPiS2I7/UCtJ/V2sUXPunNtni8fCQJRBk0dG9vkN9fMFzXYFCfruLHHmvXxiHAl9Aea6eYtDpxgt9TUceN8pZu4vvOJKTG4mcWNHxJ+EXZ8EcAlDphS52LxzIsSFpU1Ze/VbtIZsuSa1o0uuRUVd1fzA6s/+bpWLO2QQJExv2fGsyUwQok/IUHBwxCcqwLt3AVZP9Y9Gh4ZIX35tQkVkixJTCziYwECe2SYdhskV6W77ZKxrIQC+Ir2rdLecSBMY3CFIH5k8oM31PkxYCVAS2lQC0YhmEZ/A9HP0IiXAz6WHNF0JJ/OnSn4iDHubiPGcONokY+zGj0iEdQCial8Slqkdq0oTafuvwdo+VA7zfcWJlSx9bWzHtZl8IWjixIsxbMzJgfBAARfgxrEpfmIDQYFPKi30Pl5AE42F5g7PucN9K0/U+GqIC3RDLkRKI6rfJ5gptfxGEN9D0YVb8KdHwpejCJI2NjAF1JzUDlgFe39hm5f0ie57dDc7zGo7HJvOMa/IIjhMn+qZorPN2UHF71ZmIeJGkV7NwNcxTQe1Am0l4GwJX9MJKb/h3UjhR66hTGu5hxdsi5Nov84LTkaHCmZw1ePK10bfcODbbxtumYiTB4hzXtxv5eyv4UJwmXaxMS7oRryMk1L4Oqo0XRzsR8JGdKqZGopwMBb48uh1Juqcjlbj2luCGxJw1XZJau8fH72emZse5QsLVnKvmTm1afzGHb9qIMSXHrcuPotwT2jBkw3HX5PZJS4G4DucqU7kj4ODNrFzCkmWL0VjB1TLVy/+TuDnmteb34mw3I57WIVx8DuejtwfU5YUF1UaILphZ5lIdw78EgFo0IHhzQKfITP4U2LiYNB+zFTvrFU2k7pEiEm8CH0mBiDHP4yty566Kdz4Fgt9Stp5/vLjvk+bBg2lPweX/VAEhAHVe9xYWgteKZlSM3BajrvBca+ELM//NK5CuQgzbjcpvqcsPs9qCFj3bCM00+iLojfDHADOhkYEd/Z9Jdu/KjZJXeC5P3h6Jtaac/SehOFmFamGXOg03ALkYQWJznCGFbL7ojQInYqwKR85eS6lf0aNyuveLQwGgr2egjJa9sVRWJVaJZNMkEArSOuk+lVKbMBCGfyG2i33DELuzDt77BMjgFjMeGpDbDodtIR70FqSp5pHYwf2G3A0vg73eoeeklp60ZK8OmZXemP0tmsoHn2Z6YYy8Ign5oWqJ7MMcERzdSsGfFGg6jnI9YVzt+1oA0MngnslAQrlEtARw8xcIjkmyD0EdLvmULFUVn8FL0++8cG+KJo33p8PKfLs9ekmgzuaKLgPOT+jZhVnfTh5lXeBf7kegDpUfYq+c6
9c5WXeXCKNP9G8fSpSohrorjesYn8cfZI9pSRAAAEPgGeYXRCfwPgyIAH87ewwbF8/TOqQ/0OkY53N0g/79xGBnjMC+mQHBa5xzfkWi8WJQ4xLwCZldzB7QO/PIK2LjKjm6JKLXP6x9LF1vZ6jCC/0IQQ2zjqFoeobGNE6BcPkvTqnLZLxLg28GWlR7SAxFM4mHRWC/RTyWeRws+HM2tn7dK4O2NpRnOyuGg7dFtHm0cHJxSxA4V72cMLtLPfyy6fYwBCrssga91I4GpSA1ECiURuaQHPqNyspVQZmsqdl/ulrS2NW2NnTPCS76J0gGbTnGdJBcG5RlG2QgR1WsvF891QjqwLUyp9mEQ6xQ3EzGQGBX5yBuc+eowyw4Aq13bKh/r7ti5e9vQ4hR7HTIcIhiNMd4wrcYDIdriOo8uloNA7/K/5EIz3U//DKe1V5MI/CFK6Cn13aiyJOWkzg0w3Jv9Y+W42Zz5jlCUY3JrYHrvpWvoFNSnqnZOgUw/DhRr8yg1tDvNCs2//e/2irDZWYfUmaVfF3HJVtcBQjshptIS1gWYuyJrPihgPqc7Z8+ZlExpXw4vhZiKRRnFuCBV8KVimSMt9yZVp2N8gpLxViLfYs4X37Zj4mJ9f8/S/lg0oBtIn6vv9HxSh019PjSBNfzWkmPe2JPjsDc5A7kn6BujAnfGXgk/H736bZH28kunt/TJ163RkJ5xMNFqoCrQoqWp+U995MuLrzRL90jjnKvSzm0caMgAAGdLfWXCZkBQMJnCoXUPb3doeUx5hRXKz326KssWHnBWKTxQsm/6MJ1u5e/LluyahS7KRuhrycloHE3GmLjss6aSSH2544XRBQfICXCJEY9IMb4t8rH2+TcNs0T7VJFoQw0Ew+6ix6feBlEggKAuBjGHQb8fa/GcfbXkowryGL7fquAArlwgsjIX51RH2n+BFeVGppVBCs0WpIBbWANG35vjF+v7k7xyHpy0Wa8FwxFyTbcQlMMrkqEj86Ptlv0BJq9l3uD6S/p0f4jHKC+qEXkt5X/z6Ztg9gGB49YT5b6z3n3yzFy+/848DcMn0mHotZrVw2r30hib3Vw9ptpGn731F62jBoIc2cLn8BCht3XSC3vJD62xBmqxeuzOyqedcGmhL30rI8KL2tW9xwr78Wqo9Hp76n5YAugwJm+66IB+TGrjsxLdEyQ4vCr60CjPGnUe/CunXQp3OoYy4cB0jOmPQylot13Gw7RalS7ZFHaM9o9H2X9ARkqCCgFezTwzjQAC0Nm0Olg+RUVajvvvcK5uB2EpYxPIWU1DkiAP6XrAzeBO3wJkBrPWUn9a1ld2VwSP3TFIZ5aV1tNnQeBH1aHlvBwcmvaUdUCLX0AKWxffURoS+Pvbp626mMum5orS7juHqrBnkVqJnACA+4RMfHuAEJiynzEZcqJYCetqmC9UgVcNb8ISj+0eWbzGiXJLl5rWHHGzlm3Jbl5s1clS3t4UbvkcWXgAABBkBnmNqQn8BwxXkAFs7Pr4z+w6IK8CP2knmrQ9LSsSew1DQBsNSvB4p3VN0H2wDT4h0d/785Y7aeC0WFFGFSV1clocudwc/w6Bha4PyObxayWwp8p5UnMJ/s/6xYPigH8kh+DRYTTf2xCqZ3Qtmbhmooyd4VeS6tUMX++/ktCTQpTpp9srfJqvkaHjJh9YH6n1Bu7MBs6+CHADusq2yP067ypXUFqYVG5TGAAPh0f6xUP2SgjtiY9KdZvZlH40pYCVScy4sr1IKl6Dz+owo4apAQCJCT+swW/CS47lVjfz43bRaZxGTW+eTQZgs+kxOK+PqWIg2mQGGyxi2J+jiNmDjFusVwvEXl54aiihcm9wlHZ/iNXtM+zQAxfWYwRib8Tq8zCprWj8yF4Q69ohaVSrPOGIlxDlgswUGCzUZdFamRjUzrjc8XZi81SgIyrD+FElBGOh2UulNdrL71fZta8lDr3gYVasdG7gqln3wjwrjh+wVapDLL9iaytz
aJ2kQ5o1dRGflMux68n8XdZZRYv373RRBfxOVmiGzLAr9s967iKSPL5TiRPKxzF5MUEYKCjkZdK/tLCrttmQnYePWKIQD06B3SlziF0W4CZw7Xo3kcQXtOK0pHKICYqaR//jta24GhCZ1FO8NsWoUcQ5LZWFmGg5k6ZoWDCPghTp2zrOxXAwd11Z3T55S0sq8SDit+a2oUjBEywflYTLjCEKr2E1aAuP9w4wOoKlFJ8+K+2ugPuXwciCqwQcwgcf65pAL9kYp+GRZeohdMWGG79x4BkH6kuN5/6GA1nUNddWgZDYRyidSfAzFPLvRLN1bf7gn3wxKiv1LslONK01I4LdtKeprFrm1v+IAC86PgVMW+F2Dd5HHi62BywUaQOZYNqg9Q3n+ZtrlOeR8j9AX/JY/FTwJozHxeMAcFgpARRe5iU+WCnXamVmRZFdyaeBKVYpDz6mME2s3ycdA8OqBvK09EJ4q4dLjl22jvbKbdFT/dXdCa5d0teu72Ot9vkX5A8mCeaEUQI6LEybPh2oK6ePpo7Bm+qr/Dif9vHnfEtfrseLDG4/kiWzqlntrneKS1MmzkW36eOJs7MvUfWqJTx/Wc03TGKKve/iyqNrmpm6VvLTsaEruYAONsqJKjc+LXow+9kStEvBtxo5tERfLaR0xuvk2nIz4J4uBUgDGYD6wtraDxu+pnXRTN/7nCIk/WjI4yyqU6WRwPfbs1T3vA/gkXwIgh2oMY9GVeQksNA8u5W215w82xqS3JZV90uzeS0wIFEUR1QEPsQJNZQjIXYEreLlO96y6Mf4nn5qN0Q6CuSGq3VvPbLPwhTq+qhuv6GaiLau4/a+NuIHY7SxtgV1GEw8r5tUYGiXMlxkO84ykisD2EXxDj1GZWhinUwAABIRBmmhJqEFsmUwIb//+p4QA4mvfh0ksiADiOXdUd6q/LTujoy8WUOYtz62D5YaD5b80PIjerSLTM6dKMm1cQfEElcK/UcYW84zuzqhT8cJwDoXV/iamSTJ1ySCj9tzKVmCOJYLRFjJ5KEDi/A+/itpmy7lvuCFsH5Wc+5Z9YtdkNEnLo4AgKwA0VSVisX69Yn+uNqJ/9vjOnw4DoO1e/b7QOswtFAKAbJAjaiM7OCJfZY4p4mYgD/pY7DYuYC9AHuuGt8revYWcbiKTZJ7mA7p/jwUrf3LG4R9riNyQoqtQlDRct/+5oA9SoGPAbpVaNY5J+48Csa6TLkbTaxIRc7ETmeMngIevNmw3tSwnU+7THGA6f2+K+HOFyUsD+LWyYn9Rh6np8nvlxs6+cwfrSWuu5+RLpltsI6L7xKvCvxJVJQtll+PtxidmZ50oCZeXt1He3z8SxKZdxtnp6p2W/c7ehNuWFtngDh4JvKAukLE9haVntye9yosPABOqWQmCoQhNxKem6Vs+1z0KpswWI3VY8kwzY39r/Hk6nnEaw5zvDOg6G4Cp6T0IwH8G5zAK0Yud1Wwq8cXRaHdQUXEbdYbDLJBU5aiwst5md8kAzMkCiIa9V6veOt2vwWHZLcASmhQ3DLoqEXp2GepzuYBKasYYdVgTbtXtsZPpQ2kUPWrEgkD2bsNIWkNjidl6N001QIib2lU7aWJaRgyRvR3g8My9JZ1Sgk7mdSXYneqkkNwHMaseI8pSR6Jio5CCKUDVpZOWtqIP9kRds8KXOhU9oP+sFVDjdlJfeC1zlVhH/h1nf9tsojoC1kdX7yoId4cuYHQbhUZN0a7h6EiRjl49WHYHSncCIYGtC8se339ggNvkJJFC3QRWFxNlhikl8Durf6BcnnzI2uAEojCtV9Xm2bXMIJLhBYUnYwR0iuCyaa2d/iNLFkqKcxvEdgu4nS84mQP5jeEg6yH7/6YGpRVKyUTZ1EDLGABkvvywPcOeMS05wQhfPOML9caqwf+4Df+AVck/wvZYMrU9+Ljna6kZ8GN/kcQmMpvUE+PaXP80OoiTzyuQqbmOQrIHgSFPf6utGCUSigj9QqEPAes
RZDDwnCVGAXVcAr2jNV1IkKOnLUieWs9fk0R+CnfesspHf10eqNZP66OEQbszRsck6T86f+w7TylosIhW0HKML/7rAoq0RczGNN05Smeh13NgX6sTKSroIyrQn4nExVhOcze0JqWgi3/ddwbx6zBo6+Ta4qbyaTWgl2G9VwZcDfWFmjJDvtgfVTyQS/lo2ntOZCF1Gzd41fCkXH8n2YfmLXpX/FGKwPzhRuCIIM6c+YAmZuEFcv0pKl4ZDsfoqeiNpxTS+xf0ar/Cnori7ruSHcQelQ7ADmpHty1614F3ZK0GUjZws1D+FE+xwgOPYnapF6vMij3ISPYGTb4jIBmM3i05fwAF/gn0DO9yUWfpP7fiiHk3vYPIjmYzxTXe8QHOTV2E6rpd73IWOnlctyiaUUBtYo2ZBCpQYBi5tl29wTwHBOXIxqp7vgelAAAEVUGehkUVLCv/ALfxVOd4AEmxIdj5feG1pDazUfpYIJXxcpUiDT7rgUY18MHcIZcwNMq1YJBdMXjrVxrO376iImt/++kI3wULlx2DYy9KQPeFuXTzadniW4LhUviweSTxClA3aYCVsXK5O7kVvVk6Mj4RuiE8In+VUHCvwSCHqSVOtncgdSmL93INL4W6BbDKsHpkOn/8p05kVqfB5qH4kKnDPa6f5IODJqDryK4kMwLJqmVRnpYJHSmT+0cKk78x86octkAEPjI6B29Uz70TcYLThxQT1vYC5+UhKC0CpwWWWRGKDwYl3vXDvaIA1FUlm/pZlLqRPH9u0OY6qOOkuI+EHTv+fl183mffek7CMkDF+txY7xrzudV80zXI+szYHP4cZBFk5trBItMewBQX3gCCpG4YQTy6m0X3JL2bzlPrNI0+svE23cOItXWnRB/ZZ2uMZmDGehmHl18U//Ou/PvrzRr6iNF1zKPr9LmydJN9bXxX8RKIMYFMe9eI4vSiyKivCCE8PbKX04pRJmDT78+ZIjDmBbJA3V5X/TUJaJ5JS9u5TE08Zslq/R8BFTQnnfBS9TgjewCmBJmbBJnGSoNCN6Ti3VCTV39SXHTGiCDnFliv5x6/FHv5hLb2kWm1x7cVR/PBzZuSH/27IInkLJ7kXA8Rviz+0B7kit5X/XGRrv9VbkC8YQGMkJe2AbXMeCyOEx+vSugay8FW7xGGW6Tyl82agXxyoYUSIVocJmNLijIxnNb4n4xb65I6zJqTPY+xVcHqsGW2LJJanrnqEMRjtpvWOZe2fDsqTlSOm6CIj6T+q8WDgGqhhXCHR4zH6zIIy9stNJzTmyDaNWdB4FvEMMxb4JhtFDLh3aiBP9PDKn4/i+fUp5JCnWMNxj53geBaPchng0zsYkGjEH3BSdFJzFe+gK0ZGfRAywmkREv953YZ3ynjgMfHgmyE4uwfzr1VO0GCvIjLFcNWzgpB1qmlTbsQdFVtb6kqv/nTznbOLr1vNL1NKYTjoc/u49AChWe6zOu/wukDlAmDs+31eDqZx1EWrqvoG2zfLIiNer893cafGJS1Qism4pOM7LoXqupFb+QIQRPGrG9X9SoXYBCEIM3JT7iAJwBG9UiBWq/bL74PJTfopK7HYcvKlbIi2fZT1gDbIxjIUSKSLmK64BM9EPbJRORw45eotOlgGUs42s/Siw3OJiXhqrkwRzMdtiM9kq4ufvVpNOQpCywGa9giJ1k3qgGp99nOLrxjUxUDJ+RrXuCiPx3XWtoaJYfZLXFKAZi+nlhKkSKxioL/qJ0+NGZWvbGMNtlE7iK24BYcjnzSGsFfnNLe0iuv06wrD8JKvUAa4b++ZnrBS7waIdJNJsEk+CKUC9YRgl3CQ1miSyY1XSZ5SxTVRcvSkj7ZAEAmfOLAyCgfT9CtMypUv5TPePFi4IJY9bu+fHy3wRMULODvq0tLSjE8/R9HEWL7PGpJAZZRAAAESwGepXRCfwHCt5mlvgBuvFdExVtujSPAg0EmlGJPNnUacR627OqNI+mxba3jvBP4SwuxRoJ
znhHGlxKe7nk2cpc0uFPtdCxQmO0k/Ob7yfKmCMDjItfmtkYlMDqocRcX6RZCNBpwueYHVQ/G0CQUTyjHHvzHFcDil4S/OlqpOVNcf4TSn3YQa6wXVVz6FaySY4/bWTUNHMoY3NW1/OesBebGDiubfggQq/XjxscyTC3TVSmelz5oRE/tRrjJ18yZz1NWZbh0zoYxUDqzPsRo9CBuz3ecdQkk84085D5VaHTp0aW8d84dGGU3bcKUpru7ABeIysxWBFrG3Uu7bFEq0vbIhbOW1/drwVx4OgBJz5RwUsJgHBa3KrEs3MgE/4KvLrU9/IDxTbdPdtcjKu2zcCdM8wrqQcx857/mqp2pBM4tftZiq2xoQSJH4/gkN45fgoBRvdiJMJkXSR8XBw6+evTk9p+JwJIJtquInXR/L083qSX9CquKAsjZPOXsaEL7Cg7d63mt193toDl9iiXnCllMgIWsSqJTY0dcMPDKndEbzfap8JUr+Ws/yS7UEy72DYgbF0pWwd3JYqU2FH1Z5DBCyEt4yC2iBBtqVz/mxo2LC5sY6jNlbBkcwZ8lGkJ9GdFPotIcQWHcjRvodkHcZ2ll41rhm62a2bSfiaUqd9D+FFCR2cCiDc2/0KLD+be0Yp2WYl10loiDp43KJ+ZnVmntissZM0pTyN4rqx8os9NY/QDZXAFV7liKPxxi3+OGV1S8vXD4X+fd06mW05tBNFE1YpgNJjCIWGX+Ul4FHo2xpE5P14xfmLeQ3UilbmpAtxjZkHPK3a7xhqlrwJ4l5OM+WpYvGycq3X2bleLK4p0TSQni/8GoUmwltjBJmAhWVyAj9QA7WHxpfjKGFIb46oe8AHCXm0eWacA2SmhfJN9MnpUpth5xo+n93k7FSjyJReoC716M1w8lfohiPZrXdPESBrzd+kS1ZRDMVc4iQOg7jaJPOJR/6S06llyfTtLj60bLOZE9WOGkMKgdHfaMzR9Rq5NOBclKqC1nTLfAopwWrKBImTxNOFpZNwAFX4SI1y9ymzAzmb61k9kVRQjmrzi85gqCqAyuMiFJktUV8wBJPUFBP0WPTImwjvqHAnfqyIBIRKzts/vg0+3R9xV2qhYrRwpCun4kKrnlXQ4e6ZFSL2910zPh0hX/D3e4H4Bku/O+GXSePgg9d9fT3W7q6rCZ2iJtNUjS5D7hImQMV5EjUvP1//ZYuqKy9S7BGcAx2PaWgDc8ZLbWi5/NQkrl6qKiw2SDYzz3T+3gjkJwy3lCDJ0vy0okBNnPxII1YBPoE9Vx1Hv/xqGk0j91iIogeA/h/PAI5egodScEId9gpU6XFxDIYy/1TSx8H7hLwCWkWBGYVCrcdzDrnWUQyZAiTG6IqceXdlzGxFQJCVour4pilV24n/yQ/8PaT2MAAAROAZ6nakJ/Ab6HuBr/3AC1sIEj1L3SmEQeCZqplkT2IIudpYI/G7WLHEP0hpw5pA4vyXWJwq8NCa53OsIXh3U0SwopzxdF8clhoB5rPFk8/1nXv/42el7D/aru7Nm5gIEsExCaV7OLDaAw6GKciEVBfHPOkBos+quq8ZdfrybhSJymq85pD6iwu+nWgb8ClfuTtAiEG9f5hqauHKihuIj463uduJM11MR3ZdxWKbVGqX7m5lkxOiRGqVSZFcN07Evayaiw61XpaFdF0G8RrUp2vajzssCAApdS3J7fQ6WPERR79V9TvJ8bXicxQCLjyEFZ/+RE0bNy3Yu9O1KZKP+9SAaSbJJ3UvJHetfNGC8z+fjkMumELqYGpVqhrf6+tqBxUeQWjnrQSzcLiuR6RHwx7yBONj/6ARUSBH+fXfm+DiqNbPve1sfAVCAws7Axx5bIjRb0X+dGucdBS+YzmtVTEXv3yPh7StcxIT2XCKlzPP/uBPrRZCd1oSjwx96JyuK4C+rSFEy4ha+mHx9sCEYjcvhVbBWV/7SpEjC06AXKEhR4Bk4LeG13cweCxIn16J17qSJTbFJzQtdcg0ys+/Fp51a
PwahVgwnKXistzqYZR8CbVmMyrPUX2+Ku3H9X0ou6uFLUECWfz9eBfKu7gEEGw7B+DVbgA7+tzxZWllMYseZaDKPhvRczlImmiQmwbMKjXLHJRc/NvW5d/N874PAtC6kAEPDFNjUDBFpeAwMZMLsu/rBdctL506ORoiKxLTw8HCUkfnL9rGXhLCCutjILk41GKlHo37XQZ07OVYP4+eEP+d2tVwRKLn/jurNfHSXm3S/20aNR+qEvjVmS2wbWYMdDVoNmre9wmClO3kNp7b6bl9GYRZcYCWwr8bt2QQQvRFRWIck1xFVA7hKsMEtJt5M9qwBNGFARjGJZb20dPyuz7Sk2KpYiw4GHFV+GTQWzujif2wpA9/xwonLWJ3NaN4azuQBfLvbUIDrYt6Wk6EV12VyzOE2JTFovApP1JBVWMU3ObsxliLtXrveQlDyzj38DjeJPJZ2tWvz7fjjrV3USQ6Q4u7TDY/BR5eNl1ESutMAS7A9/8kei5iJ0Gc4pOrDa9p1ER7zFbUaOkxmvDclr/nStOuy9oVeRBKKZSQRWBEjXrPBAY8zoF5qaa7HbFI7DLPXACKZs2QuKwQC2CAoa4FGFOpAgHYjlqXGjvmKXC03T8fW+0+c8a0+o30Nthbke/mfJPEw9v3o7HSztC2gQatNSC6S7N3/1jtUTFqMFlI1PgPNfkCFQe9351GhgJCWlTRDbKZR8MIaqK8gBg7wBvsQEUd9atOOp/xAdS7FEOZ5dYnr1Texk1bYcUXXXipFl7R/HubLrf9bFqA+7fF/0a7n5YKaRtqpk1K3TB5xa9Oic629FtNEfRIPaK2hU/TwTBsYcNEneyY+WkuNpevR48VJSe+Au9OWfOv+MaAAABJRBmqxJqEFsmUwIb//+p4QA4RAJAAXaQjOV+y30tJqR3fvdaTxEMXYpQ6iVZyvtqdhB/gRh7G0nItutv7rhPuhisHO/5lox5JAQUfd50sLyk+qaBPGnexiEB4F5JYo77vvdbmB2JPfex9O3KoaJx2sPxsAgxZ1SPhIp77kz6wVLf4c+L3ypqrybJrG9C8PTZi+dmplxECyY9RErs4GC+TuY1KiIu9cXgMT5WPk3AzzNE/fKIwK6MMdxYrWm0MPzqsmTA/CMbLHgADfTx4AgLgTRYjhhOH0OxqgprCzlNq1DHeH7pCtuLA5ZCnrRUKijYKx2g9ni7fb4aiCOzZbWsAjtHZfDzt1YIh5qlQgAjfiBLYfHn27dXyhigWW26Gu5zr6HLPF8+JFtxDW9fO0bLaQRmidyUToaRvo6iXYefgNThKjkI6sykwdRMM6Q4/PyzLL/KaZMxIPBb2IuXGHyyWJD258Q9CuWD+OJugz106mnC2VP2bscYHjyVI7w/IuN1upaKFk1pPnkq0DIl8xHSoP5L3roGrZwHi8ggS3F54eKr/vrVk62awmpvwvchufMcykQTjzBv92nNet2tSTpdnIFJhIXcZehjjZHvymPlUlRg6D/zytMzuNh3EwHCjbzVwVMkJ4pd32O9iKW0MA9X4PjC/shkHgT9MjDTYJjJzf/iz4Gfb5M1WrWQfJXeavhLxYvThiUuzXp7/onYnOmkoL+T2j0lZzFUi54ii0ieMRzK2ZVjPfmVVvTZaVEiJYElV7UesdVD8pD7Xxq/VKVl4AwOeL4R0pxx/ITcnFUhbzzysg9ocbtMYj12DMk5KxkDYtrDDZ6/pjwE5tjAoxXO66/tqY++nC6c+Z2VIw6Tz7Lodos+Zb1W76oLJlKU73QdNmB6NyJsYBpoJU2zHHI0dkpczT+npJUiifV/cDqApFjpEB6EPic2+pr73tgEXAc2wAM61hLH64HXb4S9QeiC43oBCinlc5wMgJEijbVdNCe5sEIf+b1dmVDrngD1N7K4ofOntwfg8f7v4vT19g49GdThTaJR+wJYiavidusRu8IEh0W6kakIzrzN8Yss1l6H+GNemt5ZtWhtZE+JYI1QxN6oApRlyFyceO8RAC
s6VQ+al3A6W50hd+rViniVhcoHw+vzFBf6xEKNdBALvtFn7bP1tB/XAcviB7n+LpFB3PhdGvWRugw455rnxoh1Ntd2nJN1cBUa10eQ6lm5hP5akcHPu8XzEP5WNofR2uQfes/U9xM66eg7WBbjwDUwuO1Xg4mfJ/W1e6j6tTO0ahdo+am1B483mHoAbJASO8/lH82jr4hOW+HdrX0NQYgHXvC81dcD91m8NKAvRCd2fMRU23Xfn21Y0wGn6IioT6C2xZ1WyctMY/E0JoIjlQlISf3oF+Bvcwzp5NYJPTyZygCyTQHUiT5oNlbKEy3IaRcg9e7lKZmx/JKxnmUABiKJksY6CnAy5YGaEF1p9NpfuRjOYOkH/kcvrBo8K5nJkCgX2SfD9OFgCqlvXlkBss2Tic0pakY6iFG+/GMlEzvZP5a0qpMffeg4AAABDlBnspFFSwr/wFW83zAqMoAEIjHPSvxTq6mkp0Hzgv3jJ1bCnoZGGkm8kVcrpvHb8YrNIjPEBa+Z4VAacBquPeHxVKesVs7c3nd8iDf6gUlnRGmCnFb/UEDLA/A6lX3JeRqLe1A8a1/Ufu4MvVnFfCKv1lB+PaQ18VPg4FYcWmZzPZtPSCo5f458DhZv7dyWf2WRfRH1Qm2SiRvqr161IvYMz4tVQ2Q0XJmgD9LTnU6sKYsTDROZwZHTPgT+jn3hbY88BTcDjWwZwsrmIFZPKvrG4lOBvfDMxDZjyx5FRkCdR+K+qs4h0+m8Aoy/fesuwB06mNBoY4Y5me7CDP7am0jMOGLAhvPTv1VPuGfjcPCvUlB0KPFmd0fpXu/wWc1FDIBTzzWuCKN6d2XDvEvwBYiFMQziJ7zE6SRqQw64/8U9Qlw2gVKEUZna7m84scGuq8z95xnLj+VdEyCIaOplkX6rgNaBZBgoRi0Y6r2GzVzrE3hsbe86PzpbsCbLDL6/l+uQPSc68Bo501tFl2KNr6Me+cHUWcjIHe8AGmM5Di95k/Q3/auhBHr/3ZXdBMsQ+RcrkF2NxWF4pQm5JVq4KE5pUf5Q7BqpIHgokDlUU+ekMXoQJryvRfTY8IjItFJFPZSJRhG0AnHUlQRHb42kHNzA0dZQ6lQAaqUZ83WbcJfxB9o7307SUljS5fO9rAoa3X3ilPyEcga5H4jgQ/UeADaFVMVQQKJgNUT5VBuZp8DX6wFdp1vZUhpJ8ggDkRag2XnsH9VM3s97/AwvDHUR0y0vyuwe31RbqCxkuJxMzwwujZnArfNjkZf+qDYFFSEO+09XGYOfWurpji21nzlOVrltJq55E2xrnk5vpDR8Nlhe5P9Tx93ym/3fjn8RRIybuJmSjwUyjKqWbDBZCG/m9glqCAqxFAZLlX/xOar3sIyfhI4RDzgqqtcTjlTCHW3MKWLyOPFOJ+12o0W2qFpAeCWUpw420JDDDSSQInnSK2VlmtR0+U0W458O8vEfMVyD0T39UziSgi74K/cO+oCDapXl4IHeFm/Y9jEGtHNPUFyrczn6ibTNZuRFiuQ3lQhyneMd1/Xv/DdAWbYEQuxnP7Jp/gR/zgkS7SgSVZ7Omq7tbTagNvOk9Q4g2cVHC+ZObm9q019lmT/DRzNK2C0pCpLHkjgAYhsLKSbUkz4/ALPz3JCNywnP/hsicRugwFfeLkfuHFNi2zL7jihFXHn4AI7kkD0GgnqdapCdfIP1bKmJWXlOi0zcGoR4JHODAkpkAcL72DHH9oiq0G5PLNiIpnPif7Q+bIsObOjlwbgAEy+8nmCSdYGL4PYGaaZiEReiSLBHmhjUd33SdFS7EU1bqCRdCf5e6pW1XhUUJZ462XjqkFQVXQlb2YXHON68l5EagOypK2Dm4vQiGO9azkuZd+JR9ya+4eooRHNAAAERAGe6XRCfwG+h7TBX8oANi+PESXT3IaZrk6ThQX2vqiMZjAqv4Y0L5/wilbVft3eqfyE1RJeOmyspvbIa+AdS3B1fRurKd7eDSDh2NY1T6cL8v2
gxCKtAAeH3WsHyTr8ULWsYZ/O5KhfaQkshhqVQ3ZfDFc+eKjyu7BjK38iEm41jCsEhM9Fnes6I887047PDejkzTMZISx3El+/a8R6y6fy5bb8ExTzWel5F48CzIAnyeY7hq3xLqZp0Wh9Uj4vSUNKhQpTcs60EC7/0qeIA89gCXfWIVwzKnocONS4IhhnMHs5Agw3tUh5C/G5ChqgI5i5oq37HLFEWDiR8aUOXnOmdSP8SaD/L6PaOoq7HLTmGohDaa7kJAyPI5vUdRQSTWGjL//37+B0w4kJDBABErBChe+nz0KLQpmLb9fyhjxsSl/KVi7m87lM/95Ryj25Njft+jnesLeDjoI/RKKjQgPd5NiSB5eaVghjC2FVWoXjPQU6lg5qdv9b2ia7yQc4yt/eE86c8TVG26Ak44AmG7qnScc+I5ameiczD32T15CSzhKsCvNJY7j27Ws6jPOf67iRpfXoFa7wvyoUjMXA7oY/TFD/inVeexGtB9SqeBc+tGOxAYc8cAUC4h+Es8cdygdbidFJLikIGl+9kHsgqXndQ4nLqtZSA5fzcA/zxOlzWjiAN7Lf9L3tPgixmvwkGjtmyLHKJGOZsHvVQk7+48mZpmRaeOgpBrUh1lBeZ+/k8YWf/i3ZA9pVrbRLOWfUUPS/jFiSNDev9DnkpL1C1nGFshyuKFySvRJdx+ORMepn9o39z8hrPQagqdQvlPvhL5joQharnppwngXYGQ5NIPY/7/WLo3CBAb8jF/bmWx8b0ugMqgqFMzr0uqIxMOehq4xgrPD0uTtLye1vP03wdEdqWEkuaBLOAZF6DmbXIcHpS4uVUtgmI2oSiUSHAYlTh5opMZC9dvsZOOkdf2YOjVZa28rRytH742v79wj7sfYlzkcp6vWxtqxr5o6RMaObzu5wgGxQSnOgdMJaRyQ7dSPMbduYOn8eyLoPMJIehZm+nxMSxGmUYa1wd2262t98ibPGEx03QzU/QzJcmfChfcBr9B/9NW9L1iJDHPD1PaL74qHE3mKr1gJj5RPXDn0gvlk1g86qPVkMvVdx6jlEduNi27mFmbXSHgeNifg9dvsBOvmkO2w9R250FBBYqQqq5LDFPixbxmzho+DTJ/WJs9+KvEAueun/xjGFO+2GbdD8E6oLGxTVIWGMox1VpZMoYf/jMFRqPD/AA9tTujYk0+oHUhN7fnht4UJ0mkpI2h1vqwNaNRIAdRl8caWD9fN9NA4hH8k4kfTVnn4mgOIZ/Cf6nomb+14AIQKqLn5F6rO8GsBNJw7M3OQZi+DCzrxteQiO1z+FFAsgSxOH1Iu612l078bxT9Pt9D/YOKGUCpZg+OlUgAAABFkBnutqQn8Dtkq0AHq4E6uhFWwQRFGGHsYZ+VgxMRmAYWT8BX4HxwHJfni3Z7187j7dO4iK5XoayTR0MJjoz6M6Jnip4DYui9A96zst0xOH3BAG+ZJAcIrqAKiUbEtqPwOawCuQitrZ5ZAA7kwf6TpKKM340/6ei29OEPbgOOT146et9aUkxXxwac8aYZnQlxyRbYYdm21CQDIV5Ep7Q+xf0uHvhB9T6KLd5EFJb4C5nYj3jCDoGMDVuQrtwhd451/E/6v4zDth37iIWJQKPJYiUT9b858H1frG01CKpjbqnY9IvorkhOZKCSFbJ3B/SOfIW3PWpc5NA/jJ6R/qnOrKD5kSfi+uBN9z9aTXUj/XnA0yFMqjvS6VaYgrl/P9wszw9oadVJzOsgaIHQMROXEw1UbJK76BcUnym1KKxHw/ldgq5BJCLj/SgLClniFj/3tFb+1eyU/l9IvspRa9M8+2RO5wpvMNddgURQHgetz86CNvCpViGM0P40Nk+kyC9bFgjH6UJkPDfpMDDDdfL+tcnSbogcWc3W4UbBHMayK9blRJ4uXLG0/Nk3x6p6rmlXWSpZZRkVXF6O3E5tPQCw1lEwJFc395ImuWfGGez6/oqf7dxOYku55wyIVjz6Ux6PbKuUSFG+W
SbgVTOxNO6c4sa/3vG4Jy4WXbzB2D+m3mw9X3fBK9zG6Ee3Pz2hP0PkYYLVvJ6xqwxSngdrp9RIvsbMUq8w8dU6cn5kUJ180Gvj6Bf7df0VmCnolNW3N9p+JNJ5fiZiZ+gK9J09PXNyarJ5cZOVfmrLp6gTXLrSPdbczYbyEMqZIzGV02svymKpPprysVfZtpaNS4aKG8g2ImKGN79pEm8iQ3YRD8GK6Uxw3lbpRf5+Dem7vPGxCKWm29jc6La+GdrMaX2xy9zTIfxGziV+Q+5qn3BiNxSRPJwfGfotZPYE845nfKumIcdZiN9YCrHQjxlhpAMM7gl05nrDKYhewp61HJDETaI87dXPQNVLw2ACK1vUcXoomnZ8BXT/H1xgW3WU8FmsqU0JnYu9fJ/hql3HIf/aEe/owwoJVNT/0pnXxzl71SIBGCsZmPBwsTOxwcO7wE+MYnWZFg/Et8Nf6CHFaPFwWZVqA235/bbOkxZzpeEMKdW2rzupw7LdDtDBZ7iI+x478CCbam7LxqqijJjgw+v/qrgAka/S0ijV2LJRTTFK/GbVNSdrciiVoHqF7dP9zzVmWnP57TlapvkOnv/ZcNh22LjGSNRsHaechVuPPmaz8+UMuJyjzPEQaIROMc98kg6xIqpgkKj7w4mtq2S4XtU4chNB1tu744V4k6zD0pycvxqt1Q5UaDl2ykC7V6WaMbaFsFl9/7y3Dj1/71ABbjH4euoCFYZGlkVfyMYBFp6HcIietEENajNNIaml5Th8KNJlW/gYIjCurId7Xrvj4/GrTYgA4NOSRT9UrpCbFf0OJwLFAC5KDeh6bdg3ykvdQAAASyQZrwSahBbJlMCG///qeEFJnNwAWqOfAfOwU1A4WzyuHteeRdhkMXEivWij3CCFIssBcNhF9j5Vi1P1T1PEDpiZfSAWCdJMOffRASQT3qWEtaNgDREm6LQ0Ixbw4ZV/L+IKXwBmP4wE3rnR2Dri04YA6ZR0IYVLvjP9gNU04I2ZLFYzIsFRc4C5TLyWgt0EaaxFngAxQr9lArOHaL36cH02TIS2p/4HjELQVuTyhgMGwrkHgogbVrgy46oXwW0NAoDopwqZdw0hfh/4Z8Armcs+6GKumIW05D+2KUlow86QvV35KRNTuqeusVtIuYxScUp0h3pUY2HZnB+5tY5vzorSoOyRSmjBSkBxMMuwH4yUT9QO09WaeAH909kKZWX64RSRc1Q2nKgJ9F2Ddm8gMV6SNSfRJwQZCd3nllYN46xhzmJXI2mbEG3Vpdl/0HRxNFRJljD0OARSV8Vzdh4Z9h7ONSZtc5l0+o1e38HcLiV0zP1OrD4xx29egIj+jklBFnWRJN74FrTJAfho4Iw96fSShcgb29DgwuPc5HjnH2MfYLnFHTPmBJxViDoZI7RZL/TpFIA2/KoXrp1sPFy7f6T6bXm678WpToVfOAY007P17IgvoxHZ9hJO0CgUTXRbC1gdhSemZs+jr0gmKdGIY6S9ykjj68X1YvMgBXL0agfGXU8YkYOxXE4N2AAd04SI0XHjOB/KMBU+7K84YOCEzPLjAp+rPNzCzUn/NnNPLEA1/P9Yek1/nUZszJZvhMM8+Jr2Vs5J4Npp7gs2v/BdS6T0+t2LqDrMMhYcyezbOsrbP5TxgLnb0WUzIXcgOpMgMH+d38psvsiZKde97wK0n1xc+18eYoGAGHKJq1KMPOgEasVfk3nvc7e9yDx0vl++9+QVm2CKggjXRrLr0glQgwJuNCWpKSVKCbhQ1HejgAeyUC3OGa25eteVHZmSQB3if9Fx9d9yOvWkc+oRODl/0EcOpQET/ir7FxpZ9NInT/s05ice39y6U1mh0wy4A3PFo/+cL2TkIgCf+FMCKO6hqFQ+Xi+dIe70qpflLloROzsafhWlw2saPDzpzoPJur8g4dLN12+PQW2yoD2TwAEd7diKrYQY1VKJvBZjjlasCyqN5fFMTlT62Z+ZwmVF1XdvZbcFdSHkn
/aJcMnaaudwEnM3Lq1Vy7y5Pa6SEDvT9+KhO/yAWbcYN67+2iy27uo3SKDzraBiIfn249mKqVOanTMV4h2bGo75gPeBQX7YQRcx6o7Cwof5hMqIG/1G8V/im+exIJJUM3javmQbr+tVmp1IsyrAZUZobGx/SNoKThj1lfmO4G1Le+Eh8o3TJ8hvyhiirVya7UUkxI8y5cTABkP/colDLIGBz7bv6o7EOrpllEC5xhn3BP8yq7tjdfTmbr2H2PEZPzn0XqNAF38zhIIdHWujQPfQbiZHh3zHBxgxJyGUuGaWhBJR0h9FAoiZ9jpprINtx3hAvBFG7+yNCKD7xSRGgDbakOQgm80vExbTUZ0Uk3ly669yq2J4GAYULGzNwwXL2z7WY3l8zkY0ygaekLF5mPd9nR/dCJwTlk5Jr1J5DNOOJhrdMS0I3r4ZWyi7kAAASaQZ8ORRUsK/8DFZRPWtryAB/O6oc9r5PaYQbFT534fkUHsZM99ZlCz7J1JJ+uMGd5trVfXrv9NXGbxYvs7SqjKS/nVzRKpuIPsJMPpF4DQaaQ1sHBjSz98PyZHHhv2GzxXiuyEBBIaT6R9oXzX4fd5ZN62brHdFcw+XzvZeLxEPo/UuDBorUAih/luqP8UTawUb+8gtH0/KZow5J6hULgBIxJWM08/1wRiOJb/i54AJaOyFHmANVn3HkjK3LuSGOIcuwYxTojQBJaMJn7eZKIaPs35BAq5fr682zahW2aiyc0neeiGIPvjICvcyKDERpfghfpzKX1B/iFgTqR2J2kApt1hcNXA9Pfvye7JNyjG+NpNpqTJCmKze1PCTvPV9aDAc5D7fzh8ACj12QSPxwG44OT69yrddOljGpbcC03B0c48vE4fd42rOU8CpWBO5Qy5lpz4RjRIm32I+lxH//+voQCygE8R0XrQJ79UiYd0dmU5Vp45JMlsy3fCkdjEWqiR31PeZWdoJD05bR9CR0E9pTCC93a2oqDOwl3UuOy2XZmiscWN5V6ih1rR5g6lD8mX44glanAlpaDSgP2rEi+QogMvE8YgPzwQH0txqIvgvj6swu0GUEld+r+uuTWfb7NRlscmm+tHXjTaFm+gPhGl59SAHNcwvJBYWbT8/J5EOaN8dZ/FDnROzxh4SD2++lrmIRhGV4eiPL3AG3M95jGAq0p8suDvuJ3DzH7IDkg82E6waN0JrF8xikmjDJw4atm3GH9UUKttMt/RQPAgoYKBklDhXxgAVEKJLmaKZBRRzpD7+glJp264C4YvEc0Nglwn1M14eGEbB6gMEk0mGxVcsrOQvFgtyH/GqFro54yvdjhbvDQmpuTDDidjYPvR0WvR6tMID4GF6oBC1JBVVAYCtqaV2mslPZhjHcGgq0sNHnMjZpr4SlWTrTC0STylzEfzrRv//hm5Zd47MToNhnkRABThOVO918rh8PqXpSV1MqnoK+SY/EWa1vU8j6jZqb9DDiNtjA0gXivZgxejPJUY2edk/xF8nEqud8WSWevrJu0c4XT+ABe7ASfgSywfo4knu9Mosbcgj6KMza9RN5PUgbVcfgFfjyDyC0nNSaKwC5NR0foiFDp8NIc7x90caL5WIO+YXnYdOOC+tFDKAk91YlYIblb6IxxyN+e+FWp2vDz1VcTytujbbIGvBXWZgS2sicGZV9l96mXpT/LQDWVLufpUB7IqjqfFUeb7WaPV+VPnSPJnWmd06A1WDHlrXXhy7CK8ScLFRWmd7KC0LYtDU4V8yvDtPtbr4VhmN5OpdCBLLphlv3UZcTvdjM/7Q4ICnanXRDS+MLMhxIONWa71KGXIIVH3fbjy3hNBCA+i47lEJb+GxYcKucm8vQw7BLrqqG8e3s0jIUYjbYfkQekKCKvPpdG34ULrahNzeuKaPaQIghn3hdZZJMUaSnKWc/cC3KEs5af0XYegC+KpQmE6CcYKdyN0eE7lmLAG8SETN3lyxjKxd2Xf9tFRstOsAWhTORUyg070cALQh2
mc8oINN00TTp8SZQtpoEAAAQgAZ8tdEJ/AcOhrwA3JRckmcJygHhUD7m5kyRYYgkxhvpakVlaVcrin/r7jGRkUp8kQOBHgHxq9audsHXbUc40cq89iJdrV+b11//Pbbp9+736TSgzEjJF0nctn9lJ/CLsNT0kYmQwjsCLXYdUjWfPSwBrj23/zJhAt5hmTv+Je87uyYd3qGz3s+4aqYgRx+zmFGozJ45UuDie38XX29phj6+Cm/cSSTQpOrzZbwIIjfa9pziTfEfBlCrEQwWxBi745cCDBll/Rs6dEaWULBYuRhAwkKOMWHY8pbqVChcyZrE7kvh7S6q3llMYEkMWQxaNjFyHGBp40CE3ndHZJkG5f4N9X9y8It9bIVMh9fQ81IPPRa8kJiN5Wjnv5s5AH4alashgXN2JwLtkfLVByQxmWYgB90z650DaAF27hNJKDX68omXoUv0jsOTYJ9/uky4NqkX7uKVORed+l2LU/qNEdNHuOvr5q0TkiUBiBaJfGrCD6hqgOumMU9aA5sMSKxtKEip4XTjcsBErNl2nIRjQ2ad+Dw8PoX6pdAh8mdnTR+FlGzkDQ4wGb45tT5/LMrXlNaPYofBh0w6dW8wF83MalwDdn10K85yqujcM10NzctE9NvxWV2hkpxluHJZpedrf1f0aXuM0WV+jAsLf/+Pj0lu3IjAlIbOXfhqyAwmkpiz2d71WjsafmfqVGi56U0PQr9KpPc9qHINrx6hCls+HV/9+L4YUthpvnjgMS3p7k4vD4x6sYrBGzIvlVKe2gk3DU/VSpDK/zGaZehF5StMqYCRGJ29vmZzP/gELUcQvusNG4ihSpPwCGjuO3wYZ/vBzwpBWPk2hZQqhk7p4dDWx3QWywJYjEdBcoJDC9yJD/gZiyJWMg/D96KeXKmA/EvUF3kv7kk9JaZewnDbAFC5scvxmRlH0a1nDH6zLdOJtpTgFj2CTRbVWRzYSzLRusdwv0AE790Ln5nCe3i8GE7cXZv8yCM1wKNh6D2dQLJH85syQc5MBUY1KAmRmyFnIiPZ3AWBfmWkEBop/6mDGOkYb4b0UOhcjihz5e4Hub+bWvG3HQzANKWvrATmDGvHKTxe7uIjO4LncpMQBRrTM6l7gePYpVWiDiauDF6yw0xAoB0PDTES353Mhms+PAoh9SNZwRmUkJTuwkSLDb8cmvlZH5pNiDRZr4F6FBQicG0JC/DbY3IDdfCYDj5tdbP9YcIJ44j7q0sdHaOxUqZVbq2R0UnxVp3AWgIKLl14ASpyntqIgnSJRHP6IeOtIJgTErjJ+0Mc/jw06gqf19fzLYP8Rt5p856GGIhQYHRGRziaz7m/hM4CAWeG3vrX2v3zCjpBmHLIqiFF3QvFi4fNPcr9nhHXCW6wfUZ+A2BWWveTDXz+qtVbbRy/BLjQbpLdF28aBAAAEHgGfL2pCfwPcgdRc2XgBa2ECPMJyX/1IecR90cGnJKMj6bQSXRVl6B11eH+HpPBBU/2HMHcym0cBho8s9sJHlcLAuPOrRqfa8YjqTBNdAuZPZIsxHo8AhfLzNLghWikooZ65Tkbndeqt2IUzNguXpAvQVuO9MSJ+A5EF8N3Jvt5qJGwrCwXC+1VvSXWYrELAZ+/AvtumAyXQxZ4EdVOFmNyitiXS1HgJ3jhHkivInQbxroPdyDYgi0/C2qXGF+xEZr1l/ijctCnKCcH+KpJmQl7Ep7qK35nmo2Lox1cHgEx9riW+OcjoVcq7ZM08pbTM6pVaSCoMfPBkynUJusOhRTpRwSgZzGW12MMpyvdNF/RLMUHJ7XOvU/uTubIUf0VtSLBJvlPRjK7cdgmla4IO9aKl2hK34NDPbRS8Sraq37fg88YeTS2P3IOVMApIWTYMK2KDmbDQtmVB/xhWOkqaAPkhihT0xKNWM7zfkrV9PVu68M792F4dDvhfrC5Ay641gekLPEJTNK+UdMNUUogMK+WRCAREn69D0s5oBwsvOxDz7tNKaDC
L7/pePab4/cy3s76p28JWqewvkeJGZxhm/IOQl9kwXxZz7RVqYjVt+Pze+0GEz4Q/eIdIa2Coj6NHdRD9AhHBuQprqS2NEHpcMi5Tm+dWVtLoRwOO4YyMnDtOOfu7F1z9gEpGq2k+COJ52C5Ll3iZYLZMt+p6r04UDRwxF0sTFV02YdObaFFimHEbWggaCUERIagNOYuN3cPYkmeFXZ/PleSD3bPvnISMlexwnWB313UqSFMoOQbSpB09Ffv4Qn7367DgZ0oeCWfj9AbnuJXOWgFtoGUyVPBLY/aRLlss50XbMAVKiCdro+9EbS6tgLBxaapjMkLgKhob0u7lzyG20JpTBuZoGfukoCdGlhH1SznG6spZ8Yl7sAaYLj511HYPYdEnJJCBTefeVkZm+S8QshCLY4hy4ew0ZANhBCJuYPoVwD3T9ocsINAAfV3rzuhcnoYwv1S9LQorP0G/HcHOlYM8zoEvZC8icNbJW1iWQax9ll2/yCqY7bMv+QEm52hkV4xqi4sI8D1WMHb80yxrGnZzLQCbl8u3bPdcwiB95Z/KGSPFtfD0w/Bt9twgK21WQycKfnCeos6/BkpXO2Cqelc1TkGe1G548nKyMsn/BNardrHlOcKIwUB2YQd3HGlvSZYLY8TuidUm/IQXmtBn6DsVT8Q1sT0Sl1akseahslEOsYIfp0w+N0NvL3ZvNmYc8evxrn+lmdhyU6e3pcU5y4rphm6VVc8yeTqr25ymHHiVkyrUgXLtquX4RzDIIB+FWEP9IOn4mWHMO2rHiX200Pnnlk3CVzmcSgOHPqkf1QDF6H8XLDM4Fi4W6DZ45PcJX0E68PQJZtAAAAS4QZs0SahBbJlMCG///qeEFKo86tKKZB8DTAgBCnoHuscHDhADS/HsPuZt0q03QlbAHTIP0ZF7aXhxUqEE/jBePowqwv2nVe7yfBpVl3LWiLipTsShpVVsa5vFTgJnUIItx6S00s3XMXfbS+k9us80SuqaB3G3gbRczuk6ddvVGD5ikm6FPW8Vi2SgBz1iAMBqaG8NRSRWdlHXrS2a9Svh6WbU64sm4SlFLIi8fJtdkvXeaCJtJ34nNI2eRa248MtnjVLq0KzOyd08qSWHv4MYSbdyyuNBnmyS+wirDiJHUbhureXO5L3njQxhGT1iWMDxbIDFfjV+T63SPNkm6Dd0mBOqDLXII3tszaPXe3HUasGPd/MpGzxuz2JWclaJYCqXiX0vSNA2Z+qWDDYEx8GqPpnaAuIGW0W0pguMNcfPpZHYnLyBekb+m+XQimiLNHC3OyD28ilkHIIbIwrkrbPA34/WzvPbpZJ2X2i0RRici9Ih7Cg7Iz6f4uaUkCv5GwqTPUs1kAIEKVyQNIkyY2+0GyktNhf+rsW4YPb5JTKTjEH14INFxsGsKzLrsERj68br0iILt5+0bXu8RPr3n8mEZ5K8sYXBV4B9xP2ZR47ivW3KdW2CNas4HlaHbRIS4eKcK3uND+t5XY+McvafK2xzmW7JFXaLy2KWmM2e4O+oDf+zkbohEnjyWGFRTxRVLU92WHF9nCWnDJrRvsrPQ1k8WIbJ4wdwXs1Up93jLxf5xJFVaPHHdhahHtcN5MqEhMeChoZocSCoKPxBfdzwu21UxQ0tE2uZy7en39hYYWVkdDo3HvUGErVecV4bJIpAuo4gKeCDELRgPJGxuoNXewkFbezw10IH2JqCA9kZLc3BtHtc+IjjNRr4w5D8Iycjrr+h0US6cKZFsDo99rNqfwZKKmpFVck+kbv9X3Ky5RUMdZzJ9pXpQ+WgbAxuCrYnUGYXa0qtXhhAHdeNKSqzuP5pdvMKfuvOhK7FX063g0b0wKaYaKl3rtx2lIxxFrAjX4Kyfttz2jWXT9IAi8L6ODlUwbbTktOKjXEvEHAQaTqN4HCelgS9FhPXgXCPt6gAz6Dmdi8LaHKu2gNFkAVpzoazOCw7Bz7R048515yWmIlrUe7Al91JyhslzKHFZpg7AMU
7buWuwGQl94iMTzkc21xfWs9X4DG2kX15fMX52NA6CGQtrXGmfgLVKvinIojnlvqh0LM8bllwemR20Vc6zU7kNuObd3vfcjdMUzLBkz6U70dCzbll1sAIb1iEEH9Z1/tFt7wqLGLsbxJKPZI0312FDxWDFuvqMhssh+FSP8LabHvMc6b70SjzUUVdLD1oDwdRBw9M/teuGF02i9t13pEH4pXgRb7JYXjsfoYwiW8vY12YqpURGaIVenkNaAw6Jb31nAa1H0E2B/yoRghlU8FpTMYskRwYkCgLNwOYKcjU6mCkauTsRuO+ZNUCWPEzST0JOgz23sWLc2EeQVbiwLihizjQvXXLcDBv2VE2A/GCGiOWtjet05lDC9y/RJ9ohPiSm/M/2wKohkNK0ahwnHfmKGScwqWl2qUYenPMekRRwOVVBPi8XmxTfVs57MNtWR8u3MFzXMAAxYAAAARyQZ9SRRUsK/8DFAzS3QAOJ4WKwZCc+6dEE6Zr//+BEATrnAATSPA5AYLlxMxidBu/lcWHPbLYpK93w9OJhab5LKRsYrc+EsKoteKv2gopNvrJgJPia2KD3c5dzA/2Hn9tcusXOXAQH4X2g8hEuz3mdx1R3mtnq5k5mDVpI/pc66jBQp/ov4FvCkeSn5FzuR4/owpzMjlXw35NDWakPRBjKXlx1MurFtWhkNHHUeRMK7puP06e1Hu1zwxFRra4VGMvwSTRb/c/Pqu2VJ6P51PAeLVAR9ayxYaOontGoIxhhaEjWZxiuJOsewCWBx/w14V1hIzHNnYnDYtmm408uM6gqklZSe79IA+JZmGbJC24+2+0k6wbe5NlDW57hXboZ9T0/CupSjNOjShCbIjqtcsAl6EBCnn7sReqiO2BnSSnj2NcF21DQWteMeHE1UMvb6XQa5fJa37GPOR7l+NR4ILDk7cws4Bvlv9nqtTsgUaN8d5hItX0/8YnkzDaTZUQvt3dGJd/AW9iQCDVkk3pZ5jwJOijpUGcMJNMtxm2aK8vU3BZ+2IepFVvxR29h1t5rANGfeS+6yXpkxlZJUBAN9n/dE5blKLAxHRvExVvKBY7g8PJU++Jyl8oR3RLBiClRl8WouCXQ8ZxKKk5b2ZwBFOKfKUUSpoEWJIyQiyxscE9ZcUUlw8fbzrtoNe2x9+0OtDbeqvSmdO+mZYshNbKXGmFkwCDhPM8G8aZTVwJSVkMJ+KFXE1yPpt7Xf0LU9KxMn8fi60rYK5VfIkPdSVL/Fb8LtG1jOdvzKec12zYLbZjacsinjoW6+qPVvjTjc4uCRAMMcqGrqFqI3P21yVti35xUECQymtmuEiW8d8ox0fMdghpJg/YxFkzp4NAHX2Y/jT9KjweaNwwn0ONB8Kf9naad2XwSvXBLyLsFbPEiZ0929yMr4TpbcNrmkgMMccBFXmyj5DJTBkQKu3KqIxv10wCfP1d8JNibKHr9KD+hh/aG+JMZdbsWgRegTL+Y4YcxBcpEznbcIZGNNIO5wdRqpk1azuXghCf86u2+OP56weWg0HrDQmbRtIiRIDJ16a/+LvoOrRmfrL15ejXUHTN1dqHybsJHKhsZYbjqkRorM60o2PfSRdpi0q7gSfTwh344+VXEHHWUt2cvId2AoD5i5hJClO65w4Xxa69PV6YhfIxd9hOSKEJQEhQKkJz68yH75g0/36Z1PCP9FGhIDyHEXVdjZe+MUovNQ/24PlOU8uP9ACRz/pkC1/nF5Z1okfK6iOggHoisJ29KjazaRf+Tb3bxA+rZl2McreA0E1NoGDrWhEjtqjO4sFNtEu046VM9lPQA8/8wvLXCkpFTwDx4r/+GjMD4EYQOH5W5VOVOJJUf2qIxs23ihH2RPP0F1rnfhltnJDHeHZ1QpCWIB+RiCepwnMHkOnxcR/IFR1hwCeclXwF0FJqX588hLTIBd6PK9DjWFfqn2iZ3HWcTNx50dLVVgT9pydzK7jX/BaU/Q/Zj+nVlQAABDgBn3F0Qn8D2WN
ij5F/VwNhEoATjP7Tw+jpi6hDu+SEBisZLQvXZtvlVmK0Tm1Ol3RSsUti+ThoZabgtU/1wrIJs/3AxGiqIF+/t3JT293cPbcWa/MLzEbz+HCCPrcGcS6pZD5+XYqSxN9pdm6kiuyP36jE/Rq5HhqPezKXjs8Zi4lKqDQiR+pGbFtwSdR0o6AYeOgxTzhCbaURBmJpWbFMmA8vGOgV1Xe3e6DdMx465GH4z9X3VKFVU694sf03QRrrmW0Mvj8JaBOm3R2+qK3wMIFeAWR/F8Aod7cVhWrbM3JvlAuKrnH/4Y62jDTUBF1x5rSIJBHWhiIOGG1OZTOWiKAEJPJAqXn5i0xlX06WqzP7iG3Ck2msSy1sPYd4kQwahMo77yyFO1CA1mQqqfnL2ezmYRBJghQZ25rHM1wqtpfkQNg/uKP36rMUF7CIpeEH4yAIwHNHLRCVRO3eYxEWABmwz9hf7Uxp/490nmKijHepkAwl1yYApCOLUdKy1B+VYEHvxOlVtGJ5goiW1IzuIGqKwrhFkXVHX3S/ammPw89qVEQXUthgpA2sXsWCI1FGx0xldtjHIljFvmNKGnLdeS1RydzBQeXdUcy1eNG1m3wydaMp3WpY+dNdAWpavIcnP0At1KHNETpG/8hsJktdRVTAmQGJSmpSELwb0FW7PKHAZaHwXpt1+tfNorws0rqTnWQNF2p4KSYRLtWeODPTKvI2kI4/AvuuL7LrUOJsS7AOXlW/kITnlKsd6oXc1X7WmpvTfqMx94Yo2PuTpzH+79xUFmm1Mn+Wkbk+G2DJaa/y5f0OdBHKspfVIEt6HvW6I7o96q6j8819MKePVAWzpVUYLOv/gaU/gwM53Y1f1AVkUlWVYE4ijm9p0TxNfwFqXEcsNJSf2FiJG9RvvmWUkMvPBuqrKdQ8V7rNb3/eWByDLfQNM2jIPJypyc2hoxxNSWyrOzCJwNdphzidCCDlo8Q2aQBmDpMK2XeVMIq/KgCg3H9Km0OSdQ9cRjE4HLOaXEpqkuJdd0DlvfQUEORd8CzQQuwagYnctWwOZto05kF805jAIHl2ozPz28tBChFpgGjmwW0DcLBmXIsOXk1zta0gP50XJJYg0jERub25Aq+XLL1Zxx8/vM20ii+h3FaqUgwZHSbCYJqSe8wb9Kf3061CeaNELNxdsQd0JDZTpCPp92i27BkYgnMcO41YtHww5Sm7SmxyavLlCUhTvmlVaY6kK4DVz3Mv0Sf8sU4ZCTpiG4ttDS2QRK28ceQI0SYwpQjVeOLee8gFPe2ty3leXO/SWMwskeCfqW18cVpHoZPg7j6Ew7fP/X4T1UTbuywDIlFmp8QrPoJXejh1SkobAdmVFE7s9qLTeK/ifw+f/DOm8aCwITDFId4+YsYDh4IMjah2ZcSTDV9yfSooVcARxsjnDmpDGsoAAARWAZ9zakJ/AcME8zfZs6c9/agBNXlvxpFutVkWQvrexJioCku3gliqLy9VnRqr2AXSnw01Xf+EpLZu9ioECTcN5bADRL2ZvptFGbE4Z+hKtMkEuMgfR22qiAErSGvhbsEOfR//B/sTp/ntCPSc/tm8v/2IM0meO79Gk4XyKqZTmt6tvkc9rBYtjbXFUTjf4vZS6JC3wJCh/1ONDa1on6C8ehrKwuL++70Cbe9cCTbi1nIAqCUYjBdivaaOdCcgCiU0/ELX/lYluo+92zctRc6RapzabWUoWpLSjeS/LZO/EKRJNYwgJhoqx1jVZIVUwX0nE3BmhHGfbaUY1BifEsJknjTk+yTh1/GwgNmPpkOxVGnHBJIn4b1oskjktLAVPz093DATe8HLqFUMrY1EElIwxOPBkvBDvRRcocTSg+CNXlDrFuW2L4ULGEIrRFtsVwKNyi6l5WXrIGrs3dcg6qbXQ3+ngsDwHL7P68KXHUwvnhDDnws63HIwT63AuabywxwKUZSYFmgbuNXPxhh10uWCbRLjQJxieYKIltSNmXIGhHksKcFJDMNZEzW
RyMnMcBW4an+Hjc65fLFSrNCR4RbBDYNto32pSiGpaw1YKX9xffNM5vyuWTnIyBxQZMROBxTGu3o1lo/xZezTPSMHnIai9Oo+TN0xEC9NHtHtzXM0qjaEIDa1pHOINOSCt3B1Kj0yqp7vp8Je1jN0dlV4dYLOZzBCvRAqPLjB+0GhvSx2ExAFCExoIxpEafyD8xNKVeKQHgUkYwDGYuuyoMe1FdwbEWdTDvEkhDR5aDsq7BG9VsSWmRlqv3u5OGTAs3elynPXVk7JS4xN3cIpoHRdW14trd0CSvRB8KEXO9jG/QMxMTkTCPg/5mMqyZaTRKR6ShvJv4ZyQt+z/U/+98O00WxxlKFieDXpWToCT8sbjg6UOnGUUtPYS+B8aFeCZJCW281KrT2SF3yiDez5VhXhUpM0I5IKBu6mM9GNk8w8eZ2Uwr++bkyd1hBXLWY3QvQZP1l1QpFFIrbnmHqxtpZg+OgTvSJDD1cp8vBTZu/I/KPTY6Y2gCo6gd8NGRnJ4MRoyyFn/97xmqZSZppdDjQgoY3lP9sn11V6BXzmfwaZFaEnUH4RSf0+NadV2YUCnJAnBLw2nei3uCDo0Chr16Rqpdux70Bv9SLNPyPZhYZu8rQV5wROvXemOgsR99r9WOa8o4CjkEkMDEhLnXOvPVFuIPIScd8KLA2ZobswRgRvg784bB6MJyaUicHpiM6qAvx0KVAgCQ7TmXQCD1VljrR2PxJG1Bih2s99Kn09xHj161qBp9LkGFV4jHorszaNvfKUb5WyE5nSlCuKdr7GRrlqC0Wemv0iBdiswXsd4me5eXS751ZIYuQ6BShmawPNHgLzbGDEZHbyCwNWShK+OlHhyMoCeS/zpRsSZr5E+cW1LSk1z6mKcP5ImTGiGwg+hdDqls6xDewUbbPgAAAEfkGbeEmoQWyZTAhv//6nhADieydWE2waz1gApsdE6txY+6n/myxj1v49mSuQhmTAA3pSnoAlSs2LpRwR1sIURZwsrFDiZmgFMh4J6pdhLtdBDNqkNYR1zsjwC6aW2tn62kYXvPmKztHe15gBz0OCGkv++zxt+q9YhwU1G3hwG2xbNZqmQOO7x2V3ruJ+4BPqGfbwIaCD2kigv4q/gOjZ+iQjtB3PHfuFDDBB2sg6bfn/xawERGeVzoIHSfChjPTYm3jOCmW9W2Xfc43CDeLs0OAab67rfJ9Nc2M6UwFyyDjSvTu7uiF4CUNFY9jS1izWiMQpD+JJPISggFEcZO2HRTXUPZ15obNh1j7LPe7tkqqiUHiUKZGhcO1GCEmEeHtkE1rv393HMf3Anwzj98Esg1rHISec/Zy9IjUEykoTwAGQrdmqQIxykLlEPz1AYAgdoF3raB12S3LrV2Z1IXR3Kqpg4mUmiR8wI/jPy8Y9CC0BDF3oH9Sca328nzMcSRMs0XHSlO30++kAkB0iL0X0c6G7SK5BgwLW0v8Iy8575nxGZBUYDFilTSls9kXdTXBiP9ePgL0jyUlVzmPR2wzBMnU7YAW/upn9DF+kLUaEgWNAUF0PHoBWB4JjVT3lmnjhScW2nILrmcO+kLaluSRYgAXJKeDE1ZU0DM3wE55cl4Spd3J9I1n0VOdbVVuS9lfBpkv+Y1hCZWNfBh4KrQnnsMTEJZ+YHz3EMHegDz7utMWgU8+ZaAktj9q/letSTW08YN9SXjUeJOo+Oil8Fsf0nNfG/quKhC0IXrONA/6J/TSjoEHfL47SPXVjWIUYsLQ6lF8238BvYondpPjDX4ZYzmIDF3I0k2ymCFiGhztmKFQ2/KC1VHes5CdbJNphc35Neiip0ZmJ4ZvTi66XYtwuvLzqLxM51QFheIA3wvz4fWlZrf7T4yZdVoy8FBiurxCF/gi2Wf/vJDK1qpITpzaIvyWPVd/VL1rzTNZO4VVKXj9taL8ehEycP5tK4pRDishx6jayWNpnhwKezsdZvHbwJfGdsG/m5N46BRQv2ZbueVw9gmWlOp/tJbUrhfO
w4uW9/XPO+QTgr/xy4qRE4YlBklz5VVwzUxoWG4vnGAWCuox7i/vmggrCi2DWAGOo2Ces7xABxE4Nx7KjppQUL158WmD5ynuqisWlm7Z07uUJqBc/KG6ALr9sz0LRANaST4KtY1+J7FCUr9Vo3Vw8+/30ZXfYTGYdQcZGG+0UDEotTl5H1TzEw5dqpJBLY7T7YlglXlfxBxVJiHasIaIoAUnpRFRAaJeJjbEr7mQXfO/dAgDfPT+J3X0a8QFvFBcLLRIBqaVKa/KVrpuRQKwnQtiuc3u8oDVbUov+0wnDfRSm0yc7hcNguVQrFu3aEnY87SC8MJBk4IYuRw5gYUArDhdZ8YqL/eSga2CfvXiGixRziQ/nLFxx5/TRDI4wO79nRXPL8jbhyWuFtC7zKxSbKscY1RO8VemenvTDfBqq3+HlMa1sZFwN0uXi8pQ0PHJfxqUAAARPQZ+WRRUsK/8DGQLABEF95uVbGbhGSxICoN9TahTS6gY4nxSSXX+EE7vcGjD1SM5eRO/nKQhCzgQymDUo9KlsdJ8uW3GygCtmn9HcbDLcOyYekpCb+jIDtFJ957Xuv0wDXPUvZBTn3oClEOgUCt4YhWI2Io8zK6xUYmYnlJEOgPGCG5HSNxLw5tgXHdH0k3ZrA4q1shvX+jS+ftcAArdKLgnqfMceiAj9MDZj4BxFY5OWngD+5kViNxBo7WST8VWRFKIMnU1rf+VXwLEXJqC2GA8bjTev/h1ZFX5ANKoY1z6cf13QUEjfFWn60yLgA8tGWh5CT0zZZZ0s0GbJGL2FqmC6iW6MG1bVFfhWsYOB4mCnHKqXax41q5iYoJyY6QprNkTHQuafaZZH3b/8WKuyGuCDQBY1KaCQA3zcmHe2PmkaRsWQaEJ8Opq0D27qCOHTEkp1e2OtPb3RrkQICMh3nbuFMGYtExYGXnNHLsDcTOv715+R2nLLEKI92NVtBkVtU1d2Qq4qte/klIthsC7eWDPXVpeGpqMZfDgvOtBPM04Tbqp4ufdTz7oOj8PsTRTgBqWrJG4+o+kOjOI9QwfxNMsjZ5skxZ5H+vPHcSPzgzmTs6radozqpdI2Qiw6swr7lDnFyiYLYPCpLLVyAgn7f+DmzMJZwKMT6BgpbOqgDd5/+8NOoYs10bK/tZCigm6qhGuI7VAtUrw3saLJtumSwc3QaIo2mx2vTymktl+bu3if3+k3ZipyjcL2dt2UTr8QGRZzG3RGx8PJlrCebbA49bHZvC7VCO65yGwhWHisbA7BCHIFBbHlqSG9qHmBsynLcOH6SuXoZl+MQRmAom++NcvlUuLNN8xRrrErOE+vTT1lkldITFs7Lpf83oy7Wp1TnZzcVSjrgpPe2b2eqrw32zE0B0rLP2elJxU/JG5U7NFMh3Dl9yL2ZLhqUJY3D3t+L46Na//EWgEbrojZugrboxqJinuPgnpYV69zh4KYxLEg88gjT1IIxwChBbrddrwHR52AGy65UNU+P9S9mnJcosQhOiMP5qohkUpQxaHg3ctrKfUM8PoqzjNykULIPih5xbTHd+Prq9GIr/d9vQOHaOmVpVD5qXlYUp42KuAudja5vDqXAr6K5I6DYdP9nZn5vBgyHR2B0PmTH0BSAF3OT6t06R+Txa/EVt80Rw7h2oisqTWW4kBP6KD9pWVxwvJlFzXtTNO+maIpGXIfHlI01heAYx3amHGyV7a0vY+EeaplsehYfvpW22n1a7lzd2HtQt3khTZsCVe/QSc3VyI1NYYgsYaD7q+i9gdMjMS8WpWsWIw5BrK75wkLg6Z0BYyySl/0Ix44wkRgmkT/1hIVeJObEa7mRdVsnuzcLAzhCjWJdh2DtOwvCpETKDjZ2cJKdKN2C4J6I2DKFAikLsMjqKj0PtWITI2xCAOBgwzQtYaphev1dXz26Z/m/oElvPIAAARVAZ+1dEJ/A90ffa25W9wAlr56/DXZ5tGz69nZXIt1tk6xOC7uKHMY3X2QBAmE9c62m3p
OU5IW9xO44EXKC+nCLLGsEPdtuPe0rsuYUAjK9JO97IyBADY4Akodj7Rg1I8KvF8tQoNhE3JD9DDdNmioCu/uQJ1Q9xgqu/8dWxqwHSGnsOg2/JKJ8Y20QWUHr1JRhrRmXZs7pboRnFqa8Ohaz2d/sl781P5rXxSQMbxf1ZXwJvRaG3DX1M8pWGdSdAazBIaMtq3dOR5dKPEJEkXqB6GVSYNkg4VZjoEy76wIzdLqzEAVSea5wKiKL5H0hSnRhwAee9rAcAKE5xqGwcsMDyLWmVfSXa5hrV9by/U2kIMhwFpj2Nu68W9qtvLdRS9855ojq4lsuZIu60V5nPodz1gOYjE/WXtHRQP2AiqctRRoAxOOgmKN7CuYKtXWanEWewxEAQowiVz4nG8OAEnNq9+EDcwwOdC317REZg6ldxwT6JLgWtDto69Wv9o9ojIWhwPTXSnVsNlu3nhrAOzFVNBZ+0fgR6LllCOjl92CadLlds101iqo44KCQ7AlI2Ud0BlJ/DWLlq+wTlzQTjMUSgw8/vTD205vl7vExLCdgwk/35WEM4V/cool6joL1PSeL6hrw8Y1oz/Gy/VQgr+d4dwO5XPhOuhNhLL4PnBs1VJ6LRzYljE3oSq1lbfpltWvxEdI4ExV5CAGnDHxgBy991P7oLfyfrvCWEt3HGY4haniu3gtDL50SaPo+Ki20VBVA52EbTsRLxYXVm3MEKPyvz/khmFyDkRPv/Y48/MnJQ5YclaxkLlbaybpGq3EAl52uojiKcHq5/mHeU96GwgoB4qWYmjqAlz7RAQtpu1gPq6fH55DgLBx8iTp9cZAmK8DdfV3uPIB31uSEGudJl79n9BXSSJwa0NBtn0UnFK9V0FwMyUyWwhRNoTtqGoF58FIs8ezGSFfy8/9FiEkt+T4+i1+GCTFUs7SakIORgSV6p4ILPHEf+VlkenNP/cx04L+zv4tR7uzArCcBE9o7dFc3mSkOvbVCBQBj45fd3s9X/TENO0PLZNAEmJOlAHKzx9DQ6dubQEFxQ0zaDjyktHgvTOFMts3LBuE3KCD6iIhnqp8nmITxDVvkQEacU/Kkf4qY2szozIyRQbxYi9cfoxoCk9pO5bno9eiKb4oikDvopeXa+5tTIUf9m+3ZUdMxDEBcIDnnsy2OScGFmqXAZLED6gwRIezFYWNdqVYCMQ8KuAMV+RI4v5K4zT94WWY3nqA5eRedZ056XGJ7wgq8kR5Z6aKwmbwNiJl2lWbhMIDWL2zp03Lq/Ix8ubkbjZEVhhXCbv7eXxC/ZEkL1WnYYhMrOT0mUoykmmjDQNRTnvrgzqI/TWNgiJj4f4LAH0JTZNpG1hplmRcc0wjcMV1C2peT0ICf8rLNT1fA4Mb0Gq4E+MD13NQqEdhVfoSxg41FDWyOaf2px0AAAQiAZ+3akJ/A9pamyesAJpm4r12R8mB84gT6EX6KLwy2rviaIDCf3y2cGdgdrO3aVSTzZ/M7kNQYOXwd/nvZtRGNuV2aQEDJCR+TE1RVhWeNtz9XP48aUkHeejlgCti+VuIaDoUX59QCQWzM8TI/PjFhXdAsDbiuTc450CmK/tboAENT2+wSiLDZZEZazdEdyY7aQl9ZUcwacrx9DQBYcKRce6/56ca4v3MkUiKgwylkOBnCfmBvti/CKGIwcjavctzS7dqyXWpvQocZPRBNsZHsi5At3/2XNleADAwfLgmXMn4QPMA1c8OYkyGtYX4QxuqR3Sm9R+CQMkJYlkR0TfsuOLGWkTLswM3YnbSgT29Oi2Nft1OPYf06oQHFhf3d8+ifH28f6Qmcs/+bF/rtKhCTxJaFqtOTY14HefKuuq/9bdLdzC2yHrt9T1UOGJahfJxtrE7HPzsOwrtgCTXfQN+zc25Y09bwk9BNgtm9cF49lNQyy+YU5/JtnG86umVM31nHGX6fHJ4AtkjYCurxIAoLMJaA9HHXkhzS7m4tgNPfqldVtM/gZoknexIt5dntjLS5dpyy3U
1blmFbIO6jmO6t9DqRN9i9tER+XXVTTbEczmpc46X/5Xdjzds8Vyy9zmh16sRiEZGciny2PG1OisD9Kok/jWypkyEdaC3MpFhUedlQPXhaxHC/e3l2fmM/3pjVr9aczxvpe+jMqHT75dOY10YY9hAmvOT0mz/82ib/JxsTNe1YYW2qswAQvH7cAOPGE0sNvY0pYbLYk3Qd79Us9jrs1AW2MzlBPSykRZ5FBhrL9WICK4R2IyZgfAYMUzX//8TlElgg5tom4Y9wqp33vjbnweIj4BmZqiv9NOu/khrAY4n46LXdMmvItHevEqHCBT2E0Z+4Sw2wMxMXc5ol85SjObDFM/2XXXrbLJ3VxzqXUHwEgsshd3K0XoPxQm8tIoi8IKhk+csaza8QEwkMl064PqM/I/WMXP8KjJYN2yARgDU2CaISGJO8mkG433Kv1TYhct7MKV6SufEsatKMGRB6LnTtnKUXeYmvLYlXZA6VUkZC+GC85bLhO9aDT3U0bxA3yO4JQzhj3+bTWooUksGhRGmDQD2ZYOhk50DmAEruFHqotgoUNsUgaZHjp8Zp8cT36sNx4x8e2WQt1afX1LzDc0hwsNMPwibZ2JqSSinckTr52cZPsik7Ny8U0KYKJ/CGDVhB5gEr7CaurPzOCjU3l+bX8dn3RcZKP+6CdcrAeyPRAro+wdJD9Af8VcN7ORLJ/ccrsLKZuhW5gygrfiKLgppgrvZ2Zwu10fj/8RTWcciJ12HwU0CqQoBfKgzehop+5GyIjvvOXRNxbmQw22TQ10Y1mgGp8gtmzsmSpzdQ4HGkJ25FGW6IdFGDXYsuBEAAASnQZu8SahBbJlMCG///qeEAfB0xYJ28C17hanYAKWTS4DfeyA9fR8MkuwyKbTPGop82B5udHnJVjb6ner7UTwRX2SC6LM5QfF/NqHSxpJprhFmxX7jy1uazcw7ZJSDN12bfFulyBoGseCQNvgRGXbAqwhJiuOnNG3Drgg9JyFe25pg9l6wsNNSwmpDyadWHM3wE+Ix/hEs0YOf1bYwm8cncG+LuhmZzAhbnTMbR/hXCi3unrqMMcSpJilk8W6r98QeSQsbayZtevGz2ldptE1N2lkZtB0G613xu1e8XwiT9j41Nizx3sZOSnxVEnPLI38v/Yv4EwEFomKC+vYdFsEt2Nx7nD1JAZ3K80FWaTGmHM1aDN2/pjqU9qVdwTdS5y46wVNBD0nWdVmEADuRiRO2OLHRaXskt4DmdwxgV2h5mb0mnE6ZaR56gVROtJSg30ztVSk1YHFTeNVx1xSsD+pvDGE+0f2lCY/H+yfRfg1tqCg7q4swLsRuYiI8ZIgZKERY/fIEwuaK6SLHGHxzMhxKLI7RWlE3EE8/FmulTlIMJdg0SLSdeqGUZMmGj3InA5zuRhuJ6Hg5nVhGuJmDrG1iydKdCI0x7WFypH6R6zOksVbFMiVfEZNRUNvrct+o+KU1aX8DBW9MrRrM38vxNwjpwLEf3By7OXc3SqyJXkpz+by4JdRUVAcP+x0/aCZwjj8YLs5uBOLjSIcEQgn7B+aR5gSZEB3uIQCqd7cbwVPZ57mn4l4yb2dtc7VhLs9Z+6eUdUPj9iS6bBque6njyxQqhTku9+3eG8ioFVorv1XL7c4OB59dCHIYZpy+bjx5nHDCJvGAvxm5MYo0/aEwY+GW4prkmoq0e3P6VIXbZ46EVBkhVlmEU0Wqye1K7M193qmBQcMUpJSrfK40KTEXmLhbC9/eMhnwLijKUhS0tz2H6xg3B8LAqNY1U0eWhaXI65679C00fzIZtWLaXmz5FiknZuPENqkKOOEeo6+MsOC8ruie+T6spmNy3RrT4fbfcuOC9uT5+et7jiMOMlsvCw0M20U9HGtoQedJEs8tg6vdTsCQjT/aIAoFyIYxZT7YgulbNP3t/l/MdY8vdrYP+56dBxRS864z+lpxzgB6HKzPLI7bDK7zh2kT/cehZ+r9LlnYCWyGWyT2PacrIp0
bgBd9qQZtBZ6umuyTB0Xv/k969edbkS12zv9NJhTWC4mgqaKBnmGAMgjkA0OOBxIBstZ6SFUNxzoI59+rgEsNu5hnd82n5QEHdn+rV2NxhmGM8aYAG6cEQaSkywvt7Cb0zmsyjo9tEIXalqMkOmV36zGeZPcGUaYYF5e+ivzyOpqrP8f5PGccvhOes/DsfJJCdXnL8qceVx030sj1QEuhQCFZlr75PkpKpUdrfs4PqzyoOpkNtNY9Zp8mMBWMOo+PEoymkA3L5IZ2gNXtpY91cKaLjD3kJRelcTa/sUvrPmBxnwX9AeeWLy8Gv92W/BdejJHFIZQzfirSvWIz5Ve3L715JxHBdCPPWsc1CLo+zEpDbV55RukNU1CDHk5EEmzTPw1uLMATgY9g69uk7YCl+G8gQfhqeBagyQnAAAAEjkGf2kUVLCv/AxLLC7ABdRpQ4pu1FWTUX1SPSs+/tDxIzxByLhoT5k44TDxzxhsT9ZV1z3fBbH2JP5tyOu4NK1GAji6Z98J/sabqLvNSpUIoMXtflRysMULJgIE5GXk7+W1Vu4N6jxzqlqgWtt/8pE1FHxzE7khYcc9o99gL7/KOLAvriwudZZZQUwpcn2zfoYu0mosI8PJ3/0cgpR2/dg0TnjhjOVmHLEjkTviLhaM4yx4w8b3ArR/7rtdJ3quAbzEvDZ99LMLRogekAEEPZKVSVP2WGufO4zMAFOZqKudyPHgjGaAikRyvsAvoaEmkujrCJPeWnXeTuoLeZ17c3iXyvjfi6sNUFitZUiTofFehB9ITr5J0c8LLe+RZslsHbEVLcryAPt5WE+e3A9SWwzMceVPwNJQftD0n/YgltpCZAj3A3tQJAJtzXVpXX5l6ZbDjAXr4kz4j3Rqv6Fo39tvqwOuwKNGc7sOO+8jnJXlp6NThjvqTkQHzORo/cYC5dEoOZ6kMJoVF7uKhLdq4OQYtsGnvz8GJslvH/d9/sagBtb1O+9vNdYNJ2hVcBA4cgelXr28Veh88cmfPdoQasGo+JeuMTLieHjjOAhypGczQrlmGWqcKBreoV90uD3wPkR+/xrP+Qxl8ikHZPYUzSs2ncjR3eEQikDWRYME3GWjWGo7NEXaImG8LZ96kORGSdbdQdB+uAiruXuRAJVWzi6vQt63a7vuX/cgOAfjv11zMssvlxeKT4O1SXKT149uWYTIKS0R2LgPCfSGKtfXT3BeIIYO4GBAvcu6fqqnBjbE+cEslhje3FoVH/UoHtT8GsQJJlKiDvSAM6BtelUvrav4zfoDYFDSm5H2aAuUVijLKB+4EUyiKgEe8AanmwcxGca1EI/ECQsKol+0Z6/LlB2UR59Bs14uuMjnqIn2D/08Pl//Z1xxwZcxT4R+Pw6bmqukFdzYEX6+7krsenE6/H6xTJSDPkbN03kGbOT7Hz4LpStXeN2UgjON8Iv4Iyy1NR117OU/LPGM7wHT4KRCLVf71W0ntqLoeClwWwJwdA8XqgB0LrFdsIG1Nk8BAiqScOJ31EtYuNFpf5Su3Zutm4X1f1WXh17g60FyeRIKlpEDxR+qBBz5D18Mb1yF7P3dRg9LSSXHx6vVSqJdxnnbqYUlhmiBXQT1uwoiUsdSern2HF/H6MdcwBEPD9CCGg9NHRYZul8rlWRmVwuhbAD5MhN6f/B+SerkixR+VT+SnSfeCMnc/MX0OSRzLWN/QWcU1dJJCbGE1F1gL8NJaoHowry3OuT3z99hKr1CZSheDXox53ahXCQqOYiDjiLtAQcCUECcAzV1AhS+OH/RQKYozLCs/vPhbMx4iLFzCw7jaPC5uPe5DtxqbMJLeuMNfSXlUVWuTm6tPVGprIEPrB1Zy4qLaK+oomlT3h2VhkZp8hax+GDEfzvgpfgQVOyrHdPTz+5D4uRS5YUqt0B58+RxW6BYAzgu9gbpEOH6QRVCzKg+rq1rgfdcIi9E7EVCh3ko0lBaTzS3KH/bwLkd99gILAAAENAGf+XRCfwPZW+X
ZmtKlpKnQj3EAD+eW/WUrHhAxFTSjCzCgBBBxlqhvuUuNumQUYa7Q/N1A0a5s5ZD6eLhOS5azPFY4OOsDq0+to6LqFU1Yuu3qm3wLoI5qZ20FSkzXjappOR8PsWw+OaQuGC7h6roJI0X1wDXp4aBf+Jh0oenO0DLTfbFi4h2LvsUf74504vbp98gC7hoe+FBE50paE5tsYENhnNM1cVUPobN8oG45h+pkV3DwdioB2FwC3iXthpNG2IcZs9AiC60FUswU+SYyAq4Fjy7orXxdTxlkce5vHLX/p0AzUFqbCFrJvx5xZ4+49GgIx1Dte8rdmfkJtecxSu5m6Qh4riKss1blo6PVHlqjCWUCd1z8vaiqQYoRj/+TWZfE7kj8P+K3y/Ik7b0Q6sXksW4dOAY4ffew8AwvU2ntsnfHDrdGCbXQfVjPSVedMA9sqBhHDM3xYlB7x1eL6Pr/88U+vakChPf9vRo83VQaY/m0YQcIZ5YZltU+mcVxpdAIEPpNOAKc67X3/RYn8RVeEIcjDnn6paccMn7KfqQXNISApcYtSPlpJJQpWKNoCXO4Mx9uCB3kOPd9vLhaYQ766eEtjgiyAYhyNeoqFpL7JYssmRDJbA187aAe/XDr7A8z19eANDwVBzNi6WnQtkRfHzQtjKvz1rQ8YbCy9GdU+C5jmMqk+xHP1nm+nJ2o9e0PZKud3eA9+NyhtbE/WfMJk+D08K/9cp2REGLaZeiIZZB+o/I0kKW2OxmiEbUUF7KvBxho8ySekWUuL3byLBPBR2XDv1q1MAp6XzLjeDvjLeKGvkYAXdq2U6gADqTHnrp7vh8U7bT3YWJB21gSqcoKMzC18o1s3boRRLpvu67nKMpOcyelmCe029R3FLsDrCgnKYWVRfSYjwQFQKrOld9Bams5yijUM/pqZV0qrJpddadHOWbxgFs8AXN78X3qA/lPoL5FhdB8QfQON94WDF+0SFMwbHo3VCJ5zhXClJRPD+yuWtx+RMPaiGWEn2DdPZ6xXi7lKR304GImjdEndf/wpyx43d+JSCcZCfLLAIk3PgS5ChexX0RclADjRPWYnJgdDihmEyO2MW0G4Vhk1831S8/FVeIjRqH3S9WtX2lcDmSuCCCqV1nMQBuzcORg3XiLEWiTp33a1viZGSXWxobL2p72cMTbrWQzTHBfNCZUASxQ6Ime88xY61PGgYyX1YL2iqM46ezcQQAu6OF7+vFgwd/AQpgkcuRuka6vVJhraootkC02LdkLwqdvkn4CX0X391AD6sP8lqvFG6HWht9D5pFRvoJMhnb0FIJ5GkmqgSyXrSWRy2OV29Tb/w6kU2B1XhZDa7e84kuEVbp7VJIqv7z0vP3q0dQd4M7PMpiP63l7fUHa2i4Zoy7OOHGkW6bp60p7nIwHmm4JpUXvaWcuAAAENwGf+2pCfwPaV6+PKxMFUHdAANklSwJMhgbChIdcjUbXshOERscqeN5XYdbWfcLtl1K/BDObxNdplS+HSb13jNB8Kktgto5deTx/H9igpZA2QJoO+XDgNl3KhU4AXGxjEQH/T/jhDGzFCERyHTNG+3Uzk+IOunrODVmjdFBgLMD5pwmh/aB2wI9/7S4gsCE1NdObxS//r2RYkXMHMcd0gncpscGDtS32jUglY50JhofGZF+u51aXGzQ9VyKim9k1aP3Zar4pPwADPbl/OZI9wp3bOZHN+zsu2gw0wP+QznU1L7TXQmUNSfEjeJduC1IRyBERC0YRrAQSPQ0RTXNCZdlPGFi9YH8wau+9o8JsJ3P8djdAj9YpAw9pg+c1fMEbRuGQOvRov2/7qpjj60/R3aEd/w+t6EVSmKRDFqaXqo98ZamSjIhHao3i6Dv3Vez0IuzKm+n68xjCuh0kOSPcgH/Bc7R54wIdtI6jlbZb6ZNZazibOVAj9w37IEeUNVjrjM3JEwAAI/cF0S36n+h1cxhE+xCf+sA8HDHw7UVQufbiSyrRox9SvJrSMNcsgdT
a2iVaYZJbpDX1pJeGg0RLMu6Te7XMYzcqcbiPLzUa7bZQheuCkm05NgwBDrizXMLE4bI2tGRVF0SU1TVe+FnM6aqz+DlW2J6z20Ivv/kPGMz/A01g9kKjnYiKXFyoyQdRy8wA3Bh771BShO+xT7ipcKY1CQwfxP3guWZiqx+0RauSSivMr7WM1UPLlqzUc145DCiW8EDApSPAD7xt2WRNJOcyVugviLwkt+ZNh4YioAqAVy2w1XKYsueCLOcheCrsnBoskWVPdGRCwmbOUjFAgTh8KJd5n7TiQCBFJQ8eyEMzvlyzcYsNXY/PyREBuvl2xchobhKEMcXrEA8OoHWf1YgqkLVAFFxrE0ne8ccyH4PzmP2sN7dCA3mwKdIzBAlvzszt8Uk+2yI2EWw6tuPlvMCdYVDIXP4mQAuvIeZopm9q5ZFsuz7tGnPLDjwci3qs5RWksDCyUv9jbXYS7zlTBNskFpRRoG3X+4PMdRiMwyoptFih631BV4PjQnRNFkxeFNCSSkqeKa3bt0PqwaLbsnsi2JaRqHOj9EBIfQ5rYSisqN01WVO8ofTt9xSUY72lM3Yg365H9NKlo18oYVR78O5sA4KNTHQCv40PBdWWIOZKksgNPrlV/5VphRCsBM4vAw9wC5CEPZYb9mrIAUF6Cdj0WYTOm0AdEdGU6Blc28ZPCUVrb0+eC4Y5BQqrluRkCtp9cC5xDP2yeuPN3Pcbdbo9YWKagzuG7BvwFwBAwF7CDf/7pDZhQX7g/UXrxrzNwKNv/SzJGuNVA6KiK1Q35dPW/ha54xBK9BNAnEnQugUvxsqqXdxQRe+kZcvNF4Jgfz5uxMk2aFSoY1WTndw3M3pY6TbZTKmnAAAFCEGb4EmoQWyZTAhv//6nhAHxtPaEwPvkEAAulAZyv20OUtCw3Qq2sp+f5anEiazNfD1YZ5LKyyds3RgOB/i3A6wEhYR3zkNJLFKIJ1Lls63o1Zho1g9+Wo4m4Vrem3TPSaBp5vUT/irC83KZpt6JVejBVRcD/EjzUC6yGH5BtVe2VU4XBPWtO7PkaDx00OXdEQIoLQIZ29UJLHNf4okGdXk2Wv4P82Q/JeXrXMAns7YkORzArhxlZCOLo7DrvVYmqTyjeKvo1hOBHea2A3wvPjUw9DZDT8FABOJM2HG+hDM9m70HNFVpd3MzHfq379L9BNUugYQ26qisjVENQqNqme/X0Zv2f8pJhCE+d6QBRcT0V5rciicniN+rNWSKqj2j2i6l3olvHAuZQ++YmB8cipZJRl3FRx2yVbtTGutVmYHs6wbxYgXsjbeRE08czHoYNLb49X8Tn0r5SkIiBzaDas0Ab/O8U/mkw7AvK3qMSDrdAbuk5OoU0ectrugBMB8J/gwsmk9Bel5r8298mmjM/gdSby5VBVgTgATuizKS3x9rp2V/UGDdn4u12EDo0pLzLCJVFbddr7LXUmXyOhlse+Zw3+2egt4oerm/vVJsNLqF/F0BEcjqFaULi6RdmDc6e+KPj34G6/dyloQ0MZ05RdMU2HCv+HdBq5i4PlRMQ0wNlNGXr44RJ617obdZMO3udXT69yJYlAVp9H01E3T9LKUlgTFw5VeNzthJ3r7lz24+TYRY+g9xxIXt6ZIjmgW7SP+NvYKf/g8zJBELvf7SlPtg3mKcR4G87pi2WXOY91qUo8+Lk9li71Uuza2m/oRqPLhEFm9g3LNtJoaiYqO+4lMGiuQN5+TXWydrR/CMkeCI2l4RFqZ1c9SyHwzxIb5LOPQP1xebqEH9a+NXDt49YthUJRtFwBQY7S+nA9Hg1W7wY929j0X+Omwrut4ra1ZCZBx5SF+FtO4GAVUfX5ZN9ZaSR/ksQUZPA8IKg7VYfJBYVXXrgnhwSZP4zXasfqLI2o8IzCrqsNU9SwpQQqZVM8zGU2tvh0c9JgQncXLFey0PQ4EXc9LWrGNK+2wmJVbMTh5QJZJIUaSnCCXtWDnwJ3J9LVDUhvu7xrNYGg87n5F
7TRO5ygwdYIouOgwuSZkzRgCUsRlniS4dhhBJkrCkbm/MwAaaE4a7f0Se3MRSB55pCGpl4zsoS4PIQVQuO5PwKX5HnG0hvh72c4q1F6sYi4ZfJMecFfiUnktfySXJrohH+nh6BXvqM/lowLz6BCWslRmwRVMiR2/3vTDi2YT4cApGRWudWrFvsmIhXjOcrx+JkokyoEOOiGEYKgL/r6qKl+JzY1SNQhr8pTczjhL//MECSgdtjfcrtap6SpGRy+RjALwmLQeMqRxP1p1OzxbzgTQGKaad7Y6QaAJn88rxoWKxn1LNspkdHRyuj1mNtW0cS5U89TxPeU4kMxr5gGHGS0eOsZUluuYQmhYu+qeLZ8bIxgQ6gzk516ZQtRBVUpbSlwoMm0jwH78dNNi5JHkwJXAfrswwR4Y/gPWCHr/xJMYPRd325/UUyeNJwFuV/buj9eCL9OGI5x+BC+0J7+QvDMOGyrVewTbVtmVNELlNsq0w3O1RbXkdrtrO3bs+DW15Ws5FFvWUxqTA+irBktqiGY+ZGn1ocQKn2z3Y+iBEskFbX2+EgoT6DfMmDykRMFN8+cIpdHpR8cEAAASOQZ4eRRUsK/8BY2kHRzgBbx3JWkKozCYpak+zHhVN3Oz3XvtrNBSf9+S+rgQoVFO8Ef6RUCamYaWvlK/w0a4+kPR7UH72myvSwufbh3bzAhFdYf0cAH6g26I3fUrZt/nrC6qoVzra4QpX9Oeo/GRryCkrTiflRBqsqsYrw9wJbV7gz5CpSaKXIe3PQXkmESCN0ALZRt9iBJwaBf5m/O5sq2iqnFwBvHEL/9CxwYGl93ao675ZIMu26Ch1Fn7Gd5z9QgwkJOkwzAp1y7cE9+TpOxImG0yQJsDczHavX2Yh7cUg2rvNrQUuGsk+/4vRd7fISQyfvqLYM1MD6hSeNh8Oj1qLoZlEpK75Vd/uweao2XdCmKMpsnYjqpqr5t8/J1W5lE2BYDU8lPDEGVZwHDsX6i0JKxD06IDBU7wKT0A8eBzdwPb22kcfGYW2vOZKXcmQa9H0vP7dkn8mIiKt6v5B5/gmSdWacB44bk2w99eqi91lUzWJ29yguc42ROmrEOekTMuOKnGwsVPYNcmb6NIu7HuOXahFOC7IVgXxj+Vjo16Iu+uyB+PreGBh2sKOKnfzZhtgfQFBCAf+jkhhaROJQFMoYkFriW8T+W/nGeeEpHCR3c2nQT2xirhwYvcunUK9q4XnqSeAxMyNTwTZ2Srauo1v7T0I245GEXZhTTpYJM9GV+gU0QYppHqU00v440FyZ0VsDz3hTc3ns+AFBfFkhizaX+Esr9C2tXmxdKWwllo0a0zT1twzg+hPRmVHoKDQ++5+ZRDlJWUpNBjCcmaSKF2Kuu34Dro+lnNRdFUQ6jwfL3fzINGtH9rxsk10+I6Z4ezJAns7px7o+VSDgwrrrSlFoCEQUpBPuMGFyTitfpkfjeow1j6wvYkKapu9QjkpRMqX0Cd0UHFkJTve7iwbB2ngis7ZhOJ6Ey8am0ZzzLp+1TmaqZlE3nH74KbUJQvVTLOc6mzi4JrU6tjIvd2rzwAH7tGfDEjxeqjwVWZqzTktzWzIpGPnmMSZjt+9XHbTql2aK1OgcJzZkEOLTF1aPDbamUTZIczQMe/cKbKh4c3HjrjGnNz+8BsAMdGk7nKrswQF4wq1PmtulSpi/KfzywTSvoaHMH/IGo6nJn8wa9RJhewncCqpjLaOGiHv9JSfqOdVTNxWLYEKKzE/8mmO61AUZa560ORsGVlt+CLeyfgPiNu44wTiP8QV5B2BOPVOZww23DOXCs1VHhqo8qLVq05YtHxGA3ULcq9qlZNJfycHBP7SrtVApZmt/47KAxDf+dEGiqNeBrDLdllIalB1d2MNYhh70oUmC3w6x4ClzAtBVWu53Ci4EsKEDkhRYXA49oDucd8zGLNRudqYXyHpb2RBT94ZM/I7x0YCC43nfdHbrHhdg23ZCsNAPVCa+i8
aIRjmjgJWM5FSsJwwYzAAubPTpwZuO0B6Ptzh0EDO4GquOWyMzYYHy//FazBhVVg41MRttzpooKS5dRA2WZ+9T0dPeQ3JTVfhp69kWkZcRUGA4kRcshp6P+lMi4iv8461n5Dx5HqjWYliLW56D/AAAAQ3AZ49dEJ/AcOX8o+ACduz7TRgkNGTA1Nh/z1Tz6msOHdhKSzh16+ZlnMien6bmlw/GUxByLeV3GP1vUq3wHUHHnZKUFenFQl+OLbExJOL9UxsnJotjzdPqm/+vLuyTuMfHAGFwormAz+690f1Nn5v/QVBCyjVnkxSI9JbYIEkjh2Knwxr8tQB7hRDyjh9WBrIIGl/41KAmShkxTekdVdOHQUSBjGIyj4Bfz81P34chw+rzKNyAKUdwCbuDPbjaeudKf2yr/fM6JKfArPp281QdsQao336dMPY1hjQLnJEpDXJW3f59q+fp+26f391VH0u7SrcH7jg/wjI0w+Hi9mEpvnoW6xyG+CQLF5y+pEQ8pIav+r0JkCPRmYJfPNkslPOEb0XxJ8LJDfBNUsBnLJ+Hleg6njQ7SHiXbWJhalw6ZM8dl4jcYjpkzelPhJiWRGRW138tka2DxPJG/IWNzctDMndcp1AQfty3qUDL1rqbP8qF/02OsMwU8F49xs3LSuNq7eSdcDuo/URZ0J3moOgya3F8BqmFvyIp8OQm39dYd7N5qJzthFNWfv3xc6a5DZDJjaTlW3sF3/vgCvp/eOH3gj/4wDPansyqfhB0TWY6JbliMlLOxLRY+bcvDHJdVFEQYYRW+Kdyf7DyFJG+bfcbHgyHqzs/NXYwjhMuIudDT2rt/T6ILf5N8SwPj9swogf2kjdpXVmgAVHsFn4F/S/1HJfJjwSumdPJDNnwbLTuSgxJsUrbHCojLlAk6iRG8YUbSpLTXixpYAk8EZSsaubmG0okDT9iDAiOgv0we3o3rC9FQ9zQ2Mif0GJDNkMJs53f3fOM7fuAKi4rgC26y0VLoMcPmYaarm6cDfne+GXcjYi96LDnIanc7IK87siG9b9OSPlkLFI/hCxNxnQrnTHTHXyltht2OzST2An1j+wr0odcFIITics/2YoPsknvyl9dBEK4baRnLbsjPvVlnGiUACw2ejiNZIWguUgBwuDYDZiM3rZskUjA6G//9cA3xWi6BySdvou8My/+9qIcFX5TPRCdFfPmPqL2zMXTPm4aD3NnS51Br4ckXZbsBcM4fCbqVVy0nThhHIqW7mh35+NMKXrZAeJiSmAXhQM+pQXXiUgIpduFTEfK6bKmmOhKSV6SDCoMMvQDMn3oWYIUYeqRaL950UJLTccxFhwZVFUFfnjzaHUcAL8uHFLKE2VJQ1XMmUKsAHhIXt+4UOqWbjB4yCVCDwImsJMKBC8DEKheSa/ej8EXIaiHf2WPKQTANED0EU8hKUXERSJaK5T48Zc7nVcdc4CRhrZyjNoA3VUvdW/GvCyFsOp7PYPt2d9hKKcW6Mk4XQPH5Jb1+tb7QD6Y06djIu5XII6hMu9/54gShnLDdHXX/QpPRm6frR0rDmNRxe4SEScMdwn65mTfVT5tByyHAcvWKwAAAQNAZ4/akJ/AcLfZEXIfd98AEPzp1gq5Rsejj93A/7mHzxqH4C+kVp6hOLsP+xaxzoJO/XiWOtngCC/lE00vCu+WAE/6CwLBy5C2nszZGqTo6/zDLTchIZT3+8WgaC8HVlTf9xrqpK5qr43s92HCsQPTN/4WlrH9bULMKN6M/4CbFa95ke0ywQk0asXnu0lWz1yc0CRSntn0Flqv1EGIDpllv57G1vty1L5UjQJGFZaYLP2fq5CWRPuflb/EzP+wP/I0PDP5noxnexrL/CnukYXVBZky9VhROnucVJBKMTTdDM4+tAPCjtEujSRfv3bd8zjyGzs/C9rAztlaDdpJZWMZ35zud/Z1S/KcgPmB4XicXlZBdrIEk8kAIOqDG1h9Kiu8dASgQK7vSYO8TvX+5n
XTD09wl2FVP/f3a89zQm3bJnYbLbLknmyGi7favflm/L9FwgUjeVRwLJ1CQAPoEw8nBkiUrTGGNBo21YrwHTSe6fClqEL5NNJJyP0/iRxIHAN7cDH5eHw1aV4PV/VDyHMkyKsBGxnvtONqlFkL7Q8Uwa2YRJt1PgsPupNQ1p+1dogEVwVTb0fVbTlF8Z5FiGZY/+9BtL4Rim30cA5YzPnWnMImS8P4bI+Zgg5o+dfwUNFq1KyvLkufZTZ+VEORNH73llKhzNASFchn9oj8V/DFxUry1nYmKhHVqD+wpzKtW4GWdnaiNpHHZeRzDO0amlwQYYcTgutEsbPHtgrrb+20FOymT5BJZS4k3MztzSVEj7wvh0vH1KO1vZfQmAghKeSbg1A+8uySzn+OwTp92jUFf/O6YViFcJa1rDuL3FUmdAPZ9yAdCQiJepNkHfcRmNB/46nQNiafNE7IkoqVqemSuO9o1NwqSBGziI3Zfs5j/ZLlkyxTNHcaSJWc7zaLKccK7Q3tWycbCeaj25/pI3sKhDUU1NMJ4V7eQL9PAkencCNosGXxBEpwuXaC30WpPEzz2GtP1zXPXc3u5txzvVws1+uUbIAm4WFE3bow2OIoz7vK72OF//Wvv7FPqu+CJRaMlsMAtGm0X9fI0mDZ7LaOs8GScw0/JD3JzQcQv/Mj1k2ji4S2TcDVuqcgqe11J55j2tO8IsfVCui1OPdZ8U++pKc5oFz2wniIp/gZrPBfpAg97/4hP4iH2OUMk/ozlaVYlcIful7xQbY5ZAtjcTV83dBphDyvZzYjClfqZTnNkUq6hOMO3X23YjvdRdXzYGXqRa680RCVPWU3taB+JHRlubLDgfvhkWKNErNJKzcwdP9AyK1M6p3GnQakWkFVPjv6BzxUTnfF0GO+uxKZjpokPrJgCKC2W5i8cev8RWuCHsxxO4Nz64dUjgKi1+Y0uzYRaLDliNwxx4dOp7RSW0AAASKQZokSahBbJlMCG///qeEAeyOJIADRa1mvoPBwnhuI7GtSoR2PO1J/vcCEtK6HkrgNRrxjujQk8KE/B4Eegxv541l6fvzLT6LzHl1CIGGwyoH73O4gB/QncCYQGNBW88jrAPbUpP3zEyq/bzohUx9dQOP+whYK8juTB4mJp4MQhq+fQPB9NSOrP92p/L8JVrOe93myhn98DDlbzoDlmHOhCC6RyOzVyM50QeXkHTKFA7jeRz4SpOvxnQZiBmjcNCif0FAtCuuwIKr+dAB/DSKuVYBvlJtEHSDbjejsHsfwNN0x0vP1P+xaqukE2tm5VT7yATepYfseCUV7urPknbDuvX68/Z/7f8ZArLIEMnIx7rggwhd21IstyAyWd2u+J0UcV/MKCjlq/ycrd5r/CiRZoby+2rip0dMSzZygTzvNUn2TI97Vhn14WZatA4Hl0o7GJ4rAwpISSGZRg3Kp4EeM+S3hsPyLjbg7EpIHqaHKB8D6x2Ey60F3Of/EB5Z97WwlsNtWxrlGfsbU/citDzJv2Nty4O/kOVrPIjj6J2o3223BjiJ3M496GFmSUEXKgELXckN9M8JSctfQt1rZn1MeKcXvmYDfHB6VcKryY9z1L//rNEuhVM0i7og8v6umoli/b82aoJRjiMKfTDxI87inlcRnuKQGpyEPmFJBFvaZC1/QAIWGWzQGxnqXyqZfsVjKEphPzVUOzqM15T4c49cyLfKEyJJQbr9EFsTV3oPTQtiIcuqrr/OMan+31+kdahfg0LKYRQrpI8hc68qSZ+u3+cB+Bf9XguVE5NDoZ2beSZ3yl/R+k4VI+nCN08RK8S2JrUJ4rmgvDnIV9gogVXxmH+Dx7QhwOS7J2yvcURK3Cha1zK4MrdJkxMnQjAX5Jqvi4p7dB7Xsdwk8K6Gf5YqrDcD0UzAPS45Irbe6mxnrT/pqLaZ/r+XuWA6rhb4gDB+ls3IEEyHAq/mV/6p49S/XO201GbEbCmnv0nUe19+t+O69SKQMRjTb8A
uSdU36uJUw5t5rpHfWTb4MQzMgvNfbq5JGsSfiYmqVl7pmOvL/criKie7Wh00NvdPC+fqSVRgg6XZUjKycnqjXvxoOUjYHz+kt1vrE2fW7zpE+8lL+KZPGT5o67ZMPFfnPLNsRUlawJQkpu3dhFNKLB3ulTUzTqAlqhXpLxdzqQCauW6FkcJvAWbI2LvV5onw0nFMqp8TcmPRxjSJJsCuv9pNnPMy9Jm/d7Q3TF2UpYKm5OX4Di3iF6sn5Lj5UjvKhmGd6cNcaLcxWiSwrFGSMi4sfknGlht1WGazjL3e8UKlBLYyzpkJs70PBYgBb4GXS6VvNOCxq92XNc5E4iKyXFcskbSe+fXrrFWxiqlBggvF5iFRtGgcXYUdmynfnKqV31EjzH/UtpvrGcOpZWJ36mJ9lOsVKxOfiDkhgooEvW347jN+IUUX9Xmr4utlwqYY8wxnziTWErHH6M6IYJQulFo6S0Z5lgw5H+f82xXTL1NZN4wHPCi4FDLdi6RPVncec5MlTyJRttR4hOubRQUG9wAABGFBnkJFFSwr/wFjr46O+1MADq0srSucmjC5cfZ3XtksSduyJkycQ8O5WenP2rE61VFDyixp3tu/xd3zcmvwX01jOvjc1X3/0d8OuwMBLzsAgVv6IMbQIwXkDNtzrL2v7r3g2ghmN3mTnv565UrhZquKqNfQ1JZ2TYJaCEvXLczvrJm+fK9WJi9LzVk7a+0l4xSKc3x3NqOB54rpHIIX+lfs7YnR3gL8vC6lCQSs6lZV7i/1OM8zMIQQV8S02I8ek6nPceuiCfBD0w5zuyR8vVQvjWbECdqoupNAXyw1dDXgBQBS9SchtEFwCRqaeAxlq+TLkHlC/rFciRSwYVuDq0mIFZExYFgLCW5qaYOHYtmEaW3vQMx/NGAsmY5jrX6TiaWkCqvh+NJSBL95aTWXOtScQWS2gBpxwCP9GNHLiLS0nguGdw+3mYspiCgdbIhGyYVgVepY5Msi6g8fStnKdiwEn0G8Qd2FP7NYRRPCY98JeqQIi8qrk2kg3wxRTVcERo/2dqKA8weZ9ryEwYTfGbd6nQufBR943LpFKdMcv6lfkYzUfEyXm92sepdRvPjxBI12coaErHj8HcYYBnAATKs9H72H8osPWmrgIjz4t4tBSm7DraN4rAlUDAaTww+hmS84gAlQ9bFBQZJZGoVyiAMWl2VIFs5FRQoPmOvzoyPh/Npm/eZDZ07fkNAXIZuCmRzGHh61/Y1Ux/sGyf9br+/Da1ZWeC+mhEYqqHMzbxuq+JAhGnw7XB+LzTiuJCRVsP0gaX6ihCa3CopJ/GyaM/xjDc+527QONFQiBqlxBzAs2GrzLvip+Z8v5do94EM+56qY8cGsSpHLD1Wp3oFF6VZckhG9pXOCE7Y0SZti6lPbUAcS69WSPth2zrvGYJr+NL70EU49A8AyvIO8s9lAu3sRljBgOdJyGZlVGryg4O8OiuAyV9tlfJRhQklgZ2nVlc1ZR+FNPbDSWeR2TbC1h4agsXXcjjZIuS9m/YESxhM9cqSIzkp2A+C8zbEWU8icHI22AZn5j5nsfhfLMoEiKclywk8waJnHbHr/EMbbNddi0lglygFg4iMk8kZGhnWlOQZZ7PtNRFwNwBioO+pJnp454tvlO9NjYkQVtT5g4Bp2uZ9ZnksEuLXIanXZlacAzvRUx5VPmDyAP5GQMQWAXdg9308zbZ2mHy0gSsJP97weWUr4j44hv4wXnh5uGrSHobXdBS0eli2xxRw+xOeYmmu2dVLA5rw8c68FWMl1eMCsxiwwjjlDTwWWgdfoFjeGZ+s8FZX5us0ovlkx4igXHqVFP9ES7mz2jZISKbQ/Mpmn/6PizWzIV+G1mPKvOKmV9x31HGT0XbSHQp7Oevia0IWgjK4WBb+5i9jccfgndIDN/MsYGRHaH/uI2c6Stp22X8NFSEVDOr0oyfTflEjH3Q0zI+chijpSPeBUg+bWh6A2L+UNydbPbDk
1Yo5ZUJsckOsO/MDcaaraC8q4zwF4jbvugQAABBABnmF0Qn8BwmxIRKaZhD8AIIHfNl9ftx6Xf6Su4Z0taZgSnybqTGN2UFzmJfDvMlkL+JLPXA8eGLNHrRnvFU4nvGSc06tLAZF4xu4+3kjk8+D5h4tsL8FCFuQG+tCtCemjMVWHcvTwsKCOKmIgozZ/AQn4XuIkr3MpmITR8Dl99XVbTVf/sb923aqzG+g3gYLxtXDZVpYRgcNVkZSu75YMIV1NI4KaNxRyUMLp5SrSvOR9fZbLLhhrAMxR6NiPTlsxD6VLiySJzs1llDTDo25H8xAtlNOLikHra5w1IMdnNEWsLiDkohiGbl+QE1nBnGrK2NDDTieHDkyTkBkOL5tSIr9dQDMIQ3AOXrRK5VKbFnBEPws+YSuSCMpMXVm6+A5CCmhKO2lVSgRry/dWoBy8j3cbBBmck1RP1r9NcTAj/0XGOtO24n7CaiOBuKu0wzhpTuBrlomPhS4qbyEmdJSgxkA8NlHH6yFsRlQnFJuswCReQwkB4zIq9aMW92tXJD3u7Rd1rljT9AyMCcElMglth+4pMnn54/UE7cstvjS9yAC2CYVThWhlohCOXO7RDJa7SODj0sUKEY+ZZOv48QY4weQdWKeEpjpd43xkzcmsu8mvuYdgiFUaFjjXeLJL/y6vnWhfbCTqi1iMM3VW3lJ6ha+bYjv2ng29dXpt44/T1FicQC1lFftPWG7fvTKZ9Wdr1B+bb7PDXCKgmEIZMgUB/GD0Dd3yyzEik5Ut74DlB9nzGx7cf9rqvHP42xHioXMy0Ypq6emwSi5KfMVMSrYn2VveGRSjy7q8vzGAOHce6tCjUfJ5mICqwTMPSXUeMnMuYLET6tJs9jYWJy80OmSf9zEGNXAgXNX3rY8VxIJ67mSFaCkBE1qf9F/VOIFqrBiHcw0jvpU+2ZtH/qgmQ9hBUJ7dd5x6PlwvVWtmjgupG5zKWwrn/qSRGUIKQVhf65Hme99hMZtjB48nXSnSi/yxeXCYudQMlWiT7y7qn1nmnmnx1eP56Akrgx9Po6qj5PEYqg2aMXnpWHzHb2qbhdoGjbioKbC1t5xhNH+YkHldUh/8wBIXnP5Ckdxvc7kdyPxSx06HnIcCW/J6cUlwdy4MSCvJsXcBd04Ls24IPLPPZOqkfpJ4xb0e3WUWJr20KEGK7Cvun9f6G+y3oJ+klFyRQsTKNDxfwvl0M7C1vzb6nh3mGYredgw5gzCCEkvPSS+mpfZEH03XpzHSL/4R0Q6aCc51rNvtvgQuOfw56GzUnQ0jIFpIuvwBBBbOd0KFlPIK5Dg9Ou51TXguVgJ6mJXBcf4ybo6nledH+wBW66H3/PplHc1CCSZRWazdrqwjv/wWfgFzkFfo4xyBf10EBl/urMm55+MaUBK61H65BZ+wIAAABH8BnmNqQn8DtkXQATrNorqu3m5xXud+xX+08Denz3V+1s82qh/hybOR8GwLTxuOHiq7uBoC3Mc+sehkChSECOvAwRp/xHm424puNZAOMu8ETsxZ0HiP7SpTQ265FLxWv4gFVKgiSuIGfQwaUG80To2G/gk0EwRd4edl/1gzPRZC+UFrsjrwMIrk4d7JOKepNg0YNh19om+cLmS72ifHOcA859/BXUStjWTz9EMTuLWr3XusAmKg3QAVCKe0Bfq3MvBrxMTKtGm+Qe7Vo3pSA4Vd9bCkiCgGZa3AOQlrX5IIA9zZtHALiDt1DaBxH4AuUytAXj5PZwB5MWLaeVIZjywY6cQkcCbwh52j89EMRoS8IFURbW8XZp6hkeTjXRWEskTlvCHSiSNDpkQaR9+xIRTiWxa+3/YFgIhIz50bnUXKYCo6FnRdJwTXurBsK+kCdIvGjbCRvkKdJtVxgy+h/US9o4ubdb8MP1dkrFSt8U+enIwEn4SyU3neO1UshekbKiWLYEnN8dvIbqhTMx6EPPnU9UsjrpSdI9iu+Vw0tRh7xQ51H/yOqoaUys/
DnyvGqSz/Em9M88SyqMRItxHCvmtNTdDCYsZiqobyXiy0AEKYz8JQstW/hchYqXHQSpd4sdu6vLjq005XJtOxCDjjqVSZIVcrWkywKz6ilR7ZCifAEidtdQBVamSXqiiPl9c55sF2oEMkxogOdYaxPzpGMn1cDAfdSWc/7GTLnRdJ6PnxeELNAgoLXCAg7rrr4pkYfD6TaiTxxmhRVpZyoL5FMVI5GxXuQToh+/mrzdxNSjwLOtqwhw55FK5ZYZbyK/19CxDVTJFYCwCetx1S/yyzLQMdsbIdwe7prucUqtzmPuSI4YDOuOcEtPh3l+i8AxK0q8MV4mvJqwZy5IX3IGu1blEJgi+H/E5onqj1VsmcmFkgHCFL3KrxVayCzSuqlQ6X+5XlWFAmPUwq7nFo+P/2LT8VIJXv0Bwj6T/GTLDMYNRdWXrJAQksCdlIO+affKG/J/JSxuz/Qi9xZJE1LPAO0wCKzF3/N0K8L5bu7QWv7X7ONjcjaHBWg4kRXH2kz7e9jHfbiiruZ6urNpbA/Xl3+55oZ5nBRoSKts9+TsQM+kc/5CObBi9uzwGwbWpFRzVmP7lCPC8CdNuU5siHGYP7M2Vw2zO2RwSNGQEahNXZfh3a/RwvtzPBL81f2V6ThMG65X+G5iXkaAsbhEycA9DuCe0tXcZjeiz0CgxRzdIjH3eU8z1a4DbwSi7GtHr7Hgushg37zGhtnIZyDXeLTugxRReF1VtOgzuOwA5Pb6UQsIzQOjOx5E9IY4nsvqeuebJILYYuAYtP9KjnhTDdbDkTsIh0m/ycbMyK73YQX8wq/nGuMclV/3wX2f0qT4wNLtE0pF59Ea2gUbMYs36SIs2di91PYp4ZDOw+YchMxCBwfaVgvVveucJJgFeyM0sMqrBoaOtdA1L4MEA85KBsfU4GwxMG/VhQIFk6vQghqkNKCnWRFPnpL3z2nhJLQMtNkjdGPwAABMVBmmhJqEFsmUwIb//+p4QB7g3nWWqKCfxCQ1ABD3TOV72rCAO0pImbsKhFqhg48uh8MvknwvvKFwhT9x4W8wXmfd1+XGKeB/AfjMOllAqIdwgI45WAVtFbb0Q1LVP10aiEk4ohug6gd7q7JC2sCcF4ImfC26Z5JrbhHHHQrCToLkE+BH51d4r8je6b+FzuQwfDY8XxfAD92mjjYEaIQgsb4RmbJFiQludtyRDgLv6xkqHo7CWS9CxZOjU37KphWIaF4mKjjiqEy4Qxm4oG67/Kj+9dDd1/fjgk6iGPHPIPpUkhoYws5640TmjSDreCBOR8RVyYSQGeEk5wX5kQZLSMsO5D8ldgLW/g1RG6/pBKI+9AE2yzlLMFydZS7JklUM7pGrimPElbSR4UAhN+QXkY6x599XAJ+Ps7Fx0zrQy5Wbe5nW6GreaaMrCP7blw4uEuCzmaRZ5wL7m76New8JaNPBc5Vbzo4Sowp75Xcd1cAwp4NMxJKSyZ6bHj4r/gy+yN5n0Y8EyK6L+V9FjsOee6hsP5nK5Fk4qJ4ljJIUUj0bL9YFjXxvSbtug9D5y9BzZyS5f+avhiDaKJ4FbcVwQ+aJYcCnBwc2wdaCSg56rGmjTPCaZWhquXja99QQN3vBrgBh8u0kPEgRjETBCBaGR8f2HTbGDG1qp39hno5bi7jRPlwRPYP5h9sB7D9SAsu+VgQ3324P08qsXfWpGwrNriHmcD3/cFppYevaqffYY6mOqjCpndMp9BlH71MXf5Cl2D7kfQusMKBKVQi47K/Ti8zB6/tTN/F2iGYqDa01f7kW7X7AQ3HUxC2XFe8MHhDQ2rUw/uFI46aZaD5upKJINObhtl5fkC4tFoyVWi2fO+z483b8JOr2K2Ttv39lN+NKxfk6CYkg0Mrij69OAo8k+osu0rY/6uBzDNOoCvOyUv7JkT1emWvRwKDmPtyBtc265/+Fw72J+mg0E+6QC8ykG2VLas6ioIQlMkeZ+glBEJvzbO+55lJv00BrNb0E7Icev1GpB
FpPZT7GaDPqcxxxFWRcztKiv5S2hjJc2IK6QPuJXJZSpNjosgk96XDPl0rq99Ql5LhwLmW+YnW1jqGblHKK/IRYfjqQPinwHI2ZnMCA+Q4OuEKyDArURKyhilh3f4eGxX3fipybYkBUkEf6mH7zQwmZ6lh1OCtKyHOvRMPL9ux+HRF6zsK8x0ZLLOFmUQyJj9yAwTnx+kEqmaAbIKTkCvMxMduuVuszHUIgT8d7GkKI4mTJUBsF+tUgIWT96KxlUYNTt4XWltPreRsHE5dcThlCWncuxG3paTGtCeqmZ1YUF3gNLUouA1RwHpVfQnhNFVrsjRGzjVxgiB3eoYtK0EwzlGUyFBB7IJBEZy0JPBBplVonXFjaPjr+xF6XXeeJS2k2KaASv+hZsXtV/xQ9EQLiWVTRrJ6Z/VjeY6bWDM1mv+shaOFafFAYVZjGsYH4D/O1KudfVOm+DKEF216OT8iD9ta3MpaNfItv79VeofxlSewIJCfhSRfnanN6zfbMQEvd0/hG1/+s+cJFUOHSg2L45/ceDDhE7s885g7uO3o4WJi5npM++hODdBk1PNsqDQocclQ1BLwhkSpVK6LZh3zukAAASYQZ6GRRUsK/8BY8z6TH8JzKiAEN1qHPa+a4M6/tP1xoH7FzRyWtUhEEcndG8SbRQk0o5SMFlT8gZxp5YTWKQ4VTA/sZivSUszv+XlH5rUxGWYY+rAFlM+74i6NVGgcKEvRLUqIptIwIwvuBDcY0DLc4E577B8hXlBoIHAXwG6B08ca5Hbin/qw4jbpJIi4TdHH0/Qle2pY3dXiggx0oUtKk4AxzCZhDhbFSPUW4JPjAWAYOiOgV7rUAPRvXkXHYF3PLUMVgGeqhWVFjWUdq9AUsVEMMYXhofAsiNq0hjaFheNXxa4HQo1I5pqf0/SmlT9XoMCjq7Q3kNzW4awMJXY4mbRYOwRpZPUCWvE8bPFn9hAbTXB/ectd5lnn7/9b6Cnk0kVqFhtFs6fekH6QlAk7DqrPXZRK76gB5U9vNzb4Kp2gXisDayJk3KkFMWnPdj5lQQIh8O0wn1C3+nfS6mwNvuE8zJPAeoGNOfQ1NhuRSGdZww/M/cese9f6L70xl4mQKJmfh6007JBo2O9WBj9k492t61cCfTvIVRhd5kfM0hEaLW+clugc0Ff8csGM7pr5fDn/3hUjK0glBE1X9cSK2grAmc8RnqYgSIwMEJi/K+EEkRhcjmZOXykJcyqjT+RJigY8GQnfgRWo0LOZ3syp6dvRc/JMl77eepJ+6cdoT3w8FCBq10e0zKWJWYGkg8dgfyUstblDQ7daGXp+0YFJz0hq+Wxuo/05JWBhRp8/Dqx7lCrENVqin5knTqWuIzARAvzCLhzUq6J+0yoSh9T21+OKhR2w6SUc1P0PeCsKbZNSgw9cVdN7yPJplV08lDZJAg18CGslldC+6HFZniw8kBXbAluZ3QZ7bP1tPLdXlmzLZNLJo5QPAdtDnJebGge8iJU8hoEAv8U/FGZPQxY/H16raj9gLjUS2cfC5e/zcwf5wJ7INiAehxGDmAg+PQtK+NLeLKuucKI7PNlMph7GP2yNdevvdEtsz014PyKuebOHMnzXfWIDQlqfe97jNxLJ+D5n4ma1AsXS8uObvVoehoyBzBYw3wejjodnKvhouyoj5TSsd9ul/gZjN5q7UFdUvFtvUGHOvVcmJMH38Pg8H8K6MPwGnj6uTGYkI/Hl0fYg7Mxs2OTeKtvmmj4A+K083Ywazwf+fJzyRugERjLT6dZNNLhIgxRb7k0rXxVROoqdfbisJvL4323Kv7SqlHP0EDtb/HhWTSKjaG86AVWDRLVZO46iXrIaN3M0hyJGYHKCwwldeBj+YNKP+tE6+UZqdjJOKWhDSo2ooRym66Y/RmXRNy3YHFursdK2ym2aOFUO6AM26U0LLGDasKropfij8MW5/c+MaBS/5Gi5d1d5A/hy03xyklB9OWKdLxxYF/A0NzdT0S
u0oopCx2kk/9Io2mjf7KFKOqkc1aHTl/pi+peJ45o+/uBIxuhOkS28YpYMuCqedmXRyzelRnrHIyMOAzgTpSIjDtM+bxs+IURMlFxfNz6NB6MsNUGR/tBDTKQc48FeiI28lLay70QFenkfSdyw0eHttJEjnOKfmnNTZk0HpCBluMvAAAEQQGepXRCfwHDdSkFshiAllCABriNwVxcFf5VK03giyLN8qNPFF8fKqz9en7CcqrQZJhvEqLhRLv4KLc6ywFWCL5SYHQSA00uSwmUsKZkDLK9QRmGuv6XZ8osDTyoPIuCzbViVqdJRdUr1C4BhCrS0apql3kLmd0aeDAiW3g9GqsiTFNea7+P1EWQzHcVKaJWJdTcnMMWIlfFhV3uwWZriwYwb6sIM0ZTu2XwVFJl/cCQTW9hT/ixaluepBQwWHA7Ben3VPNexiMGncZl9dgA9JNeZdnaqL2sfDYep9aj0aW4qkWRGEysRWu4BKriaTnzZJ5JGv/lUnwU7z3/HI1fgvjiWpmuADM6YbMG51BbC4vttwvIQf5ZZmSch/NxbqkY+5wjhrSgnjWfmaU/R7vJaiqHQMC6Iekm1PirCkQ3Nwd64v78Mhb9LaE/wdbav5ZJGTj7vs7VTOI77VnNewL42/QZZz6ET7qzULhexmdd9GC2g9OF9lDGP4hMX0RywMBcDZrod4jYX+LNbU01oK+JyOIHUKMt9EJbFkd1Tw1+h1YSRUNcLBFQ+JywQzFwQ2Izb7cpgAn+4mLJLAhZJsAQErMw5feudxgrSQcP18/c0BSrhdq1NUUARjGyliTwXGtMabg6ZFqW/shNTZTwD5gXEghIaQZcm6qIb5BrRrQhAJWxN/AnyKwLis+EBFtr2uECIPbASuG81kKg9pW9wcke/U8LyWhr1FroscDWff91HHKjeYZXbF/CkBuRNQPwlTuu8HIAMHYruJwNa+SEJl3PkoIQ0akTRPMWignZFhtid/yRp7Vac4Mviof5qGEbQNE5MXwvxDnUuyoPifsZgXmuJETIsXATOjJFRNSuG0xH0kMnaxESa4SwADXDo5cAuAmhQoV5TbQ54Jts3bZf6UVn2ymKosG9kJvYbc4MuMuxHS3Z7ye8wkouYNU/Zbuq1M14eMPqQWjKbLg0Pc3zFKaOhch6bcZvTtUnzARa/20CixmoWrz9UFPW4Kfx7SaKBEqaj7H0yxtbofb+8iMSg9AVIgQ2CF3kUHkVf1qPwhdJDuBJs8fVRf3dF6njPdWhxsx9WJFGV+W+PTiINKgstpg1Z00oIo/XBo9zsI1WHBFh2EXD8mDZ26JZdAYbBA67hBnOumfKWaaNQ1kEsPf25juwyQmUt6TuSma2qtSiAcY6D4MhpEH/pphBthh1CJAqxIew/hoBBPG/QiIvLZiU+2VYEiXpOAabGgftJXd1buubm2o33+9f6yeH6TJcAaTbr4N8zA0a0EXCcCkBuc8vAotA5VKpkbK5oGUASdW6AWIa+PhDOfcnAwsWsZ8PC8UMM+6t/MJCpxuJnkYQW1XM05qcXnieiHUglF1uCYn/GpFfHHTLk18ivpVaCenHsK2YROoTbV/yimMgPtG3pmieb9lw2l4GAFkOxJ1Ha+NI02XyLAgHwQAAA/cBnqdqQn8Bvoe4Gv/cAJln9p4fR0xdQhy4FUVHaZ1qn4ss4Imqmy2ji6eDmiJV6+Ssz8kdppDDev+YdUirjnjbaH8zTsxUwiok3xVDmGtiKKyAxq1bnd8VGJgtFTXwbaTG+2tlXXcN/Xa3x0XqHSWQYppPAPhR/RZfYdl6pWcaZBN0N/dDNX93XehtMqlM9reVOLpy81YCtj6qUG6Ao35B5nVvoksPE22xnRS4d7/DxzyMdP7EzXJrTuOrJopGy50r0mwIsKDtghgDag7zH7FLtkWHDTnT5aUOfPw57i+/643ZsBGp3yWT7F5GSRgGujDjVl8ntNxiHQa64+LEZY0Qt8Vy6mmk7Qg2SxoIxLsyrLa
VO1trUSD1HLBTfJIHINxbSYtr57PeQFhGaNZNhqdEXkwWfRNHHeenOFyviYaYGTjn0vvRLsGtN0UJ/1tvMASWipScbWAGjvDuvJTwaon/0YeQmuciGl8oxulXsozFqTh+SJWaAzxQ8ZlmpyIAAVV5fTYL1UsUY2vp5iR7fI6HW4KvmcamM/usM2qQ1IhKH6m5kED89fD+bEWC0MUOc3x0D0XieVVZJ6k9xbi41i+X6l5sUdjWFlOZg5wI/kqXwNLGcaLfu57L0AOZ/MyzphnE2326ssua4DWeG2T1rg2Cqrg4RjMVrZvmKZNBY/LSnxIKB3XiihQjIq0oWvWB4bhRuGVzRqxb5/JqOc84ecbYnjt+xBQrKfYZf5SX0DI9l1+Nqi0XYJrNOVbTc674wGlnn3NvfevKJEMBwvKnAQFV/USvJVIbG6RBzllIgV3h2mlaMCVx7uL2RYYx9bUBHg42jN+hXUh42TolUpv/pe1yk4V6qCboieCbpJCi4ebgV6T1lEFptzw0pDLf5n9BQFDLbhEqLLq18oGVwbahGJ6jWr0O20Z84g6ju/UrN+nnmTDRGNANq5yH8C/yCWkEt7m9yVEstueCnJCGhLvRtw+0eWbjyl+/CMh4ue4SC2eXsS/i5k1G+wKByWI65dzkG/l2VWYW08uvNxNVtHZS95TrfiHVAvXh6zLTFnblIzyHca6Q9V0NOsjN50OGoMFs9vhuHIYeI/E0QXs3w6TdWHSMzkstEaVZwVnCEtz2FG2LK8hc+KPKAHUTVFj9GY5hSC8jzyDAIRcMXzeY6ngnUn1KUSqme4SF8a/24ncj4iZRvQc23364K6tjIkiGzbla5392sRPMncUE/4Fxcbw0Ii5nXMdvl2GFs8aEgacWEKY/cNFpWK1Y1NvT3fbdulgFoWk0am770qVb4P/whFpTkp011TiGneRuy+Urg5g3PnAWa6CmDQQOfINnxXTCBXUS9Hjv4s2dRd/HAAAEb0GarEmoQWyZTAhv//6nhAHx6UCSA8W9SAClk0uvb0TpjmKwnvWeA7COpcNP5WuXo+9AC2kn47nMBOaavogBvHDzU9iClV1yUySJq+C21x5Y8JK7d9yk5whrDi7q9yNAZrQ2oscSiW+CuHbRTyRlrlsJivKFNtsZsB+WBFMIIyAxSjh9aGM5KPMzmN5ertNwQ0I2MLG/YL7wS1tFf2J3+RPairYEFqH0JXoOTkVcXXuGS7h8eECEH542HpbuBivaF9VTSZSjFOs2uxkbZjyCoopwmOCGD5VbIrJ+wGF0Zxk78gjUR0geNavJQRfPAn5+8NMJQ8HpC5ZBQTf5p8lXPNzjUKaYAjz2Iuw8SJCChXOZZo3RfyJf7BsKQI1wf0OkqAslHkY5+c85ETjF4g/OnY3qODzB+bDuG0Ryrg6bd8B9wQE3Pc6giv8e9GOauC5MZ9675Q/Dy6yBDkRKCX9HOApSdyMh/b2GNqYegA0EbQg3I0EELXXNQnhaKSS9KVUvRcLqDtRdys7U7DLdm3h1d6H0JCrlKUNHBN/22PxQMENRDrq5tgiAoBb4CvoeTVgS95uUcREVzzwAHemcJTqlboTEFZy7rY3sCH2YmndNVtaaFyVyuNQ1JtZBpI9qL5qojxJV6eaH0jl9QRblCatgD1yIGR7L7zMSuKZpLVMyWPPxJmuXg2zf4rW6oePJuW3bljLnHfk8BMyzoQ6B7GjGPI3BvIYS94CSIH20jyrXhkOnj0ZDDqzp5GzMDyg+ffnTegiKi1LHJVgXPadb4UGLvL/0MoNOy0qSHeSjAux/aBILYHQLNEfx4TQXdsJthykQia+CtuB669hOQIWaoKPLsxQrQ6MflZ2CVOWJyrUkYAXramhd4fOE9YFXHrKuF2ChR+dP2JmOyZUDDd6pv7XsdN5cY/eoWP4m3d/LpRXF+h0N7IXcOLHsVPUtJ0DEwv+SSwPXhJG5s7gM7pxEyoAqC2/45IKLVeb1n4gTlWOnkiAezIm
wueGUHv3xQmeR0jiCNSk3nD6ROf7c+R7vjaR54/UtoWQJYAJ4liq6pIa6UrkZ/+1Khx+yO956z/6bf3ALDNcJJ2KOS8d895t/u/ltPkKkJOwu+sdc7vW41D6C1BF4mIzMpzavHpfsD2w2SXNKNJSYdy40lePsWvucJ74jXAPFjbMRnU6dW+va2sr3y77yFZjJ4FKVz/2LD5Wy6C/dDENJEJhXukirpv9uUj+Mfvmpq7sFG50bvimSA8Elr6EHCJdsIUESXs3SqiJs75iS2e+f/f6ZJwP2qjQCAsZ/jLaG8uJP1GY9ABETOzqnLp/b/E92ezeicCMTLJ/XDRQOE2n7zXGvcwhJIPq9NIgBX/tXatEpjiOo4yuNVxlXKeYesL1ZdsRhFp+AhPurQSjSnLIysiVXryS0N9CNTfrK3P6l9DvAWqNSB0U0ngMMfp8LpMNi9bcOqJMP1VNv8cSZpOHo9pTl25hHLeYc2of5QiGBHBxSRBlZ0TLZPyyIKR8AAASSQZ7KRRUsK/8BY2g/dX2YAPX+B9rXNbQ7VRb4KZcB8gp+opZh1Y00Txzh5K9VKYJ+c1kAiIW139mzbLRHqFnm4pjmjW+L9ylDHhcXjzz0ZN6wzxsMIhk02LdzEiAlnJ5reTLqziyn7YCNbDlphUcHIhrUExvRPWCi/VcFiNc3P+tvpYo3i0w5j2MF8lf2cFz6oRLKXf5ckd7+AdR0LSGzRa1UQqr6xF1cxl0g7OwJqioB0ksKCSZVnUNr8689RzCQ4eTEJ/6S2GgCn0aI4zw0Lgk82IgT6/jbnTrzcyrCYekNdO6RD7baqRCCBW0U4VvPRZkJzqfeznHNm/KugeJSezxIx/20/a+9Uos4sffRgyvpPPeDaJrYYjc/wIIerJJg0iFdPT48h7W0g4p4IEwkin8fUKkc64FV7OV1gNQW3lqbeouxY1UiyNGyXmaIT0UDq8S+CbOToi2YaowUrVNk6yu1gbcukeSBX3l1m1KfkSHMEVI2gdB6HljVQfXOvgIF55I+lC1QeOw3M2XYxvEoaGLXAaa4v3fBMBJ/YhDd2vmdUrYySdbBOQGSHhQ7LEIfQZFIlknuZW4Osfsh9CxoDJHU210s66oA+q07JNX+/T5cJgsWMho6430VVfYcRHoLrzzY3lPiEvczzkrtwT1Onr7/Bv78+4TngKRiZM7Q8r2OrO63tnnv90Ahg76RAtUVBfkdEr4mX0xaR/z8mOYj4C5AB/D0GPaMhWRmzeHZ0YP/JqGn5/yZfd2tGhWat7UC4b28rD63830hHjgZyI4wh0KD4eqUnWCXhuVaDlZUyr92y4niq0X5zKwR+VdLDmtJOiDym9fLBCvAKxRPtvY01tXS7uei5b23kdjVT3oNDzXzTjyDnSdP2/72aa96IM0UCfcJqJzQyGugKYpiNkiPXmRtKz7jdY3wZaQwX0SFmshFsgQCweIsffZftVrepTLZw6JxJOoWoaP1i/B3L2EWrpieVG062eM8+OfrYk8xOj0/qWu/2I20qctKEadOIKUvKeQuMs3Bv2jMhR+rVIn4aYYMUVXzw/ee2WRKm+dsQCDE9Njb3s2A7T5Pu1ByQTQ6ysOvNk38P7BruBSSaNU1s4SvJjsPRaog8I7MGllrec8AGRZ9ox1JEFrUXshACknzQOKNDYAsZFjmT7ZxDUnr3nP/5FQgK4iLQVY3YEITnEbqqommV1pt3fLb6PaCX3PCr+2rRM8UNBAmsLgnktAgXOvbl7Te62bYOreJkrzHizAlfOIZQ49clFJW2YSBldsC80YpKRQYOTPlS8vQ9Z5AH9BVBsi3gU8YzQCpvd9CsvXmWawDey317NAkkKKIBGiop++SJUr/8yEcoRknT7O/OY6Ewu/CCiYGEgpyUkBxu38S0OE4osJTKxqT8zT0zyajNfE2l9JQjbg5nI7+i9sCq4qYBhesOx5nPkknI4QkY4X+D80EUzgj7pnrfXYhqV+Ls12rb5DzAICS4XB
acQjeusTg5KJeOefCfuhYXVYYVo0D/B0jYhLwL6ETtfqCbFc7TWcx5dGxpXeroo+CQRg/0bqBAAAEBAGe6XRCfwHC1PsOymUOwAEPx8CR6l+sAxYyyF6FQZFEI3CizHEkUZgSRyl+00OZUYbKZHVbB/QlZz1YVKInh4nRRTFSNVUwK9i0Dn8/1Ck9Wrl2sf0mjAVDy7ngwE8++NhdhYbXF4I4ji9yIG8t8EgVbpsY3M7pm8eQdWVyOMuaPVgOBpRwooR3W1QwWnzd0Hg9Y44SIMYuEnPNiC3xbLUPyQdvVBAnR4HBMbioAWN+R+bgMPAWpUH7BwJxa9DRwVP7Av+0Gg6AlFzfsvkhajbUQ6QZWyi8WKt81XaQSs+C5N1YOc+eEBB2gtLGQt3lxvL/9yhvnnjBZXBlDMzMEZZaAYmIYqdKDQzoZK3cR2c4zsrxBITta6ETlFjhXFtywr5+5gsJx988TB0+JTnhb+m9okcs9tw1VBQeoktsARIuefHmjJLK+Bz/Eyo14Z4JqePYnBghaKz1vmfcjYogZLh5C95TlEXPaKT4C7SXoa3q/jfUkwatlYlt3fFpc4uCEORqQaf4gZpLipsV0dpfsAh8GJnOKv/8wAG4ExGnhcSdqSXne5erx4ySjGh6MErKNfOljF2pyz25iTXetXbVAzLyPXn+y3gCk5JPbirPn/QPnU90kvZKDLsqDXLkTYslrXPlF79EVFQurIK4NCd0OyA22GRxVAW71mYCtb3dpHDBVLfiwiTxaN/wYotlD+o6sV0RWO5GVQjm4oRPTV5QLVLtEtJOw1KcyE1BatVQMZtaM9cdqUi3sHW8URdf3AORZ7ARTt1LUCKJ8aR01ugwFQrPbwnqYTfLxkE1aBnkMTyMmphysKVnzjQxTx1P7T4EOipohezaPrAlnu4uSz1Kb0JFwxv509qfipZNWpHXkm4eVRrfQRMrYqrLc77SOXJqqggboiwZCKmUjypFbVuLYteFEhj6GgR9N+bGvoVf0Q1dVsJpCSloOdjoJHndVBbe7xe7Y5Py1j2/lVtwmNkwYtQQvoyhs+/frP0jWSJTGtYnqLivjvPjhC4iRHflaQ3e0S1fjofRj73RsSMN/S0lz8kRzU53rOsOp1YDcETLU11A49Gwfnj+FX9tlme5XhcNVQIs4FCX+sRYHsq25GC57YlG3E/ujaC+Gf6xzq6fUESjY/tid/C59vvhc2LrDyEAXG257sIy1hw7Le7WqHXT9stVpDCcNQE0VdnG1s7wXB3iyJgHnIgKczeKVt2UL+nSkQ6T7RdNmume0CU1D9GOeEAfdXGbTIEicWu7PrKq3LvbXVlELyc5lxWMe9riLUjmqKIfYlSzKz1qC3I1NGDwEAkrrZB3U38s8OcGkExJhovPSAi6hc0RnsM26Qe+FKN/+8ewOBd4Cu3RK9NJ60rgqi1EMXOgAAAEKgGe62pCfwG4Zk0GNf2pWecDj5QAbQVuBXC6iCwciTY1CNjLm9Znri0PskhEkGPfChgMMEzkJMIRPIFk9gCO2Vj18aCC72ZpBx30nx6sERxezbDu+6clKdj1rTzmCCLVn/5sUmIcKtE0ea26BrkXsqHphMTwsR7DMEZj/umZJAKmpBbPJpt4iGUiRC/v3HamYbUXjCRqYqMJZeWEhiC+S1btGbzxuoZw90rBzjugojT/jIZZ8rhjVUyL1Xw+aFbiU+nBGvlpglbDEK/L0RruBoVgk3LFwj5dsAV05Ikf3p8dO3B9jghl82LnTgvMOULARra91qmsD/ZIp8BMLYKQGuxLGksTYdAm4rK+nnmk6jgDfWd6dZIOwD/mu0KcUkex+EDzrsE1bN97+eqGplge2IcyuYizg993L6ZKH1C7NGUC3M5njPDGNMegF5tkGATbS4aTB5oEQubsz8uO+IiELvQlreekhHEIyRfhCqspEn4aALtM3ESfNzQ+eZQOf8C7tC+YN5gsFXFihjxL3UnFo8jLo1F9J9aPgAs
aAlE3gtuLpHKvX6ZNC2J4wSsc739vq0pTKYn1jKUpxXDoODMjhXkvaqMUkilHEMJ0JjJncScmbe9MrpAd0/RjG6eloNZ9V0NOXc6wDpU3O+tNbO1uVxYsPx3oI8/W2rftc/ixurFcDag6IlIUKN0PG/r23Zd+l9xh/guobDZmX+Ug4hVkw1ZsPncFj4ZgAkBgAHNqXy0t+EU8HrXX73KjXSO2npR9oP+T1zIs2nantTPW5OUPqvz9nYLyJ9bvPEYLJYy0oOFEtpfGIpDHjttMxnOeKQCcSfb1ymE62wrnWPAeSYgiKcBWI4y/R107cA6u8G3KXPFk13PVsSJ83G5RolNsDxcFkhIgdL+C0xullLc+StdaJLlv0aR5L+ualHyTZNKPtpXZAWhii1ZrZ07o8suxzsFn2gtl85BGiGMXRPFdeJanxOz6jI3l1KrzHvZb8LuzHOApDN+tMmwlSZuekuK8sb7DsF5LGg2aKbCjvTKko7iO+HmStkkWAVN/w6AvWOlRsjXA6xnz7fwdFr++8Wi2Tdo9d8jWaYQ+FFayg/WACYpHmakQeqiKQu5LPCR9yQwZ/Zru+gTfTXBVUHKrcPbM21ftUHHIBHftDDhqI8uO6cVoF/XvgUy/M/0U1QLjDpiAPiRTXOXVbTwt13hAsrLlEPfAlmqgVoTuD9cwNLKsHxZpjPkCV2bJ6mjtF9Pz7+kHussdMm8xVeIHMTCDflBkWEhUlRukTjHmvK8901Wvt7dTCtMmPxATvl/xBrDL0DGCXaWy+Vlx6dAVClTR+NIX4JLY6BYvpCiYbfNKP6VKM18x3woIXqdFxlMc79B45AEp5lmNGIDPUODeebDiEeS9AVUs6sUXI7gMtoLWqYAAAASqQZrwSahBbJlMCG///qeEAduuOX2BN4gAiagM5XvasIA7SkiZuDSAJKuq+c4WxnwDP/2ZIg2ZNHIQxqlQah7wrk7SUsD1RIbNiWsFjJkP5csLzH7ufdm6zDIEP9nWIpwM8kMTzAl8IFVkpVcc9zKhTSuq63HZJLkdSbmFkfZ2aGXzvlC1tB34eRXnXgzRipm/uJyf+B9SFXzf00IFugFAoxaRcIcqkYJZjiTew8Vrx8k5Gl9vG07UbchBybOnIdg4lokYyAIvqM3PzV8b4FKcu7vyV7PxmQ5F+l1UEBoSyK3p33qOOapGflQwYHUmE811DZwrMcUhw6iYGk4J69SX0SW/K3jfrKeAmgmhxkNhN9ddw/B0CP62NBY0iFVuIy+TqgGOhOWDBYAA5JInJx55WARy1Z0YTZu9N5KUSlXk+gpeFO2UG2iTi1usXXNZRoW9GQeF2bQPWdK05Gwf3RZGPAbO9uruw6m4j3VUrwNX9SnaY0JpraWWXA6oqdGoeWDQxJjcZElcXQ5X9fCZMbPSuGa4tKAG1qkiDBJwRUiOnFMIaxRxYovGvpUfl1+3rpYEP8X9bJgqYeUHKUteANTyF22L6T0BfvY7cZoyk7eWwVouJREKXNu0jv9qiQuiJlsvNaKtHQq+uedQ30bJIwT8tjhYgXrNwFJBYqS+w3k2POWCPXKtXn2TbAYNwFZ7ew6SlLySJ4AbIGS18/OA0csopLwAu4dBErNOgzg7cca7qsYRhvtdXZZshE4Y1emrRzbb34JGGc5srgOAZvYj+GZUCIVTxvrAPpw8wUlT8wzfECq6a48EeUtoIgzZCjhES0gLJosJT+RRwsrP5/Tv53dvGSLs3Tu/xkRT0MVnB92154qQ3dg0NAEtQKcwtjjFrcME7pOSf9UM6pjFJn414W2Ufk7mtagNwnZJqqLHtPN2xGVzln/Mbagw6zTIqdloB+sNY9mu7SVWYCnN3Bf5fjorFfY3ukxX3K0bF2K7F37XJHoykQ9LrHWIa+4/m9jGNPfNR30aJndkfd+wXjjSv6b9aj2koNYUhPnS/XRnOLKXl/d1C3J3ZjZvpPx2E/L9WFBaJigIcT7oKejjBmJsX/w/s/AC5cZCpYM
hybdPqXkB63LGsd/KtCOd+j/UB2fyTZAuZjq/cbe4KQBocrZugsruLomlgWCWUaRGUeB4k7nnje5od40voLInR+73sAKF5uXScQtI2BLwF5nQdlnouAwfmenZr9Z1A2xlJw+Z2/9QWswrTW60SzgyGplkuq6xvnojzBxXvbvJGU7rOSP6cfRbv9RSLqj4H+R9xP0oNpqlwdozy5l5u/HJ+65Q9YibwgvP7qtaBsTLMw/tEDQ7WWTv4Vyez/wj1tbuJ0jwpjf4fsxnvcTvdc+sBmfmyXuUeDDz1LHyCxnpyjOwAivwRVxx16LTz9dzyxi52uYPKYVRq8bIzcrx3octK4z+25E5ojZ3bHQfwKOqpkOk1APJ73DgrZZWr8X/YA4VRVn5l3Yg7BZl6dVBIApqwXNzIq30UuIZm7k93rEHm32TkD/qX3WPGfM2d6E4eo2igMad/yTuiYZVti2oe2BOqakhAAAEXUGfDkUVLCv/AvIzAAEPz5K0q6qbK7mkXdb8Ohn86I8hSmKfajfCSiTy+C0jw41f0NPwSP/Youy9liGUVJO3MhBHdEuB0gwU+iLf+Dn0Eb/Glpx+u6xlsO8O7naw5ZjEa5gUfjHqWGqdwRqTLFRDcfUmohqKvGdIe00SE95B7LTSBFq03r6Vb67GuiTTGIQBXJ1feP255tM4DTL9Bgdd5h4rTn9TBAry6c/BcIg3eSoOYWH173Z3ceVRqei/951rpjPycxHSlLN4m0sB0yBtgT3dUfK085+XU/k6bsPdSYtXHQRBlU5xPb7xcbqIKJ9S6AtHjr/u1zfSB3crkYbYAJh0JQhQ/a06i423RYLiDAz1iTVqNl+SX12pOIsubWmZOqngVIZQE8fCWLuXBf/70TJ1IP5k8ZC4ZUJValDOkbLYZ4XBwZHhw+akZLkxX8c5mAbfWHRRYSg8vDCA4x//LRgg6ULwA0eWWgvT8hkfQVZbaxWerAzR3ep1QMH77Wm+61ccjkSqqIw5aZCpggpq+BldcfLJFa0RBL92VmQMUEdUuQkmylBcBU03K1eswH50V336rLFa1P++7bWA7a37q04QAAOn0GnBqMo2fAk57iLREhRH+Xwy1u7oIId9NLJtCU+qB8CridmLCgeBFxwk0uv7DCRByuPTIJxp/Ur6yoHvgvKG0EzJtWCT927ZSBb1CQ30WhKNhwu1UH/5jfXaycxx9ZXxIScJshNb0SFha9ge8V/V0z5ebk0l2uXHFQ2ObWqGJ++8kDUd//eJ2NP3KO7jQ7nTtUvtcazqqljCJHPCdkKtkW4JaXgpNucuYZegqqS3fKCkgTKqrYlqfrPxtApWDYRAdZ8oQNX1ijGfkTlLBnIdO65gyX8cdyKc+U6kegU0MjF4YQ72M+o3jIImxF8oOvTvKk/a0bmOxO2KtoTRTJzQoYR2VprF4366XSCioBXegCXSaAawAkcoFbN3XJXLimQJmu8WraIX+C90vnag1TcCAzUmSjkuMYv2e+SrqgHFYSjoGO4Oan/50Yf9RgvKfb7MS10FU2FfdobuGb0Pt++YE0K2X9mIqRbz6dle1veIprTAOlLaN+CapU2lg9/Fcha/15Xl/013ny0wnRmN2stKpD2VlwDLhN2mxEl7I5cXXlGpb1mLILi8wtefft8KKJk1YIEvjQwdFdDvMOQJsHDnJQ3KbREgUX8RVRTV7QoQpgOB8RVkfLkLWxSj5ben1QwG0WOPDWZaJDYRMLfeXFuWd0K2ZXmIuDnitdbgXQjUXk1czvQrEH3hDNpSfUrCLhGGWRS/Y0IMMkpNYwsM4uYX7InR1+MvqPj2m+gVUaG6ICOJIQjynkqVO682lgYVSIidvwgU4hrbbDNYoHFJ1z8eukCPfuBpr8zGqZ4dOkfTM8tXjGwvpOnw69XdydOvFPQJPtC+r+7CqKKAMSmVPpQQC4atkATLGNDT/WaYCz5imGjDB51ZYbiygSEAAARUAZ8tdEJ/A7JAhILeYjZAAfzyvsw
xjVvXY4Cq/z9tHyj2eRAY8kdntWdKbyWoi/RENYc01Z4AzFH/UlGsUNUWABn36XwaTmZgIuRgEvm3a59d2eAsSODarKSvkXi/7ZhjmEi2v90Uy0+2Jofb0QChvlsVJwZDABdtLz4Rd9y8Wx7zBawTlwrjBzpCTXsvj+o4rxPJQiECIp7Ptm8i6eSN7FuQluJOH+uhxRNF9F79Y2HoTShw2AYwmf3vhHafzehAxxG+L8mpsiiNzQAJbrNFIh2CGJE5t1d8lbonPB7HGQial2eoKsL015w2JWgp+D1kJMkb15ZDj5PLpapW8CLf+KiLfHNHcIQ84RctpZ9dkhzOBI/iKtkHuDPLNPJfy3IkDJ3fS8MdspVtGNdbUXeaC3YRnaF65tPCfO3djzeOV2R4Ozo2S6gSqmec5bp+G1s09AdBzfnUjoGDsj19mjFanN9c8jk7siwf5Y/lVBQ3cZgyO6BrC16WAFsAh6LM4VElW/yQwNDe50XyFjuiRyGRlO7MtMAVKHXXNQ5AGnhOzNc5rBrzB6SnfR6nlu59dn7BBbyAGpAIn1DXC9txwXcdm5aW8KzA9ARa92ZqZBuErMWg2vkYmzK7f+dvAOLn+CTZLnxphAx7rKZsfiv/6XGGydRi/WtNk3pxp7elJBo06DCYXXgMmbMY3c/Ayox6c44JKuucflJ6ZO9k9/j4w8RZe0xZo3HFj+k7glTVj122e+AR5rBWzmagVqovTzh2wgrfckYjiQcub2hywObn2oZ9pYW92KJbe5tUMPJcMXYlGkdXScgf1TcENVImZttOSXrA7+jnLwxmU6xOigJR6xZfBi16zKv/+QcTyerVa6YSxet/Pf+3VGo9YLVB3BEA3ZPoF9dPwxRPIczVWz17C18DsxJPDBTx1M0/x6dNQGyB2rbclaYso5dogXMzKjaAN/ebluqjn4E/TBGRR9lXQib5zKh2IpT+xORi1kDF2qWBtLiKOoQVhcX3t1Q2oiZF095OMmlAzcNBLgU2zVIqhn+5jNIrh7XtoV5fvWz2b/FX/I+VISmZna+39GvftTop6vKcQCn7uANfQVqT3ZFmtR77G5MLu6VgxRL07LSkZVuErH1JSBcVav7bNm04ZuSca9WOf+dRpxAwmwcu5cEDwdgrRytnPuwQ5IT3Z3zOXwyTusyG1n9q9eTsPsIZZYnIm9gE4UpsLDixFTimFxjMVLocFTTilDvK70Erkm2oaNDxMI7k5YTcy+KhoUDQv2f6yb1OL/TQUA5wt/unPICnh19c65czEPCYl7ePtrom8MkVHFoOKEfDD7MIpv8Fad5MaIL8G8eLl00VcJ3a2IwYhXqskbI75Eeruja4uNCp9ugYwpOvUiFXsie6UV0Mn8CJqUnyOaz1jK2mM+Js6xCjwIwAUbQ2tRiTwhvWecyV9QCLbJX1569WNSxPJ8zFz89n+DflZU2tgQAABEABny9qQn8Dr4TQBvk0gxNaACE50xPgkoO7h0Jba//06N4KUkXuEA6nZn4CVDwhqugBTGhG07atmMsAW3Ngt9ULhrU1AIfwHJ0iOSn5pj5wDWE3Lt7ig6w00mEjvXONxFahUrkl5AHIMZ/tTxxcWulmBgeh8cLsOVagFxs+5SZ6CsBctQ410XtKTd/psIi36wsRK8DRMGUP17mdHZ2mKhDNW1WjfJDUf0X6x/h1G1ybwDpCoM5REjJ9jH3Xu+ofoYBgTwcDX8T90U5zGvtWQnEybrddUdoi2hb38RGj3A07BzzB71nWHRWgztR+PRv0ybqUOHY4WBZzLfndx7picfZitAHGCB7QQVL0KrMvqsep0aEXKpYGjl8pV08q5UuERO4kKJXWt0zA4+g2W08VX45zgUyWqQd3U71aWW43gHleCn2qCHgDNbAKa21YDTNmtBVn+TR2+/BSzjm4aO0RyRf/MftiJMpFpAOxW5CVdCV8aY6zDBnXsoqGBiZm7p9W1KCnZsMv847LUWstk0PULXCzNf67l5x6qej
86QKl3E8/DDUvm+ub41Kwq3h+cx6u48FoVXMyp0rd6slefKKwYAVEXGgzvUE1wZJxLzSSB+Yd7Pnyfpp6VZpTN5QP+SDJd8vrSLmg9gvbHOV8fON77o7+n477ksnCeXZlM22YMDeQTJiTA4VAQaICoFP0K7HDP1M8FXtub2tZqNr53PiHWCx0yFUMWP2Ns1HqwsJqupsM1G4qtknf//BGTTJPEJ3DPj/iYG1gMh0hXe864m2RjU2NdrwnpxVj2H4fMlCodYoH8gJ+O+f61bX2zT6Um1yims/kj+clCJUL9yKMVqpIjkY/j5QGJUyhQxzgchhCYExPZ72i9ArfrBlCVx7RvvwBWCck8ivztHQSzONUFtgfOJ71KYC0+VChJFLCClr6u5I3wpO3cmbekz0LBf9bKCaeSVPobRu44eqEcrUjwf2fepw9NO4TYQanG9h6MAu1PUlCLXzVC9HfNsqgS6p4GzOB2+0H8IsG3Kjydz0kyAuYbNsdKIBAQdzbYwaUeUGxviGTNrsP1nlLCCzu8/FAegwZ5z+t8hs+R+KoKOgImISOmp5p9F3oFAsqULVoYjZaGZVNHQoQJGwj7z/J7APsUqbqsbO/2uyvLFD3lgE19ubBRtUg+J4xBoQxiGTbMN4QDMtsYIP69FmBuPmIyJptRvUFuwjo5SZSktEYjWGXuh3B8TrKe33kP3wVOqrxlqDO/a5ealw+ssSDh2DJqgkp02wj0o1U+UlLc4Ul6z13bINbj+eFWET7a+EPLDSeMdquf3uCNoP1wV9bmvrxTaYUSalV/WLP4duak3X1DE3j0h5yq8uwrllvDUileTPFu8NEfq3/JkkY36Z13mqQphuNgpmQGC/g2XXCcAAP6yNkk0JGtwpuB9fPdd+s1lmwgdSilIPAgAAABMVBmzNJqEFsmUwIb//+p4QRb9aQAS+uJL8syA7tW//pwF4KuEOdv7EAnre82JcDbdlfGJTEzcE+KlgAZrq+tZ+iUp5wXin3wTdYzDOXeGZDZTR2B1EYXeVjMSvHn4RwWpU+/jgJlyZIRlSFLX6eSD1SlMWExnkHHwmBokyqMIHhv8GHiifE/ygkahzFTqrZTRT/bA4JkzpvEwjiFOx0BdBrMD0aecfSmx2nozk5VGdaPbGjX61u0QqEsJw3VpW66y+Uwzpy2oUiMZVHPpG1iLXoxo/CEaRYAnr4bW98JyQqgYtQ+Hjq5vIWHwu0rFQZ7nlflMHQH1Y734xfXDRsf/YCadp1tylb5k9Ua8oniHPpDZOPO1mzRGOWcyXAoOXcx4/6mbBxupInESOakqfqLUPKQxxuSHfrTqYT7KFO8zzuBVovr5OU1qtKK/3aGflIaomy75vPU1MCkoBnCe0bF7KEpqDHXklEML2OtwwoP6AjQGwJdIZEHtPw2GQ7bC3YDVxlA8repURuthDVQ7Hc8TpZbkTyjagCWgVa2B00AGKWeCEn/dwq2CGjLx6I7arJ8oCnE5Q+4HouwB9bp14T3ibYnFcO5x4sW/RJcV3miyW/jyvVrSHUBmun7UcIEKsGeVNxc82Ua6hsPAMLV1EB0bqNVO+xOy37RsSuXMbHHyh3AkhvlU+AkxdtoNIwMHtoDTIQ/lIPO5xr4WGo4n8GVmGsYILT6IaUtYTi7VQW6tppaC8vHZOb/m3ISUgwuCkELX5/sLD52BSapMnEQRWbs8IYunWWAs7Y1wUXtNMWvqz4MC4tTTayF89f1E5l4NAlaO/lZN6fcAjb4zbXQkpIx4XLXI6Fmo6yLy3FHHBpaSAVL8M0zZipxcJ3gRGGc5wnRbqUi/T1XIeZDAtlgT+a7FuHeLftK+sBRCmgCxZCjUhKiwFnJ8s3sOXChjXTD4/Ihp5v08dyg7ihkvIICao7SgnlermjB1Pu5DPg3VcXvyRUqPRxwgP1z9zUY0osrdiaBUO5dAq0VFvZYKptYljR3N+X/bK5iP4ueCZMHBmxfmIoZbC5kHHPUsU+M/IA5zFMAW/
FPRy2XbFeljrNKprw9TIZ21QPnkjV4fK6WqavP5a9XFILfYEXmxW1bQYZKhW7npp0vplMVZdJnioc++inmbUeCVB9cqKzl0GZf5rgFukvgG3AFtvLYkdRp3ASqBmj8kWhvo+4NUqhFub0t5aIklaMSbv+mb1QRaBczbOoXpkS/mEGXKdvcKyJ1z3ZyPGsYZ9mI1rocLcWZh4v1QUx66N+MoLE1IzFBefwxU+roqR9it2ERSo7unxqdnEVb0zuyS1CxR6wWxuG7d/VdZasLpYbr46WWfGAX7+f5hiy+5rOKKMP9ughB7qp5xiPnalc4YHs46F3Ume2WU1I+7J3lSamXAqtjW4gW42R9z71rTJS91UjycUAv2b/51fX+ewYdmiFtRlZvqy3XkLWUHPo5pTtvr8DTj1Q5krh5S/jXmN2wuc07eV8G0+a5OrqSwDwkklSe02p3Q4vLGc9cgJBIXADFuBQoqV4RYFO7YIF1ZcvkYdM+7qWPrMWDfpnZYykDASHMLdF26bzAZXYsUZhrImJDfiURcAAAAQ9QZ9RRRUsK/8C7r/1n9r8aPGeAATjWoc9r5PaYRGYt6YQu+z+1wbVy9ogHzwMB5ZxGpMKTcTVICtTM7LkU37LE4UtFv0aAvqSZPEYNcwOFb4r/aHz2WLXxTl5TaW443b/ONiFPgO+6yddzCpfmFQYQ5rlazeEedmRVvIAqsYrS/wEtCGbYtwkjNQD4u2Grk2gbthTL+T0BIyxpjREZHiClWe1Ku7lGTW7kWUy1WTyr28GNG6r53hsdElhnKcHXMSfezqLEW+qvkuuJCqox1JKkiEFG3ykzgtbTYqVR1B7prgBxh5/+n2IEcfs/4rcsjpL9aaD7pWRu7ma6udw74RuPLizXA1aX4OUTRqcskB3ZZJN/avRCP5ZOcv8VTbNechIOK8YMVmUJqqG9qeZohK8DW/wPLbTyUVkLBm8Pp5oSX8wotuE6aTJmMYJpXfERENp5pFVtdHcrpAUPprdwakcOsVRkxAxoQ8nZ05h4L/UtZVvm4C0/EuNP8QnDPOSkTBzkZEBDtofDoUz96+5ZCCFWZNwVZQ6kkXUmdc1Pshd8fybJB39bYDM2pZSAfDVdk7begwTvmdh07mO2uysTiKyS4QfaRmid80b0ZrlWy+bvRJQcF0Jn87Px4eoZnOxRs4ENYtgH29BR95oE8ER4QNqDT9SckToPWx1WlEriHoyzrrJm+A1ZyM5f6dFKgBqBl2LFGugSXcpL/9ECPdWtfpMN/fj3++L+sxKTGfPe+0jTpQTF3cNFdrdct3yLUWnLsCLH5FUuL1dmfY1qt7F0Hi/GX1+k4pM/svKmO1RAqOWSuhtdH4UX/2A9ujzI6i17IGW0zsT1xfMoIp08OgR9GPJL4kwL499ORW6X4mLGaRUgO5J65ml+Xynrl5gY810i8h44FkMEXyqTAFTmebgHu/1f41p0/uTczS/x9wkiNImLODEyZRx4OHKLTnOfp9O7mG40/KSh31cicSHVs1lm5sBvKCiKSFjBqc87rolnfvq/VCSR1A3VNM0XPh/ORp/UtiYKLBPEWoZD5q7AoSs7VouIrShS3+miNt4p41WFIbM6NehGcF8gZx3EsiS5NVr87rIlzLdyCfU1YcgQjVgtRW94doQyiB5GYpspgnu51KjDrUF2ZFmKIQyWaus60d9/+sCtpuXwn//U7Qj4XYEgIZr3y2dVcke5ra1vaK6RZci7Hlg5G0hIDfESSDU1leIRkC3CWroQ9xEcsEumyvmkD9B0w0c4Cl2ZtCb+A6qfnoMcAQ0MKKQ04kaBa6x5QU4TUZKa4QkGIF2ghMeMPmqxjVneARhQp0L9RgH7jfG3jcK+5z/vNmU/n9OjRnIUAB7LlBqC3pvslWUc0CcNVmcVbdZO2OgtJrWEbmKhHsRLCbdmGroCEgzxqlR70F/L/IE28yFOXcdUuIMkh8CVdxDvRtGSlP6dHgCnWn4zZH+9UkAAAQjAZ9
yakJ/A7Gidvq4PyoUACIOz7Tpj1Nxo0lsCDe/OOQtNaFktfSLIiLF6Rio4+dQq40jOXwb6Ya3nu6R/ZKa0fAAJ4a+9NJ9OJXMGsBK+GWmAvPJEN77DukeN7ChDVwj/BCeWbUdvqv+x5q1k5noMsHSUuOyUMbFWTySFJimHUIseqqQIRXpYv2RPlnn2BDiXCjrqb/P+BU+PHeSns5/U+ZI/rD375KcPYUEplBsrTm4uPC70M1FIzkvpHGzPW3/eFu1QNL8ELgf2QxS4XrZQiNB39ZaFjFTpQres/DoiInORWsPqLEIj6jIBLVib0YjxBQLonCteLaMDUNUtmbtV3Ivv1dUZ/DQqdzJzp47nB2unccMFMTLXWFKsLXlOZj3nBGzi9qykfOAgYh8SLMxodJwzGEjlqZ4HQIQ2A3xkZJWmXUKF6ZX3tYu1dz5NPxtJji0BQSBFWRaKLaz9BUw3IdD0GwsIgptX3SejzNMABEYIC4T69+lhzMPNxcZAiFpl2t+ENamjLxiTrO3Cuu+Y5TVwtwXenD/gXtidb+//KF3MVcnEnXHaOqxxK380Ze+C7ArCjRgfqJbqB6EObavzXLCbge/s8hteYVgtG/nutAGSacokS82zMQ6RjbooTsWRw+/abuAn++xpAxxY/OUtjltGCHBN/Ce3+pZyK9pW/UH5PNjM9BrW0jvunnIdHAli2apzZv+TXCENPJQzwsAZ+gMqplrhzn6gVOJww3t5XfV0JE1pdA0usOrW9RtbjjLPHcijU+v6glrc8PJ8pwKBz/2MOmTzFMqy1oINn5K8Ex8KdtwDthV5aToV+8LySr86z0j99gid3UBJWyAu9omZgQXNi+6+dDYwS1mNvex94ZevoUW1aJdaYFFO2QcN/4Nm9ThF9F72p1c6XnqlkitxJ9zWOC4wQzirSiKfemaCks5iyLvotrQG6TNrw9H4v9YyfeDIRB6DwtJzYl1iLRa3ArdrHownIDBIAgsW2wNOfIOjtUUVsymeO/mmaknL1CZ9spUISgUW5RBncJOQiC5HoJIPxr+jN7r1mr0UtepcQvbxgENgT3u5a/7oBbjMNOD6bt009K07uAP0ooby0xo+MWC50uXaz+PJNU3HK6hFUa5OB2soqc9+b8xHYXIy14YoDRBBP2/OXoTkMVoU64tpNdQDgLV8PIw2xwCfciAkhbzmBOouTDgjKBpNisfNMvQ3Am4Vo62w0A8uRn/pJpq3Rf622scp42XY/cla1HzWVD4dN1DYN5q1kvToPnldT16bnKBhKQkO4hWS6+Mx1QZb9bKgXjQ0MuF06GNwqXIhGErNg8bcm2WymUw9ywUbrgR3mW1H7ZRsxR6yMXsyhqPbye9a6Nhx13tFsyOTQC6jV0d0uhzIu/eXZ8vyzUzAS5wTTiwAAAEakGbd0moQWyZTAhn//6eEDvY8hvsaBPKpoAF80uFoM4KcopjDTHYK/bcrWpePK68I/8FeLVbnQfebTGNydQA0EkdU8hjKSfZldiC2Mn00BOovy4jSpRd6/LhMvhSqrEOPk75+Y52flRcdd2ZfCsQk4ENwua9htW0kODUb6dJOgpfJprnsKi4H8TEDDp+apRPTnBlR1vA20b3C/i6pWUsUUuxJTYQ9Bwm2xsaLcpAEayhJkGYKZ9tLRR1NqBAn7wik4tDfJi0IbBTy0bomDmAgPjEb9G1JYjrwGLQfuPNjeH1HbUKSn2VTxUZo5yzn6Z4hXBHtHlpURdeKb9/skWZhHW2SKYts+bOffio7oxGwa7A7ifVkZDB5wWkhPhnPIx2tzIPf7Z2nlUXDsaQEapOcmL1WNmCFTYylfecssNxg1HOSdsnr2mwMtfTLU7QO/IIccFeI2NkOGrgKH6ESMZ1MHeTeQD0hFLxP8002Qbs/OXRzY7Af75Mxty/uMNgGquvO8npdJv1QOCl+ReXPCxyIpqEUoVSUZgz5wMyGV4jxcNOxkEEZBNoJrqK8ulkhtLS7ndQwJgh5S9
Jk0JBA1cL6M8DOwALNMrp6tbE2nQTeoDHarreDCUFvchuh1nKEj5PEa9E0SpDE3tUNYnBG/pdCJVQyt0bwbhmVYVbOvO2c/nGY9EJ+OEG9KFB45+eSnIztcFtsd7ZeSDAon5ZgVRVPcgqzgACH4oyZ5yegI3Phq5Pf9Q10Evzq5VI266nPEVXdyC0wvcwV145sR5U5RU3GGIGV3x97IS1sLAZYdDJJBHZcWMaRS0FaEXe4yWTp6bPHIfDGodW8dHs4xd3J5yfLTiyVfAt8Jhyflmz1GTSaTDFjrkyKM7EyTis7LRRzt1S3pjbLof9+nKc78bC1Dt4bWhe1mdpsIvUv5H//x+RfvlForFE+nVhnc1U8PevxCBfDX9fO0OzMlkvThghxMxhHt78rlOWn0B1NysZ0c/bsnAGujvOh+GJ+DAvytQLSoQYXx1mos51XKrTS/5qo9bx5gi3xVXBj+fdQjJWsJpxfVMgqi33ehotdZU8YJGqXgNqfWGGBN18OAdP3zvxWdVxTqcerWHvT3FiIWqFBuT1cd2iQ5NNlupFcRFTIcrYbn9o8h2Hzxyyxei+h2Y8AiggCY4u0Y6NaCuwz+fJlAd52RcXWaGzFBD109Fb9stCW/nCRP/kw+aob4/ucDA7YPj7s3PmqFV+Ob3C09/0/PjgRpCxCXfGCB30aNoImaWOuF9k9VXInoabxLWnWHamvebThEl+1ViMERTVs/lu4V7urMZjXtV/Ldh5avLyKVVeA/1k4X/9//CV5GsunG0MfLsSj7G0f9qSQf5hX8b+EKZRw3JmvX94pBnrHGgKKzZtrmkn4eFvRKsrnluZTyfdBQS7iyNqNKp3yTJPnGZ8kO01FZIVP8zpjTS8vkTfkHCzBExDu9teTOWbakaqNTd5NidWAt0l0+fwHOkg5EPAAAAEQ0GflUUVLCv/Au2n65pVnMqPJSBJt+IACC+StFTRTub1RNkbO9jPIVJoVyfCKGfxlTsW4IHul/ZcTbPPk/GI2HGhBBJ4q8BjUkZHwKN4VYLeszvxczXx+EIFAI+7ZXz/cwlCrYi7SWjPIDrRWhGkVkJRFfDZWa9T7keEnKpug9w79t/6lQwWtlMrYjQfufJ2Mixhc5ipXJeajqQhirEOqNcchJFLMua1oeMJji3H4JGwQlPLX/Cm2rE73Kotv46rqTlwIh86sblS2P49XQur//Q9oB8i/898Mdip4S75C79BKv1lGeTUITsk/fYGwwLaKp+P++HuvPEtzww4oap8bV4LNi4icbbJI+L/4ONfH0d/zUttrO6J+hr9mAoOhZZotNCBmNPSsMq0GCrmL0UgzY8iYrEShI4KTcym0V2DKETmRMQEAFGMxMza0S6ZF/YCWbfAnVT1KytslzPCuIu2g+YlmAp8XOQ2hO9tPXiCOtINtv/QCqmmt6HK+IGTZRp64+19hKN30A5XGGWm0jxkVT1x2K69t9dBeM927aQAWdIkgApBxmb5BKy7VtRN05qwX03SeU7EIJ9maUJbGTaeZ1ItxWDpo3eMrn9EATeHPAassdAY5DuRglRlQ+1B7XEsIs6ExE9lhkgnpAhctSGThZNaZA+WFedwCuS8jlexKVx20jikoMvZwEjqpLB48UcKiF+qKDSHsk8iqe+cSMMyO1RnuvnnslHzLDe5ppxrX4WeThebC0/dVKZEjP5Jl3MFXrXtrJ+ny2/Litqdd6pNuRl5DkUSUElXUyzR5UsQSBMEvhlnf4gDDFqryq1dzF2mpMmbJxiT25p5rxOqnbj4HOLcSovtwR2O18InbT55w/1wN/QaNsPcbL997TPhIQxhvhY9WqDW9sFhPfVf2Mt4lxG7IHfcQEDsDrpR8KRERvHwtOaQ18BmNmWAdJSoaMp86IW6vQ12vZHbeIwJox1Lb3/WJ8LgEpZ0udCa9s9AMR3wproNOJtHqY/9J0lJNTz65OZfTudKxcSXVLRCp+xvYal0TGd53w7L93jGuiGkVcxhTBWTrNp
nj6m2BHj+f7Go1hNimX4FlthR7G2G5pL4TC0jizvS4AcN5DPKkdfSWX8+a3pJQ4/DDkjIu2zhoCy6xWG2jFVMVtxaGD5e4vM1KdK12Bygkye6L/WiX8MbeaFzVFKDjv4HNuVnLYzuwEUAuEH3daZNLhAgCEdAoV2Jph1a3LBiPLkiH9Cvpw+XDaCnn4lRAXfoKBOnkfe9p7pkQ9QePZYtAdBeSIFewjB1Q2UJ4bb3ZkkuOkNOdSQhASQNVRCdV+NLCCTfV8Q5fJah4M0BnV7zyYlZojrf7bvaeWy6MpXnvNawkx+DYiJzpdtNPlhqMw/O3cfRAl+/WW+w3zG3aC5KxgE+4VA9kQ4IgvEAT+mVenKQZeLQMhuHx0wfERsXAAAEWgGftHRCfwOuhIOAdJBgjuAG5mjX+en9xpLBxZ9IIpCBdy55B+q+qf/9+bTK+KVWvnAAG8odiIEznylukWXbbJDeJr94qSH9U4PTH8W1TIKkHed/RnxR29oO3c5fsLtihM7uf5FHZT4N4mAmDLTqDi6UM//xe19grqdhaOYXw4Z0C0tEwAwY9b+yg+OKw2maNJyBKqe6MpQnblVOz75EtzN7kJAxo89eN4/k3YhpC+tX4e6EkmUm/Cnvx48E3XMRdIdmOv1L2VmU2NR8ZRPbJgxZOupkkn8RTy/2szZ9arRPiozp/vuec7rVFl2LEQhwWf567KpeQ3AozsOhCfvX94P+SL6HBQKpuLt6P02aojyi5hvG8OOTwUDQH4L6iXyrcgAsJN13RPGwpwmUCttAzoHFVzqbBbaZGe3zYqr7zt5X+qdnG5G2i8J8H2H8ZGwXZ5jZxZ3uGEm2fPfNDc6ydnNbvUqKTjaxgohB0Khr3Y3vVVEgJXdb/osL1ctDoi+uKSNjhlsF16C0t3HGOJ7xC2YJFUWHCFpFnk+N2NdgzlmRgOm9uY52zhs4uusR+Q0HED+FPuaIGcTMNHXSzAD9tAsMuDIdEPitkpArVP74slCoysx/K6HrA5cod30Ghi8hSzJjl1xGUyGadY/3vXIvbMEAo9DIxKYVyKnQXXG7xL8FnsISpB6+TfSxHZVbTAY79aa+aI37fBienXKrfZHJMUSTGLpDU1LzzoF9ddd2gg1vL9DNjTlOKdl4cyLFVPJWQHvLYEhlSqZJ2oFqNX9c/B51ALfhwqLK4DrV3csA0tNUczf0kauwwrRrdv9ApZmou2rO9zA5YxG8ng+7IyYWHEzlTTvEKmK4letCGguOX8sspSgCXNJi048oU7jLPJPStWHW3DKtlfpcBl2sN1HlAI7agSqH0wHKJzYKTssBh1H1J5SnH9lHKZtS0S7Fwu1xczEdV5SMRJU3GJv+AlfBGqOE4gfn+fYZuXdTwKQzdUEfG37U/EUYth59CC2oNvlZGfaRvTWfgzJw2glgxSTu+2qyYXjfcBDYiOVJLT6JpHZeiE5P3pDq2jnS5p03CbxSz8sOcpRL6NNfGXMndAGAcrLFpFErHg6luAEpx26/1yuGAY/cm0HnJUDpQI8VzAycRLceCLHwRbalUEs5yuWY7LP0+1wg7eh5VAXDf3/Fgj2Wtu9SZ6ajGwGU2OiKM122ZkikE2iQf2S5jANFDsAvadtSIfTnAvVsFVZpAj3nccGsRvvVYLerb5S2VMEXpcQbcCaya3Po931y9PgMdL5AxXisW53HkQ+BlabshPzOevgSwYPHE5bCqgjBh8pC+N50U5EE3GboENEQlRDqBZA4UCcowm2pA+uiSwn9h36PJbycLDFFg9n0wX/P8A+XY2JkNWVd+/8oPVYZjl9PPB6hMyUC4JQuQKf0GLxFIwVkSG+oAEkDU2ovRd2J3UpcMrGRO7LzEcARfqg2LJgAAAQaAZ+2akJ/AcLd7b9zlwAQiSBGnQWC/H9K95QPoe9eYjyeYiGRdsPbsPG6rxZLRaDqEynUQl/PtyqfYaXLw7JusI8oVixZjIqETSYo69lwOP3aqm65grc9fLAGsj2
OSp8ApTW402/CV6pgjXLUZtroIzaoHsmlSlJ4Co8ONvH3GTE9Q1MISzoC/7KfE7b2JTuxyO55w4yK2LftyXRFt4DyCVY768gf1hZLjAWN2nVR14XXOO6TKWMgAS9carqaO5t6kQEcJ7jmNIKiugO6Xi8fzKZBWJMcf4IZZ2JbSQuYBp3B8ifRdNgMwOEFGC8rBMt7VAX6w9zi2gASQiKnvHjsCFYu5pq5Q4fVm50SmMJZ2uALRFVOyQrItyqmyrl6vxfD6wHe1J3ux3N3vspuG65d6dgvREF9Q1L7rumzEnfGDKhl2PJRnUaM92rb3d9QdbdtOPbSrisqoHDwucvU6b2EBqtQkzM4Zqf2uFyY65izWD2fsszDFAJNoTDjIpTzbu8h4xQmi/N2hDj0/tFSs3CLKSXcT6dWgq0dMCx2MlTVYqlGgCPouUEaiRrssZ6VcvJbwyIeR1o3JKBiVHT68sU4ZH/JJwToH/TeS6zBJYNA5TlROP1XInyoCAAPsKim5t4qJS0brASco4xY8Yn7mGQOqwPXwkf8sXZYX2OAqJsKZtTRYzX/xHCm7ZPYRgm5ZvUFyP2eWgebe4SxMWBjGpt+FxKttx6YFn/plalo9vAaCDjfysLXZjTBGYfzK5jtkiMG4yHLyERCpsA9uFD/9XPXXEGtH/uEQcE+Z24FxHVN8XUBs7V6kPIGyCeX4dCQ+ROr/h60hJeggWlx25T6J1NSAzwimPhhziJERz6wwvMxzDma2427YSG7J2awqH/lIHvoaL/TQBhkV3H7Lwb4UM4oDEB0OBA4KfqnGp2h8LDptPJmW6vVlOzp+fAuqWFtccqAy5OD+PMkp6Yw/KzWtBAXLd4YHhbVEtRYhigbcTmUeSi3YraoJpdmlWcEcZ/2MRpVQ8jg8D2tQf9WamaIJRJdXBkpM66WUZTzsuB0SNyX4UtVHsmdyAQQiUMECd8G0YgKM9L8lx17YAmp0coFzJBucJMG1P2e9tG4riwggTb9mOj8Ce/VkL3x1j85ThZsdNCzeSA4CjNbMw82UCv8yhK3AxV0gpexOYDq8DDeyR7D8+leBl15YXkQ8Ti0zGLC48XgYlhDPqkxajNyl/q6pi6UmFlpwjY3gHfKXZgjSU9fKbs0Kg+VZwszA9PtJrun7hhjt/KYMQmGb4C0T/7wObntoM4O77H/eqVICg9ObJu3jJ/YF+u9mZLUpkflg7QzaHpuUjC8cA//FMwMuGY44RLEPI8l1MsJHB4c+wW2uFzogH7qJqnXnR4YMRWBAAAEZ0Gbu0moQWyZTAhX//44QBxqq4PyONABzhmHPNAX36O4snOMFXx2jsyjowAfl3ffrZ0lmO3dBV4BCPUCmbofpXMpTma6yLGsHb2ZTqL1I48ScsXdUuoGGyluvg5E5+KqPy6IqSncKSro4UU//71R9F4r/XPWW1fSKSdPdJkefbQKdVvwKJZABD8AOHU3m5btvc586V6BYBIZjFosm48+V2JVxywBHpBO9JoTflpmKRrsWaeEhJDNLExrs6D5xs05F/YF8ZCkBI590JwJPlVCGwQ44sp3+Fao3O2d9QFbOa5eAMpBTslEsgF727eoo/d1YEGNJynx7YXbemd7m49eSA7TEVNuDp0Hscay7iIZkmWBD6MvakEye0myBnpTvvDIY+gPY8HtE6qYFr+MeLJGd132m0Ra6YoXSCTJAfbdVGDxOLbEcN7QyplLpQ5jyOQEFG6OJ6pn+Aki9Vw1cUopIp/3DdNcpd2d3ppRMG6fMH6QAeVhTABA4mYmdLpbSKKORLh8VZobml8yNUODx9zHcBotxBvKS9WAyz/65GJl7gC/tGI72drVTKL1GabfYGHx2m9VFcMNw4/0tXxW2MyDjsaZ70JP7C8qFMwKOHkQeZW8/7yIYCBTCynwccTLRlHeCgkMW3FKxiRlm/N9JwA5nnZTxYn89xGDxhM3QT9V7ObOqiLwZdwr/al3jVNcgpwixBFiwfIzk9NzyDP
3tx60/dJV2NfSP6R1QcVEMlXb7Zwyvli2Jgz9Q9bFZfWu6kTh2WTdtkK6WplXCXyFxRJtmlXOMnjfXRgiyGjONA31uBbvLU1ZwateBFhy0foU3iBRDsLZrv7b92Sl0gcnd0Ujr8ulLYODgjNGg1FcBaHUHCpYwe92JZZDbYQULXi6CjMfRo0i6g1obrerFhcmSojC48jNxJyBFJI3ok0xikH1BgyQrA31hLRP4cqJbVU2yDrjsQPpF4dCx3GC8ddZyrmTjKPTIT1TTHN44iMrWuuLLEbUxK37NPqxdH/Ae0YgWrbCyu/+LivGTKPIgBjw7vZDv3g5vwQP2d2H8K/9rmD5RWEvs5xsmd+wGJYVZT5J1B3MMoMyZLZsb8h0ef2txoWg7k5TSXoth9o3aXKYvNOHucEAWqVQLbFbgKJo3Jm8qJb4KRgZI85TE1g4iZwBvr+CCBnR1w4YGLAo+FXR81/F1L6zcjtO+LQa1SGHxOwwCTn5G0X/L4pXuVt0cQ5hMHKyTPMK9URKtE9ZuY1AD+QSdpsVNkx2g/aRlWzEjmpMc/AQSWuQCO3xwooesPO1OFWuhIkpJ+rmLoMmPGVrEIJivgI6DU0OmHr1iqM+lXfsu0GDeUPgOoWwIM7jBNWM8/wmHBz2I5vbu5btXw++g006fLk5VnGcKASqwasDkiuFRt4CdelQXnoUle76090bBFeYtxt0TImJ0P55XIy2rUSdFTlKzCDOZXsfyp6At5ondxhR4KTOasKcNUSjX1BHyuZOY9f2A4U0PRY3AAAD80Gf2UUVLCv/AWOvjo7ifpMfQAgDLlJPRivPm//4pAHspu3azaI396xCrOeVceNS5x4KKAr8vmsquBeIdl9/IEzrvfjwinkgTDxWmZ85D1aKzE3xKNFrXjs1WfbFjwiO2LQTRldO9UxiOba9j5df4C6Is8zJK8MBMaizE/XkDnMl1giWcmPKA0WN1dLv3IaR9WYfEhGlredd9joi+Z0+NWpe8dVUfFCPCxCMTEcCLJWoLHQ3wBdu6sQfrcubeS+TjCXJsrx7Qrz2p67R9I/2VJhwSE+F5VTX/mveGKUkStRatifHVYO6GIYVGKDHsTp5iTV1lNUeTubmWFabyL1WbMjFd2h4xTB95sxrOyAAAnJ6TaMmWs0YM9rSqh3D9FChGQSIWm6vAzUqsODPSCEFx6GgCjBkQpGKwOOP3SI7AQiDhUnjq9gq/19PMkpXiltdCY78NkaMgx/RCNMb55fVHK4wR5DEFyUi20MJzvmSYXpA7Kk+vJkvsi2VD5XPZ43Gts63HWaBSNvsQ85ez+lwnGXAIZKam8BarqzPku96bi63LKLbiIOZNPGhaP2oDEo2pXS7W0Lg3seazB9fpwRUWqSxhtUMGGSzt4n1ywu9FTdpHPhcT8h4NkLv+35QEAqtrZM04R22ruPOHohxgbhVwm7PKBG+NSgwc3flMu/If5611TTQHUu8l4ISkdhoKL0H2QYhrOV1uAUHwKfAxrBiB8pLmm187JRrRH3qY7CooSP1rfRaNVYd4FhXGsJS987e+rSmVG/2WOdXfnzXAMR7oDvMFsuHswqduTTBvXIN0PygQIUgAFS2/j6I1B0vIdpkJ7pTLFEvHTJaBvVhFQNUd5NS+aittRYqgwWl/F4Z6cNdoMabN5zEFxY8ZdybheFoBq+hJLoPGpIvwSNmpGW2rWSmhXFbnB6ZVUIuZBcFopMX/tsrle0VyNLauUVATQXFu/KeAULu7KTKZpMVQ5iBGx5hRzwQ6RoY4PqSiOCDIQFa1Pamit8pq2aSZLWbN9KBLs9781dJ5XE6WO3hFsKmhjbCyqvAx25jnWGK1dXpTG5l8Jh++T7m/yvQ+GlZ8mvHydGb4V3BHE7LuCYryzKnxMgniVPSMBNcTKi4EuIeaNWIiagGcaj6xGy4ZTVuBGO3c/JawUePT8AhRc+fL5vKiYTS85jnBm6a3ngxMMY3g8TUtFhFhrhgjI4BPD/
5OAMEireRWoRqpwxnYFUXVkwol+/IySYNNAHmB+n0s9goMptFLNq7Z3TOuGq2vshcLQQiBjqxK53sCe5d2AjENbNbTng1aD3nTHhPjt/7MxEwUrXxpoDR622gcMTXOYfH5eednPiwHAAABFMBn/h0Qn8DtkXQATrNorqu3m5xXud+rNEgNFmTfYFdpytu8LCW9QRm5xijX/UBxh0werDdzFeJn5CHyAGQNiSFGR3TlfoaBTqANOd3W/61dWAui1+Oq01vX4S6AEn+VZ0nFB6WNKw7ekTZPOgREBc0fT3DXU4wmnAOwUw3UFxPO2Uc3lZXZZvBZwmjfMTOtmqNRqQG2VBAshd4AwB2meCfC94GuptX1w3K23DiGdsjdiLjppV8jgLXlZEN3l8qCnnmiz9Vuw5oSlmIca2Ot8fCXR0HHEsoSbTHJtZKetEynVuuUZjSUVVhkDPcNfF84absF+N6JMNev2TBrCnx2HDS+TXqVAKEi+glMxrPr5BzH7H8PeisYPdRVajVnGnj0AmatGnn1MGPDPHfik18v2LamgjjtfgeCPS0a2MaoBzprBX2XGyXCOxBYOs9nxY/3YP60qOt31z4hVKQe66nHp/LZvZ2D8rtZo7OZcjLzxf6jfkNGzsJtyUgGDKqoVeFHt7V8URGeq0VsZ5Ki9Xn+iN9sN0zgYY9jyTl0rD3RUY0k6AHIpcmW4c+v1yPV5VK4+xvt+loXQSg9TFhcvusD6PduwuTVMfZhebCmjCV2TPrAD6qJLn2jCrHo4IeRzxF0uphmXtX1vSV4/dYM9BG2Yu8oOWDnWl52cSzqG5uz2B52MsGMxFESxQq8a2wT+TZWy+R0dqhUjWPR1Av5TBNmMXrBQdenpIsxGUD5TkUtdDlNNyCGs+0QLvFyLh10I6flMr5qvix3mIgrhb/jp3IWtF0ge6bgrahXGdZ2tRrsbSaOBe7ZRBkgp+hYrg5gmwZ5Q1aLhVdXncUHrtJxOahoiN3yNdcu+0E7tZ9pmk2u+UGGCTuKhAAAkT+CN4vbaW9GkAGB6N6u0E9p6eroXyaVbv9NTyYcQgVSDrIIVaojlpUqMPU/IWv2ByAh/PxVq3D2Hm7yFRxVCLy+hRVQ1ZP1fQPbjsgneQTfglwk+1knPZxc0mVjAzv8eP9bhjlH36HZZO/7gg6lQ7FD20kXiazk8M93RmRVVO6FO4azr4PtWaKcMImiAzxv/naVoFdBE/7vbQ6kbsi4As9nZXRjrqZm9ambyxyNUKEJuXeSOnCVbWGpgxT3GQSIDNciCfQ0Y7ImC8WBslU2mCe6pDD3YZbMDrnDdTsTtDJygogsQzitIPUM3oteuO30uQnkqgjZ0m2DZa+mkDr+QSbO84iqLBuqM2/70jXiWmOtM0d2bIroVSgQn/oEWr5/HOxejoYvaqYh1lcasUlVts9txnpnIDiXqTDhSIYCKKIFXa9df/WCGCxMMkmcyGp9B/mF2/c6TYhvWJOlAA+59WZsbm1uWubk1Q8RTsSBZvvVnDjh4t6dgwemzsJ+KSeR7ZNUNmHDynrRr90knxOeLmoIGw9hdcIYoIgW0DGtuY4KBd2YW1KdxIlYYTX/s/klCpGiMfHU4ju738dSekAAAQgAZ/6akJ/AcO9Wa6vp9BLgbDMwAtbnnj/q/W2xT28HocJRFBFhORY+S1IUFGmlHDanVke3FXqJa1wEJVCmloqIx5wEBgCS0JXm19NELYie6Fsy48xOU4+nWq95lxm8qSYoS/llrgk4F+nPxCqtm/hSkcLZWqUJgr8u3ZGsaUhiyzSr+WTSFSvBlgK5DhqGI7DsXmI2PPdBlSc8jMEnnR4zQ4zGkTDpDFGT5rNRbb642Agaha7jYwsVa9Z91Di8miBCpKckmUbVDSuB18inNizHXCUaWFtDfY3MJQvt4GJ3MXtRV3dl7i6YBY5+O5pi7ZJ/7ijKjHNLHOM788FLwa5HdLK+xzirx+612IzUgwbxm6g2nCMloLlP9C
0+Fv4Mg3ST/wPhYck4M4fbMLZfyr21RjUn5Q/UWqTftIE9IbtF6PwcfLxBIRdw3RGw+nw2HlE6+NG6w6hQSh0ppezeTYrp8U4x5uf/jCquQLOylR82u2XqeqVOtFeAi0UFxT3i4bRuzywjTpsvlxbmrRVa2S0jt/i/zz/nU4T6OG5ObRevGdLfX6WK6PlXfIydpqtl+nydvU/hoWzmyGQEVeJguDL1viRuTnHGsb/6BrRJX7EFieDvmfr0umZkQeM3/vAzy16dy/2E9PSVMQMDrieBOc+iCaSUjo0RaZpoAsCr5LmxGWdl5pAJUDvdn1pvXIbjoUytM9q2lu0nTyZmiZxcXP5KL1Gj3ruFqTkQv7e++KQ5uNoFwSHHO6a1PrJAsF/94aqjF9T3QhrOhSv3OM5ABSkP1FYsCnVPcTQwI8QOkQxHJbX61QRYMwlEE1HaV3ditJ5A0rKlabyLJOv7LFiYzZxZmmvhXwNo43kfvllJWhmA7RN6GbIiPPCsq/5KAoPSSp4EQT/0cQZskXb8YpcxzIK7BSEhIwkl6EQ6vbIsnEashr1LmxfT95VaOG4EIIwWkEX36MGBHhmA4oygRDmSY7XihgscyWCCIOFv4ITpMeKZdOp2zICVNgv8F30r8QD98/3HCHdrISXUlUuyIm5AHtN2qBOt7F7JPTTCDQMGvIw20mXuq2xMrZr25xZsPC/ca0WjMYXJcWxituXPBWeeP5OfkBTPMRPb2MED3pyZwWhhN+YxJZSIw832SO1MeKJ5E7pFTt2zw6b/ujaWOsS/dFt7hhmZA2I/iESIIhboYd++vljgJZTyd8eD1czxJabM0Rl1HUECxCy1xgT38Q36yMwb9PAhiaqMIyffJYE4bnMs29cFgGjGPLTbQDpStR6XmE+fvwgzP/lLiaayquW5BjqfKd3kpG8G/4vs5a8wLUuAk80rjPs9tozZypMnfUNhoHoZksGvvj4oVyAu6RS+6VF3msYLEMyl3itUgK08SliquU4PUU94e0ohb+njSxQaVSAAAAEAkGb/EmoQWyZTAhP//3xAEV8nPW7Is0eIAS208RJfe/YbKvm7rfvwdSk2Fj1Cakvk8W7ofJUPfW7V39cGhIs7DynHnCqZzFBQnkjVWtUYT7eC4m0ttpDSBl5egzFNJH49hX9vIHk2516wnRPLOqpbyteb4rAQsSqooDclBReSmxsat9epNAsks3hA0OKWVQSFZ4w8HbKin+SkHWSAZUupGrbOVtxzq6nj880JJHkvhmkCATjweCfdOLa4JC+6tHCS5+e+9FsLflkxVeqHXyTyDjef0BqFsDGNloX/8SpBx1BqeMRANVvlmu2c559Uh7FFADP5jwc0jDLQqIUFCSqE0ktZNZhmJoZdoyNbYYOYF3VUJW2HW/wBA3XKMREmfgyUrGDlpVhDvnsbvcA5f7TVaJJ4nysHg8ik28GzEvPQY9P+Iu+i1reMTYWDDtGqxBC785tz7glkqs92fT99Zkn23VTHfjiwR3ddi6MuvE/ry3vfwbgoTfRS39LBtDgYTH8hsZoQu7MI5WPZHiKoNudPvqwdZY05YhGgB8/TAUE3QfAd++YsYEA7muGR8rqE67KZNJPvS+qecUGG043WpZRaBo0jxovPNlFnGYHz4dmSZ1vT8pnGFG6OPEr4I8iv6rjCjsmcW8muhkLo66v0tOgs+qeaxn/aSw9WNFR9C5HF94NwaOvXjVvHNlLc6mQLZqEf+0vgA9OYKf7Shs6sqvNT/vvT0WfG1Tf0r78GH7XpUlQEjmtXrZzwhMi8EfIwBjdDznJs9veO4mOd9P98MoUH+sl43Rn07qUPZ0ynLs8rsuIJ6d8MrwV1IucRYPN3xlilsW4SuH8dHioxtmbTXBHooSpqBhRTZcX4ZifFK0pU0ZvRefSBrfSb/rpSOjNpJsPXLC3kS7U2sJzoqPZMGsGsPIyTfp6yI9xk1g919JtnIH5j7/Vye/VgL9xENrIEl0YRz1
cj02SVLgY5lRYFTCIeO49BCV60Uxr/YkuMlzrQCZrr2HpfQtl3fNxNbPR50XdLprNLICjGamra83RG2PJSdzCbQ23sSV7wd9E1yH5fTTSSLqPBv9vndUqxpdUND+y5umZwPNWJciFjQoIoVepilonI2XBtyoD0oUYiEeBBd9uHnQxarsT4lVSnVkUnLzmEhFDvlZGA6zkAIYm26hFZlcPoGovJDXr+wZSIMGvegHQjcyI2FaM1MyaRnkckslQUjcx9Ky3A/3OX1nOtndmiABnrS11RULfGVuSz2N8KnqZvLjv6J0ObmNCD3tZ8sDOTCf4eGVLQIAOSDVFgGy7lWIEIYALV+RA52ofikFkAjvSLWHCK2entHKWoB+dYq/kqDd7GF/P1qqRbmjYW5DEos6z4QAAB2dtb292AAAAbG12aGQAAAAAAAAAAAAAAAAAAAPoAAAMHAABAAABAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACAAAGkXRyYWsAAABcdGtoZAAAAAMAAAAAAAAAAAAAAAEAAAAAAAAMHAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAEAAAAABjAAAAYwAAAAAACRlZHRzAAAAHGVsc3QAAAAAAAAAAQAADBwAAAQAAAEAAAAABgltZGlhAAAAIG1kaGQAAAAAAAAAAAAAAAAAADwAAAC6AFXEAAAAAAAtaGRscgAAAAAAAAAAdmlkZQAAAAAAAAAAAAAAAFZpZGVvSGFuZGxlcgAAAAW0bWluZgAAABR2bWhkAAAAAQAAAAAAAAAAAAAAJGRpbmYAAAAcZHJlZgAAAAAAAAABAAAADHVybCAAAAABAAAFdHN0YmwAAACYc3RzZAAAAAAAAAABAAAAiGF2YzEAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAABjAGMAEgAAABIAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAY//8AAAAyYXZjQwFkABX/4QAZZ2QAFazZQZDPu4QAAAMABAAAAwDwPFi2WAEABmjr48siwAAAABhzdHRzAAAAAAAAAAEAAABdAAACAAAAABRzdHNzAAAAAAAAAAEAAAABAAAC8GN0dHMAAAAAAAAAXAAAAAEAAAQAAAAAAQAACgAAAAABAAAEAAAAAAEAAAAAAAAAAQAAAgAAAAABAAAKAAAAAAEAAAQAAAAAAQAAAAAAAAABAAACAAAAAAEAAAoAAAAAAQAABAAAAAABAAAAAAAAAAEAAAIAAAAAAQAACgAAAAABAAAEAAAAAAEAAAAAAAAAAQAAAgAAAAABAAAKAAAAAAEAAAQAAAAAAQAAAAAAAAABAAACAAAAAAEAAAoAAAAAAQAABAAAAAABAAAAAAAAAAEAAAIAAAAAAQAACgAAAAABAAAEAAAAAAEAAAAAAAAAAQAAAgAAAAABAAAKAAAAAAEAAAQAAAAAAQAAAAAAAAABAAACAAAAAAEAAAoAAAAAAQAABAAAAAABAAAAAAAAAAEAAAIAAAAAAQAACgAAAAABAAAEAAAAAAEAAAAAAAAAAQAAAgAAAAABAAAKAAAAAAEAAAQAAAAAAQAAAAAAAAABAAACAAAAAAEAAAoAAAAAAQAABAAAAAABAAAAAAAAAAEAAAIAAAAAAQAACgAAAAABAAAEAAAAAAEAAAAAAAAAAQAAAgAAAAABAAAKAAAAAAEAAAQAAAAAAQAAAAAAAAABAAACAAAAAAEAAAoAAAAAAQAABAAAAAABAAAAAAAAAAEAAAIAAAAAAQAACgAAAAABAAAEAAAAAAEAAAAAAAAAAQAAAgAAAAABAAAKAAAAAAEAAAQAAAAAAQAAAAAAAAABAAACAAAAAAEAAAoAAAAAAQAABAAAAAABAAA
AAAAAAAEAAAIAAAAAAQAACgAAAAABAAAEAAAAAAEAAAAAAAAAAQAAAgAAAAABAAAKAAAAAAEAAAQAAAAAAQAAAAAAAAABAAACAAAAAAEAAAgAAAAAAgAAAgAAAAABAAAKAAAAAAEAAAQAAAAAAQAAAAAAAAABAAACAAAAAAEAAAoAAAAAAQAABAAAAAABAAAAAAAAAAEAAAIAAAAAAQAABAAAAAAcc3RzYwAAAAAAAAABAAAAAQAAAF0AAAABAAABiHN0c3oAAAAAAAAAAAAAAF0AABYXAAAKJwAAB+MAAAa2AAAE6QAAB18AAAVkAAADpwAAA38AAAXIAAAERwAAA0oAAAKzAAAFKQAABPIAAATLAAAEUgAABP4AAASaAAAENAAABAEAAATbAAAEYwAABBsAAARWAAAEhgAABJYAAAQ4AAAESwAABH8AAATiAAAEjgAABCMAAATDAAAEcwAABEIAAAQdAAAEiAAABFkAAARPAAAEUgAABJgAAAQ9AAAESAAABF0AAAS2AAAEngAABCQAAAQiAAAEvAAABHYAAAQ8AAAEWgAABIIAAARTAAAEWQAABCYAAASrAAAEkgAABDgAAAQ7AAAFDAAABJIAAAQ7AAAEEQAABI4AAARlAAAEFAAABIMAAATJAAAEnAAABEUAAAP7AAAEcwAABJYAAAQIAAAELgAABK4AAARhAAAEWAAABEQAAATJAAAEQQAABCcAAARuAAAERwAABF4AAAQeAAAEawAAA/cAAARXAAAEJAAABAYAAAAUc3RjbwAAAAAAAAABAAAAMAAAAGJ1ZHRhAAAAWm1ldGEAAAAAAAAAIWhkbHIAAAAAAAAAAG1kaXJhcHBsAAAAAAAAAAAAAAAALWlsc3QAAAAlqXRvbwAAAB1kYXRhAAAAAQAAAABMYXZmNTcuODMuMTAw" type="video/mp4">
Your browser does not support the video tag.
</video>
Generate some more samples.
```python
# Advance the sampler by 100 more MCMC steps.
# NOTE(review): p, post, q appear to be the final walker positions,
# posterior values, and proposal-density values -- confirm against
# the kombine Sampler.run_mcmc documentation.
p, post, q = sampler.run_mcmc(100)
```
```python
# Diagnostics: acceptance-rate history in the first panel, plus one
# trace-plot panel per parameter (this setup has sampler.dim == 2,
# matching the two remaining axes).
fig, [ax1, ax2, ax3] = plt.subplots(1, 3, figsize=(15, 5))
ax1.plot(sampler.acceptance_fraction, 'k', alpha=.5, label="Mean Acceptance Rate");
for p, ax in zip(range(sampler.dim), [ax2, ax3]):
    # Low alpha so the overlaid per-walker traces read as a density.
    ax.plot(sampler.chain[..., p], alpha=0.1)
ax1.legend(loc='lower right');

Plot independent samples.
```python
# Estimate a per-walker autocorrelation length (ACL) from the mean
# acceptance rate over the last 100 steps, then thin each walker's
# chain by its ACL to collect approximately independent samples.
# NOTE(review): the 2/rate - 1 formula is kombine's acceptance-based
# ACL estimate -- confirm against the kombine documentation; also
# note this reads sampler.acceptance while the plot above used
# sampler.acceptance_fraction.
acls = np.ceil(2/np.mean(sampler.acceptance[-100:], axis=0) - 1).astype(int)
ind_samps = np.concatenate([sampler.chain[-100::acl, c].reshape(-1, 2) for c, acl in enumerate(acls)])
print("{} independent samples collected with a mean ACL of {}.".format(len(ind_samps), np.mean(acls)))
corner.corner(ind_samps);
```
25000 independent samples collected with a mean ACL of 2.0.

```python
```
|
bfarrREPO_NAMEkombinePATH_START.@kombine_extracted@kombine-master@examples@2D_gaussian.ipynb@.PATH_END.py
|
{
"filename": "volume-plot-circum_vel.py",
"repo_name": "amrex-astro/MAESTRO",
"repo_path": "MAESTRO_extracted/MAESTRO-master/Util/postprocessing/urca-tools/volume-plot-circum_vel.py",
"type": "Python"
}
|
#!/usr/bin/env python
import yt
from yt.units import dimensions
from yt import derived_field
import yt.visualization.volume_rendering.api as vr
from yt.visualization.volume_rendering.transfer_function_helper import TransferFunctionHelper
from yt.visualization.volume_rendering.api import Scene, VolumeSource, Camera, ColorTransferFunction
import numpy as np
import argparse
# Volume-render the ('boxlib', 'circum_velocity') field of a MAESTRO
# plotfile with yt, writing a transfer-function preview PNG and the
# final rendering PNG next to the input file name.
parser = argparse.ArgumentParser()
parser.add_argument('infile', type=str, help='Name of input plotfile.')
parser.add_argument('-rup', '--rup', type=float, default=1.0e8, help='Maximum radius (cm). Default 1.0e8.')
parser.add_argument('-zoom', '--zoom', type=float, default=1.0, help='Camera zoom factor. Default 1.0.')
parser.add_argument('-dd', '--drawdomain', action='store_true', help='If supplied, draw the boundaries of the domain.')
parser.add_argument('-dg', '--drawgrids', action='store_true', help='If supplied, draw the grids.')
parser.add_argument('-da', '--drawaxes', action='store_true', help='If supplied, draw an axes triad.')
parser.add_argument('-alpha_ones', '--alpha_ones', action='store_true', help='If supplied, set the transfer function values to ones.')
parser.add_argument('-res', '--resolution', type=int, default=2048, help='Resolution for output plot.')
args = parser.parse_args()
# Open Dataset
ds = yt.load(args.infile)
# Render only a sphere of radius rup (cm) about the domain center.
core = ds.sphere(ds.domain_center, (args.rup, 'cm'))
# Create Scene
sc = Scene()
# Create Sources
so_circum_vel = VolumeSource(core, ('boxlib', 'circum_velocity'))
# Field bounds (cm/s) over which the transfer function is defined.
mag_vel_bounds = np.array([1.0e1, 1.0e6])
# Gaussian layer width; add_layers takes the variance (sigma**2).
mag_vel_sigma = 0.08
nlayers = 6
if args.alpha_ones:
    alphavec = np.ones(nlayers)
else:
    # Opacity ramp: faint layers at low velocity, opaque at high.
    alphavec = np.logspace(-5,0,nlayers)
tfh = TransferFunctionHelper(ds)
tfh.set_field(('boxlib', 'circum_velocity'))
tfh.set_log(True)
tfh.grey_opacity = False
tfh.set_bounds(mag_vel_bounds)
tfh.build_transfer_function()
# mi=1, ma=6 are log10 of mag_vel_bounds since the field is log-scaled.
tfh.tf.add_layers(nlayers, colormap='viridis', w=mag_vel_sigma**2, mi=1, ma=6, alpha=alphavec)
# Save a preview plot of the transfer function itself.
tfh.plot('{}_tfun_circum_vel.png'.format(args.infile))
so_circum_vel.transfer_function = tfh.tf
# Add sources to scene
sc.add_source(so_circum_vel)
# Add camera to scene
sc.add_camera()
# Set camera properties
sc.camera.focus = ds.domain_center
sc.camera.resolution = args.resolution
sc.camera.north_vector = [0, 0, 1]
# Place the camera on the (1,1,1) diagonal, scaled by rup.
# NOTE(review): 5.12e8 looks like a reference domain width in cm for
# the standard URCA setup -- confirm against the problem setup.
sc.camera.position = ds.domain_center + [1.0, 1.0, 1.0] * ds.domain_width * args.rup/5.12e8
sc.camera.zoom(2.5*args.zoom)
# Annotate domain - draw boundaries
if args.drawdomain:
    sc.annotate_domain(ds, color=[1, 1, 1, 0.01])
# Annotate by drawing grids
if args.drawgrids:
    sc.annotate_grids(ds, alpha=0.01)
# Annotate by drawing axes triad
if args.drawaxes:
    sc.annotate_axes(alpha=0.01)
# Render
sc.render()
# sigma_clip brightens the image by clipping the brightest pixels.
sc.save('{}_rendering_circum-vel.png'.format(args.infile), sigma_clip=6)
|
amrex-astroREPO_NAMEMAESTROPATH_START.@MAESTRO_extracted@MAESTRO-master@Util@postprocessing@urca-tools@volume-plot-circum_vel.py@.PATH_END.py
|
{
"filename": "deltat.py",
"repo_name": "rat-pac/rat-pac",
"repo_path": "rat-pac_extracted/rat-pac-master/python/ratproc/deltat.py",
"type": "Python"
}
|
from ratproc.base import Processor
from rat import ROOT
class DeltaT(Processor):
    """Processor that stamps each event with the time elapsed since the
    previous event, in nanoseconds."""

    def __init__(self):
        Processor.__init__(self)
        # Timestamp of the most recently seen event; seeded with a fresh
        # TTimeStamp so the attribute always exists before the first event.
        self.t0 = ROOT.TTimeStamp()
        # The very first event has no predecessor, so its delta is zero.
        self.first = True

    def event(self, ds, ev):
        """Set ev's delta-t to the nanoseconds since the previous event."""
        now = ev.GetUTC()
        if self.first:
            self.first = False
            elapsed_ns = 0.0
        else:
            whole_seconds = now.GetSec() - self.t0.GetSec()
            elapsed_ns = whole_seconds * 1.0e9 + (now.GetNanoSec() - self.t0.GetNanoSec())
        ev.SetDeltaT(elapsed_ns)
        self.t0 = now
        return 0
|
rat-pacREPO_NAMErat-pacPATH_START.@rat-pac_extracted@rat-pac-master@python@ratproc@deltat.py@.PATH_END.py
|
{
"filename": "gmosaicfile.py",
"repo_name": "GeminiDRSoftware/DRAGONS",
"repo_path": "DRAGONS_extracted/DRAGONS-master/gempy/gemini/eti/gmosaicfile.py",
"type": "Python"
}
|
import os
import tempfile
import re
import astrodata
import gemini_instruments
from gempy.utils import logutils
from gempy.eti_core.pyrafetifile import PyrafETIFile
from gempy.gemini import gemini_tools
log = logutils.get_logger(__name__)
class GmosaicFile(PyrafETIFile):
    """Coordinates the ETI files for the IRAF task gmosaic.

    Holds the state shared by the input/output list helpers: the task
    name, a per-process temporary-name prefix, and the lists of files
    written to disk for IRAF.
    """
    inputs = None
    params = None
    diskinlist = None
    diskoutlist = None
    pid_str = None
    pid_task = None
    adinput = None
    ad = None

    def __init__(self, inputs=None, params=None, ad=None):
        """Set up gmosaic bookkeeping for the given inputs/params/ad."""
        log.debug("GmosaicFile __init__")
        PyrafETIFile.__init__(self, inputs, params)
        self.taskname = "gmosaic"
        self.diskinlist = []
        self.diskoutlist = []
        # Embed the PID in temporary names so concurrent reductions
        # cannot collide on disk.
        self.pid_str = str(os.getpid())
        self.pid_task = self.pid_str + self.taskname
        # A single AstroData input takes precedence over the input list.
        self.adinput = [ad] if ad else inputs

    def get_prefix(self):
        """Return the per-process prefix used for temporary file names."""
        return "tmp" + self.pid_task
class InAtList(GmosaicFile):
    """Prepares the temporary input images and the @-list file that the
    IRAF task gmosaic reads through its ``inimages`` parameter."""
    inputs = None
    params = None
    atlist = None
    ad = None

    def __init__(self, inputs=None, params=None, ad=None):
        """
        :param inputs: AstroData inputs for the task
        :param params: parameter dictionary for the task
        :param ad: single AstroData input (takes precedence over inputs)
        """
        log.debug("InAtList __init__")
        GmosaicFile.__init__(self, inputs, params, ad)
        self.atlist = ""

    def prepare(self):
        """Write each input to disk under a temporary prefixed name and
        build the @-list file pointing gmosaic at those images."""
        log.debug("InAtList prepare()")
        for ad in self.adinput:
            ad = gemini_tools.obsmode_add(ad)
            origname = ad.filename
            ad.update_filename(prefix=self.get_prefix(), strip=True)
            self.diskinlist.append(ad.filename)
            log.fullinfo("Temporary image (%s) on disk for the IRAF task %s" % \
                         (ad.filename, self.taskname))
            ad.write(ad.filename, overwrite=True)
            # Restore the in-memory name; only the on-disk copy is renamed.
            ad.filename = origname
        self.atlist = "tmpImageList" + self.pid_task
        # Context manager guarantees the list file is closed (and flushed)
        # even if a write raises part-way through.
        with open(self.atlist, "w") as fhdl:
            for fil in self.diskinlist:
                fhdl.write(fil + "\n")
        log.fullinfo("Temporary list (%s) on disk for the IRAF task %s" % \
                     (self.atlist, self.taskname))
        self.filedict.update({"inimages": "@" + self.atlist})

    def clean(self):
        """Remove the temporary images and the @-list file from disk."""
        log.debug("InAtList clean()")
        for a_file in self.diskinlist:
            os.remove(a_file)
            log.fullinfo("%s was deleted from disk" % a_file)
        os.remove(self.atlist)
        log.fullinfo("%s was deleted from disk" % self.atlist)
class OutAtList(GmosaicFile):
    """Manage the temporary output image names and @-list for gmosaic
    (its ``outimages`` parameter), and load the results back afterwards.
    """
    inputs = None
    params = None
    suffix = None
    recover_name = None
    ad_name = None
    atlist = None
    ad = None

    def __init__(self, inputs, params, ad):
        """
        :param rc: Used to store reduction information
        :type rc: ReductionContext
        """
        log.debug("OutAtList __init__")
        GmosaicFile.__init__(self, inputs, params, ad)
        self.suffix = params["suffix"]
        self.ad_name = []
        self.atlist = ""

    def prepare(self):
        """Compute the final and temporary output names and write the
        @-list that gmosaic will populate."""
        log.debug("OutAtList prepare()")
        for ad in self.adinput:
            origname = ad.filename
            ad.update_filename(suffix=self.suffix, strip=True)
            # Final (user-facing) name, and the prefixed on-disk name
            # that the IRAF task actually writes.
            self.ad_name.append(ad.filename)
            self.diskoutlist.append(self.get_prefix() + ad.filename)
            ad.filename = origname
        self.atlist = "tmpOutList" + self.pid_task
        # Context manager guarantees the list file is closed even on error.
        with open(self.atlist, "w") as fhdl:
            for fil in self.diskoutlist:
                fhdl.write(fil + "\n")
        log.fullinfo("Temporary list (%s) on disk for the IRAF task %s" % \
            (self.atlist, self.taskname))
        self.filedict.update({"outimages": "@" + self.atlist})

    def recover(self):
        """Load the task's output images back into memory and return them
        as a list of AstroData objects with their final filenames."""
        log.debug("OutAtList recover()")
        adlist = []
        for i, tmpname in enumerate(self.diskoutlist):
            ad = astrodata.open(tmpname)
            ad.filename = self.ad_name[i]
            ad = gemini_tools.obsmode_del(ad)
            adlist.append(ad)
            log.fullinfo(tmpname + " was loaded into memory")
        return adlist

    def clean(self):
        """Remove the temporary output images and the @-list from disk."""
        log.debug("OutAtList clean()")
        for tmpname in self.diskoutlist:
            os.remove(tmpname)
            log.fullinfo(tmpname + " was deleted from disk")
        os.remove(self.atlist)
        log.fullinfo(self.atlist + " was deleted from disk")
class LogFile(GmosaicFile):
    """Provide a temporary log-file name for the IRAF task gmosaic."""
    inputs = None
    params = None

    def __init__(self, inputs=None, params=None):
        """
        :param rc: Used to store reduction information
        :type rc: ReductionContext
        """
        log.debug("LogFile __init__")
        GmosaicFile.__init__(self, inputs, params)
        self.tmplog = None

    def prepare(self):
        """Create a named temporary file and register it as ``logfile``."""
        log.debug("LogFile prepare()")
        # Keep a reference on self: NamedTemporaryFile removes its file as
        # soon as the object is garbage collected, which for a local
        # variable happens when prepare() returns -- before IRAF writes to
        # the name.  Holding it keeps the path valid for this object's
        # lifetime.
        self.tmplog = tempfile.NamedTemporaryFile()
        self.filedict.update({"logfile": self.tmplog.name})
|
GeminiDRSoftwareREPO_NAMEDRAGONSPATH_START.@DRAGONS_extracted@DRAGONS-master@gempy@gemini@eti@gmosaicfile.py@.PATH_END.py
|
{
"filename": "_font.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/graph_objs/heatmap/hoverlabel/_font.py",
"type": "Python"
}
|
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Font(_BaseTraceHierarchyType):
    """Hover-label font settings for heatmap traces.

    Auto-generated plotly wrapper for the ``heatmap.hoverlabel.font``
    attribute path.  Each property delegates storage and validation to
    the base class via ``self[...]``.
    """
    # class properties
    # --------------------
    _parent_path_str = "heatmap.hoverlabel"
    _path_str = "heatmap.hoverlabel.font"
    _valid_props = {"color", "colorsrc", "family", "familysrc", "size", "sizesrc"}
    # color
    # -----
    @property
    def color(self):
        """
        The 'color' property is a color and may be specified as:
          - A hex string (e.g. '#ff0000')
          - An rgb/rgba string (e.g. 'rgb(255,0,0)')
          - An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
          - An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
          - A named CSS color:
                aliceblue, antiquewhite, aqua, aquamarine, azure,
                beige, bisque, black, blanchedalmond, blue,
                blueviolet, brown, burlywood, cadetblue,
                chartreuse, chocolate, coral, cornflowerblue,
                cornsilk, crimson, cyan, darkblue, darkcyan,
                darkgoldenrod, darkgray, darkgrey, darkgreen,
                darkkhaki, darkmagenta, darkolivegreen, darkorange,
                darkorchid, darkred, darksalmon, darkseagreen,
                darkslateblue, darkslategray, darkslategrey,
                darkturquoise, darkviolet, deeppink, deepskyblue,
                dimgray, dimgrey, dodgerblue, firebrick,
                floralwhite, forestgreen, fuchsia, gainsboro,
                ghostwhite, gold, goldenrod, gray, grey, green,
                greenyellow, honeydew, hotpink, indianred, indigo,
                ivory, khaki, lavender, lavenderblush, lawngreen,
                lemonchiffon, lightblue, lightcoral, lightcyan,
                lightgoldenrodyellow, lightgray, lightgrey,
                lightgreen, lightpink, lightsalmon, lightseagreen,
                lightskyblue, lightslategray, lightslategrey,
                lightsteelblue, lightyellow, lime, limegreen,
                linen, magenta, maroon, mediumaquamarine,
                mediumblue, mediumorchid, mediumpurple,
                mediumseagreen, mediumslateblue, mediumspringgreen,
                mediumturquoise, mediumvioletred, midnightblue,
                mintcream, mistyrose, moccasin, navajowhite, navy,
                oldlace, olive, olivedrab, orange, orangered,
                orchid, palegoldenrod, palegreen, paleturquoise,
                palevioletred, papayawhip, peachpuff, peru, pink,
                plum, powderblue, purple, red, rosybrown,
                royalblue, rebeccapurple, saddlebrown, salmon,
                sandybrown, seagreen, seashell, sienna, silver,
                skyblue, slateblue, slategray, slategrey, snow,
                springgreen, steelblue, tan, teal, thistle, tomato,
                turquoise, violet, wheat, white, whitesmoke,
                yellow, yellowgreen
          - A list or array of any of the above
        Returns
        -------
        str|numpy.ndarray
        """
        return self["color"]
    @color.setter
    def color(self, val):
        self["color"] = val
    # colorsrc
    # --------
    @property
    def colorsrc(self):
        """
        Sets the source reference on Chart Studio Cloud for color .
        The 'colorsrc' property must be specified as a string or
        as a plotly.grid_objs.Column object
        Returns
        -------
        str
        """
        return self["colorsrc"]
    @colorsrc.setter
    def colorsrc(self, val):
        self["colorsrc"] = val
    # family
    # ------
    @property
    def family(self):
        """
        HTML font family - the typeface that will be applied by the web
        browser. The web browser will only be able to apply a font if
        it is available on the system which it operates. Provide
        multiple font families, separated by commas, to indicate the
        preference in which to apply fonts if they aren't available on
        the system. The Chart Studio Cloud (at https://chart-
        studio.plotly.com or on-premise) generates images on a server,
        where only a select number of fonts are installed and
        supported. These include "Arial", "Balto", "Courier New",
        "Droid Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
        One", "Old Standard TT", "Open Sans", "Overpass", "PT Sans
        Narrow", "Raleway", "Times New Roman".
        The 'family' property is a string and must be specified as:
          - A non-empty string
          - A tuple, list, or one-dimensional numpy array of the above
        Returns
        -------
        str|numpy.ndarray
        """
        return self["family"]
    @family.setter
    def family(self, val):
        self["family"] = val
    # familysrc
    # ---------
    @property
    def familysrc(self):
        """
        Sets the source reference on Chart Studio Cloud for family .
        The 'familysrc' property must be specified as a string or
        as a plotly.grid_objs.Column object
        Returns
        -------
        str
        """
        return self["familysrc"]
    @familysrc.setter
    def familysrc(self, val):
        self["familysrc"] = val
    # size
    # ----
    @property
    def size(self):
        """
        The 'size' property is a number and may be specified as:
          - An int or float in the interval [1, inf]
          - A tuple, list, or one-dimensional numpy array of the above
        Returns
        -------
        int|float|numpy.ndarray
        """
        return self["size"]
    @size.setter
    def size(self, val):
        self["size"] = val
    # sizesrc
    # -------
    @property
    def sizesrc(self):
        """
        Sets the source reference on Chart Studio Cloud for size .
        The 'sizesrc' property must be specified as a string or
        as a plotly.grid_objs.Column object
        Returns
        -------
        str
        """
        return self["sizesrc"]
    @sizesrc.setter
    def sizesrc(self, val):
        self["sizesrc"] = val
    # Self properties description
    # ---------------------------
    @property
    def _prop_descriptions(self):
        # NOTE: this returned string is consumed at runtime when building
        # docstrings; it is data, not documentation, so it is left verbatim.
        return """\
        color
        colorsrc
            Sets the source reference on Chart Studio Cloud for
            color .
        family
            HTML font family - the typeface that will be applied by
            the web browser. The web browser will only be able to
            apply a font if it is available on the system which it
            operates. Provide multiple font families, separated by
            commas, to indicate the preference in which to apply
            fonts if they aren't available on the system. The Chart
            Studio Cloud (at https://chart-studio.plotly.com or on-
            premise) generates images on a server, where only a
            select number of fonts are installed and supported.
            These include "Arial", "Balto", "Courier New", "Droid
            Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
            One", "Old Standard TT", "Open Sans", "Overpass", "PT
            Sans Narrow", "Raleway", "Times New Roman".
        familysrc
            Sets the source reference on Chart Studio Cloud for
            family .
        size
        sizesrc
            Sets the source reference on Chart Studio Cloud for
            size .
        """
    def __init__(
        self,
        arg=None,
        color=None,
        colorsrc=None,
        family=None,
        familysrc=None,
        size=None,
        sizesrc=None,
        **kwargs
    ):
        """
        Construct a new Font object
        Sets the font used in hover labels.
        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of
            :class:`plotly.graph_objs.heatmap.hoverlabel.Font`
        color
        colorsrc
            Sets the source reference on Chart Studio Cloud for
            color .
        family
            HTML font family - the typeface that will be applied by
            the web browser. The web browser will only be able to
            apply a font if it is available on the system which it
            operates. Provide multiple font families, separated by
            commas, to indicate the preference in which to apply
            fonts if they aren't available on the system. The Chart
            Studio Cloud (at https://chart-studio.plotly.com or on-
            premise) generates images on a server, where only a
            select number of fonts are installed and supported.
            These include "Arial", "Balto", "Courier New", "Droid
            Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
            One", "Old Standard TT", "Open Sans", "Overpass", "PT
            Sans Narrow", "Raleway", "Times New Roman".
        familysrc
            Sets the source reference on Chart Studio Cloud for
            family .
        size
        sizesrc
            Sets the source reference on Chart Studio Cloud for
            size .
        Returns
        -------
        Font
        """
        super(Font, self).__init__("font")
        # Internal construction path: adopt the given parent and skip
        # validation entirely.
        if "_parent" in kwargs:
            self._parent = kwargs["_parent"]
            return
        # Validate arg
        # ------------
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            arg = _copy.copy(arg)
        else:
            raise ValueError(
                """\
The first argument to the plotly.graph_objs.heatmap.hoverlabel.Font
constructor must be a dict or
an instance of :class:`plotly.graph_objs.heatmap.hoverlabel.Font`"""
            )
        # Handle skip_invalid
        # -------------------
        self._skip_invalid = kwargs.pop("skip_invalid", False)
        self._validate = kwargs.pop("_validate", True)
        # Populate data dict with properties
        # ----------------------------------
        # For each property: an explicit keyword argument overrides the
        # value found in `arg`.
        _v = arg.pop("color", None)
        _v = color if color is not None else _v
        if _v is not None:
            self["color"] = _v
        _v = arg.pop("colorsrc", None)
        _v = colorsrc if colorsrc is not None else _v
        if _v is not None:
            self["colorsrc"] = _v
        _v = arg.pop("family", None)
        _v = family if family is not None else _v
        if _v is not None:
            self["family"] = _v
        _v = arg.pop("familysrc", None)
        _v = familysrc if familysrc is not None else _v
        if _v is not None:
            self["familysrc"] = _v
        _v = arg.pop("size", None)
        _v = size if size is not None else _v
        if _v is not None:
            self["size"] = _v
        _v = arg.pop("sizesrc", None)
        _v = sizesrc if sizesrc is not None else _v
        if _v is not None:
            self["sizesrc"] = _v
        # Process unknown kwargs
        # ----------------------
        self._process_kwargs(**dict(arg, **kwargs))
        # Reset skip_invalid
        # ------------------
        self._skip_invalid = False
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@graph_objs@heatmap@hoverlabel@_font.py@.PATH_END.py
|
{
"filename": "project.py",
"repo_name": "amusecode/amuse",
"repo_path": "amuse_extracted/amuse-main/src/amuse/support/project.py",
"type": "Python"
}
|
"""
Some general functions useful for AMUSE science projects
"""
import os
import os.path
import shutil
import pickle
def new_working_directory(script_filename=None, sub_directories=()):
    """
    Create a new, sequentially numbered run directory ("run_000",
    "run_001", ...) in the current directory and change into it, for
    storing all your simulation output.  Invoke it with:
        new_working_directory(__file__)
    to copy the current version of your script to this new directory for
    book-keeping purposes.

    :param script_filename: optional path of the calling script, copied
        into the new directory when given.
    :param sub_directories: iterable of sub-directory names to create
        inside the new directory.  (An immutable default is used to
        avoid the shared-mutable-default pitfall.)
    """
    run_index = 0
    while os.path.exists("run_{0:=03}".format(run_index)):
        run_index += 1
    new_directory = "run_{0:=03}".format(run_index)
    os.mkdir(new_directory)
    print("Created new directory for output:", new_directory)
    for sub_directory in sub_directories:
        os.mkdir(os.path.join(new_directory, sub_directory))
    if script_filename is not None:
        shutil.copy(script_filename, new_directory)
    # Change into the new directory last, so all relative paths above
    # are resolved against the original working directory.
    os.chdir(new_directory)
def store_results_in_file(results, datafile):
    """Serialise *results* to the file *datafile* using pickle."""
    with open(datafile, 'wb') as sink:
        pickle.dump(results, sink)
def load_results_from_file(datafile):
    """Return the object previously pickled to the file *datafile*."""
    with open(datafile, 'rb') as source:
        return pickle.load(source)
|
amusecodeREPO_NAMEamusePATH_START.@amuse_extracted@amuse-main@src@amuse@support@project.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/barpolar/unselected/marker/__init__.py",
"type": "Python"
}
|
import sys
from typing import TYPE_CHECKING

# Eager imports for static type checkers and for Python < 3.7, which
# lacks module-level __getattr__ (PEP 562); otherwise defer the imports
# until first attribute access to keep plotly's import time down.
if sys.version_info < (3, 7) or TYPE_CHECKING:
    from ._opacity import OpacityValidator
    from ._color import ColorValidator
else:
    from _plotly_utils.importers import relative_import

    # relative_import supplies lazy __getattr__/__dir__ hooks that load
    # each validator module only when first accessed.
    __all__, __getattr__, __dir__ = relative_import(
        __name__, [], ["._opacity.OpacityValidator", "._color.ColorValidator"]
    )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@barpolar@unselected@marker@__init__.py@.PATH_END.py
|
{
"filename": "uvottemplating.py",
"repo_name": "PaulKuin/uvotpy",
"repo_path": "uvotpy_extracted/uvotpy-master/uvotpy/uvottemplating.py",
"type": "Python"
}
|
#!/usr/bin/env python
# -*- coding: iso-8859-15 -*-
#
# This software was written by N.P.M. Kuin (Paul Kuin)
# Copyright N.P.M. Kuin
# All rights reserved
# This software is licenced under a 3-clause BSD style license
#
#Redistribution and use in source and binary forms, with or without
#modification, are permitted provided that the following conditions are met:
#
#Redistributions of source code must retain the above copyright notice,
#this list of conditions and the following disclaimer.
#
#Redistributions in binary form must reproduce the above copyright notice,
#this list of conditions and the following disclaimer in the documentation
#and/or other materials provided with the distribution.
#
#Neither the name of the University College London nor the names
#of the code contributors may be used to endorse or promote products
#derived from this software without specific prior written permission.
#
#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
#AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
#THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
#PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
#CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
#EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
#PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
#OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
#WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
#OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
#ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# part of uvotpy (c) 2009-2023,
# this code September 2023 N.P.M. Kuin
import numpy as np
from uvotpy import uvotspec, uvotgetspec

# get uvotSpec processed parameters dictionary:
# Module-wide configuration of the uvotgetspec extraction code used below.
uvotgetspec.give_new_result=True  # make getSpec return its full result dictionary
uvotgetspec.trackwidth=1.5  # extraction track width setting; units per uvotgetspec -- TODO confirm
class withTemplateBackground(object):
    """
    Use a later observation ("template") as replacement for the background
    extraction in the final resulting spectrum.

    No accommodation made yet for summed spectra input (needs setting
    parameter in extract_*).

    The template and spectrum should be taken at the same roll angle and
    close to the same detector location if clocked mode was used.
    """
    def __init__(self, spectra=None, templates=None, pos=None, extsp=1, obsidsp="",
                 obsidtempl="", exttempl=1, redshift=None, chatter=1):
        """
        :param spectra: list of spectrum PHA files
        :param templates: list of template PHA files
        :param pos: astropy coordinates of the source
        :param extsp: FITS extension of the spectrum
        :param obsidsp: observation ID of the spectrum
        :param obsidtempl: observation ID of the template
        :param exttempl: FITS extension of the template
        :param redshift: source redshift (used to locate the Lyman break)
        :param chatter: verbosity level
        """
        # input parameters, note all spectra, templates to be PHA files
        # (mutable default arguments replaced by None sentinels)
        self.spectra = [] if spectra is None else spectra
        self.templates = [] if templates is None else templates
        self.pos = pos # [astropy coordinates] of source
        self.obsidsp = obsidsp
        self.obsidtempl = obsidtempl
        self.extsp = extsp
        self.exttempl = exttempl
        self.indir = "./"
        self.redshift = redshift
        self.chatter = chatter
        # used when writing output; was previously undefined -- TODO confirm default
        self.clobber = False
        # process variables, parameters
        self.spResult = None
        self.tmplResult = None
        #self.summed_sp = False # fit header is different for summed
        #self.summed_templ = False # ditto, for spectrum, template
        self.specimg = None
        self.templimg = None
        self.spec_exp = 50.
        self.templ_exp = 0.
        self.spec_bkg = 0. # background found in first order
        self.templ_bkg = 0.
        self.dimsp = (-400,1150)
        self.dimtempl = (-400,1150)
        self.bkg_scale = 1. # ratio backgrounds [-400,800] zero at anchor (interp1d)
        #self.ZO_scale=1. # ratio exposure times
        #self.spectrum=None
        self.template = None
        self.anchor_templimg = []
        self.anchor_specimg = []
        self.movexy = 0,0 # return from manual alignment
        self.yloc_sp = 100
        self.widthsp = 15 # pix width for optimal extraction
        #self.specimg_aligned=None
        #self.templimg_aligned=None
        # spectral extraction parameters
        self.offsetlimit = [100,0.2]
        self.background_lower = [None,None]
        self.background_upper = [None,None]
        # Prep so that there is just one spectrum and one template PHA file
        self.c = None # contour

    def auto_template(self,):
        """
        template,Y = auto_template()
        run all steps in sequence
        1. sum spectra if needed, before using this
        2. run extract * to get headers, extracted image, ank_c, slice param.
           get *_exp for scaling; set *_bkg found near anchor; scale_factor
           create specimg, templimg from extracted image extension
        3. note anchor position in specimg, tempimg
        4. align
        5. scale templ
        6. embed to get correctly sized template
        7. extract spectrum using template (writes output)
        8. return template array and full output Y
        """
        self.set_parameter('offsetlimit',[100,0.1]) # easier matching
        self.extract_spectrum()
        self.extract_template()
        self.match_slice()
        self.dragit(spimg=self.spimg[self.dimsp[0]:self.dimsp[1]],
            tempimg=self.templimg[self.dimtempl[0]:self.dimtempl[1]])
        self.scale_template()
        self.embed_template() # match with spimg size
        # now extract the spectrum with the template as background:
        self.yloc_spectrum() # find the y-coordinate of the spectrum
        # for the following, set uvotgetspec.trackwidth using the width of the spectrum
        # from yloc_spectrum
        self.Y = uvotgetspec.curved_extraction( # quick draft
            self.spimg[:,self.dimsp[0]:self.dimsp[1]],
            self.tmplResult['ank_c'],
            self.spResult['ank_c']-self.dimsp[0],
            self.spResult['wheelpos'],
            expmap=self.spResult['exposure'], offset=0.,
            anker0=None, anker2=None, anker3=None, angle=None,
            offsetlimit=[self.yloc_sp,0.2],
            background_lower=[None,None],
            background_upper=[None,None],
            background_template=self.template,
            trackonly=False,
            trackfull=False,
            caldefault=True,
            curved="noupdate", \
            poly_1=None,poly_2=None,poly_3=None,
            set_offset=False,
            composite_fit=True,
            test=None, chatter=0,
            skip_field_sources=True,\
            predict_second_order=False,
            ZOpos=None,
            outfull=True, # check what is needed by I/O module
            msg='',
            fit_second=False,
            fit_third=False,
            C_1=self.spResult['C_1'] ,C_2=None,dist12=None,
            dropout_mask=None)
        fitorder, cp2, (coef0,coef1,coef2,coef3), (bg_zeroth,bg_first, bg_second,bg_third), \
            (borderup,borderdown), apercorr, expospec, msg, curved = self.Y
        # write output
        # first update fitorder in "Yout, etc..." in spResult; spResult['eff_area1']
        # should be populated.
        from uvotpy import uvotio  # local import; uvotio is not imported at module level
        outfile = "uvottemplating.output.pha"
        # NOTE(review): RA, DEC, filestub and use_lenticular_image are not
        # defined anywhere in this class (draft code) -- this call raises
        # NameError until they are supplied (RA/DEC presumably from
        # self.pos).  TODO: fix before use.
        F = uvotio.writeSpectrum(RA,DEC,filestub,
            self.extsp, self.Y,
            fileoutstub=outfile,
            arf1=None, arf2=None,
            fit_second=False,
            write_rmffile=False, fileversion=2,
            used_lenticular=use_lenticular_image,
            history=self.spResult['msg'],
            calibration_mode=uvotgetspec.calmode,
            chatter=self.chatter,
            clobber=self.clobber )
        #xx = self.extract_spectrum(background_template=self.template,wr_outfile=True,
        #    interactive=True, plotit=True) does not work, requires whole image

    def yloc_spectrum(self):
        """
        quick draft
        This is input to curved_extraction of spimg, using template after
        matching, scaling, etc.
        """
        net = self.spimg - self.template
        # define range where spectrum is
        if self.redshift is None:
            x1 = self.spResult['ank_c'][1] - self.dimsp[0]
            x2 = np.min([ self.dimsp[1], 600+x1])
            x1 = np.max([x1-200, 0 ])
        else:
            # find where spectrum starts: redshifted Lyman break
            wbreak = 912.*(1+self.redshift)
            disp = self.spResult['C_1']
            # fixed SyntaxError (was "uvotgetspec.def pix_from_wave(...)")
            # NOTE(review): may need int() depending on pix_from_wave's return type
            x1 = uvotgetspec.pix_from_wave(disp, wbreak)
            # and ends
            x2 = self.dimsp[1]
        fsum = net[:,x1:x2].sum(1)
        # now find the y-peak in fsum
        from scipy.signal import find_peaks
        cont = fsum.std()
        # find_peaks returns (indices, properties); currently unused below
        peaks = find_peaks(fsum,cont)
        #this needs testing...
        self.yloc_sp = 100 # placeholder (see uvotspec.peakfinder
        self.widthsp = 15 # pix width for optimal extraction

    def embed_template(self,):
        """Drop the (aligned, scaled) template into an array of the same
        size as the spectrum background image."""
        # NOTE: sbgimg aliases self.spec_bkg, which is modified in place below
        sbgimg = self.spec_bkg
        sanky,sankx,sxstart,sxend = self.spResult['ank_c']
        # fixed copy/paste bug: template anchors must come from tmplResult
        # (was self.spResult; the t* variables are currently unused)
        tanky,tankx,txstart,txend = ank_c = self.tmplResult['ank_c']
        sdim = self.dimsp #should be same as:
        tdim = self.dimtempl
        # match anchors - this should have been done already
        # da = sankx - tankx # how the anchors are shifted in spimg/bgimg and tmplimg
        # find limits x1,x2 for drop-in
        # so typically, x1 = sankx-sdim[0] for start embedding, x2=tdim[1]-tdim[0]+x1 for length
        x1 = int(sdim[0]) # bed
        a1 = int(0) # templ
        a2 = int( np.min([sdim[1]-sdim[0],sxend]) ) # crop template if it extends too far
        x2 = a2 - a1 +x1 # must match length and offset
        print (f"x1={x1}, x2={x2} \n")
        sbgimg[:,x1:x2] = self.template[:,a1:a2]
        # update template
        self.template = sbgimg

    def extract_spectrum(self,background_template=None,wr_outfile=False,
            interactive=False,plotit=False): # needs getspec params.
        """Run uvotgetspec.getSpec() on the spectrum observation and store
        the extracted image, exposure, anchor, dimensions and background."""
        self.spResult=uvotgetspec.getSpec(self.pos.ra.deg,self.pos.dec.deg,self.obsidsp, self.extsp,
            indir=self.indir+self.obsidsp+"/uvot/image/", wr_outfile=wr_outfile,
            outfile=None, calfile=None, fluxcalfile=None, use_lenticular_image=True,
            offsetlimit=self.offsetlimit, anchor_offset=None, anchor_position=[None,None],
            background_lower=self.background_lower, background_upper=self.background_upper,
            background_template=background_template, fixed_angle=None, spextwidth=13, curved="update",
            fit_second=False, predict2nd=True, skip_field_src=False,
            optimal_extraction=False, catspec=None,write_RMF=uvotgetspec.write_RMF,
            get_curve=None,fit_sigmas=True,get_sigma_poly=False,
            lfilt1=None, lfilt1_ext=None, lfilt2=None, lfilt2_ext=None,
            wheelpos=None, interactive=interactive, sumimage=None, set_maglimit=None,
            plot_img=plotit, plot_raw=plotit, plot_spec=plotit, zoom=True, highlight=False,
            uvotgraspcorr_on=True, update_pnt=True, clobber=False, chatter=self.chatter )
        self.spimg=self.spResult['extimg']
        hdr=self.spResult["hdr"]
        self.spec_exp= hdr['exposure']
        anky,ankx,xstart,xend = ank_c= self.spResult['ank_c']
        self.anchor_specimg = ankx
        self.dimsp = dimL,dimu = self.set_dims(xstart,xend)
        bg, bg1, bg2, bgsig, bgimg, bg_limits, \
        (bg1_good, bg1_dis, bg1_dis_good, bg2_good, bg2_dis, bg2_dis_good, bgimg_lin) \
            = uvotgetspec.findBackground(self.spimg,background_lower=[None,None],
            background_upper=[None,None],yloc_spectrum=anky, chatter=0)
        self.spec_bkg = bgimg

    def extract_template(self,):
        """Run uvotgetspec.getSpec() on the template observation.

        extimg = extracted image
        ank_c = array( [ X pos anchor, Y pos anchor, start position spectrum,
            end spectrum]) in extimg
        anchor1 = anchor position in original image in det coordinates
        """
        self.tmplResult=uvotgetspec.getSpec(self.pos.ra.deg,self.pos.dec.deg,self.obsidtempl, self.exttempl,
            indir=self.indir+self.obsidtempl+"/uvot/image/", wr_outfile=False,
            outfile=None, calfile=None, fluxcalfile=None, use_lenticular_image=True,
            offsetlimit=self.offsetlimit, anchor_offset=None, anchor_position=[None,None],
            background_lower=self.background_lower, background_upper=self.background_upper,
            background_template=None, fixed_angle=None, spextwidth=13, curved="update",
            fit_second=False, predict2nd=True, skip_field_src=False,
            optimal_extraction=False, catspec=None,write_RMF=uvotgetspec.write_RMF,
            get_curve=None,fit_sigmas=True,get_sigma_poly=False,
            lfilt1=None, lfilt1_ext=None, lfilt2=None, lfilt2_ext=None,
            wheelpos=None, interactive=False, sumimage=None, set_maglimit=None,
            plot_img=False, plot_raw=False, plot_spec=False, zoom=True, highlight=False,
            uvotgraspcorr_on=True, update_pnt=True, clobber=False, chatter=self.chatter )
        self.templimg = extimg = self.tmplResult['extimg']
        hdr=self.tmplResult["hdr"]
        anker = self.tmplResult['anker']
        offset = self.tmplResult['offset']
        ank_c = self.tmplResult['ank_c']
        self.templ_exp = hdr['exposure']
        anky,ankx,xstart,xend = ank_c= self.tmplResult['ank_c']
        self.anchor_templimg = ankx
        self.dimtempl = dimL,dimu = self.set_dims(xstart,xend)
        bg, bg1, bg2, bgsig, bgimg, bg_limits, \
        (bg1_good, bg1_dis, bg1_dis_good, bg2_good, bg2_dis, bg2_dis_good, bgimg_lin) \
            = uvotgetspec.findBackground(extimg,background_lower=[None,None],
            background_upper=[None,None],yloc_spectrum=anky, chatter=0)
        self.templ_bkg = bgimg

    def set_dims(self,xstart,xend):
        """Return (lower, upper) x-limits of the first order, clipped to
        the [-400, 3000] window around the anchor."""
        # length of first order with respect to ank_c[1] ?
        dlim1L=-400
        dlim1U=3000 #1150
        if (xstart > dlim1L): dlim1L = xstart
        if (xend < dlim1U): dlim1U = xend
        return dlim1L,dlim1U

    def scale_template(self,):
        """Scale the aligned template: source peaks by the exposure-time
        ratio, background regions by the background ratio.
        First run extract_*, match_slice, dragit."""
        x = self.template.copy()
        # boolean mask: True where the template is consistent with background
        qbg = (x - 2.*self.templ_bkg[:,self.dimtempl[0]:self.dimtempl[1]]) < 0.
        x[~qbg] = self.templ_bkg[:,self.dimtempl[0]:self.dimtempl[1]][~qbg] \
            * self.spec_exp/self.templ_exp # scale peaks
        x[qbg] = (self.spec_bkg[:,self.dimsp[0]:self.dimsp[1]]\
            / self.templ_bkg[:,self.dimtempl[0]:self.dimtempl[1]])[qbg] # scale the background
        self.template = x

    def match_slice(self):
        """
        now determine where the spec and templ overlap (in x)
        first run extract_*
        """
        #x anchors
        asp = self.anchor_specimg
        atp = self.anchor_templimg
        # roll templimg so that anchors match
        self.templimg = np.roll(self.templimg,int(asp-atp),axis=1)
        #dimensions in x
        sp1,sp2 = self.dimsp
        tm1,tm2 = self.dimtempl
        # ignore the wrap from the roll operation
        #
        start = np.max([sp1,tm1])
        end = np.min([sp2,tm2])
        self.dimsp = start,end
        self.dimtempl = start,end

    def dragit(self,figno=42,spimg=None, tempimg=None):
        """
        first run extract_*, match_slice
        delxy, tempimg = dragit(figno=42,spimg=<path>,tempimg=<path>)
        The output gives the shift in pixels between the initial spimg and
        the tempimg, and returns the aligned tempimg
        """
        import matplotlib.pyplot as plt
        import sys
        if isinstance(self.spimg, np.ndarray):
            spimg=self.spimg[:,self.dimsp[0]:self.dimsp[1]]
        if isinstance(self.templimg, np.ndarray):
            tempimg=self.templimg[:,self.dimtempl[0]:self.dimtempl[1]]
        fig = plt.figure(figno,figsize=[10,3])
        fig.clf()
        fig.set_facecolor('lightgreen')
        ax = fig.add_axes([0.03,0.1,0.94,0.87],)
        canvas = ax.figure.canvas
        ax.set_title("start")
        sp = ax.imshow ( np.log(spimg-np.median(spimg)+0.01),alpha=1.0,cmap='gist_rainbow' ) # ax.imshow(spimg)
        self.c = cont = ax.contour( np.log(tempimg-np.median(tempimg)*2+0.06),colors='k',lw=0.5)# ax.contour(tempimg)
        fig.show()
        newsp = DraggableContour(ax,cont)
        fig.show()
        delxy = 0,0
        try:
            ans1 = input("Do you want to adjust ? (Y/N) ").upper()
            print("answer read = ", ans1," length = ", len(ans1))
            if len(ans1) > 0:
                if ans1.upper().strip()[0] == 'Y':
                    done = False
                    while not done:
                        print('drag the contour spectrum until match and happy')
                        ax.set_title(f"... when done press key ...")
                        newsp.connect()
                        print ("connected")
                        print ("draw from black to corresponding blue feature")
                        #delxy += newsp.out_delxy()
                        ans = input("update contour?\n\n")
                        if ans.upper().strip()[0] == 'Y':
                            # update templimg
                            newsp.disconnect()
                            ax.cla()
                            delxy += newsp.out_delxy()
                            print(f"The selected shift is {newsp.delx},{newsp.dely} and will be applied when done. ")
                            tempimg = np.roll(tempimg,int(newsp.delx),axis=1)
                            tempimg = np.roll(tempimg,int(newsp.dely),axis=0)
                            print (f"changed templ img with shifts {tempimg.shape}; now plottting")
                            sp = ax.imshow ( np.log(spimg-np.median(spimg)+0.01),alpha=1.0,cmap='gist_rainbow' ) # ax.imshow(spimg)
                            self.c = ax.contour( np.log(tempimg-np.median(tempimg)*2+0.01),alpha=0.9,colors='k')
                            ax.set_title("done")
                            # fixed: Axes has no show() method; use the figure
                            fig.show()
                            done = True
                            newsp.disconnect()
                elif ans1.upper().strip()[0] == 'N':
                    done = True
                else: print(" answer Y or N ")
        # narrowed from a bare except: so KeyboardInterrupt etc. still propagate
        except Exception:
            sys.stderr.write(f"drag error: {delxy} ")
            newsp.disconnect()
        # roll the array elements of tempimg to make them line up with the spimg (wrap-arounds)
        # print update
        self.template = tempimg
        self.movexy = newsp.delx, newsp.dely

    def set_parameter(self,parametername,value):
        """Set instance attribute *parametername* to *value*.

        Uses setattr rather than the previous exec() call: it handles
        arbitrary objects safely and does not round-trip the value
        through its repr.
        """
        setattr(self, parametername, value)
class DraggableContour(object):
    """
    Drag contour img1 on image img2 until correctly lined up
    return shifts in x, y
    """
    # NOTE(review): class-level import; mpl is never referenced afterwards.
    import matplotlib as mpl
    def __init__(self, ax, contour):
        # The contour set to move; the background image stays fixed.
        self.img1 = contour # move contour over image
        # (x, y, xdata, ydata) captured at button press; None when no drag
        # is in progress.
        self.press = None
        # Accumulated total shift over all completed drags.
        self.delx = 0.0
        self.dely = 0.0
        # Shift of the drag currently in progress.
        self.incx = 0.0
        self.incy = 0.0
        self.ax = ax
        # matplotlib event-connection ids (set in connect()).
        self.cidpress = None
        self.cidrelease = None
        self.cidmotion = None
        self.cidkey = None
        # Data coordinates of the last drag's start and end, for reporting.
        self.startpos = [0,0]
        self.endpos = [0,0]
    def connect(self):
        'connect to all the events we need'
        self.cidpress = self.img1.axes.figure.canvas.mpl_connect(
            'button_press_event', self.on_press)
        self.cidrelease = self.img1.axes.figure.canvas.mpl_connect(
            'button_release_event', self.on_release)
        self.cidmotion = self.img1.axes.figure.canvas.mpl_connect(
            'motion_notify_event', self.on_motion)
        self.cidkey = self.img1.axes.figure.canvas.mpl_connect(
            'key_press_event', self.on_key)
        print("active")
    def on_press(self, event):
        'on button press we will store some data'
        # Ignore clicks outside the contour's axes.
        if event.inaxes != self.img1.axes: return
        self.press = event.x, event.y, event.xdata, event.ydata #, self.img1.get_xdata(), self.img1.get_ydata()
        print("on_press start position (%f,%e)"%(event.xdata,event.ydata))
        self.startpos = [event.xdata,event.ydata]
    def on_motion(self, event):
        'on motion we will move the spectrum if the mouse is over us'
        if self.press is None: return
        if event.inaxes != self.img1.axes: return
        #x0, y0, xpress, ypress, xdata = self.press
        x0, y0, xpress, ypress = self.press
        # Offset of the pointer from the press position, in data coords.
        dx = event.xdata - xpress
        dy = event.ydata - ypress
        self.incx = dx
        self.incy = dy
        #self.img1.set_xdata(xdata+dx)
        '''
        #the following tried to modify the data arrays in the contour thing,
        # but seems to fail to update...
        #
        xx = self.img1.collections
        nx = len(xx)
        for k in np.arange(nx): # loop over collections
            xy = xx.pop() #xx[k]
            xz = xy.properties()['segments']
            ns = len(xz)
            for gs in np.arange(ns): # loop over segments
                y = xz[gs]
                y[:,0] += dx
                y[:,1] += dy
            a = xy.properties
            a().update({'segments':xz})
            # update xy
            xy.properties = a
            xx.append(xy)
        # now we have replaced the data array with the +dx,+dy values.
        self.img1.collections = xx
        self.img1.changed() # this should do the update
        '''
        # NOTE(review): the contour itself is not actually redrawn at the
        # shifted position (see the dead code above); only the shift is
        # tracked and the canvas refreshed.
        self.ax.figure.canvas.draw()
    def on_release(self, event):
        'on release we reset the press data'
        # Fold the in-progress shift into the accumulated total.
        self.delx += self.incx
        self.dely += self.incy
        self.press = None
        self.ax.figure.canvas.draw()
        if event.inaxes == self.img1.axes:
            print("on_release end position (%f,%e)"%(event.xdata,event.ydata))
            self.endpos = [event.xdata,event.ydata]
    def on_key(self,event):
        'on press outside canvas disconnect '
        print("you pushed the |%s| key"%event.key)
        print("ignoring ...")
        # retrieve out_delxy and then execute *.disconnect()
    def disconnect(self):
        print (f"position start = {self.startpos}, end = {self.endpos}")
        print (f"movement dx={self.startpos[0]-self.endpos[0]}, dy={self.startpos[1]-self.endpos[1]}")
        'disconnect all the stored connection ids'
        self.img1.axes.figure.canvas.mpl_disconnect(self.cidpress)
        self.img1.axes.figure.canvas.mpl_disconnect(self.cidrelease)
        self.img1.axes.figure.canvas.mpl_disconnect(self.cidmotion)
        self.img1.axes.figure.canvas.mpl_disconnect(self.cidkey)
        print("disconnected")
    def out_delxy(self):
        # Accumulated (dx, dy) shift over all drags since construction.
        return self.delx,self.dely
|
PaulKuinREPO_NAMEuvotpyPATH_START.@uvotpy_extracted@uvotpy-master@uvotpy@uvottemplating.py@.PATH_END.py
|
{
"filename": "jla.py",
"repo_name": "ggalloni/cobaya",
"repo_path": "cobaya_extracted/cobaya-master/cobaya/likelihoods/sn/jla.py",
"type": "Python"
}
|
from cobaya.likelihoods.base_classes import SN
class JLA(SN):
    r"""
    Likelihood of the JLA type Ia supernova sample \cite{Betoule:2014frx}, based on
    observations obtained by the SDSS-II and SNLS collaborations.
    """
    # All behaviour is inherited from the SN base class; this subclass
    # presumably only selects the JLA dataset (via its class name and
    # associated data files) -- confirm against the SN base class.
    pass
|
ggalloniREPO_NAMEcobayaPATH_START.@cobaya_extracted@cobaya-master@cobaya@likelihoods@sn@jla.py@.PATH_END.py
|
{
"filename": "model_trace.py",
"repo_name": "simonsobs/nextline-rdb",
"repo_path": "nextline-rdb_extracted/nextline-rdb-main/src/nextline_rdb/models/model_trace.py",
"type": "Python"
}
|
from datetime import datetime
from typing import TYPE_CHECKING
from sqlalchemy import ForeignKey, UniqueConstraint
from sqlalchemy.orm import Mapped, mapped_column, relationship
from .base import Model
if TYPE_CHECKING:
from .model_prompt import Prompt
from .model_run import Run
from .model_stdout import Stdout
from .model_trace_call import TraceCall
class Trace(Model):
    """ORM model of a single trace within a run."""
    __tablename__ = 'trace'
    # Surrogate primary key.
    id: Mapped[int] = mapped_column(primary_key=True, index=True)
    trace_no: Mapped[int]  # unique in each run (enforced by __table_args__)
    state: Mapped[str]
    thread_no: Mapped[int]
    task_no: Mapped[int | None]  # optional; presumably absent for non-task threads -- confirm
    started_at: Mapped[datetime]
    ended_at: Mapped[datetime | None]  # presumably None while the trace is still running -- confirm
    # Owning run; the Run side holds the 'traces' collection.
    run_id: Mapped[int] = mapped_column(ForeignKey('run.id'))
    run: Mapped['Run'] = relationship(back_populates='traces')
    # Child rows are deleted with the trace (delete-orphan cascade)...
    trace_calls: Mapped[list['TraceCall']] = relationship(
        back_populates='trace', cascade='all, delete-orphan'
    )
    prompts: Mapped[list['Prompt']] = relationship(
        back_populates='trace', cascade='all, delete-orphan'
    )
    # ...except stdouts, which are not cascaded here.
    stdouts: Mapped[list['Stdout']] = relationship(back_populates='trace')
    __table_args__ = (UniqueConstraint('run_id', 'trace_no'),)
|
simonsobsREPO_NAMEnextline-rdbPATH_START.@nextline-rdb_extracted@nextline-rdb-main@src@nextline_rdb@models@model_trace.py@.PATH_END.py
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.